Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl3945-base.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl3945-base.c	435
1 file changed, 265 insertions, 170 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d00a80334095..0db9b79a69a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -42,7 +42,6 @@
 #include <linux/if_arp.h>
 
 #include <net/ieee80211_radiotap.h>
-#include <net/lib80211.h>
 #include <net/mac80211.h>
 
 #include <asm/div64.h>
@@ -90,7 +89,6 @@ MODULE_LICENSE("GPL");
 
 /* module parameters */
 struct iwl_mod_params iwl3945_mod_params = {
-	.num_of_queues = IWL39_NUM_QUEUES, /* Not used */
 	.sw_crypto = 1,
 	.restart_fw = 1,
 	/* the rest are 0 by default */
@@ -368,13 +366,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
 				      struct sk_buff *skb_frag,
 				      int sta_id)
 {
-	struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
 	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
 
 	switch (keyinfo->alg) {
 	case ALG_CCMP:
-		tx->sec_ctl = TX_CMD_SEC_CCM;
-		memcpy(tx->key, keyinfo->key, keyinfo->keylen);
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
 		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
 		break;
 
@@ -382,13 +380,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
 		break;
 
 	case ALG_WEP:
-		tx->sec_ctl = TX_CMD_SEC_WEP |
+		tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
 		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
 
 		if (keyinfo->keylen == 13)
-			tx->sec_ctl |= TX_CMD_SEC_KEY128;
+			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
 
-		memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen);
+		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
 
 		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
 			     "with key %d\n", info->control.hw_key->hw_key_idx);
@@ -408,12 +406,11 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
 				  struct ieee80211_tx_info *info,
 				  struct ieee80211_hdr *hdr, u8 std_id)
 {
-	struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-	__le32 tx_flags = tx->tx_flags;
+	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+	__le32 tx_flags = tx_cmd->tx_flags;
 	__le16 fc = hdr->frame_control;
-	u8 rc_flags = info->control.rates[0].flags;
 
-	tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 		tx_flags |= TX_CMD_FLG_ACK_MSK;
 		if (ieee80211_is_mgmt(fc))
@@ -426,25 +423,19 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 	}
 
-	tx->sta_id = std_id;
+	tx_cmd->sta_id = std_id;
 	if (ieee80211_has_morefrags(fc))
 		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
 
 	if (ieee80211_is_data_qos(fc)) {
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tx->tid_tspec = qc[0] & 0xf;
+		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 	} else {
 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 	}
 
-	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-		tx_flags |= TX_CMD_FLG_RTS_MSK;
-		tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-	} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-		tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-		tx_flags |= TX_CMD_FLG_CTS_MSK;
-	}
+	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
 
 	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
 		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
@@ -452,19 +443,16 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
 	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 	if (ieee80211_is_mgmt(fc)) {
 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx->timeout.pm_frame_timeout = cpu_to_le16(3);
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
 		else
-			tx->timeout.pm_frame_timeout = cpu_to_le16(2);
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
 	} else {
-		tx->timeout.pm_frame_timeout = 0;
-#ifdef CONFIG_IWLWIFI_LEDS
-		priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
-#endif
+		tx_cmd->timeout.pm_frame_timeout = 0;
 	}
 
-	tx->driver_txop = 0;
-	tx->tx_flags = tx_flags;
-	tx->next_frame_len = 0;
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
 }
 
 /*
@@ -474,7 +462,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl3945_tx_cmd *tx;
+	struct iwl3945_tx_cmd *tx_cmd;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_queue *q = NULL;
 	struct iwl_device_cmd *out_cmd;
@@ -573,9 +561,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Init first empty entry in queue's array of Tx/cmd buffers */
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
-	tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
+	tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-	memset(tx, 0, sizeof(*tx));
+	memset(tx_cmd, 0, sizeof(*tx_cmd));
 
 	/*
 	 * Set up the Tx-command (not MAC!) header.
@@ -588,7 +576,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 			INDEX_TO_SEQ(q->write_ptr)));
 
 	/* Copy MAC header from skb into command buffer */
-	memcpy(tx->hdr, hdr, hdr_len);
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
 
 
 	if (info->control.hw_key)
@@ -602,12 +590,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Total # bytes to be transmitted */
 	len = (u16)skb->len;
-	tx->len = cpu_to_le16(len);
+	tx_cmd->len = cpu_to_le16(len);
 
 	iwl_dbg_log_tx_data_frame(priv, len, hdr);
 	iwl_update_stats(priv, true, fc, len);
-	tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
-	tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
@@ -620,9 +608,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
 		     le16_to_cpu(out_cmd->hdr.sequence));
-	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
-	iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
+	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
+	iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
 			   ieee80211_hdrlen(fc));
 
 	/*
@@ -758,7 +746,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 			      u8 type)
 {
 	struct iwl_spectrum_cmd spectrum;
-	struct iwl_rx_packet *res;
+	struct iwl_rx_packet *pkt;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_SPECTRUM_MEASUREMENT_CMD,
 		.data = (void *)&spectrum,
@@ -803,18 +791,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 	if (rc)
 		return rc;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
 		rc = -EIO;
 	}
 
-	spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
+	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
 	switch (spectrum_resp_status) {
 	case 0:		/* Command will be handled */
-		if (res->u.spectrum.id != 0xff) {
+		if (pkt->u.spectrum.id != 0xff) {
 			IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
-				res->u.spectrum.id);
+				pkt->u.spectrum.id);
 			priv->measurement_status &= ~MEASUREMENT_READY;
 		}
 		priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -826,7 +814,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 		break;
 	}
 
-	dev_kfree_skb_any(cmd.reply_skb);
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return rc;
 }
@@ -835,7 +823,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
 				   struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_alive_resp *palive;
 	struct delayed_work *pwork;
 
@@ -872,7 +860,7 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
 				     struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 #endif
 
 	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -908,7 +896,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
 	u8 rate = beacon->beacon_notify_hdr.rate;
 
@@ -931,7 +919,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
 static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
 					struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
 	unsigned long status = priv->status;
 
@@ -1095,7 +1083,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
+		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -1135,8 +1123,9 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
 	unsigned long flags;
+	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
@@ -1148,10 +1137,14 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
-			priority |= __GFP_NOWARN;
+			gfp_mask |= __GFP_NOWARN;
+
+		if (priv->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
 		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
-		if (!skb) {
+		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+		if (!page) {
 			if (net_ratelimit())
 				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
@@ -1168,7 +1161,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		spin_lock_irqsave(&rxq->lock, flags);
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -1176,26 +1169,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		list_del(element);
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
-		rxb->skb = skb;
-
-		/* If radiotap head is required, reserve some headroom here.
-		 * The physical head count is a variable rx_stats->phy_count.
-		 * We reserve 4 bytes here. Plus these extra bytes, the
-		 * headroom of the physical head should be enough for the
-		 * radiotap head that iwl3945 supported. See iwl3945_rt.
-		 */
-		skb_reserve(rxb->skb, 4);
-
+		rxb->page = page;
 		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(priv->pci_dev,
-						rxb->skb->data,
-						priv->hw_params.rx_buf_size,
-						PCI_DMA_FROMDEVICE);
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
 
 		spin_lock_irqsave(&rxq->lock, flags);
+
 		list_add_tail(&rxb->list, &rxq->rx_free);
-		priv->alloc_rxb_skb++;
 		rxq->free_count++;
+		priv->alloc_rxb_page++;
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
 }
@@ -1211,14 +1196,14 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-				rxq->pool[i].real_dma_addr,
-				priv->hw_params.rx_buf_size,
-				PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_page--;
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
 	}
@@ -1226,8 +1211,8 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	/* Set us so that we have processed and used all buffers, but have
 	 * not restocked the Rx queue with fresh buffers */
 	rxq->read = rxq->write = 0;
-	rxq->free_count = 0;
 	rxq->write_actual = 0;
+	rxq->free_count = 0;
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
@@ -1260,12 +1245,14 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
 {
 	int i;
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-				rxq->pool[i].real_dma_addr,
-				priv->hw_params.rx_buf_size,
-				PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
+			priv->alloc_rxb_page--;
 		}
 	}
 
@@ -1381,7 +1368,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 	i = rxq->read;
 
 	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - priv->rxq.write_actual;
+	total_empty = r - rxq->write_actual;
 	if (total_empty < 0)
 		total_empty += RX_QUEUE_SIZE;
 
@@ -1401,10 +1388,13 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 
 		rxq->queue[i] = NULL;
 
-		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-				priv->hw_params.rx_buf_size,
+		pci_unmap_page(priv->pci_dev, rxb->page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
 				PCI_DMA_FROMDEVICE);
-		pkt = (struct iwl_rx_packet *)rxb->skb->data;
+		pkt = rxb_addr(rxb);
+
+		trace_iwlwifi_dev_rx(priv, pkt,
+			le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
 
 		/* Reclaim a command buffer only if this packet is a response
 		 * to a (driver-originated) command.
@@ -1422,44 +1412,55 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 		if (priv->rx_handlers[pkt->hdr.cmd]) {
 			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
 				get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 		} else {
 			/* No handling needed */
-			IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n",
+			IWL_DEBUG_RX(priv,
+				"r %d i %d No handler needed for %s, 0x%02x\n",
 				r, i, get_cmd_string(pkt->hdr.cmd),
 				pkt->hdr.cmd);
 		}
 
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt). Because some rx_handler might have
+		 * already taken or freed the pages.
+		 */
+
 		if (reclaim) {
-			/* Invoke any callbacks, transfer the skb to caller, and
-			 * fire off the (possibly) blocking iwl_send_cmd()
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking iwl_send_cmd()
 			 * as we reclaim the driver command queue */
-			if (rxb && rxb->skb)
+			if (rxb->page)
 				iwl_tx_cmd_complete(priv, rxb);
 			else
 				IWL_WARN(priv, "Claim null rxb?\n");
 		}
 
-		/* For now we just don't re-use anything. We can tweak this
-		 * later to try and re-use notification packets and SKBs that
-		 * fail to Rx correctly */
-		if (rxb->skb != NULL) {
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb_any(rxb->skb);
-			rxb->skb = NULL;
-		}
-
+		/* Reuse the page if possible. For notification packets and
+		 * SKBs that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
 		spin_lock_irqsave(&rxq->lock, flags);
-		list_add_tail(&rxb->list, &priv->rxq.rx_used);
+		if (rxb->page != NULL) {
+			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+				0, PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
+
 		i = (i + 1) & RX_QUEUE_MASK;
 		/* If there are a lot of unused frames,
 		 * restock the Rx queue so ucode won't assert. */
 		if (fill_rx) {
 			count++;
 			if (count >= 8) {
-				priv->rxq.read = i;
+				rxq->read = i;
 				iwl3945_rx_replenish_now(priv);
 				count = 0;
 			}
@@ -1467,7 +1468,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 	}
 
 	/* Backtrack one entry */
-	priv->rxq.read = i;
+	rxq->read = i;
 	if (fill_rx)
 		iwl3945_rx_replenish_now(priv);
 	else
@@ -1482,7 +1483,6 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
 	tasklet_kill(&priv->irq_tasklet);
 }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
 static const char *desc_lookup(int i)
 {
 	switch (i) {
@@ -1551,8 +1551,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
 			"%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
 			desc_lookup(desc), desc, time, blink1, blink2,
 			ilink1, ilink2, data1);
+		trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
+					0, blink1, blink2, ilink1, ilink2);
 	}
-
 }
 
 #define EVENT_START_OFFSET (6 * sizeof(u32))
@@ -1569,6 +1570,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
 	u32 event_size;	/* 2 u32s, or 3 u32s if timestamp recorded */
 	u32 ptr;	/* SRAM byte address of log data */
 	u32 ev, time, data; /* event log data */
+	unsigned long reg_flags;
 
 	if (num_events == 0)
 		return;
@@ -1582,25 +1584,71 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
 
 	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
 
+	/* Make sure device is powered up for SRAM reads */
+	spin_lock_irqsave(&priv->reg_lock, reg_flags);
+	iwl_grab_nic_access(priv);
+
+	/* Set starting address; reads will auto-increment */
+	_iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+	rmb();
+
 	/* "time" is actually "data" for mode 0 (no timestamp).
 	 * place event id # at far right for easier visual parsing. */
 	for (i = 0; i < num_events; i++) {
-		ev = iwl_read_targ_mem(priv, ptr);
-		ptr += sizeof(u32);
-		time = iwl_read_targ_mem(priv, ptr);
-		ptr += sizeof(u32);
+		ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+		time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
 		if (mode == 0) {
 			/* data, ev */
 			IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
+			trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
 		} else {
-			data = iwl_read_targ_mem(priv, ptr);
-			ptr += sizeof(u32);
+			data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
 			IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
+			trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
 		}
 	}
+
+	/* Allow device to power down */
+	iwl_release_nic_access(priv);
+	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
 }
 
-void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
+/**
+ * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
+ */
+static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+				    u32 num_wraps, u32 next_entry,
+				    u32 size, u32 mode)
+{
+	/*
+	 * display the newest DEFAULT_LOG_ENTRIES entries
+	 * i.e the entries just before the next ont that uCode would fill.
+	 */
+	if (num_wraps) {
+		if (next_entry < size) {
+			iwl3945_print_event_log(priv,
+					capacity - (size - next_entry),
+					size - next_entry, mode);
+			iwl3945_print_event_log(priv, 0,
+					next_entry, mode);
+		} else
+			iwl3945_print_event_log(priv, next_entry - size,
+					size, mode);
+	} else {
+		if (next_entry < size)
+			iwl3945_print_event_log(priv, 0, next_entry, mode);
+		else
+			iwl3945_print_event_log(priv, next_entry - size,
+					size, mode);
+	}
+}
+
+/* For sanity check only. Actual size is determined by uCode, typ. 512 */
+#define IWL3945_MAX_EVENT_LOG_SIZE (512)
+
+#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
+
+void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
 {
 	u32 base;	/* SRAM byte address of event log header */
 	u32 capacity;	/* event log capacity in # entries */
@@ -1621,6 +1669,18 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
 	num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
 	next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
 
+	if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) {
+		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
+			capacity, IWL3945_MAX_EVENT_LOG_SIZE);
+		capacity = IWL3945_MAX_EVENT_LOG_SIZE;
+	}
+
+	if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) {
+		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
+			next_entry, IWL3945_MAX_EVENT_LOG_SIZE);
+		next_entry = IWL3945_MAX_EVENT_LOG_SIZE;
+	}
+
 	size = num_wraps ? capacity : next_entry;
 
 	/* bail out if nothing in log */
@@ -1629,8 +1689,17 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
 		return;
 	}
 
-	IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
-		size, num_wraps);
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+		size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
+			? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+	size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
+		? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+
+	IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
+		size);
 
 	/* if uCode has wrapped back to top of log, start at the oldest entry,
 	 * i.e the next one that uCode would fill. */
@@ -1641,18 +1710,28 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
 	/* (then/else) start at top of log */
 	iwl3945_print_event_log(priv, 0, next_entry, mode);
 
-}
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+		/* if uCode has wrapped back to top of log,
+		 * start at the oldest entry,
+		 * i.e the next one that uCode would fill.
+		 */
+		if (num_wraps)
+			iwl3945_print_event_log(priv, next_entry,
+					capacity - next_entry, mode);
+
+		/* (then/else) start at top of log */
+		iwl3945_print_event_log(priv, 0, next_entry, mode);
+	} else
+		iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+					next_entry, size, mode);
 #else
-void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
-{
-}
+	iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+				next_entry, size, mode);
+#endif
 
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
 }
 
-#endif
-
 static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 {
 	u32 inta, handled = 0;
@@ -1685,6 +1764,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 	}
 #endif
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
 	 * atomic, make sure that inta covers all the interrupts that
 	 * we've discovered, even if FH interrupt came in just after
@@ -1706,8 +1787,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		spin_unlock_irqrestore(&priv->lock, flags);
-
 		return;
 	}
 
@@ -1799,7 +1878,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
 	}
 #endif
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
@@ -2158,6 +2236,14 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
 	       IWL_UCODE_API(priv->ucode_ver),
 	       IWL_UCODE_SERIAL(priv->ucode_ver));
 
+	snprintf(priv->hw->wiphy->fw_version,
+		 sizeof(priv->hw->wiphy->fw_version),
+		 "%u.%u.%u.%u",
+		 IWL_UCODE_MAJOR(priv->ucode_ver),
+		 IWL_UCODE_MINOR(priv->ucode_ver),
+		 IWL_UCODE_API(priv->ucode_ver),
+		 IWL_UCODE_SERIAL(priv->ucode_ver));
+
 	IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
 		       priv->ucode_ver);
 	IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
@@ -2458,7 +2544,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
 	priv->active_rate = priv->rates_mask;
 	priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
 
-	iwl_power_update_mode(priv, false);
+	iwl_power_update_mode(priv, true);
 
 	if (iwl_is_associated(priv)) {
 		struct iwl3945_rxon_cmd *active_rxon =
@@ -2479,7 +2565,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
 
 	iwl3945_reg_txpower_periodic(priv);
 
-	iwl3945_led_register(priv);
+	iwl_leds_init(priv);
 
 	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
 	set_bit(STATUS_READY, &priv->status);
@@ -2517,7 +2603,6 @@ static void __iwl3945_down(struct iwl_priv *priv)
 	if (!exit_pending)
 		set_bit(STATUS_EXIT_PENDING, &priv->status);
 
-	iwl3945_led_unregister(priv);
 	iwl_clear_stations_table(priv);
 
 	/* Unblock any waiting calls */
@@ -2563,23 +2648,15 @@ static void __iwl3945_down(struct iwl_priv *priv)
 			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
 				STATUS_EXIT_PENDING;
 
-	priv->cfg->ops->lib->apm_ops.reset(priv);
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	iwl3945_hw_txq_ctx_stop(priv);
 	iwl3945_hw_rxq_stop(priv);
 
-	iwl_write_prph(priv, APMG_CLK_DIS_REG,
-			APMG_CLK_VAL_DMA_CLK_RQT);
-
+	/* Power-down device's busmaster DMA clocks */
+	iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
 	udelay(5);
 
-	if (exit_pending)
-		priv->cfg->ops->lib->apm_ops.stop(priv);
-	else
-		priv->cfg->ops->lib->apm_ops.reset(priv);
+	/* Stop the device, and put it in low power state */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
 
 exit:
 	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2724,19 +2801,34 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
 	mutex_unlock(&priv->mutex);
 }
 
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change. This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
 static void iwl3945_rfkill_poll(struct work_struct *data)
 {
 	struct iwl_priv *priv =
 	    container_of(data, struct iwl_priv, rfkill_poll.work);
+	bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
+	bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
+			& CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
 
-	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
+	if (new_rfkill != old_rfkill) {
+		if (new_rfkill)
+			set_bit(STATUS_RF_KILL_HW, &priv->status);
+		else
+			clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
 
-	wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-			test_bit(STATUS_RF_KILL_HW, &priv->status));
+		IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
+				new_rfkill ? "disable radio" : "enable radio");
+	}
 
+	/* Keep this running, even if radio now enabled. This will be
+	 * cancelled in mac_start() if system decides to start again */
 	queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
 			   round_jiffies_relative(2 * HZ));
 
@@ -3152,6 +3244,8 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
 	 * no need to poll the killswitch state anymore */
 	cancel_delayed_work(&priv->rfkill_poll);
 
+	iwl_led_start(priv);
+
 	priv->is_open = 1;
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 	return 0;
@@ -3606,7 +3700,7 @@ static ssize_t show_statistics(struct device *d,
 		return -EAGAIN;
 
 	mutex_lock(&priv->mutex);
-	rc = iwl_send_statistics_request(priv, 0);
+	rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
 	mutex_unlock(&priv->mutex);
 
 	if (rc) {
@@ -3795,7 +3889,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
 	/* Clear the driver's (not device's) station table */
 	iwl_clear_stations_table(priv);
 
-	priv->data_retry_limit = -1;
 	priv->ieee_channels = NULL;
 	priv->ieee_rates = NULL;
 	priv->band = IEEE80211_BAND_2GHZ;
@@ -3862,10 +3955,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
 		BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC);
 
-	hw->wiphy->custom_regulatory = true;
-
-	/* Firmware does not support this */
-	hw->wiphy->disable_beacon_hints = true;
+	hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+			    WIPHY_FLAG_DISABLE_BEACON_HINTS;
 
 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
 	/* we create the 802.11 header and a zero-length SSID element */
@@ -3982,13 +4073,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	 */
 	spin_lock_init(&priv->reg_lock);
 
-	/* amp init */
-	err = priv->cfg->ops->lib->apm_ops.init(priv);
-	if (err < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out_iounmap;
-	}
-
 	/***********************
 	 * 4. Read EEPROM
 	 * ********************/
@@ -4054,6 +4138,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 		&priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
 	iwl3945_setup_deferred_work(priv);
 	iwl3945_setup_rx_handlers(priv);
+	iwl_power_initialize(priv);
 
 	/*********************************
 	 * 8. Setup and Register mac80211
@@ -4124,6 +4209,15 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
 		iwl3945_down(priv);
 	}
 
+	/*
+	 * Make sure device is reset to low power before unloading driver.
+	 * This may be redundant with iwl_down(), but there are paths to
+	 * run iwl_down() without calling apm_ops.stop(), and there are
+	 * paths to avoid running iwl_down() at all before leaving driver.
+	 * This (inexpensive) call *makes sure* device is reset.
+	 */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
+
 	/* make sure we flush any pending irq or
 	 * tasklet for the driver
 	 */
@@ -4226,18 +4320,19 @@ static void __exit iwl3945_exit(void)
 
 MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
 
-module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444);
+module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444);
+module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
 MODULE_PARM_DESC(swcrypto,
 		"using software crypto (default 1 [software])\n");
 #ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwl_debug_level, uint, 0644);
+module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "debug output mask");
 #endif
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444);
+module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
+		int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
-module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, 0444);
+module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
 MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
 
 module_exit(iwl3945_exit);