aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl3945-base.c
diff options
context:
space:
mode:
authorZhu Yi <yi.zhu@intel.com>2009-10-09 05:19:45 -0400
committerJohn W. Linville <linville@tuxdriver.com>2009-10-27 16:48:06 -0400
commit2f301227a1ede57504694e1f64839839f5737cac (patch)
treec148ca6c3409f5f8fed4455fba3a78fe31469135 /drivers/net/wireless/iwlwifi/iwl3945-base.c
parentae751bab9f55c3152ebf713c89a4fb6f439c2575 (diff)
iwlwifi: use paged Rx
This switches the iwlwifi driver to use paged skb instead of linear skb for the Rx buffer. This relieves some Rx buffer allocation pressure on the memory subsystem. Currently iwlwifi (4K for 3945) requests 8K bytes for the Rx buffer. Due to the trailing skb_shared_info in skb->data, alloc_skb() will do the next-order allocation, which is 16K bytes. This is suboptimal and more likely to fail when the system is under memory pressure. Switching to a paged Rx skb lets us allocate the RXB directly with alloc_pages(), so that only an order-1 allocation is required. It also adjusts the area protected by the spin_lock (with IRQ disabled) in the tasklet, because a tasklet is guaranteed to run on only one CPU and the new unprotected code can be preempted by the IRQ handler. This saves us from spawning another workqueue to make skb_linearize/__pskb_pull_tail happy (which cannot be called in hard irq context). Finally, mac80211 doesn't support paged Rx yet. So we linearize the skb for all management frames and for data frames that require software decryption or defragmentation before they are handed to mac80211. For all other frames, we __pskb_pull_tail 64 bytes into the linear area of the skb for mac80211 to handle them properly. Signed-off-by: Zhu Yi <yi.zhu@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl3945-base.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c120
1 files changed, 59 insertions, 61 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 515f29b8a7a7..5977a57a234c 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -745,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
745 u8 type) 745 u8 type)
746{ 746{
747 struct iwl_spectrum_cmd spectrum; 747 struct iwl_spectrum_cmd spectrum;
748 struct iwl_rx_packet *res; 748 struct iwl_rx_packet *pkt;
749 struct iwl_host_cmd cmd = { 749 struct iwl_host_cmd cmd = {
750 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 750 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
751 .data = (void *)&spectrum, 751 .data = (void *)&spectrum,
@@ -790,18 +790,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
790 if (rc) 790 if (rc)
791 return rc; 791 return rc;
792 792
793 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 793 pkt = (struct iwl_rx_packet *)cmd.reply_page;
794 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 794 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
795 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); 795 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
796 rc = -EIO; 796 rc = -EIO;
797 } 797 }
798 798
799 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); 799 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
800 switch (spectrum_resp_status) { 800 switch (spectrum_resp_status) {
801 case 0: /* Command will be handled */ 801 case 0: /* Command will be handled */
802 if (res->u.spectrum.id != 0xff) { 802 if (pkt->u.spectrum.id != 0xff) {
803 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", 803 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
804 res->u.spectrum.id); 804 pkt->u.spectrum.id);
805 priv->measurement_status &= ~MEASUREMENT_READY; 805 priv->measurement_status &= ~MEASUREMENT_READY;
806 } 806 }
807 priv->measurement_status |= MEASUREMENT_ACTIVE; 807 priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -813,7 +813,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
813 break; 813 break;
814 } 814 }
815 815
816 dev_kfree_skb_any(cmd.reply_skb); 816 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
817 817
818 return rc; 818 return rc;
819} 819}
@@ -822,7 +822,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
822static void iwl3945_rx_reply_alive(struct iwl_priv *priv, 822static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
823 struct iwl_rx_mem_buffer *rxb) 823 struct iwl_rx_mem_buffer *rxb)
824{ 824{
825 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 825 struct iwl_rx_packet *pkt = rxb_addr(rxb);
826 struct iwl_alive_resp *palive; 826 struct iwl_alive_resp *palive;
827 struct delayed_work *pwork; 827 struct delayed_work *pwork;
828 828
@@ -859,7 +859,7 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
859 struct iwl_rx_mem_buffer *rxb) 859 struct iwl_rx_mem_buffer *rxb)
860{ 860{
861#ifdef CONFIG_IWLWIFI_DEBUG 861#ifdef CONFIG_IWLWIFI_DEBUG
862 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 862 struct iwl_rx_packet *pkt = rxb_addr(rxb);
863#endif 863#endif
864 864
865 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 865 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -895,7 +895,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
895 struct iwl_rx_mem_buffer *rxb) 895 struct iwl_rx_mem_buffer *rxb)
896{ 896{
897#ifdef CONFIG_IWLWIFI_DEBUG 897#ifdef CONFIG_IWLWIFI_DEBUG
898 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 898 struct iwl_rx_packet *pkt = rxb_addr(rxb);
899 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 899 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
900 u8 rate = beacon->beacon_notify_hdr.rate; 900 u8 rate = beacon->beacon_notify_hdr.rate;
901 901
@@ -918,7 +918,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
918static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, 918static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
919 struct iwl_rx_mem_buffer *rxb) 919 struct iwl_rx_mem_buffer *rxb)
920{ 920{
921 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 921 struct iwl_rx_packet *pkt = rxb_addr(rxb);
922 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 922 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
923 unsigned long status = priv->status; 923 unsigned long status = priv->status;
924 924
@@ -1082,7 +1082,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1082 list_del(element); 1082 list_del(element);
1083 1083
1084 /* Point to Rx buffer via next RBD in circular buffer */ 1084 /* Point to Rx buffer via next RBD in circular buffer */
1085 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr); 1085 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
1086 rxq->queue[rxq->write] = rxb; 1086 rxq->queue[rxq->write] = rxb;
1087 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 1087 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
1088 rxq->free_count--; 1088 rxq->free_count--;
@@ -1122,7 +1122,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1122 struct iwl_rx_queue *rxq = &priv->rxq; 1122 struct iwl_rx_queue *rxq = &priv->rxq;
1123 struct list_head *element; 1123 struct list_head *element;
1124 struct iwl_rx_mem_buffer *rxb; 1124 struct iwl_rx_mem_buffer *rxb;
1125 struct sk_buff *skb; 1125 struct page *page;
1126 unsigned long flags; 1126 unsigned long flags;
1127 1127
1128 while (1) { 1128 while (1) {
@@ -1136,9 +1136,13 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1136 1136
1137 if (rxq->free_count > RX_LOW_WATERMARK) 1137 if (rxq->free_count > RX_LOW_WATERMARK)
1138 priority |= __GFP_NOWARN; 1138 priority |= __GFP_NOWARN;
1139
1140 if (priv->hw_params.rx_page_order > 0)
1141 priority |= __GFP_COMP;
1142
1139 /* Alloc a new receive buffer */ 1143 /* Alloc a new receive buffer */
1140 skb = alloc_skb(priv->hw_params.rx_buf_size, priority); 1144 page = alloc_pages(priority, priv->hw_params.rx_page_order);
1141 if (!skb) { 1145 if (!page) {
1142 if (net_ratelimit()) 1146 if (net_ratelimit())
1143 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 1147 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1144 if ((rxq->free_count <= RX_LOW_WATERMARK) && 1148 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
@@ -1155,7 +1159,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1155 spin_lock_irqsave(&rxq->lock, flags); 1159 spin_lock_irqsave(&rxq->lock, flags);
1156 if (list_empty(&rxq->rx_used)) { 1160 if (list_empty(&rxq->rx_used)) {
1157 spin_unlock_irqrestore(&rxq->lock, flags); 1161 spin_unlock_irqrestore(&rxq->lock, flags);
1158 dev_kfree_skb_any(skb); 1162 __free_pages(page, priv->hw_params.rx_page_order);
1159 return; 1163 return;
1160 } 1164 }
1161 element = rxq->rx_used.next; 1165 element = rxq->rx_used.next;
@@ -1163,26 +1167,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1163 list_del(element); 1167 list_del(element);
1164 spin_unlock_irqrestore(&rxq->lock, flags); 1168 spin_unlock_irqrestore(&rxq->lock, flags);
1165 1169
1166 rxb->skb = skb; 1170 rxb->page = page;
1167
1168 /* If radiotap head is required, reserve some headroom here.
1169 * The physical head count is a variable rx_stats->phy_count.
1170 * We reserve 4 bytes here. Plus these extra bytes, the
1171 * headroom of the physical head should be enough for the
1172 * radiotap head that iwl3945 supported. See iwl3945_rt.
1173 */
1174 skb_reserve(rxb->skb, 4);
1175
1176 /* Get physical address of RB/SKB */ 1171 /* Get physical address of RB/SKB */
1177 rxb->real_dma_addr = pci_map_single(priv->pci_dev, 1172 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1178 rxb->skb->data, 1173 PAGE_SIZE << priv->hw_params.rx_page_order,
1179 priv->hw_params.rx_buf_size, 1174 PCI_DMA_FROMDEVICE);
1180 PCI_DMA_FROMDEVICE);
1181 1175
1182 spin_lock_irqsave(&rxq->lock, flags); 1176 spin_lock_irqsave(&rxq->lock, flags);
1177
1183 list_add_tail(&rxb->list, &rxq->rx_free); 1178 list_add_tail(&rxb->list, &rxq->rx_free);
1184 priv->alloc_rxb_skb++;
1185 rxq->free_count++; 1179 rxq->free_count++;
1180 priv->alloc_rxb_page++;
1181
1186 spin_unlock_irqrestore(&rxq->lock, flags); 1182 spin_unlock_irqrestore(&rxq->lock, flags);
1187 } 1183 }
1188} 1184}
@@ -1198,14 +1194,14 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1198 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 1194 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1199 /* In the reset function, these buffers may have been allocated 1195 /* In the reset function, these buffers may have been allocated
1200 * to an SKB, so we need to unmap and free potential storage */ 1196 * to an SKB, so we need to unmap and free potential storage */
1201 if (rxq->pool[i].skb != NULL) { 1197 if (rxq->pool[i].page != NULL) {
1202 pci_unmap_single(priv->pci_dev, 1198 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1203 rxq->pool[i].real_dma_addr, 1199 PAGE_SIZE << priv->hw_params.rx_page_order,
1204 priv->hw_params.rx_buf_size, 1200 PCI_DMA_FROMDEVICE);
1205 PCI_DMA_FROMDEVICE); 1201 priv->alloc_rxb_page--;
1206 priv->alloc_rxb_skb--; 1202 __free_pages(rxq->pool[i].page,
1207 dev_kfree_skb(rxq->pool[i].skb); 1203 priv->hw_params.rx_page_order);
1208 rxq->pool[i].skb = NULL; 1204 rxq->pool[i].page = NULL;
1209 } 1205 }
1210 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1206 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1211 } 1207 }
@@ -1213,8 +1209,8 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1213 /* Set us so that we have processed and used all buffers, but have 1209 /* Set us so that we have processed and used all buffers, but have
1214 * not restocked the Rx queue with fresh buffers */ 1210 * not restocked the Rx queue with fresh buffers */
1215 rxq->read = rxq->write = 0; 1211 rxq->read = rxq->write = 0;
1216 rxq->free_count = 0;
1217 rxq->write_actual = 0; 1212 rxq->write_actual = 0;
1213 rxq->free_count = 0;
1218 spin_unlock_irqrestore(&rxq->lock, flags); 1214 spin_unlock_irqrestore(&rxq->lock, flags);
1219} 1215}
1220 1216
@@ -1247,12 +1243,14 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1247{ 1243{
1248 int i; 1244 int i;
1249 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 1245 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1250 if (rxq->pool[i].skb != NULL) { 1246 if (rxq->pool[i].page != NULL) {
1251 pci_unmap_single(priv->pci_dev, 1247 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1252 rxq->pool[i].real_dma_addr, 1248 PAGE_SIZE << priv->hw_params.rx_page_order,
1253 priv->hw_params.rx_buf_size, 1249 PCI_DMA_FROMDEVICE);
1254 PCI_DMA_FROMDEVICE); 1250 __free_pages(rxq->pool[i].page,
1255 dev_kfree_skb(rxq->pool[i].skb); 1251 priv->hw_params.rx_page_order);
1252 rxq->pool[i].page = NULL;
1253 priv->alloc_rxb_page--;
1256 } 1254 }
1257 } 1255 }
1258 1256
@@ -1388,10 +1386,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1388 1386
1389 rxq->queue[i] = NULL; 1387 rxq->queue[i] = NULL;
1390 1388
1391 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 1389 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1392 priv->hw_params.rx_buf_size, 1390 PAGE_SIZE << priv->hw_params.rx_page_order,
1393 PCI_DMA_FROMDEVICE); 1391 PCI_DMA_FROMDEVICE);
1394 pkt = (struct iwl_rx_packet *)rxb->skb->data; 1392 pkt = rxb_addr(rxb);
1395 1393
1396 trace_iwlwifi_dev_rx(priv, pkt, 1394 trace_iwlwifi_dev_rx(priv, pkt,
1397 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 1395 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
@@ -1416,16 +1414,17 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1416 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1414 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1417 } else { 1415 } else {
1418 /* No handling needed */ 1416 /* No handling needed */
1419 IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n", 1417 IWL_DEBUG_RX(priv,
1418 "r %d i %d No handler needed for %s, 0x%02x\n",
1420 r, i, get_cmd_string(pkt->hdr.cmd), 1419 r, i, get_cmd_string(pkt->hdr.cmd),
1421 pkt->hdr.cmd); 1420 pkt->hdr.cmd);
1422 } 1421 }
1423 1422
1424 if (reclaim) { 1423 if (reclaim) {
1425 /* Invoke any callbacks, transfer the skb to caller, and 1424 /* Invoke any callbacks, transfer the buffer to caller,
1426 * fire off the (possibly) blocking iwl_send_cmd() 1425 * and fire off the (possibly) blocking iwl_send_cmd()
1427 * as we reclaim the driver command queue */ 1426 * as we reclaim the driver command queue */
1428 if (rxb && rxb->skb) 1427 if (rxb && rxb->page)
1429 iwl_tx_cmd_complete(priv, rxb); 1428 iwl_tx_cmd_complete(priv, rxb);
1430 else 1429 else
1431 IWL_WARN(priv, "Claim null rxb?\n"); 1430 IWL_WARN(priv, "Claim null rxb?\n");
@@ -1434,10 +1433,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1434 /* For now we just don't re-use anything. We can tweak this 1433 /* For now we just don't re-use anything. We can tweak this
1435 * later to try and re-use notification packets and SKBs that 1434 * later to try and re-use notification packets and SKBs that
1436 * fail to Rx correctly */ 1435 * fail to Rx correctly */
1437 if (rxb->skb != NULL) { 1436 if (rxb->page != NULL) {
1438 priv->alloc_rxb_skb--; 1437 priv->alloc_rxb_page--;
1439 dev_kfree_skb_any(rxb->skb); 1438 __free_pages(rxb->page, priv->hw_params.rx_page_order);
1440 rxb->skb = NULL; 1439 rxb->page = NULL;
1441 } 1440 }
1442 1441
1443 spin_lock_irqsave(&rxq->lock, flags); 1442 spin_lock_irqsave(&rxq->lock, flags);
@@ -1678,6 +1677,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1678 } 1677 }
1679#endif 1678#endif
1680 1679
1680 spin_unlock_irqrestore(&priv->lock, flags);
1681
1681 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1682 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1682 * atomic, make sure that inta covers all the interrupts that 1683 * atomic, make sure that inta covers all the interrupts that
1683 * we've discovered, even if FH interrupt came in just after 1684 * we've discovered, even if FH interrupt came in just after
@@ -1699,8 +1700,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1699 1700
1700 handled |= CSR_INT_BIT_HW_ERR; 1701 handled |= CSR_INT_BIT_HW_ERR;
1701 1702
1702 spin_unlock_irqrestore(&priv->lock, flags);
1703
1704 return; 1703 return;
1705 } 1704 }
1706 1705
@@ -1792,7 +1791,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1792 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1791 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1793 } 1792 }
1794#endif 1793#endif
1795 spin_unlock_irqrestore(&priv->lock, flags);
1796} 1794}
1797 1795
1798static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, 1796static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,