author     Tomas Winkler <tomas.winkler@intel.com>    2008-05-04 22:22:28 -0400
committer  John W. Linville <linville@tuxdriver.com>  2008-05-14 16:29:37 -0400
commit     a55360e458551b0add4ec147ef786d71e163bf50 (patch)
tree       2695c5fcc25ecf2ae2de6923706c873b6989ceb5 /drivers/net/wireless/iwlwifi/iwl4965-base.c
parent     0d0b2c1c49814ee54f1b4efd2c715a7465219ede (diff)
iwlwifi: move RX code to iwl-rx.c
This patch moves part of the RX code into iwl-rx.c as part of iwlcore. The
second part of the code can only be moved together with the TX code.

Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
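For orientation only (not part of the patch), the standalone sketch below mirrors the
RX circular-buffer accounting that the relocated code relies on: the driver keeps two
guard slots so a full ring is never mistaken for an empty one. The helper name
rx_queue_space and the RX_QUEUE_SIZE value of 256 are assumptions for this example;
the real helper appears as iwl4965_rx_queue_space() in the diff below.

#include <stdio.h>

#define RX_QUEUE_SIZE 256        /* assumed ring size; two slots reserved as guard */

/* mirrors the driver's rx-queue space helper: free slots between read and write */
static int rx_queue_space(int read, int write)
{
        int s = read - write;

        if (s <= 0)
                s += RX_QUEUE_SIZE;
        /* keep some buffer to not confuse full and empty queue */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}

int main(void)
{
        printf("freshly reset ring:        %d free slots\n", rx_queue_space(0, 0));  /* 254 */
        printf("writer just behind reader: %d free slots\n", rx_queue_space(10, 9)); /* 0 */
        return 0;
}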
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl4965-base.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl4965-base.c  545
1 file changed, 106 insertions(+), 439 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 54534270d46f..aa0393589dae 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -2736,7 +2736,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
  * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
  */
 static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -2849,7 +2849,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 
 
 static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
         struct iwl4965_alive_resp *palive;
@@ -2885,7 +2885,7 @@ static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
 }
 
 static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
 
@@ -2894,7 +2894,7 @@ static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv,
 }
 
 static void iwl4965_rx_reply_error(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
 
@@ -2909,7 +2909,7 @@ static void iwl4965_rx_reply_error(struct iwl_priv *priv,
 
 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
 
-static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
+static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
         struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
@@ -2921,7 +2921,7 @@ static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *
 }
 
 static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -2939,7 +2939,7 @@ static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
 }
 
 static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -2950,7 +2950,7 @@ static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
 }
 
 static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
         IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
@@ -2985,7 +2985,7 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
 }
 
 static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -3008,7 +3008,7 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
 static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -3021,7 +3021,7 @@ static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
 
 /* Service SCAN_START_NOTIFICATION (0x82) */
 static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
         struct iwl4965_scanstart_notification *notif =
@@ -3038,7 +3038,7 @@ static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
 
 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
 static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
         struct iwl4965_scanresults_notification *notif =
@@ -3063,7 +3063,7 @@ static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
 
 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
 static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
         struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
@@ -3121,7 +3121,7 @@ reschedule:
 /* Handle notification from uCode that card's power state is changing
  * due to software, hardware, or critical temperature RFKILL */
 static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
         u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
@@ -3241,7 +3241,7 @@ static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
  * if the callback returns 1
  */
 static void iwl4965_tx_cmd_complete(struct iwl_priv *priv,
-                struct iwl4965_rx_mem_buffer *rxb)
+                struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -3278,438 +3278,28 @@ static void iwl4965_tx_cmd_complete(struct iwl_priv *priv,
         }
 }
 
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by 4965. These get
- * used not only for Rx frames, but for any command response or notification
- * from the 4965. The driver and 4965 manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- *   detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl4965_rx_queue_alloc()   Allocates rx_free
- * iwl4965_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                            iwl4965_rx_queue_restock
- * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
- *                            queue, updates firmware pointers, and updates
- *                            the WRITE index. If insufficient rx_free buffers
- *                            are available, schedules iwl4965_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl4965_rx()         Detach iwl4965_rx_mem_buffers from pool up to the
- *                            READ INDEX, detaching the SKB from the pool.
- *                            Moves the packet buffer from queue to rx_used.
- *                            Calls iwl4965_rx_queue_restock to refill any empty
- *                            slots.
- * ...
- *
- */
-
-/**
- * iwl4965_rx_queue_space - Return number of free slots available in queue.
- */
-static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
-{
-        int s = q->read - q->write;
-        if (s <= 0)
-                s += RX_QUEUE_SIZE;
-        /* keep some buffer to not confuse full and empty queue */
-        s -= 2;
-        if (s < 0)
-                s = 0;
-        return s;
-}
-
-/**
- * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl4965_rx_queue *q)
-{
-        u32 reg = 0;
-        int rc = 0;
-        unsigned long flags;
-
-        spin_lock_irqsave(&q->lock, flags);
-
-        if (q->need_update == 0)
-                goto exit_unlock;
-
-        /* If power-saving is in use, make sure device is awake */
-        if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-                reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-                        iwl_set_bit(priv, CSR_GP_CNTRL,
-                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                        goto exit_unlock;
-                }
-
-                rc = iwl_grab_nic_access(priv);
-                if (rc)
-                        goto exit_unlock;
-
-                /* Device expects a multiple of 8 */
-                iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
-                                   q->write & ~0x7);
-                iwl_release_nic_access(priv);
-
-        /* Else device is assumed to be awake */
-        } else
-                /* Device expects a multiple of 8 */
-                iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
-
-
-        q->need_update = 0;
-
- exit_unlock:
-        spin_unlock_irqrestore(&q->lock, flags);
-        return rc;
-}
-
-/**
- * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
-                                              dma_addr_t dma_addr)
-{
-        return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-
-/**
- * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static int iwl4965_rx_queue_restock(struct iwl_priv *priv)
-{
-        struct iwl4965_rx_queue *rxq = &priv->rxq;
-        struct list_head *element;
-        struct iwl4965_rx_mem_buffer *rxb;
-        unsigned long flags;
-        int write, rc;
-
-        spin_lock_irqsave(&rxq->lock, flags);
-        write = rxq->write & ~0x7;
-        while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
-                /* Get next free Rx buffer, remove from free list */
-                element = rxq->rx_free.next;
-                rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
-                list_del(element);
-
-                /* Point to Rx buffer via next RBD in circular buffer */
-                rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
-                rxq->queue[rxq->write] = rxb;
-                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
-                rxq->free_count--;
-        }
-        spin_unlock_irqrestore(&rxq->lock, flags);
-        /* If the pre-allocated buffer pool is dropping low, schedule to
-         * refill it */
-        if (rxq->free_count <= RX_LOW_WATERMARK)
-                queue_work(priv->workqueue, &priv->rx_replenish);
-
-
-        /* If we've added more space for the firmware to place data, tell it.
-         * Increment device's write pointer in multiples of 8. */
-        if ((write != (rxq->write & ~0x7))
-            || (abs(rxq->write - rxq->read) > 7)) {
-                spin_lock_irqsave(&rxq->lock, flags);
-                rxq->need_update = 1;
-                spin_unlock_irqrestore(&rxq->lock, flags);
-                rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
-                if (rc)
-                        return rc;
-        }
-
-        return 0;
-}
-
-/**
- * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl4965_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-static void iwl4965_rx_allocate(struct iwl_priv *priv)
-{
-        struct iwl4965_rx_queue *rxq = &priv->rxq;
-        struct list_head *element;
-        struct iwl4965_rx_mem_buffer *rxb;
-        unsigned long flags;
-        spin_lock_irqsave(&rxq->lock, flags);
-        while (!list_empty(&rxq->rx_used)) {
-                element = rxq->rx_used.next;
-                rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
-
-                /* Alloc a new receive buffer */
-                rxb->skb =
-                    alloc_skb(priv->hw_params.rx_buf_size,
-                              __GFP_NOWARN | GFP_ATOMIC);
-                if (!rxb->skb) {
-                        if (net_ratelimit())
-                                printk(KERN_CRIT DRV_NAME
-                                       ": Can not allocate SKB buffers\n");
-                        /* We don't reschedule replenish work here -- we will
-                         * call the restock method and if it still needs
-                         * more buffers it will schedule replenish */
-                        break;
-                }
-                priv->alloc_rxb_skb++;
-                list_del(element);
-
-                /* Get physical address of RB/SKB */
-                rxb->dma_addr =
-                    pci_map_single(priv->pci_dev, rxb->skb->data,
-                                   priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
-                list_add_tail(&rxb->list, &rxq->rx_free);
-                rxq->free_count++;
-        }
-        spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
 /*
  * this should be called while priv->lock is locked
 */
-static void __iwl4965_rx_replenish(void *data)
-{
-        struct iwl_priv *priv = data;
-
-        iwl4965_rx_allocate(priv);
-        iwl4965_rx_queue_restock(priv);
-}
-
-
-void iwl4965_rx_replenish(void *data)
-{
-        struct iwl_priv *priv = data;
-        unsigned long flags;
-
-        iwl4965_rx_allocate(priv);
-
-        spin_lock_irqsave(&priv->lock, flags);
-        iwl4965_rx_queue_restock(priv);
-        spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-static void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
-{
-        int i;
-        for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-                if (rxq->pool[i].skb != NULL) {
-                        pci_unmap_single(priv->pci_dev,
-                                         rxq->pool[i].dma_addr,
-                                         priv->hw_params.rx_buf_size,
-                                         PCI_DMA_FROMDEVICE);
-                        dev_kfree_skb(rxq->pool[i].skb);
-                }
-        }
-
-        pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-                            rxq->dma_addr);
-        rxq->bd = NULL;
-}
-
-int iwl4965_rx_queue_alloc(struct iwl_priv *priv)
-{
-        struct iwl4965_rx_queue *rxq = &priv->rxq;
-        struct pci_dev *dev = priv->pci_dev;
-        int i;
-
-        spin_lock_init(&rxq->lock);
-        INIT_LIST_HEAD(&rxq->rx_free);
-        INIT_LIST_HEAD(&rxq->rx_used);
-
-        /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
-        rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
-        if (!rxq->bd)
-                return -ENOMEM;
-
-        /* Fill the rx_used queue with _all_ of the Rx buffers */
-        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
-                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
-        /* Set us so that we have processed and used all buffers, but have
-         * not restocked the Rx queue with fresh buffers */
-        rxq->read = rxq->write = 0;
-        rxq->free_count = 0;
-        rxq->need_update = 0;
-        return 0;
-}
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
-{
-        unsigned long flags;
-        int i;
-        spin_lock_irqsave(&rxq->lock, flags);
-        INIT_LIST_HEAD(&rxq->rx_free);
-        INIT_LIST_HEAD(&rxq->rx_used);
-        /* Fill the rx_used queue with _all_ of the Rx buffers */
-        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
-                /* In the reset function, these buffers may have been allocated
-                 * to an SKB, so we need to unmap and free potential storage */
-                if (rxq->pool[i].skb != NULL) {
-                        pci_unmap_single(priv->pci_dev,
-                                         rxq->pool[i].dma_addr,
-                                         priv->hw_params.rx_buf_size,
-                                         PCI_DMA_FROMDEVICE);
-                        priv->alloc_rxb_skb--;
-                        dev_kfree_skb(rxq->pool[i].skb);
-                        rxq->pool[i].skb = NULL;
-                }
-                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-        }
-
-        /* Set us so that we have processed and used all buffers, but have
-         * not restocked the Rx queue with fresh buffers */
-        rxq->read = rxq->write = 0;
-        rxq->free_count = 0;
-        spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/*       0   1   2   3   4   5   6   7   8   9 */
-         0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
-        20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
-        26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
-        29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
-        32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
-        34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
-        36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
-        37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
-        38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
-        39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- * (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int iwl4965_calc_db_from_ratio(int sig_ratio)
+static void __iwl_rx_replenish(struct iwl_priv *priv)
 {
-        /* 1000:1 or higher just report as 60 dB */
-        if (sig_ratio >= 1000)
-                return 60;
-
-        /* 100:1 or higher, divide by 10 and use table,
-         * add 20 dB to make up for divide by 10 */
-        if (sig_ratio >= 100)
-                return (20 + (int)ratio2dB[sig_ratio/10]);
-
-        /* We shouldn't see this */
-        if (sig_ratio < 1)
-                return 0;
-
-        /* Use table for ratios 1:1 - 99:1 */
-        return (int)ratio2dB[sig_ratio];
+        iwl_rx_allocate(priv);
+        iwl_rx_queue_restock(priv);
 }
 
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95) /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- * about formulas used below. */
-int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
-        int sig_qual;
-        int degradation = PERFECT_RSSI - rssi_dbm;
-
-        /* If we get a noise measurement, use signal-to-noise ratio (SNR)
-         * as indicator; formula is (signal dbm - noise dbm).
-         * SNR at or above 40 is a great signal (100%).
-         * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
-         * Weakest usable signal is usually 10 - 15 dB SNR. */
-        if (noise_dbm) {
-                if (rssi_dbm - noise_dbm >= 40)
-                        return 100;
-                else if (rssi_dbm < noise_dbm)
-                        return 0;
-                sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
-        /* Else use just the signal level.
-         * This formula is a least squares fit of data points collected and
-         * compared with a reference system that had a percentage (%) display
-         * for signal quality. */
-        } else
-                sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
-                            (15 * RSSI_RANGE + 62 * degradation)) /
-                            (RSSI_RANGE * RSSI_RANGE);
-
-        if (sig_qual > 100)
-                sig_qual = 100;
-        else if (sig_qual < 1)
-                sig_qual = 0;
-
-        return sig_qual;
-}
 
 /**
- * iwl4965_rx_handle - Main entry function for receiving responses from uCode
+ * iwl_rx_handle - Main entry function for receiving responses from uCode
  *
  * Uses the priv->rx_handlers callback function array to invoke
  * the appropriate handlers, including command responses,
  * frame-received notifications, and other notifications.
  */
-static void iwl4965_rx_handle(struct iwl_priv *priv)
+void iwl_rx_handle(struct iwl_priv *priv)
 {
-        struct iwl4965_rx_mem_buffer *rxb;
+        struct iwl_rx_mem_buffer *rxb;
         struct iwl4965_rx_packet *pkt;
-        struct iwl4965_rx_queue *rxq = &priv->rxq;
+        struct iwl_rx_queue *rxq = &priv->rxq;
         u32 r, i;
         int reclaim;
         unsigned long flags;
@@ -3725,7 +3315,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
         if (i == r)
                 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
 
-        if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
+        if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
                 fill_rx = 1;
 
         while (i != r) {
@@ -3804,7 +3394,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
                         count++;
                         if (count >= 8) {
                                 priv->rxq.read = i;
-                                __iwl4965_rx_replenish(priv);
+                                __iwl_rx_replenish(priv);
                                 count = 0;
                         }
                 }
@@ -3812,7 +3402,84 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
 
         /* Backtrack one entry */
         priv->rxq.read = i;
-        iwl4965_rx_queue_restock(priv);
+        iwl_rx_queue_restock(priv);
+}
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/*       0   1   2   3   4   5   6   7   8   9 */
+         0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
+        20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
+        26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
+        29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
+        32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
+        34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
+        36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
+        37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
+        38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
+        39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ * (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
+int iwl4965_calc_db_from_ratio(int sig_ratio)
+{
+        /* 1000:1 or higher just report as 60 dB */
+        if (sig_ratio >= 1000)
+                return 60;
+
+        /* 100:1 or higher, divide by 10 and use table,
+         * add 20 dB to make up for divide by 10 */
+        if (sig_ratio >= 100)
+                return (20 + (int)ratio2dB[sig_ratio/10]);
+
+        /* We shouldn't see this */
+        if (sig_ratio < 1)
+                return 0;
+
+        /* Use table for ratios 1:1 - 99:1 */
+        return (int)ratio2dB[sig_ratio];
+}
+
+#define PERFECT_RSSI (-20) /* dBm */
+#define WORST_RSSI (-95) /* dBm */
+#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
+
+/* Calculate an indication of rx signal quality (a percentage, not dBm!).
+ * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
+ * about formulas used below. */
+int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
+{
+        int sig_qual;
+        int degradation = PERFECT_RSSI - rssi_dbm;
+
+        /* If we get a noise measurement, use signal-to-noise ratio (SNR)
+         * as indicator; formula is (signal dbm - noise dbm).
+         * SNR at or above 40 is a great signal (100%).
+         * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
+         * Weakest usable signal is usually 10 - 15 dB SNR. */
+        if (noise_dbm) {
+                if (rssi_dbm - noise_dbm >= 40)
+                        return 100;
+                else if (rssi_dbm < noise_dbm)
+                        return 0;
+                sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
+
+        /* Else use just the signal level.
+         * This formula is a least squares fit of data points collected and
+         * compared with a reference system that had a percentage (%) display
+         * for signal quality. */
+        } else
+                sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
+                            (15 * RSSI_RANGE + 62 * degradation)) /
+                            (RSSI_RANGE * RSSI_RANGE);
+
+        if (sig_qual > 100)
+                sig_qual = 100;
+        else if (sig_qual < 1)
+                sig_qual = 0;
+
+        return sig_qual;
 }
 
 /**
@@ -4248,7 +3915,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
         /* uCode wakes up after power-down sleep */
         if (inta & CSR_INT_BIT_WAKEUP) {
                 IWL_DEBUG_ISR("Wakeup interrupt\n");
-                iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq);
+                iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
                 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]);
                 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]);
                 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]);
@@ -4263,7 +3930,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
          * Rx "responses" (frame-received notification), and other
          * notifications from uCode come through here*/
         if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
-                iwl4965_rx_handle(priv);
+                iwl_rx_handle(priv);
                 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
         }
 
@@ -5452,7 +5119,7 @@ static void iwl4965_bg_rx_replenish(struct work_struct *data)
                 return;
 
         mutex_lock(&priv->mutex);
-        iwl4965_rx_replenish(priv);
+        iwl_rx_replenish(priv);
         mutex_unlock(&priv->mutex);
 }
 
@@ -7309,7 +6976,7 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
         iwl4965_dealloc_ucode_pci(priv);
 
         if (priv->rxq.bd)
-                iwl4965_rx_queue_free(priv, &priv->rxq);
+                iwl_rx_queue_free(priv, &priv->rxq);
         iwl4965_hw_txq_ctx_free(priv);
 
         iwlcore_clear_stations_table(priv);