author     Jeff Garzik <jeff@garzik.org>    2006-09-06 11:02:22 -0400
committer  Jeff Garzik <jeff@garzik.org>    2006-09-06 11:02:22 -0400
commit     a2413598b8c5f14d75f914ce95d72bacdeabd05e (patch)
tree       35e1340b05b295dbefefaa45424e00a55c28402e
parent     f2ad2d9b65963322186a8af2bd2965c734a7badb (diff)
parent     c576af479162c0a11d4e2691ebc97354958d9285 (diff)
Merge branch 'upstream' of master.kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6 into upstream
-rw-r--r--  MAINTAINERS                                  |   6
-rw-r--r--  drivers/net/wireless/Kconfig                 |  23
-rw-r--r--  drivers/net/wireless/airo.c                  |  40
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h       |  58
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c   | 583
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.h   | 296
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c  |  97
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c    |   4
-rw-r--r--  drivers/net/wireless/ipw2100.c               |   5
-rw-r--r--  drivers/net/wireless/ipw2200.c               | 215
-rw-r--r--  drivers/net/wireless/ipw2200.h               |  51
-rw-r--r--  drivers/net/wireless/orinoco.c               |   1
-rw-r--r--  drivers/net/wireless/orinoco.h               |   8
-rw-r--r--  net/ieee80211/ieee80211_crypt_ccmp.c         |  23
-rw-r--r--  net/ieee80211/ieee80211_crypt_tkip.c         | 108
-rw-r--r--  net/ieee80211/ieee80211_crypt_wep.c          |  35
-rw-r--r--  net/ieee80211/ieee80211_rx.c                 |  17
-rw-r--r--  net/ieee80211/ieee80211_tx.c                 |   9
18 files changed, 1027 insertions, 552 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 6c399b872e3a..562775007785 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -449,9 +449,9 @@ L: linux-hams@vger.kernel.org
449 | W: http://www.baycom.org/~tom/ham/ham.html | 449 | W: http://www.baycom.org/~tom/ham/ham.html |
450 | S: Maintained | 450 | S: Maintained |
451 | 451 | ||
452 | BCM43XX WIRELESS DRIVER | 452 | BCM43XX WIRELESS DRIVER (SOFTMAC BASED VERSION) |
453 | P: Michael Buesch | 453 | P: Larry Finger |
454 | M: mb@bu3sch.de | 454 | M: Larry.Finger@lwfinger.net |
455 | P: Stefano Brivio | 455 | P: Stefano Brivio |
456 | M: st3@riseup.net | 456 | M: st3@riseup.net |
457 | W: http://bcm43xx.berlios.de/ | 457 | W: http://bcm43xx.berlios.de/ |
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 2e8ac995d56f..bd4a68c85a47 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -271,25 +271,14 @@ config IPW2200_DEBUG
271 | bool "Enable full debugging output in IPW2200 module." | 271 | bool "Enable full debugging output in IPW2200 module." |
272 | depends on IPW2200 | 272 | depends on IPW2200 |
273 | ---help--- | 273 | ---help--- |
274 | This option will enable debug tracing output for the IPW2200. | 274 | This option will enable low level debug tracing output for IPW2200. |
275 | 275 | ||
276 | This will result in the kernel module being ~100k larger. You can | 276 | Note, normal debug code is already compiled in. This low level |
277 | control which debug output is sent to the kernel log by setting the | 277 | debug option enables debug on hot paths (e.g Tx, Rx, ISR) and |
278 | value in | 278 | will result in the kernel module being ~70 larger. Most users |
279 | 279 | will typically not need this high verbosity debug information. | |
280 | /sys/bus/pci/drivers/ipw2200/debug_level | ||
281 | |||
282 | This entry will only exist if this option is enabled. | ||
283 | 280 | ||
284 | To set a value, simply echo an 8-byte hex value to the same file: | 281 | If you are not sure, say N here. |
285 | |||
286 | % echo 0x00000FFO > /sys/bus/pci/drivers/ipw2200/debug_level | ||
287 | |||
288 | You can find the list of debug mask values in | ||
289 | drivers/net/wireless/ipw2200.h | ||
290 | |||
291 | If you are not trying to debug or develop the IPW2200 driver, you | ||
292 | most likely want to say N here. | ||
293 | 282 | ||
294 | config AIRO | 283 | config AIRO |
295 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" | 284 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" |
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 16befbcea58c..e088ceefb4a3 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -47,6 +47,7 @@
47 | #include <linux/pci.h> | 47 | #include <linux/pci.h> |
48 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
49 | #include <net/ieee80211.h> | 49 | #include <net/ieee80211.h> |
50 | #include <linux/kthread.h> | ||
50 | 51 | ||
51 | #include "airo.h" | 52 | #include "airo.h" |
52 | 53 | ||
@@ -1187,11 +1188,10 @@ struct airo_info {
1187 | int whichbap); | 1188 | int whichbap); |
1188 | unsigned short *flash; | 1189 | unsigned short *flash; |
1189 | tdsRssiEntry *rssi; | 1190 | tdsRssiEntry *rssi; |
1190 | struct task_struct *task; | 1191 | struct task_struct *list_bss_task; |
1192 | struct task_struct *airo_thread_task; | ||
1191 | struct semaphore sem; | 1193 | struct semaphore sem; |
1192 | pid_t thr_pid; | ||
1193 | wait_queue_head_t thr_wait; | 1194 | wait_queue_head_t thr_wait; |
1194 | struct completion thr_exited; | ||
1195 | unsigned long expires; | 1195 | unsigned long expires; |
1196 | struct { | 1196 | struct { |
1197 | struct sk_buff *skb; | 1197 | struct sk_buff *skb; |
@@ -1733,12 +1733,12 @@ static int readBSSListRid(struct airo_info *ai, int first,
1733 | cmd.cmd=CMD_LISTBSS; | 1733 | cmd.cmd=CMD_LISTBSS; |
1734 | if (down_interruptible(&ai->sem)) | 1734 | if (down_interruptible(&ai->sem)) |
1735 | return -ERESTARTSYS; | 1735 | return -ERESTARTSYS; |
1736 | ai->list_bss_task = current; | ||
1736 | issuecommand(ai, &cmd, &rsp); | 1737 | issuecommand(ai, &cmd, &rsp); |
1737 | up(&ai->sem); | 1738 | up(&ai->sem); |
1738 | /* Let the command take effect */ | 1739 | /* Let the command take effect */ |
1739 | ai->task = current; | 1740 | schedule_timeout_uninterruptible(3 * HZ); |
1740 | ssleep(3); | 1741 | ai->list_bss_task = NULL; |
1741 | ai->task = NULL; | ||
1742 | } | 1742 | } |
1743 | rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext, | 1743 | rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext, |
1744 | list, ai->bssListRidLen, 1); | 1744 | list, ai->bssListRidLen, 1); |
@@ -2400,8 +2400,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
2400 | clear_bit(FLAG_REGISTERED, &ai->flags); | 2400 | clear_bit(FLAG_REGISTERED, &ai->flags); |
2401 | } | 2401 | } |
2402 | set_bit(JOB_DIE, &ai->jobs); | 2402 | set_bit(JOB_DIE, &ai->jobs); |
2403 | kill_proc(ai->thr_pid, SIGTERM, 1); | 2403 | kthread_stop(ai->airo_thread_task); |
2404 | wait_for_completion(&ai->thr_exited); | ||
2405 | 2404 | ||
2406 | /* | 2405 | /* |
2407 | * Clean out tx queue | 2406 | * Clean out tx queue |
@@ -2811,9 +2810,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2811 | ai->config.len = 0; | 2810 | ai->config.len = 0; |
2812 | ai->pci = pci; | 2811 | ai->pci = pci; |
2813 | init_waitqueue_head (&ai->thr_wait); | 2812 | init_waitqueue_head (&ai->thr_wait); |
2814 | init_completion (&ai->thr_exited); | 2813 | ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name); |
2815 | ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES); | 2814 | if (IS_ERR(ai->airo_thread_task)) |
2816 | if (ai->thr_pid < 0) | ||
2817 | goto err_out_free; | 2815 | goto err_out_free; |
2818 | ai->tfm = NULL; | 2816 | ai->tfm = NULL; |
2819 | rc = add_airo_dev( dev ); | 2817 | rc = add_airo_dev( dev ); |
@@ -2930,8 +2928,7 @@ err_out_unlink:
2930 | del_airo_dev(dev); | 2928 | del_airo_dev(dev); |
2931 | err_out_thr: | 2929 | err_out_thr: |
2932 | set_bit(JOB_DIE, &ai->jobs); | 2930 | set_bit(JOB_DIE, &ai->jobs); |
2933 | kill_proc(ai->thr_pid, SIGTERM, 1); | 2931 | kthread_stop(ai->airo_thread_task); |
2934 | wait_for_completion(&ai->thr_exited); | ||
2935 | err_out_free: | 2932 | err_out_free: |
2936 | free_netdev(dev); | 2933 | free_netdev(dev); |
2937 | return NULL; | 2934 | return NULL; |
@@ -3063,13 +3060,7 @@ static int airo_thread(void *data) {
3063 | struct airo_info *ai = dev->priv; | 3060 | struct airo_info *ai = dev->priv; |
3064 | int locked; | 3061 | int locked; |
3065 | 3062 | ||
3066 | daemonize("%s", dev->name); | ||
3067 | allow_signal(SIGTERM); | ||
3068 | |||
3069 | while(1) { | 3063 | while(1) { |
3070 | if (signal_pending(current)) | ||
3071 | flush_signals(current); | ||
3072 | |||
3073 | /* make swsusp happy with our thread */ | 3064 | /* make swsusp happy with our thread */ |
3074 | try_to_freeze(); | 3065 | try_to_freeze(); |
3075 | 3066 | ||
@@ -3097,7 +3088,7 @@ static int airo_thread(void *data) {
3097 | set_bit(JOB_AUTOWEP, &ai->jobs); | 3088 | set_bit(JOB_AUTOWEP, &ai->jobs); |
3098 | break; | 3089 | break; |
3099 | } | 3090 | } |
3100 | if (!signal_pending(current)) { | 3091 | if (!kthread_should_stop()) { |
3101 | unsigned long wake_at; | 3092 | unsigned long wake_at; |
3102 | if (!ai->expires || !ai->scan_timeout) { | 3093 | if (!ai->expires || !ai->scan_timeout) { |
3103 | wake_at = max(ai->expires, | 3094 | wake_at = max(ai->expires, |
@@ -3109,7 +3100,7 @@ static int airo_thread(void *data) {
3109 | schedule_timeout(wake_at - jiffies); | 3100 | schedule_timeout(wake_at - jiffies); |
3110 | continue; | 3101 | continue; |
3111 | } | 3102 | } |
3112 | } else if (!signal_pending(current)) { | 3103 | } else if (!kthread_should_stop()) { |
3113 | schedule(); | 3104 | schedule(); |
3114 | continue; | 3105 | continue; |
3115 | } | 3106 | } |
@@ -3154,7 +3145,8 @@ static int airo_thread(void *data) {
3154 | else /* Shouldn't get here, but we make sure to unlock */ | 3145 | else /* Shouldn't get here, but we make sure to unlock */ |
3155 | up(&ai->sem); | 3146 | up(&ai->sem); |
3156 | } | 3147 | } |
3157 | complete_and_exit (&ai->thr_exited, 0); | 3148 | |
3149 | return 0; | ||
3158 | } | 3150 | } |
3159 | 3151 | ||
3160 | static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) { | 3152 | static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) { |
@@ -3235,8 +3227,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3235 | if(newStatus == ASSOCIATED || newStatus == REASSOCIATED) { | 3227 | if(newStatus == ASSOCIATED || newStatus == REASSOCIATED) { |
3236 | if (auto_wep) | 3228 | if (auto_wep) |
3237 | apriv->expires = 0; | 3229 | apriv->expires = 0; |
3238 | if (apriv->task) | 3230 | if (apriv->list_bss_task) |
3239 | wake_up_process (apriv->task); | 3231 | wake_up_process(apriv->list_bss_task); |
3240 | set_bit(FLAG_UPDATE_UNI, &apriv->flags); | 3232 | set_bit(FLAG_UPDATE_UNI, &apriv->flags); |
3241 | set_bit(FLAG_UPDATE_MULTI, &apriv->flags); | 3233 | set_bit(FLAG_UPDATE_MULTI, &apriv->flags); |
3242 | 3234 | ||
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index c6ee1e974c84..62fd7e237789 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -33,14 +33,18 @@
33 | #define BCM43xx_PCICFG_ICR 0x94 | 33 | #define BCM43xx_PCICFG_ICR 0x94 |
34 | 34 | ||
35 | /* MMIO offsets */ | 35 | /* MMIO offsets */ |
36 | #define BCM43xx_MMIO_DMA1_REASON 0x20 | 36 | #define BCM43xx_MMIO_DMA0_REASON 0x20 |
37 | #define BCM43xx_MMIO_DMA1_IRQ_MASK 0x24 | 37 | #define BCM43xx_MMIO_DMA0_IRQ_MASK 0x24 |
38 | #define BCM43xx_MMIO_DMA2_REASON 0x28 | 38 | #define BCM43xx_MMIO_DMA1_REASON 0x28 |
39 | #define BCM43xx_MMIO_DMA2_IRQ_MASK 0x2C | 39 | #define BCM43xx_MMIO_DMA1_IRQ_MASK 0x2C |
40 | #define BCM43xx_MMIO_DMA3_REASON 0x30 | 40 | #define BCM43xx_MMIO_DMA2_REASON 0x30 |
41 | #define BCM43xx_MMIO_DMA3_IRQ_MASK 0x34 | 41 | #define BCM43xx_MMIO_DMA2_IRQ_MASK 0x34 |
42 | #define BCM43xx_MMIO_DMA4_REASON 0x38 | 42 | #define BCM43xx_MMIO_DMA3_REASON 0x38 |
43 | #define BCM43xx_MMIO_DMA4_IRQ_MASK 0x3C | 43 | #define BCM43xx_MMIO_DMA3_IRQ_MASK 0x3C |
44 | #define BCM43xx_MMIO_DMA4_REASON 0x40 | ||
45 | #define BCM43xx_MMIO_DMA4_IRQ_MASK 0x44 | ||
46 | #define BCM43xx_MMIO_DMA5_REASON 0x48 | ||
47 | #define BCM43xx_MMIO_DMA5_IRQ_MASK 0x4C | ||
44 | #define BCM43xx_MMIO_STATUS_BITFIELD 0x120 | 48 | #define BCM43xx_MMIO_STATUS_BITFIELD 0x120 |
45 | #define BCM43xx_MMIO_STATUS2_BITFIELD 0x124 | 49 | #define BCM43xx_MMIO_STATUS2_BITFIELD 0x124 |
46 | #define BCM43xx_MMIO_GEN_IRQ_REASON 0x128 | 50 | #define BCM43xx_MMIO_GEN_IRQ_REASON 0x128 |
@@ -56,14 +60,27 @@
56 | #define BCM43xx_MMIO_XMITSTAT_1 0x174 | 60 | #define BCM43xx_MMIO_XMITSTAT_1 0x174 |
57 | #define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ | 61 | #define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ |
58 | #define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ | 62 | #define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ |
59 | #define BCM43xx_MMIO_DMA1_BASE 0x200 | 63 | |
60 | #define BCM43xx_MMIO_DMA2_BASE 0x220 | 64 | /* 32-bit DMA */ |
61 | #define BCM43xx_MMIO_DMA3_BASE 0x240 | 65 | #define BCM43xx_MMIO_DMA32_BASE0 0x200 |
62 | #define BCM43xx_MMIO_DMA4_BASE 0x260 | 66 | #define BCM43xx_MMIO_DMA32_BASE1 0x220 |
67 | #define BCM43xx_MMIO_DMA32_BASE2 0x240 | ||
68 | #define BCM43xx_MMIO_DMA32_BASE3 0x260 | ||
69 | #define BCM43xx_MMIO_DMA32_BASE4 0x280 | ||
70 | #define BCM43xx_MMIO_DMA32_BASE5 0x2A0 | ||
71 | /* 64-bit DMA */ | ||
72 | #define BCM43xx_MMIO_DMA64_BASE0 0x200 | ||
73 | #define BCM43xx_MMIO_DMA64_BASE1 0x240 | ||
74 | #define BCM43xx_MMIO_DMA64_BASE2 0x280 | ||
75 | #define BCM43xx_MMIO_DMA64_BASE3 0x2C0 | ||
76 | #define BCM43xx_MMIO_DMA64_BASE4 0x300 | ||
77 | #define BCM43xx_MMIO_DMA64_BASE5 0x340 | ||
78 | /* PIO */ | ||
63 | #define BCM43xx_MMIO_PIO1_BASE 0x300 | 79 | #define BCM43xx_MMIO_PIO1_BASE 0x300 |
64 | #define BCM43xx_MMIO_PIO2_BASE 0x310 | 80 | #define BCM43xx_MMIO_PIO2_BASE 0x310 |
65 | #define BCM43xx_MMIO_PIO3_BASE 0x320 | 81 | #define BCM43xx_MMIO_PIO3_BASE 0x320 |
66 | #define BCM43xx_MMIO_PIO4_BASE 0x330 | 82 | #define BCM43xx_MMIO_PIO4_BASE 0x330 |
83 | |||
67 | #define BCM43xx_MMIO_PHY_VER 0x3E0 | 84 | #define BCM43xx_MMIO_PHY_VER 0x3E0 |
68 | #define BCM43xx_MMIO_PHY_RADIO 0x3E2 | 85 | #define BCM43xx_MMIO_PHY_RADIO 0x3E2 |
69 | #define BCM43xx_MMIO_ANTENNA 0x3E8 | 86 | #define BCM43xx_MMIO_ANTENNA 0x3E8 |
@@ -233,8 +250,14 @@
233 | #define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000 | 250 | #define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000 |
234 | 251 | ||
235 | /* sbtmstatehigh state flags */ | 252 | /* sbtmstatehigh state flags */ |
236 | #define BCM43xx_SBTMSTATEHIGH_SERROR 0x1 | 253 | #define BCM43xx_SBTMSTATEHIGH_SERROR 0x00000001 |
237 | #define BCM43xx_SBTMSTATEHIGH_BUSY 0x4 | 254 | #define BCM43xx_SBTMSTATEHIGH_BUSY 0x00000004 |
255 | #define BCM43xx_SBTMSTATEHIGH_TIMEOUT 0x00000020 | ||
256 | #define BCM43xx_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000 | ||
257 | #define BCM43xx_SBTMSTATEHIGH_DMA64BIT 0x10000000 | ||
258 | #define BCM43xx_SBTMSTATEHIGH_GATEDCLK 0x20000000 | ||
259 | #define BCM43xx_SBTMSTATEHIGH_BISTFAILED 0x40000000 | ||
260 | #define BCM43xx_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000 | ||
238 | 261 | ||
239 | /* sbimstate flags */ | 262 | /* sbimstate flags */ |
240 | #define BCM43xx_SBIMSTATE_IB_ERROR 0x20000 | 263 | #define BCM43xx_SBIMSTATE_IB_ERROR 0x20000 |
@@ -574,8 +597,11 @@ struct bcm43xx_dma {
574 | struct bcm43xx_dmaring *tx_ring1; | 597 | struct bcm43xx_dmaring *tx_ring1; |
575 | struct bcm43xx_dmaring *tx_ring2; | 598 | struct bcm43xx_dmaring *tx_ring2; |
576 | struct bcm43xx_dmaring *tx_ring3; | 599 | struct bcm43xx_dmaring *tx_ring3; |
600 | struct bcm43xx_dmaring *tx_ring4; | ||
601 | struct bcm43xx_dmaring *tx_ring5; | ||
602 | |||
577 | struct bcm43xx_dmaring *rx_ring0; | 603 | struct bcm43xx_dmaring *rx_ring0; |
578 | struct bcm43xx_dmaring *rx_ring1; /* only available on core.rev < 5 */ | 604 | struct bcm43xx_dmaring *rx_ring3; /* only available on core.rev < 5 */ |
579 | }; | 605 | }; |
580 | 606 | ||
581 | /* Data structures for PIO transmission, per 80211 core. */ | 607 | /* Data structures for PIO transmission, per 80211 core. */ |
@@ -739,7 +765,7 @@ struct bcm43xx_private {
739 | 765 | ||
740 | /* Reason code of the last interrupt. */ | 766 | /* Reason code of the last interrupt. */ |
741 | u32 irq_reason; | 767 | u32 irq_reason; |
742 | u32 dma_reason[4]; | 768 | u32 dma_reason[6]; |
743 | /* saved irq enable/disable state bitfield. */ | 769 | /* saved irq enable/disable state bitfield. */ |
744 | u32 irq_savedstate; | 770 | u32 irq_savedstate; |
745 | /* Link Quality calculation context. */ | 771 | /* Link Quality calculation context. */ |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index d0318e525ba7..76e3aed4b471 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -4,7 +4,7 @@
4 | 4 | ||
5 | DMA ringbuffer and descriptor allocation/management | 5 | DMA ringbuffer and descriptor allocation/management |
6 | 6 | ||
7 | Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de> | 7 | Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de> |
8 | 8 | ||
9 | Some code in this file is derived from the b44.c driver | 9 | Some code in this file is derived from the b44.c driver |
10 | Copyright (C) 2002 David S. Miller | 10 | Copyright (C) 2002 David S. Miller |
@@ -109,6 +109,35 @@ void return_slot(struct bcm43xx_dmaring *ring, int slot)
109 | } | 109 | } |
110 | } | 110 | } |
111 | 111 | ||
112 | u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx) | ||
113 | { | ||
114 | static const u16 map64[] = { | ||
115 | BCM43xx_MMIO_DMA64_BASE0, | ||
116 | BCM43xx_MMIO_DMA64_BASE1, | ||
117 | BCM43xx_MMIO_DMA64_BASE2, | ||
118 | BCM43xx_MMIO_DMA64_BASE3, | ||
119 | BCM43xx_MMIO_DMA64_BASE4, | ||
120 | BCM43xx_MMIO_DMA64_BASE5, | ||
121 | }; | ||
122 | static const u16 map32[] = { | ||
123 | BCM43xx_MMIO_DMA32_BASE0, | ||
124 | BCM43xx_MMIO_DMA32_BASE1, | ||
125 | BCM43xx_MMIO_DMA32_BASE2, | ||
126 | BCM43xx_MMIO_DMA32_BASE3, | ||
127 | BCM43xx_MMIO_DMA32_BASE4, | ||
128 | BCM43xx_MMIO_DMA32_BASE5, | ||
129 | }; | ||
130 | |||
131 | if (dma64bit) { | ||
132 | assert(controller_idx >= 0 && | ||
133 | controller_idx < ARRAY_SIZE(map64)); | ||
134 | return map64[controller_idx]; | ||
135 | } | ||
136 | assert(controller_idx >= 0 && | ||
137 | controller_idx < ARRAY_SIZE(map32)); | ||
138 | return map32[controller_idx]; | ||
139 | } | ||
140 | |||
112 | static inline | 141 | static inline |
113 | dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring, | 142 | dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring, |
114 | unsigned char *buf, | 143 | unsigned char *buf, |
@@ -172,7 +201,6 @@ void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
172 | /* Unmap and free a descriptor buffer. */ | 201 | /* Unmap and free a descriptor buffer. */ |
173 | static inline | 202 | static inline |
174 | void free_descriptor_buffer(struct bcm43xx_dmaring *ring, | 203 | void free_descriptor_buffer(struct bcm43xx_dmaring *ring, |
175 | struct bcm43xx_dmadesc *desc, | ||
176 | struct bcm43xx_dmadesc_meta *meta, | 204 | struct bcm43xx_dmadesc_meta *meta, |
177 | int irq_context) | 205 | int irq_context) |
178 | { | 206 | { |
@@ -188,23 +216,13 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
188 | { | 216 | { |
189 | struct device *dev = &(ring->bcm->pci_dev->dev); | 217 | struct device *dev = &(ring->bcm->pci_dev->dev); |
190 | 218 | ||
191 | ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, | 219 | ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, |
192 | &(ring->dmabase), GFP_KERNEL); | 220 | &(ring->dmabase), GFP_KERNEL); |
193 | if (!ring->vbase) { | 221 | if (!ring->descbase) { |
194 | printk(KERN_ERR PFX "DMA ringmemory allocation failed\n"); | 222 | printk(KERN_ERR PFX "DMA ringmemory allocation failed\n"); |
195 | return -ENOMEM; | 223 | return -ENOMEM; |
196 | } | 224 | } |
197 | if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) { | 225 | memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE); |
198 | printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G " | ||
199 | "(0x%llx, len: %lu)\n", | ||
200 | (unsigned long long)ring->dmabase, | ||
201 | BCM43xx_DMA_RINGMEMSIZE); | ||
202 | dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, | ||
203 | ring->vbase, ring->dmabase); | ||
204 | return -ENOMEM; | ||
205 | } | ||
206 | assert(!(ring->dmabase & 0x000003FF)); | ||
207 | memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE); | ||
208 | 226 | ||
209 | return 0; | 227 | return 0; |
210 | } | 228 | } |
@@ -214,26 +232,34 @@ static void free_ringmemory(struct bcm43xx_dmaring *ring)
214 | struct device *dev = &(ring->bcm->pci_dev->dev); | 232 | struct device *dev = &(ring->bcm->pci_dev->dev); |
215 | 233 | ||
216 | dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, | 234 | dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, |
217 | ring->vbase, ring->dmabase); | 235 | ring->descbase, ring->dmabase); |
218 | } | 236 | } |
219 | 237 | ||
220 | /* Reset the RX DMA channel */ | 238 | /* Reset the RX DMA channel */ |
221 | int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, | 239 | int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, |
222 | u16 mmio_base) | 240 | u16 mmio_base, int dma64) |
223 | { | 241 | { |
224 | int i; | 242 | int i; |
225 | u32 value; | 243 | u32 value; |
244 | u16 offset; | ||
226 | 245 | ||
227 | bcm43xx_write32(bcm, | 246 | offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL; |
228 | mmio_base + BCM43xx_DMA_RX_CONTROL, | 247 | bcm43xx_write32(bcm, mmio_base + offset, 0); |
229 | 0x00000000); | ||
230 | for (i = 0; i < 1000; i++) { | 248 | for (i = 0; i < 1000; i++) { |
231 | value = bcm43xx_read32(bcm, | 249 | offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS; |
232 | mmio_base + BCM43xx_DMA_RX_STATUS); | 250 | value = bcm43xx_read32(bcm, mmio_base + offset); |
233 | value &= BCM43xx_DMA_RXSTAT_STAT_MASK; | 251 | if (dma64) { |
234 | if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) { | 252 | value &= BCM43xx_DMA64_RXSTAT; |
235 | i = -1; | 253 | if (value == BCM43xx_DMA64_RXSTAT_DISABLED) { |
236 | break; | 254 | i = -1; |
255 | break; | ||
256 | } | ||
257 | } else { | ||
258 | value &= BCM43xx_DMA32_RXSTATE; | ||
259 | if (value == BCM43xx_DMA32_RXSTAT_DISABLED) { | ||
260 | i = -1; | ||
261 | break; | ||
262 | } | ||
237 | } | 263 | } |
238 | udelay(10); | 264 | udelay(10); |
239 | } | 265 | } |
@@ -247,31 +273,47 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
247 | 273 | ||
248 | /* Reset the RX DMA channel */ | 274 | /* Reset the RX DMA channel */ |
249 | int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, | 275 | int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, |
250 | u16 mmio_base) | 276 | u16 mmio_base, int dma64) |
251 | { | 277 | { |
252 | int i; | 278 | int i; |
253 | u32 value; | 279 | u32 value; |
280 | u16 offset; | ||
254 | 281 | ||
255 | for (i = 0; i < 1000; i++) { | 282 | for (i = 0; i < 1000; i++) { |
256 | value = bcm43xx_read32(bcm, | 283 | offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS; |
257 | mmio_base + BCM43xx_DMA_TX_STATUS); | 284 | value = bcm43xx_read32(bcm, mmio_base + offset); |
258 | value &= BCM43xx_DMA_TXSTAT_STAT_MASK; | 285 | if (dma64) { |
259 | if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED || | 286 | value &= BCM43xx_DMA64_TXSTAT; |
260 | value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT || | 287 | if (value == BCM43xx_DMA64_TXSTAT_DISABLED || |
261 | value == BCM43xx_DMA_TXSTAT_STAT_STOPPED) | 288 | value == BCM43xx_DMA64_TXSTAT_IDLEWAIT || |
262 | break; | 289 | value == BCM43xx_DMA64_TXSTAT_STOPPED) |
290 | break; | ||
291 | } else { | ||
292 | value &= BCM43xx_DMA32_TXSTATE; | ||
293 | if (value == BCM43xx_DMA32_TXSTAT_DISABLED || | ||
294 | value == BCM43xx_DMA32_TXSTAT_IDLEWAIT || | ||
295 | value == BCM43xx_DMA32_TXSTAT_STOPPED) | ||
296 | break; | ||
297 | } | ||
263 | udelay(10); | 298 | udelay(10); |
264 | } | 299 | } |
265 | bcm43xx_write32(bcm, | 300 | offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL; |
266 | mmio_base + BCM43xx_DMA_TX_CONTROL, | 301 | bcm43xx_write32(bcm, mmio_base + offset, 0); |
267 | 0x00000000); | ||
268 | for (i = 0; i < 1000; i++) { | 302 | for (i = 0; i < 1000; i++) { |
269 | value = bcm43xx_read32(bcm, | 303 | offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS; |
270 | mmio_base + BCM43xx_DMA_TX_STATUS); | 304 | value = bcm43xx_read32(bcm, mmio_base + offset); |
271 | value &= BCM43xx_DMA_TXSTAT_STAT_MASK; | 305 | if (dma64) { |
272 | if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) { | 306 | value &= BCM43xx_DMA64_TXSTAT; |
273 | i = -1; | 307 | if (value == BCM43xx_DMA64_TXSTAT_DISABLED) { |
274 | break; | 308 | i = -1; |
309 | break; | ||
310 | } | ||
311 | } else { | ||
312 | value &= BCM43xx_DMA32_TXSTATE; | ||
313 | if (value == BCM43xx_DMA32_TXSTAT_DISABLED) { | ||
314 | i = -1; | ||
315 | break; | ||
316 | } | ||
275 | } | 317 | } |
276 | udelay(10); | 318 | udelay(10); |
277 | } | 319 | } |
@@ -285,47 +327,98 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
285 | return 0; | 327 | return 0; |
286 | } | 328 | } |
287 | 329 | ||
330 | static void fill_descriptor(struct bcm43xx_dmaring *ring, | ||
331 | struct bcm43xx_dmadesc_generic *desc, | ||
332 | dma_addr_t dmaaddr, | ||
333 | u16 bufsize, | ||
334 | int start, int end, int irq) | ||
335 | { | ||
336 | int slot; | ||
337 | |||
338 | slot = bcm43xx_dma_desc2idx(ring, desc); | ||
339 | assert(slot >= 0 && slot < ring->nr_slots); | ||
340 | |||
341 | if (ring->dma64) { | ||
342 | u32 ctl0 = 0, ctl1 = 0; | ||
343 | u32 addrlo, addrhi; | ||
344 | u32 addrext; | ||
345 | |||
346 | addrlo = (u32)(dmaaddr & 0xFFFFFFFF); | ||
347 | addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING); | ||
348 | addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT); | ||
349 | addrhi |= ring->routing; | ||
350 | if (slot == ring->nr_slots - 1) | ||
351 | ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND; | ||
352 | if (start) | ||
353 | ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART; | ||
354 | if (end) | ||
355 | ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND; | ||
356 | if (irq) | ||
357 | ctl0 |= BCM43xx_DMA64_DCTL0_IRQ; | ||
358 | ctl1 |= (bufsize - ring->frameoffset) | ||
359 | & BCM43xx_DMA64_DCTL1_BYTECNT; | ||
360 | ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT) | ||
361 | & BCM43xx_DMA64_DCTL1_ADDREXT_MASK; | ||
362 | |||
363 | desc->dma64.control0 = cpu_to_le32(ctl0); | ||
364 | desc->dma64.control1 = cpu_to_le32(ctl1); | ||
365 | desc->dma64.address_low = cpu_to_le32(addrlo); | ||
366 | desc->dma64.address_high = cpu_to_le32(addrhi); | ||
367 | } else { | ||
368 | u32 ctl; | ||
369 | u32 addr; | ||
370 | u32 addrext; | ||
371 | |||
372 | addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING); | ||
373 | addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING) | ||
374 | >> BCM43xx_DMA32_ROUTING_SHIFT; | ||
375 | addr |= ring->routing; | ||
376 | ctl = (bufsize - ring->frameoffset) | ||
377 | & BCM43xx_DMA32_DCTL_BYTECNT; | ||
378 | if (slot == ring->nr_slots - 1) | ||
379 | ctl |= BCM43xx_DMA32_DCTL_DTABLEEND; | ||
380 | if (start) | ||
381 | ctl |= BCM43xx_DMA32_DCTL_FRAMESTART; | ||
382 | if (end) | ||
383 | ctl |= BCM43xx_DMA32_DCTL_FRAMEEND; | ||
384 | if (irq) | ||
385 | ctl |= BCM43xx_DMA32_DCTL_IRQ; | ||
386 | ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT) | ||
387 | & BCM43xx_DMA32_DCTL_ADDREXT_MASK; | ||
388 | |||
389 | desc->dma32.control = cpu_to_le32(ctl); | ||
390 | desc->dma32.address = cpu_to_le32(addr); | ||
391 | } | ||
392 | } | ||
393 | |||
288 | static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring, | 394 | static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring, |
289 | struct bcm43xx_dmadesc *desc, | 395 | struct bcm43xx_dmadesc_generic *desc, |
290 | struct bcm43xx_dmadesc_meta *meta, | 396 | struct bcm43xx_dmadesc_meta *meta, |
291 | gfp_t gfp_flags) | 397 | gfp_t gfp_flags) |
292 | { | 398 | { |
293 | struct bcm43xx_rxhdr *rxhdr; | 399 | struct bcm43xx_rxhdr *rxhdr; |
400 | struct bcm43xx_hwxmitstatus *xmitstat; | ||
294 | dma_addr_t dmaaddr; | 401 | dma_addr_t dmaaddr; |
295 | u32 desc_addr; | ||
296 | u32 desc_ctl; | ||
297 | const int slot = (int)(desc - ring->vbase); | ||
298 | struct sk_buff *skb; | 402 | struct sk_buff *skb; |
299 | 403 | ||
300 | assert(slot >= 0 && slot < ring->nr_slots); | ||
301 | assert(!ring->tx); | 404 | assert(!ring->tx); |
302 | 405 | ||
303 | skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); | 406 | skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); |
304 | if (unlikely(!skb)) | 407 | if (unlikely(!skb)) |
305 | return -ENOMEM; | 408 | return -ENOMEM; |
306 | dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); | 409 | dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); |
307 | if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) { | ||
308 | unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); | ||
309 | dev_kfree_skb_any(skb); | ||
310 | printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G " | ||
311 | "(0x%llx, len: %u)\n", | ||
312 | (unsigned long long)dmaaddr, ring->rx_buffersize); | ||
313 | return -ENOMEM; | ||
314 | } | ||
315 | meta->skb = skb; | 410 | meta->skb = skb; |
316 | meta->dmaaddr = dmaaddr; | 411 | meta->dmaaddr = dmaaddr; |
317 | skb->dev = ring->bcm->net_dev; | 412 | skb->dev = ring->bcm->net_dev; |
318 | desc_addr = (u32)(dmaaddr + ring->memoffset); | 413 | |
319 | desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK & | 414 | fill_descriptor(ring, desc, dmaaddr, |
320 | (u32)(ring->rx_buffersize - ring->frameoffset)); | 415 | ring->rx_buffersize, 0, 0, 0); |
321 | if (slot == ring->nr_slots - 1) | ||
322 | desc_ctl |= BCM43xx_DMADTOR_DTABLEEND; | ||
323 | set_desc_addr(desc, desc_addr); | ||
324 | set_desc_ctl(desc, desc_ctl); | ||
325 | 416 | ||
326 | rxhdr = (struct bcm43xx_rxhdr *)(skb->data); | 417 | rxhdr = (struct bcm43xx_rxhdr *)(skb->data); |
327 | rxhdr->frame_length = 0; | 418 | rxhdr->frame_length = 0; |
328 | rxhdr->flags1 = 0; | 419 | rxhdr->flags1 = 0; |
420 | xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data); | ||
421 | xmitstat->cookie = 0; | ||
329 | 422 | ||
330 | return 0; | 423 | return 0; |
331 | } | 424 | } |
@@ -336,17 +429,17 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
336 | static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring) | 429 | static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring) |
337 | { | 430 | { |
338 | int i, err = -ENOMEM; | 431 | int i, err = -ENOMEM; |
339 | struct bcm43xx_dmadesc *desc; | 432 | struct bcm43xx_dmadesc_generic *desc; |
340 | struct bcm43xx_dmadesc_meta *meta; | 433 | struct bcm43xx_dmadesc_meta *meta; |
341 | 434 | ||
342 | for (i = 0; i < ring->nr_slots; i++) { | 435 | for (i = 0; i < ring->nr_slots; i++) { |
343 | desc = ring->vbase + i; | 436 | desc = bcm43xx_dma_idx2desc(ring, i, &meta); |
344 | meta = ring->meta + i; | ||
345 | 437 | ||
346 | err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); | 438 | err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); |
347 | if (err) | 439 | if (err) |
348 | goto err_unwind; | 440 | goto err_unwind; |
349 | } | 441 | } |
442 | mb(); | ||
350 | ring->used_slots = ring->nr_slots; | 443 | ring->used_slots = ring->nr_slots; |
351 | err = 0; | 444 | err = 0; |
352 | out: | 445 | out: |
@@ -354,8 +447,7 @@ out:
354 | 447 | ||
355 | err_unwind: | 448 | err_unwind: |
356 | for (i--; i >= 0; i--) { | 449 | for (i--; i >= 0; i--) { |
357 | desc = ring->vbase + i; | 450 | desc = bcm43xx_dma_idx2desc(ring, i, &meta); |
358 | meta = ring->meta + i; | ||
359 | 451 | ||
360 | unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); | 452 | unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); |
361 | dev_kfree_skb(meta->skb); | 453 | dev_kfree_skb(meta->skb); |
@@ -371,27 +463,67 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
371 | { | 463 | { |
372 | int err = 0; | 464 | int err = 0; |
373 | u32 value; | 465 | u32 value; |
466 | u32 addrext; | ||
374 | 467 | ||
375 | if (ring->tx) { | 468 | if (ring->tx) { |
376 | /* Set Transmit Control register to "transmit enable" */ | 469 | if (ring->dma64) { |
377 | bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, | 470 | u64 ringbase = (u64)(ring->dmabase); |
378 | BCM43xx_DMA_TXCTRL_ENABLE); | 471 | |
379 | /* Set Transmit Descriptor ring address. */ | 472 | addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT); |
380 | bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, | 473 | value = BCM43xx_DMA64_TXENABLE; |
381 | ring->dmabase + ring->memoffset); | 474 | value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT) |
475 | & BCM43xx_DMA64_TXADDREXT_MASK; | ||
476 | bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value); | ||
477 | bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, | ||
478 | (ringbase & 0xFFFFFFFF)); | ||
479 | bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, | ||
480 | ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING) | ||
481 | | ring->routing); | ||
482 | } else { | ||
483 | u32 ringbase = (u32)(ring->dmabase); | ||
484 | |||
485 | addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT); | ||
486 | value = BCM43xx_DMA32_TXENABLE; | ||
487 | value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT) | ||
488 | & BCM43xx_DMA32_TXADDREXT_MASK; | ||
489 | bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value); | ||
490 | bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, | ||
491 | (ringbase & ~BCM43xx_DMA32_ROUTING) | ||
492 | | ring->routing); | ||
493 | } | ||
382 | } else { | 494 | } else { |
383 | err = alloc_initial_descbuffers(ring); | 495 | err = alloc_initial_descbuffers(ring); |
384 | if (err) | 496 | if (err) |
385 | goto out; | 497 | goto out; |
386 | /* Set Receive Control "receive enable" and frame offset */ | 498 | if (ring->dma64) { |
387 | value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT); | 499 | u64 ringbase = (u64)(ring->dmabase); |
388 | value |= BCM43xx_DMA_RXCTRL_ENABLE; | 500 | |
389 | bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value); | 501 | addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT); |
390 | /* Set Receive Descriptor ring address. */ | 502 | value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT); |
391 | bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, | 503 | value |= BCM43xx_DMA64_RXENABLE; |
392 | ring->dmabase + ring->memoffset); | 504 | value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT) |
393 | /* Init the descriptor pointer. */ | 505 | & BCM43xx_DMA64_RXADDREXT_MASK; |
394 | bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200); | 506 | bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value); |
507 | bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, | ||
508 | (ringbase & 0xFFFFFFFF)); | ||
509 | bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, | ||
510 | ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING) | ||
511 | | ring->routing); | ||
512 | bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200); | ||
513 | } else { | ||
514 | u32 ringbase = (u32)(ring->dmabase); | ||
515 | |||
516 | addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT); | ||
517 | value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT); | ||
518 | value |= BCM43xx_DMA32_RXENABLE; | ||
519 | value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT) | ||
520 | & BCM43xx_DMA32_RXADDREXT_MASK; | ||
521 | bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value); | ||
522 | bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, | ||
523 | (ringbase & ~BCM43xx_DMA32_ROUTING) | ||
524 | | ring->routing); | ||
525 | bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200); | ||
526 | } | ||
395 | } | 527 | } |
396 | 528 | ||
397 | out: | 529 | out: |
@@ -402,27 +534,32 @@ out:
402 | static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring) | 534 | static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring) |
403 | { | 535 | { |
404 | if (ring->tx) { | 536 | if (ring->tx) { |
405 | bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base); | 537 | bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64); |
406 | /* Zero out Transmit Descriptor ring address. */ | 538 | if (ring->dma64) { |
407 | bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0); | 539 | bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0); |
540 | bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0); | ||
541 | } else | ||
542 | bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0); | ||
408 | } else { | 543 | } else { |
409 | bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base); | 544 | bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64); |
410 | /* Zero out Receive Descriptor ring address. */ | 545 | if (ring->dma64) { |
411 | bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0); | 546 | bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0); |
547 | bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0); | ||
548 | } else | ||
549 | bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0); | ||
412 | } | 550 | } |
413 | } | 551 | } |
414 | 552 | ||
415 | static void free_all_descbuffers(struct bcm43xx_dmaring *ring) | 553 | static void free_all_descbuffers(struct bcm43xx_dmaring *ring) |
416 | { | 554 | { |
417 | struct bcm43xx_dmadesc *desc; | 555 | struct bcm43xx_dmadesc_generic *desc; |
418 | struct bcm43xx_dmadesc_meta *meta; | 556 | struct bcm43xx_dmadesc_meta *meta; |
419 | int i; | 557 | int i; |
420 | 558 | ||
421 | if (!ring->used_slots) | 559 | if (!ring->used_slots) |
422 | return; | 560 | return; |
423 | for (i = 0; i < ring->nr_slots; i++) { | 561 | for (i = 0; i < ring->nr_slots; i++) { |
424 | desc = ring->vbase + i; | 562 | desc = bcm43xx_dma_idx2desc(ring, i, &meta); |
425 | meta = ring->meta + i; | ||
426 | 563 | ||
427 | if (!meta->skb) { | 564 | if (!meta->skb) { |
428 | assert(ring->tx); | 565 | assert(ring->tx); |
@@ -430,62 +567,67 @@ static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
430 | } | 567 | } |
431 | if (ring->tx) { | 568 | if (ring->tx) { |
432 | unmap_descbuffer(ring, meta->dmaaddr, | 569 | unmap_descbuffer(ring, meta->dmaaddr, |
433 | meta->skb->len, 1); | 570 | meta->skb->len, 1); |
434 | } else { | 571 | } else { |
435 | unmap_descbuffer(ring, meta->dmaaddr, | 572 | unmap_descbuffer(ring, meta->dmaaddr, |
436 | ring->rx_buffersize, 0); | 573 | ring->rx_buffersize, 0); |
437 | } | 574 | } |
438 | free_descriptor_buffer(ring, desc, meta, 0); | 575 | free_descriptor_buffer(ring, meta, 0); |
439 | } | 576 | } |
440 | } | 577 | } |
441 | 578 | ||
442 | /* Main initialization function. */ | 579 | /* Main initialization function. */ |
443 | static | 580 | static |
444 | struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm, | 581 | struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm, |
445 | u16 dma_controller_base, | 582 | int controller_index, |
446 | int nr_descriptor_slots, | 583 | int for_tx, |
447 | int tx) | 584 | int dma64) |
448 | { | 585 | { |
449 | struct bcm43xx_dmaring *ring; | 586 | struct bcm43xx_dmaring *ring; |
450 | int err; | 587 | int err; |
588 | int nr_slots; | ||
451 | 589 | ||
452 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | 590 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
453 | if (!ring) | 591 | if (!ring) |
454 | goto out; | 592 | goto out; |
455 | 593 | ||
456 | ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots, | 594 | nr_slots = BCM43xx_RXRING_SLOTS; |
595 | if (for_tx) | ||
596 | nr_slots = BCM43xx_TXRING_SLOTS; | ||
597 | |||
598 | ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta), | ||
457 | GFP_KERNEL); | 599 | GFP_KERNEL); |
458 | if (!ring->meta) | 600 | if (!ring->meta) |
459 | goto err_kfree_ring; | 601 | goto err_kfree_ring; |
460 | 602 | ||
461 | ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET; | 603 | ring->routing = BCM43xx_DMA32_CLIENTTRANS; |
604 | if (dma64) | ||
605 | ring->routing = BCM43xx_DMA64_CLIENTTRANS; | ||
462 | #ifdef CONFIG_BCM947XX | 606 | #ifdef CONFIG_BCM947XX |
463 | if (bcm->pci_dev->bus->number == 0) | 607 | if (bcm->pci_dev->bus->number == 0) |
464 | ring->memoffset = 0; | 608 | ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS; |
465 | #endif | 609 | #endif |
466 | 610 | ||
467 | ring->bcm = bcm; | 611 | ring->bcm = bcm; |
468 | ring->nr_slots = nr_descriptor_slots; | 612 | ring->nr_slots = nr_slots; |
469 | ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100; | 613 | ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100; |
470 | ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100; | 614 | ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100; |
471 | assert(ring->suspend_mark < ring->resume_mark); | 615 | assert(ring->suspend_mark < ring->resume_mark); |
472 | ring->mmio_base = dma_controller_base; | 616 | ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index); |
473 | if (tx) { | 617 | ring->index = controller_index; |
618 | ring->dma64 = !!dma64; | ||
619 | if (for_tx) { | ||
474 | ring->tx = 1; | 620 | ring->tx = 1; |
475 | ring->current_slot = -1; | 621 | ring->current_slot = -1; |
476 | } else { | 622 | } else { |
477 | switch (dma_controller_base) { | 623 | if (ring->index == 0) { |
478 | case BCM43xx_MMIO_DMA1_BASE: | 624 | ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE; |
479 | ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE; | 625 | ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET; |
480 | ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET; | 626 | } else if (ring->index == 3) { |
481 | break; | 627 | ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE; |
482 | case BCM43xx_MMIO_DMA4_BASE: | 628 | ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET; |
483 | ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE; | 629 | } else |
484 | ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET; | ||
485 | break; | ||
486 | default: | ||
487 | assert(0); | 630 | assert(0); |
488 | } | ||
489 | } | 631 | } |
490 | 632 | ||
491 | err = alloc_ringmemory(ring); | 633 | err = alloc_ringmemory(ring); |
@@ -514,7 +656,8 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring) | |||
514 | if (!ring) | 656 | if (!ring) |
515 | return; | 657 | return; |
516 | 658 | ||
517 | dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n", | 659 | dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n", |
660 | (ring->dma64) ? "64" : "32", | ||
518 | ring->mmio_base, | 661 | ring->mmio_base, |
519 | (ring->tx) ? "TX" : "RX", | 662 | (ring->tx) ? "TX" : "RX", |
520 | ring->max_used_slots, ring->nr_slots); | 663 | ring->max_used_slots, ring->nr_slots); |
@@ -537,10 +680,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
537 | return; | 680 | return; |
538 | dma = bcm43xx_current_dma(bcm); | 681 | dma = bcm43xx_current_dma(bcm); |
539 | 682 | ||
540 | bcm43xx_destroy_dmaring(dma->rx_ring1); | 683 | bcm43xx_destroy_dmaring(dma->rx_ring3); |
541 | dma->rx_ring1 = NULL; | 684 | dma->rx_ring3 = NULL; |
542 | bcm43xx_destroy_dmaring(dma->rx_ring0); | 685 | bcm43xx_destroy_dmaring(dma->rx_ring0); |
543 | dma->rx_ring0 = NULL; | 686 | dma->rx_ring0 = NULL; |
687 | |||
688 | bcm43xx_destroy_dmaring(dma->tx_ring5); | ||
689 | dma->tx_ring5 = NULL; | ||
690 | bcm43xx_destroy_dmaring(dma->tx_ring4); | ||
691 | dma->tx_ring4 = NULL; | ||
544 | bcm43xx_destroy_dmaring(dma->tx_ring3); | 692 | bcm43xx_destroy_dmaring(dma->tx_ring3); |
545 | dma->tx_ring3 = NULL; | 693 | dma->tx_ring3 = NULL; |
546 | bcm43xx_destroy_dmaring(dma->tx_ring2); | 694 | bcm43xx_destroy_dmaring(dma->tx_ring2); |
@@ -556,48 +704,59 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
556 | struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm); | 704 | struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm); |
557 | struct bcm43xx_dmaring *ring; | 705 | struct bcm43xx_dmaring *ring; |
558 | int err = -ENOMEM; | 706 | int err = -ENOMEM; |
707 | int dma64 = 0; | ||
708 | u32 sbtmstatehi; | ||
709 | |||
710 | sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH); | ||
711 | if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT) | ||
712 | dma64 = 1; | ||
559 | 713 | ||
560 | /* setup TX DMA channels. */ | 714 | /* setup TX DMA channels. */ |
561 | ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE, | 715 | ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64); |
562 | BCM43xx_TXRING_SLOTS, 1); | ||
563 | if (!ring) | 716 | if (!ring) |
564 | goto out; | 717 | goto out; |
565 | dma->tx_ring0 = ring; | 718 | dma->tx_ring0 = ring; |
566 | 719 | ||
567 | ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE, | 720 | ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64); |
568 | BCM43xx_TXRING_SLOTS, 1); | ||
569 | if (!ring) | 721 | if (!ring) |
570 | goto err_destroy_tx0; | 722 | goto err_destroy_tx0; |
571 | dma->tx_ring1 = ring; | 723 | dma->tx_ring1 = ring; |
572 | 724 | ||
573 | ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE, | 725 | ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64); |
574 | BCM43xx_TXRING_SLOTS, 1); | ||
575 | if (!ring) | 726 | if (!ring) |
576 | goto err_destroy_tx1; | 727 | goto err_destroy_tx1; |
577 | dma->tx_ring2 = ring; | 728 | dma->tx_ring2 = ring; |
578 | 729 | ||
579 | ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE, | 730 | ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64); |
580 | BCM43xx_TXRING_SLOTS, 1); | ||
581 | if (!ring) | 731 | if (!ring) |
582 | goto err_destroy_tx2; | 732 | goto err_destroy_tx2; |
583 | dma->tx_ring3 = ring; | 733 | dma->tx_ring3 = ring; |
584 | 734 | ||
585 | /* setup RX DMA channels. */ | 735 | ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64); |
586 | ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE, | ||
587 | BCM43xx_RXRING_SLOTS, 0); | ||
588 | if (!ring) | 736 | if (!ring) |
589 | goto err_destroy_tx3; | 737 | goto err_destroy_tx3; |
738 | dma->tx_ring4 = ring; | ||
739 | |||
740 | ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64); | ||
741 | if (!ring) | ||
742 | goto err_destroy_tx4; | ||
743 | dma->tx_ring5 = ring; | ||
744 | |||
745 | /* setup RX DMA channels. */ | ||
746 | ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64); | ||
747 | if (!ring) | ||
748 | goto err_destroy_tx5; | ||
590 | dma->rx_ring0 = ring; | 749 | dma->rx_ring0 = ring; |
591 | 750 | ||
592 | if (bcm->current_core->rev < 5) { | 751 | if (bcm->current_core->rev < 5) { |
593 | ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE, | 752 | ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64); |
594 | BCM43xx_RXRING_SLOTS, 0); | ||
595 | if (!ring) | 753 | if (!ring) |
596 | goto err_destroy_rx0; | 754 | goto err_destroy_rx0; |
597 | dma->rx_ring1 = ring; | 755 | dma->rx_ring3 = ring; |
598 | } | 756 | } |
599 | 757 | ||
600 | dprintk(KERN_INFO PFX "DMA initialized\n"); | 758 | dprintk(KERN_INFO PFX "%s DMA initialized\n", |
759 | dma64 ? "64-bit" : "32-bit"); | ||
601 | err = 0; | 760 | err = 0; |
602 | out: | 761 | out: |
603 | return err; | 762 | return err; |
@@ -605,6 +764,12 @@ out:
605 | err_destroy_rx0: | 764 | err_destroy_rx0: |
606 | bcm43xx_destroy_dmaring(dma->rx_ring0); | 765 | bcm43xx_destroy_dmaring(dma->rx_ring0); |
607 | dma->rx_ring0 = NULL; | 766 | dma->rx_ring0 = NULL; |
767 | err_destroy_tx5: | ||
768 | bcm43xx_destroy_dmaring(dma->tx_ring5); | ||
769 | dma->tx_ring5 = NULL; | ||
770 | err_destroy_tx4: | ||
771 | bcm43xx_destroy_dmaring(dma->tx_ring4); | ||
772 | dma->tx_ring4 = NULL; | ||
608 | err_destroy_tx3: | 773 | err_destroy_tx3: |
609 | bcm43xx_destroy_dmaring(dma->tx_ring3); | 774 | bcm43xx_destroy_dmaring(dma->tx_ring3); |
610 | dma->tx_ring3 = NULL; | 775 | dma->tx_ring3 = NULL; |
@@ -624,7 +789,7 @@ err_destroy_tx0:
624 | static u16 generate_cookie(struct bcm43xx_dmaring *ring, | 789 | static u16 generate_cookie(struct bcm43xx_dmaring *ring, |
625 | int slot) | 790 | int slot) |
626 | { | 791 | { |
627 | u16 cookie = 0xF000; | 792 | u16 cookie = 0x1000; |
628 | 793 | ||
629 | /* Use the upper 4 bits of the cookie as | 794 | /* Use the upper 4 bits of the cookie as |
630 | * DMA controller ID and store the slot number | 795 | * DMA controller ID and store the slot number |
@@ -632,21 +797,25 @@ static u16 generate_cookie(struct bcm43xx_dmaring *ring,
632 | * Note that the cookie must never be 0, as this | 797 | * Note that the cookie must never be 0, as this |
633 | * is a special value used in RX path. | 798 | * is a special value used in RX path. |
634 | */ | 799 | */ |
635 | switch (ring->mmio_base) { | 800 | switch (ring->index) { |
636 | default: | 801 | case 0: |
637 | assert(0); | ||
638 | case BCM43xx_MMIO_DMA1_BASE: | ||
639 | cookie = 0xA000; | 802 | cookie = 0xA000; |
640 | break; | 803 | break; |
641 | case BCM43xx_MMIO_DMA2_BASE: | 804 | case 1: |
642 | cookie = 0xB000; | 805 | cookie = 0xB000; |
643 | break; | 806 | break; |
644 | case BCM43xx_MMIO_DMA3_BASE: | 807 | case 2: |
645 | cookie = 0xC000; | 808 | cookie = 0xC000; |
646 | break; | 809 | break; |
647 | case BCM43xx_MMIO_DMA4_BASE: | 810 | case 3: |
648 | cookie = 0xD000; | 811 | cookie = 0xD000; |
649 | break; | 812 | break; |
813 | case 4: | ||
814 | cookie = 0xE000; | ||
815 | break; | ||
816 | case 5: | ||
817 | cookie = 0xF000; | ||
818 | break; | ||
650 | } | 819 | } |
651 | assert(((u16)slot & 0xF000) == 0x0000); | 820 | assert(((u16)slot & 0xF000) == 0x0000); |
652 | cookie |= (u16)slot; | 821 | cookie |= (u16)slot; |
@@ -675,6 +844,12 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
675 | case 0xD000: | 844 | case 0xD000: |
676 | ring = dma->tx_ring3; | 845 | ring = dma->tx_ring3; |
677 | break; | 846 | break; |
847 | case 0xE000: | ||
848 | ring = dma->tx_ring4; | ||
849 | break; | ||
850 | case 0xF000: | ||
851 | ring = dma->tx_ring5; | ||
852 | break; | ||
678 | default: | 853 | default: |
679 | assert(0); | 854 | assert(0); |
680 | } | 855 | } |
@@ -687,6 +862,9 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
687 | static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring, | 862 | static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring, |
688 | int slot) | 863 | int slot) |
689 | { | 864 | { |
865 | u16 offset; | ||
866 | int descsize; | ||
867 | |||
690 | /* Everything is ready to start. Buffers are DMA mapped and | 868 | /* Everything is ready to start. Buffers are DMA mapped and |
691 | * associated with slots. | 869 | * associated with slots. |
692 | * "slot" is the last slot of the new frame we want to transmit. | 870 | * "slot" is the last slot of the new frame we want to transmit. |
@@ -694,25 +872,26 @@ static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
694 | */ | 872 | */ |
695 | wmb(); | 873 | wmb(); |
696 | slot = next_slot(ring, slot); | 874 | slot = next_slot(ring, slot); |
697 | bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX, | 875 | offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX; |
698 | (u32)(slot * sizeof(struct bcm43xx_dmadesc))); | 876 | descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64) |
877 | : sizeof(struct bcm43xx_dmadesc32); | ||
878 | bcm43xx_dma_write(ring, offset, | ||
879 | (u32)(slot * descsize)); | ||
699 | } | 880 | } |
700 | 881 | ||
701 | static int dma_tx_fragment(struct bcm43xx_dmaring *ring, | 882 | static void dma_tx_fragment(struct bcm43xx_dmaring *ring, |
702 | struct sk_buff *skb, | 883 | struct sk_buff *skb, |
703 | u8 cur_frag) | 884 | u8 cur_frag) |
704 | { | 885 | { |
705 | int slot; | 886 | int slot; |
706 | struct bcm43xx_dmadesc *desc; | 887 | struct bcm43xx_dmadesc_generic *desc; |
707 | struct bcm43xx_dmadesc_meta *meta; | 888 | struct bcm43xx_dmadesc_meta *meta; |
708 | u32 desc_ctl; | 889 | dma_addr_t dmaaddr; |
709 | u32 desc_addr; | ||
710 | 890 | ||
711 | assert(skb_shinfo(skb)->nr_frags == 0); | 891 | assert(skb_shinfo(skb)->nr_frags == 0); |
712 | 892 | ||
713 | slot = request_slot(ring); | 893 | slot = request_slot(ring); |
714 | desc = ring->vbase + slot; | 894 | desc = bcm43xx_dma_idx2desc(ring, slot, &meta); |
715 | meta = ring->meta + slot; | ||
716 | 895 | ||
717 | /* Add a device specific TX header. */ | 896 | /* Add a device specific TX header. */ |
718 | assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr)); | 897 | assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr)); |
@@ -729,29 +908,14 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
729 | generate_cookie(ring, slot)); | 908 | generate_cookie(ring, slot)); |
730 | 909 | ||
731 | meta->skb = skb; | 910 | meta->skb = skb; |
732 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); | 911 | dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); |
733 | if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) { | 912 | meta->dmaaddr = dmaaddr; |
734 | return_slot(ring, slot); | ||
735 | printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G " | ||
736 | "(0x%llx, len: %u)\n", | ||
737 | (unsigned long long)meta->dmaaddr, skb->len); | ||
738 | return -ENOMEM; | ||
739 | } | ||
740 | 913 | ||
741 | desc_addr = (u32)(meta->dmaaddr + ring->memoffset); | 914 | fill_descriptor(ring, desc, dmaaddr, |
742 | desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND; | 915 | skb->len, 1, 1, 1); |
743 | desc_ctl |= BCM43xx_DMADTOR_COMPIRQ; | ||
744 | desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK & | ||
745 | (u32)(meta->skb->len - ring->frameoffset)); | ||
746 | if (slot == ring->nr_slots - 1) | ||
747 | desc_ctl |= BCM43xx_DMADTOR_DTABLEEND; | ||
748 | 916 | ||
749 | set_desc_ctl(desc, desc_ctl); | ||
750 | set_desc_addr(desc, desc_addr); | ||
751 | /* Now transfer the whole frame. */ | 917 | /* Now transfer the whole frame. */ |
752 | dmacontroller_poke_tx(ring, slot); | 918 | dmacontroller_poke_tx(ring, slot); |
753 | |||
754 | return 0; | ||
755 | } | 919 | } |
756 | 920 | ||
757 | int bcm43xx_dma_tx(struct bcm43xx_private *bcm, | 921 | int bcm43xx_dma_tx(struct bcm43xx_private *bcm, |
@@ -781,7 +945,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
781 | /* Take skb from ieee80211_txb_free */ | 945 | /* Take skb from ieee80211_txb_free */ |
782 | txb->fragments[i] = NULL; | 946 | txb->fragments[i] = NULL; |
783 | dma_tx_fragment(ring, skb, i); | 947 | dma_tx_fragment(ring, skb, i); |
784 | //TODO: handle failure of dma_tx_fragment | ||
785 | } | 948 | } |
786 | ieee80211_txb_free(txb); | 949 | ieee80211_txb_free(txb); |
787 | 950 | ||
@@ -792,23 +955,28 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
792 | struct bcm43xx_xmitstatus *status) | 955 | struct bcm43xx_xmitstatus *status) |
793 | { | 956 | { |
794 | struct bcm43xx_dmaring *ring; | 957 | struct bcm43xx_dmaring *ring; |
795 | struct bcm43xx_dmadesc *desc; | 958 | struct bcm43xx_dmadesc_generic *desc; |
796 | struct bcm43xx_dmadesc_meta *meta; | 959 | struct bcm43xx_dmadesc_meta *meta; |
797 | int is_last_fragment; | 960 | int is_last_fragment; |
798 | int slot; | 961 | int slot; |
962 | u32 tmp; | ||
799 | 963 | ||
800 | ring = parse_cookie(bcm, status->cookie, &slot); | 964 | ring = parse_cookie(bcm, status->cookie, &slot); |
801 | assert(ring); | 965 | assert(ring); |
802 | assert(ring->tx); | 966 | assert(ring->tx); |
803 | assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART); | ||
804 | while (1) { | 967 | while (1) { |
805 | assert(slot >= 0 && slot < ring->nr_slots); | 968 | assert(slot >= 0 && slot < ring->nr_slots); |
806 | desc = ring->vbase + slot; | 969 | desc = bcm43xx_dma_idx2desc(ring, slot, &meta); |
807 | meta = ring->meta + slot; | ||
808 | 970 | ||
809 | is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND); | 971 | if (ring->dma64) { |
972 | tmp = le32_to_cpu(desc->dma64.control0); | ||
973 | is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND); | ||
974 | } else { | ||
975 | tmp = le32_to_cpu(desc->dma32.control); | ||
976 | is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND); | ||
977 | } | ||
810 | unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); | 978 | unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); |
811 | free_descriptor_buffer(ring, desc, meta, 1); | 979 | free_descriptor_buffer(ring, meta, 1); |
812 | /* Everything belonging to the slot is unmapped | 980 | /* Everything belonging to the slot is unmapped |
813 | * and freed, so we can return it. | 981 | * and freed, so we can return it. |
814 | */ | 982 | */ |
@@ -824,7 +992,7 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
824 | static void dma_rx(struct bcm43xx_dmaring *ring, | 992 | static void dma_rx(struct bcm43xx_dmaring *ring, |
825 | int *slot) | 993 | int *slot) |
826 | { | 994 | { |
827 | struct bcm43xx_dmadesc *desc; | 995 | struct bcm43xx_dmadesc_generic *desc; |
828 | struct bcm43xx_dmadesc_meta *meta; | 996 | struct bcm43xx_dmadesc_meta *meta; |
829 | struct bcm43xx_rxhdr *rxhdr; | 997 | struct bcm43xx_rxhdr *rxhdr; |
830 | struct sk_buff *skb; | 998 | struct sk_buff *skb; |
@@ -832,13 +1000,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring, | |||
832 | int err; | 1000 | int err; |
833 | dma_addr_t dmaaddr; | 1001 | dma_addr_t dmaaddr; |
834 | 1002 | ||
835 | desc = ring->vbase + *slot; | 1003 | desc = bcm43xx_dma_idx2desc(ring, *slot, &meta); |
836 | meta = ring->meta + *slot; | ||
837 | 1004 | ||
838 | sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); | 1005 | sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); |
839 | skb = meta->skb; | 1006 | skb = meta->skb; |
840 | 1007 | ||
841 | if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) { | 1008 | if (ring->index == 3) { |
842 | /* We received an xmit status. */ | 1009 | /* We received an xmit status. */ |
843 | struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data; | 1010 | struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data; |
844 | struct bcm43xx_xmitstatus stat; | 1011 | struct bcm43xx_xmitstatus stat; |
@@ -894,8 +1061,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring, | |||
894 | s32 tmp = len; | 1061 | s32 tmp = len; |
895 | 1062 | ||
896 | while (1) { | 1063 | while (1) { |
897 | desc = ring->vbase + *slot; | 1064 | desc = bcm43xx_dma_idx2desc(ring, *slot, &meta); |
898 | meta = ring->meta + *slot; | ||
899 | /* recycle the descriptor buffer. */ | 1065 | /* recycle the descriptor buffer. */ |
900 | sync_descbuffer_for_device(ring, meta->dmaaddr, | 1066 | sync_descbuffer_for_device(ring, meta->dmaaddr, |
901 | ring->rx_buffersize); | 1067 | ring->rx_buffersize); |
@@ -906,8 +1072,8 @@ static void dma_rx(struct bcm43xx_dmaring *ring, | |||
906 | break; | 1072 | break; |
907 | } | 1073 | } |
908 | printkl(KERN_ERR PFX "DMA RX buffer too small " | 1074 | printkl(KERN_ERR PFX "DMA RX buffer too small " |
909 | "(len: %u, buffer: %u, nr-dropped: %d)\n", | 1075 | "(len: %u, buffer: %u, nr-dropped: %d)\n", |
910 | len, ring->rx_buffersize, cnt); | 1076 | len, ring->rx_buffersize, cnt); |
911 | goto drop; | 1077 | goto drop; |
912 | } | 1078 | } |
913 | len -= IEEE80211_FCS_LEN; | 1079 | len -= IEEE80211_FCS_LEN; |
@@ -945,9 +1111,15 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring) | |||
945 | #endif | 1111 | #endif |
946 | 1112 | ||
947 | assert(!ring->tx); | 1113 | assert(!ring->tx); |
948 | status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS); | 1114 | if (ring->dma64) { |
949 | descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK); | 1115 | status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS); |
950 | current_slot = descptr / sizeof(struct bcm43xx_dmadesc); | 1116 | descptr = (status & BCM43xx_DMA64_RXSTATDPTR); |
1117 | current_slot = descptr / sizeof(struct bcm43xx_dmadesc64); | ||
1118 | } else { | ||
1119 | status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS); | ||
1120 | descptr = (status & BCM43xx_DMA32_RXDPTR); | ||
1121 | current_slot = descptr / sizeof(struct bcm43xx_dmadesc32); | ||
1122 | } | ||
951 | assert(current_slot >= 0 && current_slot < ring->nr_slots); | 1123 | assert(current_slot >= 0 && current_slot < ring->nr_slots); |
952 | 1124 | ||
953 | slot = ring->current_slot; | 1125 | slot = ring->current_slot; |
@@ -958,8 +1130,13 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring) | |||
958 | ring->max_used_slots = used_slots; | 1130 | ring->max_used_slots = used_slots; |
959 | #endif | 1131 | #endif |
960 | } | 1132 | } |
961 | bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, | 1133 | if (ring->dma64) { |
962 | (u32)(slot * sizeof(struct bcm43xx_dmadesc))); | 1134 | bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, |
1135 | (u32)(slot * sizeof(struct bcm43xx_dmadesc64))); | ||
1136 | } else { | ||
1137 | bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, | ||
1138 | (u32)(slot * sizeof(struct bcm43xx_dmadesc32))); | ||
1139 | } | ||
963 | ring->current_slot = slot; | 1140 | ring->current_slot = slot; |
964 | } | 1141 | } |
965 | 1142 | ||
@@ -967,16 +1144,28 @@ void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring) | |||
967 | { | 1144 | { |
968 | assert(ring->tx); | 1145 | assert(ring->tx); |
969 | bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1); | 1146 | bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1); |
970 | bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, | 1147 | if (ring->dma64) { |
971 | bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL) | 1148 | bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, |
972 | | BCM43xx_DMA_TXCTRL_SUSPEND); | 1149 | bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL) |
1150 | | BCM43xx_DMA64_TXSUSPEND); | ||
1151 | } else { | ||
1152 | bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, | ||
1153 | bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL) | ||
1154 | | BCM43xx_DMA32_TXSUSPEND); | ||
1155 | } | ||
973 | } | 1156 | } |
974 | 1157 | ||
975 | void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring) | 1158 | void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring) |
976 | { | 1159 | { |
977 | assert(ring->tx); | 1160 | assert(ring->tx); |
978 | bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, | 1161 | if (ring->dma64) { |
979 | bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL) | 1162 | bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, |
980 | & ~BCM43xx_DMA_TXCTRL_SUSPEND); | 1163 | bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL) |
1164 | & ~BCM43xx_DMA64_TXSUSPEND); | ||
1165 | } else { | ||
1166 | bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, | ||
1167 | bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL) | ||
1168 | & ~BCM43xx_DMA32_TXSUSPEND); | ||
1169 | } | ||
981 | bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1); | 1170 | bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1); |
982 | } | 1171 | } |
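The suspend/resume hunks above repeat the same dma64/dma32 split; condensed outside the diff, the read-modify-write pattern is roughly the sketch below. Every identifier comes from this patch; only the helper name dma_set_txsuspend() is illustrative.

/* Sketch: pick the per-engine TXCTL register and SUSPEND bit, then
 * read-modify-write it, as bcm43xx_dma_tx_suspend/resume now do. */
static void dma_set_txsuspend(struct bcm43xx_dmaring *ring, int suspend)
{
	u16 ctl_reg = ring->dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	u32 bit = ring->dma64 ? BCM43xx_DMA64_TXSUSPEND : BCM43xx_DMA32_TXSUSPEND;
	u32 ctl = bcm43xx_dma_read(ring, ctl_reg);

	if (suspend)
		ctl |= bit;
	else
		ctl &= ~bit;
	bcm43xx_dma_write(ring, ctl_reg, ctl);
}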
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h index b7d77638ba8c..e04bcaddd1d0 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h | |||
@@ -14,63 +14,179 @@ | |||
14 | #define BCM43xx_DMAIRQ_NONFATALMASK (1 << 13) | 14 | #define BCM43xx_DMAIRQ_NONFATALMASK (1 << 13) |
15 | #define BCM43xx_DMAIRQ_RX_DONE (1 << 16) | 15 | #define BCM43xx_DMAIRQ_RX_DONE (1 << 16) |
16 | 16 | ||
17 | /* DMA controller register offsets. (relative to BCM43xx_DMA#_BASE) */ | 17 | |
18 | #define BCM43xx_DMA_TX_CONTROL 0x00 | 18 | /*** 32-bit DMA Engine. ***/ |
19 | #define BCM43xx_DMA_TX_DESC_RING 0x04 | 19 | |
20 | #define BCM43xx_DMA_TX_DESC_INDEX 0x08 | 20 | /* 32-bit DMA controller registers. */ |
21 | #define BCM43xx_DMA_TX_STATUS 0x0c | 21 | #define BCM43xx_DMA32_TXCTL 0x00 |
22 | #define BCM43xx_DMA_RX_CONTROL 0x10 | 22 | #define BCM43xx_DMA32_TXENABLE 0x00000001 |
23 | #define BCM43xx_DMA_RX_DESC_RING 0x14 | 23 | #define BCM43xx_DMA32_TXSUSPEND 0x00000002 |
24 | #define BCM43xx_DMA_RX_DESC_INDEX 0x18 | 24 | #define BCM43xx_DMA32_TXLOOPBACK 0x00000004 |
25 | #define BCM43xx_DMA_RX_STATUS 0x1c | 25 | #define BCM43xx_DMA32_TXFLUSH 0x00000010 |
26 | 26 | #define BCM43xx_DMA32_TXADDREXT_MASK 0x00030000 | |
27 | /* DMA controller channel control word values. */ | 27 | #define BCM43xx_DMA32_TXADDREXT_SHIFT 16 |
28 | #define BCM43xx_DMA_TXCTRL_ENABLE (1 << 0) | 28 | #define BCM43xx_DMA32_TXRING 0x04 |
29 | #define BCM43xx_DMA_TXCTRL_SUSPEND (1 << 1) | 29 | #define BCM43xx_DMA32_TXINDEX 0x08 |
30 | #define BCM43xx_DMA_TXCTRL_LOOPBACK (1 << 2) | 30 | #define BCM43xx_DMA32_TXSTATUS 0x0C |
31 | #define BCM43xx_DMA_TXCTRL_FLUSH (1 << 4) | 31 | #define BCM43xx_DMA32_TXDPTR 0x00000FFF |
32 | #define BCM43xx_DMA_RXCTRL_ENABLE (1 << 0) | 32 | #define BCM43xx_DMA32_TXSTATE 0x0000F000 |
33 | #define BCM43xx_DMA_RXCTRL_FRAMEOFF_MASK 0x000000fe | 33 | #define BCM43xx_DMA32_TXSTAT_DISABLED 0x00000000 |
34 | #define BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT 1 | 34 | #define BCM43xx_DMA32_TXSTAT_ACTIVE 0x00001000 |
35 | #define BCM43xx_DMA_RXCTRL_PIO (1 << 8) | 35 | #define BCM43xx_DMA32_TXSTAT_IDLEWAIT 0x00002000 |
36 | /* DMA controller channel status word values. */ | 36 | #define BCM43xx_DMA32_TXSTAT_STOPPED 0x00003000 |
37 | #define BCM43xx_DMA_TXSTAT_DPTR_MASK 0x00000fff | 37 | #define BCM43xx_DMA32_TXSTAT_SUSP 0x00004000 |
38 | #define BCM43xx_DMA_TXSTAT_STAT_MASK 0x0000f000 | 38 | #define BCM43xx_DMA32_TXERROR 0x000F0000 |
39 | #define BCM43xx_DMA_TXSTAT_STAT_DISABLED 0x00000000 | 39 | #define BCM43xx_DMA32_TXERR_NOERR 0x00000000 |
40 | #define BCM43xx_DMA_TXSTAT_STAT_ACTIVE 0x00001000 | 40 | #define BCM43xx_DMA32_TXERR_PROT 0x00010000 |
41 | #define BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT 0x00002000 | 41 | #define BCM43xx_DMA32_TXERR_UNDERRUN 0x00020000 |
42 | #define BCM43xx_DMA_TXSTAT_STAT_STOPPED 0x00003000 | 42 | #define BCM43xx_DMA32_TXERR_BUFREAD 0x00030000 |
43 | #define BCM43xx_DMA_TXSTAT_STAT_SUSP 0x00004000 | 43 | #define BCM43xx_DMA32_TXERR_DESCREAD 0x00040000 |
44 | #define BCM43xx_DMA_TXSTAT_ERROR_MASK 0x000f0000 | 44 | #define BCM43xx_DMA32_TXACTIVE 0xFFF00000 |
45 | #define BCM43xx_DMA_TXSTAT_FLUSHED (1 << 20) | 45 | #define BCM43xx_DMA32_RXCTL 0x10 |
46 | #define BCM43xx_DMA_RXSTAT_DPTR_MASK 0x00000fff | 46 | #define BCM43xx_DMA32_RXENABLE 0x00000001 |
47 | #define BCM43xx_DMA_RXSTAT_STAT_MASK 0x0000f000 | 47 | #define BCM43xx_DMA32_RXFROFF_MASK 0x000000FE |
48 | #define BCM43xx_DMA_RXSTAT_STAT_DISABLED 0x00000000 | 48 | #define BCM43xx_DMA32_RXFROFF_SHIFT 1 |
49 | #define BCM43xx_DMA_RXSTAT_STAT_ACTIVE 0x00001000 | 49 | #define BCM43xx_DMA32_RXDIRECTFIFO 0x00000100 |
50 | #define BCM43xx_DMA_RXSTAT_STAT_IDLEWAIT 0x00002000 | 50 | #define BCM43xx_DMA32_RXADDREXT_MASK 0x00030000 |
51 | #define BCM43xx_DMA_RXSTAT_STAT_RESERVED 0x00003000 | 51 | #define BCM43xx_DMA32_RXADDREXT_SHIFT 16 |
52 | #define BCM43xx_DMA_RXSTAT_STAT_ERRORS 0x00004000 | 52 | #define BCM43xx_DMA32_RXRING 0x14 |
53 | #define BCM43xx_DMA_RXSTAT_ERROR_MASK 0x000f0000 | 53 | #define BCM43xx_DMA32_RXINDEX 0x18 |
54 | 54 | #define BCM43xx_DMA32_RXSTATUS 0x1C | |
55 | /* DMA descriptor control field values. */ | 55 | #define BCM43xx_DMA32_RXDPTR 0x00000FFF |
56 | #define BCM43xx_DMADTOR_BYTECNT_MASK 0x00001fff | 56 | #define BCM43xx_DMA32_RXSTATE 0x0000F000 |
57 | #define BCM43xx_DMADTOR_DTABLEEND (1 << 28) /* End of descriptor table */ | 57 | #define BCM43xx_DMA32_RXSTAT_DISABLED 0x00000000 |
58 | #define BCM43xx_DMADTOR_COMPIRQ (1 << 29) /* IRQ on completion request */ | 58 | #define BCM43xx_DMA32_RXSTAT_ACTIVE 0x00001000 |
59 | #define BCM43xx_DMADTOR_FRAMEEND (1 << 30) | 59 | #define BCM43xx_DMA32_RXSTAT_IDLEWAIT 0x00002000 |
60 | #define BCM43xx_DMADTOR_FRAMESTART (1 << 31) | 60 | #define BCM43xx_DMA32_RXSTAT_STOPPED 0x00003000 |
61 | #define BCM43xx_DMA32_RXERROR 0x000F0000 | ||
62 | #define BCM43xx_DMA32_RXERR_NOERR 0x00000000 | ||
63 | #define BCM43xx_DMA32_RXERR_PROT 0x00010000 | ||
64 | #define BCM43xx_DMA32_RXERR_OVERFLOW 0x00020000 | ||
65 | #define BCM43xx_DMA32_RXERR_BUFWRITE 0x00030000 | ||
66 | #define BCM43xx_DMA32_RXERR_DESCREAD 0x00040000 | ||
67 | #define BCM43xx_DMA32_RXACTIVE 0xFFF00000 | ||
68 | |||
69 | /* 32-bit DMA descriptor. */ | ||
70 | struct bcm43xx_dmadesc32 { | ||
71 | __le32 control; | ||
72 | __le32 address; | ||
73 | } __attribute__((__packed__)); | ||
74 | #define BCM43xx_DMA32_DCTL_BYTECNT 0x00001FFF | ||
75 | #define BCM43xx_DMA32_DCTL_ADDREXT_MASK 0x00030000 | ||
76 | #define BCM43xx_DMA32_DCTL_ADDREXT_SHIFT 16 | ||
77 | #define BCM43xx_DMA32_DCTL_DTABLEEND 0x10000000 | ||
78 | #define BCM43xx_DMA32_DCTL_IRQ 0x20000000 | ||
79 | #define BCM43xx_DMA32_DCTL_FRAMEEND 0x40000000 | ||
80 | #define BCM43xx_DMA32_DCTL_FRAMESTART 0x80000000 | ||
81 | |||
82 | /* Address field Routing value. */ | ||
83 | #define BCM43xx_DMA32_ROUTING 0xC0000000 | ||
84 | #define BCM43xx_DMA32_ROUTING_SHIFT 30 | ||
85 | #define BCM43xx_DMA32_NOTRANS 0x00000000 | ||
86 | #define BCM43xx_DMA32_CLIENTTRANS 0x40000000 | ||
87 | |||
88 | |||
89 | |||
90 | /*** 64-bit DMA Engine. ***/ | ||
91 | |||
92 | /* 64-bit DMA controller registers. */ | ||
93 | #define BCM43xx_DMA64_TXCTL 0x00 | ||
94 | #define BCM43xx_DMA64_TXENABLE 0x00000001 | ||
95 | #define BCM43xx_DMA64_TXSUSPEND 0x00000002 | ||
96 | #define BCM43xx_DMA64_TXLOOPBACK 0x00000004 | ||
97 | #define BCM43xx_DMA64_TXFLUSH 0x00000010 | ||
98 | #define BCM43xx_DMA64_TXADDREXT_MASK 0x00030000 | ||
99 | #define BCM43xx_DMA64_TXADDREXT_SHIFT 16 | ||
100 | #define BCM43xx_DMA64_TXINDEX 0x04 | ||
101 | #define BCM43xx_DMA64_TXRINGLO 0x08 | ||
102 | #define BCM43xx_DMA64_TXRINGHI 0x0C | ||
103 | #define BCM43xx_DMA64_TXSTATUS 0x10 | ||
104 | #define BCM43xx_DMA64_TXSTATDPTR 0x00001FFF | ||
105 | #define BCM43xx_DMA64_TXSTAT 0xF0000000 | ||
106 | #define BCM43xx_DMA64_TXSTAT_DISABLED 0x00000000 | ||
107 | #define BCM43xx_DMA64_TXSTAT_ACTIVE 0x10000000 | ||
108 | #define BCM43xx_DMA64_TXSTAT_IDLEWAIT 0x20000000 | ||
109 | #define BCM43xx_DMA64_TXSTAT_STOPPED 0x30000000 | ||
110 | #define BCM43xx_DMA64_TXSTAT_SUSP 0x40000000 | ||
111 | #define BCM43xx_DMA64_TXERROR 0x14 | ||
112 | #define BCM43xx_DMA64_TXERRDPTR 0x0001FFFF | ||
113 | #define BCM43xx_DMA64_TXERR 0xF0000000 | ||
114 | #define BCM43xx_DMA64_TXERR_NOERR 0x00000000 | ||
115 | #define BCM43xx_DMA64_TXERR_PROT 0x10000000 | ||
116 | #define BCM43xx_DMA64_TXERR_UNDERRUN 0x20000000 | ||
117 | #define BCM43xx_DMA64_TXERR_TRANSFER 0x30000000 | ||
118 | #define BCM43xx_DMA64_TXERR_DESCREAD 0x40000000 | ||
119 | #define BCM43xx_DMA64_TXERR_CORE 0x50000000 | ||
120 | #define BCM43xx_DMA64_RXCTL 0x20 | ||
121 | #define BCM43xx_DMA64_RXENABLE 0x00000001 | ||
122 | #define BCM43xx_DMA64_RXFROFF_MASK 0x000000FE | ||
123 | #define BCM43xx_DMA64_RXFROFF_SHIFT 1 | ||
124 | #define BCM43xx_DMA64_RXDIRECTFIFO 0x00000100 | ||
125 | #define BCM43xx_DMA64_RXADDREXT_MASK 0x00030000 | ||
126 | #define BCM43xx_DMA64_RXADDREXT_SHIFT 16 | ||
127 | #define BCM43xx_DMA64_RXINDEX 0x24 | ||
128 | #define BCM43xx_DMA64_RXRINGLO 0x28 | ||
129 | #define BCM43xx_DMA64_RXRINGHI 0x2C | ||
130 | #define BCM43xx_DMA64_RXSTATUS 0x30 | ||
131 | #define BCM43xx_DMA64_RXSTATDPTR 0x00001FFF | ||
132 | #define BCM43xx_DMA64_RXSTAT 0xF0000000 | ||
133 | #define BCM43xx_DMA64_RXSTAT_DISABLED 0x00000000 | ||
134 | #define BCM43xx_DMA64_RXSTAT_ACTIVE 0x10000000 | ||
135 | #define BCM43xx_DMA64_RXSTAT_IDLEWAIT 0x20000000 | ||
136 | #define BCM43xx_DMA64_RXSTAT_STOPPED 0x30000000 | ||
137 | #define BCM43xx_DMA64_RXSTAT_SUSP 0x40000000 | ||
138 | #define BCM43xx_DMA64_RXERROR 0x34 | ||
139 | #define BCM43xx_DMA64_RXERRDPTR 0x0001FFFF | ||
140 | #define BCM43xx_DMA64_RXERR 0xF0000000 | ||
141 | #define BCM43xx_DMA64_RXERR_NOERR 0x00000000 | ||
142 | #define BCM43xx_DMA64_RXERR_PROT 0x10000000 | ||
143 | #define BCM43xx_DMA64_RXERR_UNDERRUN 0x20000000 | ||
144 | #define BCM43xx_DMA64_RXERR_TRANSFER 0x30000000 | ||
145 | #define BCM43xx_DMA64_RXERR_DESCREAD 0x40000000 | ||
146 | #define BCM43xx_DMA64_RXERR_CORE 0x50000000 | ||
147 | |||
148 | /* 64-bit DMA descriptor. */ | ||
149 | struct bcm43xx_dmadesc64 { | ||
150 | __le32 control0; | ||
151 | __le32 control1; | ||
152 | __le32 address_low; | ||
153 | __le32 address_high; | ||
154 | } __attribute__((__packed__)); | ||
155 | #define BCM43xx_DMA64_DCTL0_DTABLEEND 0x10000000 | ||
156 | #define BCM43xx_DMA64_DCTL0_IRQ 0x20000000 | ||
157 | #define BCM43xx_DMA64_DCTL0_FRAMEEND 0x40000000 | ||
158 | #define BCM43xx_DMA64_DCTL0_FRAMESTART 0x80000000 | ||
159 | #define BCM43xx_DMA64_DCTL1_BYTECNT 0x00001FFF | ||
160 | #define BCM43xx_DMA64_DCTL1_ADDREXT_MASK 0x00030000 | ||
161 | #define BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT 16 | ||
162 | |||
163 | /* Address field Routing value. */ | ||
164 | #define BCM43xx_DMA64_ROUTING 0xC0000000 | ||
165 | #define BCM43xx_DMA64_ROUTING_SHIFT 30 | ||
166 | #define BCM43xx_DMA64_NOTRANS 0x00000000 | ||
167 | #define BCM43xx_DMA64_CLIENTTRANS 0x80000000 | ||
168 | |||
169 | |||
170 | |||
171 | struct bcm43xx_dmadesc_generic { | ||
172 | union { | ||
173 | struct bcm43xx_dmadesc32 dma32; | ||
174 | struct bcm43xx_dmadesc64 dma64; | ||
175 | } __attribute__((__packed__)); | ||
176 | } __attribute__((__packed__)); | ||
177 | |||
61 | 178 | ||
62 | /* Misc DMA constants */ | 179 | /* Misc DMA constants */ |
63 | #define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE | 180 | #define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE |
64 | #define BCM43xx_DMA_BUSADDRMAX 0x3FFFFFFF | 181 | #define BCM43xx_DMA0_RX_FRAMEOFFSET 30 |
65 | #define BCM43xx_DMA_DMABUSADDROFFSET (1 << 30) | 182 | #define BCM43xx_DMA3_RX_FRAMEOFFSET 0 |
66 | #define BCM43xx_DMA1_RX_FRAMEOFFSET 30 | 183 | |
67 | #define BCM43xx_DMA4_RX_FRAMEOFFSET 0 | ||
68 | 184 | ||
69 | /* DMA engine tuning knobs */ | 185 | /* DMA engine tuning knobs */ |
70 | #define BCM43xx_TXRING_SLOTS 512 | 186 | #define BCM43xx_TXRING_SLOTS 512 |
71 | #define BCM43xx_RXRING_SLOTS 64 | 187 | #define BCM43xx_RXRING_SLOTS 64 |
72 | #define BCM43xx_DMA1_RXBUFFERSIZE (2304 + 100) | 188 | #define BCM43xx_DMA0_RX_BUFFERSIZE (2304 + 100) |
73 | #define BCM43xx_DMA4_RXBUFFERSIZE 16 | 189 | #define BCM43xx_DMA3_RX_BUFFERSIZE 16 |
74 | /* Suspend the tx queue, if less than this percent slots are free. */ | 190 | /* Suspend the tx queue, if less than this percent slots are free. */ |
75 | #define BCM43xx_TXSUSPEND_PERCENT 20 | 191 | #define BCM43xx_TXSUSPEND_PERCENT 20 |
76 | /* Resume the tx queue, if more than this percent slots are free. */ | 192 | /* Resume the tx queue, if more than this percent slots are free. */ |
@@ -86,17 +202,6 @@ struct bcm43xx_private; | |||
86 | struct bcm43xx_xmitstatus; | 202 | struct bcm43xx_xmitstatus; |
87 | 203 | ||
88 | 204 | ||
89 | struct bcm43xx_dmadesc { | ||
90 | __le32 _control; | ||
91 | __le32 _address; | ||
92 | } __attribute__((__packed__)); | ||
93 | |||
94 | /* Macros to access the bcm43xx_dmadesc struct */ | ||
95 | #define get_desc_ctl(desc) le32_to_cpu((desc)->_control) | ||
96 | #define set_desc_ctl(desc, ctl) do { (desc)->_control = cpu_to_le32(ctl); } while (0) | ||
97 | #define get_desc_addr(desc) le32_to_cpu((desc)->_address) | ||
98 | #define set_desc_addr(desc, addr) do { (desc)->_address = cpu_to_le32(addr); } while (0) | ||
99 | |||
100 | struct bcm43xx_dmadesc_meta { | 205 | struct bcm43xx_dmadesc_meta { |
101 | /* The kernel DMA-able buffer. */ | 206 | /* The kernel DMA-able buffer. */ |
102 | struct sk_buff *skb; | 207 | struct sk_buff *skb; |
@@ -105,15 +210,14 @@ struct bcm43xx_dmadesc_meta { | |||
105 | }; | 210 | }; |
106 | 211 | ||
107 | struct bcm43xx_dmaring { | 212 | struct bcm43xx_dmaring { |
108 | struct bcm43xx_private *bcm; | ||
109 | /* Kernel virtual base address of the ring memory. */ | 213 | /* Kernel virtual base address of the ring memory. */ |
110 | struct bcm43xx_dmadesc *vbase; | 214 | void *descbase; |
111 | /* DMA memory offset */ | ||
112 | dma_addr_t memoffset; | ||
113 | /* (Unadjusted) DMA base bus-address of the ring memory. */ | ||
114 | dma_addr_t dmabase; | ||
115 | /* Meta data about all descriptors. */ | 215 | /* Meta data about all descriptors. */ |
116 | struct bcm43xx_dmadesc_meta *meta; | 216 | struct bcm43xx_dmadesc_meta *meta; |
217 | /* DMA Routing value. */ | ||
218 | u32 routing; | ||
219 | /* (Unadjusted) DMA base bus-address of the ring memory. */ | ||
220 | dma_addr_t dmabase; | ||
117 | /* Number of descriptor slots in the ring. */ | 221 | /* Number of descriptor slots in the ring. */ |
118 | int nr_slots; | 222 | int nr_slots; |
119 | /* Number of used descriptor slots. */ | 223 | /* Number of used descriptor slots. */ |
@@ -127,12 +231,17 @@ struct bcm43xx_dmaring { | |||
127 | u32 frameoffset; | 231 | u32 frameoffset; |
128 | /* Descriptor buffer size. */ | 232 | /* Descriptor buffer size. */ |
129 | u16 rx_buffersize; | 233 | u16 rx_buffersize; |
130 | /* The MMIO base register of the DMA controller, this | 234 | /* The MMIO base register of the DMA controller. */ |
131 | * ring is posted to. | ||
132 | */ | ||
133 | u16 mmio_base; | 235 | u16 mmio_base; |
134 | u8 tx:1, /* TRUE, if this is a TX ring. */ | 236 | /* DMA controller index number (0-5). */ |
135 | suspended:1; /* TRUE, if transfers are suspended on this ring. */ | 237 | int index; |
238 | /* Boolean. Is this a TX ring? */ | ||
239 | u8 tx; | ||
240 | /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */ | ||
241 | u8 dma64; | ||
242 | /* Boolean. Are transfers suspended on this ring? */ | ||
243 | u8 suspended; | ||
244 | struct bcm43xx_private *bcm; | ||
136 | #ifdef CONFIG_BCM43XX_DEBUG | 245 | #ifdef CONFIG_BCM43XX_DEBUG |
137 | /* Maximum number of used slots. */ | 246 | /* Maximum number of used slots. */ |
138 | int max_used_slots; | 247 | int max_used_slots; |
@@ -141,6 +250,34 @@ struct bcm43xx_dmaring { | |||
141 | 250 | ||
142 | 251 | ||
143 | static inline | 252 | static inline |
253 | int bcm43xx_dma_desc2idx(struct bcm43xx_dmaring *ring, | ||
254 | struct bcm43xx_dmadesc_generic *desc) | ||
255 | { | ||
256 | if (ring->dma64) { | ||
257 | struct bcm43xx_dmadesc64 *dd64 = ring->descbase; | ||
258 | return (int)(&(desc->dma64) - dd64); | ||
259 | } else { | ||
260 | struct bcm43xx_dmadesc32 *dd32 = ring->descbase; | ||
261 | return (int)(&(desc->dma32) - dd32); | ||
262 | } | ||
263 | } | ||
264 | |||
265 | static inline | ||
266 | struct bcm43xx_dmadesc_generic * bcm43xx_dma_idx2desc(struct bcm43xx_dmaring *ring, | ||
267 | int slot, | ||
268 | struct bcm43xx_dmadesc_meta **meta) | ||
269 | { | ||
270 | *meta = &(ring->meta[slot]); | ||
271 | if (ring->dma64) { | ||
272 | struct bcm43xx_dmadesc64 *dd64 = ring->descbase; | ||
273 | return (struct bcm43xx_dmadesc_generic *)(&(dd64[slot])); | ||
274 | } else { | ||
275 | struct bcm43xx_dmadesc32 *dd32 = ring->descbase; | ||
276 | return (struct bcm43xx_dmadesc_generic *)(&(dd32[slot])); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | static inline | ||
144 | u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring, | 281 | u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring, |
145 | u16 offset) | 282 | u16 offset) |
146 | { | 283 | { |
@@ -159,9 +296,13 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm); | |||
159 | void bcm43xx_dma_free(struct bcm43xx_private *bcm); | 296 | void bcm43xx_dma_free(struct bcm43xx_private *bcm); |
160 | 297 | ||
161 | int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, | 298 | int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, |
162 | u16 dmacontroller_mmio_base); | 299 | u16 dmacontroller_mmio_base, |
300 | int dma64); | ||
163 | int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, | 301 | int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, |
164 | u16 dmacontroller_mmio_base); | 302 | u16 dmacontroller_mmio_base, |
303 | int dma64); | ||
304 | |||
305 | u16 bcm43xx_dmacontroller_base(int dma64bit, int dmacontroller_idx); | ||
165 | 306 | ||
166 | void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring); | 307 | void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring); |
167 | void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring); | 308 | void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring); |
@@ -173,7 +314,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm, | |||
173 | struct ieee80211_txb *txb); | 314 | struct ieee80211_txb *txb); |
174 | void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring); | 315 | void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring); |
175 | 316 | ||
176 | |||
177 | #else /* CONFIG_BCM43XX_DMA */ | 317 | #else /* CONFIG_BCM43XX_DMA */ |
178 | 318 | ||
179 | 319 | ||
@@ -188,13 +328,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm) | |||
188 | } | 328 | } |
189 | static inline | 329 | static inline |
190 | int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, | 330 | int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, |
191 | u16 dmacontroller_mmio_base) | 331 | u16 dmacontroller_mmio_base, |
332 | int dma64) | ||
192 | { | 333 | { |
193 | return 0; | 334 | return 0; |
194 | } | 335 | } |
195 | static inline | 336 | static inline |
196 | int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, | 337 | int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, |
197 | u16 dmacontroller_mmio_base) | 338 | u16 dmacontroller_mmio_base, |
339 | int dma64) | ||
198 | { | 340 | { |
199 | return 0; | 341 | return 0; |
200 | } | 342 | } |
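With the header rework, callers no longer index ring->vbase and ring->meta directly; they resolve a slot through bcm43xx_dma_idx2desc() and then read the control word that matches the engine width. A minimal sketch using only the types and masks defined above (the function name slot_is_frame_end() is illustrative):

/* Sketch: map a slot to its generic descriptor plus meta data and test
 * the FRAMEEND bit for either engine, mirroring the xmitstatus loop. */
static int slot_is_frame_end(struct bcm43xx_dmaring *ring, int slot)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	u32 ctl;

	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
	if (ring->dma64) {
		ctl = le32_to_cpu(desc->dma64.control0);
		return !!(ctl & BCM43xx_DMA64_DCTL0_FRAMEEND);
	}
	ctl = le32_to_cpu(desc->dma32.control);
	return !!(ctl & BCM43xx_DMA32_DCTL_FRAMEEND);
}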
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index b095f3cc6730..966815be6955 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -1371,6 +1371,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy) | |||
1371 | if ((bcm43xx_core_enabled(bcm)) && | 1371 | if ((bcm43xx_core_enabled(bcm)) && |
1372 | !bcm43xx_using_pio(bcm)) { | 1372 | !bcm43xx_using_pio(bcm)) { |
1373 | //FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here? | 1373 | //FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here? |
1374 | #if 0 | ||
1374 | #ifndef CONFIG_BCM947XX | 1375 | #ifndef CONFIG_BCM947XX |
1375 | /* reset all used DMA controllers. */ | 1376 | /* reset all used DMA controllers. */ |
1376 | bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE); | 1377 | bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE); |
@@ -1381,6 +1382,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy) | |||
1381 | if (bcm->current_core->rev < 5) | 1382 | if (bcm->current_core->rev < 5) |
1382 | bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE); | 1383 | bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE); |
1383 | #endif | 1384 | #endif |
1385 | #endif | ||
1384 | } | 1386 | } |
1385 | if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) { | 1387 | if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) { |
1386 | bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, | 1388 | bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, |
@@ -1671,8 +1673,9 @@ static void handle_irq_beacon(struct bcm43xx_private *bcm) | |||
1671 | static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) | 1673 | static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) |
1672 | { | 1674 | { |
1673 | u32 reason; | 1675 | u32 reason; |
1674 | u32 dma_reason[4]; | 1676 | u32 dma_reason[6]; |
1675 | int activity = 0; | 1677 | u32 merged_dma_reason = 0; |
1678 | int i, activity = 0; | ||
1676 | unsigned long flags; | 1679 | unsigned long flags; |
1677 | 1680 | ||
1678 | #ifdef CONFIG_BCM43XX_DEBUG | 1681 | #ifdef CONFIG_BCM43XX_DEBUG |
@@ -1684,10 +1687,10 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) | |||
1684 | 1687 | ||
1685 | spin_lock_irqsave(&bcm->irq_lock, flags); | 1688 | spin_lock_irqsave(&bcm->irq_lock, flags); |
1686 | reason = bcm->irq_reason; | 1689 | reason = bcm->irq_reason; |
1687 | dma_reason[0] = bcm->dma_reason[0]; | 1690 | for (i = 5; i >= 0; i--) { |
1688 | dma_reason[1] = bcm->dma_reason[1]; | 1691 | dma_reason[i] = bcm->dma_reason[i]; |
1689 | dma_reason[2] = bcm->dma_reason[2]; | 1692 | merged_dma_reason |= dma_reason[i]; |
1690 | dma_reason[3] = bcm->dma_reason[3]; | 1693 | } |
1691 | 1694 | ||
1692 | if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) { | 1695 | if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) { |
1693 | /* TX error. We get this when Template RAM is written in the wrong endianness | 1696 |
@@ -1698,27 +1701,25 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) | |||
1698 | printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n"); | 1701 | printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n"); |
1699 | bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR); | 1702 | bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR); |
1700 | } | 1703 | } |
1701 | if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_FATALMASK) | | 1704 | if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_FATALMASK)) { |
1702 | (dma_reason[1] & BCM43xx_DMAIRQ_FATALMASK) | | ||
1703 | (dma_reason[2] & BCM43xx_DMAIRQ_FATALMASK) | | ||
1704 | (dma_reason[3] & BCM43xx_DMAIRQ_FATALMASK))) { | ||
1705 | printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: " | 1705 | printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: " |
1706 | "0x%08X, 0x%08X, 0x%08X, 0x%08X\n", | 1706 | "0x%08X, 0x%08X, 0x%08X, " |
1707 | "0x%08X, 0x%08X, 0x%08X\n", | ||
1707 | dma_reason[0], dma_reason[1], | 1708 | dma_reason[0], dma_reason[1], |
1708 | dma_reason[2], dma_reason[3]); | 1709 | dma_reason[2], dma_reason[3], |
1710 | dma_reason[4], dma_reason[5]); | ||
1709 | bcm43xx_controller_restart(bcm, "DMA error"); | 1711 | bcm43xx_controller_restart(bcm, "DMA error"); |
1710 | mmiowb(); | 1712 | mmiowb(); |
1711 | spin_unlock_irqrestore(&bcm->irq_lock, flags); | 1713 | spin_unlock_irqrestore(&bcm->irq_lock, flags); |
1712 | return; | 1714 | return; |
1713 | } | 1715 | } |
1714 | if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) | | 1716 | if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_NONFATALMASK)) { |
1715 | (dma_reason[1] & BCM43xx_DMAIRQ_NONFATALMASK) | | ||
1716 | (dma_reason[2] & BCM43xx_DMAIRQ_NONFATALMASK) | | ||
1717 | (dma_reason[3] & BCM43xx_DMAIRQ_NONFATALMASK))) { | ||
1718 | printkl(KERN_ERR PFX "DMA error: " | 1717 | printkl(KERN_ERR PFX "DMA error: " |
1719 | "0x%08X, 0x%08X, 0x%08X, 0x%08X\n", | 1718 | "0x%08X, 0x%08X, 0x%08X, " |
1719 | "0x%08X, 0x%08X, 0x%08X\n", | ||
1720 | dma_reason[0], dma_reason[1], | 1720 | dma_reason[0], dma_reason[1], |
1721 | dma_reason[2], dma_reason[3]); | 1721 | dma_reason[2], dma_reason[3], |
1722 | dma_reason[4], dma_reason[5]); | ||
1722 | } | 1723 | } |
1723 | 1724 | ||
1724 | if (reason & BCM43xx_IRQ_PS) { | 1725 | if (reason & BCM43xx_IRQ_PS) { |
@@ -1753,8 +1754,6 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) | |||
1753 | } | 1754 | } |
1754 | 1755 | ||
1755 | /* Check the DMA reason registers for received data. */ | 1756 | /* Check the DMA reason registers for received data. */ |
1756 | assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE)); | ||
1757 | assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE)); | ||
1758 | if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) { | 1757 | if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) { |
1759 | if (bcm43xx_using_pio(bcm)) | 1758 | if (bcm43xx_using_pio(bcm)) |
1760 | bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0); | 1759 | bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0); |
@@ -1762,13 +1761,17 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) | |||
1762 | bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0); | 1761 | bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0); |
1763 | /* We intentionally don't set "activity" to 1, here. */ | 1762 | /* We intentionally don't set "activity" to 1, here. */ |
1764 | } | 1763 | } |
1764 | assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE)); | ||
1765 | assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE)); | ||
1765 | if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) { | 1766 | if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) { |
1766 | if (bcm43xx_using_pio(bcm)) | 1767 | if (bcm43xx_using_pio(bcm)) |
1767 | bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3); | 1768 | bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3); |
1768 | else | 1769 | else |
1769 | bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring1); | 1770 | bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring3); |
1770 | activity = 1; | 1771 | activity = 1; |
1771 | } | 1772 | } |
1773 | assert(!(dma_reason[4] & BCM43xx_DMAIRQ_RX_DONE)); | ||
1774 | assert(!(dma_reason[5] & BCM43xx_DMAIRQ_RX_DONE)); | ||
1772 | bcmirq_handled(BCM43xx_IRQ_RX); | 1775 | bcmirq_handled(BCM43xx_IRQ_RX); |
1773 | 1776 | ||
1774 | if (reason & BCM43xx_IRQ_XMIT_STATUS) { | 1777 | if (reason & BCM43xx_IRQ_XMIT_STATUS) { |
@@ -1825,14 +1828,18 @@ static void bcm43xx_interrupt_ack(struct bcm43xx_private *bcm, u32 reason) | |||
1825 | 1828 | ||
1826 | bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason); | 1829 | bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason); |
1827 | 1830 | ||
1828 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON, | 1831 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_REASON, |
1829 | bcm->dma_reason[0]); | 1832 | bcm->dma_reason[0]); |
1830 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON, | 1833 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON, |
1831 | bcm->dma_reason[1]); | 1834 | bcm->dma_reason[1]); |
1832 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON, | 1835 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON, |
1833 | bcm->dma_reason[2]); | 1836 | bcm->dma_reason[2]); |
1834 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON, | 1837 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON, |
1835 | bcm->dma_reason[3]); | 1838 | bcm->dma_reason[3]); |
1839 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON, | ||
1840 | bcm->dma_reason[4]); | ||
1841 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_REASON, | ||
1842 | bcm->dma_reason[5]); | ||
1836 | } | 1843 | } |
1837 | 1844 | ||
1838 | /* Interrupt handler top-half */ | 1845 | /* Interrupt handler top-half */ |
@@ -1860,14 +1867,18 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re | |||
1860 | if (!reason) | 1867 | if (!reason) |
1861 | goto out; | 1868 | goto out; |
1862 | 1869 | ||
1863 | bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON) | 1870 | bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON) |
1864 | & 0x0001dc00; | 1871 | & 0x0001DC00; |
1865 | bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON) | 1872 | bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON) |
1866 | & 0x0000dc00; | 1873 | & 0x0000DC00; |
1867 | bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON) | 1874 | bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON) |
1868 | & 0x0000dc00; | 1875 | & 0x0000DC00; |
1869 | bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON) | 1876 | bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON) |
1870 | & 0x0001dc00; | 1877 | & 0x0001DC00; |
1878 | bcm->dma_reason[4] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON) | ||
1879 | & 0x0000DC00; | ||
1880 | bcm->dma_reason[5] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA5_REASON) | ||
1881 | & 0x0000DC00; | ||
1871 | 1882 | ||
1872 | bcm43xx_interrupt_ack(bcm, reason); | 1883 | bcm43xx_interrupt_ack(bcm, reason); |
1873 | 1884 | ||
@@ -2448,10 +2459,12 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm) | |||
2448 | bcm43xx_write32(bcm, 0x018C, 0x02000000); | 2459 | bcm43xx_write32(bcm, 0x018C, 0x02000000); |
2449 | } | 2460 | } |
2450 | bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000); | 2461 | bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000); |
2451 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0001DC00); | 2462 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_IRQ_MASK, 0x0001DC00); |
2463 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0000DC00); | ||
2452 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00); | 2464 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00); |
2453 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0000DC00); | 2465 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0001DC00); |
2454 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0001DC00); | 2466 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0000DC00); |
2467 | bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_IRQ_MASK, 0x0000DC00); | ||
2455 | 2468 | ||
2456 | value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW); | 2469 | value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW); |
2457 | value32 |= 0x00100000; | 2470 | value32 |= 0x00100000; |
@@ -3261,6 +3274,7 @@ static int bcm43xx_shutdown_all_wireless_cores(struct bcm43xx_private *bcm) | |||
3261 | /* This is the opposite of bcm43xx_init_board() */ | 3274 | /* This is the opposite of bcm43xx_init_board() */ |
3262 | static void bcm43xx_free_board(struct bcm43xx_private *bcm) | 3275 | static void bcm43xx_free_board(struct bcm43xx_private *bcm) |
3263 | { | 3276 | { |
3277 | bcm43xx_rng_exit(bcm); | ||
3264 | bcm43xx_sysfs_unregister(bcm); | 3278 | bcm43xx_sysfs_unregister(bcm); |
3265 | bcm43xx_periodic_tasks_delete(bcm); | 3279 | bcm43xx_periodic_tasks_delete(bcm); |
3266 | 3280 | ||
@@ -3349,6 +3363,8 @@ static void prepare_priv_for_init(struct bcm43xx_private *bcm) | |||
3349 | memset(bcm->dma_reason, 0, sizeof(bcm->dma_reason)); | 3363 | memset(bcm->dma_reason, 0, sizeof(bcm->dma_reason)); |
3350 | bcm->irq_savedstate = BCM43xx_IRQ_INITIAL; | 3364 | bcm->irq_savedstate = BCM43xx_IRQ_INITIAL; |
3351 | 3365 | ||
3366 | bcm->mac_suspended = 1; | ||
3367 | |||
3352 | /* Noise calculation context */ | 3368 | /* Noise calculation context */ |
3353 | memset(&bcm->noisecalc, 0, sizeof(bcm->noisecalc)); | 3369 | memset(&bcm->noisecalc, 0, sizeof(bcm->noisecalc)); |
3354 | 3370 | ||
@@ -3528,6 +3544,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm) | |||
3528 | err = bcm43xx_sysfs_register(bcm); | 3544 | err = bcm43xx_sysfs_register(bcm); |
3529 | if (err) | 3545 | if (err) |
3530 | goto err_wlshutdown; | 3546 | goto err_wlshutdown; |
3547 | err = bcm43xx_rng_init(bcm); | ||
3548 | if (err) | ||
3549 | goto err_sysfs_unreg; | ||
3531 | 3550 | ||
3532 | /*FIXME: This should be handled by softmac instead. */ | 3551 | /*FIXME: This should be handled by softmac instead. */ |
3533 | schedule_work(&bcm->softmac->associnfo.work); | 3552 | schedule_work(&bcm->softmac->associnfo.work); |
@@ -3537,6 +3556,8 @@ out: | |||
3537 | 3556 | ||
3538 | return err; | 3557 | return err; |
3539 | 3558 | ||
3559 | err_sysfs_unreg: | ||
3560 | bcm43xx_sysfs_unregister(bcm); | ||
3540 | err_wlshutdown: | 3561 | err_wlshutdown: |
3541 | bcm43xx_shutdown_all_wireless_cores(bcm); | 3562 | bcm43xx_shutdown_all_wireless_cores(bcm); |
3542 | err_crystal_off: | 3563 | err_crystal_off: |
@@ -3899,7 +3920,9 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb, | |||
3899 | err = bcm43xx_tx(bcm, txb); | 3920 | err = bcm43xx_tx(bcm, txb); |
3900 | spin_unlock_irqrestore(&bcm->irq_lock, flags); | 3921 | spin_unlock_irqrestore(&bcm->irq_lock, flags); |
3901 | 3922 | ||
3902 | return err; | 3923 | if (unlikely(err)) |
3924 | return NETDEV_TX_BUSY; | ||
3925 | return NETDEV_TX_OK; | ||
3903 | } | 3926 | } |
3904 | 3927 | ||
3905 | static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev) | 3928 | static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev) |
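The tasklet change above replaces four hand-written OR terms with a loop over all six DMA engines; the fatal and non-fatal tests then run once on the merged mask. Reduced to its essentials (a fragment, assuming the driver's tasklet context):

	/* Sketch: merge the six per-engine reason words, then test the masks once. */
	u32 merged_dma_reason = 0;
	int i;

	for (i = 0; i < 6; i++)
		merged_dma_reason |= bcm->dma_reason[i];
	if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_FATALMASK))
		bcm43xx_controller_restart(bcm, "DMA error");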
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c index 1d3a3aaf96ec..888077fc14c4 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c | |||
@@ -229,8 +229,8 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev, | |||
229 | range->throughput = 27 * 1000 * 1000; | 229 | range->throughput = 27 * 1000 * 1000; |
230 | 230 | ||
231 | range->max_qual.qual = 100; | 231 | range->max_qual.qual = 100; |
232 | range->max_qual.level = 152; /* set floor at -104 dBm (152 - 256) */ | 232 | range->max_qual.level = 146; /* set floor at -110 dBm (146 - 256) */ |
233 | range->max_qual.noise = 152; | 233 | range->max_qual.noise = 146; |
234 | range->max_qual.updated = IW_QUAL_ALL_UPDATED; | 234 | range->max_qual.updated = IW_QUAL_ALL_UPDATED; |
235 | 235 | ||
236 | range->avg_qual.qual = 50; | 236 | range->avg_qual.qual = 50; |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 5f8ccf48061a..d2db8eb412c1 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -6254,13 +6254,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6254 | * member to call a function that then just turns and calls ipw2100_up. | 6254 | * member to call a function that then just turns and calls ipw2100_up. |
6255 | * net_dev->init is called after name allocation but before the | 6255 | * net_dev->init is called after name allocation but before the |
6256 | * notifier chain is called */ | 6256 | * notifier chain is called */ |
6257 | mutex_lock(&priv->action_mutex); | ||
6258 | err = register_netdev(dev); | 6257 | err = register_netdev(dev); |
6259 | if (err) { | 6258 | if (err) { |
6260 | printk(KERN_WARNING DRV_NAME | 6259 | printk(KERN_WARNING DRV_NAME |
6261 | "Error calling register_netdev.\n"); | 6260 | "Error calling register_netdev.\n"); |
6262 | goto fail_unlock; | 6261 | goto fail; |
6263 | } | 6262 | } |
6263 | |||
6264 | mutex_lock(&priv->action_mutex); | ||
6264 | registered = 1; | 6265 | registered = 1; |
6265 | 6266 | ||
6266 | IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); | 6267 | IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); |
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index a72f3e1e991b..f29ec0ebed2f 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -70,7 +70,7 @@ | |||
70 | #define VQ | 70 | #define VQ |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | #define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ | 73 | #define IPW2200_VERSION "1.1.4" VK VD VM VP VR VQ |
74 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" | 74 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" |
75 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" | 75 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" |
76 | #define DRV_VERSION IPW2200_VERSION | 76 | #define DRV_VERSION IPW2200_VERSION |
@@ -83,9 +83,7 @@ MODULE_AUTHOR(DRV_COPYRIGHT); | |||
83 | MODULE_LICENSE("GPL"); | 83 | MODULE_LICENSE("GPL"); |
84 | 84 | ||
85 | static int cmdlog = 0; | 85 | static int cmdlog = 0; |
86 | #ifdef CONFIG_IPW2200_DEBUG | ||
87 | static int debug = 0; | 86 | static int debug = 0; |
88 | #endif | ||
89 | static int channel = 0; | 87 | static int channel = 0; |
90 | static int mode = 0; | 88 | static int mode = 0; |
91 | 89 | ||
@@ -567,7 +565,6 @@ static inline void ipw_disable_interrupts(struct ipw_priv *priv) | |||
567 | spin_unlock_irqrestore(&priv->irq_lock, flags); | 565 | spin_unlock_irqrestore(&priv->irq_lock, flags); |
568 | } | 566 | } |
569 | 567 | ||
570 | #ifdef CONFIG_IPW2200_DEBUG | ||
571 | static char *ipw_error_desc(u32 val) | 568 | static char *ipw_error_desc(u32 val) |
572 | { | 569 | { |
573 | switch (val) { | 570 | switch (val) { |
@@ -634,7 +631,6 @@ static void ipw_dump_error_log(struct ipw_priv *priv, | |||
634 | error->log[i].time, | 631 | error->log[i].time, |
635 | error->log[i].data, error->log[i].event); | 632 | error->log[i].data, error->log[i].event); |
636 | } | 633 | } |
637 | #endif | ||
638 | 634 | ||
639 | static inline int ipw_is_init(struct ipw_priv *priv) | 635 | static inline int ipw_is_init(struct ipw_priv *priv) |
640 | { | 636 | { |
@@ -1435,9 +1431,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, | |||
1435 | const char *buf, size_t count) | 1431 | const char *buf, size_t count) |
1436 | { | 1432 | { |
1437 | struct ipw_priv *priv = dev_get_drvdata(d); | 1433 | struct ipw_priv *priv = dev_get_drvdata(d); |
1438 | #ifdef CONFIG_IPW2200_DEBUG | ||
1439 | struct net_device *dev = priv->net_dev; | 1434 | struct net_device *dev = priv->net_dev; |
1440 | #endif | ||
1441 | char buffer[] = "00000000"; | 1435 | char buffer[] = "00000000"; |
1442 | unsigned long len = | 1436 | unsigned long len = |
1443 | (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1; | 1437 | (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1; |
@@ -1958,14 +1952,12 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
1958 | IPW_WARNING("Firmware error detected. Restarting.\n"); | 1952 | IPW_WARNING("Firmware error detected. Restarting.\n"); |
1959 | if (priv->error) { | 1953 | if (priv->error) { |
1960 | IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); | 1954 | IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); |
1961 | #ifdef CONFIG_IPW2200_DEBUG | ||
1962 | if (ipw_debug_level & IPW_DL_FW_ERRORS) { | 1955 | if (ipw_debug_level & IPW_DL_FW_ERRORS) { |
1963 | struct ipw_fw_error *error = | 1956 | struct ipw_fw_error *error = |
1964 | ipw_alloc_error_log(priv); | 1957 | ipw_alloc_error_log(priv); |
1965 | ipw_dump_error_log(priv, error); | 1958 | ipw_dump_error_log(priv, error); |
1966 | kfree(error); | 1959 | kfree(error); |
1967 | } | 1960 | } |
1968 | #endif | ||
1969 | } else { | 1961 | } else { |
1970 | priv->error = ipw_alloc_error_log(priv); | 1962 | priv->error = ipw_alloc_error_log(priv); |
1971 | if (priv->error) | 1963 | if (priv->error) |
@@ -1973,10 +1965,8 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
1973 | else | 1965 | else |
1974 | IPW_DEBUG_FW("Error allocating sysfs 'error' " | 1966 | IPW_DEBUG_FW("Error allocating sysfs 'error' " |
1975 | "log.\n"); | 1967 | "log.\n"); |
1976 | #ifdef CONFIG_IPW2200_DEBUG | ||
1977 | if (ipw_debug_level & IPW_DL_FW_ERRORS) | 1968 | if (ipw_debug_level & IPW_DL_FW_ERRORS) |
1978 | ipw_dump_error_log(priv, priv->error); | 1969 | ipw_dump_error_log(priv, priv->error); |
1979 | #endif | ||
1980 | } | 1970 | } |
1981 | 1971 | ||
1982 | /* XXX: If hardware encryption is for WPA/WPA2, | 1972 | /* XXX: If hardware encryption is for WPA/WPA2, |
@@ -2287,7 +2277,7 @@ static int ipw_send_scan_abort(struct ipw_priv *priv) | |||
2287 | static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) | 2277 | static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) |
2288 | { | 2278 | { |
2289 | struct ipw_sensitivity_calib calib = { | 2279 | struct ipw_sensitivity_calib calib = { |
2290 | .beacon_rssi_raw = sens, | 2280 | .beacon_rssi_raw = cpu_to_le16(sens), |
2291 | }; | 2281 | }; |
2292 | 2282 | ||
2293 | return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), | 2283 | return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), |
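This and the following hunks address endianness: multi-byte fields in host-command payloads are little-endian on the wire, so they are converted at assignment rather than passed in CPU order. The pattern, as a sketch (the wrapper name example_set_sensitivity() is illustrative; the struct and command names are from this file):

static int example_set_sensitivity(struct ipw_priv *priv, u16 sens)
{
	struct ipw_sensitivity_calib calib = {
		/* device-side format is little-endian */
		.beacon_rssi_raw = cpu_to_le16(sens),
	};

	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB,
				sizeof(calib), &calib);
}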
@@ -2353,6 +2343,7 @@ static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) | |||
2353 | return -1; | 2343 | return -1; |
2354 | } | 2344 | } |
2355 | 2345 | ||
2346 | phy_off = cpu_to_le32(phy_off); | ||
2356 | return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off), | 2347 | return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off), |
2357 | &phy_off); | 2348 | &phy_off); |
2358 | } | 2349 | } |
@@ -2414,7 +2405,7 @@ static int ipw_set_tx_power(struct ipw_priv *priv) | |||
2414 | static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) | 2405 | static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) |
2415 | { | 2406 | { |
2416 | struct ipw_rts_threshold rts_threshold = { | 2407 | struct ipw_rts_threshold rts_threshold = { |
2417 | .rts_threshold = rts, | 2408 | .rts_threshold = cpu_to_le16(rts), |
2418 | }; | 2409 | }; |
2419 | 2410 | ||
2420 | if (!priv) { | 2411 | if (!priv) { |
@@ -2429,7 +2420,7 @@ static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) | |||
2429 | static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) | 2420 | static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) |
2430 | { | 2421 | { |
2431 | struct ipw_frag_threshold frag_threshold = { | 2422 | struct ipw_frag_threshold frag_threshold = { |
2432 | .frag_threshold = frag, | 2423 | .frag_threshold = cpu_to_le16(frag), |
2433 | }; | 2424 | }; |
2434 | 2425 | ||
2435 | if (!priv) { | 2426 | if (!priv) { |
@@ -2464,6 +2455,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) | |||
2464 | break; | 2455 | break; |
2465 | } | 2456 | } |
2466 | 2457 | ||
2458 | param = cpu_to_le32(mode); | ||
2467 | return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), | 2459 | return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), |
2468 | ¶m); | 2460 | ¶m); |
2469 | } | 2461 | } |
@@ -3915,7 +3907,6 @@ static const struct ipw_status_code ipw_status_codes[] = { | |||
3915 | {0x2E, "Cipher suite is rejected per security policy"}, | 3907 | {0x2E, "Cipher suite is rejected per security policy"}, |
3916 | }; | 3908 | }; |
3917 | 3909 | ||
3918 | #ifdef CONFIG_IPW2200_DEBUG | ||
3919 | static const char *ipw_get_status_code(u16 status) | 3910 | static const char *ipw_get_status_code(u16 status) |
3920 | { | 3911 | { |
3921 | int i; | 3912 | int i; |
@@ -3924,7 +3915,6 @@ static const char *ipw_get_status_code(u16 status) | |||
3924 | return ipw_status_codes[i].reason; | 3915 | return ipw_status_codes[i].reason; |
3925 | return "Unknown status value."; | 3916 | return "Unknown status value."; |
3926 | } | 3917 | } |
3927 | #endif | ||
3928 | 3918 | ||
3929 | static void inline average_init(struct average *avg) | 3919 | static void inline average_init(struct average *avg) |
3930 | { | 3920 | { |
@@ -4394,7 +4384,6 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4394 | if (priv-> | 4384 | if (priv-> |
4395 | status & (STATUS_ASSOCIATED | | 4385 | status & (STATUS_ASSOCIATED | |
4396 | STATUS_AUTH)) { | 4386 | STATUS_AUTH)) { |
4397 | #ifdef CONFIG_IPW2200_DEBUG | ||
4398 | struct notif_authenticate *auth | 4387 | struct notif_authenticate *auth |
4399 | = ¬if->u.auth; | 4388 | = ¬if->u.auth; |
4400 | IPW_DEBUG(IPW_DL_NOTIF | | 4389 | IPW_DEBUG(IPW_DL_NOTIF | |
@@ -4412,7 +4401,6 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4412 | ipw_get_status_code | 4401 | ipw_get_status_code |
4413 | (ntohs | 4402 | (ntohs |
4414 | (auth->status))); | 4403 | (auth->status))); |
4415 | #endif | ||
4416 | 4404 | ||
4417 | priv->status &= | 4405 | priv->status &= |
4418 | ~(STATUS_ASSOCIATING | | 4406 | ~(STATUS_ASSOCIATING | |
@@ -5055,7 +5043,6 @@ static void ipw_rx_queue_replenish(void *data) | |||
5055 | } | 5043 | } |
5056 | list_del(element); | 5044 | list_del(element); |
5057 | 5045 | ||
5058 | rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data; | ||
5059 | rxb->dma_addr = | 5046 | rxb->dma_addr = |
5060 | pci_map_single(priv->pci_dev, rxb->skb->data, | 5047 | pci_map_single(priv->pci_dev, rxb->skb->data, |
5061 | IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | 5048 | IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); |
@@ -5834,8 +5821,8 @@ static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) | |||
5834 | key.station_index = 0; /* always 0 for BSS */ | 5821 | key.station_index = 0; /* always 0 for BSS */ |
5835 | key.flags = 0; | 5822 | key.flags = 0; |
5836 | /* 0 for new key; previous value of counter (after fatal error) */ | 5823 | /* 0 for new key; previous value of counter (after fatal error) */ |
5837 | key.tx_counter[0] = 0; | 5824 | key.tx_counter[0] = cpu_to_le32(0); |
5838 | key.tx_counter[1] = 0; | 5825 | key.tx_counter[1] = cpu_to_le32(0); |
5839 | 5826 | ||
5840 | ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); | 5827 | ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); |
5841 | } | 5828 | } |
@@ -5969,7 +5956,6 @@ static void ipw_bg_adhoc_check(void *data) | |||
5969 | mutex_unlock(&priv->mutex); | 5956 | mutex_unlock(&priv->mutex); |
5970 | } | 5957 | } |
5971 | 5958 | ||
5972 | #ifdef CONFIG_IPW2200_DEBUG | ||
5973 | static void ipw_debug_config(struct ipw_priv *priv) | 5959 | static void ipw_debug_config(struct ipw_priv *priv) |
5974 | { | 5960 | { |
5975 | IPW_DEBUG_INFO("Scan completed, no valid APs matched " | 5961 | IPW_DEBUG_INFO("Scan completed, no valid APs matched " |
@@ -5994,9 +5980,6 @@ static void ipw_debug_config(struct ipw_priv *priv) | |||
5994 | IPW_DEBUG_INFO("PRIVACY off\n"); | 5980 | IPW_DEBUG_INFO("PRIVACY off\n"); |
5995 | IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); | 5981 | IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); |
5996 | } | 5982 | } |
5997 | #else | ||
5998 | #define ipw_debug_config(x) do {} while (0) | ||
5999 | #endif | ||
6000 | 5983 | ||
6001 | static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) | 5984 | static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) |
6002 | { | 5985 | { |
@@ -6184,7 +6167,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, | |||
6184 | } | 6167 | } |
6185 | } | 6168 | } |
6186 | 6169 | ||
6187 | static int ipw_request_scan(struct ipw_priv *priv) | 6170 | static int ipw_request_scan_helper(struct ipw_priv *priv, int type) |
6188 | { | 6171 | { |
6189 | struct ipw_scan_request_ext scan; | 6172 | struct ipw_scan_request_ext scan; |
6190 | int err = 0, scan_type; | 6173 | int err = 0, scan_type; |
@@ -6215,19 +6198,29 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6215 | } | 6198 | } |
6216 | 6199 | ||
6217 | memset(&scan, 0, sizeof(scan)); | 6200 | memset(&scan, 0, sizeof(scan)); |
6201 | scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee)); | ||
6218 | 6202 | ||
6219 | if (priv->config & CFG_SPEED_SCAN) | 6203 | if (type == IW_SCAN_TYPE_PASSIVE) { |
6204 | IPW_DEBUG_WX("use passive scanning\n"); | ||
6205 | scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN; | ||
6206 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = | ||
6207 | cpu_to_le16(120); | ||
6208 | ipw_add_scan_channels(priv, &scan, scan_type); | ||
6209 | goto send_request; | ||
6210 | } | ||
6211 | |||
6212 | /* Use active scan by default. */ | ||
6213 | if (priv->config & CFG_SPEED_SCAN) | ||
6220 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = | 6214 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = |
6221 | cpu_to_le16(30); | 6215 | cpu_to_le16(30); |
6222 | else | 6216 | else |
6223 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = | 6217 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = |
6224 | cpu_to_le16(20); | 6218 | cpu_to_le16(20); |
6225 | 6219 | ||
6226 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = | 6220 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = |
6227 | cpu_to_le16(20); | 6221 | cpu_to_le16(20); |
6228 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); | ||
6229 | 6222 | ||
6230 | scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee)); | 6223 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); |
6231 | 6224 | ||
6232 | #ifdef CONFIG_IPW2200_MONITOR | 6225 | #ifdef CONFIG_IPW2200_MONITOR |
6233 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | 6226 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { |
@@ -6264,7 +6257,7 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6264 | * | 6257 | * |
6265 | * TODO: Move SPEED SCAN support to all modes and bands */ | 6258 | * TODO: Move SPEED SCAN support to all modes and bands */ |
6266 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = | 6259 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = |
6267 | cpu_to_le16(2000); | 6260 | cpu_to_le16(2000); |
6268 | } else { | 6261 | } else { |
6269 | #endif /* CONFIG_IPW2200_MONITOR */ | 6262 | #endif /* CONFIG_IPW2200_MONITOR */ |
6270 | /* If we are roaming, then make this a directed scan for the | 6263 | /* If we are roaming, then make this a directed scan for the |
@@ -6290,6 +6283,7 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6290 | } | 6283 | } |
6291 | #endif | 6284 | #endif |
6292 | 6285 | ||
6286 | send_request: | ||
6293 | err = ipw_send_scan_request_ext(priv, &scan); | 6287 | err = ipw_send_scan_request_ext(priv, &scan); |
6294 | if (err) { | 6288 | if (err) { |
6295 | IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); | 6289 | IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); |
@@ -6300,11 +6294,19 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6300 | priv->status &= ~STATUS_SCAN_PENDING; | 6294 | priv->status &= ~STATUS_SCAN_PENDING; |
6301 | queue_delayed_work(priv->workqueue, &priv->scan_check, | 6295 | queue_delayed_work(priv->workqueue, &priv->scan_check, |
6302 | IPW_SCAN_CHECK_WATCHDOG); | 6296 | IPW_SCAN_CHECK_WATCHDOG); |
6303 | done: | 6297 | done: |
6304 | mutex_unlock(&priv->mutex); | 6298 | mutex_unlock(&priv->mutex); |
6305 | return err; | 6299 | return err; |
6306 | } | 6300 | } |
6307 | 6301 | ||
6302 | static int ipw_request_passive_scan(struct ipw_priv *priv) { | ||
6303 | return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); | ||
6304 | } | ||
6305 | |||
6306 | static int ipw_request_scan(struct ipw_priv *priv) { | ||
6307 | return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); | ||
6308 | } | ||
6309 | |||
6308 | static void ipw_bg_abort_scan(void *data) | 6310 | static void ipw_bg_abort_scan(void *data) |
6309 | { | 6311 | { |
6310 | struct ipw_priv *priv = data; | 6312 | struct ipw_priv *priv = data; |
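ipw_request_scan() is now a thin wrapper around ipw_request_scan_helper(), with a second wrapper for passive scans. A hypothetical call site would dispatch on the requested scan type, for example:

	/* Sketch: choose the scan flavour; the two wrappers exist in this patch,
	 * the dispatch function itself is only an illustration. */
	static int example_do_scan(struct ipw_priv *priv, int scan_type)
	{
		if (scan_type == IW_SCAN_TYPE_PASSIVE)
			return ipw_request_passive_scan(priv);
		return ipw_request_scan(priv);
	}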
@@ -6790,7 +6792,7 @@ static int ipw_qos_activate(struct ipw_priv *priv, | |||
6790 | burst_duration = ipw_qos_get_burst_duration(priv); | 6792 | burst_duration = ipw_qos_get_burst_duration(priv); |
6791 | for (i = 0; i < QOS_QUEUE_NUM; i++) | 6793 | for (i = 0; i < QOS_QUEUE_NUM; i++) |
6792 | qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] = | 6794 | qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] = |
6793 | (u16) burst_duration; | 6795 | (u16)burst_duration; |
6794 | } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { | 6796 | } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { |
6795 | if (type == IEEE_B) { | 6797 | if (type == IEEE_B) { |
6796 | IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n", | 6798 |
@@ -6822,11 +6824,20 @@ static int ipw_qos_activate(struct ipw_priv *priv, | |||
6822 | burst_duration = ipw_qos_get_burst_duration(priv); | 6824 | burst_duration = ipw_qos_get_burst_duration(priv); |
6823 | for (i = 0; i < QOS_QUEUE_NUM; i++) | 6825 | for (i = 0; i < QOS_QUEUE_NUM; i++) |
6824 | qos_parameters[QOS_PARAM_SET_ACTIVE]. | 6826 | qos_parameters[QOS_PARAM_SET_ACTIVE]. |
6825 | tx_op_limit[i] = (u16) burst_duration; | 6827 | tx_op_limit[i] = (u16)burst_duration; |
6826 | } | 6828 | } |
6827 | } | 6829 | } |
6828 | 6830 | ||
6829 | IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); | 6831 | IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); |
6832 | for (i = 0; i < 3; i++) { | ||
6833 | int j; | ||
6834 | for (j = 0; j < QOS_QUEUE_NUM; j++) { | ||
6835 | qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]); | ||
6836 | qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]); | ||
6837 | qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]); | ||
6838 | } | ||
6839 | } | ||
6840 | |||
6830 | err = ipw_send_qos_params_command(priv, | 6841 | err = ipw_send_qos_params_command(priv, |
6831 | (struct ieee80211_qos_parameters *) | 6842 | (struct ieee80211_qos_parameters *) |
6832 | &(qos_parameters[0])); | 6843 | &(qos_parameters[0])); |
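The loop added above converts every 16-bit QoS field to little-endian exactly once, in place, before the parameter block is handed to ipw_send_qos_params_command(). A small userspace illustration of the same in-place conversion, with htole16 standing in for the kernel's cpu_to_le16 (QOS_QUEUE_NUM and the sample values are assumptions made for the demo):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QOS_QUEUE_NUM 4   /* assumed queue count for the demo */

    struct qos_parameters {
        uint16_t cw_min[QOS_QUEUE_NUM];
        uint16_t cw_max[QOS_QUEUE_NUM];
        uint16_t tx_op_limit[QOS_QUEUE_NUM];
    };

    int main(void)
    {
        struct qos_parameters qos[3] = {
            { { 3, 3, 2, 2 }, { 10, 10, 4, 3 }, { 0, 0, 94, 47 } }
        };
        int i, j;

        /* Convert every field exactly once; doing it a second time would
         * swap the bytes back on a big-endian host. */
        for (i = 0; i < 3; i++)
            for (j = 0; j < QOS_QUEUE_NUM; j++) {
                qos[i].cw_min[j]      = htole16(qos[i].cw_min[j]);
                qos[i].cw_max[j]      = htole16(qos[i].cw_max[j]);
                qos[i].tx_op_limit[j] = htole16(qos[i].tx_op_limit[j]);
            }

        printf("cw_min[0] on the wire: 0x%04x\n", qos[0].cw_min[0]);
        return 0;
    }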
@@ -7065,7 +7076,7 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, | |||
7065 | 7076 | ||
7066 | if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { | 7077 | if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { |
7067 | tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; | 7078 | tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; |
7068 | tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK; | 7079 | tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK); |
7069 | } | 7080 | } |
7070 | return 0; | 7081 | return 0; |
7071 | } | 7082 | } |
@@ -7646,7 +7657,6 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, | |||
7646 | /* Big bitfield of all the fields we provide in radiotap */ | 7657 | /* Big bitfield of all the fields we provide in radiotap */ |
7647 | ipw_rt->rt_hdr.it_present = | 7658 | ipw_rt->rt_hdr.it_present = |
7648 | ((1 << IEEE80211_RADIOTAP_FLAGS) | | 7659 | ((1 << IEEE80211_RADIOTAP_FLAGS) | |
7649 | (1 << IEEE80211_RADIOTAP_TSFT) | | ||
7650 | (1 << IEEE80211_RADIOTAP_RATE) | | 7660 | (1 << IEEE80211_RADIOTAP_RATE) | |
7651 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | 7661 | (1 << IEEE80211_RADIOTAP_CHANNEL) | |
7652 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | 7662 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | |
@@ -7655,6 +7665,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, | |||
7655 | 7665 | ||
7656 | /* Zero the flags, we'll add to them as we go */ | 7666 | /* Zero the flags, we'll add to them as we go */ |
7657 | ipw_rt->rt_flags = 0; | 7667 | ipw_rt->rt_flags = 0; |
7668 | ipw_rt->rt_tsf = 0ULL; | ||
7658 | 7669 | ||
7659 | /* Convert signal to DBM */ | 7670 | /* Convert signal to DBM */ |
7660 | ipw_rt->rt_dbmsignal = antsignal; | 7671 | ipw_rt->rt_dbmsignal = antsignal; |
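Both monitor-mode receive paths stop advertising IEEE80211_RADIOTAP_TSFT in it_present and zero rt_tsf instead, because a radiotap header may only flag fields it actually fills in and no TSF value is available here. A standalone sketch of building such a present bitmask (the field indexes are the standard radiotap ones; the exact set of trailing fields is assumed from the surrounding context):

    #include <stdio.h>

    /* Standard radiotap field indexes (subset) */
    enum {
        RT_TSFT          = 0,
        RT_FLAGS         = 1,
        RT_RATE          = 2,
        RT_CHANNEL       = 3,
        RT_DBM_ANTSIGNAL = 5,
        RT_DBM_ANTNOISE  = 6,
        RT_ANTENNA       = 11,
    };

    int main(void)
    {
        /* TSFT deliberately left out: that field is not populated. */
        unsigned int it_present =
            (1u << RT_FLAGS) |
            (1u << RT_RATE) |
            (1u << RT_CHANNEL) |
            (1u << RT_DBM_ANTSIGNAL) |
            (1u << RT_DBM_ANTNOISE) |
            (1u << RT_ANTENNA);

        printf("it_present = 0x%08x\n", it_present);
        return 0;
    }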
@@ -7773,7 +7784,6 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, | |||
7773 | s8 noise = frame->noise; | 7784 | s8 noise = frame->noise; |
7774 | u8 rate = frame->rate; | 7785 | u8 rate = frame->rate; |
7775 | short len = le16_to_cpu(pkt->u.frame.length); | 7786 | short len = le16_to_cpu(pkt->u.frame.length); |
7776 | u64 tsf = 0; | ||
7777 | struct sk_buff *skb; | 7787 | struct sk_buff *skb; |
7778 | int hdr_only = 0; | 7788 | int hdr_only = 0; |
7779 | u16 filter = priv->prom_priv->filter; | 7789 | u16 filter = priv->prom_priv->filter; |
@@ -7808,17 +7818,17 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, | |||
7808 | } | 7818 | } |
7809 | 7819 | ||
7810 | hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; | 7820 | hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; |
7811 | if (ieee80211_is_management(hdr->frame_ctl)) { | 7821 | if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) { |
7812 | if (filter & IPW_PROM_NO_MGMT) | 7822 | if (filter & IPW_PROM_NO_MGMT) |
7813 | return; | 7823 | return; |
7814 | if (filter & IPW_PROM_MGMT_HEADER_ONLY) | 7824 | if (filter & IPW_PROM_MGMT_HEADER_ONLY) |
7815 | hdr_only = 1; | 7825 | hdr_only = 1; |
7816 | } else if (ieee80211_is_control(hdr->frame_ctl)) { | 7826 | } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) { |
7817 | if (filter & IPW_PROM_NO_CTL) | 7827 | if (filter & IPW_PROM_NO_CTL) |
7818 | return; | 7828 | return; |
7819 | if (filter & IPW_PROM_CTL_HEADER_ONLY) | 7829 | if (filter & IPW_PROM_CTL_HEADER_ONLY) |
7820 | hdr_only = 1; | 7830 | hdr_only = 1; |
7821 | } else if (ieee80211_is_data(hdr->frame_ctl)) { | 7831 | } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) { |
7822 | if (filter & IPW_PROM_NO_DATA) | 7832 | if (filter & IPW_PROM_NO_DATA) |
7823 | return; | 7833 | return; |
7824 | if (filter & IPW_PROM_DATA_HEADER_ONLY) | 7834 | if (filter & IPW_PROM_DATA_HEADER_ONLY) |
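These filter checks (and their transmit-side mirror further below) now byte-swap frame_ctl before calling the ieee80211_is_*() helpers, because the frame control field arrives little-endian on the air while the helpers expect a host-order value. A userspace sketch of the idea, with le16toh standing in for le16_to_cpu (the mask and type values are the standard 802.11 frame-control encodings):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FCTL_FTYPE 0x000c
    #define FTYPE_MGMT 0x0000
    #define FTYPE_CTL  0x0004
    #define FTYPE_DATA 0x0008

    /* frame_ctl_le is the on-air (little-endian) value */
    static int is_data(uint16_t frame_ctl_le)
    {
        uint16_t fc = le16toh(frame_ctl_le);   /* host order first */
        return (fc & FCTL_FTYPE) == FTYPE_DATA;
    }

    int main(void)
    {
        uint16_t fc_le = htole16(FTYPE_DATA);  /* a plain data frame */

        printf("is_data = %d\n", is_data(fc_le));  /* 1 on any host */
        return 0;
    }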
@@ -7836,7 +7846,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, | |||
7836 | ipw_rt = (void *)skb->data; | 7846 | ipw_rt = (void *)skb->data; |
7837 | 7847 | ||
7838 | if (hdr_only) | 7848 | if (hdr_only) |
7839 | len = ieee80211_get_hdrlen(hdr->frame_ctl); | 7849 | len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); |
7840 | 7850 | ||
7841 | memcpy(ipw_rt->payload, hdr, len); | 7851 | memcpy(ipw_rt->payload, hdr, len); |
7842 | 7852 | ||
@@ -7859,7 +7869,6 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, | |||
7859 | /* Big bitfield of all the fields we provide in radiotap */ | 7869 | /* Big bitfield of all the fields we provide in radiotap */ |
7860 | ipw_rt->rt_hdr.it_present = | 7870 | ipw_rt->rt_hdr.it_present = |
7861 | ((1 << IEEE80211_RADIOTAP_FLAGS) | | 7871 | ((1 << IEEE80211_RADIOTAP_FLAGS) | |
7862 | (1 << IEEE80211_RADIOTAP_TSFT) | | ||
7863 | (1 << IEEE80211_RADIOTAP_RATE) | | 7872 | (1 << IEEE80211_RADIOTAP_RATE) | |
7864 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | 7873 | (1 << IEEE80211_RADIOTAP_CHANNEL) | |
7865 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | 7874 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | |
@@ -7868,8 +7877,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, | |||
7868 | 7877 | ||
7869 | /* Zero the flags, we'll add to them as we go */ | 7878 | /* Zero the flags, we'll add to them as we go */ |
7870 | ipw_rt->rt_flags = 0; | 7879 | ipw_rt->rt_flags = 0; |
7871 | 7880 | ipw_rt->rt_tsf = 0ULL; | |
7872 | ipw_rt->rt_tsf = tsf; | ||
7873 | 7881 | ||
7874 | /* Convert to DBM */ | 7882 | /* Convert to DBM */ |
7875 | ipw_rt->rt_dbmsignal = signal; | 7883 | ipw_rt->rt_dbmsignal = signal; |
@@ -8142,8 +8150,7 @@ static void ipw_rx(struct ipw_priv *priv) | |||
8142 | switch (pkt->header.message_type) { | 8150 | switch (pkt->header.message_type) { |
8143 | case RX_FRAME_TYPE: /* 802.11 frame */ { | 8151 | case RX_FRAME_TYPE: /* 802.11 frame */ { |
8144 | struct ieee80211_rx_stats stats = { | 8152 | struct ieee80211_rx_stats stats = { |
8145 | .rssi = | 8153 | .rssi = pkt->u.frame.rssi_dbm - |
8146 | le16_to_cpu(pkt->u.frame.rssi_dbm) - | ||
8147 | IPW_RSSI_TO_DBM, | 8154 | IPW_RSSI_TO_DBM, |
8148 | .signal = | 8155 | .signal = |
8149 | le16_to_cpu(pkt->u.frame.rssi_dbm) - | 8156 | le16_to_cpu(pkt->u.frame.rssi_dbm) - |
@@ -8578,9 +8585,26 @@ static int ipw_wx_get_freq(struct net_device *dev, | |||
8578 | * configured CHANNEL then return that; otherwise return ANY */ | 8585 | * configured CHANNEL then return that; otherwise return ANY */ |
8579 | mutex_lock(&priv->mutex); | 8586 | mutex_lock(&priv->mutex); |
8580 | if (priv->config & CFG_STATIC_CHANNEL || | 8587 | if (priv->config & CFG_STATIC_CHANNEL || |
8581 | priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) | 8588 | priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) { |
8582 | wrqu->freq.m = priv->channel; | 8589 | int i; |
8583 | else | 8590 | |
8591 | i = ieee80211_channel_to_index(priv->ieee, priv->channel); | ||
8592 | BUG_ON(i == -1); | ||
8593 | wrqu->freq.e = 1; | ||
8594 | |||
8595 | switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) { | ||
8596 | case IEEE80211_52GHZ_BAND: | ||
8597 | wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000; | ||
8598 | break; | ||
8599 | |||
8600 | case IEEE80211_24GHZ_BAND: | ||
8601 | wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000; | ||
8602 | break; | ||
8603 | |||
8604 | default: | ||
8605 | BUG(); | ||
8606 | } | ||
8607 | } else | ||
8584 | wrqu->freq.m = 0; | 8608 | wrqu->freq.m = 0; |
8585 | 8609 | ||
8586 | mutex_unlock(&priv->mutex); | 8610 | mutex_unlock(&priv->mutex); |
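The SIOCGIWFREQ handler above now reports an absolute frequency rather than a bare channel number: the geometry tables hold the channel frequency in MHz, and wireless extensions encode a frequency as m * 10^e, so m = freq_MHz * 100000 together with e = 1 yields the value in Hz. A tiny standalone check of that encoding (the channel/frequency pair is just an example):

    #include <stdio.h>

    int main(void)
    {
        int freq_mhz = 2412;               /* e.g. 2.4 GHz channel 1 */
        long m = (long)freq_mhz * 100000;  /* freq.m */
        int e = 1;                         /* freq.e */

        /* reported value = m * 10^e Hz */
        printf("freq.m=%ld freq.e=%d -> %.3f GHz\n",
               m, e, m * 10.0 / 1e9);
        return 0;
    }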
@@ -8836,42 +8860,38 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8836 | union iwreq_data *wrqu, char *extra) | 8860 | union iwreq_data *wrqu, char *extra) |
8837 | { | 8861 | { |
8838 | struct ipw_priv *priv = ieee80211_priv(dev); | 8862 | struct ipw_priv *priv = ieee80211_priv(dev); |
8839 | char *essid = ""; /* ANY */ | 8863 | int length; |
8840 | int length = 0; | 8864 | |
8841 | mutex_lock(&priv->mutex); | 8865 | mutex_lock(&priv->mutex); |
8842 | if (wrqu->essid.flags && wrqu->essid.length) { | ||
8843 | length = wrqu->essid.length - 1; | ||
8844 | essid = extra; | ||
8845 | } | ||
8846 | if (length == 0) { | ||
8847 | IPW_DEBUG_WX("Setting ESSID to ANY\n"); | ||
8848 | if ((priv->config & CFG_STATIC_ESSID) && | ||
8849 | !(priv->status & (STATUS_ASSOCIATED | | ||
8850 | STATUS_ASSOCIATING))) { | ||
8851 | IPW_DEBUG_ASSOC("Attempting to associate with new " | ||
8852 | "parameters.\n"); | ||
8853 | priv->config &= ~CFG_STATIC_ESSID; | ||
8854 | ipw_associate(priv); | ||
8855 | } | ||
8856 | mutex_unlock(&priv->mutex); | ||
8857 | return 0; | ||
8858 | } | ||
8859 | 8866 | ||
8860 | length = min(length, IW_ESSID_MAX_SIZE); | 8867 | if (!wrqu->essid.flags) |
8868 | { | ||
8869 | IPW_DEBUG_WX("Setting ESSID to ANY\n"); | ||
8870 | ipw_disassociate(priv); | ||
8871 | priv->config &= ~CFG_STATIC_ESSID; | ||
8872 | ipw_associate(priv); | ||
8873 | mutex_unlock(&priv->mutex); | ||
8874 | return 0; | ||
8875 | } | ||
8876 | |||
8877 | length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE); | ||
8878 | if (!extra[length - 1]) | ||
8879 | length--; | ||
8861 | 8880 | ||
8862 | priv->config |= CFG_STATIC_ESSID; | 8881 | priv->config |= CFG_STATIC_ESSID; |
8863 | 8882 | ||
8864 | if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { | 8883 | if (priv->essid_len == length && !memcmp(priv->essid, extra, length) |
8884 | && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) { | ||
8865 | IPW_DEBUG_WX("ESSID set to current ESSID.\n"); | 8885 | IPW_DEBUG_WX("ESSID set to current ESSID.\n"); |
8866 | mutex_unlock(&priv->mutex); | 8886 | mutex_unlock(&priv->mutex); |
8867 | return 0; | 8887 | return 0; |
8868 | } | 8888 | } |
8869 | 8889 | ||
8870 | IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length), | 8890 | IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length), |
8871 | length); | 8891 | length); |
8872 | 8892 | ||
8873 | priv->essid_len = length; | 8893 | priv->essid_len = length; |
8874 | memcpy(priv->essid, essid, priv->essid_len); | 8894 | memcpy(priv->essid, extra, priv->essid_len); |
8875 | 8895 | ||
8876 | /* Network configuration changed -- force [re]association */ | 8896 | /* Network configuration changed -- force [re]association */ |
8877 | IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n"); | 8897 | IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n"); |
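The rewritten SIOCSIWESSID handler treats an unset flags field as a request to associate with ANY network, and otherwise clamps the user-supplied length to IW_ESSID_MAX_SIZE and drops a trailing NUL if user space included one. A userspace sketch of just that length handling (the zero-length guard is added here so the standalone demo is safe; IW_ESSID_MAX_SIZE is the usual wireless-extensions limit of 32):

    #include <stdio.h>

    #define IW_ESSID_MAX_SIZE 32

    static int essid_length(const char *extra, int reported_len)
    {
        int length = reported_len < IW_ESSID_MAX_SIZE ?
                     reported_len : IW_ESSID_MAX_SIZE;

        /* Some tools pass the terminating NUL, some don't. */
        if (length > 0 && extra[length - 1] == '\0')
            length--;
        return length;
    }

    int main(void)
    {
        printf("%d\n", essid_length("linux", 6));  /* 5: NUL included */
        printf("%d\n", essid_length("linux", 5));  /* 5: NUL omitted  */
        return 0;
    }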
@@ -9252,7 +9272,7 @@ static int ipw_wx_set_retry(struct net_device *dev, | |||
9252 | if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) | 9272 | if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) |
9253 | return 0; | 9273 | return 0; |
9254 | 9274 | ||
9255 | if (wrqu->retry.value < 0 || wrqu->retry.value > 255) | 9275 | if (wrqu->retry.value < 0 || wrqu->retry.value >= 255) |
9256 | return -EINVAL; | 9276 | return -EINVAL; |
9257 | 9277 | ||
9258 | mutex_lock(&priv->mutex); | 9278 | mutex_lock(&priv->mutex); |
@@ -9375,15 +9395,19 @@ static int ipw_wx_set_scan(struct net_device *dev, | |||
9375 | union iwreq_data *wrqu, char *extra) | 9395 | union iwreq_data *wrqu, char *extra) |
9376 | { | 9396 | { |
9377 | struct ipw_priv *priv = ieee80211_priv(dev); | 9397 | struct ipw_priv *priv = ieee80211_priv(dev); |
9378 | struct iw_scan_req *req = NULL; | 9398 | struct iw_scan_req *req = (struct iw_scan_req *)extra; |
9379 | if (wrqu->data.length | 9399 | |
9380 | && wrqu->data.length == sizeof(struct iw_scan_req)) { | 9400 | if (wrqu->data.length == sizeof(struct iw_scan_req)) { |
9381 | req = (struct iw_scan_req *)extra; | ||
9382 | if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { | 9401 | if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { |
9383 | ipw_request_direct_scan(priv, req->essid, | 9402 | ipw_request_direct_scan(priv, req->essid, |
9384 | req->essid_len); | 9403 | req->essid_len); |
9385 | return 0; | 9404 | return 0; |
9386 | } | 9405 | } |
9406 | if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { | ||
9407 | queue_work(priv->workqueue, | ||
9408 | &priv->request_passive_scan); | ||
9409 | return 0; | ||
9410 | } | ||
9387 | } | 9411 | } |
9388 | 9412 | ||
9389 | IPW_DEBUG_WX("Start scan\n"); | 9413 | IPW_DEBUG_WX("Start scan\n"); |
@@ -10092,7 +10116,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
10092 | switch (priv->ieee->sec.level) { | 10116 | switch (priv->ieee->sec.level) { |
10093 | case SEC_LEVEL_3: | 10117 | case SEC_LEVEL_3: |
10094 | tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= | 10118 | tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= |
10095 | IEEE80211_FCTL_PROTECTED; | 10119 | cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
10096 | /* XXX: ACK flag must be set for CCMP even if it | 10120 | /* XXX: ACK flag must be set for CCMP even if it |
10097 | * is a multicast/broadcast packet, because CCMP | 10121 | * is a multicast/broadcast packet, because CCMP |
10098 | * group communication encrypted by GTK is | 10122 | * group communication encrypted by GTK is |
@@ -10107,14 +10131,14 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
10107 | break; | 10131 | break; |
10108 | case SEC_LEVEL_2: | 10132 | case SEC_LEVEL_2: |
10109 | tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= | 10133 | tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= |
10110 | IEEE80211_FCTL_PROTECTED; | 10134 | cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
10111 | tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; | 10135 | tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; |
10112 | tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP; | 10136 | tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP; |
10113 | tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE; | 10137 | tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE; |
10114 | break; | 10138 | break; |
10115 | case SEC_LEVEL_1: | 10139 | case SEC_LEVEL_1: |
10116 | tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= | 10140 | tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= |
10117 | IEEE80211_FCTL_PROTECTED; | 10141 | cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
10118 | tfd->u.data.key_index = priv->ieee->tx_keyidx; | 10142 | tfd->u.data.key_index = priv->ieee->tx_keyidx; |
10119 | if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <= | 10143 | if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <= |
10120 | 40) | 10144 | 40) |
@@ -10246,17 +10270,17 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, | |||
10246 | 10270 | ||
10247 | /* Filtering of fragment chains is done against the first fragment */ | 10271 |
10248 | hdr = (void *)txb->fragments[0]->data; | 10272 | hdr = (void *)txb->fragments[0]->data; |
10249 | if (ieee80211_is_management(hdr->frame_ctl)) { | 10273 | if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) { |
10250 | if (filter & IPW_PROM_NO_MGMT) | 10274 | if (filter & IPW_PROM_NO_MGMT) |
10251 | return; | 10275 | return; |
10252 | if (filter & IPW_PROM_MGMT_HEADER_ONLY) | 10276 | if (filter & IPW_PROM_MGMT_HEADER_ONLY) |
10253 | hdr_only = 1; | 10277 | hdr_only = 1; |
10254 | } else if (ieee80211_is_control(hdr->frame_ctl)) { | 10278 | } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) { |
10255 | if (filter & IPW_PROM_NO_CTL) | 10279 | if (filter & IPW_PROM_NO_CTL) |
10256 | return; | 10280 | return; |
10257 | if (filter & IPW_PROM_CTL_HEADER_ONLY) | 10281 | if (filter & IPW_PROM_CTL_HEADER_ONLY) |
10258 | hdr_only = 1; | 10282 | hdr_only = 1; |
10259 | } else if (ieee80211_is_data(hdr->frame_ctl)) { | 10283 | } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) { |
10260 | if (filter & IPW_PROM_NO_DATA) | 10284 | if (filter & IPW_PROM_NO_DATA) |
10261 | return; | 10285 | return; |
10262 | if (filter & IPW_PROM_DATA_HEADER_ONLY) | 10286 | if (filter & IPW_PROM_DATA_HEADER_ONLY) |
@@ -10271,7 +10295,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, | |||
10271 | 10295 | ||
10272 | if (hdr_only) { | 10296 | if (hdr_only) { |
10273 | hdr = (void *)src->data; | 10297 | hdr = (void *)src->data; |
10274 | len = ieee80211_get_hdrlen(hdr->frame_ctl); | 10298 | len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); |
10275 | } else | 10299 | } else |
10276 | len = src->len; | 10300 | len = src->len; |
10277 | 10301 | ||
@@ -10615,6 +10639,8 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) | |||
10615 | INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); | 10639 | INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); |
10616 | INIT_WORK(&priv->request_scan, | 10640 | INIT_WORK(&priv->request_scan, |
10617 | (void (*)(void *))ipw_request_scan, priv); | 10641 | (void (*)(void *))ipw_request_scan, priv); |
10642 | INIT_WORK(&priv->request_passive_scan, | ||
10643 | (void (*)(void *))ipw_request_passive_scan, priv); | ||
10618 | INIT_WORK(&priv->gather_stats, | 10644 | INIT_WORK(&priv->gather_stats, |
10619 | (void (*)(void *))ipw_bg_gather_stats, priv); | 10645 | (void (*)(void *))ipw_bg_gather_stats, priv); |
10620 | INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); | 10646 | INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); |
@@ -11467,9 +11493,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
11467 | 11493 | ||
11468 | priv->net_dev = net_dev; | 11494 | priv->net_dev = net_dev; |
11469 | priv->pci_dev = pdev; | 11495 | priv->pci_dev = pdev; |
11470 | #ifdef CONFIG_IPW2200_DEBUG | ||
11471 | ipw_debug_level = debug; | 11496 | ipw_debug_level = debug; |
11472 | #endif | ||
11473 | spin_lock_init(&priv->irq_lock); | 11497 | spin_lock_init(&priv->irq_lock); |
11474 | spin_lock_init(&priv->lock); | 11498 | spin_lock_init(&priv->lock); |
11475 | for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) | 11499 | for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) |
@@ -11734,6 +11758,16 @@ static int ipw_pci_resume(struct pci_dev *pdev) | |||
11734 | } | 11758 | } |
11735 | #endif | 11759 | #endif |
11736 | 11760 | ||
11761 | static void ipw_pci_shutdown(struct pci_dev *pdev) | ||
11762 | { | ||
11763 | struct ipw_priv *priv = pci_get_drvdata(pdev); | ||
11764 | |||
11765 | /* Take down the device; this powers it off, etc. */ | ||
11766 | ipw_down(priv); | ||
11767 | |||
11768 | pci_disable_device(pdev); | ||
11769 | } | ||
11770 | |||
11737 | /* driver initialization stuff */ | 11771 | /* driver initialization stuff */ |
11738 | static struct pci_driver ipw_driver = { | 11772 | static struct pci_driver ipw_driver = { |
11739 | .name = DRV_NAME, | 11773 | .name = DRV_NAME, |
@@ -11744,6 +11778,7 @@ static struct pci_driver ipw_driver = { | |||
11744 | .suspend = ipw_pci_suspend, | 11778 | .suspend = ipw_pci_suspend, |
11745 | .resume = ipw_pci_resume, | 11779 | .resume = ipw_pci_resume, |
11746 | #endif | 11780 | #endif |
11781 | .shutdown = ipw_pci_shutdown, | ||
11747 | }; | 11782 | }; |
11748 | 11783 | ||
11749 | static int __init ipw_init(void) | 11784 | static int __init ipw_init(void) |
@@ -11787,10 +11822,8 @@ MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); | |||
11787 | module_param(led, int, 0444); | 11822 | module_param(led, int, 0444); |
11788 | MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); | 11823 | MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); |
11789 | 11824 | ||
11790 | #ifdef CONFIG_IPW2200_DEBUG | ||
11791 | module_param(debug, int, 0444); | 11825 | module_param(debug, int, 0444); |
11792 | MODULE_PARM_DESC(debug, "debug output mask"); | 11826 | MODULE_PARM_DESC(debug, "debug output mask"); |
11793 | #endif | ||
11794 | 11827 | ||
11795 | module_param(channel, int, 0444); | 11828 | module_param(channel, int, 0444); |
11796 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); | 11829 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); |
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index 8b1cd7c749a4..dad5eedefbf1 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h | |||
@@ -713,7 +713,6 @@ struct ipw_rx_packet { | |||
713 | 713 | ||
714 | struct ipw_rx_mem_buffer { | 714 | struct ipw_rx_mem_buffer { |
715 | dma_addr_t dma_addr; | 715 | dma_addr_t dma_addr; |
716 | struct ipw_rx_buffer *rxb; | ||
717 | struct sk_buff *skb; | 716 | struct sk_buff *skb; |
718 | struct list_head list; | 717 | struct list_head list; |
719 | }; /* Not transferred over network, so not __attribute__ ((packed)) */ | 718 | }; /* Not transferred over network, so not __attribute__ ((packed)) */ |
@@ -1297,6 +1296,7 @@ struct ipw_priv { | |||
1297 | struct work_struct system_config; | 1296 | struct work_struct system_config; |
1298 | struct work_struct rx_replenish; | 1297 | struct work_struct rx_replenish; |
1299 | struct work_struct request_scan; | 1298 | struct work_struct request_scan; |
1299 | struct work_struct request_passive_scan; | ||
1300 | struct work_struct adapter_restart; | 1300 | struct work_struct adapter_restart; |
1301 | struct work_struct rf_kill; | 1301 | struct work_struct rf_kill; |
1302 | struct work_struct up; | 1302 | struct work_struct up; |
@@ -1381,13 +1381,18 @@ BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\ | |||
1381 | BIT_ARG16(x) | 1381 | BIT_ARG16(x) |
1382 | 1382 | ||
1383 | 1383 | ||
1384 | #ifdef CONFIG_IPW2200_DEBUG | ||
1385 | #define IPW_DEBUG(level, fmt, args...) \ | 1384 | #define IPW_DEBUG(level, fmt, args...) \ |
1386 | do { if (ipw_debug_level & (level)) \ | 1385 | do { if (ipw_debug_level & (level)) \ |
1387 | printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ | 1386 | printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ |
1388 | in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) | 1387 | in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) |
1388 | |||
1389 | #ifdef CONFIG_IPW2200_DEBUG | ||
1390 | #define IPW_LL_DEBUG(level, fmt, args...) \ | ||
1391 | do { if (ipw_debug_level & (level)) \ | ||
1392 | printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ | ||
1393 | in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) | ||
1389 | #else | 1394 | #else |
1390 | #define IPW_DEBUG(level, fmt, args...) do {} while (0) | 1395 | #define IPW_LL_DEBUG(level, fmt, args...) do {} while (0) |
1391 | #endif /* CONFIG_IPW2200_DEBUG */ | 1396 | #endif /* CONFIG_IPW2200_DEBUG */ |
1392 | 1397 | ||
1393 | /* | 1398 | /* |
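This header change splits the debug macros into two tiers: IPW_DEBUG stays always compiled in, while the new IPW_LL_DEBUG used by the hot-path categories mapped in the following hunk expands to nothing unless CONFIG_IPW2200_DEBUG is set. A minimal userspace sketch of the same two-tier pattern (macro and level names are illustrative):

    #include <stdio.h>

    #define DL_INFO (1u << 0)
    #define DL_RX   (1u << 8)

    static unsigned int debug_level = DL_INFO;

    /* Always-available debug, gated only by the runtime level mask. */
    #define MY_DEBUG(level, fmt, args...) \
        do { if (debug_level & (level)) printf(fmt, ## args); } while (0)

    /* Hot-path debug: compiled out entirely unless LL_DEBUG is defined. */
    #ifdef LL_DEBUG
    #define MY_LL_DEBUG(level, fmt, args...) MY_DEBUG(level, fmt, ## args)
    #else
    #define MY_LL_DEBUG(level, fmt, args...) do {} while (0)
    #endif

    int main(void)
    {
        MY_DEBUG(DL_INFO, "association state changed\n"); /* printed   */
        MY_LL_DEBUG(DL_RX, "rx frame, len=%d\n", 60);     /* no output */
        return 0;
    }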
@@ -1457,28 +1462,27 @@ do { if (ipw_debug_level & (level)) \ | |||
1457 | 1462 | ||
1458 | #define IPW_DEBUG_WX(f, a...) IPW_DEBUG(IPW_DL_WX, f, ## a) | 1463 | #define IPW_DEBUG_WX(f, a...) IPW_DEBUG(IPW_DL_WX, f, ## a) |
1459 | #define IPW_DEBUG_SCAN(f, a...) IPW_DEBUG(IPW_DL_SCAN, f, ## a) | 1464 | #define IPW_DEBUG_SCAN(f, a...) IPW_DEBUG(IPW_DL_SCAN, f, ## a) |
1460 | #define IPW_DEBUG_STATUS(f, a...) IPW_DEBUG(IPW_DL_STATUS, f, ## a) | 1465 | #define IPW_DEBUG_TRACE(f, a...) IPW_LL_DEBUG(IPW_DL_TRACE, f, ## a) |
1461 | #define IPW_DEBUG_TRACE(f, a...) IPW_DEBUG(IPW_DL_TRACE, f, ## a) | 1466 | #define IPW_DEBUG_RX(f, a...) IPW_LL_DEBUG(IPW_DL_RX, f, ## a) |
1462 | #define IPW_DEBUG_RX(f, a...) IPW_DEBUG(IPW_DL_RX, f, ## a) | 1467 | #define IPW_DEBUG_TX(f, a...) IPW_LL_DEBUG(IPW_DL_TX, f, ## a) |
1463 | #define IPW_DEBUG_TX(f, a...) IPW_DEBUG(IPW_DL_TX, f, ## a) | 1468 | #define IPW_DEBUG_ISR(f, a...) IPW_LL_DEBUG(IPW_DL_ISR, f, ## a) |
1464 | #define IPW_DEBUG_ISR(f, a...) IPW_DEBUG(IPW_DL_ISR, f, ## a) | ||
1465 | #define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a) | 1469 | #define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a) |
1466 | #define IPW_DEBUG_LED(f, a...) IPW_DEBUG(IPW_DL_LED, f, ## a) | 1470 | #define IPW_DEBUG_LED(f, a...) IPW_LL_DEBUG(IPW_DL_LED, f, ## a) |
1467 | #define IPW_DEBUG_WEP(f, a...) IPW_DEBUG(IPW_DL_WEP, f, ## a) | 1471 | #define IPW_DEBUG_WEP(f, a...) IPW_LL_DEBUG(IPW_DL_WEP, f, ## a) |
1468 | #define IPW_DEBUG_HC(f, a...) IPW_DEBUG(IPW_DL_HOST_COMMAND, f, ## a) | 1472 | #define IPW_DEBUG_HC(f, a...) IPW_LL_DEBUG(IPW_DL_HOST_COMMAND, f, ## a) |
1469 | #define IPW_DEBUG_FRAG(f, a...) IPW_DEBUG(IPW_DL_FRAG, f, ## a) | 1473 | #define IPW_DEBUG_FRAG(f, a...) IPW_LL_DEBUG(IPW_DL_FRAG, f, ## a) |
1470 | #define IPW_DEBUG_FW(f, a...) IPW_DEBUG(IPW_DL_FW, f, ## a) | 1474 | #define IPW_DEBUG_FW(f, a...) IPW_LL_DEBUG(IPW_DL_FW, f, ## a) |
1471 | #define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a) | 1475 | #define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a) |
1472 | #define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a) | 1476 | #define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a) |
1473 | #define IPW_DEBUG_IO(f, a...) IPW_DEBUG(IPW_DL_IO, f, ## a) | 1477 | #define IPW_DEBUG_IO(f, a...) IPW_LL_DEBUG(IPW_DL_IO, f, ## a) |
1474 | #define IPW_DEBUG_ORD(f, a...) IPW_DEBUG(IPW_DL_ORD, f, ## a) | 1478 | #define IPW_DEBUG_ORD(f, a...) IPW_LL_DEBUG(IPW_DL_ORD, f, ## a) |
1475 | #define IPW_DEBUG_FW_INFO(f, a...) IPW_DEBUG(IPW_DL_FW_INFO, f, ## a) | 1479 | #define IPW_DEBUG_FW_INFO(f, a...) IPW_LL_DEBUG(IPW_DL_FW_INFO, f, ## a) |
1476 | #define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a) | 1480 | #define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a) |
1477 | #define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) | 1481 | #define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) |
1478 | #define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) | 1482 | #define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) |
1479 | #define IPW_DEBUG_STATS(f, a...) IPW_DEBUG(IPW_DL_STATS, f, ## a) | 1483 | #define IPW_DEBUG_STATS(f, a...) IPW_LL_DEBUG(IPW_DL_STATS, f, ## a) |
1480 | #define IPW_DEBUG_MERGE(f, a...) IPW_DEBUG(IPW_DL_MERGE, f, ## a) | 1484 | #define IPW_DEBUG_MERGE(f, a...) IPW_LL_DEBUG(IPW_DL_MERGE, f, ## a) |
1481 | #define IPW_DEBUG_QOS(f, a...) IPW_DEBUG(IPW_DL_QOS, f, ## a) | 1485 | #define IPW_DEBUG_QOS(f, a...) IPW_LL_DEBUG(IPW_DL_QOS, f, ## a) |
1482 | 1486 | ||
1483 | #include <linux/ctype.h> | 1487 | #include <linux/ctype.h> |
1484 | 1488 | ||
@@ -1947,10 +1951,17 @@ struct host_cmd { | |||
1947 | u32 *param; | 1951 | u32 *param; |
1948 | } __attribute__ ((packed)); | 1952 | } __attribute__ ((packed)); |
1949 | 1953 | ||
1954 | struct cmdlog_host_cmd { | ||
1955 | u8 cmd; | ||
1956 | u8 len; | ||
1957 | u16 reserved; | ||
1958 | char param[124]; | ||
1959 | } __attribute__ ((packed)); | ||
1960 | |||
1950 | struct ipw_cmd_log { | 1961 | struct ipw_cmd_log { |
1951 | unsigned long jiffies; | 1962 | unsigned long jiffies; |
1952 | int retcode; | 1963 | int retcode; |
1953 | struct host_cmd cmd; | 1964 | struct cmdlog_host_cmd cmd; |
1954 | }; | 1965 | }; |
1955 | 1966 | ||
1956 | /* SysConfig command parameters ... */ | 1967 | /* SysConfig command parameters ... */ |
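struct ipw_cmd_log previously embedded struct host_cmd, whose param member is only a pointer into a buffer that may be long gone by the time the log is read; the new cmdlog_host_cmd keeps a fixed 124-byte copy of the parameters instead. A small sketch contrasting the two layouts (the leading cmd/len/reserved fields of host_cmd are assumed from context):

    #include <stdint.h>
    #include <stdio.h>

    struct host_cmd {
        uint8_t   cmd;
        uint8_t   len;
        uint16_t  reserved;
        uint32_t *param;          /* points at caller-owned data */
    } __attribute__((packed));

    struct cmdlog_host_cmd {
        uint8_t  cmd;
        uint8_t  len;
        uint16_t reserved;
        char     param[124];      /* private copy kept for the log */
    } __attribute__((packed));

    int main(void)
    {
        printf("host_cmd: %zu bytes, cmdlog_host_cmd: %zu bytes\n",
               sizeof(struct host_cmd), sizeof(struct cmdlog_host_cmd));
        return 0;
    }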
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 317ace7f9aae..1174ff53e025 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -82,6 +82,7 @@ | |||
82 | #include <linux/netdevice.h> | 82 | #include <linux/netdevice.h> |
83 | #include <linux/etherdevice.h> | 83 | #include <linux/etherdevice.h> |
84 | #include <linux/ethtool.h> | 84 | #include <linux/ethtool.h> |
85 | #include <linux/if_arp.h> | ||
85 | #include <linux/wireless.h> | 86 | #include <linux/wireless.h> |
86 | #include <net/iw_handler.h> | 87 | #include <net/iw_handler.h> |
87 | #include <net/ieee80211.h> | 88 | #include <net/ieee80211.h> |
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h index 16db3e14b7d2..fb5700d6c454 100644 --- a/drivers/net/wireless/orinoco.h +++ b/drivers/net/wireless/orinoco.h | |||
@@ -134,11 +134,7 @@ extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *reg | |||
134 | /* Locking and synchronization functions */ | 134 | /* Locking and synchronization functions */ |
135 | /********************************************************************/ | 135 | /********************************************************************/ |
136 | 136 | ||
137 | /* These functions *must* be inline or they will break horribly on | 137 | static inline int orinoco_lock(struct orinoco_private *priv, |
138 | * SPARC, due to its weird semantics for save/restore flags. extern | ||
139 | * inline should prevent the kernel from linking or module from | ||
140 | * loading if they are not inlined. */ | ||
141 | extern inline int orinoco_lock(struct orinoco_private *priv, | ||
142 | unsigned long *flags) | 138 | unsigned long *flags) |
143 | { | 139 | { |
144 | spin_lock_irqsave(&priv->lock, *flags); | 140 | spin_lock_irqsave(&priv->lock, *flags); |
@@ -151,7 +147,7 @@ extern inline int orinoco_lock(struct orinoco_private *priv, | |||
151 | return 0; | 147 | return 0; |
152 | } | 148 | } |
153 | 149 | ||
154 | extern inline void orinoco_unlock(struct orinoco_private *priv, | 150 | static inline void orinoco_unlock(struct orinoco_private *priv, |
155 | unsigned long *flags) | 151 | unsigned long *flags) |
156 | { | 152 | { |
157 | spin_unlock_irqrestore(&priv->lock, *flags); | 153 | spin_unlock_irqrestore(&priv->lock, *flags); |
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index ed90a8af1444..098c66846339 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c | |||
@@ -271,6 +271,27 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
271 | return 0; | 271 | return 0; |
272 | } | 272 | } |
273 | 273 | ||
274 | /* | ||
275 | * Deal with sequence counter wrapping correctly; | ||
276 | * refer to time_after() for the analogous jiffies wrapping handling. | ||
277 | */ | ||
278 | static inline int ccmp_replay_check(u8 *pn_n, u8 *pn_o) | ||
279 | { | ||
280 | u32 iv32_n, iv16_n; | ||
281 | u32 iv32_o, iv16_o; | ||
282 | |||
283 | iv32_n = (pn_n[0] << 24) | (pn_n[1] << 16) | (pn_n[2] << 8) | pn_n[3]; | ||
284 | iv16_n = (pn_n[4] << 8) | pn_n[5]; | ||
285 | |||
286 | iv32_o = (pn_o[0] << 24) | (pn_o[1] << 16) | (pn_o[2] << 8) | pn_o[3]; | ||
287 | iv16_o = (pn_o[4] << 8) | pn_o[5]; | ||
288 | |||
289 | if ((s32)iv32_n - (s32)iv32_o < 0 || | ||
290 | (iv32_n == iv32_o && iv16_n <= iv16_o)) | ||
291 | return 1; | ||
292 | return 0; | ||
293 | } | ||
294 | |||
274 | static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | 295 | static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) |
275 | { | 296 | { |
276 | struct ieee80211_ccmp_data *key = priv; | 297 | struct ieee80211_ccmp_data *key = priv; |
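The memcmp check this replaces treats the received packet number as a plain big-endian integer, so a wrap of the high 32 bits would look like a replay; ccmp_replay_check() (and tkip_replay_check() in the TKIP patch below) instead compares with a signed 32-bit difference, in the same spirit as the kernel's time_after() handling of jiffies wrap. A standalone demonstration of that comparison (the counter values are only examples):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 when (iv32_n, iv16_n) is not strictly newer than the
     * last accepted (iv32_o, iv16_o), i.e. the frame is a replay. */
    static int replay_check(uint32_t iv32_n, uint16_t iv16_n,
                            uint32_t iv32_o, uint16_t iv16_o)
    {
        if ((int32_t)iv32_n - (int32_t)iv32_o < 0 ||
            (iv32_n == iv32_o && iv16_n <= iv16_o))
            return 1;
        return 0;
    }

    int main(void)
    {
        /* High word wraps 0xffffffff -> 0: accepted, not a replay. */
        printf("%d\n", replay_check(0x00000000, 1, 0xffffffff, 5)); /* 0 */
        /* Same high word, lower low word: replay. */
        printf("%d\n", replay_check(10, 3, 10, 7));                 /* 1 */
        return 0;
    }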
@@ -323,7 +344,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
323 | pn[5] = pos[0]; | 344 | pn[5] = pos[0]; |
324 | pos += 8; | 345 | pos += 8; |
325 | 346 | ||
326 | if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) { | 347 | if (ccmp_replay_check(pn, key->rx_pn)) { |
327 | if (net_ratelimit()) { | 348 | if (net_ratelimit()) { |
328 | printk(KERN_DEBUG "CCMP: replay detected: STA=" MAC_FMT | 349 | printk(KERN_DEBUG "CCMP: replay detected: STA=" MAC_FMT |
329 | " previous PN %02x%02x%02x%02x%02x%02x " | 350 | " previous PN %02x%02x%02x%02x%02x%02x " |
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 34dba0ba545d..f2df2f5b3e4c 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c | |||
@@ -52,8 +52,10 @@ struct ieee80211_tkip_data { | |||
52 | 52 | ||
53 | int key_idx; | 53 | int key_idx; |
54 | 54 | ||
55 | struct crypto_tfm *tfm_arc4; | 55 | struct crypto_tfm *tx_tfm_arc4; |
56 | struct crypto_tfm *tfm_michael; | 56 | struct crypto_tfm *tx_tfm_michael; |
57 | struct crypto_tfm *rx_tfm_arc4; | ||
58 | struct crypto_tfm *rx_tfm_michael; | ||
57 | 59 | ||
58 | /* scratch buffers for virt_to_page() (crypto API) */ | 60 | /* scratch buffers for virt_to_page() (crypto API) */ |
59 | u8 rx_hdr[16], tx_hdr[16]; | 61 | u8 rx_hdr[16], tx_hdr[16]; |
@@ -85,15 +87,29 @@ static void *ieee80211_tkip_init(int key_idx) | |||
85 | 87 | ||
86 | priv->key_idx = key_idx; | 88 | priv->key_idx = key_idx; |
87 | 89 | ||
88 | priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0); | 90 | priv->tx_tfm_arc4 = crypto_alloc_tfm("arc4", 0); |
89 | if (priv->tfm_arc4 == NULL) { | 91 | if (priv->tx_tfm_arc4 == NULL) { |
90 | printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " | 92 | printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " |
91 | "crypto API arc4\n"); | 93 | "crypto API arc4\n"); |
92 | goto fail; | 94 | goto fail; |
93 | } | 95 | } |
94 | 96 | ||
95 | priv->tfm_michael = crypto_alloc_tfm("michael_mic", 0); | 97 | priv->tx_tfm_michael = crypto_alloc_tfm("michael_mic", 0); |
96 | if (priv->tfm_michael == NULL) { | 98 | if (priv->tx_tfm_michael == NULL) { |
99 | printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " | ||
100 | "crypto API michael_mic\n"); | ||
101 | goto fail; | ||
102 | } | ||
103 | |||
104 | priv->rx_tfm_arc4 = crypto_alloc_tfm("arc4", 0); | ||
105 | if (priv->rx_tfm_arc4 == NULL) { | ||
106 | printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " | ||
107 | "crypto API arc4\n"); | ||
108 | goto fail; | ||
109 | } | ||
110 | |||
111 | priv->rx_tfm_michael = crypto_alloc_tfm("michael_mic", 0); | ||
112 | if (priv->rx_tfm_michael == NULL) { | ||
97 | printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " | 113 | printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " |
98 | "crypto API michael_mic\n"); | 114 | "crypto API michael_mic\n"); |
99 | goto fail; | 115 | goto fail; |
@@ -103,10 +119,14 @@ static void *ieee80211_tkip_init(int key_idx) | |||
103 | 119 | ||
104 | fail: | 120 | fail: |
105 | if (priv) { | 121 | if (priv) { |
106 | if (priv->tfm_michael) | 122 | if (priv->tx_tfm_michael) |
107 | crypto_free_tfm(priv->tfm_michael); | 123 | crypto_free_tfm(priv->tx_tfm_michael); |
108 | if (priv->tfm_arc4) | 124 | if (priv->tx_tfm_arc4) |
109 | crypto_free_tfm(priv->tfm_arc4); | 125 | crypto_free_tfm(priv->tx_tfm_arc4); |
126 | if (priv->rx_tfm_michael) | ||
127 | crypto_free_tfm(priv->rx_tfm_michael); | ||
128 | if (priv->rx_tfm_arc4) | ||
129 | crypto_free_tfm(priv->rx_tfm_arc4); | ||
110 | kfree(priv); | 130 | kfree(priv); |
111 | } | 131 | } |
112 | 132 | ||
@@ -116,10 +136,16 @@ static void *ieee80211_tkip_init(int key_idx) | |||
116 | static void ieee80211_tkip_deinit(void *priv) | 136 | static void ieee80211_tkip_deinit(void *priv) |
117 | { | 137 | { |
118 | struct ieee80211_tkip_data *_priv = priv; | 138 | struct ieee80211_tkip_data *_priv = priv; |
119 | if (_priv && _priv->tfm_michael) | 139 | if (_priv) { |
120 | crypto_free_tfm(_priv->tfm_michael); | 140 | if (_priv->tx_tfm_michael) |
121 | if (_priv && _priv->tfm_arc4) | 141 | crypto_free_tfm(_priv->tx_tfm_michael); |
122 | crypto_free_tfm(_priv->tfm_arc4); | 142 | if (_priv->tx_tfm_arc4) |
143 | crypto_free_tfm(_priv->tx_tfm_arc4); | ||
144 | if (_priv->rx_tfm_michael) | ||
145 | crypto_free_tfm(_priv->rx_tfm_michael); | ||
146 | if (_priv->rx_tfm_arc4) | ||
147 | crypto_free_tfm(_priv->rx_tfm_arc4); | ||
148 | } | ||
123 | kfree(priv); | 149 | kfree(priv); |
124 | } | 150 | } |
125 | 151 | ||
@@ -351,12 +377,25 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
351 | icv[2] = crc >> 16; | 377 | icv[2] = crc >> 16; |
352 | icv[3] = crc >> 24; | 378 | icv[3] = crc >> 24; |
353 | 379 | ||
354 | crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); | 380 | crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); |
355 | sg.page = virt_to_page(pos); | 381 | sg.page = virt_to_page(pos); |
356 | sg.offset = offset_in_page(pos); | 382 | sg.offset = offset_in_page(pos); |
357 | sg.length = len + 4; | 383 | sg.length = len + 4; |
358 | crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4); | 384 | crypto_cipher_encrypt(tkey->tx_tfm_arc4, &sg, &sg, len + 4); |
385 | |||
386 | return 0; | ||
387 | } | ||
359 | 388 | ||
389 | /* | ||
390 | * Deal with sequence counter wrapping correctly; | ||
391 | * refer to time_after() for the analogous jiffies wrapping handling. | ||
392 | */ | ||
393 | static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n, | ||
394 | u32 iv32_o, u16 iv16_o) | ||
395 | { | ||
396 | if ((s32)iv32_n - (s32)iv32_o < 0 || | ||
397 | (iv32_n == iv32_o && iv16_n <= iv16_o)) | ||
398 | return 1; | ||
360 | return 0; | 399 | return 0; |
361 | } | 400 | } |
362 | 401 | ||
@@ -414,8 +453,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
414 | iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); | 453 | iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); |
415 | pos += 8; | 454 | pos += 8; |
416 | 455 | ||
417 | if (iv32 < tkey->rx_iv32 || | 456 | if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { |
418 | (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) { | ||
419 | if (net_ratelimit()) { | 457 | if (net_ratelimit()) { |
420 | printk(KERN_DEBUG "TKIP: replay detected: STA=" MAC_FMT | 458 | printk(KERN_DEBUG "TKIP: replay detected: STA=" MAC_FMT |
421 | " previous TSC %08x%04x received TSC " | 459 | " previous TSC %08x%04x received TSC " |
@@ -434,11 +472,11 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
434 | 472 | ||
435 | plen = skb->len - hdr_len - 12; | 473 | plen = skb->len - hdr_len - 12; |
436 | 474 | ||
437 | crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); | 475 | crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); |
438 | sg.page = virt_to_page(pos); | 476 | sg.page = virt_to_page(pos); |
439 | sg.offset = offset_in_page(pos); | 477 | sg.offset = offset_in_page(pos); |
440 | sg.length = plen + 4; | 478 | sg.length = plen + 4; |
441 | crypto_cipher_decrypt(tkey->tfm_arc4, &sg, &sg, plen + 4); | 479 | crypto_cipher_decrypt(tkey->rx_tfm_arc4, &sg, &sg, plen + 4); |
442 | 480 | ||
443 | crc = ~crc32_le(~0, pos, plen); | 481 | crc = ~crc32_le(~0, pos, plen); |
444 | icv[0] = crc; | 482 | icv[0] = crc; |
@@ -472,12 +510,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
472 | return keyidx; | 510 | return keyidx; |
473 | } | 511 | } |
474 | 512 | ||
475 | static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr, | 513 | static int michael_mic(struct crypto_tfm *tfm_michael, u8 * key, u8 * hdr, |
476 | u8 * data, size_t data_len, u8 * mic) | 514 | u8 * data, size_t data_len, u8 * mic) |
477 | { | 515 | { |
478 | struct scatterlist sg[2]; | 516 | struct scatterlist sg[2]; |
479 | 517 | ||
480 | if (tkey->tfm_michael == NULL) { | 518 | if (tfm_michael == NULL) { |
481 | printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); | 519 | printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); |
482 | return -1; | 520 | return -1; |
483 | } | 521 | } |
@@ -489,10 +527,10 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr, | |||
489 | sg[1].offset = offset_in_page(data); | 527 | sg[1].offset = offset_in_page(data); |
490 | sg[1].length = data_len; | 528 | sg[1].length = data_len; |
491 | 529 | ||
492 | crypto_digest_init(tkey->tfm_michael); | 530 | crypto_digest_init(tfm_michael); |
493 | crypto_digest_setkey(tkey->tfm_michael, key, 8); | 531 | crypto_digest_setkey(tfm_michael, key, 8); |
494 | crypto_digest_update(tkey->tfm_michael, sg, 2); | 532 | crypto_digest_update(tfm_michael, sg, 2); |
495 | crypto_digest_final(tkey->tfm_michael, mic); | 533 | crypto_digest_final(tfm_michael, mic); |
496 | 534 | ||
497 | return 0; | 535 | return 0; |
498 | } | 536 | } |
@@ -528,7 +566,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) | |||
528 | if (stype & IEEE80211_STYPE_QOS_DATA) { | 566 | if (stype & IEEE80211_STYPE_QOS_DATA) { |
529 | const struct ieee80211_hdr_3addrqos *qoshdr = | 567 | const struct ieee80211_hdr_3addrqos *qoshdr = |
530 | (struct ieee80211_hdr_3addrqos *)skb->data; | 568 | (struct ieee80211_hdr_3addrqos *)skb->data; |
531 | hdr[12] = le16_to_cpu(qoshdr->qos_ctl) & IEEE80211_QCTL_TID; | 569 | hdr[12] = qoshdr->qos_ctl & cpu_to_le16(IEEE80211_QCTL_TID); |
532 | } else | 570 | } else |
533 | hdr[12] = 0; /* priority */ | 571 | hdr[12] = 0; /* priority */ |
534 | 572 | ||
@@ -550,7 +588,7 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, | |||
550 | 588 | ||
551 | michael_mic_hdr(skb, tkey->tx_hdr); | 589 | michael_mic_hdr(skb, tkey->tx_hdr); |
552 | pos = skb_put(skb, 8); | 590 | pos = skb_put(skb, 8); |
553 | if (michael_mic(tkey, &tkey->key[16], tkey->tx_hdr, | 591 | if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr, |
554 | skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) | 592 | skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) |
555 | return -1; | 593 | return -1; |
556 | 594 | ||
@@ -588,7 +626,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, | |||
588 | return -1; | 626 | return -1; |
589 | 627 | ||
590 | michael_mic_hdr(skb, tkey->rx_hdr); | 628 | michael_mic_hdr(skb, tkey->rx_hdr); |
591 | if (michael_mic(tkey, &tkey->key[24], tkey->rx_hdr, | 629 | if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr, |
592 | skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) | 630 | skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) |
593 | return -1; | 631 | return -1; |
594 | if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { | 632 | if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { |
@@ -618,14 +656,18 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 * seq, void *priv) | |||
618 | { | 656 | { |
619 | struct ieee80211_tkip_data *tkey = priv; | 657 | struct ieee80211_tkip_data *tkey = priv; |
620 | int keyidx; | 658 | int keyidx; |
621 | struct crypto_tfm *tfm = tkey->tfm_michael; | 659 | struct crypto_tfm *tfm = tkey->tx_tfm_michael; |
622 | struct crypto_tfm *tfm2 = tkey->tfm_arc4; | 660 | struct crypto_tfm *tfm2 = tkey->tx_tfm_arc4; |
661 | struct crypto_tfm *tfm3 = tkey->rx_tfm_michael; | ||
662 | struct crypto_tfm *tfm4 = tkey->rx_tfm_arc4; | ||
623 | 663 | ||
624 | keyidx = tkey->key_idx; | 664 | keyidx = tkey->key_idx; |
625 | memset(tkey, 0, sizeof(*tkey)); | 665 | memset(tkey, 0, sizeof(*tkey)); |
626 | tkey->key_idx = keyidx; | 666 | tkey->key_idx = keyidx; |
627 | tkey->tfm_michael = tfm; | 667 | tkey->tx_tfm_michael = tfm; |
628 | tkey->tfm_arc4 = tfm2; | 668 | tkey->tx_tfm_arc4 = tfm2; |
669 | tkey->rx_tfm_michael = tfm3; | ||
670 | tkey->rx_tfm_arc4 = tfm4; | ||
629 | if (len == TKIP_KEY_LEN) { | 671 | if (len == TKIP_KEY_LEN) { |
630 | memcpy(tkey->key, key, TKIP_KEY_LEN); | 672 | memcpy(tkey->key, key, TKIP_KEY_LEN); |
631 | tkey->key_set = 1; | 673 | tkey->key_set = 1; |
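ieee80211_tkip_set_key() wipes the whole ieee80211_tkip_data structure, but the four crypto_tfm allocations have to survive the wipe, so the hunk above saves the pointers, memsets, and restores them. A standalone sketch of that save/clear/restore pattern (the structure here is a simplified stand-in):

    #include <stdio.h>
    #include <string.h>

    struct key_state {
        unsigned char key[32];
        int key_set;
        void *tx_ctx;   /* stand-ins for the tx/rx crypto tfms */
        void *rx_ctx;
    };

    /* Reset all key material while keeping the long-lived contexts. */
    static void reset_keeping_contexts(struct key_state *ks)
    {
        void *tx = ks->tx_ctx;
        void *rx = ks->rx_ctx;

        memset(ks, 0, sizeof(*ks));
        ks->tx_ctx = tx;
        ks->rx_ctx = rx;
    }

    int main(void)
    {
        int dummy_tx, dummy_rx;
        struct key_state ks = { .key_set = 1,
                                .tx_ctx = &dummy_tx, .rx_ctx = &dummy_rx };

        reset_keeping_contexts(&ks);
        printf("key_set=%d, contexts kept: %d\n",
               ks.key_set, ks.tx_ctx == &dummy_tx && ks.rx_ctx == &dummy_rx);
        return 0;
    }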
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index 0ebf235f6939..b435b28857ed 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c | |||
@@ -32,7 +32,8 @@ struct prism2_wep_data { | |||
32 | u8 key[WEP_KEY_LEN + 1]; | 32 | u8 key[WEP_KEY_LEN + 1]; |
33 | u8 key_len; | 33 | u8 key_len; |
34 | u8 key_idx; | 34 | u8 key_idx; |
35 | struct crypto_tfm *tfm; | 35 | struct crypto_tfm *tx_tfm; |
36 | struct crypto_tfm *rx_tfm; | ||
36 | }; | 37 | }; |
37 | 38 | ||
38 | static void *prism2_wep_init(int keyidx) | 39 | static void *prism2_wep_init(int keyidx) |
@@ -44,13 +45,19 @@ static void *prism2_wep_init(int keyidx) | |||
44 | goto fail; | 45 | goto fail; |
45 | priv->key_idx = keyidx; | 46 | priv->key_idx = keyidx; |
46 | 47 | ||
47 | priv->tfm = crypto_alloc_tfm("arc4", 0); | 48 | priv->tx_tfm = crypto_alloc_tfm("arc4", 0); |
48 | if (priv->tfm == NULL) { | 49 | if (priv->tx_tfm == NULL) { |
49 | printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " | 50 | printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " |
50 | "crypto API arc4\n"); | 51 | "crypto API arc4\n"); |
51 | goto fail; | 52 | goto fail; |
52 | } | 53 | } |
53 | 54 | ||
55 | priv->rx_tfm = crypto_alloc_tfm("arc4", 0); | ||
56 | if (priv->rx_tfm == NULL) { | ||
57 | printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " | ||
58 | "crypto API arc4\n"); | ||
59 | goto fail; | ||
60 | } | ||
54 | /* start WEP IV from a random value */ | 61 | /* start WEP IV from a random value */ |
55 | get_random_bytes(&priv->iv, 4); | 62 | get_random_bytes(&priv->iv, 4); |
56 | 63 | ||
@@ -58,8 +65,10 @@ static void *prism2_wep_init(int keyidx) | |||
58 | 65 | ||
59 | fail: | 66 | fail: |
60 | if (priv) { | 67 | if (priv) { |
61 | if (priv->tfm) | 68 | if (priv->tx_tfm) |
62 | crypto_free_tfm(priv->tfm); | 69 | crypto_free_tfm(priv->tx_tfm); |
70 | if (priv->rx_tfm) | ||
71 | crypto_free_tfm(priv->rx_tfm); | ||
63 | kfree(priv); | 72 | kfree(priv); |
64 | } | 73 | } |
65 | return NULL; | 74 | return NULL; |
@@ -68,8 +77,12 @@ static void *prism2_wep_init(int keyidx) | |||
68 | static void prism2_wep_deinit(void *priv) | 77 | static void prism2_wep_deinit(void *priv) |
69 | { | 78 | { |
70 | struct prism2_wep_data *_priv = priv; | 79 | struct prism2_wep_data *_priv = priv; |
71 | if (_priv && _priv->tfm) | 80 | if (_priv) { |
72 | crypto_free_tfm(_priv->tfm); | 81 | if (_priv->tx_tfm) |
82 | crypto_free_tfm(_priv->tx_tfm); | ||
83 | if (_priv->rx_tfm) | ||
84 | crypto_free_tfm(_priv->rx_tfm); | ||
85 | } | ||
73 | kfree(priv); | 86 | kfree(priv); |
74 | } | 87 | } |
75 | 88 | ||
@@ -151,11 +164,11 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
151 | icv[2] = crc >> 16; | 164 | icv[2] = crc >> 16; |
152 | icv[3] = crc >> 24; | 165 | icv[3] = crc >> 24; |
153 | 166 | ||
154 | crypto_cipher_setkey(wep->tfm, key, klen); | 167 | crypto_cipher_setkey(wep->tx_tfm, key, klen); |
155 | sg.page = virt_to_page(pos); | 168 | sg.page = virt_to_page(pos); |
156 | sg.offset = offset_in_page(pos); | 169 | sg.offset = offset_in_page(pos); |
157 | sg.length = len + 4; | 170 | sg.length = len + 4; |
158 | crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); | 171 | crypto_cipher_encrypt(wep->tx_tfm, &sg, &sg, len + 4); |
159 | 172 | ||
160 | return 0; | 173 | return 0; |
161 | } | 174 | } |
@@ -194,11 +207,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
194 | /* Apply RC4 to data and compute CRC32 over decrypted data */ | 207 | /* Apply RC4 to data and compute CRC32 over decrypted data */ |
195 | plen = skb->len - hdr_len - 8; | 208 | plen = skb->len - hdr_len - 8; |
196 | 209 | ||
197 | crypto_cipher_setkey(wep->tfm, key, klen); | 210 | crypto_cipher_setkey(wep->rx_tfm, key, klen); |
198 | sg.page = virt_to_page(pos); | 211 | sg.page = virt_to_page(pos); |
199 | sg.offset = offset_in_page(pos); | 212 | sg.offset = offset_in_page(pos); |
200 | sg.length = plen + 4; | 213 | sg.length = plen + 4; |
201 | crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4); | 214 | crypto_cipher_decrypt(wep->rx_tfm, &sg, &sg, plen + 4); |
202 | 215 | ||
203 | crc = ~crc32_le(~0, pos, plen); | 216 | crc = ~crc32_le(~0, pos, plen); |
204 | icv[0] = crc; | 217 | icv[0] = crc; |
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index d60358d702d7..770704183a1b 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -1078,13 +1078,16 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1078 | 1078 | ||
1079 | while (length >= sizeof(*info_element)) { | 1079 | while (length >= sizeof(*info_element)) { |
1080 | if (sizeof(*info_element) + info_element->len > length) { | 1080 | if (sizeof(*info_element) + info_element->len > length) { |
1081 | IEEE80211_DEBUG_MGMT("Info elem: parse failed: " | 1081 | IEEE80211_ERROR("Info elem: parse failed: " |
1082 | "info_element->len + 2 > left : " | 1082 | "info_element->len + 2 > left : " |
1083 | "info_element->len+2=%zd left=%d, id=%d.\n", | 1083 | "info_element->len+2=%zd left=%d, id=%d.\n", |
1084 | info_element->len + | 1084 | info_element->len + |
1085 | sizeof(*info_element), | 1085 | sizeof(*info_element), |
1086 | length, info_element->id); | 1086 | length, info_element->id); |
1087 | return 1; | 1087 | /* We stop processing but don't return an error here |
1088 | * because some misbehaving APs break this rule, e.g. the | ||
1089 | * Orinoco AP1000. */ | ||
1090 | break; | ||
1088 | } | 1091 | } |
1089 | 1092 | ||
1090 | switch (info_element->id) { | 1093 | switch (info_element->id) { |
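Rather than rejecting the whole management frame when an information element claims more bytes than remain, the loop now logs the problem and stops at the malformed element, keeping everything parsed so far, because some APs (e.g. the Orinoco AP1000) emit such elements. A standalone sketch of the tolerant TLV walk (the element layout is simplified):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct info_element {
        uint8_t id;
        uint8_t len;
        uint8_t data[];
    };

    /* Returns the number of well-formed elements; a truncated element
     * stops the walk but does not discard what was already accepted. */
    static int parse_elements(const uint8_t *buf, size_t length)
    {
        size_t off = 0;
        int parsed = 0;

        while (length - off >= sizeof(struct info_element)) {
            const struct info_element *ie =
                (const struct info_element *)(buf + off);

            if (sizeof(*ie) + ie->len > length - off)
                break;   /* malformed tail: keep what we have */

            parsed++;
            off += sizeof(*ie) + ie->len;
        }
        return parsed;
    }

    int main(void)
    {
        /* SSID element (id 0, len 4) followed by a truncated element. */
        uint8_t buf[] = { 0, 4, 'l', 'a', 'n', '1', 1, 200 };

        printf("parsed %d element(s)\n", parse_elements(buf, sizeof(buf)));
        return 0;
    }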
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index bf042139c7ab..ae254497ba3d 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c | |||
@@ -337,7 +337,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
337 | hdr_len += 2; | 337 | hdr_len += 2; |
338 | 338 | ||
339 | skb->priority = ieee80211_classify(skb); | 339 | skb->priority = ieee80211_classify(skb); |
340 | header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID; | 340 | header.qos_ctl |= cpu_to_le16(skb->priority & IEEE80211_QCTL_TID); |
341 | } | 341 | } |
342 | header.frame_ctl = cpu_to_le16(fc); | 342 | header.frame_ctl = cpu_to_le16(fc); |
343 | 343 | ||
@@ -532,13 +532,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
532 | return 0; | 532 | return 0; |
533 | } | 533 | } |
534 | 534 | ||
535 | if (ret == NETDEV_TX_BUSY) { | ||
536 | printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; " | ||
537 | "driver should report queue full via " | ||
538 | "ieee_device->is_queue_full.\n", | ||
539 | ieee->dev->name); | ||
540 | } | ||
541 | |||
542 | ieee80211_txb_free(txb); | 535 | ieee80211_txb_free(txb); |
543 | } | 536 | } |
544 | 537 | ||