-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  Makefile | 6
-rw-r--r--  arch/arm/mach-at91rm9200/at91rm9200.c | 45
-rw-r--r--  arch/arm/mach-at91rm9200/generic.h | 8
-rw-r--r--  arch/arm/mach-at91rm9200/irq.c | 70
-rw-r--r--  arch/arm/mach-pnx4008/core.c | 2
-rw-r--r--  arch/arm/mach-pnx4008/dma.c | 1
-rw-r--r--  arch/arm/mach-pnx4008/irq.c | 22
-rw-r--r--  arch/arm/mach-pnx4008/time.c | 8
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 39
-rw-r--r--  arch/sparc64/kernel/prom.c | 2
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c | 10
-rw-r--r--  arch/sparc64/kernel/time.c | 5
-rw-r--r--  drivers/net/8139cp.c | 35
-rw-r--r--  drivers/net/8139too.c | 34
-rw-r--r--  drivers/net/b44.c | 27
-rw-r--r--  drivers/net/bnx2.c | 30
-rw-r--r--  drivers/net/cassini.c | 25
-rw-r--r--  drivers/net/declance.c | 7
-rw-r--r--  drivers/net/dl2k.c | 43
-rw-r--r--  drivers/net/eepro100.c | 6
-rw-r--r--  drivers/net/epic100.c | 93
-rw-r--r--  drivers/net/fealnx.c | 36
-rw-r--r--  drivers/net/gt96100eth.c | 3
-rw-r--r--  drivers/net/gt96100eth.h | 2
-rw-r--r--  drivers/net/hamachi.c | 16
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 17
-rw-r--r--  drivers/net/natsemi.c | 117
-rw-r--r--  drivers/net/ne2k-pci.c | 9
-rw-r--r--  drivers/net/ni5010.c | 52
-rw-r--r--  drivers/net/ns83820.c | 41
-rw-r--r--  drivers/net/pci-skeleton.c | 19
-rw-r--r--  drivers/net/pcnet32.c | 520
-rw-r--r--  drivers/net/phy/cicada.c | 42
-rw-r--r--  drivers/net/r8169.c | 40
-rw-r--r--  drivers/net/starfire.c | 123
-rw-r--r--  drivers/net/sundance.c | 106
-rw-r--r--  drivers/net/tulip/winbond-840.c | 29
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c | 27
-rw-r--r--  drivers/net/via-rhine.c | 121
-rw-r--r--  drivers/net/via-velocity.c | 102
-rw-r--r--  drivers/net/via-velocity.h | 4
-rw-r--r--  drivers/net/wan/Kconfig | 12
-rw-r--r--  drivers/net/wan/Makefile | 1
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/Makefile | 1
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 31
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 24
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_radio.c | 7
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_xmit.c | 5
-rw-r--r--  drivers/net/wireless/hostap/hostap_plx.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/Kconfig | 19
-rw-r--r--  drivers/net/wireless/zd1211rw/Makefile | 11
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 1615
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 825
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_def.h | 48
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.c | 191
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.h | 85
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 1057
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h | 190
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_netdev.c | 267
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_netdev.h | 45
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.c | 151
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.h | 82
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_al2230.c | 308
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_rf2959.c | 279
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_types.h | 71
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1316
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.h | 240
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_util.c | 82
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_util.h | 29
-rw-r--r--  drivers/net/yellowfin.c | 39
-rw-r--r--  drivers/scsi/ahci.c | 17
-rw-r--r--  drivers/scsi/libata-core.c | 289
-rw-r--r--  drivers/scsi/libata-eh.c | 405
-rw-r--r--  drivers/scsi/libata-scsi.c | 124
-rw-r--r--  drivers/scsi/sata_sil.c | 105
-rw-r--r--  drivers/scsi/sata_sil24.c | 134
-rw-r--r--  drivers/scsi/sata_vsc.c | 2
-rw-r--r--  drivers/serial/at91_serial.c | 5
-rw-r--r--  fs/lockd/clntproc.c | 26
-rw-r--r--  fs/locks.c | 23
-rw-r--r--  fs/nfs/dir.c | 4
-rw-r--r--  fs/nfs/direct.c | 435
-rw-r--r--  fs/nfs/nfs4proc.c | 74
-rw-r--r--  fs/nfs/write.c | 20
-rw-r--r--  include/asm-arm/arch-at91rm9200/irqs.h | 8
-rw-r--r--  include/asm-powerpc/cputime.h | 18
-rw-r--r--  include/asm-sparc64/dma-mapping.h | 14
-rw-r--r--  include/linux/fs.h | 1
-rw-r--r--  include/linux/libata.h | 85
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/pci_ids.h | 7
-rw-r--r--  include/net/ieee80211softmac.h | 1
-rw-r--r--  net/ieee80211/ieee80211_rx.c | 4
-rw-r--r--  net/ieee80211/ieee80211_tx.c | 15
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c | 31
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 4
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_io.c | 3
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 36
-rw-r--r--  net/sched/act_api.c | 18
-rw-r--r--  net/sunrpc/xdr.c | 3
103 files changed, 9128 insertions(+), 1772 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 801829dee742..cf8fd4589748 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2057,9 +2057,10 @@ L: linux-kernel@vger.kernel.org
 S:	Maintained
 
 NI5010 NETWORK DRIVER
-P:	Jan-Pascal van Best and Andreas Mohr
-M:	Jan-Pascal van Best <jvbest@qv3pluto.leidenuniv.nl>
-M:	Andreas Mohr <100.30936@germany.net>
+P:	Jan-Pascal van Best
+M:	janpascal@vanbest.org
+P:	Andreas Mohr
+M:	andi@lisas.de
 L:	netdev@vger.kernel.org
 S:	Maintained
 
diff --git a/Makefile b/Makefile
index 11a850cffd3d..7c010f3325a9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
-SUBLEVEL = 17
-EXTRAVERSION =
+SUBLEVEL = 18
+EXTRAVERSION = -rc1
 NAME=Crazed Snow-Weasel
 
 # *DOCUMENTATION*
@@ -528,7 +528,7 @@ export MODLIB
 
 ifdef INSTALL_MOD_STRIP
 ifeq ($(INSTALL_MOD_STRIP),1)
-mod_strip_cmd = $STRIP) --strip-debug
+mod_strip_cmd = $(STRIP) --strip-debug
 else
 mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP)
 endif # INSTALL_MOD_STRIP=1
diff --git a/arch/arm/mach-at91rm9200/at91rm9200.c b/arch/arm/mach-at91rm9200/at91rm9200.c
index 7e1d072bdd80..0985b1c42c7c 100644
--- a/arch/arm/mach-at91rm9200/at91rm9200.c
+++ b/arch/arm/mach-at91rm9200/at91rm9200.c
@@ -107,3 +107,48 @@ void __init at91rm9200_map_io(void)
 	iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
 }
 
+/*
+ * The default interrupt priority levels (0 = lowest, 7 = highest).
+ */
+static unsigned int at91rm9200_default_irq_priority[NR_AIC_IRQS] __initdata = {
+	7,	/* Advanced Interrupt Controller (FIQ) */
+	7,	/* System Peripherals */
+	0,	/* Parallel IO Controller A */
+	0,	/* Parallel IO Controller B */
+	0,	/* Parallel IO Controller C */
+	0,	/* Parallel IO Controller D */
+	6,	/* USART 0 */
+	6,	/* USART 1 */
+	6,	/* USART 2 */
+	6,	/* USART 3 */
+	0,	/* Multimedia Card Interface */
+	4,	/* USB Device Port */
+	0,	/* Two-Wire Interface */
+	6,	/* Serial Peripheral Interface */
+	5,	/* Serial Synchronous Controller 0 */
+	5,	/* Serial Synchronous Controller 1 */
+	5,	/* Serial Synchronous Controller 2 */
+	0,	/* Timer Counter 0 */
+	0,	/* Timer Counter 1 */
+	0,	/* Timer Counter 2 */
+	0,	/* Timer Counter 3 */
+	0,	/* Timer Counter 4 */
+	0,	/* Timer Counter 5 */
+	3,	/* USB Host port */
+	3,	/* Ethernet MAC */
+	0,	/* Advanced Interrupt Controller (IRQ0) */
+	0,	/* Advanced Interrupt Controller (IRQ1) */
+	0,	/* Advanced Interrupt Controller (IRQ2) */
+	0,	/* Advanced Interrupt Controller (IRQ3) */
+	0,	/* Advanced Interrupt Controller (IRQ4) */
+	0,	/* Advanced Interrupt Controller (IRQ5) */
+	0	/* Advanced Interrupt Controller (IRQ6) */
+};
+
+void __init at91rm9200_init_irq(unsigned int priority[NR_AIC_IRQS])
+{
+	if (!priority)
+		priority = at91rm9200_default_irq_priority;
+
+	at91_aic_init(priority);
+}
diff --git a/arch/arm/mach-at91rm9200/generic.h b/arch/arm/mach-at91rm9200/generic.h
index f0d969d7d874..7979d8ab7e07 100644
--- a/arch/arm/mach-at91rm9200/generic.h
+++ b/arch/arm/mach-at91rm9200/generic.h
@@ -8,13 +8,19 @@
  * published by the Free Software Foundation.
  */
 
-void at91_gpio_irq_setup(unsigned banks);
+ /* Interrupts */
+extern void __init at91rm9200_init_irq(unsigned int priority[]);
+extern void __init at91_aic_init(unsigned int priority[]);
+extern void __init at91_gpio_irq_setup(unsigned banks);
 
+ /* Timer */
 struct sys_timer;
 extern struct sys_timer at91rm9200_timer;
 
+ /* Memory Map */
 extern void __init at91rm9200_map_io(void);
 
+ /* Clocks */
 extern int __init at91_clock_init(unsigned long main_clock);
 struct device;
 extern void __init at91_clock_associate(const char *id, struct device *dev, const char *func);
diff --git a/arch/arm/mach-at91rm9200/irq.c b/arch/arm/mach-at91rm9200/irq.c
index dcd560dbcfb7..9b0911320417 100644
--- a/arch/arm/mach-at91rm9200/irq.c
+++ b/arch/arm/mach-at91rm9200/irq.c
@@ -36,58 +36,20 @@
 
 #include "generic.h"
 
-/*
- * The default interrupt priority levels (0 = lowest, 7 = highest).
- */
-static unsigned int at91rm9200_default_irq_priority[NR_AIC_IRQS] __initdata = {
-	7,	/* Advanced Interrupt Controller */
-	7,	/* System Peripheral */
-	0,	/* Parallel IO Controller A */
-	0,	/* Parallel IO Controller B */
-	0,	/* Parallel IO Controller C */
-	0,	/* Parallel IO Controller D */
-	6,	/* USART 0 */
-	6,	/* USART 1 */
-	6,	/* USART 2 */
-	6,	/* USART 3 */
-	0,	/* Multimedia Card Interface */
-	4,	/* USB Device Port */
-	0,	/* Two-Wire Interface */
-	6,	/* Serial Peripheral Interface */
-	5,	/* Serial Synchronous Controller */
-	5,	/* Serial Synchronous Controller */
-	5,	/* Serial Synchronous Controller */
-	0,	/* Timer Counter 0 */
-	0,	/* Timer Counter 1 */
-	0,	/* Timer Counter 2 */
-	0,	/* Timer Counter 3 */
-	0,	/* Timer Counter 4 */
-	0,	/* Timer Counter 5 */
-	3,	/* USB Host port */
-	3,	/* Ethernet MAC */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0	/* Advanced Interrupt Controller */
-};
 
-
-static void at91rm9200_mask_irq(unsigned int irq)
+static void at91_aic_mask_irq(unsigned int irq)
 {
 	/* Disable interrupt on AIC */
 	at91_sys_write(AT91_AIC_IDCR, 1 << irq);
 }
 
-static void at91rm9200_unmask_irq(unsigned int irq)
+static void at91_aic_unmask_irq(unsigned int irq)
 {
 	/* Enable interrupt on AIC */
 	at91_sys_write(AT91_AIC_IECR, 1 << irq);
 }
 
-static int at91rm9200_irq_type(unsigned irq, unsigned type)
+static int at91_aic_set_type(unsigned irq, unsigned type)
 {
 	unsigned int smr, srctype;
 
@@ -122,7 +84,7 @@ static int at91rm9200_irq_type(unsigned irq, unsigned type)
 static u32 wakeups;
 static u32 backups;
 
-static int at91rm9200_irq_set_wake(unsigned irq, unsigned value)
+static int at91_aic_set_wake(unsigned irq, unsigned value)
 {
 	if (unlikely(irq >= 32))
 		return -EINVAL;
@@ -149,28 +111,24 @@ void at91_irq_resume(void)
 }
 
 #else
-#define at91rm9200_irq_set_wake	NULL
+#define at91_aic_set_wake	NULL
 #endif
 
-static struct irqchip at91rm9200_irq_chip = {
-	.ack		= at91rm9200_mask_irq,
-	.mask		= at91rm9200_mask_irq,
-	.unmask		= at91rm9200_unmask_irq,
-	.set_type	= at91rm9200_irq_type,
-	.set_wake	= at91rm9200_irq_set_wake,
+static struct irqchip at91_aic_chip = {
+	.ack		= at91_aic_mask_irq,
+	.mask		= at91_aic_mask_irq,
+	.unmask		= at91_aic_unmask_irq,
+	.set_type	= at91_aic_set_type,
+	.set_wake	= at91_aic_set_wake,
 };
 
 /*
  * Initialize the AIC interrupt controller.
  */
-void __init at91rm9200_init_irq(unsigned int priority[NR_AIC_IRQS])
+void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
 {
 	unsigned int i;
 
-	/* No priority list specified for this board -> use defaults */
-	if (priority == NULL)
-		priority = at91rm9200_default_irq_priority;
-
 	/*
 	 * The IVR is used by macro get_irqnr_and_base to read and verify.
 	 * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
@@ -178,10 +136,10 @@ void __init at91rm9200_init_irq(unsigned int priority[NR_AIC_IRQS])
 	for (i = 0; i < NR_AIC_IRQS; i++) {
 		/* Put irq number in Source Vector Register: */
 		at91_sys_write(AT91_AIC_SVR(i), i);
-		/* Store the Source Mode Register as defined in table above */
+		/* Active Low interrupt, with the specified priority */
 		at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
 
-		set_irq_chip(i, &at91rm9200_irq_chip);
+		set_irq_chip(i, &at91_aic_chip);
 		set_irq_handler(i, do_level_IRQ);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 
diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c
index ba91daad64fb..3d73c1e93752 100644
--- a/arch/arm/mach-pnx4008/core.c
+++ b/arch/arm/mach-pnx4008/core.c
@@ -27,7 +27,6 @@
 #include <linux/spi/spi.h>
 
 #include <asm/hardware.h>
-#include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
@@ -36,7 +35,6 @@
 #include <asm/system.h>
 
 #include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
 #include <asm/mach/map.h>
 #include <asm/mach/time.h>
 
diff --git a/arch/arm/mach-pnx4008/dma.c b/arch/arm/mach-pnx4008/dma.c
index 981aa9dcdede..ec01574f88ac 100644
--- a/arch/arm/mach-pnx4008/dma.c
+++ b/arch/arm/mach-pnx4008/dma.c
@@ -23,7 +23,6 @@
 #include <linux/clk.h>
 
 #include <asm/system.h>
-#include <asm/irq.h>
 #include <asm/hardware.h>
 #include <asm/dma.h>
 #include <asm/dma-mapping.h>
diff --git a/arch/arm/mach-pnx4008/irq.c b/arch/arm/mach-pnx4008/irq.c
index 9b0a8e084e99..3a4bcf3d91fa 100644
--- a/arch/arm/mach-pnx4008/irq.c
+++ b/arch/arm/mach-pnx4008/irq.c
@@ -22,8 +22,8 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/device.h>
+#include <linux/irq.h>
 #include <asm/hardware.h>
-#include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
@@ -96,26 +96,24 @@ void __init pnx4008_init_irq(void)
 {
 	unsigned int i;
 
-	/* configure and enable IRQ 0,1,30,31 (cascade interrupts) mask all others */
+	/* configure IRQ's */
+	for (i = 0; i < NR_IRQS; i++) {
+		set_irq_flags(i, IRQF_VALID);
+		set_irq_chip(i, &pnx4008_irq_chip);
+		pnx4008_set_irq_type(i, pnx4008_irq_type[i]);
+	}
+
+	/* configure and enable IRQ 0,1,30,31 (cascade interrupts) */
 	pnx4008_set_irq_type(SUB1_IRQ_N, pnx4008_irq_type[SUB1_IRQ_N]);
 	pnx4008_set_irq_type(SUB2_IRQ_N, pnx4008_irq_type[SUB2_IRQ_N]);
 	pnx4008_set_irq_type(SUB1_FIQ_N, pnx4008_irq_type[SUB1_FIQ_N]);
 	pnx4008_set_irq_type(SUB2_FIQ_N, pnx4008_irq_type[SUB2_FIQ_N]);
 
+	/* mask all others */
 	__raw_writel((1 << SUB2_FIQ_N) | (1 << SUB1_FIQ_N) |
 			(1 << SUB2_IRQ_N) | (1 << SUB1_IRQ_N),
 		INTC_ER(MAIN_BASE_INT));
 	__raw_writel(0, INTC_ER(SIC1_BASE_INT));
 	__raw_writel(0, INTC_ER(SIC2_BASE_INT));
-
-	/* configure all other IRQ's */
-	for (i = 0; i < NR_IRQS; i++) {
-		if (i == SUB2_FIQ_N || i == SUB1_FIQ_N ||
-			i == SUB2_IRQ_N || i == SUB1_IRQ_N)
-			continue;
-		set_irq_flags(i, IRQF_VALID);
-		set_irq_chip(i, &pnx4008_irq_chip);
-		pnx4008_set_irq_type(i, pnx4008_irq_type[i]);
-	}
 }
 
diff --git a/arch/arm/mach-pnx4008/time.c b/arch/arm/mach-pnx4008/time.c
index 888bf6cfba8a..756228ddd035 100644
--- a/arch/arm/mach-pnx4008/time.c
+++ b/arch/arm/mach-pnx4008/time.c
@@ -20,17 +20,15 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/irq.h>
 
 #include <asm/system.h>
 #include <asm/hardware.h>
 #include <asm/io.h>
 #include <asm/leds.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
 #include <asm/mach/time.h>
-
-#include <linux/time.h>
-#include <linux/timex.h>
 #include <asm/errno.h>
 
 /*! Note: all timers are UPCOUNTING */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7d31d7cc392d..9cecebaa0360 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -405,20 +405,22 @@ static void mpic_unmask_irq(unsigned int irq)
 	unsigned int loops = 100000;
 	struct mpic *mpic = mpic_from_irq(irq);
 	unsigned int src = mpic_irq_to_hw(irq);
+	unsigned long flags;
 
 	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
 
+	spin_lock_irqsave(&mpic_lock, flags);
 	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
 		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
 		       ~MPIC_VECPRI_MASK);
-
 	/* make sure mask gets to controller before we return to user */
 	do {
 		if (!loops--) {
 			printk(KERN_ERR "mpic_enable_irq timeout\n");
 			break;
 		}
 	} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
+	spin_unlock_irqrestore(&mpic_lock, flags);
 }
 
 static void mpic_mask_irq(unsigned int irq)
@@ -426,9 +428,11 @@ static void mpic_mask_irq(unsigned int irq)
 	unsigned int loops = 100000;
 	struct mpic *mpic = mpic_from_irq(irq);
 	unsigned int src = mpic_irq_to_hw(irq);
+	unsigned long flags;
 
 	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
 
+	spin_lock_irqsave(&mpic_lock, flags);
 	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
 		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
 		       MPIC_VECPRI_MASK);
@@ -440,6 +444,7 @@ static void mpic_mask_irq(unsigned int irq)
 			break;
 		}
 	} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
+	spin_unlock_irqrestore(&mpic_lock, flags);
 }
 
 static void mpic_end_irq(unsigned int irq)
@@ -624,9 +629,10 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
 	struct irq_desc *desc = get_irq_desc(virq);
 	struct irq_chip *chip;
 	struct mpic *mpic = h->host_data;
-	unsigned int vecpri = MPIC_VECPRI_SENSE_LEVEL |
+	u32 v, vecpri = MPIC_VECPRI_SENSE_LEVEL |
 		MPIC_VECPRI_POLARITY_NEGATIVE;
 	int level;
+	unsigned long iflags;
 
 	pr_debug("mpic: map virq %d, hwirq 0x%lx, flags: 0x%x\n",
 		 virq, hw, flags);
@@ -668,11 +674,21 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
 	}
 #endif
 
-	/* Reconfigure irq */
-	vecpri |= MPIC_VECPRI_MASK | hw | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
-	mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri);
-
-	pr_debug("mpic: mapping as IRQ\n");
+	/* Reconfigure irq. We must preserve the mask bit as we can be called
+	 * while the interrupt is still active (This may change in the future
+	 * but for now, it is the case).
+	 */
+	spin_lock_irqsave(&mpic_lock, iflags);
+	v = mpic_irq_read(hw, MPIC_IRQ_VECTOR_PRI);
+	vecpri = (v &
+		~(MPIC_VECPRI_POLARITY_MASK | MPIC_VECPRI_SENSE_MASK)) |
+		vecpri;
+	if (vecpri != v)
+		mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri);
+	spin_unlock_irqrestore(&mpic_lock, iflags);
+
+	pr_debug("mpic: mapping as IRQ, vecpri = 0x%08x (was 0x%08x)\n",
+		 vecpri, v);
 
 	set_irq_chip_data(virq, mpic);
 	set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
@@ -904,8 +920,8 @@ void __init mpic_init(struct mpic *mpic)
 
 		/* do senses munging */
 		if (mpic->senses && i < mpic->senses_count)
-			vecpri = mpic_flags_to_vecpri(mpic->senses[i],
+			vecpri |= mpic_flags_to_vecpri(mpic->senses[i],
 						      &level);
 		else
 			vecpri |= MPIC_VECPRI_SENSE_LEVEL;
 
@@ -955,14 +971,17 @@ void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
 
 void __init mpic_set_serial_int(struct mpic *mpic, int enable)
 {
+	unsigned long flags;
 	u32 v;
 
+	spin_lock_irqsave(&mpic_lock, flags);
 	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
 	if (enable)
 		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
 	else
 		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
 	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
+	spin_unlock_irqrestore(&mpic_lock, flags);
 }
 
 void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index fa484d4f241e..99daeee4209d 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1032,7 +1032,9 @@ static void sun4v_vdev_irq_trans_init(struct device_node *dp)
 static void irq_trans_init(struct device_node *dp)
 {
 	const char *model;
+#ifdef CONFIG_PCI
 	int i;
+#endif
 
 	model = of_get_property(dp, "model", NULL);
 	if (!model)
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 4173de425f09..237524d87cab 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -124,11 +124,6 @@ EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
 
-#if defined(CONFIG_MCOUNT)
-extern void _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif
-
 /* CPU online map and active count. */
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
@@ -136,6 +131,11 @@ EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(smp_call_function);
 #endif /* CONFIG_SMP */
 
+#if defined(CONFIG_MCOUNT)
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
 EXPORT_SYMBOL(sparc64_get_clock_tick);
 
 /* semaphores */
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 8dcbfbffacc9..b43de647ba73 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -788,12 +788,15 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id
 	if (!regs)
 		return -ENOMEM;
 
+#ifdef CONFIG_PCI
 	if (!strcmp(model, "ds1287") ||
 	    !strcmp(model, "m5819") ||
 	    !strcmp(model, "m5819p") ||
 	    !strcmp(model, "m5823")) {
 		ds1287_regs = (unsigned long) regs;
-	} else if (model[5] == '0' && model[6] == '2') {
+	} else
+#endif
+	if (model[5] == '0' && model[6] == '2') {
 		mstk48t02_regs = regs;
 	} else if(model[5] == '0' && model[6] == '8') {
 		mstk48t08_regs = regs;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 1959654cbec8..d2150baa7e35 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1836,9 +1836,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
-		printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
-		       pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
-		printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
+		dev_err(&pdev->dev,
+			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
+			pdev->vendor, pdev->device, pci_rev);
+		dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
 		return -ENODEV;
 	}
 
@@ -1876,14 +1877,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	pciaddr = pci_resource_start(pdev, 1);
 	if (!pciaddr) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "no MMIO resource\n");
 		goto err_out_res;
 	}
 	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
-		       (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
+		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
+			(unsigned long long)pci_resource_len(pdev, 1));
 		goto err_out_res;
 	}
 
@@ -1897,14 +1897,15 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
-			printk(KERN_ERR PFX "No usable DMA configuration, "
-			       "aborting.\n");
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting.\n");
 			goto err_out_res;
 		}
 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
-			printk(KERN_ERR PFX "No usable consistent DMA configuration, "
-			       "aborting.\n");
+			dev_err(&pdev->dev,
+				"No usable consistent DMA configuration, "
+				"aborting.\n");
 			goto err_out_res;
 		}
 	}
@@ -1915,9 +1916,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	regs = ioremap(pciaddr, CP_REGS_SIZE);
 	if (!regs) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%llx) on pci dev %s\n",
+		dev_err(&pdev->dev, "Cannot map PCI MMIO (%lx@%lx)\n",
 		       (unsigned long long)pci_resource_len(pdev, 1),
-		       (unsigned long long)pciaddr, pci_name(pdev));
+		       (unsigned long long)pciaddr);
 		goto err_out_res;
 	}
 	dev->base_addr = (unsigned long) regs;
@@ -1986,7 +1987,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* enable busmastering and memory-write-invalidate */
 	pci_set_master(pdev);
 
-	if (cp->wol_enabled) cp_set_d3_state (cp);
+	if (cp->wol_enabled)
+		cp_set_d3_state (cp);
 
 	return 0;
 
@@ -2011,7 +2013,8 @@ static void cp_remove_one (struct pci_dev *pdev)
 	BUG_ON(!dev);
 	unregister_netdev(dev);
 	iounmap(cp->regs);
-	if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
+	if (cp->wol_enabled)
+		pci_set_power_state (pdev, PCI_D0);
 	pci_release_regions(pdev);
 	pci_clear_mwi(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 717506b2b13a..cd9718512d1c 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -768,7 +768,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 	/* dev and priv zeroed in alloc_etherdev */
 	dev = alloc_etherdev (sizeof (*tp));
 	if (dev == NULL) {
-		printk (KERN_ERR PFX "%s: Unable to alloc new net device\n", pci_name(pdev));
+		dev_err(&pdev->dev, "Unable to alloc new net device\n");
 		return -ENOMEM;
 	}
 	SET_MODULE_OWNER(dev);
@@ -800,31 +800,31 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 #ifdef USE_IO_OPS
 	/* make sure PCI base addr 0 is PIO */
 	if (!(pio_flags & IORESOURCE_IO)) {
-		printk (KERN_ERR PFX "%s: region #0 not a PIO resource, aborting\n", pci_name(pdev));
+		dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
 		rc = -ENODEV;
 		goto err_out;
 	}
 	/* check for weird/broken PCI region reporting */
 	if (pio_len < RTL_MIN_IO_SIZE) {
-		printk (KERN_ERR PFX "%s: Invalid PCI I/O region size(s), aborting\n", pci_name(pdev));
+		dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
 		rc = -ENODEV;
 		goto err_out;
 	}
 #else
 	/* make sure PCI base addr 1 is MMIO */
 	if (!(mmio_flags & IORESOURCE_MEM)) {
-		printk (KERN_ERR PFX "%s: region #1 not an MMIO resource, aborting\n", pci_name(pdev));
+		dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
 		rc = -ENODEV;
 		goto err_out;
 	}
 	if (mmio_len < RTL_MIN_IO_SIZE) {
-		printk (KERN_ERR PFX "%s: Invalid PCI mem region size(s), aborting\n", pci_name(pdev));
+		dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
 		rc = -ENODEV;
 		goto err_out;
 	}
 #endif
 
-	rc = pci_request_regions (pdev, "8139too");
+	rc = pci_request_regions (pdev, DRV_NAME);
 	if (rc)
 		goto err_out;
 	disable_dev_on_err = 1;
@@ -835,7 +835,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 #ifdef USE_IO_OPS
 	ioaddr = ioport_map(pio_start, pio_len);
 	if (!ioaddr) {
-		printk (KERN_ERR PFX "%s: cannot map PIO, aborting\n", pci_name(pdev));
+		dev_err(&pdev->dev, "cannot map PIO, aborting\n");
 		rc = -EIO;
 		goto err_out;
 	}
@@ -846,7 +846,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 	/* ioremap MMIO region */
 	ioaddr = pci_iomap(pdev, 1, 0);
 	if (ioaddr == NULL) {
-		printk (KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev));
+		dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
 		rc = -EIO;
 		goto err_out;
 	}
@@ -860,8 +860,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 
 	/* check for missing/broken hardware */
 	if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
-		printk (KERN_ERR PFX "%s: Chip not responding, ignoring board\n",
-			pci_name(pdev));
+		dev_err(&pdev->dev, "Chip not responding, ignoring board\n");
 		rc = -EIO;
 		goto err_out;
 	}
@@ -875,9 +874,10 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 	}
 
 	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
-	printk (KERN_DEBUG PFX "%s: unknown chip version, assuming RTL-8139\n",
-		pci_name(pdev));
-	printk (KERN_DEBUG PFX "%s: TxConfig = 0x%lx\n", pci_name(pdev), RTL_R32 (TxConfig));
+	dev_printk (KERN_DEBUG, &pdev->dev,
+		    "unknown chip version, assuming RTL-8139\n");
+	dev_printk (KERN_DEBUG, &pdev->dev,
+		    "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
 	tp->chipset = 0;
 
 match:
@@ -954,9 +954,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 
 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) {
-		printk(KERN_INFO PFX "pci dev %s (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
-		       pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
-		printk(KERN_INFO PFX "Use the \"8139cp\" driver for improved performance and stability.\n");
+		dev_info(&pdev->dev,
+			 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
+			 pdev->vendor, pdev->device, pci_rev);
+		dev_info(&pdev->dev,
+			 "Use the \"8139cp\" driver for improved performance and stability.\n");
 	}
 
 	i = rtl8139_init_board (pdev, &dev);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index cd98d31dee8c..bea0fc0ede2f 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2120,13 +2120,14 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		printk(KERN_ERR PFX "Cannot enable PCI device, "
+		dev_err(&pdev->dev, "Cannot enable PCI device, "
 		       "aborting.\n");
 		return err;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		printk(KERN_ERR PFX "Cannot find proper PCI device "
+		dev_err(&pdev->dev,
+			"Cannot find proper PCI device "
 		       "base address, aborting.\n");
 		err = -ENODEV;
 		goto err_out_disable_pdev;
@@ -2134,8 +2135,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 	if (err) {
-		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
-		       "aborting.\n");
+		dev_err(&pdev->dev,
+			"Cannot obtain PCI resources, aborting.\n");
 		goto err_out_disable_pdev;
 	}
 
@@ -2143,15 +2144,13 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
 	err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
 	if (err) {
-		printk(KERN_ERR PFX "No usable DMA configuration, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
 		goto err_out_free_res;
 	}
 
 	err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
 	if (err) {
-		printk(KERN_ERR PFX "No usable DMA configuration, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
 		goto err_out_free_res;
 	}
 
@@ -2160,7 +2159,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
 	dev = alloc_etherdev(sizeof(*bp));
 	if (!dev) {
-		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
 		err = -ENOMEM;
 		goto err_out_free_res;
 	}
@@ -2181,8 +2180,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
 	bp->regs = ioremap(b44reg_base, b44reg_len);
 	if (bp->regs == 0UL) {
-		printk(KERN_ERR PFX "Cannot map device registers, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
 		err = -ENOMEM;
 		goto err_out_free_dev;
 	}
@@ -2212,8 +2210,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
 	err = b44_get_invariants(bp);
 	if (err) {
-		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
-		       "aborting.\n");
+		dev_err(&pdev->dev,
+			"Problem fetching invariants of chip, aborting.\n");
 		goto err_out_iounmap;
 	}
 
@@ -2233,8 +2231,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
 	err = register_netdev(dev);
 	if (err) {
-		printk(KERN_ERR PFX "Cannot register net device, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
 		goto err_out_iounmap;
 	}
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4f4db5ae503b..64b6a72b4f6a 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -5575,20 +5575,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
 	rc = pci_enable_device(pdev);
 	if (rc) {
-		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
 		goto err_out;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		printk(KERN_ERR PFX "Cannot find PCI device base address, "
-		       "aborting.\n");
+		dev_err(&pdev->dev,
+			"Cannot find PCI device base address, aborting.\n");
 		rc = -ENODEV;
 		goto err_out_disable;
 	}
 
 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
 	if (rc) {
-		printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
 		goto err_out_disable;
 	}
 
@@ -5596,15 +5596,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 	if (bp->pm_cap == 0) {
-		printk(KERN_ERR PFX "Cannot find power management capability, "
-		       "aborting.\n");
+		dev_err(&pdev->dev,
+			"Cannot find power management capability, aborting.\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
 
 	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
 	if (bp->pcix_cap == 0) {
-		printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
+		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
@@ -5612,14 +5612,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
 		bp->flags |= USING_DAC_FLAG;
 		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
-			printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
-			       "failed, aborting.\n");
+			dev_err(&pdev->dev,
+				"pci_set_consistent_dma_mask failed, aborting.\n");
 			rc = -EIO;
 			goto err_out_release;
 		}
 	}
 	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
-		printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
+		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
@@ -5639,7 +5639,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
 
 	if (!bp->regview) {
-		printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
+		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
 		rc = -ENOMEM;
 		goto err_out_release;
 	}
@@ -5711,8 +5711,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
 		!(bp->flags & PCIX_FLAG)) {
 
-		printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
-		       "aborting.\n");
+		dev_err(&pdev->dev,
+			"5706 A1 can only be used in a PCIX bus, aborting.\n");
 		goto err_out_unmap;
 	}
 
@@ -5733,7 +5733,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
-		printk(KERN_ERR PFX "Firmware not running, aborting.\n");
+		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
 		rc = -ENODEV;
 		goto err_out_unmap;
 	}
@@ -5895,7 +5895,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 
 	if ((rc = register_netdev(dev))) {
-		printk(KERN_ERR PFX "Cannot register net device\n");
+		dev_err(&pdev->dev, "Cannot register net device\n");
 		if (bp->regview)
 			iounmap(bp->regview);
 		pci_release_regions(pdev);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index d33130f64700..a31544ccb3c4 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4887,13 +4887,12 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		printk(KERN_ERR PFX "Cannot enable PCI device, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
 		return err;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		printk(KERN_ERR PFX "Cannot find proper PCI device "
+		dev_err(&pdev->dev, "Cannot find proper PCI device "
 		       "base address, aborting.\n");
 		err = -ENODEV;
 		goto err_out_disable_pdev;
@@ -4901,7 +4900,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
 	dev = alloc_etherdev(sizeof(*cp));
 	if (!dev) {
-		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
 		err = -ENOMEM;
 		goto err_out_disable_pdev;
 	}
@@ -4910,8 +4909,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
 	err = pci_request_regions(pdev, dev->name);
 	if (err) {
-		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
 		goto err_out_free_netdev;
 	}
 	pci_set_master(pdev);
@@ -4941,7 +4939,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 		if (pci_write_config_byte(pdev,
 					  PCI_CACHE_LINE_SIZE,
 					  cas_cacheline_size)) {
-			printk(KERN_ERR PFX "Could not set PCI cache "
+			dev_err(&pdev->dev, "Could not set PCI cache "
 			       "line size\n");
 			goto err_write_cacheline;
 		}
@@ -4955,7 +4953,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 		err = pci_set_consistent_dma_mask(pdev,
 						  DMA_64BIT_MASK);
 		if (err < 0) {
-			printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
+			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
 			       "for consistent allocations\n");
 			goto err_out_free_res;
 		}
@@ -4963,7 +4961,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 	} else {
 		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 		if (err) {
-			printk(KERN_ERR PFX "No usable DMA configuration, "
+			dev_err(&pdev->dev, "No usable DMA configuration, "
 			       "aborting.\n");
 			goto err_out_free_res;
 		}
@@ -5023,8 +5021,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 	/* give us access to cassini registers */
 	cp->regs = pci_iomap(pdev, 0, casreg_len);
 	if (cp->regs == 0UL) {
-		printk(KERN_ERR PFX "Cannot map device registers, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
 		goto err_out_free_res;
 	}
 	cp->casreg_len = casreg_len;
@@ -5040,8 +5037,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
 				     &cp->block_dvma);
 	if (!cp->init_block) {
-		printk(KERN_ERR PFX "Cannot allocate init block, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
 		goto err_out_iounmap;
 	}
 
@@ -5085,8 +5081,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 		dev->features |= NETIF_F_HIGHDMA;
 
 	if (register_netdev(dev)) {
-		printk(KERN_ERR PFX "Cannot register net device, "
-		       "aborting.\n");
+		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
 		goto err_out_free_consistent;
 	}
 
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 2038ca7e49ce..6ad5796121c8 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -703,8 +703,8 @@ static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id,
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t
-lance_interrupt(const int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t lance_interrupt(const int irq, void *dev_id,
+				   struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
 	struct lance_private *lp = netdev_priv(dev);
@@ -1253,7 +1253,7 @@ static int __init dec_lance_init(const int type, const int slot)
 	return 0;
 
 err_out_free_dev:
-	kfree(dev);
+	free_netdev(dev);
 
 err_out:
 	return ret;
@@ -1299,6 +1299,7 @@ static void __exit dec_lance_cleanup(void)
 	while (root_lance_dev) {
 		struct net_device *dev = root_lance_dev;
 		struct lance_private *lp = netdev_priv(dev);
+
 		unregister_netdev(dev);
 #ifdef CONFIG_TC
 		if (lp->slot >= 0)
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 4b6ddb70f921..402961e68c89 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -9,49 +9,10 @@
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
 */
-/*
-    Rev		Date		Description
-    ==========================================================================
-    0.01	2001/05/03	Created DL2000-based linux driver
-    0.02	2001/05/21	Added VLAN and hardware checksum support.
-    1.00	2001/06/26	Added jumbo frame support.
-    1.01	2001/08/21	Added two parameters, rx_coalesce and rx_timeout.
-    1.02	2001/10/08	Supported fiber media.
-			Added flow control parameters.
-    1.03	2001/10/12	Changed the default media to 1000mbps_fd for
-			the fiber devices.
-    1.04	2001/11/08	Fixed Tx stopped when tx very busy.
-    1.05	2001/11/22	Fixed Tx stopped when unidirectional tx busy.
-    1.06	2001/12/13	Fixed disconnect bug at 10Mbps mode.
-			Fixed tx_full flag incorrect.
-			Added tx_coalesce paramter.
-    1.07	2002/01/03	Fixed miscount of RX frame error.
-    1.08	2002/01/17	Fixed the multicast bug.
-    1.09	2002/03/07	Move rx-poll-now to re-fill loop.
-			Added rio_timer() to watch rx buffers.
-    1.10	2002/04/16	Fixed miscount of carrier error.
-    1.11	2002/05/23	Added ISR schedule scheme
-			Fixed miscount of rx frame error for DGE-550SX.
-			Fixed VLAN bug.
-    1.12	2002/06/13	Lock tx_coalesce=1 on 10/100Mbps mode.
-    1.13	2002/08/13	1. Fix disconnection (many tx:carrier/rx:frame
-			errs) with some mainboards.
-			2. Use definition "DRV_NAME" "DRV_VERSION"
-			"DRV_RELDATE" for flexibility.
-    1.14	2002/08/14	Support ethtool.
-    1.15	2002/08/27	Changed the default media to Auto-Negotiation
-			for the fiber devices.
-    1.16	2002/09/04	More power down time for fiber devices auto-
-			negotiation.
-			Fix disconnect bug after ifup and ifdown.
-    1.17	2002/10/03	Fix RMON statistics overflow.
-			Always use I/O mapping to access eeprom,
-			avoid system freezing with some chipsets.
 
-*/
 #define DRV_NAME	"D-Link DL2000-based linux driver"
-#define DRV_VERSION	"v1.17b"
-#define DRV_RELDATE	"2006/03/10"
+#define DRV_VERSION	"v1.18"
+#define DRV_RELDATE	"2006/06/27"
 #include "dl2k.h"
 #include <linux/dma-mapping.h>
 
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 2ad327542927..e445988c92ee 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -555,12 +555,12 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev,
 
 	if (!request_region(pci_resource_start(pdev, 1),
 			pci_resource_len(pdev, 1), "eepro100")) {
-		printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
+		dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
 		goto err_out_none;
 	}
 	if (!request_mem_region(pci_resource_start(pdev, 0),
 			pci_resource_len(pdev, 0), "eepro100")) {
-		printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
+		dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
 		goto err_out_free_pio_region;
 	}
 
@@ -573,7 +573,7 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev,
573 573
574 ioaddr = pci_iomap(pdev, pci_bar, 0); 574 ioaddr = pci_iomap(pdev, pci_bar, 0);
575 if (!ioaddr) { 575 if (!ioaddr) {
576 printk (KERN_ERR "eepro100: cannot remap IO\n"); 576 dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
577 goto err_out_free_mmio_region; 577 goto err_out_free_mmio_region;
578 } 578 }
579 579
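The eepro100 hunks above show a pattern repeated throughout this patch: bare printk(KERN_ERR ...) calls in probe paths become dev_err(&pdev->dev, ...), which prefixes each message with the driver and device name taken from the struct device itself. Below is a minimal sketch of that idiom in a PCI probe routine; the "example" names and the error path are illustrative, not taken from the patch.

#include <linux/pci.h>
#include <linux/ioport.h>

/* Hypothetical probe fragment: dev_err() ties the message to the PCI
 * device, so the log line is prefixed with the driver name and bus
 * address without hand-formatting pci_name(pdev) into every printk. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	if (!request_region(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1), "example")) {
		dev_err(&pdev->dev, "cannot reserve I/O ports\n");
		return -EBUSY;
	}
	/* ... remainder of device setup ... */
	return 0;
}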
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 9f3e09a3d88c..a67650ccf084 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -19,62 +19,15 @@
19 19
20 Information and updates available at 20 Information and updates available at
21 http://www.scyld.com/network/epic100.html 21 http://www.scyld.com/network/epic100.html
22 [this link no longer provides anything useful -jgarzik]
22 23
23 --------------------------------------------------------------------- 24 ---------------------------------------------------------------------
24 25
25 Linux kernel-specific changes:
26
27 LK1.1.2 (jgarzik):
28 * Merge becker version 1.09 (4/08/2000)
29
30 LK1.1.3:
31 * Major bugfix to 1.09 driver (Francis Romieu)
32
33 LK1.1.4 (jgarzik):
34 * Merge becker test version 1.09 (5/29/2000)
35
36 LK1.1.5:
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
39
40 LK1.1.6:
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
43
44 LK1.1.7:
45 * { fill me in }
46
47 LK1.1.8:
48 * ethtool driver info support (jgarzik)
49
50 LK1.1.9:
51 * ethtool media get/set support (jgarzik)
52
53 LK1.1.10:
54 * revert MII transceiver init change (jgarzik)
55
56 LK1.1.11:
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
59
60 LK1.1.12:
61 * fix power-up sequence
62
63 LK1.1.13:
64 * revert version 1.1.12, power-up sequence "fix"
65
66 LK1.1.14 (Kryzsztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
69
70 AC1.1.14ac
71 * fix power up/down for ethtool that broke in 1.11
72
73*/ 26*/
74 27
75#define DRV_NAME "epic100" 28#define DRV_NAME "epic100"
76#define DRV_VERSION "1.11+LK1.1.14+AC1.1.14" 29#define DRV_VERSION "2.0"
77#define DRV_RELDATE "June 2, 2004" 30#define DRV_RELDATE "June 27, 2006"
78 31
79/* The user-configurable values. 32/* The user-configurable values.
80 These may be modified when a driver module is loaded.*/ 33 These may be modified when a driver module is loaded.*/
@@ -204,19 +157,15 @@ typedef enum {
204 157
205struct epic_chip_info { 158struct epic_chip_info {
206 const char *name; 159 const char *name;
207 int io_size; /* Needed for I/O region check or ioremap(). */
208 int drv_flags; /* Driver use, intended as capability flags. */ 160 int drv_flags; /* Driver use, intended as capability flags. */
209}; 161};
210 162
211 163
212/* indexed by chip_t */ 164/* indexed by chip_t */
213static const struct epic_chip_info pci_id_tbl[] = { 165static const struct epic_chip_info pci_id_tbl[] = {
214 { "SMSC EPIC/100 83c170", 166 { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN },
215 EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN }, 167 { "SMSC EPIC/100 83c170", TYPE2_INTR },
216 { "SMSC EPIC/100 83c170", 168 { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN },
217 EPIC_TOTAL_SIZE, TYPE2_INTR },
218 { "SMSC EPIC/C 83c175",
219 EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
220}; 169};
221 170
222 171
@@ -385,8 +334,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
385 goto out; 334 goto out;
386 irq = pdev->irq; 335 irq = pdev->irq;
387 336
388 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) { 337 if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
389 printk (KERN_ERR "card %d: no PCI region space\n", card_idx); 338 dev_err(&pdev->dev, "no PCI region space\n");
390 ret = -ENODEV; 339 ret = -ENODEV;
391 goto err_out_disable; 340 goto err_out_disable;
392 } 341 }
@@ -401,7 +350,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
401 350
402 dev = alloc_etherdev(sizeof (*ep)); 351 dev = alloc_etherdev(sizeof (*ep));
403 if (!dev) { 352 if (!dev) {
404 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx); 353 dev_err(&pdev->dev, "no memory for eth device\n");
405 goto err_out_free_res; 354 goto err_out_free_res;
406 } 355 }
407 SET_MODULE_OWNER(dev); 356 SET_MODULE_OWNER(dev);
@@ -413,7 +362,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
413 ioaddr = pci_resource_start (pdev, 1); 362 ioaddr = pci_resource_start (pdev, 1);
414 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1)); 363 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
415 if (!ioaddr) { 364 if (!ioaddr) {
416 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx); 365 dev_err(&pdev->dev, "ioremap failed\n");
417 goto err_out_free_netdev; 366 goto err_out_free_netdev;
418 } 367 }
419#endif 368#endif
@@ -473,8 +422,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
473 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4)); 422 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
474 423
475 if (debug > 2) { 424 if (debug > 2) {
476 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n", 425 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
477 pci_name(pdev));
478 for (i = 0; i < 64; i++) 426 for (i = 0; i < 64; i++)
479 printk(" %4.4x%s", read_eeprom(ioaddr, i), 427 printk(" %4.4x%s", read_eeprom(ioaddr, i),
480 i % 16 == 15 ? "\n" : ""); 428 i % 16 == 15 ? "\n" : "");
@@ -496,21 +444,23 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
496 int mii_status = mdio_read(dev, phy, MII_BMSR); 444 int mii_status = mdio_read(dev, phy, MII_BMSR);
497 if (mii_status != 0xffff && mii_status != 0x0000) { 445 if (mii_status != 0xffff && mii_status != 0x0000) {
498 ep->phys[phy_idx++] = phy; 446 ep->phys[phy_idx++] = phy;
499 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control " 447 dev_info(&pdev->dev,
500 "%4.4x status %4.4x.\n", 448 "MII transceiver #%d control "
501 pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status); 449 "%4.4x status %4.4x.\n",
450 phy, mdio_read(dev, phy, 0), mii_status);
502 } 451 }
503 } 452 }
504 ep->mii_phy_cnt = phy_idx; 453 ep->mii_phy_cnt = phy_idx;
505 if (phy_idx != 0) { 454 if (phy_idx != 0) {
506 phy = ep->phys[0]; 455 phy = ep->phys[0];
507 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); 456 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
508 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link " 457 dev_info(&pdev->dev,
458 "Autonegotiation advertising %4.4x link "
509 "partner %4.4x.\n", 459 "partner %4.4x.\n",
510 pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5)); 460 ep->mii.advertising, mdio_read(dev, phy, 5));
511 } else if ( ! (ep->chip_flags & NO_MII)) { 461 } else if ( ! (ep->chip_flags & NO_MII)) {
512 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n", 462 dev_warn(&pdev->dev,
513 pci_name(pdev)); 463 "***WARNING***: No MII transceiver found!\n");
514 /* Use the known PHY address of the EPII. */ 464 /* Use the known PHY address of the EPII. */
515 ep->phys[0] = 3; 465 ep->phys[0] = 3;
516 } 466 }
@@ -525,8 +475,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
525 /* The lower four bits are the media type. */ 475 /* The lower four bits are the media type. */
526 if (duplex) { 476 if (duplex) {
527 ep->mii.force_media = ep->mii.full_duplex = 1; 477 ep->mii.force_media = ep->mii.full_duplex = 1;
528 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n", 478 dev_info(&pdev->dev, "Forced full duplex requested.\n");
529 pci_name(pdev));
530 } 479 }
531 dev->if_port = ep->default_port = option; 480 dev->if_port = ep->default_port = option;
532 481
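The epic100 changes drop the per-chip io_size field and compare the BAR length against the single EPIC_TOTAL_SIZE constant before mapping the registers. The fragment below is a minimal sketch of that sanity check, assuming a hypothetical EXAMPLE_TOTAL_SIZE register window; it is not the driver's actual probe code.

#include <linux/pci.h>
#include <asm/io.h>

#define EXAMPLE_TOTAL_SIZE 0x100	/* assumed size of the register block */

/* Reject the device if BAR 0 is smaller than the registers we intend
 * to map, then remap the BAR for MMIO access. */
static void __iomem *example_map_regs(struct pci_dev *pdev)
{
	if (pci_resource_len(pdev, 0) < EXAMPLE_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		return NULL;
	}
	return ioremap(pci_resource_start(pdev, 0),
		       pci_resource_len(pdev, 0));
}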
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index c701951dcd6f..97d34fee8c1f 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -124,7 +124,9 @@ MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered mult
124MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex"); 124MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
125MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)"); 125MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
126 126
127#define MIN_REGION_SIZE 136 127enum {
128 MIN_REGION_SIZE = 136,
129};
128 130
129/* A chip capabilities table, matching the entries in pci_tbl[] above. */ 131/* A chip capabilities table, matching the entries in pci_tbl[] above. */
130enum chip_capability_flags { 132enum chip_capability_flags {
@@ -146,14 +148,13 @@ enum phy_type_flags {
146 148
147struct chip_info { 149struct chip_info {
148 char *chip_name; 150 char *chip_name;
149 int io_size;
150 int flags; 151 int flags;
151}; 152};
152 153
153static const struct chip_info skel_netdrv_tbl[] = { 154static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
154 {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, 155 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
155 {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR}, 156 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
156 {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, 157 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
157}; 158};
158 159
159/* Offsets to the Command and Status Registers. */ 160/* Offsets to the Command and Status Registers. */
@@ -504,13 +505,14 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
504 505
505 len = pci_resource_len(pdev, bar); 506 len = pci_resource_len(pdev, bar);
506 if (len < MIN_REGION_SIZE) { 507 if (len < MIN_REGION_SIZE) {
507 printk(KERN_ERR "%s: region size %ld too small, aborting\n", 508 dev_err(&pdev->dev,
508 boardname, len); 509 "region size %ld too small, aborting\n", len);
509 return -ENODEV; 510 return -ENODEV;
510 } 511 }
511 512
512 i = pci_request_regions(pdev, boardname); 513 i = pci_request_regions(pdev, boardname);
513 if (i) return i; 514 if (i)
515 return i;
514 516
515 irq = pdev->irq; 517 irq = pdev->irq;
516 518
@@ -576,9 +578,9 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
576 578
577 if (mii_status != 0xffff && mii_status != 0x0000) { 579 if (mii_status != 0xffff && mii_status != 0x0000) {
578 np->phys[phy_idx++] = phy; 580 np->phys[phy_idx++] = phy;
579 printk(KERN_INFO 581 dev_info(&pdev->dev,
580 "%s: MII PHY found at address %d, status " 582 "MII PHY found at address %d, status "
581 "0x%4.4x.\n", dev->name, phy, mii_status); 583 "0x%4.4x.\n", phy, mii_status);
582 /* get phy type */ 584 /* get phy type */
583 { 585 {
584 unsigned int data; 586 unsigned int data;
@@ -601,10 +603,10 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
601 } 603 }
602 604
603 np->mii_cnt = phy_idx; 605 np->mii_cnt = phy_idx;
604 if (phy_idx == 0) { 606 if (phy_idx == 0)
605 printk(KERN_WARNING "%s: MII PHY not found -- this device may " 607 dev_warn(&pdev->dev,
606 "not operate correctly.\n", dev->name); 608 "MII PHY not found -- this device may "
607 } 609 "not operate correctly.\n");
608 } else { 610 } else {
609 np->phys[0] = 32; 611 np->phys[0] = 32;
610/* 89/6/23 add, (begin) */ 612/* 89/6/23 add, (begin) */
@@ -630,7 +632,7 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
630 np->mii.full_duplex = full_duplex[card_idx]; 632 np->mii.full_duplex = full_duplex[card_idx];
631 633
632 if (np->mii.full_duplex) { 634 if (np->mii.full_duplex) {
633 printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name); 635 dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
634/* 89/6/13 add, (begin) */ 636/* 89/6/13 add, (begin) */
635// if (np->PHYType==MarvellPHY) 637// if (np->PHYType==MarvellPHY)
636 if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) { 638 if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
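fealnx replaces the MIN_REGION_SIZE #define with an anonymous enum and removes the redundant io_size column from its chip table. Enumerators behave like #defines for integer constants but are real symbols: they respect C scoping and are visible to debuggers. A self-contained illustration follows; the length check only mimics the driver's region-size test and is not the driver code.

#include <stdio.h>

enum {
	MIN_REGION_SIZE = 136,	/* smallest usable PCI region, in bytes */
};

int main(void)
{
	long len = 64;		/* pretend this came from pci_resource_len() */

	if (len < MIN_REGION_SIZE)
		printf("region size %ld too small, aborting\n", len);
	return 0;
}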
diff --git a/drivers/net/gt96100eth.c b/drivers/net/gt96100eth.c
index 49dacc6e35aa..2b4db7414475 100644
--- a/drivers/net/gt96100eth.c
+++ b/drivers/net/gt96100eth.c
@@ -699,7 +699,6 @@ static int __init gt96100_probe1(struct pci_dev *pci, int port_num)
699 memset(gp, 0, sizeof(*gp)); // clear it 699 memset(gp, 0, sizeof(*gp)); // clear it
700 700
701 gp->port_num = port_num; 701 gp->port_num = port_num;
702 gp->io_size = GT96100_ETH_IO_SIZE;
703 gp->port_offset = port_num * GT96100_ETH_IO_SIZE; 702 gp->port_offset = port_num * GT96100_ETH_IO_SIZE;
704 gp->phy_addr = phy_addr; 703 gp->phy_addr = phy_addr;
705 gp->chip_rev = chip_rev; 704 gp->chip_rev = chip_rev;
@@ -1531,7 +1530,7 @@ static void gt96100_cleanup_module(void)
1531 + sizeof(gt96100_td_t) * TX_RING_SIZE, 1530 + sizeof(gt96100_td_t) * TX_RING_SIZE,
1532 gp->rx_ring); 1531 gp->rx_ring);
1533 free_netdev(gtif->dev); 1532 free_netdev(gtif->dev);
1534 release_region(gtif->iobase, gp->io_size); 1533 release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
1535 } 1534 }
1536 } 1535 }
1537} 1536}
diff --git a/drivers/net/gt96100eth.h b/drivers/net/gt96100eth.h
index 2a8331938b84..3b62a87c7d7f 100644
--- a/drivers/net/gt96100eth.h
+++ b/drivers/net/gt96100eth.h
@@ -331,7 +331,6 @@ struct gt96100_private {
331 mib_counters_t mib; 331 mib_counters_t mib;
332 struct net_device_stats stats; 332 struct net_device_stats stats;
333 333
334 int io_size;
335 int port_num; // 0 or 1 334 int port_num; // 0 or 1
336 int chip_rev; 335 int chip_rev;
337 u32 port_offset; 336 u32 port_offset;
@@ -340,7 +339,6 @@ struct gt96100_private {
340 u32 last_psr; // last value of the port status register 339 u32 last_psr; // last value of the port status register
341 340
342 int options; /* User-settable misc. driver options. */ 341 int options; /* User-settable misc. driver options. */
343 int drv_flags;
344 struct timer_list timer; 342 struct timer_list timer;
345 spinlock_t lock; /* Serialise access to device */ 343 spinlock_t lock; /* Serialise access to device */
346}; 344};
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 7bcd939c6edd..409c6aab0411 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -20,22 +20,15 @@
20 20
21 Support and updates available at 21 Support and updates available at
22 http://www.scyld.com/network/hamachi.html 22 http://www.scyld.com/network/hamachi.html
23 [link no longer provides useful info -jgarzik]
23 or 24 or
24 http://www.parl.clemson.edu/~keithu/hamachi.html 25 http://www.parl.clemson.edu/~keithu/hamachi.html
25 26
26
27
28 Linux kernel changelog:
29
30 LK1.0.1:
31 - fix lack of pci_dev<->dev association
32 - ethtool support (jgarzik)
33
34*/ 27*/
35 28
36#define DRV_NAME "hamachi" 29#define DRV_NAME "hamachi"
37#define DRV_VERSION "1.01+LK1.0.1" 30#define DRV_VERSION "2.0"
38#define DRV_RELDATE "5/18/2001" 31#define DRV_RELDATE "June 27, 2006"
39 32
40 33
41/* A few user-configurable values. */ 34/* A few user-configurable values. */
@@ -608,7 +601,8 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
608 pci_set_master(pdev); 601 pci_set_master(pdev);
609 602
610 i = pci_request_regions(pdev, DRV_NAME); 603 i = pci_request_regions(pdev, DRV_NAME);
611 if (i) return i; 604 if (i)
605 return i;
612 606
613 irq = pdev->irq; 607 irq = pdev->irq;
614 ioaddr = ioremap(base, 0x400); 608 ioaddr = ioremap(base, 0x400);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 72aad42db7b4..f4c8fd373b9b 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -188,7 +188,6 @@ struct myri10ge_priv {
188 int vendor_specific_offset; 188 int vendor_specific_offset;
189 u32 devctl; 189 u32 devctl;
190 u16 msi_flags; 190 u16 msi_flags;
191 u32 pm_state[16];
192 u32 read_dma; 191 u32 read_dma;
193 u32 write_dma; 192 u32 write_dma;
194 u32 read_write_dma; 193 u32 read_write_dma;
@@ -1289,6 +1288,7 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
1289 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", 1288 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
1290 "tx_heartbeat_errors", "tx_window_errors", 1289 "tx_heartbeat_errors", "tx_window_errors",
1291 /* device-specific stats */ 1290 /* device-specific stats */
1291 "tx_boundary", "WC", "irq", "MSI",
1292 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1292 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1293 "serial_number", "tx_pkt_start", "tx_pkt_done", 1293 "serial_number", "tx_pkt_start", "tx_pkt_done",
1294 "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", 1294 "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
@@ -1327,6 +1327,10 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1327 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1327 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
1328 data[i] = ((unsigned long *)&mgp->stats)[i]; 1328 data[i] = ((unsigned long *)&mgp->stats)[i];
1329 1329
1330 data[i++] = (unsigned int)mgp->tx.boundary;
1331 data[i++] = (unsigned int)(mgp->mtrr >= 0);
1332 data[i++] = (unsigned int)mgp->pdev->irq;
1333 data[i++] = (unsigned int)mgp->msi_enabled;
1330 data[i++] = (unsigned int)mgp->read_dma; 1334 data[i++] = (unsigned int)mgp->read_dma;
1331 data[i++] = (unsigned int)mgp->write_dma; 1335 data[i++] = (unsigned int)mgp->write_dma;
1332 data[i++] = (unsigned int)mgp->read_write_dma; 1336 data[i++] = (unsigned int)mgp->read_write_dma;
@@ -2197,8 +2201,6 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
2197 * any other device, except if forced with myri10ge_ecrc_enable > 1. 2201 * any other device, except if forced with myri10ge_ecrc_enable > 1.
2198 */ 2202 */
2199 2203
2200#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE 0x005d
2201
2202static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) 2204static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
2203{ 2205{
2204 struct pci_dev *bridge = mgp->pdev->bus->self; 2206 struct pci_dev *bridge = mgp->pdev->bus->self;
@@ -2737,11 +2739,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2737 dev_err(&pdev->dev, "register_netdev failed: %d\n", status); 2739 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
2738 goto abort_with_irq; 2740 goto abort_with_irq;
2739 } 2741 }
2740 2742 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
2741 printk(KERN_INFO "myri10ge: %s: %s IRQ %d, tx bndry %d, fw %s, WC %s\n", 2743 (mgp->msi_enabled ? "MSI" : "xPIC"),
2742 netdev->name, (mgp->msi_enabled ? "MSI" : "xPIC"), 2744 pdev->irq, mgp->tx.boundary, mgp->fw_name,
2743 pdev->irq, mgp->tx.boundary, mgp->fw_name, 2745 (mgp->mtrr >= 0 ? "Enabled" : "Disabled"));
2744 (mgp->mtrr >= 0 ? "Enabled" : "Disabled"));
2745 2746
2746 return 0; 2747 return 0;
2747 2748
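The myri10ge hunks add four labels to the ethtool string table and four matching data[i++] assignments in myri10ge_get_ethtool_stats(); the two arrays are parallel, so every new label needs a value filled in at the same position. A minimal sketch of that convention is below, with made-up example_* names and a hypothetical private struct; the callbacks would be wired into an ethtool_ops as .get_strings and .get_ethtool_stats.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/types.h>

static const char example_gstrings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets",
	/* device-specific stats */
	"tx_boundary", "irq",
};

struct example_priv {
	u64 rx_packets, tx_packets;
	u32 tx_boundary, irq;
};

static void example_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, example_gstrings, sizeof(example_gstrings));
}

static void example_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct example_priv *p = netdev_priv(netdev);
	int i = 0;

	data[i++] = p->rx_packets;
	data[i++] = p->tx_packets;
	data[i++] = p->tx_boundary;	/* must line up with the strings above */
	data[i++] = p->irq;
}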
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 9df2628be1e7..db0475a1102f 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -20,120 +20,9 @@
20 20
21 Support information and updates available at 21 Support information and updates available at
22 http://www.scyld.com/network/netsemi.html 22 http://www.scyld.com/network/netsemi.html
23 [link no longer provides useful info -jgarzik]
23 24
24 25
25 Linux kernel modifications:
26
27 Version 1.0.1:
28 - Spinlock fixes
29 - Bug fixes and better intr performance (Tjeerd)
30 Version 1.0.2:
31 - Now reads correct MAC address from eeprom
32 Version 1.0.3:
33 - Eliminate redundant priv->tx_full flag
34 - Call netif_start_queue from dev->tx_timeout
35 - wmb() in start_tx() to flush data
36 - Update Tx locking
37 - Clean up PCI enable (davej)
38 Version 1.0.4:
39 - Merge Donald Becker's natsemi.c version 1.07
40 Version 1.0.5:
41 - { fill me in }
42 Version 1.0.6:
43 * ethtool support (jgarzik)
44 * Proper initialization of the card (which sometimes
45 fails to occur and leaves the card in a non-functional
46 state). (uzi)
47
48 * Some documented register settings to optimize some
49 of the 100Mbit autodetection circuitry in rev C cards. (uzi)
50
51 * Polling of the PHY intr for stuff like link state
52 change and auto- negotiation to finally work properly. (uzi)
53
54 * One-liner removal of a duplicate declaration of
55 netdev_error(). (uzi)
56
57 Version 1.0.7: (Manfred Spraul)
58 * pci dma
59 * SMP locking update
60 * full reset added into tx_timeout
61 * correct multicast hash generation (both big and little endian)
62 [copied from a natsemi driver version
63 from Myrio Corporation, Greg Smith]
64 * suspend/resume
65
66 version 1.0.8 (Tim Hockin <thockin@sun.com>)
67 * ETHTOOL_* support
68 * Wake on lan support (Erik Gilling)
69 * MXDMA fixes for serverworks
70 * EEPROM reload
71
72 version 1.0.9 (Manfred Spraul)
73 * Main change: fix lack of synchronize
74 netif_close/netif_suspend against a last interrupt
75 or packet.
76 * do not enable superflous interrupts (e.g. the
77 drivers relies on TxDone - TxIntr not needed)
78 * wait that the hardware has really stopped in close
79 and suspend.
80 * workaround for the (at least) gcc-2.95.1 compiler
81 problem. Also simplifies the code a bit.
82 * disable_irq() in tx_timeout - needed to protect
83 against rx interrupts.
84 * stop the nic before switching into silent rx mode
85 for wol (required according to docu).
86
87 version 1.0.10:
88 * use long for ee_addr (various)
89 * print pointers properly (DaveM)
90 * include asm/irq.h (?)
91
92 version 1.0.11:
93 * check and reset if PHY errors appear (Adrian Sun)
94 * WoL cleanup (Tim Hockin)
95 * Magic number cleanup (Tim Hockin)
96 * Don't reload EEPROM on every reset (Tim Hockin)
97 * Save and restore EEPROM state across reset (Tim Hockin)
98 * MDIO Cleanup (Tim Hockin)
99 * Reformat register offsets/bits (jgarzik)
100
101 version 1.0.12:
102 * ETHTOOL_* further support (Tim Hockin)
103
104 version 1.0.13:
105 * ETHTOOL_[G]EEPROM support (Tim Hockin)
106
107 version 1.0.13:
108 * crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
109
110 version 1.0.14:
111 * Cleanup some messages and autoneg in ethtool (Tim Hockin)
112
113 version 1.0.15:
114 * Get rid of cable_magic flag
115 * use new (National provided) solution for cable magic issue
116
117 version 1.0.16:
118 * call netdev_rx() for RxErrors (Manfred Spraul)
119 * formatting and cleanups
120 * change options and full_duplex arrays to be zero
121 initialized
122 * enable only the WoL and PHY interrupts in wol mode
123
124 version 1.0.17:
125 * only do cable_magic on 83815 and early 83816 (Tim Hockin)
126 * create a function for rx refill (Manfred Spraul)
127 * combine drain_ring and init_ring (Manfred Spraul)
128 * oom handling (Manfred Spraul)
129 * hands_off instead of playing with netif_device_{de,a}ttach
130 (Manfred Spraul)
131 * be sure to write the MAC back to the chip (Manfred Spraul)
132 * lengthen EEPROM timeout, and always warn about timeouts
133 (Manfred Spraul)
134 * comments update (Manfred)
135 * do the right thing on a phy-reset (Manfred and Tim)
136
137 TODO: 26 TODO:
138 * big endian support with CFG:BEM instead of cpu_to_le32 27 * big endian support with CFG:BEM instead of cpu_to_le32
139*/ 28*/
@@ -165,8 +54,8 @@
165#include <asm/uaccess.h> 54#include <asm/uaccess.h>
166 55
167#define DRV_NAME "natsemi" 56#define DRV_NAME "natsemi"
168#define DRV_VERSION "1.07+LK1.0.17" 57#define DRV_VERSION "2.0"
169#define DRV_RELDATE "Sep 27, 2002" 58#define DRV_RELDATE "June 27, 2006"
170 59
171#define RX_OFFSET 2 60#define RX_OFFSET 2
172 61
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index fa50eb889408..34bdba9eec79 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -231,12 +231,12 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
231 irq = pdev->irq; 231 irq = pdev->irq;
232 232
233 if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { 233 if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
234 printk (KERN_ERR PFX "no I/O resource at PCI BAR #0\n"); 234 dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
235 return -ENODEV; 235 return -ENODEV;
236 } 236 }
237 237
238 if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { 238 if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
239 printk (KERN_ERR PFX "I/O resource 0x%x @ 0x%lx busy\n", 239 dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
240 NE_IO_EXTENT, ioaddr); 240 NE_IO_EXTENT, ioaddr);
241 return -EBUSY; 241 return -EBUSY;
242 } 242 }
@@ -263,7 +263,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
263 /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */ 263 /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */
264 dev = alloc_ei_netdev(); 264 dev = alloc_ei_netdev();
265 if (!dev) { 265 if (!dev) {
266 printk (KERN_ERR PFX "cannot allocate ethernet device\n"); 266 dev_err(&pdev->dev, "cannot allocate ethernet device\n");
267 goto err_out_free_res; 267 goto err_out_free_res;
268 } 268 }
269 SET_MODULE_OWNER(dev); 269 SET_MODULE_OWNER(dev);
@@ -281,7 +281,8 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
281 while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0) 281 while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
282 /* Limit wait: '2' avoids jiffy roll-over. */ 282 /* Limit wait: '2' avoids jiffy roll-over. */
283 if (jiffies - reset_start_time > 2) { 283 if (jiffies - reset_start_time > 2) {
284 printk(KERN_ERR PFX "Card failure (no reset ack).\n"); 284 dev_err(&pdev->dev,
285 "Card failure (no reset ack).\n");
285 goto err_out_free_netdev; 286 goto err_out_free_netdev;
286 } 287 }
287 288
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index a68bf474f6ed..d4be207d321a 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -1,17 +1,12 @@
1/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard. 1/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard.
2 * 2 *
3 * Copyright 1996,1997 Jan-Pascal van Best and Andreas Mohr. 3 * Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr.
4 * 4 *
5 * This software may be used and distributed according to the terms 5 * This software may be used and distributed according to the terms
6 * of the GNU General Public License, incorporated herein by reference. 6 * of the GNU General Public License, incorporated herein by reference.
7 * 7 *
8 * The authors may be reached as: 8 * The authors may be reached as:
9 * jvbest@wi.leidenuniv.nl a.mohr@mailto.de 9 * janpascal@vanbest.org andi@lisas.de
10 * or by snail mail as
11 * Jan-Pascal van Best Andreas Mohr
12 * Klikspaanweg 58-4 Stauferstr. 6
13 * 2324 LZ Leiden D-71272 Renningen
14 * The Netherlands Germany
15 * 10 *
16 * Sources: 11 * Sources:
17 * Donald Becker's "skeleton.c" 12 * Donald Becker's "skeleton.c"
@@ -27,8 +22,9 @@
27 * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB) 22 * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB)
28 * 970623 v1.00: First kernel version (AM) 23 * 970623 v1.00: First kernel version (AM)
29 * 970814 v1.01: Added detection of onboard receive buffer size (AM) 24 * 970814 v1.01: Added detection of onboard receive buffer size (AM)
25 * 060611 v1.02: slight cleanup: email addresses, driver modernization.
30 * Bugs: 26 * Bugs:
31 * - None known... 27 * - not SMP-safe (no locking of I/O accesses)
32 * - Note that you have to patch ifconfig for the new /proc/net/dev 28 * - Note that you have to patch ifconfig for the new /proc/net/dev
33 * format. It gives incorrect stats otherwise. 29 * format. It gives incorrect stats otherwise.
34 * 30 *
@@ -39,7 +35,7 @@
39 * Complete merge with Andreas' driver 35 * Complete merge with Andreas' driver
40 * Implement ring buffers (Is this useful? You can't squeeze 36 * Implement ring buffers (Is this useful? You can't squeeze
41 * too many packet in a 2k buffer!) 37 * too many packet in a 2k buffer!)
42 * Implement DMA (Again, is this useful? Some docs says DMA is 38 * Implement DMA (Again, is this useful? Some docs say DMA is
43 * slower than programmed I/O) 39 * slower than programmed I/O)
44 * 40 *
45 * Compile with: 41 * Compile with:
@@ -47,7 +43,7 @@
47 * -DMODULE -c ni5010.c 43 * -DMODULE -c ni5010.c
48 * 44 *
49 * Insert with e.g.: 45 * Insert with e.g.:
50 * insmod ni5010.o io=0x300 irq=5 46 * insmod ni5010.ko io=0x300 irq=5
51 */ 47 */
52 48
53#include <linux/module.h> 49#include <linux/module.h>
@@ -69,15 +65,15 @@
69 65
70#include "ni5010.h" 66#include "ni5010.h"
71 67
72static const char *boardname = "NI5010"; 68static const char boardname[] = "NI5010";
73static char *version = 69static char version[] __initdata =
74 "ni5010.c: v1.00 06/23/97 Jan-Pascal van Best and Andreas Mohr\n"; 70 "ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n";
75 71
76/* bufsize_rcv == 0 means autoprobing */ 72/* bufsize_rcv == 0 means autoprobing */
77static unsigned int bufsize_rcv; 73static unsigned int bufsize_rcv;
78 74
79#define jumpered_interrupts /* IRQ line jumpered on board */ 75#define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */
80#undef jumpered_dma /* No DMA used */ 76#undef JUMPERED_DMA /* No DMA used */
81#undef FULL_IODETECT /* Only detect in portlist */ 77#undef FULL_IODETECT /* Only detect in portlist */
82 78
83#ifndef FULL_IODETECT 79#ifndef FULL_IODETECT
@@ -281,7 +277,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
281 277
282 PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name)); 278 PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));
283 279
284#ifdef jumpered_interrupts 280#ifdef JUMPERED_INTERRUPTS
285 if (dev->irq == 0xff) 281 if (dev->irq == 0xff)
286 ; 282 ;
287 else if (dev->irq < 2) { 283 else if (dev->irq < 2) {
@@ -305,7 +301,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
305 } else if (dev->irq == 2) { 301 } else if (dev->irq == 2) {
306 dev->irq = 9; 302 dev->irq = 9;
307 } 303 }
308#endif /* jumpered_irq */ 304#endif /* JUMPERED_INTERRUPTS */
309 PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name)); 305 PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name));
310 306
311 /* DMA is not supported (yet?), so no use detecting it */ 307 /* DMA is not supported (yet?), so no use detecting it */
@@ -334,7 +330,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
334 outw(0, IE_GP); /* Point GP at start of packet */ 330 outw(0, IE_GP); /* Point GP at start of packet */
335 outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */ 331 outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */
336 } 332 }
337 printk("// bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE); 333 printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
338 memset(dev->priv, 0, sizeof(struct ni5010_local)); 334 memset(dev->priv, 0, sizeof(struct ni5010_local));
339 335
340 dev->open = ni5010_open; 336 dev->open = ni5010_open;
@@ -354,11 +350,9 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
354 outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */ 350 outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */
355 351
356 printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq); 352 printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq);
357 if (dev->dma) printk(" & DMA %d", dev->dma); 353 if (dev->dma)
354 printk(" & DMA %d", dev->dma);
358 printk(".\n"); 355 printk(".\n");
359
360 printk(KERN_INFO "Join the NI5010 driver development team!\n");
361 printk(KERN_INFO "Mail to a.mohr@mailto.de or jvbest@wi.leidenuniv.nl\n");
362 return 0; 356 return 0;
363out: 357out:
364 release_region(dev->base_addr, NI5010_IO_EXTENT); 358 release_region(dev->base_addr, NI5010_IO_EXTENT);
@@ -371,7 +365,7 @@ out:
371 * 365 *
372 * This routine should set everything up anew at each open, even 366 * This routine should set everything up anew at each open, even
373 * registers that "should" only need to be set once at boot, so that 367 * registers that "should" only need to be set once at boot, so that
374 * there is non-reboot way to recover if something goes wrong. 368 * there is a non-reboot way to recover if something goes wrong.
375 */ 369 */
376 370
377static int ni5010_open(struct net_device *dev) 371static int ni5010_open(struct net_device *dev)
@@ -390,13 +384,13 @@ static int ni5010_open(struct net_device *dev)
390 * Always allocate the DMA channel after the IRQ, 384 * Always allocate the DMA channel after the IRQ,
391 * and clean up on failure. 385 * and clean up on failure.
392 */ 386 */
393#ifdef jumpered_dma 387#ifdef JUMPERED_DMA
394 if (request_dma(dev->dma, cardname)) { 388 if (request_dma(dev->dma, cardname)) {
395 printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma); 389 printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma);
396 free_irq(dev->irq, NULL); 390 free_irq(dev->irq, NULL);
397 return -EAGAIN; 391 return -EAGAIN;
398 } 392 }
399#endif /* jumpered_dma */ 393#endif /* JUMPERED_DMA */
400 394
401 PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name)); 395 PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name));
402 /* Reset the hardware here. Don't forget to set the station address. */ 396 /* Reset the hardware here. Don't forget to set the station address. */
@@ -633,7 +627,7 @@ static int ni5010_close(struct net_device *dev)
633 int ioaddr = dev->base_addr; 627 int ioaddr = dev->base_addr;
634 628
635 PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name)); 629 PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name));
636#ifdef jumpered_interrupts 630#ifdef JUMPERED_INTERRUPTS
637 free_irq(dev->irq, NULL); 631 free_irq(dev->irq, NULL);
638#endif 632#endif
639 /* Put card in held-RESET state */ 633 /* Put card in held-RESET state */
@@ -771,7 +765,7 @@ module_param(irq, int, 0);
771MODULE_PARM_DESC(io, "ni5010 I/O base address"); 765MODULE_PARM_DESC(io, "ni5010 I/O base address");
772MODULE_PARM_DESC(irq, "ni5010 IRQ number"); 766MODULE_PARM_DESC(irq, "ni5010 IRQ number");
773 767
774int init_module(void) 768static int __init ni5010_init_module(void)
775{ 769{
776 PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname)); 770 PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname));
777 /* 771 /*
@@ -792,13 +786,15 @@ int init_module(void)
792 return 0; 786 return 0;
793} 787}
794 788
795void cleanup_module(void) 789static void __exit ni5010_cleanup_module(void)
796{ 790{
797 PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname)); 791 PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname));
798 unregister_netdev(dev_ni5010); 792 unregister_netdev(dev_ni5010);
799 release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT); 793 release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT);
800 free_netdev(dev_ni5010); 794 free_netdev(dev_ni5010);
801} 795}
796module_init(ni5010_init_module);
797module_exit(ni5010_cleanup_module);
802#endif /* MODULE */ 798#endif /* MODULE */
803MODULE_LICENSE("GPL"); 799MODULE_LICENSE("GPL");
804 800
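ni5010 is converted from the legacy init_module()/cleanup_module() entry points to static functions registered with module_init()/module_exit() and tagged __init/__exit so their text can be discarded once it is no longer needed. A minimal, stand-alone sketch of the idiom (names and messages are illustrative):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init example_init_module(void)
{
	printk(KERN_DEBUG "example: loaded\n");
	return 0;	/* a negative return would abort the module load */
}

static void __exit example_cleanup_module(void)
{
	printk(KERN_DEBUG "example: unloaded\n");
}

module_init(example_init_module);
module_exit(example_cleanup_module);
MODULE_LICENSE("GPL");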
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 70429108c40d..0e76859c90a2 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -803,7 +803,7 @@ static int ns83820_setup_rx(struct net_device *ndev)
803 803
804 writel(dev->IMR_cache, dev->base + IMR); 804 writel(dev->IMR_cache, dev->base + IMR);
805 writel(1, dev->base + IER); 805 writel(1, dev->base + IER);
806 spin_unlock_irq(&dev->misc_lock); 806 spin_unlock(&dev->misc_lock);
807 807
808 kick_rx(ndev); 808 kick_rx(ndev);
809 809
@@ -1012,8 +1012,6 @@ static void do_tx_done(struct net_device *ndev)
1012 struct ns83820 *dev = PRIV(ndev); 1012 struct ns83820 *dev = PRIV(ndev);
1013 u32 cmdsts, tx_done_idx, *desc; 1013 u32 cmdsts, tx_done_idx, *desc;
1014 1014
1015 spin_lock_irq(&dev->tx_lock);
1016
1017 dprintk("do_tx_done(%p)\n", ndev); 1015 dprintk("do_tx_done(%p)\n", ndev);
1018 tx_done_idx = dev->tx_done_idx; 1016 tx_done_idx = dev->tx_done_idx;
1019 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); 1017 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
@@ -1069,7 +1067,6 @@ static void do_tx_done(struct net_device *ndev)
1069 netif_start_queue(ndev); 1067 netif_start_queue(ndev);
1070 netif_wake_queue(ndev); 1068 netif_wake_queue(ndev);
1071 } 1069 }
1072 spin_unlock_irq(&dev->tx_lock);
1073} 1070}
1074 1071
1075static void ns83820_cleanup_tx(struct ns83820 *dev) 1072static void ns83820_cleanup_tx(struct ns83820 *dev)
@@ -1281,11 +1278,13 @@ static struct ethtool_ops ops = {
1281 .get_link = ns83820_get_link 1278 .get_link = ns83820_get_link
1282}; 1279};
1283 1280
1281/* this function is called in irq context from the ISR */
1284static void ns83820_mib_isr(struct ns83820 *dev) 1282static void ns83820_mib_isr(struct ns83820 *dev)
1285{ 1283{
1286 spin_lock(&dev->misc_lock); 1284 unsigned long flags;
1285 spin_lock_irqsave(&dev->misc_lock, flags);
1287 ns83820_update_stats(dev); 1286 ns83820_update_stats(dev);
1288 spin_unlock(&dev->misc_lock); 1287 spin_unlock_irqrestore(&dev->misc_lock, flags);
1289} 1288}
1290 1289
1291static void ns83820_do_isr(struct net_device *ndev, u32 isr); 1290static void ns83820_do_isr(struct net_device *ndev, u32 isr);
@@ -1307,6 +1306,8 @@ static irqreturn_t ns83820_irq(int foo, void *data, struct pt_regs *regs)
1307static void ns83820_do_isr(struct net_device *ndev, u32 isr) 1306static void ns83820_do_isr(struct net_device *ndev, u32 isr)
1308{ 1307{
1309 struct ns83820 *dev = PRIV(ndev); 1308 struct ns83820 *dev = PRIV(ndev);
1309 unsigned long flags;
1310
1310#ifdef DEBUG 1311#ifdef DEBUG
1311 if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC)) 1312 if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC))
1312 Dprintk("odd isr? 0x%08x\n", isr); 1313 Dprintk("odd isr? 0x%08x\n", isr);
@@ -1321,10 +1322,10 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr)
1321 if ((ISR_RXDESC | ISR_RXOK) & isr) { 1322 if ((ISR_RXDESC | ISR_RXOK) & isr) {
1322 prefetch(dev->rx_info.next_rx_desc); 1323 prefetch(dev->rx_info.next_rx_desc);
1323 1324
1324 spin_lock_irq(&dev->misc_lock); 1325 spin_lock_irqsave(&dev->misc_lock, flags);
1325 dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK); 1326 dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK);
1326 writel(dev->IMR_cache, dev->base + IMR); 1327 writel(dev->IMR_cache, dev->base + IMR);
1327 spin_unlock_irq(&dev->misc_lock); 1328 spin_unlock_irqrestore(&dev->misc_lock, flags);
1328 1329
1329 tasklet_schedule(&dev->rx_tasklet); 1330 tasklet_schedule(&dev->rx_tasklet);
1330 //rx_irq(ndev); 1331 //rx_irq(ndev);
@@ -1370,16 +1371,18 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr)
1370 * work has accumulated 1371 * work has accumulated
1371 */ 1372 */
1372 if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) { 1373 if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) {
1374 spin_lock_irqsave(&dev->tx_lock, flags);
1373 do_tx_done(ndev); 1375 do_tx_done(ndev);
1376 spin_unlock_irqrestore(&dev->tx_lock, flags);
1374 1377
1375 /* Disable TxOk if there are no outstanding tx packets. 1378 /* Disable TxOk if there are no outstanding tx packets.
1376 */ 1379 */
1377 if ((dev->tx_done_idx == dev->tx_free_idx) && 1380 if ((dev->tx_done_idx == dev->tx_free_idx) &&
1378 (dev->IMR_cache & ISR_TXOK)) { 1381 (dev->IMR_cache & ISR_TXOK)) {
1379 spin_lock_irq(&dev->misc_lock); 1382 spin_lock_irqsave(&dev->misc_lock, flags);
1380 dev->IMR_cache &= ~ISR_TXOK; 1383 dev->IMR_cache &= ~ISR_TXOK;
1381 writel(dev->IMR_cache, dev->base + IMR); 1384 writel(dev->IMR_cache, dev->base + IMR);
1382 spin_unlock_irq(&dev->misc_lock); 1385 spin_unlock_irqrestore(&dev->misc_lock, flags);
1383 } 1386 }
1384 } 1387 }
1385 1388
@@ -1390,10 +1393,10 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr)
1390 * nature are expected, we must enable TxOk. 1393 * nature are expected, we must enable TxOk.
1391 */ 1394 */
1392 if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) { 1395 if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) {
1393 spin_lock_irq(&dev->misc_lock); 1396 spin_lock_irqsave(&dev->misc_lock, flags);
1394 dev->IMR_cache |= ISR_TXOK; 1397 dev->IMR_cache |= ISR_TXOK;
1395 writel(dev->IMR_cache, dev->base + IMR); 1398 writel(dev->IMR_cache, dev->base + IMR);
1396 spin_unlock_irq(&dev->misc_lock); 1399 spin_unlock_irqrestore(&dev->misc_lock, flags);
1397 } 1400 }
1398 1401
1399 /* MIB interrupt: one of the statistics counters is about to overflow */ 1402 /* MIB interrupt: one of the statistics counters is about to overflow */
@@ -1455,7 +1458,7 @@ static void ns83820_tx_timeout(struct net_device *ndev)
1455 u32 tx_done_idx, *desc; 1458 u32 tx_done_idx, *desc;
1456 unsigned long flags; 1459 unsigned long flags;
1457 1460
1458 local_irq_save(flags); 1461 spin_lock_irqsave(&dev->tx_lock, flags);
1459 1462
1460 tx_done_idx = dev->tx_done_idx; 1463 tx_done_idx = dev->tx_done_idx;
1461 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); 1464 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
@@ -1482,7 +1485,7 @@ static void ns83820_tx_timeout(struct net_device *ndev)
1482 ndev->name, 1485 ndev->name,
1483 tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); 1486 tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
1484 1487
1485 local_irq_restore(flags); 1488 spin_unlock_irqrestore(&dev->tx_lock, flags);
1486} 1489}
1487 1490
1488static void ns83820_tx_watch(unsigned long data) 1491static void ns83820_tx_watch(unsigned long data)
@@ -1832,7 +1835,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1832 } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { 1835 } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
1833 using_dac = 0; 1836 using_dac = 0;
1834 } else { 1837 } else {
1835 printk(KERN_WARNING "ns83820.c: pci_set_dma_mask failed!\n"); 1838 dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
1836 return -ENODEV; 1839 return -ENODEV;
1837 } 1840 }
1838 1841
@@ -1855,7 +1858,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1855 1858
1856 err = pci_enable_device(pci_dev); 1859 err = pci_enable_device(pci_dev);
1857 if (err) { 1860 if (err) {
1858 printk(KERN_INFO "ns83820: pci_enable_dev failed: %d\n", err); 1861 dev_info(&pci_dev->dev, "pci_enable_dev failed: %d\n", err);
1859 goto out_free; 1862 goto out_free;
1860 } 1863 }
1861 1864
@@ -1884,8 +1887,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1884 err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED, 1887 err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED,
1885 DRV_NAME, ndev); 1888 DRV_NAME, ndev);
1886 if (err) { 1889 if (err) {
1887 printk(KERN_INFO "ns83820: unable to register irq %d\n", 1890 dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n",
1888 pci_dev->irq); 1891 pci_dev->irq, err);
1889 goto out_disable; 1892 goto out_disable;
1890 } 1893 }
1891 1894
@@ -1899,7 +1902,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1899 rtnl_lock(); 1902 rtnl_lock();
1900 err = dev_alloc_name(ndev, ndev->name); 1903 err = dev_alloc_name(ndev, ndev->name);
1901 if (err < 0) { 1904 if (err < 0) {
1902 printk(KERN_INFO "ns83820: unable to get netdev name: %d\n", err); 1905 dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err);
1903 goto out_free_irq; 1906 goto out_free_irq;
1904 } 1907 }
1905 1908
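The ns83820 locking changes exist because do_tx_done() and the misc_lock users can now be reached from the interrupt handler as well as from process context: spin_unlock_irq() would unconditionally re-enable interrupts, breaking the ISR's assumptions, so the code switches to spin_lock_irqsave()/spin_unlock_irqrestore(), which put back whatever IRQ state the caller had. A minimal sketch of the rule, with made-up names and register bits:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

static DEFINE_SPINLOCK(example_lock);
static u32 example_imr_cache;

/* Helper that may run in both process and interrupt context: the
 * irqsave variant records the current IRQ state in 'flags' and the
 * irqrestore variant restores it, instead of blindly re-enabling
 * interrupts the way spin_unlock_irq() does. */
static void example_mask_rx_irqs(void __iomem *imr_reg)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_imr_cache &= ~0x3;		/* illustrative bits */
	writel(example_imr_cache, imr_reg);
	spin_unlock_irqrestore(&example_lock, flags);
}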
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 3388ee1313ea..e0e293964042 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -601,7 +601,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
601 /* dev zeroed in alloc_etherdev */ 601 /* dev zeroed in alloc_etherdev */
602 dev = alloc_etherdev (sizeof (*tp)); 602 dev = alloc_etherdev (sizeof (*tp));
603 if (dev == NULL) { 603 if (dev == NULL) {
604 printk (KERN_ERR PFX "unable to alloc new ethernet\n"); 604 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
605 DPRINTK ("EXIT, returning -ENOMEM\n"); 605 DPRINTK ("EXIT, returning -ENOMEM\n");
606 return -ENOMEM; 606 return -ENOMEM;
607 } 607 }
@@ -631,14 +631,14 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
631 631
632 /* make sure PCI base addr 0 is PIO */ 632 /* make sure PCI base addr 0 is PIO */
633 if (!(pio_flags & IORESOURCE_IO)) { 633 if (!(pio_flags & IORESOURCE_IO)) {
634 printk (KERN_ERR PFX "region #0 not a PIO resource, aborting\n"); 634 dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
635 rc = -ENODEV; 635 rc = -ENODEV;
636 goto err_out; 636 goto err_out;
637 } 637 }
638 638
639 /* make sure PCI base addr 1 is MMIO */ 639 /* make sure PCI base addr 1 is MMIO */
640 if (!(mmio_flags & IORESOURCE_MEM)) { 640 if (!(mmio_flags & IORESOURCE_MEM)) {
641 printk (KERN_ERR PFX "region #1 not an MMIO resource, aborting\n"); 641 dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
642 rc = -ENODEV; 642 rc = -ENODEV;
643 goto err_out; 643 goto err_out;
644 } 644 }
@@ -646,12 +646,12 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
646 /* check for weird/broken PCI region reporting */ 646 /* check for weird/broken PCI region reporting */
647 if ((pio_len < NETDRV_MIN_IO_SIZE) || 647 if ((pio_len < NETDRV_MIN_IO_SIZE) ||
648 (mmio_len < NETDRV_MIN_IO_SIZE)) { 648 (mmio_len < NETDRV_MIN_IO_SIZE)) {
649 printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n"); 649 dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
650 rc = -ENODEV; 650 rc = -ENODEV;
651 goto err_out; 651 goto err_out;
652 } 652 }
653 653
654 rc = pci_request_regions (pdev, "pci-skeleton"); 654 rc = pci_request_regions (pdev, MODNAME);
655 if (rc) 655 if (rc)
656 goto err_out; 656 goto err_out;
657 657
@@ -663,7 +663,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
663 /* ioremap MMIO region */ 663 /* ioremap MMIO region */
664 ioaddr = ioremap (mmio_start, mmio_len); 664 ioaddr = ioremap (mmio_start, mmio_len);
665 if (ioaddr == NULL) { 665 if (ioaddr == NULL) {
666 printk (KERN_ERR PFX "cannot remap MMIO, aborting\n"); 666 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
667 rc = -EIO; 667 rc = -EIO;
668 goto err_out_free_res; 668 goto err_out_free_res;
669 } 669 }
@@ -699,9 +699,10 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
699 } 699 }
700 700
701 /* if unknown chip, assume array element #0, original RTL-8139 in this case */ 701 /* if unknown chip, assume array element #0, original RTL-8139 in this case */
702 printk (KERN_DEBUG PFX "PCI device %s: unknown chip version, assuming RTL-8139\n", 702 dev_printk (KERN_DEBUG, &pdev->dev,
703 pci_name(pdev)); 703 "unknown chip version, assuming RTL-8139\n");
704 printk (KERN_DEBUG PFX "PCI device %s: TxConfig = 0x%lx\n", pci_name(pdev), NETDRV_R32 (TxConfig)); 704 dev_printk (KERN_DEBUG, &pdev->dev, "TxConfig = 0x%lx\n",
705 NETDRV_R32 (TxConfig));
705 tp->chipset = 0; 706 tp->chipset = 0;
706 707
707match: 708match:
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index d768f3d1ac28..4daafe303358 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -58,18 +58,15 @@ static const char *const version =
58 * PCI device identifiers for "new style" Linux PCI Device Drivers 58 * PCI device identifiers for "new style" Linux PCI Device Drivers
59 */ 59 */
60static struct pci_device_id pcnet32_pci_tbl[] = { 60static struct pci_device_id pcnet32_pci_tbl[] = {
61 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, 61 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
62 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 62 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
63 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
64 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
65 63
66 /* 64 /*
67 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have 65 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
68 * the incorrect vendor id. 66 * the incorrect vendor id.
69 */ 67 */
70 { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, 68 { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
71 PCI_ANY_ID, PCI_ANY_ID, 69 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
72 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
73 70
74 { } /* terminate list */ 71 { } /* terminate list */
75}; 72};
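The pcnet32 ID table is rewritten with the PCI_DEVICE() helper, which fills in the vendor and device fields and wildcards the subsystem IDs with PCI_ANY_ID; designated initializers then add class matching only on the entry that needs it. A sketch of the resulting table shape, reusing the AMD/Trident constants visible in the hunk (the example_ names are not from the patch):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
	/* also match by class, for boards carrying a bogus vendor ID */
	{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
	  .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);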
@@ -188,6 +185,23 @@ static int homepna[MAX_UNITS];
188 185
189#define PCNET32_TOTAL_SIZE 0x20 186#define PCNET32_TOTAL_SIZE 0x20
190 187
188#define CSR0 0
189#define CSR0_INIT 0x1
190#define CSR0_START 0x2
191#define CSR0_STOP 0x4
192#define CSR0_TXPOLL 0x8
193#define CSR0_INTEN 0x40
194#define CSR0_IDON 0x0100
195#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
196#define PCNET32_INIT_LOW 1
197#define PCNET32_INIT_HIGH 2
198#define CSR3 3
199#define CSR4 4
200#define CSR5 5
201#define CSR5_SUSPEND 0x0001
202#define CSR15 15
203#define PCNET32_MC_FILTER 8
204
191/* The PCNET32 Rx and Tx ring descriptors. */ 205/* The PCNET32 Rx and Tx ring descriptors. */
192struct pcnet32_rx_head { 206struct pcnet32_rx_head {
193 u32 base; 207 u32 base;
@@ -277,7 +291,6 @@ struct pcnet32_private {
277 u32 phymask; 291 u32 phymask;
278}; 292};
279 293
280static void pcnet32_probe_vlbus(void);
281static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); 294static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
282static int pcnet32_probe1(unsigned long, int, struct pci_dev *); 295static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
283static int pcnet32_open(struct net_device *); 296static int pcnet32_open(struct net_device *);
@@ -419,6 +432,238 @@ static struct pcnet32_access pcnet32_dwio = {
419 .reset = pcnet32_dwio_reset 432 .reset = pcnet32_dwio_reset
420}; 433};
421 434
435static void pcnet32_netif_stop(struct net_device *dev)
436{
437 dev->trans_start = jiffies;
438 netif_poll_disable(dev);
439 netif_tx_disable(dev);
440}
441
442static void pcnet32_netif_start(struct net_device *dev)
443{
444 netif_wake_queue(dev);
445 netif_poll_enable(dev);
446}
447
448/*
449 * Allocate space for the new sized tx ring.
450 * Free old resources
451 * Save new resources.
452 * Any failure keeps old resources.
453 * Must be called with lp->lock held.
454 */
455static void pcnet32_realloc_tx_ring(struct net_device *dev,
456 struct pcnet32_private *lp,
457 unsigned int size)
458{
459 dma_addr_t new_ring_dma_addr;
460 dma_addr_t *new_dma_addr_list;
461 struct pcnet32_tx_head *new_tx_ring;
462 struct sk_buff **new_skb_list;
463
464 pcnet32_purge_tx_ring(dev);
465
466 new_tx_ring = pci_alloc_consistent(lp->pci_dev,
467 sizeof(struct pcnet32_tx_head) *
468 (1 << size),
469 &new_ring_dma_addr);
470 if (new_tx_ring == NULL) {
471 if (netif_msg_drv(lp))
472 printk("\n" KERN_ERR
473 "%s: Consistent memory allocation failed.\n",
474 dev->name);
475 return;
476 }
477 memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
478
479 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
480 GFP_ATOMIC);
481 if (!new_dma_addr_list) {
482 if (netif_msg_drv(lp))
483 printk("\n" KERN_ERR
484 "%s: Memory allocation failed.\n", dev->name);
485 goto free_new_tx_ring;
486 }
487
488 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
489 GFP_ATOMIC);
490 if (!new_skb_list) {
491 if (netif_msg_drv(lp))
492 printk("\n" KERN_ERR
493 "%s: Memory allocation failed.\n", dev->name);
494 goto free_new_lists;
495 }
496
497 kfree(lp->tx_skbuff);
498 kfree(lp->tx_dma_addr);
499 pci_free_consistent(lp->pci_dev,
500 sizeof(struct pcnet32_tx_head) *
501 lp->tx_ring_size, lp->tx_ring,
502 lp->tx_ring_dma_addr);
503
504 lp->tx_ring_size = (1 << size);
505 lp->tx_mod_mask = lp->tx_ring_size - 1;
506 lp->tx_len_bits = (size << 12);
507 lp->tx_ring = new_tx_ring;
508 lp->tx_ring_dma_addr = new_ring_dma_addr;
509 lp->tx_dma_addr = new_dma_addr_list;
510 lp->tx_skbuff = new_skb_list;
511 return;
512
513 free_new_lists:
514 kfree(new_dma_addr_list);
515 free_new_tx_ring:
516 pci_free_consistent(lp->pci_dev,
517 sizeof(struct pcnet32_tx_head) *
518 (1 << size),
519 new_tx_ring,
520 new_ring_dma_addr);
521 return;
522}
523
524/*
525 * Allocate space for the new sized rx ring.
526 * Re-use old receive buffers.
527 * alloc extra buffers
528 * free unneeded buffers
529 * free unneeded buffers
530 * Save new resources.
531 * Any failure keeps old resources.
532 * Must be called with lp->lock held.
533 */
534static void pcnet32_realloc_rx_ring(struct net_device *dev,
535 struct pcnet32_private *lp,
536 unsigned int size)
537{
538 dma_addr_t new_ring_dma_addr;
539 dma_addr_t *new_dma_addr_list;
540 struct pcnet32_rx_head *new_rx_ring;
541 struct sk_buff **new_skb_list;
542 int new, overlap;
543
544 new_rx_ring = pci_alloc_consistent(lp->pci_dev,
545 sizeof(struct pcnet32_rx_head) *
546 (1 << size),
547 &new_ring_dma_addr);
548 if (new_rx_ring == NULL) {
549 if (netif_msg_drv(lp))
550 printk("\n" KERN_ERR
551 "%s: Consistent memory allocation failed.\n",
552 dev->name);
553 return;
554 }
555 memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
556
557 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
558 GFP_ATOMIC);
559 if (!new_dma_addr_list) {
560 if (netif_msg_drv(lp))
561 printk("\n" KERN_ERR
562 "%s: Memory allocation failed.\n", dev->name);
563 goto free_new_rx_ring;
564 }
565
566 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
567 GFP_ATOMIC);
568 if (!new_skb_list) {
569 if (netif_msg_drv(lp))
570 printk("\n" KERN_ERR
571 "%s: Memory allocation failed.\n", dev->name);
572 goto free_new_lists;
573 }
574
575 /* first copy the current receive buffers */
576 overlap = min(size, lp->rx_ring_size);
577 for (new = 0; new < overlap; new++) {
578 new_rx_ring[new] = lp->rx_ring[new];
579 new_dma_addr_list[new] = lp->rx_dma_addr[new];
580 new_skb_list[new] = lp->rx_skbuff[new];
581 }
582 /* now allocate any new buffers needed */
583 for (; new < size; new++ ) {
584 struct sk_buff *rx_skbuff;
585 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
586 if (!(rx_skbuff = new_skb_list[new])) {
587 /* keep the original lists and buffers */
588 if (netif_msg_drv(lp))
589 printk(KERN_ERR
590 "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
591 dev->name);
592 goto free_all_new;
593 }
594 skb_reserve(rx_skbuff, 2);
595
596 new_dma_addr_list[new] =
597 pci_map_single(lp->pci_dev, rx_skbuff->data,
598 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
599 new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]);
600 new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
601 new_rx_ring[new].status = le16_to_cpu(0x8000);
602 }
603 /* and free any unneeded buffers */
604 for (; new < lp->rx_ring_size; new++) {
605 if (lp->rx_skbuff[new]) {
606 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
607 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
608 dev_kfree_skb(lp->rx_skbuff[new]);
609 }
610 }
611
612 kfree(lp->rx_skbuff);
613 kfree(lp->rx_dma_addr);
614 pci_free_consistent(lp->pci_dev,
615 sizeof(struct pcnet32_rx_head) *
616 lp->rx_ring_size, lp->rx_ring,
617 lp->rx_ring_dma_addr);
618
619 lp->rx_ring_size = (1 << size);
620 lp->rx_mod_mask = lp->rx_ring_size - 1;
621 lp->rx_len_bits = (size << 4);
622 lp->rx_ring = new_rx_ring;
623 lp->rx_ring_dma_addr = new_ring_dma_addr;
624 lp->rx_dma_addr = new_dma_addr_list;
625 lp->rx_skbuff = new_skb_list;
626 return;
627
628 free_all_new:
629 for (; --new >= lp->rx_ring_size; ) {
630 if (new_skb_list[new]) {
631 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
632 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
633 dev_kfree_skb(new_skb_list[new]);
634 }
635 }
636 kfree(new_skb_list);
637 free_new_lists:
638 kfree(new_dma_addr_list);
639 free_new_rx_ring:
640 pci_free_consistent(lp->pci_dev,
641 sizeof(struct pcnet32_rx_head) *
642 (1 << size),
643 new_rx_ring,
644 new_ring_dma_addr);
645 return;
646}
647
648static void pcnet32_purge_rx_ring(struct net_device *dev)
649{
650 struct pcnet32_private *lp = dev->priv;
651 int i;
652
653 /* free all allocated skbuffs */
654 for (i = 0; i < lp->rx_ring_size; i++) {
655 lp->rx_ring[i].status = 0; /* CPU owns buffer */
656 wmb(); /* Make sure adapter sees owner change */
657 if (lp->rx_skbuff[i]) {
658 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
659 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
660 dev_kfree_skb_any(lp->rx_skbuff[i]);
661 }
662 lp->rx_skbuff[i] = NULL;
663 lp->rx_dma_addr[i] = 0;
664 }
665}
666
422#ifdef CONFIG_NET_POLL_CONTROLLER 667#ifdef CONFIG_NET_POLL_CONTROLLER
423static void pcnet32_poll_controller(struct net_device *dev) 668static void pcnet32_poll_controller(struct net_device *dev)
424{ 669{
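The realloc helpers added above take the ring length as its base-2 logarithm (they allocate 1 << size descriptors) and only free the old ring after every new allocation has succeeded, so a failure leaves the device running on its previous rings. The set_ringparam hunk further below rounds the user's request up to the next power of two with a floor of four entries; the stand-alone program below mirrors that rounding logic (the upper bound is an assumed value, not the driver's):

#include <stdio.h>

#define LOG_MAX_BUFFERS 9	/* assumed cap of 512 descriptors */

static unsigned int round_to_log2(unsigned int requested)
{
	unsigned int i;

	if (requested > (1u << LOG_MAX_BUFFERS))	/* clamp, like min() in the patch */
		requested = 1u << LOG_MAX_BUFFERS;
	for (i = 2; i <= LOG_MAX_BUFFERS; i++)		/* start at 2 => minimum of 4 */
		if (requested <= (1u << i))
			break;
	return i;
}

int main(void)
{
	unsigned int req = 100;
	unsigned int log = round_to_log2(req);

	printf("request %u -> %u entries (log2 = %u)\n", req, 1u << log, log);
	return 0;
}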
@@ -519,10 +764,10 @@ static void pcnet32_get_ringparam(struct net_device *dev,
519{ 764{
520 struct pcnet32_private *lp = dev->priv; 765 struct pcnet32_private *lp = dev->priv;
521 766
522 ering->tx_max_pending = TX_MAX_RING_SIZE - 1; 767 ering->tx_max_pending = TX_MAX_RING_SIZE;
523 ering->tx_pending = lp->tx_ring_size - 1; 768 ering->tx_pending = lp->tx_ring_size;
524 ering->rx_max_pending = RX_MAX_RING_SIZE - 1; 769 ering->rx_max_pending = RX_MAX_RING_SIZE;
525 ering->rx_pending = lp->rx_ring_size - 1; 770 ering->rx_pending = lp->rx_ring_size;
526} 771}
527 772
528static int pcnet32_set_ringparam(struct net_device *dev, 773static int pcnet32_set_ringparam(struct net_device *dev,
@@ -530,56 +775,53 @@ static int pcnet32_set_ringparam(struct net_device *dev,
530{ 775{
531 struct pcnet32_private *lp = dev->priv; 776 struct pcnet32_private *lp = dev->priv;
532 unsigned long flags; 777 unsigned long flags;
778 unsigned int size;
779 ulong ioaddr = dev->base_addr;
533 int i; 780 int i;
534 781
535 if (ering->rx_mini_pending || ering->rx_jumbo_pending) 782 if (ering->rx_mini_pending || ering->rx_jumbo_pending)
536 return -EINVAL; 783 return -EINVAL;
537 784
538 if (netif_running(dev)) 785 if (netif_running(dev))
539 pcnet32_close(dev); 786 pcnet32_netif_stop(dev);
540 787
541 spin_lock_irqsave(&lp->lock, flags); 788 spin_lock_irqsave(&lp->lock, flags);
542 pcnet32_free_ring(dev); 789 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
543 lp->tx_ring_size = 790
544 min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); 791 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
545 lp->rx_ring_size =
546 min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
547 792
548 /* set the minimum ring size to 4, to allow the loopback test to work 793 /* set the minimum ring size to 4, to allow the loopback test to work
549 * unchanged. 794 * unchanged.
550 */ 795 */
551 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { 796 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
552 if (lp->tx_ring_size <= (1 << i)) 797 if (size <= (1 << i))
553 break; 798 break;
554 } 799 }
555 lp->tx_ring_size = (1 << i); 800 if ((1 << i) != lp->tx_ring_size)
556 lp->tx_mod_mask = lp->tx_ring_size - 1; 801 pcnet32_realloc_tx_ring(dev, lp, i);
557 lp->tx_len_bits = (i << 12); 802
558 803 size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
559 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { 804 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
560 if (lp->rx_ring_size <= (1 << i)) 805 if (size <= (1 << i))
561 break; 806 break;
562 } 807 }
563 lp->rx_ring_size = (1 << i); 808 if ((1 << i) != lp->rx_ring_size)
564 lp->rx_mod_mask = lp->rx_ring_size - 1; 809 pcnet32_realloc_rx_ring(dev, lp, i);
565 lp->rx_len_bits = (i << 4); 810
811 dev->weight = lp->rx_ring_size / 2;
566 812
567 if (pcnet32_alloc_ring(dev, dev->name)) { 813 if (netif_running(dev)) {
568 pcnet32_free_ring(dev); 814 pcnet32_netif_start(dev);
569 spin_unlock_irqrestore(&lp->lock, flags); 815 pcnet32_restart(dev, CSR0_NORMAL);
570 return -ENOMEM;
571 } 816 }
572 817
573 spin_unlock_irqrestore(&lp->lock, flags); 818 spin_unlock_irqrestore(&lp->lock, flags);
574 819
575 if (pcnet32_debug & NETIF_MSG_DRV) 820 if (netif_msg_drv(lp))
576 printk(KERN_INFO PFX 821 printk(KERN_INFO
577 "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name, 822 "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
578 lp->rx_ring_size, lp->tx_ring_size); 823 lp->rx_ring_size, lp->tx_ring_size);
579 824
580 if (netif_running(dev))
581 pcnet32_open(dev);
582
583 return 0; 825 return 0;
584} 826}
585 827
@@ -633,29 +875,27 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
633 unsigned long flags; 875 unsigned long flags;
634 unsigned long ticks; 876 unsigned long ticks;
635 877
636 *data1 = 1; /* status of test, default to fail */
637 rc = 1; /* default to fail */ 878 rc = 1; /* default to fail */
638 879
639 if (netif_running(dev)) 880 if (netif_running(dev))
640 pcnet32_close(dev); 881 pcnet32_close(dev);
641 882
642 spin_lock_irqsave(&lp->lock, flags); 883 spin_lock_irqsave(&lp->lock, flags);
884 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
885
886 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
643 887
644 /* Reset the PCNET32 */ 888 /* Reset the PCNET32 */
645 lp->a.reset(ioaddr); 889 lp->a.reset(ioaddr);
890 lp->a.write_csr(ioaddr, CSR4, 0x0915);
646 891
647 /* switch pcnet32 to 32bit mode */ 892 /* switch pcnet32 to 32bit mode */
648 lp->a.write_bcr(ioaddr, 20, 2); 893 lp->a.write_bcr(ioaddr, 20, 2);
649 894
650 lp->init_block.mode =
651 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
652 lp->init_block.filter[0] = 0;
653 lp->init_block.filter[1] = 0;
654
655 /* purge & init rings but don't actually restart */ 895 /* purge & init rings but don't actually restart */
656 pcnet32_restart(dev, 0x0000); 896 pcnet32_restart(dev, 0x0000);
657 897
658 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ 898 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
659 899
660 /* Initialize Transmit buffers. */ 900 /* Initialize Transmit buffers. */
661 size = data_len + 15; 901 size = data_len + 15;
@@ -697,14 +937,15 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
697 } 937 }
698 } 938 }
699 939
700 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ 940 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
701 x = x | 0x0002; 941 a->write_bcr(ioaddr, 32, x | 0x0002);
702 a->write_bcr(ioaddr, 32, x);
703 942
704 lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ 943 /* set int loopback in CSR15 */
944 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
945 lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
705 946
706 teststatus = le16_to_cpu(0x8000); 947 teststatus = le16_to_cpu(0x8000);
707 lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ 948 lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
708 949
709 /* Check status of descriptors */ 950 /* Check status of descriptors */
710 for (x = 0; x < numbuffs; x++) { 951 for (x = 0; x < numbuffs; x++) {
@@ -712,7 +953,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
712 rmb(); 953 rmb();
713 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { 954 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
714 spin_unlock_irqrestore(&lp->lock, flags); 955 spin_unlock_irqrestore(&lp->lock, flags);
715 mdelay(1); 956 msleep(1);
716 spin_lock_irqsave(&lp->lock, flags); 957 spin_lock_irqsave(&lp->lock, flags);
717 rmb(); 958 rmb();
718 ticks++; 959 ticks++;
@@ -725,7 +966,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
725 } 966 }
726 } 967 }
727 968
728 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ 969 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
729 wmb(); 970 wmb();
730 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { 971 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
731 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); 972 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
@@ -758,25 +999,24 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
758 } 999 }
759 x++; 1000 x++;
760 } 1001 }
761 if (!rc) {
762 *data1 = 0;
763 }
764 1002
765 clean_up: 1003 clean_up:
1004 *data1 = rc;
766 pcnet32_purge_tx_ring(dev); 1005 pcnet32_purge_tx_ring(dev);
767 x = a->read_csr(ioaddr, 15) & 0xFFFF;
768 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
769 1006
770 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ 1007 x = a->read_csr(ioaddr, CSR15);
771 x = x & ~0x0002; 1008 a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
772 a->write_bcr(ioaddr, 32, x);
773 1009
774 spin_unlock_irqrestore(&lp->lock, flags); 1010 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
1011 a->write_bcr(ioaddr, 32, (x & ~0x0002));
775 1012
776 if (netif_running(dev)) { 1013 if (netif_running(dev)) {
1014 spin_unlock_irqrestore(&lp->lock, flags);
777 pcnet32_open(dev); 1015 pcnet32_open(dev);
778 } else { 1016 } else {
1017 pcnet32_purge_rx_ring(dev);
779 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ 1018 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1019 spin_unlock_irqrestore(&lp->lock, flags);
780 } 1020 }
781 1021
782 return (rc); 1022 return (rc);
@@ -839,6 +1079,43 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
839 return 0; 1079 return 0;
840} 1080}
841 1081
1082/*
1083 * lp->lock must be held.
1084 */
1085static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
1086 int can_sleep)
1087{
1088 int csr5;
1089 struct pcnet32_private *lp = dev->priv;
1090 struct pcnet32_access *a = &lp->a;
1091 ulong ioaddr = dev->base_addr;
1092 int ticks;
1093
1094 /* set SUSPEND (SPND) - CSR5 bit 0 */
1095 csr5 = a->read_csr(ioaddr, CSR5);
1096 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
1097
1098 /* poll waiting for bit to be set */
1099 ticks = 0;
1100 while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
1101 spin_unlock_irqrestore(&lp->lock, *flags);
1102 if (can_sleep)
1103 msleep(1);
1104 else
1105 mdelay(1);
1106 spin_lock_irqsave(&lp->lock, *flags);
1107 ticks++;
1108 if (ticks > 200) {
1109 if (netif_msg_hw(lp))
1110 printk(KERN_DEBUG
1111 "%s: Error getting into suspend!\n",
1112 dev->name);
1113 return 0;
1114 }
1115 }
1116 return 1;
1117}
1118
842#define PCNET32_REGS_PER_PHY 32 1119#define PCNET32_REGS_PER_PHY 32
843#define PCNET32_MAX_PHYS 32 1120#define PCNET32_MAX_PHYS 32
844static int pcnet32_get_regs_len(struct net_device *dev) 1121static int pcnet32_get_regs_len(struct net_device *dev)
@@ -857,32 +1134,13 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
857 struct pcnet32_private *lp = dev->priv; 1134 struct pcnet32_private *lp = dev->priv;
858 struct pcnet32_access *a = &lp->a; 1135 struct pcnet32_access *a = &lp->a;
859 ulong ioaddr = dev->base_addr; 1136 ulong ioaddr = dev->base_addr;
860 int ticks;
861 unsigned long flags; 1137 unsigned long flags;
862 1138
863 spin_lock_irqsave(&lp->lock, flags); 1139 spin_lock_irqsave(&lp->lock, flags);
864 1140
865 csr0 = a->read_csr(ioaddr, 0); 1141 csr0 = a->read_csr(ioaddr, CSR0);
866 if (!(csr0 & 0x0004)) { /* If not stopped */ 1142 if (!(csr0 & CSR0_STOP)) /* If not stopped */
867 /* set SUSPEND (SPND) - CSR5 bit 0 */ 1143 pcnet32_suspend(dev, &flags, 1);
868 a->write_csr(ioaddr, 5, 0x0001);
869
870 /* poll waiting for bit to be set */
871 ticks = 0;
872 while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
873 spin_unlock_irqrestore(&lp->lock, flags);
874 mdelay(1);
875 spin_lock_irqsave(&lp->lock, flags);
876 ticks++;
877 if (ticks > 200) {
878 if (netif_msg_hw(lp))
879 printk(KERN_DEBUG
880 "%s: Error getting into suspend!\n",
881 dev->name);
882 break;
883 }
884 }
885 }
886 1144
887 /* read address PROM */ 1145 /* read address PROM */
888 for (i = 0; i < 16; i += 2) 1146 for (i = 0; i < 16; i += 2)
@@ -919,9 +1177,12 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
919 } 1177 }
920 } 1178 }
921 1179
922 if (!(csr0 & 0x0004)) { /* If not stopped */ 1180 if (!(csr0 & CSR0_STOP)) { /* If not stopped */
1181 int csr5;
1182
923 /* clear SUSPEND (SPND) - CSR5 bit 0 */ 1183 /* clear SUSPEND (SPND) - CSR5 bit 0 */
924 a->write_csr(ioaddr, 5, 0x0000); 1184 csr5 = a->read_csr(ioaddr, CSR5);
1185 a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
925 } 1186 }
926 1187
927 spin_unlock_irqrestore(&lp->lock, flags); 1188 spin_unlock_irqrestore(&lp->lock, flags);
@@ -952,7 +1213,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
952/* only probes for non-PCI devices, the rest are handled by 1213/* only probes for non-PCI devices, the rest are handled by
953 * pci_register_driver via pcnet32_probe_pci */ 1214 * pci_register_driver via pcnet32_probe_pci */
954 1215
955static void __devinit pcnet32_probe_vlbus(void) 1216static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
956{ 1217{
957 unsigned int *port, ioaddr; 1218 unsigned int *port, ioaddr;
958 1219
@@ -1436,7 +1697,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name)
1436 lp->tx_ring_size, 1697 lp->tx_ring_size,
1437 &lp->tx_ring_dma_addr); 1698 &lp->tx_ring_dma_addr);
1438 if (lp->tx_ring == NULL) { 1699 if (lp->tx_ring == NULL) {
1439 if (pcnet32_debug & NETIF_MSG_DRV) 1700 if (netif_msg_drv(lp))
1440 printk("\n" KERN_ERR PFX 1701 printk("\n" KERN_ERR PFX
1441 "%s: Consistent memory allocation failed.\n", 1702 "%s: Consistent memory allocation failed.\n",
1442 name); 1703 name);
@@ -1448,52 +1709,48 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name)
1448 lp->rx_ring_size, 1709 lp->rx_ring_size,
1449 &lp->rx_ring_dma_addr); 1710 &lp->rx_ring_dma_addr);
1450 if (lp->rx_ring == NULL) { 1711 if (lp->rx_ring == NULL) {
1451 if (pcnet32_debug & NETIF_MSG_DRV) 1712 if (netif_msg_drv(lp))
1452 printk("\n" KERN_ERR PFX 1713 printk("\n" KERN_ERR PFX
1453 "%s: Consistent memory allocation failed.\n", 1714 "%s: Consistent memory allocation failed.\n",
1454 name); 1715 name);
1455 return -ENOMEM; 1716 return -ENOMEM;
1456 } 1717 }
1457 1718
1458 lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, 1719 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
1459 GFP_ATOMIC); 1720 GFP_ATOMIC);
1460 if (!lp->tx_dma_addr) { 1721 if (!lp->tx_dma_addr) {
1461 if (pcnet32_debug & NETIF_MSG_DRV) 1722 if (netif_msg_drv(lp))
1462 printk("\n" KERN_ERR PFX 1723 printk("\n" KERN_ERR PFX
1463 "%s: Memory allocation failed.\n", name); 1724 "%s: Memory allocation failed.\n", name);
1464 return -ENOMEM; 1725 return -ENOMEM;
1465 } 1726 }
1466 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
1467 1727
1468 lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, 1728 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
1469 GFP_ATOMIC); 1729 GFP_ATOMIC);
1470 if (!lp->rx_dma_addr) { 1730 if (!lp->rx_dma_addr) {
1471 if (pcnet32_debug & NETIF_MSG_DRV) 1731 if (netif_msg_drv(lp))
1472 printk("\n" KERN_ERR PFX 1732 printk("\n" KERN_ERR PFX
1473 "%s: Memory allocation failed.\n", name); 1733 "%s: Memory allocation failed.\n", name);
1474 return -ENOMEM; 1734 return -ENOMEM;
1475 } 1735 }
1476 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
1477 1736
1478 lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, 1737 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
1479 GFP_ATOMIC); 1738 GFP_ATOMIC);
1480 if (!lp->tx_skbuff) { 1739 if (!lp->tx_skbuff) {
1481 if (pcnet32_debug & NETIF_MSG_DRV) 1740 if (netif_msg_drv(lp))
1482 printk("\n" KERN_ERR PFX 1741 printk("\n" KERN_ERR PFX
1483 "%s: Memory allocation failed.\n", name); 1742 "%s: Memory allocation failed.\n", name);
1484 return -ENOMEM; 1743 return -ENOMEM;
1485 } 1744 }
1486 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
1487 1745
1488 lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, 1746 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
1489 GFP_ATOMIC); 1747 GFP_ATOMIC);
1490 if (!lp->rx_skbuff) { 1748 if (!lp->rx_skbuff) {
1491 if (pcnet32_debug & NETIF_MSG_DRV) 1749 if (netif_msg_drv(lp))
1492 printk("\n" KERN_ERR PFX 1750 printk("\n" KERN_ERR PFX
1493 "%s: Memory allocation failed.\n", name); 1751 "%s: Memory allocation failed.\n", name);
1494 return -ENOMEM; 1752 return -ENOMEM;
1495 } 1753 }
1496 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
1497 1754
1498 return 0; 1755 return 0;
1499} 1756}
@@ -1757,16 +2014,7 @@ static int pcnet32_open(struct net_device *dev)
1757 2014
1758 err_free_ring: 2015 err_free_ring:
1759 /* free any allocated skbuffs */ 2016 /* free any allocated skbuffs */
1760 for (i = 0; i < lp->rx_ring_size; i++) { 2017 pcnet32_purge_rx_ring(dev);
1761 lp->rx_ring[i].status = 0;
1762 if (lp->rx_skbuff[i]) {
1763 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
1764 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
1765 dev_kfree_skb(lp->rx_skbuff[i]);
1766 }
1767 lp->rx_skbuff[i] = NULL;
1768 lp->rx_dma_addr[i] = 0;
1769 }
1770 2018
1771 /* 2019 /*
1772 * Switch back to 16bit mode to avoid problems with dumb 2020 * Switch back to 16bit mode to avoid problems with dumb
@@ -2348,7 +2596,6 @@ static int pcnet32_close(struct net_device *dev)
2348{ 2596{
2349 unsigned long ioaddr = dev->base_addr; 2597 unsigned long ioaddr = dev->base_addr;
2350 struct pcnet32_private *lp = dev->priv; 2598 struct pcnet32_private *lp = dev->priv;
2351 int i;
2352 unsigned long flags; 2599 unsigned long flags;
2353 2600
2354 del_timer_sync(&lp->watchdog_timer); 2601 del_timer_sync(&lp->watchdog_timer);
@@ -2379,31 +2626,8 @@ static int pcnet32_close(struct net_device *dev)
2379 2626
2380 spin_lock_irqsave(&lp->lock, flags); 2627 spin_lock_irqsave(&lp->lock, flags);
2381 2628
2382 /* free all allocated skbuffs */ 2629 pcnet32_purge_rx_ring(dev);
2383 for (i = 0; i < lp->rx_ring_size; i++) { 2630 pcnet32_purge_tx_ring(dev);
2384 lp->rx_ring[i].status = 0;
2385 wmb(); /* Make sure adapter sees owner change */
2386 if (lp->rx_skbuff[i]) {
2387 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
2388 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
2389 dev_kfree_skb(lp->rx_skbuff[i]);
2390 }
2391 lp->rx_skbuff[i] = NULL;
2392 lp->rx_dma_addr[i] = 0;
2393 }
2394
2395 for (i = 0; i < lp->tx_ring_size; i++) {
2396 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2397 wmb(); /* Make sure adapter sees owner change */
2398 if (lp->tx_skbuff[i]) {
2399 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
2400 lp->tx_skbuff[i]->len,
2401 PCI_DMA_TODEVICE);
2402 dev_kfree_skb(lp->tx_skbuff[i]);
2403 }
2404 lp->tx_skbuff[i] = NULL;
2405 lp->tx_dma_addr[i] = 0;
2406 }
2407 2631
2408 spin_unlock_irqrestore(&lp->lock, flags); 2632 spin_unlock_irqrestore(&lp->lock, flags);
2409 2633
@@ -2433,6 +2657,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
2433 volatile struct pcnet32_init_block *ib = &lp->init_block; 2657 volatile struct pcnet32_init_block *ib = &lp->init_block;
2434 volatile u16 *mcast_table = (u16 *) & ib->filter; 2658 volatile u16 *mcast_table = (u16 *) & ib->filter;
2435 struct dev_mc_list *dmi = dev->mc_list; 2659 struct dev_mc_list *dmi = dev->mc_list;
2660 unsigned long ioaddr = dev->base_addr;
2436 char *addrs; 2661 char *addrs;
2437 int i; 2662 int i;
2438 u32 crc; 2663 u32 crc;
@@ -2441,6 +2666,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
2441 if (dev->flags & IFF_ALLMULTI) { 2666 if (dev->flags & IFF_ALLMULTI) {
2442 ib->filter[0] = 0xffffffff; 2667 ib->filter[0] = 0xffffffff;
2443 ib->filter[1] = 0xffffffff; 2668 ib->filter[1] = 0xffffffff;
2669 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
2670 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
2671 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
2672 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
2444 return; 2673 return;
2445 } 2674 }
2446 /* clear the multicast filter */ 2675 /* clear the multicast filter */
@@ -2462,6 +2691,9 @@ static void pcnet32_load_multicast(struct net_device *dev)
2462 le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | 2691 le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
2463 (1 << (crc & 0xf))); 2692 (1 << (crc & 0xf)));
2464 } 2693 }
2694 for (i = 0; i < 4; i++)
2695 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
2696 le16_to_cpu(mcast_table[i]));
2465 return; 2697 return;
2466} 2698}
2467 2699
@@ -2472,8 +2704,11 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
2472{ 2704{
2473 unsigned long ioaddr = dev->base_addr, flags; 2705 unsigned long ioaddr = dev->base_addr, flags;
2474 struct pcnet32_private *lp = dev->priv; 2706 struct pcnet32_private *lp = dev->priv;
2707 int csr15, suspended;
2475 2708
2476 spin_lock_irqsave(&lp->lock, flags); 2709 spin_lock_irqsave(&lp->lock, flags);
2710 suspended = pcnet32_suspend(dev, &flags, 0);
2711 csr15 = lp->a.read_csr(ioaddr, CSR15);
2477 if (dev->flags & IFF_PROMISC) { 2712 if (dev->flags & IFF_PROMISC) {
2478 /* Log any net taps. */ 2713 /* Log any net taps. */
2479 if (netif_msg_hw(lp)) 2714 if (netif_msg_hw(lp))
@@ -2482,15 +2717,24 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
2482 lp->init_block.mode = 2717 lp->init_block.mode =
2483 le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 2718 le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2484 7); 2719 7);
2720 lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
2485 } else { 2721 } else {
2486 lp->init_block.mode = 2722 lp->init_block.mode =
2487 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); 2723 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
2724 lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
2488 pcnet32_load_multicast(dev); 2725 pcnet32_load_multicast(dev);
2489 } 2726 }
2490 2727
2491 lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ 2728 if (suspended) {
2492 pcnet32_restart(dev, 0x0042); /* Resume normal operation */ 2729 int csr5;
2493 netif_wake_queue(dev); 2730 /* clear SUSPEND (SPND) - CSR5 bit 0 */
2731 csr5 = lp->a.read_csr(ioaddr, CSR5);
2732 lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
2733 } else {
2734 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
2735 pcnet32_restart(dev, CSR0_NORMAL);
2736 netif_wake_queue(dev);
2737 }
2494 2738
2495 spin_unlock_irqrestore(&lp->lock, flags); 2739 spin_unlock_irqrestore(&lp->lock, flags);
2496} 2740}
@@ -2730,7 +2974,7 @@ static int __init pcnet32_init_module(void)
2730 2974
2731 /* should we find any remaining VLbus devices ? */ 2975 /* should we find any remaining VLbus devices ? */
2732 if (pcnet32vlb) 2976 if (pcnet32vlb)
2733 pcnet32_probe_vlbus(); 2977 pcnet32_probe_vlbus(pcnet32_portlist);
2734 2978
2735 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) 2979 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
2736 printk(KERN_INFO PFX "%d cards_found.\n", cards_found); 2980 printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
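
Several of the pcnet32 hunks above rely on the new pcnet32_suspend() helper: set the SPND request bit in CSR5, then poll until the chip reports it, dropping the lock around each 1 ms sleep and giving up after 200 ticks. A compilable userspace sketch of that poll-with-timeout shape, with a toy register model standing in for the real CSR accessors (everything here is illustrative, not driver API):

#include <stdio.h>
#include <stdbool.h>

/* Toy model of CSR5: writing the SUSPEND bit is a request; the chip
 * only reports the bit as set a few reads later. */
static unsigned int fake_csr5;
static int suspend_requested;
static int reads_since_request;

static unsigned int read_csr5(void)
{
	if (suspend_requested && ++reads_since_request >= 3)
		fake_csr5 |= 0x0001;		/* SUSPEND acknowledged */
	return fake_csr5;
}

static void write_csr5(unsigned int val)
{
	if (val & 0x0001)
		suspend_requested = 1;
	fake_csr5 = val & ~0x0001u;
}

/* Same shape as pcnet32_suspend(): request the bit, then poll
 * (sleeping in the real driver) until it reads back set. */
static bool chip_suspend(int max_ticks)
{
	int ticks = 0;

	write_csr5(read_csr5() | 0x0001);
	while (!(read_csr5() & 0x0001)) {
		/* real driver: drop lp->lock, msleep(1) or mdelay(1), retake it */
		if (++ticks > max_ticks)
			return false;		/* gave up after the tick limit */
	}
	return true;
}

int main(void)
{
	printf("suspend %s\n", chip_suspend(200) ? "ok" : "timed out");
	return 0;
}
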
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 3efb715c28dc..ae60e6e4107c 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -103,7 +103,22 @@ static int cis820x_config_intr(struct phy_device *phydev)
103 return err; 103 return err;
104} 104}
105 105
106/* Cicada 820x */ 106/* Cicada 8201, a.k.a Vitesse VSC8201 */
107static struct phy_driver cis8201_driver = {
108 .phy_id = 0x000fc410,
109 .name = "Cicada Cis8201",
110 .phy_id_mask = 0x000ffff0,
111 .features = PHY_GBIT_FEATURES,
112 .flags = PHY_HAS_INTERRUPT,
113 .config_init = &cis820x_config_init,
114 .config_aneg = &genphy_config_aneg,
115 .read_status = &genphy_read_status,
116 .ack_interrupt = &cis820x_ack_interrupt,
117 .config_intr = &cis820x_config_intr,
118 .driver = { .owner = THIS_MODULE,},
119};
120
121/* Cicada 8204 */
107static struct phy_driver cis8204_driver = { 122static struct phy_driver cis8204_driver = {
108 .phy_id = 0x000fc440, 123 .phy_id = 0x000fc440,
109 .name = "Cicada Cis8204", 124 .name = "Cicada Cis8204",
@@ -118,15 +133,30 @@ static struct phy_driver cis8204_driver = {
118 .driver = { .owner = THIS_MODULE,}, 133 .driver = { .owner = THIS_MODULE,},
119}; 134};
120 135
121static int __init cis8204_init(void) 136static int __init cicada_init(void)
122{ 137{
123 return phy_driver_register(&cis8204_driver); 138 int ret;
139
140 ret = phy_driver_register(&cis8204_driver);
141 if (ret)
142 goto err1;
143
144 ret = phy_driver_register(&cis8201_driver);
145 if (ret)
146 goto err2;
147 return 0;
148
149err2:
150 phy_driver_unregister(&cis8204_driver);
151err1:
152 return ret;
124} 153}
125 154
126static void __exit cis8204_exit(void) 155static void __exit cicada_exit(void)
127{ 156{
128 phy_driver_unregister(&cis8204_driver); 157 phy_driver_unregister(&cis8204_driver);
158 phy_driver_unregister(&cis8201_driver);
129} 159}
130 160
131module_init(cis8204_init); 161module_init(cicada_init);
132module_exit(cis8204_exit); 162module_exit(cicada_exit);
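
cicada_init() above registers two phy_driver structures and unregisters the first if the second registration fails. The same unwind-on-error shape in a standalone sketch, with stub functions standing in for phy_driver_register()/unregister():

#include <stdio.h>

/* Stand-ins for phy_driver_register()/unregister(); return 0 on success. */
static int register_a(void) { puts("registered A"); return 0; }
static int register_b(void) { puts("registered B"); return 0; }
static void unregister_a(void) { puts("unregistered A"); }

/* Same shape as cicada_init(): undo earlier steps when a later one fails. */
static int init_both(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto err1;

	ret = register_b();
	if (ret)
		goto err2;
	return 0;

err2:
	unregister_a();
err1:
	return ret;
}

int main(void)
{
	return init_both();
}
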
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 16a0ef1b1369..4c2f575faad7 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1406,7 +1406,7 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1406 dev = alloc_etherdev(sizeof (*tp)); 1406 dev = alloc_etherdev(sizeof (*tp));
1407 if (dev == NULL) { 1407 if (dev == NULL) {
1408 if (netif_msg_drv(&debug)) 1408 if (netif_msg_drv(&debug))
1409 printk(KERN_ERR PFX "unable to alloc new ethernet\n"); 1409 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
1410 goto err_out; 1410 goto err_out;
1411 } 1411 }
1412 1412
@@ -1418,10 +1418,8 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1418 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 1418 /* enable device (incl. PCI PM wakeup and hotplug setup) */
1419 rc = pci_enable_device(pdev); 1419 rc = pci_enable_device(pdev);
1420 if (rc < 0) { 1420 if (rc < 0) {
1421 if (netif_msg_probe(tp)) { 1421 if (netif_msg_probe(tp))
1422 printk(KERN_ERR PFX "%s: enable failure\n", 1422 dev_err(&pdev->dev, "enable failure\n");
1423 pci_name(pdev));
1424 }
1425 goto err_out_free_dev; 1423 goto err_out_free_dev;
1426 } 1424 }
1427 1425
@@ -1437,37 +1435,32 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1437 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command); 1435 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
1438 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK; 1436 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
1439 } else { 1437 } else {
1440 if (netif_msg_probe(tp)) { 1438 if (netif_msg_probe(tp))
1441 printk(KERN_ERR PFX 1439 dev_err(&pdev->dev,
1442 "PowerManagement capability not found.\n"); 1440 "PowerManagement capability not found.\n");
1443 }
1444 } 1441 }
1445 1442
1446 /* make sure PCI base addr 1 is MMIO */ 1443 /* make sure PCI base addr 1 is MMIO */
1447 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 1444 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
1448 if (netif_msg_probe(tp)) { 1445 if (netif_msg_probe(tp))
1449 printk(KERN_ERR PFX 1446 dev_err(&pdev->dev,
1450 "region #1 not an MMIO resource, aborting\n"); 1447 "region #1 not an MMIO resource, aborting\n");
1451 }
1452 rc = -ENODEV; 1448 rc = -ENODEV;
1453 goto err_out_mwi; 1449 goto err_out_mwi;
1454 } 1450 }
1455 /* check for weird/broken PCI region reporting */ 1451 /* check for weird/broken PCI region reporting */
1456 if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) { 1452 if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) {
1457 if (netif_msg_probe(tp)) { 1453 if (netif_msg_probe(tp))
1458 printk(KERN_ERR PFX 1454 dev_err(&pdev->dev,
1459 "Invalid PCI region size(s), aborting\n"); 1455 "Invalid PCI region size(s), aborting\n");
1460 }
1461 rc = -ENODEV; 1456 rc = -ENODEV;
1462 goto err_out_mwi; 1457 goto err_out_mwi;
1463 } 1458 }
1464 1459
1465 rc = pci_request_regions(pdev, MODULENAME); 1460 rc = pci_request_regions(pdev, MODULENAME);
1466 if (rc < 0) { 1461 if (rc < 0) {
1467 if (netif_msg_probe(tp)) { 1462 if (netif_msg_probe(tp))
1468 printk(KERN_ERR PFX "%s: could not request regions.\n", 1463 dev_err(&pdev->dev, "could not request regions.\n");
1469 pci_name(pdev));
1470 }
1471 goto err_out_mwi; 1464 goto err_out_mwi;
1472 } 1465 }
1473 1466
@@ -1480,10 +1473,9 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1480 } else { 1473 } else {
1481 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1474 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1482 if (rc < 0) { 1475 if (rc < 0) {
1483 if (netif_msg_probe(tp)) { 1476 if (netif_msg_probe(tp))
1484 printk(KERN_ERR PFX 1477 dev_err(&pdev->dev,
1485 "DMA configuration failed.\n"); 1478 "DMA configuration failed.\n");
1486 }
1487 goto err_out_free_res; 1479 goto err_out_free_res;
1488 } 1480 }
1489 } 1481 }
@@ -1494,7 +1486,7 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1494 ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE); 1486 ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE);
1495 if (ioaddr == NULL) { 1487 if (ioaddr == NULL) {
1496 if (netif_msg_probe(tp)) 1488 if (netif_msg_probe(tp))
1497 printk(KERN_ERR PFX "cannot remap MMIO, aborting\n"); 1489 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
1498 rc = -EIO; 1490 rc = -EIO;
1499 goto err_out_free_res; 1491 goto err_out_free_res;
1500 } 1492 }
@@ -1526,9 +1518,9 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1526 if (i < 0) { 1518 if (i < 0) {
1527 /* Unknown chip: assume array element #0, original RTL-8169 */ 1519 /* Unknown chip: assume array element #0, original RTL-8169 */
1528 if (netif_msg_probe(tp)) { 1520 if (netif_msg_probe(tp)) {
1529 printk(KERN_DEBUG PFX "PCI device %s: " 1521 dev_printk(KERN_DEBUG, &pdev->dev,
1530 "unknown chip version, assuming %s\n", 1522 "unknown chip version, assuming %s\n",
1531 pci_name(pdev), rtl_chip_info[0].name); 1523 rtl_chip_info[0].name);
1532 } 1524 }
1533 i++; 1525 i++;
1534 } 1526 }
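
The r8169 hunks above replace hand-built printk(KERN_ERR PFX "%s: ...", pci_name(pdev)) messages with dev_err(), which supplies the device prefix itself. A rough userspace approximation of that idea, using a toy device structure and a variadic helper as stand-ins for the kernel API:

#include <stdarg.h>
#include <stdio.h>

struct toy_device {
	const char *name;		/* stands in for pci_name()/dev_name() */
};

/* Prefix every message with the device name, as dev_err() does. */
static void toy_dev_err(const struct toy_device *dev, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "%s: ", dev->name);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct toy_device pdev = { "0000:02:00.0" };

	toy_dev_err(&pdev, "enable failure\n");
	toy_dev_err(&pdev, "region #%d not an MMIO resource, aborting\n", 1);
	return 0;
}
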
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index ed1f59901ff4..c0a62b00ffc8 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -22,129 +22,13 @@
22 22
23 Support and updates available at 23 Support and updates available at
24 http://www.scyld.com/network/starfire.html 24 http://www.scyld.com/network/starfire.html
25 [link no longer provides useful info -jgarzik]
25 26
26 -----------------------------------------------------------
27
28 Linux kernel-specific changes:
29
30 LK1.1.1 (jgarzik):
31 - Use PCI driver interface
32 - Fix MOD_xxx races
33 - softnet fixups
34
35 LK1.1.2 (jgarzik):
36 - Merge Becker version 0.15
37
38 LK1.1.3 (Andrew Morton)
39 - Timer cleanups
40
41 LK1.1.4 (jgarzik):
42 - Merge Becker version 1.03
43
44 LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
45 - Support hardware Rx/Tx checksumming
46 - Use the GFP firmware taken from Adaptec's Netware driver
47
48 LK1.2.2 (Ion Badulescu)
49 - Backported to 2.2.x
50
51 LK1.2.3 (Ion Badulescu)
52 - Fix the flaky mdio interface
53 - More compat clean-ups
54
55 LK1.2.4 (Ion Badulescu)
56 - More 2.2.x initialization fixes
57
58 LK1.2.5 (Ion Badulescu)
59 - Several fixes from Manfred Spraul
60
61 LK1.2.6 (Ion Badulescu)
62 - Fixed ifup/ifdown/ifup problem in 2.4.x
63
64 LK1.2.7 (Ion Badulescu)
65 - Removed unused code
66 - Made more functions static and __init
67
68 LK1.2.8 (Ion Badulescu)
69 - Quell bogus error messages, inform about the Tx threshold
70 - Removed #ifdef CONFIG_PCI, this driver is PCI only
71
72 LK1.2.9 (Ion Badulescu)
73 - Merged Jeff Garzik's changes from 2.4.4-pre5
74 - Added 2.2.x compatibility stuff required by the above changes
75
76 LK1.2.9a (Ion Badulescu)
77 - More updates from Jeff Garzik
78
79 LK1.3.0 (Ion Badulescu)
80 - Merged zerocopy support
81
82 LK1.3.1 (Ion Badulescu)
83 - Added ethtool support
84 - Added GPIO (media change) interrupt support
85
86 LK1.3.2 (Ion Badulescu)
87 - Fixed 2.2.x compatibility issues introduced in 1.3.1
88 - Fixed ethtool ioctl returning uninitialized memory
89
90 LK1.3.3 (Ion Badulescu)
91 - Initialize the TxMode register properly
92 - Don't dereference dev->priv after freeing it
93
94 LK1.3.4 (Ion Badulescu)
95 - Fixed initialization timing problems
96 - Fixed interrupt mask definitions
97
98 LK1.3.5 (jgarzik)
99 - ethtool NWAY_RST, GLINK, [GS]MSGLVL support
100
101 LK1.3.6:
102 - Sparc64 support and fixes (Ion Badulescu)
103 - Better stats and error handling (Ion Badulescu)
104 - Use new pci_set_mwi() PCI API function (jgarzik)
105
106 LK1.3.7 (Ion Badulescu)
107 - minimal implementation of tx_timeout()
108 - correctly shutdown the Rx/Tx engines in netdev_close()
109 - added calls to netif_carrier_on/off
110 (patch from Stefan Rompf <srompf@isg.de>)
111 - VLAN support
112
113 LK1.3.8 (Ion Badulescu)
114 - adjust DMA burst size on sparc64
115 - 64-bit support
116 - reworked zerocopy support for 64-bit buffers
117 - working and usable interrupt mitigation/latency
118 - reduced Tx interrupt frequency for lower interrupt overhead
119
120 LK1.3.9 (Ion Badulescu)
121 - bugfix for mcast filter
122 - enable the right kind of Tx interrupts (TxDMADone, not TxDone)
123
124 LK1.4.0 (Ion Badulescu)
125 - NAPI support
126
127 LK1.4.1 (Ion Badulescu)
128 - flush PCI posting buffers after disabling Rx interrupts
129 - put the chip to a D3 slumber on driver unload
130 - added config option to enable/disable NAPI
131
132 LK1.4.2 (Ion Badulescu)
133 - finally added firmware (GPL'ed by Adaptec)
134 - removed compatibility code for 2.2.x
135
136 LK1.4.2.1 (Ion Badulescu)
137 - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
138 - added 32-bit padding to outgoing skb's, removed previous workaround
139
140TODO: - fix forced speed/duplexing code (broken a long time ago, when
141 somebody converted the driver to use the generic MII code)
142 - fix VLAN support
143*/ 27*/
144 28
145#define DRV_NAME "starfire" 29#define DRV_NAME "starfire"
146#define DRV_VERSION "1.03+LK1.4.2.1" 30#define DRV_VERSION "2.0"
147#define DRV_RELDATE "October 3, 2005" 31#define DRV_RELDATE "June 27, 2006"
148 32
149#include <linux/module.h> 33#include <linux/module.h>
150#include <linux/kernel.h> 34#include <linux/kernel.h>
@@ -846,7 +730,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
846 goto err_out_free_netdev; 730 goto err_out_free_netdev;
847 } 731 }
848 732
849 /* ioremap is borken in Linux-2.2.x/sparc64 */
850 base = ioremap(ioaddr, io_size); 733 base = ioremap(ioaddr, io_size);
851 if (!base) { 734 if (!base) {
852 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n", 735 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 643fceae3db5..ac17377b3e9f 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -16,91 +16,13 @@
16 16
17 Support and updates available at 17 Support and updates available at
18 http://www.scyld.com/network/sundance.html 18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
19 20
20
21 Version LK1.01a (jgarzik):
22 - Replace some MII-related magic numbers with constants
23
24 Version LK1.02 (D-Link):
25 - Add new board to PCI ID list
26 - Fix multicast bug
27
28 Version LK1.03 (D-Link):
29 - New Rx scheme, reduce Rx congestion
30 - Option to disable flow control
31
32 Version LK1.04 (D-Link):
33 - Tx timeout recovery
34 - More support for ethtool.
35
36 Version LK1.04a:
37 - Remove unused/constant members from struct pci_id_info
38 (which then allows removal of 'drv_flags' from private struct)
39 (jgarzik)
40 - If no phy is found, fail to load that board (jgarzik)
41 - Always start phy id scan at id 1 to avoid problems (Donald Becker)
42 - Autodetect where mii_preable_required is needed,
43 default to not needed. (Donald Becker)
44
45 Version LK1.04b:
46 - Remove mii_preamble_required module parameter (Donald Becker)
47 - Add per-interface mii_preamble_required (setting is autodetected)
48 (Donald Becker)
49 - Remove unnecessary cast from void pointer (jgarzik)
50 - Re-align comments in private struct (jgarzik)
51
52 Version LK1.04c (jgarzik):
53 - Support bitmapped message levels (NETIF_MSG_xxx), and the
54 two ethtool ioctls that get/set them
55 - Don't hand-code MII ethtool support, use standard API/lib
56
57 Version LK1.04d:
58 - Merge from Donald Becker's sundance.c: (Jason Lunz)
59 * proper support for variably-sized MTUs
60 * default to PIO, to fix chip bugs
61 - Add missing unregister_netdev (Jason Lunz)
62 - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63 - Better rx buf size calculation (Donald Becker)
64
65 Version LK1.05 (D-Link):
66 - Fix DFE-580TX packet drop issue (for DL10050C)
67 - Fix reset_tx logic
68
69 Version LK1.06 (D-Link):
70 - Fix crash while unloading driver
71
72 Versin LK1.06b (D-Link):
73 - New tx scheme, adaptive tx_coalesce
74
75 Version LK1.07 (D-Link):
76 - Fix tx bugs in big-endian machines
77 - Remove unused max_interrupt_work module parameter, the new
78 NAPI-like rx scheme doesn't need it.
79 - Remove redundancy get_stats() in intr_handler(), those
80 I/O access could affect performance in ARM-based system
81 - Add Linux software VLAN support
82
83 Version LK1.08 (Philippe De Muyter phdm@macqel.be):
84 - Fix bug of custom mac address
85 (StationAddr register only accept word write)
86
87 Version LK1.09 (D-Link):
88 - Fix the flowctrl bug.
89 - Set Pause bit in MII ANAR if flow control enabled.
90
91 Version LK1.09a (ICPlus):
92 - Add the delay time in reading the contents of EEPROM
93
94 Version LK1.10 (Philippe De Muyter phdm@macqel.be):
95 - Make 'unblock interface after Tx underrun' work
96
97 Version LK1.11 (Pedro Alejandro Lopez-Valencia palopezv at gmail.com):
98 - Add support for IC Plus Corporation IP100A chipset
99*/ 21*/
100 22
101#define DRV_NAME "sundance" 23#define DRV_NAME "sundance"
102#define DRV_VERSION "1.01+LK1.11" 24#define DRV_VERSION "1.1"
103#define DRV_RELDATE "14-Jun-2006" 25#define DRV_RELDATE "27-Jun-2006"
104 26
105 27
106/* The user-configurable values. 28/* The user-configurable values.
@@ -282,15 +204,15 @@ IVc. Errata
282#define USE_IO_OPS 1 204#define USE_IO_OPS 1
283#endif 205#endif
284 206
285static struct pci_device_id sundance_pci_tbl[] = { 207static const struct pci_device_id sundance_pci_tbl[] = {
286 {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0}, 208 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
287 {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1}, 209 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
288 {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2}, 210 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
289 {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3}, 211 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
290 {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, 212 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
291 {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5}, 213 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
292 {0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6}, 214 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
293 {0,} 215 { }
294}; 216};
295MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); 217MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
296 218
@@ -301,7 +223,7 @@ enum {
301struct pci_id_info { 223struct pci_id_info {
302 const char *name; 224 const char *name;
303}; 225};
304static const struct pci_id_info pci_id_tbl[] = { 226static const struct pci_id_info pci_id_tbl[] __devinitdata = {
305 {"D-Link DFE-550TX FAST Ethernet Adapter"}, 227 {"D-Link DFE-550TX FAST Ethernet Adapter"},
306 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, 228 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
307 {"D-Link DFE-580TX 4 port Server Adapter"}, 229 {"D-Link DFE-580TX 4 port Server Adapter"},
@@ -309,7 +231,7 @@ static const struct pci_id_info pci_id_tbl[] = {
309 {"D-Link DL10050-based FAST Ethernet Adapter"}, 231 {"D-Link DL10050-based FAST Ethernet Adapter"},
310 {"Sundance Technology Alta"}, 232 {"Sundance Technology Alta"},
311 {"IC Plus Corporation IP100A FAST Ethernet Adapter"}, 233 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
312 {NULL,}, /* 0 terminated list. */ 234 { } /* terminate list. */
313}; 235};
314 236
315/* This driver was written to use PCI memory space, however x86-oriented 237/* This driver was written to use PCI memory space, however x86-oriented
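
The sundance (and via-rhine) ID tables above end with an empty { } sentinel instead of an explicit {0,} entry; callers walk the table until they hit the zero-filled element. A small standalone sketch of that sentinel-terminated table pattern (the struct and its fields are made up for illustration, and the empty initializer relies on the same GCC extension the kernel tables use):

#include <stdio.h>

struct id_entry {
	unsigned short vendor;
	unsigned short device;
	const char *name;
};

/* Zero-filled last entry marks the end of the table. */
static const struct id_entry id_table[] = {
	{ 0x1186, 0x1002, "D-Link DFE-550TX" },
	{ 0x13F0, 0x0201, "Sundance Alta" },
	{ }				/* terminate list */
};

int main(void)
{
	const struct id_entry *e;

	for (e = id_table; e->vendor || e->device; e++)
		printf("%04x:%04x %s\n", e->vendor, e->device, e->name);
	return 0;
}
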
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index b4c0d101a7d7..7f414815cc62 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -224,24 +224,21 @@ static const struct pci_device_id w840_pci_tbl[] = {
224}; 224};
225MODULE_DEVICE_TABLE(pci, w840_pci_tbl); 225MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
226 226
227enum {
228 netdev_res_size = 128, /* size of PCI BAR resource */
229};
230
227struct pci_id_info { 231struct pci_id_info {
228 const char *name; 232 const char *name;
229 struct match_info { 233 int drv_flags; /* Driver use, intended as capability flags. */
230 int pci, pci_mask, subsystem, subsystem_mask;
231 int revision, revision_mask; /* Only 8 bits. */
232 } id;
233 int io_size; /* Needed for I/O region check or ioremap(). */
234 int drv_flags; /* Driver use, intended as capability flags. */
235}; 234};
236static struct pci_id_info pci_id_tbl[] = { 235
237 {"Winbond W89c840", /* Sometime a Level-One switch card. */ 236static const struct pci_id_info pci_id_tbl[] __devinitdata = {
238 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 }, 237 { /* Sometime a Level-One switch card. */
239 128, CanHaveMII | HasBrokenTx | FDXOnNoMII}, 238 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
240 {"Winbond W89c840", { 0x08401050, 0xffffffff, }, 239 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
241 128, CanHaveMII | HasBrokenTx}, 240 { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
242 {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,}, 241 { } /* terminate list. */
243 128, CanHaveMII | HasBrokenTx},
244 {NULL,}, /* 0 terminated list. */
245}; 242};
246 243
247/* This driver was written to use PCI memory space, however some x86 systems 244/* This driver was written to use PCI memory space, however some x86 systems
@@ -399,7 +396,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
399#ifdef USE_IO_OPS 396#ifdef USE_IO_OPS
400 bar = 0; 397 bar = 0;
401#endif 398#endif
402 ioaddr = pci_iomap(pdev, bar, pci_id_tbl[chip_idx].io_size); 399 ioaddr = pci_iomap(pdev, bar, netdev_res_size);
403 if (!ioaddr) 400 if (!ioaddr)
404 goto err_out_free_res; 401 goto err_out_free_res;
405 402
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index 091ebb7a62f6..17ca7dc42e6f 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -10,26 +10,11 @@
10 410 Severn Ave., Suite 210 10 410 Severn Ave., Suite 210
11 Annapolis MD 21403 11 Annapolis MD 21403
12 12
13 -----------------------------------------------------------
14
15 Linux kernel-specific changes:
16
17 LK1.0 (Ion Badulescu)
18 - Major cleanup
19 - Use 2.4 PCI API
20 - Support ethtool
21 - Rewrite perfect filter/hash code
22 - Use interrupts for media changes
23
24 LK1.1 (Ion Badulescu)
25 - Disallow negotiation of unsupported full-duplex modes
26*/ 13*/
27 14
28#define DRV_NAME "xircom_tulip_cb" 15#define DRV_NAME "xircom_tulip_cb"
29#define DRV_VERSION "0.91+LK1.1" 16#define DRV_VERSION "0.92"
30#define DRV_RELDATE "October 11, 2001" 17#define DRV_RELDATE "June 27, 2006"
31
32#define CARDBUS 1
33 18
34/* A few user-configurable values. */ 19/* A few user-configurable values. */
35 20
@@ -306,10 +291,10 @@ struct xircom_private {
306 struct xircom_tx_desc tx_ring[TX_RING_SIZE]; 291 struct xircom_tx_desc tx_ring[TX_RING_SIZE];
307 /* The saved address of a sent-in-place packet/buffer, for skfree(). */ 292 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
308 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 293 struct sk_buff* tx_skbuff[TX_RING_SIZE];
309#ifdef CARDBUS 294
310 /* The X3201-3 requires 4-byte aligned tx bufs */ 295 /* The X3201-3 requires 4-byte aligned tx bufs */
311 struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE]; 296 struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
312#endif 297
313 /* The addresses of receive-in-place skbuffs. */ 298 /* The addresses of receive-in-place skbuffs. */
314 struct sk_buff* rx_skbuff[RX_RING_SIZE]; 299 struct sk_buff* rx_skbuff[RX_RING_SIZE];
315 u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */ 300 u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
@@ -908,10 +893,8 @@ static void xircom_init_ring(struct net_device *dev)
908 tp->tx_skbuff[i] = NULL; 893 tp->tx_skbuff[i] = NULL;
909 tp->tx_ring[i].status = 0; 894 tp->tx_ring[i].status = 0;
910 tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]); 895 tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
911#ifdef CARDBUS
912 if (tp->chip_id == X3201_3) 896 if (tp->chip_id == X3201_3)
913 tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ); 897 tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
914#endif /* CARDBUS */
915 } 898 }
916 tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]); 899 tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
917} 900}
@@ -931,12 +914,10 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
931 entry = tp->cur_tx % TX_RING_SIZE; 914 entry = tp->cur_tx % TX_RING_SIZE;
932 915
933 tp->tx_skbuff[entry] = skb; 916 tp->tx_skbuff[entry] = skb;
934#ifdef CARDBUS
935 if (tp->chip_id == X3201_3) { 917 if (tp->chip_id == X3201_3) {
936 memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len); 918 memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
937 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); 919 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
938 } else 920 } else
939#endif
940 tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); 921 tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
941 922
942 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ 923 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 98b6f3207d3d..d3d0ec970318 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -25,117 +25,13 @@
25 version. He may or may not be interested in bug reports on this 25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at: 26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html 27 http://www.scyld.com/network/via-rhine.html
28 28 [link no longer provides useful info -jgarzik]
29
30 Linux kernel version history:
31
32 LK1.1.0:
33 - Jeff Garzik: softnet 'n stuff
34
35 LK1.1.1:
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
38
39 LK1.1.2:
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
41
42 LK1.1.3:
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
48
49 LK1.1.4:
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
52
53 LK1.1.5:
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Beckers 1.05 version
56 added netif_running_on/off support
57
58 LK1.1.6:
59 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
61
62 LK1.1.7:
63 - Manfred Spraul: added reset into tx_timeout
64
65 LK1.1.9:
66 - Urban Widmark: merges from Beckers 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
70
71 LK1.1.10:
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
74
75 LK1.1.11:
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
79
80 LK1.1.12:
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
82
83 LK1.1.13 (jgarzik):
84 - Add ethtool support
85 - Replace some MII-related magic numbers with constants
86
87 LK1.1.14 (Ivan G.):
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
99 (Roger Luethi)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
103
104 LK1.1.15 (jgarzik):
105 - Use new MII lib helper generic_mii_ioctl
106
107 LK1.1.16 (Roger Luethi)
108 - Etherleak fix
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
112 - Various clean ups
113
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
120
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
124
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
127
128 LK1.2.0-2.6 (Roger Luethi)
129 - Massive clean-up
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good
132 - Craig Brind: Zero padded aligned buffers for short packets.
133 29
134*/ 30*/
135 31
136#define DRV_NAME "via-rhine" 32#define DRV_NAME "via-rhine"
137#define DRV_VERSION "1.2.0-2.6" 33#define DRV_VERSION "1.4.0"
138#define DRV_RELDATE "June-10-2004" 34#define DRV_RELDATE "June-27-2006"
139 35
140 36
141/* A few user-configurable values. 37/* A few user-configurable values.
@@ -356,12 +252,11 @@ enum rhine_quirks {
356/* Beware of PCI posted writes */ 252/* Beware of PCI posted writes */
357#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) 253#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
358 254
359static struct pci_device_id rhine_pci_tbl[] = 255static const struct pci_device_id rhine_pci_tbl[] = {
360{ 256 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
361 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */ 257 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
362 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */ 258 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
363 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */ 259 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
364 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
365 { } /* terminate list */ 260 { } /* terminate list */
366}; 261};
367MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); 262MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index ba2972ba3757..f5b0078eb4ad 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -229,7 +229,8 @@ static int rx_copybreak = 200;
229module_param(rx_copybreak, int, 0644); 229module_param(rx_copybreak, int, 0644);
230MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 230MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
231 231
232static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info); 232static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
233 const struct velocity_info_tbl *info);
233static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev); 234static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
234static void velocity_print_info(struct velocity_info *vptr); 235static void velocity_print_info(struct velocity_info *vptr);
235static int velocity_open(struct net_device *dev); 236static int velocity_open(struct net_device *dev);
@@ -294,9 +295,9 @@ static void velocity_unregister_notifier(void)
294 * Internal board variants. At the moment we have only one 295 * Internal board variants. At the moment we have only one
295 */ 296 */
296 297
297static struct velocity_info_tbl chip_info_table[] = { 298static const struct velocity_info_tbl chip_info_table[] __devinitdata = {
298 {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL}, 299 {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
299 {0, NULL} 300 { }
300}; 301};
301 302
302/* 303/*
@@ -304,10 +305,9 @@ static struct velocity_info_tbl chip_info_table[] = {
304 * device driver. Used for hotplug autoloading. 305 * device driver. Used for hotplug autoloading.
305 */ 306 */
306 307
307static struct pci_device_id velocity_id_table[] __devinitdata = { 308static const struct pci_device_id velocity_id_table[] __devinitdata = {
308 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X, 309 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
309 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) chip_info_table}, 310 { }
310 {0, }
311}; 311};
312 312
313MODULE_DEVICE_TABLE(pci, velocity_id_table); 313MODULE_DEVICE_TABLE(pci, velocity_id_table);
@@ -341,7 +341,7 @@ static char __devinit *get_chip_name(enum chip_type chip_id)
341static void __devexit velocity_remove1(struct pci_dev *pdev) 341static void __devexit velocity_remove1(struct pci_dev *pdev)
342{ 342{
343 struct net_device *dev = pci_get_drvdata(pdev); 343 struct net_device *dev = pci_get_drvdata(pdev);
344 struct velocity_info *vptr = dev->priv; 344 struct velocity_info *vptr = netdev_priv(dev);
345 345
346#ifdef CONFIG_PM 346#ifdef CONFIG_PM
347 unsigned long flags; 347 unsigned long flags;
@@ -686,21 +686,23 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
686 static int first = 1; 686 static int first = 1;
687 struct net_device *dev; 687 struct net_device *dev;
688 int i; 688 int i;
689 struct velocity_info_tbl *info = (struct velocity_info_tbl *) ent->driver_data; 689 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
690 struct velocity_info *vptr; 690 struct velocity_info *vptr;
691 struct mac_regs __iomem * regs; 691 struct mac_regs __iomem * regs;
692 int ret = -ENOMEM; 692 int ret = -ENOMEM;
693 693
694 /* FIXME: this driver, like almost all other ethernet drivers,
695 * can support more than MAX_UNITS.
696 */
694 if (velocity_nics >= MAX_UNITS) { 697 if (velocity_nics >= MAX_UNITS) {
695 printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n", 698 dev_notice(&pdev->dev, "already found %d NICs.\n",
696 velocity_nics); 699 velocity_nics);
697 return -ENODEV; 700 return -ENODEV;
698 } 701 }
699 702
700 dev = alloc_etherdev(sizeof(struct velocity_info)); 703 dev = alloc_etherdev(sizeof(struct velocity_info));
701 704 if (!dev) {
702 if (dev == NULL) { 705 dev_err(&pdev->dev, "allocate net device failed.\n");
703 printk(KERN_ERR VELOCITY_NAME ": allocate net device failed.\n");
704 goto out; 706 goto out;
705 } 707 }
706 708
@@ -708,7 +710,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
708 710
709 SET_MODULE_OWNER(dev); 711 SET_MODULE_OWNER(dev);
710 SET_NETDEV_DEV(dev, &pdev->dev); 712 SET_NETDEV_DEV(dev, &pdev->dev);
711 vptr = dev->priv; 713 vptr = netdev_priv(dev);
712 714
713 715
714 if (first) { 716 if (first) {
@@ -731,17 +733,17 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
731 733
732 ret = velocity_get_pci_info(vptr, pdev); 734 ret = velocity_get_pci_info(vptr, pdev);
733 if (ret < 0) { 735 if (ret < 0) {
734 printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n"); 736 /* error message already printed */
735 goto err_disable; 737 goto err_disable;
736 } 738 }
737 739
738 ret = pci_request_regions(pdev, VELOCITY_NAME); 740 ret = pci_request_regions(pdev, VELOCITY_NAME);
739 if (ret < 0) { 741 if (ret < 0) {
740 printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n"); 742 dev_err(&pdev->dev, "No PCI resources.\n");
741 goto err_disable; 743 goto err_disable;
742 } 744 }
743 745
744 regs = ioremap(vptr->memaddr, vptr->io_size); 746 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
745 if (regs == NULL) { 747 if (regs == NULL) {
746 ret = -EIO; 748 ret = -EIO;
747 goto err_release_res; 749 goto err_release_res;
@@ -859,13 +861,14 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
859 * discovered. 861 * discovered.
860 */ 862 */
861 863
862static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info) 864static void __devinit velocity_init_info(struct pci_dev *pdev,
865 struct velocity_info *vptr,
866 const struct velocity_info_tbl *info)
863{ 867{
864 memset(vptr, 0, sizeof(struct velocity_info)); 868 memset(vptr, 0, sizeof(struct velocity_info));
865 869
866 vptr->pdev = pdev; 870 vptr->pdev = pdev;
867 vptr->chip_id = info->chip_id; 871 vptr->chip_id = info->chip_id;
868 vptr->io_size = info->io_size;
869 vptr->num_txq = info->txqueue; 872 vptr->num_txq = info->txqueue;
870 vptr->multicast_limit = MCAM_SIZE; 873 vptr->multicast_limit = MCAM_SIZE;
871 spin_lock_init(&vptr->lock); 874 spin_lock_init(&vptr->lock);
@@ -883,8 +886,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_i
883 886
884static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) 887static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
885{ 888{
886 889 if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
887 if(pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
888 return -EIO; 890 return -EIO;
889 891
890 pci_set_master(pdev); 892 pci_set_master(pdev);
@@ -892,24 +894,20 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
892 vptr->ioaddr = pci_resource_start(pdev, 0); 894 vptr->ioaddr = pci_resource_start(pdev, 0);
893 vptr->memaddr = pci_resource_start(pdev, 1); 895 vptr->memaddr = pci_resource_start(pdev, 1);
894 896
895 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) 897 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
896 { 898 dev_err(&pdev->dev,
897 printk(KERN_ERR "%s: region #0 is not an I/O resource, aborting.\n", 899 "region #0 is not an I/O resource, aborting.\n");
898 pci_name(pdev));
899 return -EINVAL; 900 return -EINVAL;
900 } 901 }
901 902
902 if((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) 903 if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
903 { 904 dev_err(&pdev->dev,
904 printk(KERN_ERR "%s: region #1 is an I/O resource, aborting.\n", 905 "region #1 is an I/O resource, aborting.\n");
905 pci_name(pdev));
906 return -EINVAL; 906 return -EINVAL;
907 } 907 }
908 908
909 if(pci_resource_len(pdev, 1) < 256) 909 if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
910 { 910 dev_err(&pdev->dev, "region #1 is too small.\n");
911 printk(KERN_ERR "%s: region #1 is too small.\n",
912 pci_name(pdev));
913 return -EINVAL; 911 return -EINVAL;
914 } 912 }
915 vptr->pdev = pdev; 913 vptr->pdev = pdev;
@@ -1728,7 +1726,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
1728 1726
1729static int velocity_open(struct net_device *dev) 1727static int velocity_open(struct net_device *dev)
1730{ 1728{
1731 struct velocity_info *vptr = dev->priv; 1729 struct velocity_info *vptr = netdev_priv(dev);
1732 int ret; 1730 int ret;
1733 1731
1734 vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32); 1732 vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);
@@ -1785,7 +1783,7 @@ err_free_desc_rings:
1785 1783
1786static int velocity_change_mtu(struct net_device *dev, int new_mtu) 1784static int velocity_change_mtu(struct net_device *dev, int new_mtu)
1787{ 1785{
1788 struct velocity_info *vptr = dev->priv; 1786 struct velocity_info *vptr = netdev_priv(dev);
1789 unsigned long flags; 1787 unsigned long flags;
1790 int oldmtu = dev->mtu; 1788 int oldmtu = dev->mtu;
1791 int ret = 0; 1789 int ret = 0;
@@ -1861,7 +1859,7 @@ static void velocity_shutdown(struct velocity_info *vptr)
1861 1859
1862static int velocity_close(struct net_device *dev) 1860static int velocity_close(struct net_device *dev)
1863{ 1861{
1864 struct velocity_info *vptr = dev->priv; 1862 struct velocity_info *vptr = netdev_priv(dev);
1865 1863
1866 netif_stop_queue(dev); 1864 netif_stop_queue(dev);
1867 velocity_shutdown(vptr); 1865 velocity_shutdown(vptr);
@@ -1894,7 +1892,7 @@ static int velocity_close(struct net_device *dev)
1894 1892
1895static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) 1893static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
1896{ 1894{
1897 struct velocity_info *vptr = dev->priv; 1895 struct velocity_info *vptr = netdev_priv(dev);
1898 int qnum = 0; 1896 int qnum = 0;
1899 struct tx_desc *td_ptr; 1897 struct tx_desc *td_ptr;
1900 struct velocity_td_info *tdinfo; 1898 struct velocity_td_info *tdinfo;
@@ -2049,7 +2047,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2049static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs) 2047static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
2050{ 2048{
2051 struct net_device *dev = dev_instance; 2049 struct net_device *dev = dev_instance;
2052 struct velocity_info *vptr = dev->priv; 2050 struct velocity_info *vptr = netdev_priv(dev);
2053 u32 isr_status; 2051 u32 isr_status;
2054 int max_count = 0; 2052 int max_count = 0;
2055 2053
@@ -2104,7 +2102,7 @@ static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
2104 2102
2105static void velocity_set_multi(struct net_device *dev) 2103static void velocity_set_multi(struct net_device *dev)
2106{ 2104{
2107 struct velocity_info *vptr = dev->priv; 2105 struct velocity_info *vptr = netdev_priv(dev);
2108 struct mac_regs __iomem * regs = vptr->mac_regs; 2106 struct mac_regs __iomem * regs = vptr->mac_regs;
2109 u8 rx_mode; 2107 u8 rx_mode;
2110 int i; 2108 int i;
@@ -2153,7 +2151,7 @@ static void velocity_set_multi(struct net_device *dev)
2153 2151
2154static struct net_device_stats *velocity_get_stats(struct net_device *dev) 2152static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2155{ 2153{
2156 struct velocity_info *vptr = dev->priv; 2154 struct velocity_info *vptr = netdev_priv(dev);
2157 2155
2158 /* If the hardware is down, don't touch MII */ 2156 /* If the hardware is down, don't touch MII */
2159 if(!netif_running(dev)) 2157 if(!netif_running(dev))
@@ -2196,7 +2194,7 @@ static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2196 2194
2197static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2195static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2198{ 2196{
2199 struct velocity_info *vptr = dev->priv; 2197 struct velocity_info *vptr = netdev_priv(dev);
2200 int ret; 2198 int ret;
2201 2199
2202 /* If we are asked for information and the device is power 2200 /* If we are asked for information and the device is power
@@ -2825,7 +2823,7 @@ static void enable_flow_control_ability(struct velocity_info *vptr)
2825 2823
2826static int velocity_ethtool_up(struct net_device *dev) 2824static int velocity_ethtool_up(struct net_device *dev)
2827{ 2825{
2828 struct velocity_info *vptr = dev->priv; 2826 struct velocity_info *vptr = netdev_priv(dev);
2829 if (!netif_running(dev)) 2827 if (!netif_running(dev))
2830 pci_set_power_state(vptr->pdev, PCI_D0); 2828 pci_set_power_state(vptr->pdev, PCI_D0);
2831 return 0; 2829 return 0;
@@ -2841,14 +2839,14 @@ static int velocity_ethtool_up(struct net_device *dev)
2841 2839
2842static void velocity_ethtool_down(struct net_device *dev) 2840static void velocity_ethtool_down(struct net_device *dev)
2843{ 2841{
2844 struct velocity_info *vptr = dev->priv; 2842 struct velocity_info *vptr = netdev_priv(dev);
2845 if (!netif_running(dev)) 2843 if (!netif_running(dev))
2846 pci_set_power_state(vptr->pdev, PCI_D3hot); 2844 pci_set_power_state(vptr->pdev, PCI_D3hot);
2847} 2845}
2848 2846
2849static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2847static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2850{ 2848{
2851 struct velocity_info *vptr = dev->priv; 2849 struct velocity_info *vptr = netdev_priv(dev);
2852 struct mac_regs __iomem * regs = vptr->mac_regs; 2850 struct mac_regs __iomem * regs = vptr->mac_regs;
2853 u32 status; 2851 u32 status;
2854 status = check_connection_type(vptr->mac_regs); 2852 status = check_connection_type(vptr->mac_regs);
@@ -2873,7 +2871,7 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
2873 2871
2874static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2872static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2875{ 2873{
2876 struct velocity_info *vptr = dev->priv; 2874 struct velocity_info *vptr = netdev_priv(dev);
2877 u32 curr_status; 2875 u32 curr_status;
2878 u32 new_status = 0; 2876 u32 new_status = 0;
2879 int ret = 0; 2877 int ret = 0;
@@ -2896,14 +2894,14 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd
2896 2894
2897static u32 velocity_get_link(struct net_device *dev) 2895static u32 velocity_get_link(struct net_device *dev)
2898{ 2896{
2899 struct velocity_info *vptr = dev->priv; 2897 struct velocity_info *vptr = netdev_priv(dev);
2900 struct mac_regs __iomem * regs = vptr->mac_regs; 2898 struct mac_regs __iomem * regs = vptr->mac_regs;
2901 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1; 2899 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1;
2902} 2900}
2903 2901
2904static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2902static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2905{ 2903{
2906 struct velocity_info *vptr = dev->priv; 2904 struct velocity_info *vptr = netdev_priv(dev);
2907 strcpy(info->driver, VELOCITY_NAME); 2905 strcpy(info->driver, VELOCITY_NAME);
2908 strcpy(info->version, VELOCITY_VERSION); 2906 strcpy(info->version, VELOCITY_VERSION);
2909 strcpy(info->bus_info, pci_name(vptr->pdev)); 2907 strcpy(info->bus_info, pci_name(vptr->pdev));
@@ -2911,7 +2909,7 @@ static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
2911 2909
2912static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2910static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2913{ 2911{
2914 struct velocity_info *vptr = dev->priv; 2912 struct velocity_info *vptr = netdev_priv(dev);
2915 wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP; 2913 wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
2916 wol->wolopts |= WAKE_MAGIC; 2914 wol->wolopts |= WAKE_MAGIC;
2917 /* 2915 /*
@@ -2927,7 +2925,7 @@ static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_woli
2927 2925
2928static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2926static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2929{ 2927{
2930 struct velocity_info *vptr = dev->priv; 2928 struct velocity_info *vptr = netdev_priv(dev);
2931 2929
2932 if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))) 2930 if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
2933 return -EFAULT; 2931 return -EFAULT;
@@ -2992,7 +2990,7 @@ static struct ethtool_ops velocity_ethtool_ops = {
2992 2990
2993static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2991static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2994{ 2992{
2995 struct velocity_info *vptr = dev->priv; 2993 struct velocity_info *vptr = netdev_priv(dev);
2996 struct mac_regs __iomem * regs = vptr->mac_regs; 2994 struct mac_regs __iomem * regs = vptr->mac_regs;
2997 unsigned long flags; 2995 unsigned long flags;
2998 struct mii_ioctl_data *miidata = if_mii(ifr); 2996 struct mii_ioctl_data *miidata = if_mii(ifr);
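Note on the dev->priv conversions above: netdev_priv() is the preferred accessor for a device allocated with alloc_etherdev(), because the private area is laid out directly behind struct net_device and netdev_priv() computes that address rather than going through the priv pointer. A minimal sketch of the pattern, reusing the velocity names from this patch:

    struct net_device *dev = alloc_etherdev(sizeof(struct velocity_info));
    struct velocity_info *vptr;

    if (!dev)
        return -ENOMEM;
    /* netdev_priv() points at the velocity_info area that alloc_etherdev()
     * allocated together with struct net_device. */
    vptr = netdev_priv(dev);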
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index f1b2640ebdc6..496c3d597444 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -31,6 +31,8 @@
31#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver" 31#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
32#define VELOCITY_VERSION "1.13" 32#define VELOCITY_VERSION "1.13"
33 33
34#define VELOCITY_IO_SIZE 256
35
34#define PKT_BUF_SZ 1540 36#define PKT_BUF_SZ 1540
35 37
36#define MAX_UNITS 8 38#define MAX_UNITS 8
@@ -1191,7 +1193,6 @@ enum chip_type {
1191struct velocity_info_tbl { 1193struct velocity_info_tbl {
1192 enum chip_type chip_id; 1194 enum chip_type chip_id;
1193 char *name; 1195 char *name;
1194 int io_size;
1195 int txqueue; 1196 int txqueue;
1196 u32 flags; 1197 u32 flags;
1197}; 1198};
@@ -1751,7 +1752,6 @@ struct velocity_info {
1751 struct mac_regs __iomem * mac_regs; 1752 struct mac_regs __iomem * mac_regs;
1752 unsigned long memaddr; 1753 unsigned long memaddr;
1753 unsigned long ioaddr; 1754 unsigned long ioaddr;
1754 u32 io_size;
1755 1755
1756 u8 rev_id; 1756 u8 rev_id;
1757 1757
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index b5328b0ff927..54b8e492ef97 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -134,18 +134,6 @@ config SEALEVEL_4021
134 The driver will be compiled as a module: the 134 The driver will be compiled as a module: the
135 module will be called sealevel. 135 module will be called sealevel.
136 136
137config SYNCLINK_SYNCPPP
138 tristate "SyncLink HDLC/SYNCPPP support"
139 depends on WAN
140 help
141 Enables HDLC/SYNCPPP support for the SyncLink WAN driver.
142
143 Normally the SyncLink WAN driver works with the main PPP driver
144 <file:drivers/net/ppp_generic.c> and pppd program.
145 HDLC/SYNCPPP support allows use of the Cisco HDLC/PPP driver
146 <file:drivers/net/wan/syncppp.c>. The SyncLink WAN driver (in
147 character devices) must also be enabled.
148
149# Generic HDLC 137# Generic HDLC
150config HDLC 138config HDLC
151 tristate "Generic HDLC layer" 139 tristate "Generic HDLC layer"
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 823c6d5ab90d..316ca6869d5e 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_COSA) += syncppp.o cosa.o
28obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o 28obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o
29obj-$(CONFIG_DSCC4) += dscc4.o 29obj-$(CONFIG_DSCC4) += dscc4.o
30obj-$(CONFIG_LANMEDIA) += syncppp.o 30obj-$(CONFIG_LANMEDIA) += syncppp.o
31obj-$(CONFIG_SYNCLINK_SYNCPPP) += syncppp.o
32obj-$(CONFIG_X25_ASY) += x25_asy.o 31obj-$(CONFIG_X25_ASY) += x25_asy.o
33 32
34obj-$(CONFIG_LANMEDIA) += lmc/ 33obj-$(CONFIG_LANMEDIA) += lmc/
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 30ec235e6935..fa9d2c4edc93 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -550,6 +550,7 @@ config USB_ZD1201
550 550
551source "drivers/net/wireless/hostap/Kconfig" 551source "drivers/net/wireless/hostap/Kconfig"
552source "drivers/net/wireless/bcm43xx/Kconfig" 552source "drivers/net/wireless/bcm43xx/Kconfig"
553source "drivers/net/wireless/zd1211rw/Kconfig"
553 554
554# yes, this works even when no drivers are selected 555# yes, this works even when no drivers are selected
555config NET_WIRELESS 556config NET_WIRELESS
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 512603de309a..c613af17a159 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_PRISM54) += prism54/
36 36
37obj-$(CONFIG_HOSTAP) += hostap/ 37obj-$(CONFIG_HOSTAP) += hostap/
38obj-$(CONFIG_BCM43XX) += bcm43xx/ 38obj-$(CONFIG_BCM43XX) += bcm43xx/
39obj-$(CONFIG_ZD1211RW) += zd1211rw/
39 40
40# 16-bit wireless PCMCIA client drivers 41# 16-bit wireless PCMCIA client drivers
41obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o 42obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index d8f5600578b4..e1c5a939bca4 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -1885,6 +1885,15 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
1885 1885
1886 spin_lock(&bcm->irq_lock); 1886 spin_lock(&bcm->irq_lock);
1887 1887
 1888 /* Only accept IRQs if we are initialized properly.
 1889 * This avoids an RX race while initializing.
 1890 * We should probably not enable IRQs before we are initialized
 1891 * completely, but some careful work is needed to fix this. I think it
 1892 * is best to stay with this cheap workaround for now.
 1893 */
1894 if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED))
1895 goto out;
1896
1888 reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); 1897 reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
1889 if (reason == 0xffffffff) { 1898 if (reason == 0xffffffff) {
1890 /* irq not for us (shared irq) */ 1899 /* irq not for us (shared irq) */
@@ -1906,19 +1915,11 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
1906 1915
1907 bcm43xx_interrupt_ack(bcm, reason); 1916 bcm43xx_interrupt_ack(bcm, reason);
1908 1917
1909 /* Only accept IRQs, if we are initialized properly. 1918 /* disable all IRQs. They are enabled again in the bottom half. */
1910 * This avoids an RX race while initializing. 1919 bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
1911 * We should probably not enable IRQs before we are initialized 1920 /* save the reason code and call our bottom half. */
1912 * completely, but some careful work is needed to fix this. I think it 1921 bcm->irq_reason = reason;
1913 * is best to stay with this cheap workaround for now... . 1922 tasklet_schedule(&bcm->isr_tasklet);
1914 */
1915 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) {
1916 /* disable all IRQs. They are enabled again in the bottom half. */
1917 bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
1918 /* save the reason code and call our bottom half. */
1919 bcm->irq_reason = reason;
1920 tasklet_schedule(&bcm->isr_tasklet);
1921 }
1922 1923
1923out: 1924out:
1924 mmiowb(); 1925 mmiowb();
@@ -3698,6 +3699,10 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3698 secinfo->encrypt = sec->encrypt; 3699 secinfo->encrypt = sec->encrypt;
3699 dprintk(", .encrypt = %d", sec->encrypt); 3700 dprintk(", .encrypt = %d", sec->encrypt);
3700 } 3701 }
3702 if (sec->flags & SEC_AUTH_MODE) {
3703 secinfo->auth_mode = sec->auth_mode;
3704 dprintk(", .auth_mode = %d\n", sec->auth_mode);
3705 }
3701 dprintk("\n"); 3706 dprintk("\n");
3702 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && 3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
3703 !bcm->ieee->host_encrypt) { 3708 !bcm->ieee->host_encrypt) {
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index 30a202b258b5..116493671f88 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -112,30 +112,6 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm,
112 return bcm43xx_channel_to_freq_bg(channel); 112 return bcm43xx_channel_to_freq_bg(channel);
113} 113}
114 114
115/* Lightweight function to check if a channel number is valid.
116 * Note that this does _NOT_ check for geographical restrictions!
117 */
118static inline
119int bcm43xx_is_valid_channel_a(u8 channel)
120{
121 return (channel >= IEEE80211_52GHZ_MIN_CHANNEL
122 && channel <= IEEE80211_52GHZ_MAX_CHANNEL);
123}
124static inline
125int bcm43xx_is_valid_channel_bg(u8 channel)
126{
127 return (channel >= IEEE80211_24GHZ_MIN_CHANNEL
128 && channel <= IEEE80211_24GHZ_MAX_CHANNEL);
129}
130static inline
131int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm,
132 u8 channel)
133{
134 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A)
135 return bcm43xx_is_valid_channel_a(channel);
136 return bcm43xx_is_valid_channel_bg(channel);
137}
138
139void bcm43xx_tsf_read(struct bcm43xx_private *bcm, u64 *tsf); 115void bcm43xx_tsf_read(struct bcm43xx_private *bcm, u64 *tsf);
140void bcm43xx_tsf_write(struct bcm43xx_private *bcm, u64 tsf); 116void bcm43xx_tsf_write(struct bcm43xx_private *bcm, u64 tsf);
141 117
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
index af5c0bff1696..bb9c484d7e19 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
@@ -1594,11 +1594,11 @@ int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm,
1594 u16 r8, tmp; 1594 u16 r8, tmp;
1595 u16 freq; 1595 u16 freq;
1596 1596
1597 if (!ieee80211_is_valid_channel(bcm->ieee, channel))
1598 return -EINVAL;
1597 if ((radio->manufact == 0x17F) && 1599 if ((radio->manufact == 0x17F) &&
1598 (radio->version == 0x2060) && 1600 (radio->version == 0x2060) &&
1599 (radio->revision == 1)) { 1601 (radio->revision == 1)) {
1600 if (channel > 200)
1601 return -EINVAL;
1602 freq = channel2freq_a(channel); 1602 freq = channel2freq_a(channel);
1603 1603
1604 r8 = bcm43xx_radio_read16(bcm, 0x0008); 1604 r8 = bcm43xx_radio_read16(bcm, 0x0008);
@@ -1651,9 +1651,6 @@ int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm,
1651 TODO(); //TODO: TSSI2dbm workaround 1651 TODO(); //TODO: TSSI2dbm workaround
1652 bcm43xx_phy_xmitpower(bcm);//FIXME correct? 1652 bcm43xx_phy_xmitpower(bcm);//FIXME correct?
1653 } else { 1653 } else {
1654 if ((channel < 1) || (channel > 14))
1655 return -EINVAL;
1656
1657 if (synthetic_pu_workaround) 1654 if (synthetic_pu_workaround)
1658 bcm43xx_synth_pu_workaround(bcm, channel); 1655 bcm43xx_synth_pu_workaround(bcm, channel);
1659 1656
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index c35cb3a0777e..5c36e29efff7 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -119,7 +119,7 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
119 channel = bcm43xx_freq_to_channel(bcm, data->freq.m); 119 channel = bcm43xx_freq_to_channel(bcm, data->freq.m);
120 freq = data->freq.m; 120 freq = data->freq.m;
121 } 121 }
122 if (!bcm43xx_is_valid_channel(bcm, channel)) 122 if (!ieee80211_is_valid_channel(bcm->ieee, channel))
123 goto out_unlock; 123 goto out_unlock;
124 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { 124 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
125 //ieee80211softmac_disassoc(softmac, $REASON); 125 //ieee80211softmac_disassoc(softmac, $REASON);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
index d8ece28c079f..6dbd855b3647 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
@@ -296,11 +296,14 @@ void bcm43xx_generate_txhdr(struct bcm43xx_private *bcm,
296 u16 control = 0; 296 u16 control = 0;
297 u16 wsec_rate = 0; 297 u16 wsec_rate = 0;
298 u16 encrypt_frame; 298 u16 encrypt_frame;
299 const u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(wireless_header->frame_ctl));
300 const int is_mgt = (ftype == IEEE80211_FTYPE_MGMT);
299 301
300 /* Now construct the TX header. */ 302 /* Now construct the TX header. */
301 memset(txhdr, 0, sizeof(*txhdr)); 303 memset(txhdr, 0, sizeof(*txhdr));
302 304
303 bitrate = bcm->softmac->txrates.default_rate; 305 bitrate = ieee80211softmac_suggest_txrate(bcm->softmac,
306 is_multicast_ether_addr(wireless_header->addr1), is_mgt);
304 ofdm_modulation = !(ieee80211_is_cck_rate(bitrate)); 307 ofdm_modulation = !(ieee80211_is_cck_rate(bitrate));
305 fallback_bitrate = bcm43xx_calc_fallback_rate(bitrate); 308 fallback_bitrate = bcm43xx_calc_fallback_rate(bitrate);
306 fallback_ofdm_modulation = !(ieee80211_is_cck_rate(fallback_bitrate)); 309 fallback_ofdm_modulation = !(ieee80211_is_cck_rate(fallback_bitrate));
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 49860fa61c30..6dfa041be66d 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -66,10 +66,12 @@ static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
66 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), 66 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
67 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), 67 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
68 PLXDEV(0x126c, 0x8030, "Nortel emobility"), 68 PLXDEV(0x126c, 0x8030, "Nortel emobility"),
69 PLXDEV(0x1562, 0x0001, "Symbol LA-4123"),
69 PLXDEV(0x1385, 0x4100, "Netgear MA301"), 70 PLXDEV(0x1385, 0x4100, "Netgear MA301"),
70 PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"), 71 PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"),
71 PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"), 72 PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"),
72 PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"), 73 PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"),
74 PLXDEV(0x16ab, 0x1100, "Global Sun Tech GL24110P"),
73 PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"), 75 PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"),
74 PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"), 76 PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"),
75 PLXDEV(0x16ab, 0x1103, "Longshine 8031"), 77 PLXDEV(0x16ab, 0x1103, "Longshine 8031"),
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig
new file mode 100644
index 000000000000..66ed55bc5460
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/Kconfig
@@ -0,0 +1,19 @@
1config ZD1211RW
2 tristate "ZyDAS ZD1211/ZD1211B USB-wireless support"
3 depends on USB && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL
4 select FW_LOADER
5 ---help---
6 This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
7 chip, present in many USB-wireless adapters.
8
9 Device firmware is required alongside this driver. You can download the
10 firmware distribution from http://zd1211.ath.cx/get-firmware
11
12config ZD1211RW_DEBUG
13 bool "ZyDAS ZD1211 debugging"
14 depends on ZD1211RW
15 ---help---
16 ZD1211 debugging messages. Choosing Y will result in additional debug
17 messages being saved to your kernel logs, which may help debug any
18 problems.
19
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
new file mode 100644
index 000000000000..500314fc74d2
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -0,0 +1,11 @@
1obj-$(CONFIG_ZD1211RW) += zd1211rw.o
2
3zd1211rw-objs := zd_chip.o zd_ieee80211.o \
4 zd_mac.o zd_netdev.o \
5 zd_rf_al2230.o zd_rf_rf2959.o \
6 zd_rf.o zd_usb.o zd_util.o
7
8ifeq ($(CONFIG_ZD1211RW_DEBUG),y)
9EXTRA_CFLAGS += -DDEBUG
10endif
11
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
new file mode 100644
index 000000000000..efc9c4bd826f
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -0,0 +1,1615 @@
1/* zd_chip.c
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18/* This file implements all the hardware specific functions for the ZD1211
19 * and ZD1211B chips. Support for the ZD1211B was possible after Timothy
20 * Legge sent me a ZD1211B device. Thank you Tim. -- Uli
21 */
22
23#include <linux/kernel.h>
24#include <linux/errno.h>
25
26#include "zd_def.h"
27#include "zd_chip.h"
28#include "zd_ieee80211.h"
29#include "zd_mac.h"
30#include "zd_rf.h"
31#include "zd_util.h"
32
33void zd_chip_init(struct zd_chip *chip,
34 struct net_device *netdev,
35 struct usb_interface *intf)
36{
37 memset(chip, 0, sizeof(*chip));
38 mutex_init(&chip->mutex);
39 zd_usb_init(&chip->usb, netdev, intf);
40 zd_rf_init(&chip->rf);
41}
42
43void zd_chip_clear(struct zd_chip *chip)
44{
45 mutex_lock(&chip->mutex);
46 zd_usb_clear(&chip->usb);
47 zd_rf_clear(&chip->rf);
48 mutex_unlock(&chip->mutex);
49 mutex_destroy(&chip->mutex);
50 memset(chip, 0, sizeof(*chip));
51}
52
53static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size)
54{
55 return scnprintf(buffer, size, "%02x-%02x-%02x",
56 addr[0], addr[1], addr[2]);
57}
58
59/* Prints an identifier line, which will support debugging. */
60static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size)
61{
62 int i = 0;
63
64 i = scnprintf(buffer, size, "zd1211%s chip ",
65 chip->is_zd1211b ? "b" : "");
66 i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i);
67 i += scnprintf(buffer+i, size-i, " ");
68 i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i);
69 i += scnprintf(buffer+i, size-i, " ");
70 i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i);
71 i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c", chip->pa_type,
72 chip->patch_cck_gain ? 'g' : '-',
73 chip->patch_cr157 ? '7' : '-',
74 chip->patch_6m_band_edge ? '6' : '-');
75 return i;
76}
77
78static void print_id(struct zd_chip *chip)
79{
80 char buffer[80];
81
82 scnprint_id(chip, buffer, sizeof(buffer));
83 buffer[sizeof(buffer)-1] = 0;
84 dev_info(zd_chip_dev(chip), "%s\n", buffer);
85}
86
87/* Read a variable number of 32-bit values. Parameter count is not allowed to
88 * exceed USB_MAX_IOREAD32_COUNT.
89 */
90int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr,
91 unsigned int count)
92{
93 int r;
94 int i;
95 zd_addr_t *a16 = (zd_addr_t *)NULL;
96 u16 *v16;
97 unsigned int count16;
98
99 if (count > USB_MAX_IOREAD32_COUNT)
100 return -EINVAL;
101
102 /* Allocate a single memory block for values and addresses. */
103 count16 = 2*count;
104 a16 = (zd_addr_t *)kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
105 GFP_NOFS);
106 if (!a16) {
107 dev_dbg_f(zd_chip_dev(chip),
108 "error ENOMEM in allocation of a16\n");
109 r = -ENOMEM;
110 goto out;
111 }
112 v16 = (u16 *)(a16 + count16);
113
114 for (i = 0; i < count; i++) {
115 int j = 2*i;
 116 /* We always read the high word first. */
117 a16[j] = zd_inc_word(addr[i]);
118 a16[j+1] = addr[i];
119 }
120
121 r = zd_ioread16v_locked(chip, v16, a16, count16);
122 if (r) {
123 dev_dbg_f(zd_chip_dev(chip),
124 "error: zd_ioread16v_locked. Error number %d\n", r);
125 goto out;
126 }
127
128 for (i = 0; i < count; i++) {
129 int j = 2*i;
130 values[i] = (v16[j] << 16) | v16[j+1];
131 }
132
133out:
134 kfree((void *)a16);
135 return r;
136}
137
138int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
139 unsigned int count)
140{
141 int i, j, r;
142 struct zd_ioreq16 *ioreqs16;
143 unsigned int count16;
144
145 ZD_ASSERT(mutex_is_locked(&chip->mutex));
146
147 if (count == 0)
148 return 0;
149 if (count > USB_MAX_IOWRITE32_COUNT)
150 return -EINVAL;
151
152 /* Allocate a single memory block for values and addresses. */
153 count16 = 2*count;
154 ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_NOFS);
155 if (!ioreqs16) {
156 r = -ENOMEM;
157 dev_dbg_f(zd_chip_dev(chip),
158 "error %d in ioreqs16 allocation\n", r);
159 goto out;
160 }
161
162 for (i = 0; i < count; i++) {
163 j = 2*i;
 164 /* We always write the high word first. */
165 ioreqs16[j].value = ioreqs[i].value >> 16;
166 ioreqs16[j].addr = zd_inc_word(ioreqs[i].addr);
167 ioreqs16[j+1].value = ioreqs[i].value;
168 ioreqs16[j+1].addr = ioreqs[i].addr;
169 }
170
171 r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16);
172#ifdef DEBUG
173 if (r) {
174 dev_dbg_f(zd_chip_dev(chip),
175 "error %d in zd_usb_write16v\n", r);
176 }
177#endif /* DEBUG */
178out:
179 kfree(ioreqs16);
180 return r;
181}
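For reference, a single entry passed to _zd_iowrite32v_locked() is split as sketched below; REG stands for an arbitrary 32-bit register address and is only illustrative:

    /* { .addr = REG, .value = 0x12345678 } becomes two 16-bit requests,
     * high word first:
     *   { .addr = zd_inc_word(REG), .value = 0x1234 }
     *   { .addr = REG,              .value = 0x5678 }
     * zd_ioread32v_locked() reassembles reads the same way:
     *   value = (high16 << 16) | low16;
     */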
182
183int zd_iowrite16a_locked(struct zd_chip *chip,
184 const struct zd_ioreq16 *ioreqs, unsigned int count)
185{
186 int r;
187 unsigned int i, j, t, max;
188
189 ZD_ASSERT(mutex_is_locked(&chip->mutex));
190 for (i = 0; i < count; i += j + t) {
191 t = 0;
192 max = count-i;
193 if (max > USB_MAX_IOWRITE16_COUNT)
194 max = USB_MAX_IOWRITE16_COUNT;
195 for (j = 0; j < max; j++) {
196 if (!ioreqs[i+j].addr) {
197 t = 1;
198 break;
199 }
200 }
201
202 r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j);
203 if (r) {
204 dev_dbg_f(zd_chip_dev(chip),
205 "error zd_usb_iowrite16v. Error number %d\n",
206 r);
207 return r;
208 }
209 }
210
211 return 0;
212}
213
 214/* Writes a variable number of 32-bit registers. The function will split
 215 * them into several USB requests. A split can be forced by inserting an IO
 216 * request with a zero address field.
 217 */
218int zd_iowrite32a_locked(struct zd_chip *chip,
219 const struct zd_ioreq32 *ioreqs, unsigned int count)
220{
221 int r;
222 unsigned int i, j, t, max;
223
224 for (i = 0; i < count; i += j + t) {
225 t = 0;
226 max = count-i;
227 if (max > USB_MAX_IOWRITE32_COUNT)
228 max = USB_MAX_IOWRITE32_COUNT;
229 for (j = 0; j < max; j++) {
230 if (!ioreqs[i+j].addr) {
231 t = 1;
232 break;
233 }
234 }
235
236 r = _zd_iowrite32v_locked(chip, &ioreqs[i], j);
237 if (r) {
238 dev_dbg_f(zd_chip_dev(chip),
239 "error _zd_iowrite32v_locked."
240 " Error number %d\n", r);
241 return r;
242 }
243 }
244
245 return 0;
246}
247
248int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
249{
250 int r;
251
252 ZD_ASSERT(!mutex_is_locked(&chip->mutex));
253 mutex_lock(&chip->mutex);
254 r = zd_ioread16_locked(chip, value, addr);
255 mutex_unlock(&chip->mutex);
256 return r;
257}
258
259int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value)
260{
261 int r;
262
263 ZD_ASSERT(!mutex_is_locked(&chip->mutex));
264 mutex_lock(&chip->mutex);
265 r = zd_ioread32_locked(chip, value, addr);
266 mutex_unlock(&chip->mutex);
267 return r;
268}
269
270int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value)
271{
272 int r;
273
274 ZD_ASSERT(!mutex_is_locked(&chip->mutex));
275 mutex_lock(&chip->mutex);
276 r = zd_iowrite16_locked(chip, value, addr);
277 mutex_unlock(&chip->mutex);
278 return r;
279}
280
281int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value)
282{
283 int r;
284
285 ZD_ASSERT(!mutex_is_locked(&chip->mutex));
286 mutex_lock(&chip->mutex);
287 r = zd_iowrite32_locked(chip, value, addr);
288 mutex_unlock(&chip->mutex);
289 return r;
290}
291
292int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses,
293 u32 *values, unsigned int count)
294{
295 int r;
296
297 ZD_ASSERT(!mutex_is_locked(&chip->mutex));
298 mutex_lock(&chip->mutex);
299 r = zd_ioread32v_locked(chip, values, addresses, count);
300 mutex_unlock(&chip->mutex);
301 return r;
302}
303
304int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
305 unsigned int count)
306{
307 int r;
308
309 ZD_ASSERT(!mutex_is_locked(&chip->mutex));
310 mutex_lock(&chip->mutex);
311 r = zd_iowrite32a_locked(chip, ioreqs, count);
312 mutex_unlock(&chip->mutex);
313 return r;
314}
315
316static int read_pod(struct zd_chip *chip, u8 *rf_type)
317{
318 int r;
319 u32 value;
320
321 ZD_ASSERT(mutex_is_locked(&chip->mutex));
322 r = zd_ioread32_locked(chip, &value, E2P_POD);
323 if (r)
324 goto error;
325 dev_dbg_f(zd_chip_dev(chip), "E2P_POD %#010x\n", value);
326
327 /* FIXME: AL2230 handling (Bit 7 in POD) */
328 *rf_type = value & 0x0f;
329 chip->pa_type = (value >> 16) & 0x0f;
330 chip->patch_cck_gain = (value >> 8) & 0x1;
331 chip->patch_cr157 = (value >> 13) & 0x1;
332 chip->patch_6m_band_edge = (value >> 21) & 0x1;
333
334 dev_dbg_f(zd_chip_dev(chip),
335 "RF %s %#01x PA type %#01x patch CCK %d patch CR157 %d "
336 "patch 6M %d\n",
337 zd_rf_name(*rf_type), *rf_type,
338 chip->pa_type, chip->patch_cck_gain,
339 chip->patch_cr157, chip->patch_6m_band_edge);
340 return 0;
341error:
342 *rf_type = 0;
343 chip->pa_type = 0;
344 chip->patch_cck_gain = 0;
345 chip->patch_cr157 = 0;
346 chip->patch_6m_band_edge = 0;
347 return r;
348}
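As a concrete illustration of the bit layout decoded in read_pod() (the E2P_POD value is made up for the example):

    /* value = 0x00200105:
     *   rf_type            = 0x5  (bits 3:0)
     *   patch_cck_gain     = 1    (bit 8)
     *   patch_cr157        = 0    (bit 13)
     *   pa_type            = 0x0  (bits 19:16)
     *   patch_6m_band_edge = 1    (bit 21)
     */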
349
350static int _read_mac_addr(struct zd_chip *chip, u8 *mac_addr,
351 const zd_addr_t *addr)
352{
353 int r;
354 u32 parts[2];
355
356 r = zd_ioread32v_locked(chip, parts, (const zd_addr_t *)addr, 2);
357 if (r) {
358 dev_dbg_f(zd_chip_dev(chip),
359 "error: couldn't read e2p macs. Error number %d\n", r);
360 return r;
361 }
362
363 mac_addr[0] = parts[0];
364 mac_addr[1] = parts[0] >> 8;
365 mac_addr[2] = parts[0] >> 16;
366 mac_addr[3] = parts[0] >> 24;
367 mac_addr[4] = parts[1];
368 mac_addr[5] = parts[1] >> 8;
369
370 return 0;
371}
372
373static int read_e2p_mac_addr(struct zd_chip *chip)
374{
375 static const zd_addr_t addr[2] = { E2P_MAC_ADDR_P1, E2P_MAC_ADDR_P2 };
376
377 ZD_ASSERT(mutex_is_locked(&chip->mutex));
378 return _read_mac_addr(chip, chip->e2p_mac, (const zd_addr_t *)addr);
379}
380
 381/* MAC address: if custom MAC addresses are to be used, CR_MAC_ADDR_P1 and
 382 * CR_MAC_ADDR_P2 must be overwritten.
 383 */
384void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr)
385{
386 mutex_lock(&chip->mutex);
387 memcpy(mac_addr, chip->e2p_mac, ETH_ALEN);
388 mutex_unlock(&chip->mutex);
389}
390
391static int read_mac_addr(struct zd_chip *chip, u8 *mac_addr)
392{
393 static const zd_addr_t addr[2] = { CR_MAC_ADDR_P1, CR_MAC_ADDR_P2 };
394 return _read_mac_addr(chip, mac_addr, (const zd_addr_t *)addr);
395}
396
397int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr)
398{
399 int r;
400
401 dev_dbg_f(zd_chip_dev(chip), "\n");
402 mutex_lock(&chip->mutex);
403 r = read_mac_addr(chip, mac_addr);
404 mutex_unlock(&chip->mutex);
405 return r;
406}
407
408int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
409{
410 int r;
411 struct zd_ioreq32 reqs[2] = {
412 [0] = { .addr = CR_MAC_ADDR_P1 },
413 [1] = { .addr = CR_MAC_ADDR_P2 },
414 };
415
416 reqs[0].value = (mac_addr[3] << 24)
417 | (mac_addr[2] << 16)
418 | (mac_addr[1] << 8)
419 | mac_addr[0];
420 reqs[1].value = (mac_addr[5] << 8)
421 | mac_addr[4];
422
423 dev_dbg_f(zd_chip_dev(chip),
424 "mac addr " MAC_FMT "\n", MAC_ARG(mac_addr));
425
426 mutex_lock(&chip->mutex);
427 r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
428#ifdef DEBUG
429 {
430 u8 tmp[ETH_ALEN];
431 read_mac_addr(chip, tmp);
432 }
433#endif /* DEBUG */
434 mutex_unlock(&chip->mutex);
435 return r;
436}
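A worked example of the packing above (the address is chosen arbitrarily):

    /* mac_addr 00:11:22:33:44:55 is written as
     *   CR_MAC_ADDR_P1 = 0x33221100   (bytes 0-3, byte 0 in the LSB)
     *   CR_MAC_ADDR_P2 = 0x00005544   (bytes 4-5)
     * _read_mac_addr() above unpacks exactly this layout. */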
437
438int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
439{
440 int r;
441 u32 value;
442
443 mutex_lock(&chip->mutex);
444 r = zd_ioread32_locked(chip, &value, E2P_SUBID);
445 mutex_unlock(&chip->mutex);
446 if (r)
447 return r;
448
449 *regdomain = value >> 16;
450 dev_dbg_f(zd_chip_dev(chip), "regdomain: %#04x\n", *regdomain);
451
452 return 0;
453}
454
455static int read_values(struct zd_chip *chip, u8 *values, size_t count,
456 zd_addr_t e2p_addr, u32 guard)
457{
458 int r;
459 int i;
460 u32 v;
461
462 ZD_ASSERT(mutex_is_locked(&chip->mutex));
463 for (i = 0;;) {
464 r = zd_ioread32_locked(chip, &v, e2p_addr+i/2);
465 if (r)
466 return r;
467 v -= guard;
468 if (i+4 < count) {
469 values[i++] = v;
470 values[i++] = v >> 8;
471 values[i++] = v >> 16;
472 values[i++] = v >> 24;
473 continue;
474 }
475 for (;i < count; i++)
476 values[i] = v >> (8*(i%3));
477 return 0;
478 }
479}
480
481static int read_pwr_cal_values(struct zd_chip *chip)
482{
483 return read_values(chip, chip->pwr_cal_values,
484 E2P_CHANNEL_COUNT, E2P_PWR_CAL_VALUE1,
485 0);
486}
487
488static int read_pwr_int_values(struct zd_chip *chip)
489{
490 return read_values(chip, chip->pwr_int_values,
491 E2P_CHANNEL_COUNT, E2P_PWR_INT_VALUE1,
492 E2P_PWR_INT_GUARD);
493}
494
495static int read_ofdm_cal_values(struct zd_chip *chip)
496{
497 int r;
498 int i;
499 static const zd_addr_t addresses[] = {
500 E2P_36M_CAL_VALUE1,
501 E2P_48M_CAL_VALUE1,
502 E2P_54M_CAL_VALUE1,
503 };
504
505 for (i = 0; i < 3; i++) {
506 r = read_values(chip, chip->ofdm_cal_values[i],
507 E2P_CHANNEL_COUNT, addresses[i], 0);
508 if (r)
509 return r;
510 }
511 return 0;
512}
513
514static int read_cal_int_tables(struct zd_chip *chip)
515{
516 int r;
517
518 r = read_pwr_cal_values(chip);
519 if (r)
520 return r;
521 r = read_pwr_int_values(chip);
522 if (r)
523 return r;
524 r = read_ofdm_cal_values(chip);
525 if (r)
526 return r;
527 return 0;
528}
529
530/* phy means physical registers */
531int zd_chip_lock_phy_regs(struct zd_chip *chip)
532{
533 int r;
534 u32 tmp;
535
536 ZD_ASSERT(mutex_is_locked(&chip->mutex));
537 r = zd_ioread32_locked(chip, &tmp, CR_REG1);
538 if (r) {
539 dev_err(zd_chip_dev(chip), "error ioread32(CR_REG1): %d\n", r);
540 return r;
541 }
542
543 dev_dbg_f(zd_chip_dev(chip),
544 "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp & ~UNLOCK_PHY_REGS);
545 tmp &= ~UNLOCK_PHY_REGS;
546
547 r = zd_iowrite32_locked(chip, tmp, CR_REG1);
548 if (r)
549 dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r);
550 return r;
551}
552
553int zd_chip_unlock_phy_regs(struct zd_chip *chip)
554{
555 int r;
556 u32 tmp;
557
558 ZD_ASSERT(mutex_is_locked(&chip->mutex));
559 r = zd_ioread32_locked(chip, &tmp, CR_REG1);
560 if (r) {
561 dev_err(zd_chip_dev(chip),
562 "error ioread32(CR_REG1): %d\n", r);
563 return r;
564 }
565
566 dev_dbg_f(zd_chip_dev(chip),
567 "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp | UNLOCK_PHY_REGS);
568 tmp |= UNLOCK_PHY_REGS;
569
570 r = zd_iowrite32_locked(chip, tmp, CR_REG1);
571 if (r)
572 dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r);
573 return r;
574}
575
576/* CR157 can be optionally patched by the EEPROM */
577static int patch_cr157(struct zd_chip *chip)
578{
579 int r;
580 u32 value;
581
582 if (!chip->patch_cr157)
583 return 0;
584
585 r = zd_ioread32_locked(chip, &value, E2P_PHY_REG);
586 if (r)
587 return r;
588
589 dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value >> 8);
590 return zd_iowrite32_locked(chip, value >> 8, CR157);
591}
592
 593/*
 594 * The 6M band edge can optionally be overwritten for certain RFs.
 595 * Vendor driver says: for FCC regulation, enabled per HWFeature 6M band edge
 596 * bit (for AL2230, AL2230S).
 597 */
598static int patch_6m_band_edge(struct zd_chip *chip, int channel)
599{
600 struct zd_ioreq16 ioreqs[] = {
601 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
602 { CR47, 0x1e },
603 };
604
605 if (!chip->patch_6m_band_edge || !chip->rf.patch_6m_band_edge)
606 return 0;
607
608 /* FIXME: Channel 11 is not the edge for all regulatory domains. */
609 if (channel == 1 || channel == 11)
610 ioreqs[0].value = 0x12;
611
612 dev_dbg_f(zd_chip_dev(chip), "patching for channel %d\n", channel);
613 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
614}
615
616static int zd1211_hw_reset_phy(struct zd_chip *chip)
617{
618 static const struct zd_ioreq16 ioreqs[] = {
619 { CR0, 0x0a }, { CR1, 0x06 }, { CR2, 0x26 },
620 { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xa0 },
621 { CR10, 0x81 }, { CR11, 0x00 }, { CR12, 0x7f },
622 { CR13, 0x8c }, { CR14, 0x80 }, { CR15, 0x3d },
623 { CR16, 0x20 }, { CR17, 0x1e }, { CR18, 0x0a },
624 { CR19, 0x48 }, { CR20, 0x0c }, { CR21, 0x0c },
625 { CR22, 0x23 }, { CR23, 0x90 }, { CR24, 0x14 },
626 { CR25, 0x40 }, { CR26, 0x10 }, { CR27, 0x19 },
627 { CR28, 0x7f }, { CR29, 0x80 }, { CR30, 0x4b },
628 { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 },
629 { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 },
630 { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c },
631 { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 },
632 { CR43, 0x10 }, { CR44, 0x12 }, { CR46, 0xff },
633 { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b },
634 { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 },
635 { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 },
636 { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff },
637 { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 },
638 { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 },
639 { CR79, 0x68 }, { CR80, 0x64 }, { CR81, 0x64 },
640 { CR82, 0x00 }, { CR83, 0x00 }, { CR84, 0x00 },
641 { CR85, 0x02 }, { CR86, 0x00 }, { CR87, 0x00 },
642 { CR88, 0xff }, { CR89, 0xfc }, { CR90, 0x00 },
643 { CR91, 0x00 }, { CR92, 0x00 }, { CR93, 0x08 },
644 { CR94, 0x00 }, { CR95, 0x00 }, { CR96, 0xff },
645 { CR97, 0xe7 }, { CR98, 0x00 }, { CR99, 0x00 },
646 { CR100, 0x00 }, { CR101, 0xae }, { CR102, 0x02 },
647 { CR103, 0x00 }, { CR104, 0x03 }, { CR105, 0x65 },
648 { CR106, 0x04 }, { CR107, 0x00 }, { CR108, 0x0a },
649 { CR109, 0xaa }, { CR110, 0xaa }, { CR111, 0x25 },
650 { CR112, 0x25 }, { CR113, 0x00 }, { CR119, 0x1e },
651 { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 },
652 { },
653 { CR5, 0x00 }, { CR6, 0x00 }, { CR7, 0x00 },
654 { CR8, 0x00 }, { CR9, 0x20 }, { CR12, 0xf0 },
655 { CR20, 0x0e }, { CR21, 0x0e }, { CR27, 0x10 },
656 { CR44, 0x33 }, { CR47, 0x1E }, { CR83, 0x24 },
657 { CR84, 0x04 }, { CR85, 0x00 }, { CR86, 0x0C },
658 { CR87, 0x12 }, { CR88, 0x0C }, { CR89, 0x00 },
659 { CR90, 0x10 }, { CR91, 0x08 }, { CR93, 0x00 },
660 { CR94, 0x01 }, { CR95, 0x00 }, { CR96, 0x50 },
661 { CR97, 0x37 }, { CR98, 0x35 }, { CR101, 0x13 },
662 { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 },
663 { CR105, 0x12 }, { CR109, 0x27 }, { CR110, 0x27 },
664 { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 },
665 { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 },
666 { CR117, 0xfc }, { CR118, 0xfa }, { CR120, 0x4f },
667 { CR123, 0x27 }, { CR125, 0xaa }, { CR127, 0x03 },
668 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
669 { CR131, 0x0C }, { CR136, 0xdf }, { CR137, 0x40 },
670 { CR138, 0xa0 }, { CR139, 0xb0 }, { CR140, 0x99 },
671 { CR141, 0x82 }, { CR142, 0x54 }, { CR143, 0x1c },
672 { CR144, 0x6c }, { CR147, 0x07 }, { CR148, 0x4c },
673 { CR149, 0x50 }, { CR150, 0x0e }, { CR151, 0x18 },
674 { CR160, 0xfe }, { CR161, 0xee }, { CR162, 0xaa },
675 { CR163, 0xfa }, { CR164, 0xfa }, { CR165, 0xea },
676 { CR166, 0xbe }, { CR167, 0xbe }, { CR168, 0x6a },
677 { CR169, 0xba }, { CR170, 0xba }, { CR171, 0xba },
 678 /* Note: CR204 must be written before CR203 */
679 { CR204, 0x7d },
680 { },
681 { CR203, 0x30 },
682 };
683
684 int r, t;
685
686 dev_dbg_f(zd_chip_dev(chip), "\n");
687
688 r = zd_chip_lock_phy_regs(chip);
689 if (r)
690 goto out;
691
692 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
693 if (r)
694 goto unlock;
695
696 r = patch_cr157(chip);
697unlock:
698 t = zd_chip_unlock_phy_regs(chip);
699 if (t && !r)
700 r = t;
701out:
702 return r;
703}
704
705static int zd1211b_hw_reset_phy(struct zd_chip *chip)
706{
707 static const struct zd_ioreq16 ioreqs[] = {
708 { CR0, 0x14 }, { CR1, 0x06 }, { CR2, 0x26 },
709 { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xe0 },
710 { CR10, 0x81 },
711 /* power control { { CR11, 1 << 6 }, */
712 { CR11, 0x00 },
713 { CR12, 0xf0 }, { CR13, 0x8c }, { CR14, 0x80 },
714 { CR15, 0x3d }, { CR16, 0x20 }, { CR17, 0x1e },
715 { CR18, 0x0a }, { CR19, 0x48 },
716 { CR20, 0x10 }, /* Org:0x0E, ComTrend:RalLink AP */
717 { CR21, 0x0e }, { CR22, 0x23 }, { CR23, 0x90 },
718 { CR24, 0x14 }, { CR25, 0x40 }, { CR26, 0x10 },
719 { CR27, 0x10 }, { CR28, 0x7f }, { CR29, 0x80 },
720 { CR30, 0x49 }, /* jointly decoder, no ASIC */
721 { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 },
722 { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 },
723 { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c },
724 { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 },
725 { CR43, 0x10 }, { CR44, 0x33 }, { CR46, 0xff },
726 { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b },
727 { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 },
728 { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 },
729 { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff },
730 { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 },
731 { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 },
732 { CR79, 0xf0 }, { CR80, 0x64 }, { CR81, 0x64 },
733 { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 },
734 { CR85, 0x00 }, { CR86, 0x0c }, { CR87, 0x12 },
735 { CR88, 0x0c }, { CR89, 0x00 }, { CR90, 0x58 },
736 { CR91, 0x04 }, { CR92, 0x00 }, { CR93, 0x00 },
737 { CR94, 0x01 },
738 { CR95, 0x20 }, /* ZD1211B */
739 { CR96, 0x50 }, { CR97, 0x37 }, { CR98, 0x35 },
740 { CR99, 0x00 }, { CR100, 0x01 }, { CR101, 0x13 },
741 { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 },
742 { CR105, 0x12 }, { CR106, 0x04 }, { CR107, 0x00 },
743 { CR108, 0x0a }, { CR109, 0x27 }, { CR110, 0x27 },
744 { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 },
745 { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 },
746 { CR117, 0xfc }, { CR118, 0xfa }, { CR119, 0x1e },
747 { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 },
748 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
749 { CR131, 0x0c }, { CR136, 0xdf }, { CR137, 0xa0 },
750 { CR138, 0xa8 }, { CR139, 0xb4 }, { CR140, 0x98 },
751 { CR141, 0x82 }, { CR142, 0x53 }, { CR143, 0x1c },
752 { CR144, 0x6c }, { CR147, 0x07 }, { CR148, 0x40 },
753 { CR149, 0x40 }, /* Org:0x50 ComTrend:RalLink AP */
754 { CR150, 0x14 }, /* Org:0x0E ComTrend:RalLink AP */
755 { CR151, 0x18 }, { CR159, 0x70 }, { CR160, 0xfe },
756 { CR161, 0xee }, { CR162, 0xaa }, { CR163, 0xfa },
757 { CR164, 0xfa }, { CR165, 0xea }, { CR166, 0xbe },
758 { CR167, 0xbe }, { CR168, 0x6a }, { CR169, 0xba },
759 { CR170, 0xba }, { CR171, 0xba },
 760 /* Note: CR204 must be written before CR203 */
761 { CR204, 0x7d },
762 {},
763 { CR203, 0x30 },
764 };
765
766 int r, t;
767
768 dev_dbg_f(zd_chip_dev(chip), "\n");
769
770 r = zd_chip_lock_phy_regs(chip);
771 if (r)
772 goto out;
773
774 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
775 if (r)
776 goto unlock;
777
778 r = patch_cr157(chip);
779unlock:
780 t = zd_chip_unlock_phy_regs(chip);
781 if (t && !r)
782 r = t;
783out:
784 return r;
785}
786
787static int hw_reset_phy(struct zd_chip *chip)
788{
789 return chip->is_zd1211b ? zd1211b_hw_reset_phy(chip) :
790 zd1211_hw_reset_phy(chip);
791}
792
793static int zd1211_hw_init_hmac(struct zd_chip *chip)
794{
795 static const struct zd_ioreq32 ioreqs[] = {
796 { CR_ACK_TIMEOUT_EXT, 0x20 },
797 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
798 { CR_ZD1211_RETRY_MAX, 0x2 },
799 { CR_SNIFFER_ON, 0 },
800 { CR_RX_FILTER, AP_RX_FILTER },
801 { CR_GROUP_HASH_P1, 0x00 },
802 { CR_GROUP_HASH_P2, 0x80000000 },
803 { CR_REG1, 0xa4 },
804 { CR_ADDA_PWR_DWN, 0x7f },
805 { CR_BCN_PLCP_CFG, 0x00f00401 },
806 { CR_PHY_DELAY, 0x00 },
807 { CR_ACK_TIMEOUT_EXT, 0x80 },
808 { CR_ADDA_PWR_DWN, 0x00 },
809 { CR_ACK_TIME_80211, 0x100 },
810 { CR_IFS_VALUE, 0x547c032 },
811 { CR_RX_PE_DELAY, 0x70 },
812 { CR_PS_CTRL, 0x10000000 },
813 { CR_RTS_CTS_RATE, 0x02030203 },
814 { CR_RX_THRESHOLD, 0x000c0640 },
815 { CR_AFTER_PNP, 0x1 },
816 { CR_WEP_PROTECT, 0x114 },
817 };
818
819 int r;
820
821 dev_dbg_f(zd_chip_dev(chip), "\n");
822 ZD_ASSERT(mutex_is_locked(&chip->mutex));
823 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
824#ifdef DEBUG
825 if (r) {
826 dev_err(zd_chip_dev(chip),
827 "error in zd_iowrite32a_locked. Error number %d\n", r);
828 }
829#endif /* DEBUG */
830 return r;
831}
832
833static int zd1211b_hw_init_hmac(struct zd_chip *chip)
834{
835 static const struct zd_ioreq32 ioreqs[] = {
836 { CR_ACK_TIMEOUT_EXT, 0x20 },
837 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
838 { CR_ZD1211B_RETRY_MAX, 0x02020202 },
839 { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f },
840 { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f },
841 { CR_ZD1211B_TX_PWR_CTL2, 0x003f001f },
842 { CR_ZD1211B_TX_PWR_CTL1, 0x001f000f },
843 { CR_ZD1211B_AIFS_CTL1, 0x00280028 },
844 { CR_ZD1211B_AIFS_CTL2, 0x008C003C },
845 { CR_ZD1211B_TXOP, 0x01800824 },
846 { CR_SNIFFER_ON, 0 },
847 { CR_RX_FILTER, AP_RX_FILTER },
848 { CR_GROUP_HASH_P1, 0x00 },
849 { CR_GROUP_HASH_P2, 0x80000000 },
850 { CR_REG1, 0xa4 },
851 { CR_ADDA_PWR_DWN, 0x7f },
852 { CR_BCN_PLCP_CFG, 0x00f00401 },
853 { CR_PHY_DELAY, 0x00 },
854 { CR_ACK_TIMEOUT_EXT, 0x80 },
855 { CR_ADDA_PWR_DWN, 0x00 },
856 { CR_ACK_TIME_80211, 0x100 },
857 { CR_IFS_VALUE, 0x547c032 },
858 { CR_RX_PE_DELAY, 0x70 },
859 { CR_PS_CTRL, 0x10000000 },
860 { CR_RTS_CTS_RATE, 0x02030203 },
861 { CR_RX_THRESHOLD, 0x000c0640 },
862 { CR_AFTER_PNP, 0x1 },
863 { CR_WEP_PROTECT, 0x114 },
864 };
865
866 int r;
867
868 dev_dbg_f(zd_chip_dev(chip), "\n");
869 ZD_ASSERT(mutex_is_locked(&chip->mutex));
870 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
871 if (r) {
872 dev_dbg_f(zd_chip_dev(chip),
873 "error in zd_iowrite32a_locked. Error number %d\n", r);
874 }
875 return r;
876}
877
878static int hw_init_hmac(struct zd_chip *chip)
879{
880 return chip->is_zd1211b ?
881 zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip);
882}
883
884struct aw_pt_bi {
885 u32 atim_wnd_period;
886 u32 pre_tbtt;
887 u32 beacon_interval;
888};
889
890static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
891{
892 int r;
893 static const zd_addr_t aw_pt_bi_addr[] =
894 { CR_ATIM_WND_PERIOD, CR_PRE_TBTT, CR_BCN_INTERVAL };
895 u32 values[3];
896
897 r = zd_ioread32v_locked(chip, values, (const zd_addr_t *)aw_pt_bi_addr,
898 ARRAY_SIZE(aw_pt_bi_addr));
899 if (r) {
900 memset(s, 0, sizeof(*s));
901 return r;
902 }
903
904 s->atim_wnd_period = values[0];
905 s->pre_tbtt = values[1];
906 s->beacon_interval = values[2];
907 dev_dbg_f(zd_chip_dev(chip), "aw %u pt %u bi %u\n",
908 s->atim_wnd_period, s->pre_tbtt, s->beacon_interval);
909 return 0;
910}
911
912static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
913{
914 struct zd_ioreq32 reqs[3];
915
916 if (s->beacon_interval <= 5)
917 s->beacon_interval = 5;
918 if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval)
919 s->pre_tbtt = s->beacon_interval - 1;
920 if (s->atim_wnd_period >= s->pre_tbtt)
921 s->atim_wnd_period = s->pre_tbtt - 1;
922
923 reqs[0].addr = CR_ATIM_WND_PERIOD;
924 reqs[0].value = s->atim_wnd_period;
925 reqs[1].addr = CR_PRE_TBTT;
926 reqs[1].value = s->pre_tbtt;
927 reqs[2].addr = CR_BCN_INTERVAL;
928 reqs[2].value = s->beacon_interval;
929
930 dev_dbg_f(zd_chip_dev(chip),
931 "aw %u pt %u bi %u\n", s->atim_wnd_period, s->pre_tbtt,
932 s->beacon_interval);
933 return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
934}
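A short example of the clamping above (numbers purely illustrative):

    /* requested: atim_wnd_period = 5, pre_tbtt = 2, beacon_interval = 100
     * after clamping:
     *   beacon_interval = 100  (already > 5)
     *   pre_tbtt        = 99   (was < 4, forced to beacon_interval - 1)
     *   atim_wnd_period = 5    (already < pre_tbtt)
     */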
935
936
937static int set_beacon_interval(struct zd_chip *chip, u32 interval)
938{
939 int r;
940 struct aw_pt_bi s;
941
942 ZD_ASSERT(mutex_is_locked(&chip->mutex));
943 r = get_aw_pt_bi(chip, &s);
944 if (r)
945 return r;
946 s.beacon_interval = interval;
947 return set_aw_pt_bi(chip, &s);
948}
949
950int zd_set_beacon_interval(struct zd_chip *chip, u32 interval)
951{
952 int r;
953
954 mutex_lock(&chip->mutex);
955 r = set_beacon_interval(chip, interval);
956 mutex_unlock(&chip->mutex);
957 return r;
958}
959
960static int hw_init(struct zd_chip *chip)
961{
962 int r;
963
964 dev_dbg_f(zd_chip_dev(chip), "\n");
965 ZD_ASSERT(mutex_is_locked(&chip->mutex));
966 r = hw_reset_phy(chip);
967 if (r)
968 return r;
969
970 r = hw_init_hmac(chip);
971 if (r)
972 return r;
973 r = set_beacon_interval(chip, 100);
974 if (r)
975 return r;
976 return 0;
977}
978
979#ifdef DEBUG
980static int dump_cr(struct zd_chip *chip, const zd_addr_t addr,
981 const char *addr_string)
982{
983 int r;
984 u32 value;
985
986 r = zd_ioread32_locked(chip, &value, addr);
987 if (r) {
988 dev_dbg_f(zd_chip_dev(chip),
989 "error reading %s. Error number %d\n", addr_string, r);
990 return r;
991 }
992
993 dev_dbg_f(zd_chip_dev(chip), "%s %#010x\n",
994 addr_string, (unsigned int)value);
995 return 0;
996}
997
998static int test_init(struct zd_chip *chip)
999{
1000 int r;
1001
1002 r = dump_cr(chip, CR_AFTER_PNP, "CR_AFTER_PNP");
1003 if (r)
1004 return r;
1005 r = dump_cr(chip, CR_GPI_EN, "CR_GPI_EN");
1006 if (r)
1007 return r;
1008 return dump_cr(chip, CR_INTERRUPT, "CR_INTERRUPT");
1009}
1010
1011static void dump_fw_registers(struct zd_chip *chip)
1012{
1013 static const zd_addr_t addr[4] = {
1014 FW_FIRMWARE_VER, FW_USB_SPEED, FW_FIX_TX_RATE,
1015 FW_LINK_STATUS
1016 };
1017
1018 int r;
1019 u16 values[4];
1020
1021 r = zd_ioread16v_locked(chip, values, (const zd_addr_t*)addr,
1022 ARRAY_SIZE(addr));
1023 if (r) {
1024 dev_dbg_f(zd_chip_dev(chip), "error %d zd_ioread16v_locked\n",
1025 r);
1026 return;
1027 }
1028
1029 dev_dbg_f(zd_chip_dev(chip), "FW_FIRMWARE_VER %#06hx\n", values[0]);
1030 dev_dbg_f(zd_chip_dev(chip), "FW_USB_SPEED %#06hx\n", values[1]);
1031 dev_dbg_f(zd_chip_dev(chip), "FW_FIX_TX_RATE %#06hx\n", values[2]);
1032 dev_dbg_f(zd_chip_dev(chip), "FW_LINK_STATUS %#06hx\n", values[3]);
1033}
1034#endif /* DEBUG */
1035
1036static int print_fw_version(struct zd_chip *chip)
1037{
1038 int r;
1039 u16 version;
1040
1041 r = zd_ioread16_locked(chip, &version, FW_FIRMWARE_VER);
1042 if (r)
1043 return r;
1044
1045 dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version);
1046 return 0;
1047}
1048
1049static int set_mandatory_rates(struct zd_chip *chip, enum ieee80211_std std)
1050{
1051 u32 rates;
1052 ZD_ASSERT(mutex_is_locked(&chip->mutex));
 1053 /* This sets the mandatory rates, which depend only on the standard
 1054 * that the device supports. Until further notice we should try
 1055 * to support 802.11g for full-speed USB as well.
 1056 */
1057 switch (std) {
1058 case IEEE80211B:
1059 rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M;
1060 break;
1061 case IEEE80211G:
1062 rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M|
1063 CR_RATE_6M|CR_RATE_12M|CR_RATE_24M;
1064 break;
1065 default:
1066 return -EINVAL;
1067 }
1068 return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL);
1069}
1070
1071int zd_chip_enable_hwint(struct zd_chip *chip)
1072{
1073 int r;
1074
1075 mutex_lock(&chip->mutex);
1076 r = zd_iowrite32_locked(chip, HWINT_ENABLED, CR_INTERRUPT);
1077 mutex_unlock(&chip->mutex);
1078 return r;
1079}
1080
1081static int disable_hwint(struct zd_chip *chip)
1082{
1083 return zd_iowrite32_locked(chip, HWINT_DISABLED, CR_INTERRUPT);
1084}
1085
1086int zd_chip_disable_hwint(struct zd_chip *chip)
1087{
1088 int r;
1089
1090 mutex_lock(&chip->mutex);
1091 r = disable_hwint(chip);
1092 mutex_unlock(&chip->mutex);
1093 return r;
1094}
1095
1096int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
1097{
1098 int r;
1099 u8 rf_type;
1100
1101 dev_dbg_f(zd_chip_dev(chip), "\n");
1102
1103 mutex_lock(&chip->mutex);
1104 chip->is_zd1211b = (device_type == DEVICE_ZD1211B) != 0;
1105
1106#ifdef DEBUG
1107 r = test_init(chip);
1108 if (r)
1109 goto out;
1110#endif
1111 r = zd_iowrite32_locked(chip, 1, CR_AFTER_PNP);
1112 if (r)
1113 goto out;
1114
1115 r = zd_usb_init_hw(&chip->usb);
1116 if (r)
1117 goto out;
1118
1119	/* GPI is always disabled; the vendor driver does the same.
1120 */
1121 r = zd_iowrite32_locked(chip, 0, CR_GPI_EN);
1122 if (r)
1123 goto out;
1124 r = zd_iowrite32_locked(chip, CWIN_SIZE, CR_CWMIN_CWMAX);
1125 if (r)
1126 goto out;
1127	/* Currently we support IEEE 802.11g for full- and high-speed USB.
1128	 * It is debatable whether we should also support pure 802.11b mode
1129	 * for full-speed USB.
1130 */
1131 r = set_mandatory_rates(chip, IEEE80211G);
1132 if (r)
1133 goto out;
1134 /* Disabling interrupts is certainly a smart thing here.
1135 */
1136 r = disable_hwint(chip);
1137 if (r)
1138 goto out;
1139 r = read_pod(chip, &rf_type);
1140 if (r)
1141 goto out;
1142 r = hw_init(chip);
1143 if (r)
1144 goto out;
1145 r = zd_rf_init_hw(&chip->rf, rf_type);
1146 if (r)
1147 goto out;
1148
1149 r = print_fw_version(chip);
1150 if (r)
1151 goto out;
1152
1153#ifdef DEBUG
1154 dump_fw_registers(chip);
1155 r = test_init(chip);
1156 if (r)
1157 goto out;
1158#endif /* DEBUG */
1159
1160 r = read_e2p_mac_addr(chip);
1161 if (r)
1162 goto out;
1163
1164 r = read_cal_int_tables(chip);
1165 if (r)
1166 goto out;
1167
1168 print_id(chip);
1169out:
1170 mutex_unlock(&chip->mutex);
1171 return r;
1172}
1173
1174static int update_pwr_int(struct zd_chip *chip, u8 channel)
1175{
1176 u8 value = chip->pwr_int_values[channel - 1];
1177 dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_int %#04x\n",
1178 channel, value);
1179 return zd_iowrite32_locked(chip, value, CR31);
1180}
1181
1182static int update_pwr_cal(struct zd_chip *chip, u8 channel)
1183{
1184 u8 value = chip->pwr_cal_values[channel-1];
1185 dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_cal %#04x\n",
1186 channel, value);
1187 return zd_iowrite32_locked(chip, value, CR68);
1188}
1189
1190static int update_ofdm_cal(struct zd_chip *chip, u8 channel)
1191{
1192 struct zd_ioreq32 ioreqs[3];
1193
1194 ioreqs[0].addr = CR67;
1195 ioreqs[0].value = chip->ofdm_cal_values[OFDM_36M_INDEX][channel-1];
1196 ioreqs[1].addr = CR66;
1197 ioreqs[1].value = chip->ofdm_cal_values[OFDM_48M_INDEX][channel-1];
1198 ioreqs[2].addr = CR65;
1199 ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1];
1200
1201 dev_dbg_f(zd_chip_dev(chip),
1202 "channel %d ofdm_cal 36M %#04x 48M %#04x 54M %#04x\n",
1203 channel, ioreqs[0].value, ioreqs[1].value, ioreqs[2].value);
1204 return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
1205}
1206
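/* Updates the per-channel transmit power integration value; on the ZD1211B
 * the OFDM and CCK calibration values are updated as well, followed by
 * writes of 0x28 and 0x2a to CR69.
 */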
1207static int update_channel_integration_and_calibration(struct zd_chip *chip,
1208 u8 channel)
1209{
1210 int r;
1211
1212 r = update_pwr_int(chip, channel);
1213 if (r)
1214 return r;
1215 if (chip->is_zd1211b) {
1216 static const struct zd_ioreq32 ioreqs[] = {
1217 { CR69, 0x28 },
1218 {},
1219 { CR69, 0x2a },
1220 };
1221
1222 r = update_ofdm_cal(chip, channel);
1223 if (r)
1224 return r;
1225 r = update_pwr_cal(chip, channel);
1226 if (r)
1227 return r;
1228 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
1229 if (r)
1230 return r;
1231 }
1232
1233 return 0;
1234}
1235
1236/* The CCK baseband gain can optionally be patched from the EEPROM */
1237static int patch_cck_gain(struct zd_chip *chip)
1238{
1239 int r;
1240 u32 value;
1241
1242 if (!chip->patch_cck_gain)
1243 return 0;
1244
1245 ZD_ASSERT(mutex_is_locked(&chip->mutex));
1246 r = zd_ioread32_locked(chip, &value, E2P_PHY_REG);
1247 if (r)
1248 return r;
1249 dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value & 0xff);
1250 return zd_iowrite32_locked(chip, value & 0xff, CR47);
1251}
1252
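/* Tunes the RF to the given channel, updates the per-channel calibration
 * values and applies the optional EEPROM-based patches, all while holding
 * the PHY register lock.
 */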
1253int zd_chip_set_channel(struct zd_chip *chip, u8 channel)
1254{
1255 int r, t;
1256
1257 mutex_lock(&chip->mutex);
1258 r = zd_chip_lock_phy_regs(chip);
1259 if (r)
1260 goto out;
1261 r = zd_rf_set_channel(&chip->rf, channel);
1262 if (r)
1263 goto unlock;
1264 r = update_channel_integration_and_calibration(chip, channel);
1265 if (r)
1266 goto unlock;
1267 r = patch_cck_gain(chip);
1268 if (r)
1269 goto unlock;
1270 r = patch_6m_band_edge(chip, channel);
1271 if (r)
1272 goto unlock;
1273 r = zd_iowrite32_locked(chip, 0, CR_CONFIG_PHILIPS);
1274unlock:
1275 t = zd_chip_unlock_phy_regs(chip);
1276 if (t && !r)
1277 r = t;
1278out:
1279 mutex_unlock(&chip->mutex);
1280 return r;
1281}
1282
1283u8 zd_chip_get_channel(struct zd_chip *chip)
1284{
1285 u8 channel;
1286
1287 mutex_lock(&chip->mutex);
1288 channel = chip->rf.channel;
1289 mutex_unlock(&chip->mutex);
1290 return channel;
1291}
1292
1293static u16 led_mask(int led)
1294{
1295 switch (led) {
1296 case 1:
1297 return LED1;
1298 case 2:
1299 return LED2;
1300 default:
1301 return 0;
1302 }
1303}
1304
1305static int read_led_reg(struct zd_chip *chip, u16 *status)
1306{
1307 ZD_ASSERT(mutex_is_locked(&chip->mutex));
1308 return zd_ioread16_locked(chip, status, CR_LED);
1309}
1310
1311static int write_led_reg(struct zd_chip *chip, u16 status)
1312{
1313 ZD_ASSERT(mutex_is_locked(&chip->mutex));
1314 return zd_iowrite16_locked(chip, status, CR_LED);
1315}
1316
1317int zd_chip_led_status(struct zd_chip *chip, int led, enum led_status status)
1318{
1319	int r, ret;
1320	u16 mask = led_mask(led);
1321	u16 reg;
1322
1323	if (!mask)
1324		return -EINVAL;
1325	mutex_lock(&chip->mutex);
1326	ret = read_led_reg(chip, &reg);
1327	if (ret)
1328		goto out;
1329	switch (status) {
1330	case LED_STATUS:
1331		ret = (reg & mask) ? LED_ON : LED_OFF;
1332		goto out;
1333	case LED_OFF:
1334		reg &= ~mask;
1335		ret = LED_OFF;
1336		break;
1337	case LED_FLIP:
1338		reg ^= mask;
1339		ret = (reg & mask) ? LED_ON : LED_OFF;
1340		break;
1341	case LED_ON:
1342		reg |= mask;
1343		ret = LED_ON;
1344		break;
1345	default:
1346		ret = -EINVAL;
1347		goto out;
1348	}
1349	r = write_led_reg(chip, reg);
1350	if (r)
1351		ret = r;
1352out:
1353	mutex_unlock(&chip->mutex);
1354	return ret;
1355}
1356
1357int zd_chip_led_flip(struct zd_chip *chip, int led,
1358 const unsigned int *phases_msecs, unsigned int count)
1359{
1360 int i, r;
1361 enum led_status status;
1362
1363 r = zd_chip_led_status(chip, led, LED_STATUS);
1364	if (r < 0)
1365 return r;
1366 status = r;
1367 for (i = 0; i < count; i++) {
1368 r = zd_chip_led_status(chip, led, LED_FLIP);
1369 if (r < 0)
1370 goto out;
1371 msleep(phases_msecs[i]);
1372 }
1373
1374out:
1375 zd_chip_led_status(chip, led, status);
1376 return r;
1377}
1378
1379int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
1380{
1381 int r;
1382
1383 if (cr_rates & ~(CR_RATES_80211B|CR_RATES_80211G))
1384 return -EINVAL;
1385
1386 mutex_lock(&chip->mutex);
1387 r = zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL);
1388 mutex_unlock(&chip->mutex);
1389 return r;
1390}
1391
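/* Estimates the quality of a received OFDM frame in dB. The hardware appears
 * to report the quality accumulated over the whole frame, so the frame size
 * is first scaled by the inverse coding rate of the given OFDM rate before
 * the value is mapped through the table of decreasing thresholds below.
 */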
1392static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size)
1393{
1394 static const u16 constants[] = {
1395 715, 655, 585, 540, 470, 410, 360, 315,
1396 270, 235, 205, 175, 150, 125, 105, 85,
1397 65, 50, 40, 25, 15
1398 };
1399
1400 int i;
1401 u32 x;
1402
1403	/* It seems that the quality parameter is reported per frame and is
1404	 * converted here to a per-bit value.
1405 */
1406 switch (rate) {
1407 case ZD_OFDM_RATE_6M:
1408 case ZD_OFDM_RATE_12M:
1409 case ZD_OFDM_RATE_24M:
1410 size *= 2;
1411 break;
1412 case ZD_OFDM_RATE_9M:
1413 case ZD_OFDM_RATE_18M:
1414 case ZD_OFDM_RATE_36M:
1415 case ZD_OFDM_RATE_54M:
1416 size *= 4;
1417 size /= 3;
1418 break;
1419 case ZD_OFDM_RATE_48M:
1420 size *= 3;
1421 size /= 2;
1422 break;
1423 default:
1424 return -EINVAL;
1425 }
1426
1427 x = (10000 * status_quality)/size;
1428 for (i = 0; i < ARRAY_SIZE(constants); i++) {
1429 if (x > constants[i])
1430 break;
1431 }
1432
1433 return i;
1434}
1435
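/* Lookup table for 100 * log10(x), e.g. log10times100(10) == 100 and
 * log10times100(100) == 200; arguments beyond the table are clamped to 225.
 */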
1436static unsigned int log10times100(unsigned int x)
1437{
1438 static const u8 log10[] = {
1439 0,
1440 0, 30, 47, 60, 69, 77, 84, 90, 95, 100,
1441 104, 107, 111, 114, 117, 120, 123, 125, 127, 130,
1442 132, 134, 136, 138, 139, 141, 143, 144, 146, 147,
1443 149, 150, 151, 153, 154, 155, 156, 157, 159, 160,
1444 161, 162, 163, 164, 165, 166, 167, 168, 169, 169,
1445 170, 171, 172, 173, 174, 174, 175, 176, 177, 177,
1446 178, 179, 179, 180, 181, 181, 182, 183, 183, 184,
1447 185, 185, 186, 186, 187, 188, 188, 189, 189, 190,
1448 190, 191, 191, 192, 192, 193, 193, 194, 194, 195,
1449 195, 196, 196, 197, 197, 198, 198, 199, 199, 200,
1450 200, 200, 201, 201, 202, 202, 202, 203, 203, 204,
1451 204, 204, 205, 205, 206, 206, 206, 207, 207, 207,
1452 208, 208, 208, 209, 209, 210, 210, 210, 211, 211,
1453 211, 212, 212, 212, 213, 213, 213, 213, 214, 214,
1454 214, 215, 215, 215, 216, 216, 216, 217, 217, 217,
1455 217, 218, 218, 218, 219, 219, 219, 219, 220, 220,
1456 220, 220, 221, 221, 221, 222, 222, 222, 222, 223,
1457 223, 223, 223, 224, 224, 224, 224,
1458 };
1459
1460 return x < ARRAY_SIZE(log10) ? log10[x] : 225;
1461}
1462
1463enum {
1464 MAX_CCK_EVM_DB = 45,
1465};
1466
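/* For CCK frames the error vector magnitude (EVM) in dB is approximated as
 * 20 * log10(status_quality); the SNR estimate is MAX_CCK_EVM_DB minus that
 * value.
 */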
1467static int cck_evm_db(u8 status_quality)
1468{
1469 return (20 * log10times100(status_quality)) / 100;
1470}
1471
1472static int cck_snr_db(u8 status_quality)
1473{
1474 int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
1475 ZD_ASSERT(r >= 0);
1476 return r;
1477}
1478
1479static int rx_qual_db(const void *rx_frame, unsigned int size,
1480 const struct rx_status *status)
1481{
1482 return (status->frame_status&ZD_RX_OFDM) ?
1483 ofdm_qual_db(status->signal_quality_ofdm,
1484 zd_ofdm_plcp_header_rate(rx_frame),
1485 size) :
1486 cck_snr_db(status->signal_quality_cck);
1487}
1488
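/* Maps the dB estimate from rx_qual_db() onto a 0..100 percentage, with
 * 14 dB or more counting as 100%.
 */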
1489u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
1490 const struct rx_status *status)
1491{
1492 int r = rx_qual_db(rx_frame, size, status);
1493 if (r < 0)
1494 r = 0;
1495 r = (r * 100) / 14;
1496 if (r > 100)
1497 r = 100;
1498 return r;
1499}
1500
1501u8 zd_rx_strength_percent(u8 rssi)
1502{
1503 int r = (rssi*100) / 30;
1504 if (r > 100)
1505 r = 100;
1506 return (u8) r;
1507}
1508
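/* Returns the RX rate in units of 100 kbit/s (e.g. 10 for 1 MBit/s, 540 for
 * 54 MBit/s), decoded from the OFDM or CCK PLCP header of the frame.
 */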
1509u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status)
1510{
1511 static const u16 ofdm_rates[] = {
1512 [ZD_OFDM_RATE_6M] = 60,
1513 [ZD_OFDM_RATE_9M] = 90,
1514 [ZD_OFDM_RATE_12M] = 120,
1515 [ZD_OFDM_RATE_18M] = 180,
1516 [ZD_OFDM_RATE_24M] = 240,
1517 [ZD_OFDM_RATE_36M] = 360,
1518 [ZD_OFDM_RATE_48M] = 480,
1519 [ZD_OFDM_RATE_54M] = 540,
1520 };
1521 u16 rate;
1522 if (status->frame_status & ZD_RX_OFDM) {
1523 u8 ofdm_rate = zd_ofdm_plcp_header_rate(rx_frame);
1524 rate = ofdm_rates[ofdm_rate & 0xf];
1525 } else {
1526 u8 cck_rate = zd_cck_plcp_header_rate(rx_frame);
1527 switch (cck_rate) {
1528 case ZD_CCK_SIGNAL_1M:
1529 rate = 10;
1530 break;
1531 case ZD_CCK_SIGNAL_2M:
1532 rate = 20;
1533 break;
1534 case ZD_CCK_SIGNAL_5M5:
1535 rate = 55;
1536 break;
1537 case ZD_CCK_SIGNAL_11M:
1538 rate = 110;
1539 break;
1540 default:
1541 rate = 0;
1542 }
1543 }
1544
1545 return rate;
1546}
1547
1548int zd_chip_switch_radio_on(struct zd_chip *chip)
1549{
1550 int r;
1551
1552 mutex_lock(&chip->mutex);
1553 r = zd_switch_radio_on(&chip->rf);
1554 mutex_unlock(&chip->mutex);
1555 return r;
1556}
1557
1558int zd_chip_switch_radio_off(struct zd_chip *chip)
1559{
1560 int r;
1561
1562 mutex_lock(&chip->mutex);
1563 r = zd_switch_radio_off(&chip->rf);
1564 mutex_unlock(&chip->mutex);
1565 return r;
1566}
1567
1568int zd_chip_enable_int(struct zd_chip *chip)
1569{
1570 int r;
1571
1572 mutex_lock(&chip->mutex);
1573 r = zd_usb_enable_int(&chip->usb);
1574 mutex_unlock(&chip->mutex);
1575 return r;
1576}
1577
1578void zd_chip_disable_int(struct zd_chip *chip)
1579{
1580 mutex_lock(&chip->mutex);
1581 zd_usb_disable_int(&chip->usb);
1582 mutex_unlock(&chip->mutex);
1583}
1584
1585int zd_chip_enable_rx(struct zd_chip *chip)
1586{
1587 int r;
1588
1589 mutex_lock(&chip->mutex);
1590 r = zd_usb_enable_rx(&chip->usb);
1591 mutex_unlock(&chip->mutex);
1592 return r;
1593}
1594
1595void zd_chip_disable_rx(struct zd_chip *chip)
1596{
1597 mutex_lock(&chip->mutex);
1598 zd_usb_disable_rx(&chip->usb);
1599 mutex_unlock(&chip->mutex);
1600}
1601
1602int zd_rfwritev_locked(struct zd_chip *chip,
1603 const u32* values, unsigned int count, u8 bits)
1604{
1605 int r;
1606 unsigned int i;
1607
1608 for (i = 0; i < count; i++) {
1609 r = zd_rfwrite_locked(chip, values[i], bits);
1610 if (r)
1611 return r;
1612 }
1613
1614 return 0;
1615}
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
new file mode 100644
index 000000000000..805121093ab5
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -0,0 +1,825 @@
1/* zd_chip.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_CHIP_H
19#define _ZD_CHIP_H
20
21#include "zd_types.h"
22#include "zd_rf.h"
23#include "zd_usb.h"
24
25/* Header for the Media Access Controller (MAC) and the Baseband Processor
26 * (BBP). It appears that the ZD1211 wraps the old ZD1205 with USB glue and
27 * adds a processor for handling the USB protocol.
28 */
29
30/* 8-bit hardware registers */
31#define CR0 CTL_REG(0x0000)
32#define CR1 CTL_REG(0x0004)
33#define CR2 CTL_REG(0x0008)
34#define CR3 CTL_REG(0x000C)
35
36#define CR5 CTL_REG(0x0010)
37/* bit 5: if set, the short preamble is used
38 * bit 6: filter band - Japan channel 14 on, else off
39 */
40#define CR6 CTL_REG(0x0014)
41#define CR7 CTL_REG(0x0018)
42#define CR8 CTL_REG(0x001C)
43
44#define CR4 CTL_REG(0x0020)
45
46#define CR9 CTL_REG(0x0024)
47/* bit 2: antenna switch (together with CR10) */
48#define CR10 CTL_REG(0x0028)
49/* bit 1: antenna switch (together with CR9)
50 * the RF2959 switches the radio on and off via CR11
51 */
52#define CR11 CTL_REG(0x002C)
53/* bit 6: TX power control for OFDM
54 * the RF2959 switches the radio on and off via CR10
55 */
56#define CR12 CTL_REG(0x0030)
57#define CR13 CTL_REG(0x0034)
58#define CR14 CTL_REG(0x0038)
59#define CR15 CTL_REG(0x003C)
60#define CR16 CTL_REG(0x0040)
61#define CR17 CTL_REG(0x0044)
62#define CR18 CTL_REG(0x0048)
63#define CR19 CTL_REG(0x004C)
64#define CR20 CTL_REG(0x0050)
65#define CR21 CTL_REG(0x0054)
66#define CR22 CTL_REG(0x0058)
67#define CR23 CTL_REG(0x005C)
68#define CR24 CTL_REG(0x0060) /* CCA threshold */
69#define CR25 CTL_REG(0x0064)
70#define CR26 CTL_REG(0x0068)
71#define CR27 CTL_REG(0x006C)
72#define CR28 CTL_REG(0x0070)
73#define CR29 CTL_REG(0x0074)
74#define CR30 CTL_REG(0x0078)
75#define CR31 CTL_REG(0x007C) /* TX power control for RF in CCK mode */
76#define CR32 CTL_REG(0x0080)
77#define CR33 CTL_REG(0x0084)
78#define CR34 CTL_REG(0x0088)
79#define CR35 CTL_REG(0x008C)
80#define CR36 CTL_REG(0x0090)
81#define CR37 CTL_REG(0x0094)
82#define CR38 CTL_REG(0x0098)
83#define CR39 CTL_REG(0x009C)
84#define CR40 CTL_REG(0x00A0)
85#define CR41 CTL_REG(0x00A4)
86#define CR42 CTL_REG(0x00A8)
87#define CR43 CTL_REG(0x00AC)
88#define CR44 CTL_REG(0x00B0)
89#define CR45 CTL_REG(0x00B4)
90#define CR46 CTL_REG(0x00B8)
91#define CR47 CTL_REG(0x00BC) /* CCK baseband gain
92 * (patch value might be in EEPROM)
93 */
94#define CR48 CTL_REG(0x00C0)
95#define CR49 CTL_REG(0x00C4)
96#define CR50 CTL_REG(0x00C8)
97#define CR51 CTL_REG(0x00CC) /* TX power control for RF in 6-36M modes */
98#define CR52 CTL_REG(0x00D0) /* TX power control for RF in 48M mode */
99#define CR53 CTL_REG(0x00D4) /* TX power control for RF in 54M mode */
100#define CR54 CTL_REG(0x00D8)
101#define CR55 CTL_REG(0x00DC)
102#define CR56 CTL_REG(0x00E0)
103#define CR57 CTL_REG(0x00E4)
104#define CR58 CTL_REG(0x00E8)
105#define CR59 CTL_REG(0x00EC)
106#define CR60 CTL_REG(0x00F0)
107#define CR61 CTL_REG(0x00F4)
108#define CR62 CTL_REG(0x00F8)
109#define CR63 CTL_REG(0x00FC)
110#define CR64 CTL_REG(0x0100)
111#define CR65 CTL_REG(0x0104) /* OFDM 54M calibration */
112#define CR66 CTL_REG(0x0108) /* OFDM 48M calibration */
113#define CR67 CTL_REG(0x010C) /* OFDM 36M calibration */
114#define CR68 CTL_REG(0x0110) /* CCK calibration */
115#define CR69 CTL_REG(0x0114)
116#define CR70 CTL_REG(0x0118)
117#define CR71 CTL_REG(0x011C)
118#define CR72 CTL_REG(0x0120)
119#define CR73 CTL_REG(0x0124)
120#define CR74 CTL_REG(0x0128)
121#define CR75 CTL_REG(0x012C)
122#define CR76 CTL_REG(0x0130)
123#define CR77 CTL_REG(0x0134)
124#define CR78 CTL_REG(0x0138)
125#define CR79 CTL_REG(0x013C)
126#define CR80 CTL_REG(0x0140)
127#define CR81 CTL_REG(0x0144)
128#define CR82 CTL_REG(0x0148)
129#define CR83 CTL_REG(0x014C)
130#define CR84 CTL_REG(0x0150)
131#define CR85 CTL_REG(0x0154)
132#define CR86 CTL_REG(0x0158)
133#define CR87 CTL_REG(0x015C)
134#define CR88 CTL_REG(0x0160)
135#define CR89 CTL_REG(0x0164)
136#define CR90 CTL_REG(0x0168)
137#define CR91 CTL_REG(0x016C)
138#define CR92 CTL_REG(0x0170)
139#define CR93 CTL_REG(0x0174)
140#define CR94 CTL_REG(0x0178)
141#define CR95 CTL_REG(0x017C)
142#define CR96 CTL_REG(0x0180)
143#define CR97 CTL_REG(0x0184)
144#define CR98 CTL_REG(0x0188)
145#define CR99 CTL_REG(0x018C)
146#define CR100 CTL_REG(0x0190)
147#define CR101 CTL_REG(0x0194)
148#define CR102 CTL_REG(0x0198)
149#define CR103 CTL_REG(0x019C)
150#define CR104 CTL_REG(0x01A0)
151#define CR105 CTL_REG(0x01A4)
152#define CR106 CTL_REG(0x01A8)
153#define CR107 CTL_REG(0x01AC)
154#define CR108 CTL_REG(0x01B0)
155#define CR109 CTL_REG(0x01B4)
156#define CR110 CTL_REG(0x01B8)
157#define CR111 CTL_REG(0x01BC)
158#define CR112 CTL_REG(0x01C0)
159#define CR113 CTL_REG(0x01C4)
160#define CR114 CTL_REG(0x01C8)
161#define CR115 CTL_REG(0x01CC)
162#define CR116 CTL_REG(0x01D0)
163#define CR117 CTL_REG(0x01D4)
164#define CR118 CTL_REG(0x01D8)
165#define CR119 CTL_REG(0x01DC)
166#define CR120 CTL_REG(0x01E0)
167#define CR121 CTL_REG(0x01E4)
168#define CR122 CTL_REG(0x01E8)
169#define CR123 CTL_REG(0x01EC)
170#define CR124 CTL_REG(0x01F0)
171#define CR125 CTL_REG(0x01F4)
172#define CR126 CTL_REG(0x01F8)
173#define CR127 CTL_REG(0x01FC)
174#define CR128 CTL_REG(0x0200)
175#define CR129 CTL_REG(0x0204)
176#define CR130 CTL_REG(0x0208)
177#define CR131 CTL_REG(0x020C)
178#define CR132 CTL_REG(0x0210)
179#define CR133 CTL_REG(0x0214)
180#define CR134 CTL_REG(0x0218)
181#define CR135 CTL_REG(0x021C)
182#define CR136 CTL_REG(0x0220)
183#define CR137 CTL_REG(0x0224)
184#define CR138 CTL_REG(0x0228)
185#define CR139 CTL_REG(0x022C)
186#define CR140 CTL_REG(0x0230)
187#define CR141 CTL_REG(0x0234)
188#define CR142 CTL_REG(0x0238)
189#define CR143 CTL_REG(0x023C)
190#define CR144 CTL_REG(0x0240)
191#define CR145 CTL_REG(0x0244)
192#define CR146 CTL_REG(0x0248)
193#define CR147 CTL_REG(0x024C)
194#define CR148 CTL_REG(0x0250)
195#define CR149 CTL_REG(0x0254)
196#define CR150 CTL_REG(0x0258)
197#define CR151 CTL_REG(0x025C)
198#define CR152 CTL_REG(0x0260)
199#define CR153 CTL_REG(0x0264)
200#define CR154 CTL_REG(0x0268)
201#define CR155 CTL_REG(0x026C)
202#define CR156 CTL_REG(0x0270)
203#define CR157 CTL_REG(0x0274)
204#define CR158 CTL_REG(0x0278)
205#define CR159 CTL_REG(0x027C)
206#define CR160 CTL_REG(0x0280)
207#define CR161 CTL_REG(0x0284)
208#define CR162 CTL_REG(0x0288)
209#define CR163 CTL_REG(0x028C)
210#define CR164 CTL_REG(0x0290)
211#define CR165 CTL_REG(0x0294)
212#define CR166 CTL_REG(0x0298)
213#define CR167 CTL_REG(0x029C)
214#define CR168 CTL_REG(0x02A0)
215#define CR169 CTL_REG(0x02A4)
216#define CR170 CTL_REG(0x02A8)
217#define CR171 CTL_REG(0x02AC)
218#define CR172 CTL_REG(0x02B0)
219#define CR173 CTL_REG(0x02B4)
220#define CR174 CTL_REG(0x02B8)
221#define CR175 CTL_REG(0x02BC)
222#define CR176 CTL_REG(0x02C0)
223#define CR177 CTL_REG(0x02C4)
224#define CR178 CTL_REG(0x02C8)
225#define CR179 CTL_REG(0x02CC)
226#define CR180 CTL_REG(0x02D0)
227#define CR181 CTL_REG(0x02D4)
228#define CR182 CTL_REG(0x02D8)
229#define CR183 CTL_REG(0x02DC)
230#define CR184 CTL_REG(0x02E0)
231#define CR185 CTL_REG(0x02E4)
232#define CR186 CTL_REG(0x02E8)
233#define CR187 CTL_REG(0x02EC)
234#define CR188 CTL_REG(0x02F0)
235#define CR189 CTL_REG(0x02F4)
236#define CR190 CTL_REG(0x02F8)
237#define CR191 CTL_REG(0x02FC)
238#define CR192 CTL_REG(0x0300)
239#define CR193 CTL_REG(0x0304)
240#define CR194 CTL_REG(0x0308)
241#define CR195 CTL_REG(0x030C)
242#define CR196 CTL_REG(0x0310)
243#define CR197 CTL_REG(0x0314)
244#define CR198 CTL_REG(0x0318)
245#define CR199 CTL_REG(0x031C)
246#define CR200 CTL_REG(0x0320)
247#define CR201 CTL_REG(0x0324)
248#define CR202 CTL_REG(0x0328)
249#define CR203 CTL_REG(0x032C) /* I2C bus template value & flash control */
250#define CR204 CTL_REG(0x0330)
251#define CR205 CTL_REG(0x0334)
252#define CR206 CTL_REG(0x0338)
253#define CR207 CTL_REG(0x033C)
254#define CR208 CTL_REG(0x0340)
255#define CR209 CTL_REG(0x0344)
256#define CR210 CTL_REG(0x0348)
257#define CR211 CTL_REG(0x034C)
258#define CR212 CTL_REG(0x0350)
259#define CR213 CTL_REG(0x0354)
260#define CR214 CTL_REG(0x0358)
261#define CR215 CTL_REG(0x035C)
262#define CR216 CTL_REG(0x0360)
263#define CR217 CTL_REG(0x0364)
264#define CR218 CTL_REG(0x0368)
265#define CR219 CTL_REG(0x036C)
266#define CR220 CTL_REG(0x0370)
267#define CR221 CTL_REG(0x0374)
268#define CR222 CTL_REG(0x0378)
269#define CR223 CTL_REG(0x037C)
270#define CR224 CTL_REG(0x0380)
271#define CR225 CTL_REG(0x0384)
272#define CR226 CTL_REG(0x0388)
273#define CR227 CTL_REG(0x038C)
274#define CR228 CTL_REG(0x0390)
275#define CR229 CTL_REG(0x0394)
276#define CR230 CTL_REG(0x0398)
277#define CR231 CTL_REG(0x039C)
278#define CR232 CTL_REG(0x03A0)
279#define CR233 CTL_REG(0x03A4)
280#define CR234 CTL_REG(0x03A8)
281#define CR235 CTL_REG(0x03AC)
282#define CR236 CTL_REG(0x03B0)
283
284#define CR240 CTL_REG(0x03C0)
285/* bit 7: host-controlled RF register writes
286 * CR241-CR245: for hardware controlled writing of RF bits, not needed for
287 * USB
288 */
289#define CR241 CTL_REG(0x03C4)
290#define CR242 CTL_REG(0x03C8)
291#define CR243 CTL_REG(0x03CC)
292#define CR244 CTL_REG(0x03D0)
293#define CR245 CTL_REG(0x03D4)
294
295#define CR251 CTL_REG(0x03EC) /* only used for activation and deactivation of
296 * Airoha RFs AL2230 and AL7230B
297 */
298#define CR252 CTL_REG(0x03F0)
299#define CR253 CTL_REG(0x03F4)
300#define CR254 CTL_REG(0x03F8)
301#define CR255 CTL_REG(0x03FC)
302
303#define CR_MAX_PHY_REG 255
304
305/* Taken from the ZyDAS driver; not all of these registers are relevant for
306 * the ZD1211 driver.
307 */
308
309#define CR_RF_IF_CLK CTL_REG(0x0400)
310#define CR_RF_IF_DATA CTL_REG(0x0404)
311#define CR_PE1_PE2 CTL_REG(0x0408)
312#define CR_PE2_DLY CTL_REG(0x040C)
313#define CR_LE1 CTL_REG(0x0410)
314#define CR_LE2 CTL_REG(0x0414)
315/* Seems to enable/disable GPI (General Purpose IO?) */
316#define CR_GPI_EN CTL_REG(0x0418)
317#define CR_RADIO_PD CTL_REG(0x042C)
318#define CR_RF2948_PD CTL_REG(0x042C)
319#define CR_ENABLE_PS_MANUAL_AGC CTL_REG(0x043C)
320#define CR_CONFIG_PHILIPS CTL_REG(0x0440)
321#define CR_SA2400_SER_AP CTL_REG(0x0444)
322#define CR_I2C_WRITE CTL_REG(0x0444)
323#define CR_SA2400_SER_RP CTL_REG(0x0448)
324#define CR_RADIO_PE CTL_REG(0x0458)
325#define CR_RST_BUS_MASTER CTL_REG(0x045C)
326#define CR_RFCFG CTL_REG(0x0464)
327#define CR_HSTSCHG CTL_REG(0x046C)
328#define CR_PHY_ON CTL_REG(0x0474)
329#define CR_RX_DELAY CTL_REG(0x0478)
330#define CR_RX_PE_DELAY CTL_REG(0x047C)
331#define CR_GPIO_1 CTL_REG(0x0490)
332#define CR_GPIO_2 CTL_REG(0x0494)
333#define CR_EncryBufMux CTL_REG(0x04A8)
334#define CR_PS_CTRL CTL_REG(0x0500)
335#define CR_ADDA_PWR_DWN CTL_REG(0x0504)
336#define CR_ADDA_MBIAS_WARMTIME CTL_REG(0x0508)
337#define CR_MAC_PS_STATE CTL_REG(0x050C)
338
339#define CR_INTERRUPT CTL_REG(0x0510)
340#define INT_TX_COMPLETE 0x00000001
341#define INT_RX_COMPLETE 0x00000002
342#define INT_RETRY_FAIL 0x00000004
343#define INT_WAKEUP 0x00000008
344#define INT_DTIM_NOTIFY 0x00000020
345#define INT_CFG_NEXT_BCN 0x00000040
346#define INT_BUS_ABORT 0x00000080
347#define INT_TX_FIFO_READY 0x00000100
348#define INT_UART 0x00000200
349#define INT_TX_COMPLETE_EN 0x00010000
350#define INT_RX_COMPLETE_EN 0x00020000
351#define INT_RETRY_FAIL_EN 0x00040000
352#define INT_WAKEUP_EN 0x00080000
353#define INT_DTIM_NOTIFY_EN 0x00200000
354#define INT_CFG_NEXT_BCN_EN 0x00400000
355#define INT_BUS_ABORT_EN 0x00800000
356#define INT_TX_FIFO_READY_EN 0x01000000
357#define INT_UART_EN 0x02000000
358
359#define CR_TSF_LOW_PART CTL_REG(0x0514)
360#define CR_TSF_HIGH_PART CTL_REG(0x0518)
361
362/* The following three values are in time units (TU, 1024 us).
363 * The following condition must be met:
364 * atim < tbtt < bcn
365 */
366#define CR_ATIM_WND_PERIOD CTL_REG(0x051C)
367#define CR_BCN_INTERVAL CTL_REG(0x0520)
368#define CR_PRE_TBTT CTL_REG(0x0524)
369/* in units of TU(1024us) */
370
371/* for UART support */
372#define CR_UART_RBR_THR_DLL CTL_REG(0x0540)
373#define CR_UART_DLM_IER CTL_REG(0x0544)
374#define CR_UART_IIR_FCR CTL_REG(0x0548)
375#define CR_UART_LCR CTL_REG(0x054c)
376#define CR_UART_MCR CTL_REG(0x0550)
377#define CR_UART_LSR CTL_REG(0x0554)
378#define CR_UART_MSR CTL_REG(0x0558)
379#define CR_UART_ECR CTL_REG(0x055c)
380#define CR_UART_STATUS CTL_REG(0x0560)
381
382#define CR_PCI_TX_ADDR_P1 CTL_REG(0x0600)
383#define CR_PCI_TX_AddR_P2 CTL_REG(0x0604)
384#define CR_PCI_RX_AddR_P1 CTL_REG(0x0608)
385#define CR_PCI_RX_AddR_P2 CTL_REG(0x060C)
386
387/* must be overwritten if a custom MAC address is used */
388#define CR_MAC_ADDR_P1 CTL_REG(0x0610)
389#define CR_MAC_ADDR_P2 CTL_REG(0x0614)
390#define CR_BSSID_P1 CTL_REG(0x0618)
391#define CR_BSSID_P2 CTL_REG(0x061C)
392#define CR_BCN_PLCP_CFG CTL_REG(0x0620)
393#define CR_GROUP_HASH_P1 CTL_REG(0x0624)
394#define CR_GROUP_HASH_P2 CTL_REG(0x0628)
395#define CR_RX_TIMEOUT CTL_REG(0x062C)
396
397/* Basic rates supported by the BSS. When producing ACK or CTS messages, the
398 * device will use a rate in this table that is less than or equal to the rate
399 * of the incoming frame which prompted the response */
400#define CR_BASIC_RATE_TBL CTL_REG(0x0630)
401#define CR_RATE_1M 0x0001 /* 802.11b */
402#define CR_RATE_2M 0x0002 /* 802.11b */
403#define CR_RATE_5_5M 0x0004 /* 802.11b */
404#define CR_RATE_11M 0x0008 /* 802.11b */
405#define CR_RATE_6M 0x0100 /* 802.11g */
406#define CR_RATE_9M 0x0200 /* 802.11g */
407#define CR_RATE_12M 0x0400 /* 802.11g */
408#define CR_RATE_18M 0x0800 /* 802.11g */
409#define CR_RATE_24M 0x1000 /* 802.11g */
410#define CR_RATE_36M 0x2000 /* 802.11g */
411#define CR_RATE_48M 0x4000 /* 802.11g */
412#define CR_RATE_54M 0x8000 /* 802.11g */
413#define CR_RATES_80211G 0xff00
414#define CR_RATES_80211B 0x000f
415
416/* Mandatory rates required in the BSS. When producing ACK or CTS messages, if
417 * the device could not find an appropriate rate in CR_BASIC_RATE_TBL, it will
418 * look for a rate in this table that is less than or equal to the rate of
419 * the incoming frame. */
420#define CR_MANDATORY_RATE_TBL CTL_REG(0x0634)
421#define CR_RTS_CTS_RATE CTL_REG(0x0638)
422
423#define CR_WEP_PROTECT CTL_REG(0x063C)
424#define CR_RX_THRESHOLD CTL_REG(0x0640)
425
426/* register for controlling the LEDS */
427#define CR_LED CTL_REG(0x0644)
428/* masks for controlling LEDs */
429#define LED1 0x0100
430#define LED2 0x0200
431
432/* Seems to indicate that configuration is complete.
433 */
434#define CR_AFTER_PNP CTL_REG(0x0648)
435#define CR_ACK_TIME_80211 CTL_REG(0x0658)
436
437#define CR_RX_OFFSET CTL_REG(0x065c)
438
439#define CR_PHY_DELAY CTL_REG(0x066C)
440#define CR_BCN_FIFO CTL_REG(0x0670)
441#define CR_SNIFFER_ON CTL_REG(0x0674)
442
443#define CR_ENCRYPTION_TYPE CTL_REG(0x0678)
444#define NO_WEP 0
445#define WEP64 1
446#define WEP128 5
447#define WEP256 6
448#define ENC_SNIFFER 8
449
450#define CR_ZD1211_RETRY_MAX CTL_REG(0x067C)
451
452#define CR_REG1 CTL_REG(0x0680)
453/* Setting the UNLOCK_PHY_REGS bit disallows write access to the PHY
454 * registers, so one could argue it is really a LOCK bit. But calling it
455 * LOCK_PHY_REGS would be just as confusing.
456 */
457#define UNLOCK_PHY_REGS 0x0080
458
459#define CR_DEVICE_STATE CTL_REG(0x0684)
460#define CR_UNDERRUN_CNT CTL_REG(0x0688)
461
462#define CR_RX_FILTER CTL_REG(0x068c)
463#define RX_FILTER_ASSOC_RESPONSE 0x0002
464#define RX_FILTER_PROBE_RESPONSE 0x0020
465#define RX_FILTER_BEACON 0x0100
466#define RX_FILTER_AUTH 0x0800
467/* Sniffer mode sets the filter to 0xfffff */
468
469#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690)
470#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694)
471#define CR_IFS_VALUE CTL_REG(0x0698)
472#define CR_RX_TIME_OUT CTL_REG(0x069C)
473#define CR_TOTAL_RX_FRM CTL_REG(0x06A0)
474#define CR_CRC32_CNT CTL_REG(0x06A4)
475#define CR_CRC16_CNT CTL_REG(0x06A8)
476#define CR_DECRYPTION_ERR_UNI CTL_REG(0x06AC)
477#define CR_RX_FIFO_OVERRUN CTL_REG(0x06B0)
478
479#define CR_DECRYPTION_ERR_MUL CTL_REG(0x06BC)
480
481#define CR_NAV_CNT CTL_REG(0x06C4)
482#define CR_NAV_CCA CTL_REG(0x06C8)
483#define CR_RETRY_CNT CTL_REG(0x06CC)
484
485#define CR_READ_TCB_ADDR CTL_REG(0x06E8)
486#define CR_READ_RFD_ADDR CTL_REG(0x06EC)
487#define CR_CWMIN_CWMAX CTL_REG(0x06F0)
488#define CR_TOTAL_TX_FRM CTL_REG(0x06F4)
489
490/* CAM: Continuous Access Mode (power management) */
491#define CR_CAM_MODE CTL_REG(0x0700)
492#define CR_CAM_ROLL_TB_LOW CTL_REG(0x0704)
493#define CR_CAM_ROLL_TB_HIGH CTL_REG(0x0708)
494#define CR_CAM_ADDRESS CTL_REG(0x070C)
495#define CR_CAM_DATA CTL_REG(0x0710)
496
497#define CR_ROMDIR CTL_REG(0x0714)
498
499#define CR_DECRY_ERR_FLG_LOW CTL_REG(0x0714)
500#define CR_DECRY_ERR_FLG_HIGH CTL_REG(0x0718)
501
502#define CR_WEPKEY0 CTL_REG(0x0720)
503#define CR_WEPKEY1 CTL_REG(0x0724)
504#define CR_WEPKEY2 CTL_REG(0x0728)
505#define CR_WEPKEY3 CTL_REG(0x072C)
506#define CR_WEPKEY4 CTL_REG(0x0730)
507#define CR_WEPKEY5 CTL_REG(0x0734)
508#define CR_WEPKEY6 CTL_REG(0x0738)
509#define CR_WEPKEY7 CTL_REG(0x073C)
510#define CR_WEPKEY8 CTL_REG(0x0740)
511#define CR_WEPKEY9 CTL_REG(0x0744)
512#define CR_WEPKEY10 CTL_REG(0x0748)
513#define CR_WEPKEY11 CTL_REG(0x074C)
514#define CR_WEPKEY12 CTL_REG(0x0750)
515#define CR_WEPKEY13 CTL_REG(0x0754)
516#define CR_WEPKEY14 CTL_REG(0x0758)
517#define CR_WEPKEY15 CTL_REG(0x075c)
518#define CR_TKIP_MODE CTL_REG(0x0760)
519
520#define CR_EEPROM_PROTECT0 CTL_REG(0x0758)
521#define CR_EEPROM_PROTECT1 CTL_REG(0x075C)
522
523#define CR_DBG_FIFO_RD CTL_REG(0x0800)
524#define CR_DBG_SELECT CTL_REG(0x0804)
525#define CR_FIFO_Length CTL_REG(0x0808)
526
527
528#define CR_RSSI_MGC CTL_REG(0x0810)
529
530#define CR_PON CTL_REG(0x0818)
531#define CR_RX_ON CTL_REG(0x081C)
532#define CR_TX_ON CTL_REG(0x0820)
533#define CR_CHIP_EN CTL_REG(0x0824)
534#define CR_LO_SW CTL_REG(0x0828)
535#define CR_TXRX_SW CTL_REG(0x082C)
536#define CR_S_MD CTL_REG(0x0830)
537
538#define CR_USB_DEBUG_PORT CTL_REG(0x0888)
539
540#define CR_ZD1211B_TX_PWR_CTL1 CTL_REG(0x0b00)
541#define CR_ZD1211B_TX_PWR_CTL2 CTL_REG(0x0b04)
542#define CR_ZD1211B_TX_PWR_CTL3 CTL_REG(0x0b08)
543#define CR_ZD1211B_TX_PWR_CTL4 CTL_REG(0x0b0c)
544#define CR_ZD1211B_AIFS_CTL1 CTL_REG(0x0b10)
545#define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14)
546#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
547#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
548
549#define AP_RX_FILTER 0x0400feff
550#define STA_RX_FILTER 0x0000ffff
551
552#define CWIN_SIZE 0x007f043f
553
554
555#define HWINT_ENABLED 0x004f0000
556#define HWINT_DISABLED 0
557
558#define E2P_PWR_INT_GUARD 8
559#define E2P_CHANNEL_COUNT 14
560
561/* If you compare these addresses with the original ZyDAS driver, please note
562 * that we use word mapping for the EEPROM.
563 */
564
565/*
566 * The upper 16 bits contain the regulatory domain.
567 */
568#define E2P_SUBID E2P_REG(0x00)
569#define E2P_POD E2P_REG(0x02)
570#define E2P_MAC_ADDR_P1 E2P_REG(0x04)
571#define E2P_MAC_ADDR_P2 E2P_REG(0x06)
572#define E2P_PWR_CAL_VALUE1 E2P_REG(0x08)
573#define E2P_PWR_CAL_VALUE2 E2P_REG(0x0a)
574#define E2P_PWR_CAL_VALUE3 E2P_REG(0x0c)
575#define E2P_PWR_CAL_VALUE4 E2P_REG(0x0e)
576#define E2P_PWR_INT_VALUE1 E2P_REG(0x10)
577#define E2P_PWR_INT_VALUE2 E2P_REG(0x12)
578#define E2P_PWR_INT_VALUE3 E2P_REG(0x14)
579#define E2P_PWR_INT_VALUE4 E2P_REG(0x16)
580
581/* Contains a bit for each allowed channel. Even for Europe (ETSI 0x30) it
582 * lists only 11 channels. */
583#define E2P_ALLOWED_CHANNEL E2P_REG(0x18)
584
585#define E2P_PHY_REG E2P_REG(0x1a)
586#define E2P_DEVICE_VER E2P_REG(0x20)
587#define E2P_36M_CAL_VALUE1 E2P_REG(0x28)
588#define E2P_36M_CAL_VALUE2 E2P_REG(0x2a)
589#define E2P_36M_CAL_VALUE3 E2P_REG(0x2c)
590#define E2P_36M_CAL_VALUE4 E2P_REG(0x2e)
591#define E2P_11A_INT_VALUE1 E2P_REG(0x30)
592#define E2P_11A_INT_VALUE2 E2P_REG(0x32)
593#define E2P_11A_INT_VALUE3 E2P_REG(0x34)
594#define E2P_11A_INT_VALUE4 E2P_REG(0x36)
595#define E2P_48M_CAL_VALUE1 E2P_REG(0x38)
596#define E2P_48M_CAL_VALUE2 E2P_REG(0x3a)
597#define E2P_48M_CAL_VALUE3 E2P_REG(0x3c)
598#define E2P_48M_CAL_VALUE4 E2P_REG(0x3e)
599#define E2P_48M_INT_VALUE1 E2P_REG(0x40)
600#define E2P_48M_INT_VALUE2 E2P_REG(0x42)
601#define E2P_48M_INT_VALUE3 E2P_REG(0x44)
602#define E2P_48M_INT_VALUE4 E2P_REG(0x46)
603#define E2P_54M_CAL_VALUE1 E2P_REG(0x48) /* ??? */
604#define E2P_54M_CAL_VALUE2 E2P_REG(0x4a)
605#define E2P_54M_CAL_VALUE3 E2P_REG(0x4c)
606#define E2P_54M_CAL_VALUE4 E2P_REG(0x4e)
607#define E2P_54M_INT_VALUE1 E2P_REG(0x50)
608#define E2P_54M_INT_VALUE2 E2P_REG(0x52)
609#define E2P_54M_INT_VALUE3 E2P_REG(0x54)
610#define E2P_54M_INT_VALUE4 E2P_REG(0x56)
611
612/* All of the following are 16-bit values */
613#define FW_FIRMWARE_VER FW_REG(0)
614/* non-zero if USB high speed connection */
615#define FW_USB_SPEED FW_REG(1)
616#define FW_FIX_TX_RATE FW_REG(2)
617/* Seems to allow controlling the LEDs through the firmware */
618#define FW_LINK_STATUS FW_REG(3)
619#define FW_SOFT_RESET FW_REG(4)
620#define FW_FLASH_CHK FW_REG(5)
621
622enum {
623 CR_BASE_OFFSET = 0x9000,
624 FW_START_OFFSET = 0xee00,
625 FW_BASE_ADDR_OFFSET = FW_START_OFFSET + 0x1d,
626 EEPROM_START_OFFSET = 0xf800,
627 EEPROM_SIZE = 0x800, /* words */
628 LOAD_CODE_SIZE = 0xe, /* words */
629 LOAD_VECT_SIZE = 0x10000 - 0xfff7, /* words */
630 EEPROM_REGS_OFFSET = LOAD_CODE_SIZE + LOAD_VECT_SIZE,
631 E2P_BASE_OFFSET = EEPROM_START_OFFSET +
632 EEPROM_REGS_OFFSET,
633};
634
635#define FW_REG_TABLE_ADDR USB_ADDR(FW_START_OFFSET + 0x1d)
636
637enum {
638 /* indices for ofdm_cal_values */
639 OFDM_36M_INDEX = 0,
640 OFDM_48M_INDEX = 1,
641 OFDM_54M_INDEX = 2,
642};
643
644struct zd_chip {
645 struct zd_usb usb;
646 struct zd_rf rf;
647 struct mutex mutex;
648 u8 e2p_mac[ETH_ALEN];
649 /* EepSetPoint in the vendor driver */
650 u8 pwr_cal_values[E2P_CHANNEL_COUNT];
651 /* integration values in the vendor driver */
652 u8 pwr_int_values[E2P_CHANNEL_COUNT];
653 /* SetPointOFDM in the vendor driver */
654 u8 ofdm_cal_values[3][E2P_CHANNEL_COUNT];
655 u8 pa_type:4, patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1,
656 is_zd1211b:1;
657};
658
659static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb)
660{
661 return container_of(usb, struct zd_chip, usb);
662}
663
664static inline struct zd_chip *zd_rf_to_chip(struct zd_rf *rf)
665{
666 return container_of(rf, struct zd_chip, rf);
667}
668
669#define zd_chip_dev(chip) (&(chip)->usb.intf->dev)
670
671void zd_chip_init(struct zd_chip *chip,
672 struct net_device *netdev,
673 struct usb_interface *intf);
674void zd_chip_clear(struct zd_chip *chip);
675int zd_chip_init_hw(struct zd_chip *chip, u8 device_type);
676int zd_chip_reset(struct zd_chip *chip);
677
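/* The *_locked register access helpers below are expected to be called with
 * chip->mutex held; they forward the requests to the USB layer.
 */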
678static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values,
679 const zd_addr_t *addresses,
680 unsigned int count)
681{
682 ZD_ASSERT(mutex_is_locked(&chip->mutex));
683 return zd_usb_ioread16v(&chip->usb, values, addresses, count);
684}
685
686static inline int zd_ioread16_locked(struct zd_chip *chip, u16 *value,
687 const zd_addr_t addr)
688{
689 ZD_ASSERT(mutex_is_locked(&chip->mutex));
690 return zd_usb_ioread16(&chip->usb, value, addr);
691}
692
693int zd_ioread32v_locked(struct zd_chip *chip, u32 *values,
694 const zd_addr_t *addresses, unsigned int count);
695
696static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value,
697 const zd_addr_t addr)
698{
699 return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1);
700}
701
702static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value,
703 zd_addr_t addr)
704{
705 struct zd_ioreq16 ioreq;
706
707 ZD_ASSERT(mutex_is_locked(&chip->mutex));
708 ioreq.addr = addr;
709 ioreq.value = value;
710
711 return zd_usb_iowrite16v(&chip->usb, &ioreq, 1);
712}
713
714int zd_iowrite16a_locked(struct zd_chip *chip,
715 const struct zd_ioreq16 *ioreqs, unsigned int count);
716
717int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
718 unsigned int count);
719
720static inline int zd_iowrite32_locked(struct zd_chip *chip, u32 value,
721 zd_addr_t addr)
722{
723 struct zd_ioreq32 ioreq;
724
725 ioreq.addr = addr;
726 ioreq.value = value;
727
728 return _zd_iowrite32v_locked(chip, &ioreq, 1);
729}
730
731int zd_iowrite32a_locked(struct zd_chip *chip,
732 const struct zd_ioreq32 *ioreqs, unsigned int count);
733
734static inline int zd_rfwrite_locked(struct zd_chip *chip, u32 value, u8 bits)
735{
736 ZD_ASSERT(mutex_is_locked(&chip->mutex));
737 return zd_usb_rfwrite(&chip->usb, value, bits);
738}
739
740int zd_rfwritev_locked(struct zd_chip *chip,
741 const u32* values, unsigned int count, u8 bits);
742
743/* Register I/O functions that take the chip mutex themselves. The different
744 * parameter order (compared to the *_locked variants) is intentional.
745 */
746int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value);
747int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value);
748int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value);
749int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value);
750int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses,
751 u32 *values, unsigned int count);
752int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
753 unsigned int count);
754
755int zd_chip_set_channel(struct zd_chip *chip, u8 channel);
756static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
757{
758 return chip->rf.channel;
759}
760u8 zd_chip_get_channel(struct zd_chip *chip);
761int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
762void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr);
763int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr);
764int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
765int zd_chip_switch_radio_on(struct zd_chip *chip);
766int zd_chip_switch_radio_off(struct zd_chip *chip);
767int zd_chip_enable_int(struct zd_chip *chip);
768void zd_chip_disable_int(struct zd_chip *chip);
769int zd_chip_enable_rx(struct zd_chip *chip);
770void zd_chip_disable_rx(struct zd_chip *chip);
771int zd_chip_enable_hwint(struct zd_chip *chip);
772int zd_chip_disable_hwint(struct zd_chip *chip);
773
774static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type)
775{
776 return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type);
777}
778
779static inline int zd_set_encryption_type(struct zd_chip *chip, u32 type)
780{
781 return zd_iowrite32(chip, CR_ENCRYPTION_TYPE, type);
782}
783
784static inline int zd_chip_get_basic_rates(struct zd_chip *chip, u16 *cr_rates)
785{
786 return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates);
787}
788
789int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates);
790
791static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter)
792{
793 return zd_iowrite32(chip, CR_RX_FILTER, filter);
794}
795
796int zd_chip_lock_phy_regs(struct zd_chip *chip);
797int zd_chip_unlock_phy_regs(struct zd_chip *chip);
798
799enum led_status {
800 LED_OFF = 0,
801 LED_ON = 1,
802 LED_FLIP = 2,
803 LED_STATUS = 3,
804};
805
806int zd_chip_led_status(struct zd_chip *chip, int led, enum led_status status);
807int zd_chip_led_flip(struct zd_chip *chip, int led,
808 const unsigned int *phases_msecs, unsigned int count);
809
810int zd_set_beacon_interval(struct zd_chip *chip, u32 interval);
811
812static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
813{
814 return zd_ioread32(chip, CR_BCN_INTERVAL, interval);
815}
816
817struct rx_status;
818
819u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
820 const struct rx_status *status);
821u8 zd_rx_strength_percent(u8 rssi);
822
823u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
824
825#endif /* _ZD_CHIP_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
new file mode 100644
index 000000000000..465906812fc4
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -0,0 +1,48 @@
1/* zd_def.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_DEF_H
19#define _ZD_DEF_H
20
21#include <linux/kernel.h>
22#include <linux/stringify.h>
23#include <linux/device.h>
24
25
26#define dev_printk_f(level, dev, fmt, args...) \
27 dev_printk(level, dev, "%s() " fmt, __func__, ##args)
28
29#ifdef DEBUG
30# define dev_dbg_f(dev, fmt, args...) \
31 dev_printk_f(KERN_DEBUG, dev, fmt, ## args)
32#else
33# define dev_dbg_f(dev, fmt, args...) do { (void)(dev); } while (0)
34#endif /* DEBUG */
35
36#ifdef DEBUG
37# define ZD_ASSERT(x) \
38do { \
39 if (!(x)) { \
40 pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
41 __FILE__, __LINE__, __stringify(x)); \
42 } \
43} while (0)
44#else
45# define ZD_ASSERT(x) do { } while (0)
46#endif
47
48#endif /* _ZD_DEF_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
new file mode 100644
index 000000000000..66905f7b61ff
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
@@ -0,0 +1,191 @@
1/* zd_ieee80211.c
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18/*
19 * A lot of this code is generic and should be moved into the upper layers
20 * at some point.
21 */
22
23#include <linux/errno.h>
24#include <linux/wireless.h>
25#include <linux/kernel.h>
26#include <net/ieee80211.h>
27
28#include "zd_def.h"
29#include "zd_ieee80211.h"
30#include "zd_mac.h"
31
32static const struct channel_range channel_ranges[] = {
33 [0] = { 0, 0},
34 [ZD_REGDOMAIN_FCC] = { 1, 12},
35 [ZD_REGDOMAIN_IC] = { 1, 12},
36 [ZD_REGDOMAIN_ETSI] = { 1, 14},
37 [ZD_REGDOMAIN_JAPAN] = { 1, 14},
38 [ZD_REGDOMAIN_SPAIN] = { 1, 14},
39 [ZD_REGDOMAIN_FRANCE] = { 1, 14},
40 [ZD_REGDOMAIN_JAPAN_ADD] = {14, 15},
41};
42
43const struct channel_range *zd_channel_range(u8 regdomain)
44{
45 if (regdomain >= ARRAY_SIZE(channel_ranges))
46 regdomain = 0;
47 return &channel_ranges[regdomain];
48}
49
50int zd_regdomain_supports_channel(u8 regdomain, u8 channel)
51{
52 const struct channel_range *range = zd_channel_range(regdomain);
53 return range->start <= channel && channel < range->end;
54}
55
56int zd_regdomain_supported(u8 regdomain)
57{
58 const struct channel_range *range = zd_channel_range(regdomain);
59 return range->start != 0;
60}
61
62/* Stores channel frequencies in MHz. */
63static const u16 channel_frequencies[] = {
64 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447,
65 2452, 2457, 2462, 2467, 2472, 2484,
66};
67
68#define NUM_CHANNELS ARRAY_SIZE(channel_frequencies)
69
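/* Converts a frequency given as MHz plus a remainder in Hz into the
 * struct iw_freq mantissa/exponent representation, where the frequency in Hz
 * equals m * 10^e.
 */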
70static int compute_freq(struct iw_freq *freq, u32 mhz, u32 hz)
71{
72 u32 factor;
73
74 freq->e = 0;
75 if (mhz >= 1000000000U) {
76		pr_debug("zd1211 mhz %u too large\n", mhz);
77 freq->m = 0;
78 return -EINVAL;
79 }
80
81 factor = 1000;
82 while (mhz >= factor) {
83
84 freq->e += 1;
85 factor *= 10;
86 }
87
88 factor /= 1000U;
89 freq->m = mhz * (1000000U/factor) + hz/factor;
90
91 return 0;
92}
93
94int zd_channel_to_freq(struct iw_freq *freq, u8 channel)
95{
96 if (channel > NUM_CHANNELS) {
97 freq->m = 0;
98 freq->e = 0;
99 return -EINVAL;
100 }
101 if (!channel) {
102 freq->m = 0;
103 freq->e = 0;
104 return -EINVAL;
105 }
106 return compute_freq(freq, channel_frequencies[channel-1], 0);
107}
108
109static int freq_to_mhz(const struct iw_freq *freq)
110{
111 u32 factor;
112 int e;
113
114 /* Such high frequencies are not supported. */
115 if (freq->e > 6)
116 return -EINVAL;
117
118 factor = 1;
119 for (e = freq->e; e > 0; --e) {
120 factor *= 10;
121 }
122 factor = 1000000U / factor;
123
124 if (freq->m % factor) {
125 return -EINVAL;
126 }
127
128 return freq->m / factor;
129}
130
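/* Returns 1 and sets *channel if the iw_freq either names a channel directly
 * (m < 1000) or matches one of the supported channel frequencies, 0 if the
 * frequency is not fixed, and a negative error code otherwise.
 */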
131int zd_find_channel(u8 *channel, const struct iw_freq *freq)
132{
133 int i, r;
134 u32 mhz;
135
136 if (!(freq->flags & IW_FREQ_FIXED))
137 return 0;
138
139 if (freq->m < 1000) {
140 if (freq->m > NUM_CHANNELS || freq->m == 0)
141 return -EINVAL;
142 *channel = freq->m;
143 return 1;
144 }
145
146 r = freq_to_mhz(freq);
147 if (r < 0)
148 return r;
149 mhz = r;
150
151 for (i = 0; i < NUM_CHANNELS; i++) {
152 if (mhz == channel_frequencies[i]) {
153 *channel = i+1;
154 return 1;
155 }
156 }
157
158 return -EINVAL;
159}
160
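/* Fills an ieee80211_geo structure with the 2.4 GHz channels allowed by the
 * regulatory domain and registers it with the ieee80211 layer.
 */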
161int zd_geo_init(struct ieee80211_device *ieee, u8 regdomain)
162{
163 struct ieee80211_geo geo;
164 const struct channel_range *range;
165 int i;
166 u8 channel;
167
168 dev_dbg(zd_mac_dev(zd_netdev_mac(ieee->dev)),
169 "regdomain %#04x\n", regdomain);
170
171 range = zd_channel_range(regdomain);
172 if (range->start == 0) {
173 dev_err(zd_mac_dev(zd_netdev_mac(ieee->dev)),
174 "zd1211 regdomain %#04x not supported\n",
175 regdomain);
176 return -EINVAL;
177 }
178
179 memset(&geo, 0, sizeof(geo));
180
181 for (i = 0, channel = range->start; channel < range->end; channel++) {
182 struct ieee80211_channel *chan = &geo.bg[i++];
183 chan->freq = channel_frequencies[channel - 1];
184 chan->channel = channel;
185 }
186
187 geo.bg_channels = i;
188 memcpy(geo.name, "XX ", 4);
189 ieee80211_set_geo(ieee, &geo);
190 return 0;
191}
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
new file mode 100644
index 000000000000..36329890dfec
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
@@ -0,0 +1,85 @@
1#ifndef _ZD_IEEE80211_H
2#define _ZD_IEEE80211_H
3
4#include <net/ieee80211.h>
5#include "zd_types.h"
6
7/* Additional definitions from the standards.
8 */
9
10#define ZD_REGDOMAIN_FCC 0x10
11#define ZD_REGDOMAIN_IC 0x20
12#define ZD_REGDOMAIN_ETSI 0x30
13#define ZD_REGDOMAIN_SPAIN 0x31
14#define ZD_REGDOMAIN_FRANCE 0x32
15#define ZD_REGDOMAIN_JAPAN_ADD 0x40
16#define ZD_REGDOMAIN_JAPAN 0x41
17
18enum {
19 MIN_CHANNEL24 = 1,
20 MAX_CHANNEL24 = 14,
21};
22
23struct channel_range {
24 u8 start;
25 u8 end; /* exclusive (channel must be less than end) */
26};
27
28struct iw_freq;
29
30int zd_geo_init(struct ieee80211_device *ieee, u8 regdomain);
31
32const struct channel_range *zd_channel_range(u8 regdomain);
33int zd_regdomain_supports_channel(u8 regdomain, u8 channel);
34int zd_regdomain_supported(u8 regdomain);
35
36/* for 2.4 GHz band */
37int zd_channel_to_freq(struct iw_freq *freq, u8 channel);
38int zd_find_channel(u8 *channel, const struct iw_freq *freq);
39
40#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80
41
42struct ofdm_plcp_header {
43 u8 prefix[3];
44 __le16 service;
45} __attribute__((packed));
46
47static inline u8 zd_ofdm_plcp_header_rate(
48 const struct ofdm_plcp_header *header)
49{
50 return header->prefix[0] & 0xf;
51}
52
53#define ZD_OFDM_RATE_6M 0xb
54#define ZD_OFDM_RATE_9M 0xf
55#define ZD_OFDM_RATE_12M 0xa
56#define ZD_OFDM_RATE_18M 0xe
57#define ZD_OFDM_RATE_24M 0x9
58#define ZD_OFDM_RATE_36M 0xd
59#define ZD_OFDM_RATE_48M 0x8
60#define ZD_OFDM_RATE_54M 0xc
61
62struct cck_plcp_header {
63 u8 signal;
64 u8 service;
65 __le16 length;
66 __le16 crc16;
67} __attribute__((packed));
68
69static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header)
70{
71 return header->signal;
72}
73
74#define ZD_CCK_SIGNAL_1M 0x0a
75#define ZD_CCK_SIGNAL_2M 0x14
76#define ZD_CCK_SIGNAL_5M5 0x37
77#define ZD_CCK_SIGNAL_11M 0x6e
78
79enum ieee80211_std {
80 IEEE80211B = 0x01,
81 IEEE80211A = 0x02,
82 IEEE80211G = 0x04,
83};
84
85#endif /* _ZD_IEEE80211_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
new file mode 100644
index 000000000000..3bdc54d128d0
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -0,0 +1,1057 @@
1/* zd_mac.c
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/wireless.h>
21#include <linux/usb.h>
22#include <linux/jiffies.h>
23#include <net/ieee80211_radiotap.h>
24
25#include "zd_def.h"
26#include "zd_chip.h"
27#include "zd_mac.h"
28#include "zd_ieee80211.h"
29#include "zd_netdev.h"
30#include "zd_rf.h"
31#include "zd_util.h"
32
33static void ieee_init(struct ieee80211_device *ieee);
34static void softmac_init(struct ieee80211softmac_device *sm);
35
36int zd_mac_init(struct zd_mac *mac,
37 struct net_device *netdev,
38 struct usb_interface *intf)
39{
40 struct ieee80211_device *ieee = zd_netdev_ieee80211(netdev);
41
42 memset(mac, 0, sizeof(*mac));
43 spin_lock_init(&mac->lock);
44 mac->netdev = netdev;
45
46 ieee_init(ieee);
47 softmac_init(ieee80211_priv(netdev));
48 zd_chip_init(&mac->chip, netdev, intf);
49 return 0;
50}
51
52static int reset_channel(struct zd_mac *mac)
53{
54 int r;
55 unsigned long flags;
56 const struct channel_range *range;
57
58 spin_lock_irqsave(&mac->lock, flags);
59 range = zd_channel_range(mac->regdomain);
60 if (!range->start) {
61 r = -EINVAL;
62 goto out;
63 }
64 mac->requested_channel = range->start;
65 r = 0;
66out:
67 spin_unlock_irqrestore(&mac->lock, flags);
68 return r;
69}
70
71int zd_mac_init_hw(struct zd_mac *mac, u8 device_type)
72{
73 int r;
74 struct zd_chip *chip = &mac->chip;
75 u8 addr[ETH_ALEN];
76 u8 default_regdomain;
77
78 r = zd_chip_enable_int(chip);
79 if (r)
80 goto out;
81 r = zd_chip_init_hw(chip, device_type);
82 if (r)
83 goto disable_int;
84
85 zd_get_e2p_mac_addr(chip, addr);
86 r = zd_write_mac_addr(chip, addr);
87 if (r)
88 goto disable_int;
89 ZD_ASSERT(!irqs_disabled());
90 spin_lock_irq(&mac->lock);
91 memcpy(mac->netdev->dev_addr, addr, ETH_ALEN);
92 spin_unlock_irq(&mac->lock);
93
94 r = zd_read_regdomain(chip, &default_regdomain);
95 if (r)
96 goto disable_int;
97 if (!zd_regdomain_supported(default_regdomain)) {
98 dev_dbg_f(zd_mac_dev(mac),
99 "Regulatory Domain %#04x is not supported.\n",
100 default_regdomain);
101 r = -EINVAL;
102 goto disable_int;
103 }
104 spin_lock_irq(&mac->lock);
105 mac->regdomain = mac->default_regdomain = default_regdomain;
106 spin_unlock_irq(&mac->lock);
107 r = reset_channel(mac);
108 if (r)
109 goto disable_int;
110
111 r = zd_set_encryption_type(chip, NO_WEP);
112 if (r)
113 goto disable_int;
114
115 r = zd_geo_init(zd_mac_to_ieee80211(mac), mac->regdomain);
116 if (r)
117 goto disable_int;
118
119 r = 0;
120disable_int:
121 zd_chip_disable_int(chip);
122out:
123 return r;
124}
125
126void zd_mac_clear(struct zd_mac *mac)
127{
128	/* Acquire and release the lock to wait until no other user is active. */
129 spin_lock(&mac->lock);
130 spin_unlock(&mac->lock);
131 zd_chip_clear(&mac->chip);
132 memset(mac, 0, sizeof(*mac));
133}
134
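/* Programs the RX filter, sniffer flag and encryption mode according to the
 * current iw_mode; monitor mode opens the filter completely and enables the
 * sniffer.
 */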
135static int reset_mode(struct zd_mac *mac)
136{
137 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
138 struct zd_ioreq32 ioreqs[3] = {
139 { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE|
140 RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE },
141 { CR_SNIFFER_ON, 0U },
142 { CR_ENCRYPTION_TYPE, NO_WEP },
143 };
144
145 if (ieee->iw_mode == IW_MODE_MONITOR) {
146 ioreqs[0].value = 0xffffffff;
147 ioreqs[1].value = 0x1;
148 ioreqs[2].value = ENC_SNIFFER;
149 }
150
151 return zd_iowrite32a(&mac->chip, ioreqs, 3);
152}
153
154int zd_mac_open(struct net_device *netdev)
155{
156 struct zd_mac *mac = zd_netdev_mac(netdev);
157 struct zd_chip *chip = &mac->chip;
158 int r;
159
160 r = zd_chip_enable_int(chip);
161 if (r < 0)
162 goto out;
163
164 r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G);
165 if (r < 0)
166 goto disable_int;
167 r = reset_mode(mac);
168 if (r)
169 goto disable_int;
170 r = zd_chip_switch_radio_on(chip);
171 if (r < 0)
172 goto disable_int;
173 r = zd_chip_set_channel(chip, mac->requested_channel);
174 if (r < 0)
175 goto disable_radio;
176 r = zd_chip_enable_rx(chip);
177 if (r < 0)
178 goto disable_radio;
179 r = zd_chip_enable_hwint(chip);
180 if (r < 0)
181 goto disable_rx;
182
183 ieee80211softmac_start(netdev);
184 return 0;
185disable_rx:
186 zd_chip_disable_rx(chip);
187disable_radio:
188 zd_chip_switch_radio_off(chip);
189disable_int:
190 zd_chip_disable_int(chip);
191out:
192 return r;
193}
194
195int zd_mac_stop(struct net_device *netdev)
196{
197 struct zd_mac *mac = zd_netdev_mac(netdev);
198 struct zd_chip *chip = &mac->chip;
199
200 netif_stop_queue(netdev);
201
202 /*
203 * The order here deliberately is a little different from the open()
204 * method, since we need to make sure there is no opportunity for RX
205 * frames to be processed by softmac after we have stopped it.
206 */
207
208 zd_chip_disable_rx(chip);
209 ieee80211softmac_stop(netdev);
210
211 zd_chip_disable_hwint(chip);
212 zd_chip_switch_radio_off(chip);
213 zd_chip_disable_int(chip);
214
215 return 0;
216}
217
218int zd_mac_set_mac_address(struct net_device *netdev, void *p)
219{
220 int r;
221 unsigned long flags;
222 struct sockaddr *addr = p;
223 struct zd_mac *mac = zd_netdev_mac(netdev);
224 struct zd_chip *chip = &mac->chip;
225
226 if (!is_valid_ether_addr(addr->sa_data))
227 return -EADDRNOTAVAIL;
228
229 dev_dbg_f(zd_mac_dev(mac),
230 "Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data));
231
232 r = zd_write_mac_addr(chip, addr->sa_data);
233 if (r)
234 return r;
235
236 spin_lock_irqsave(&mac->lock, flags);
237 memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
238 spin_unlock_irqrestore(&mac->lock, flags);
239
240 return 0;
241}
242
243int zd_mac_set_regdomain(struct zd_mac *mac, u8 regdomain)
244{
245 int r;
246 u8 channel;
247
248 ZD_ASSERT(!irqs_disabled());
249 spin_lock_irq(&mac->lock);
250 if (regdomain == 0) {
251 regdomain = mac->default_regdomain;
252 }
253 if (!zd_regdomain_supported(regdomain)) {
254 spin_unlock_irq(&mac->lock);
255 return -EINVAL;
256 }
257 mac->regdomain = regdomain;
258 channel = mac->requested_channel;
259 spin_unlock_irq(&mac->lock);
260
261 r = zd_geo_init(zd_mac_to_ieee80211(mac), regdomain);
262 if (r)
263 return r;
264 if (!zd_regdomain_supports_channel(regdomain, channel)) {
265 r = reset_channel(mac);
266 if (r)
267 return r;
268 }
269
270 return 0;
271}
272
273u8 zd_mac_get_regdomain(struct zd_mac *mac)
274{
275 unsigned long flags;
276 u8 regdomain;
277
278 spin_lock_irqsave(&mac->lock, flags);
279 regdomain = mac->regdomain;
280 spin_unlock_irqrestore(&mac->lock, flags);
281 return regdomain;
282}
283
284static void set_channel(struct net_device *netdev, u8 channel)
285{
286 struct zd_mac *mac = zd_netdev_mac(netdev);
287
288 dev_dbg_f(zd_mac_dev(mac), "channel %d\n", channel);
289
290 zd_chip_set_channel(&mac->chip, channel);
291}
292
293/* TODO: Should not work in Managed mode. */
294int zd_mac_request_channel(struct zd_mac *mac, u8 channel)
295{
296 unsigned long lock_flags;
297 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
298
299 if (ieee->iw_mode == IW_MODE_INFRA)
300 return -EPERM;
301
302 spin_lock_irqsave(&mac->lock, lock_flags);
303 if (!zd_regdomain_supports_channel(mac->regdomain, channel)) {
304 spin_unlock_irqrestore(&mac->lock, lock_flags);
305 return -EINVAL;
306 }
307 mac->requested_channel = channel;
308 spin_unlock_irqrestore(&mac->lock, lock_flags);
309 if (netif_running(mac->netdev))
310 return zd_chip_set_channel(&mac->chip, channel);
311 else
312 return 0;
313}
314
315int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags)
316{
317 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
318
319 *channel = zd_chip_get_channel(&mac->chip);
320 if (ieee->iw_mode != IW_MODE_INFRA) {
321 spin_lock_irq(&mac->lock);
322 *flags = *channel == mac->requested_channel ?
323 MAC_FIXED_CHANNEL : 0;
324		spin_unlock_irq(&mac->lock);
325 } else {
326 *flags = 0;
327 }
328 dev_dbg_f(zd_mac_dev(mac), "channel %u flags %u\n", *channel, *flags);
329 return 0;
330}
331
332/* If an invalid rate is given, we fall back to the slowest rate: 1 MBit/s */
333static u8 cs_typed_rate(u8 cs_rate)
334{
335 static const u8 typed_rates[16] = {
336 [ZD_CS_CCK_RATE_1M] = ZD_CS_CCK|ZD_CS_CCK_RATE_1M,
337 [ZD_CS_CCK_RATE_2M] = ZD_CS_CCK|ZD_CS_CCK_RATE_2M,
338 [ZD_CS_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CS_CCK_RATE_5_5M,
339 [ZD_CS_CCK_RATE_11M] = ZD_CS_CCK|ZD_CS_CCK_RATE_11M,
340 [ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M,
341 [ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M,
342 [ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M,
343 [ZD_OFDM_RATE_18M] = ZD_CS_OFDM|ZD_OFDM_RATE_18M,
344 [ZD_OFDM_RATE_24M] = ZD_CS_OFDM|ZD_OFDM_RATE_24M,
345 [ZD_OFDM_RATE_36M] = ZD_CS_OFDM|ZD_OFDM_RATE_36M,
346 [ZD_OFDM_RATE_48M] = ZD_CS_OFDM|ZD_OFDM_RATE_48M,
347 [ZD_OFDM_RATE_54M] = ZD_CS_OFDM|ZD_OFDM_RATE_54M,
348 };
349
350 ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f);
351 return typed_rates[cs_rate & ZD_CS_RATE_MASK];
352}
353
354/* Fall back to the lowest rate, if the rate is unknown. */
355static u8 rate_to_cs_rate(u8 rate)
356{
357 switch (rate) {
358 case IEEE80211_CCK_RATE_2MB:
359 return ZD_CS_CCK_RATE_2M;
360 case IEEE80211_CCK_RATE_5MB:
361 return ZD_CS_CCK_RATE_5_5M;
362 case IEEE80211_CCK_RATE_11MB:
363 return ZD_CS_CCK_RATE_11M;
364 case IEEE80211_OFDM_RATE_6MB:
365 return ZD_OFDM_RATE_6M;
366 case IEEE80211_OFDM_RATE_9MB:
367 return ZD_OFDM_RATE_9M;
368 case IEEE80211_OFDM_RATE_12MB:
369 return ZD_OFDM_RATE_12M;
370 case IEEE80211_OFDM_RATE_18MB:
371 return ZD_OFDM_RATE_18M;
372 case IEEE80211_OFDM_RATE_24MB:
373 return ZD_OFDM_RATE_24M;
374 case IEEE80211_OFDM_RATE_36MB:
375 return ZD_OFDM_RATE_36M;
376 case IEEE80211_OFDM_RATE_48MB:
377 return ZD_OFDM_RATE_48M;
378 case IEEE80211_OFDM_RATE_54MB:
379 return ZD_OFDM_RATE_54M;
380 }
381 return ZD_CS_CCK_RATE_1M;
382}
383
384int zd_mac_set_mode(struct zd_mac *mac, u32 mode)
385{
386 struct ieee80211_device *ieee;
387
388 switch (mode) {
389 case IW_MODE_AUTO:
390 case IW_MODE_ADHOC:
391 case IW_MODE_INFRA:
392 mac->netdev->type = ARPHRD_ETHER;
393 break;
394 case IW_MODE_MONITOR:
395 mac->netdev->type = ARPHRD_IEEE80211_RADIOTAP;
396 break;
397 default:
398 dev_dbg_f(zd_mac_dev(mac), "wrong mode %u\n", mode);
399 return -EINVAL;
400 }
401
402 ieee = zd_mac_to_ieee80211(mac);
403 ZD_ASSERT(!irqs_disabled());
404 spin_lock_irq(&ieee->lock);
405 ieee->iw_mode = mode;
406 spin_unlock_irq(&ieee->lock);
407
408 if (netif_running(mac->netdev))
409 return reset_mode(mac);
410
411 return 0;
412}
413
414int zd_mac_get_mode(struct zd_mac *mac, u32 *mode)
415{
416 unsigned long flags;
417 struct ieee80211_device *ieee;
418
419 ieee = zd_mac_to_ieee80211(mac);
420 spin_lock_irqsave(&ieee->lock, flags);
421 *mode = ieee->iw_mode;
422 spin_unlock_irqrestore(&ieee->lock, flags);
423 return 0;
424}
425
426int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range)
427{
428 int i;
429 const struct channel_range *channel_range;
430 u8 regdomain;
431
432 memset(range, 0, sizeof(*range));
433
434	/* FIXME: Not that important and depends on the mode. This value is
435	 * usually used for 802.11g. The number given here appears to be in
436	 * bit/s.
437	 */
438 range->throughput = 27 * 1000 * 1000;
439
440 range->max_qual.qual = 100;
441 range->max_qual.level = 100;
442
443 /* FIXME: Needs still to be tuned. */
444 range->avg_qual.qual = 71;
445 range->avg_qual.level = 80;
446
447 /* FIXME: depends on standard? */
448 range->min_rts = 256;
449 range->max_rts = 2346;
450
451 range->min_frag = MIN_FRAG_THRESHOLD;
452 range->max_frag = MAX_FRAG_THRESHOLD;
453
454 range->max_encoding_tokens = WEP_KEYS;
455 range->num_encoding_sizes = 2;
456 range->encoding_size[0] = 5;
457 range->encoding_size[1] = WEP_KEY_LEN;
458
459 range->we_version_compiled = WIRELESS_EXT;
460 range->we_version_source = 20;
461
462 ZD_ASSERT(!irqs_disabled());
463 spin_lock_irq(&mac->lock);
464 regdomain = mac->regdomain;
465 spin_unlock_irq(&mac->lock);
466 channel_range = zd_channel_range(regdomain);
467
468 range->num_channels = channel_range->end - channel_range->start;
469 range->old_num_channels = range->num_channels;
470 range->num_frequency = range->num_channels;
471 range->old_num_frequency = range->num_frequency;
472
473 for (i = 0; i < range->num_frequency; i++) {
474 struct iw_freq *freq = &range->freq[i];
475 freq->i = channel_range->start + i;
476 zd_channel_to_freq(freq, freq->i);
477 }
478
479 return 0;
480}
481
482static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length)
483{
484 static const u8 rate_divisor[] = {
485 [ZD_CS_CCK_RATE_1M] = 1,
486 [ZD_CS_CCK_RATE_2M] = 2,
487 [ZD_CS_CCK_RATE_5_5M] = 11, /* bits must be doubled */
488 [ZD_CS_CCK_RATE_11M] = 11,
489 [ZD_OFDM_RATE_6M] = 6,
490 [ZD_OFDM_RATE_9M] = 9,
491 [ZD_OFDM_RATE_12M] = 12,
492 [ZD_OFDM_RATE_18M] = 18,
493 [ZD_OFDM_RATE_24M] = 24,
494 [ZD_OFDM_RATE_36M] = 36,
495 [ZD_OFDM_RATE_48M] = 48,
496 [ZD_OFDM_RATE_54M] = 54,
497 };
498
499 u32 bits = (u32)tx_length * 8;
500 u32 divisor;
501
502 divisor = rate_divisor[cs_rate];
503 if (divisor == 0)
504 return -EINVAL;
505
506 switch (cs_rate) {
507 case ZD_CS_CCK_RATE_5_5M:
508 bits = (2*bits) + 10; /* round up to the next integer */
509 break;
510 case ZD_CS_CCK_RATE_11M:
511 if (service) {
512 u32 t = bits % 11;
513 *service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION;
514 if (0 < t && t <= 3) {
515 *service |= ZD_PLCP_SERVICE_LENGTH_EXTENSION;
516 }
517 }
518 bits += 10; /* round up to the next integer */
519 break;
520 }
521
522 return bits/divisor;
523}
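
As an illustration of the calculation above (not part of the driver code): a 1500-byte fragment at ZD_CS_CCK_RATE_11M gives bits = 1500 * 8 = 12000 and divisor = 11. Since 12000 % 11 = 10 lies outside 1..3, the PLCP length-extension bit in *service stays cleared, and the function returns (12000 + 10) / 11 = 1091, i.e. ceil(12000 / 11) microseconds of airtime.
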
524
525enum {
526 R2M_SHORT_PREAMBLE = 0x01,
527 R2M_11A = 0x02,
528};
529
530static u8 cs_rate_to_modulation(u8 cs_rate, int flags)
531{
532 u8 modulation;
533
534 modulation = cs_typed_rate(cs_rate);
535 if (flags & R2M_SHORT_PREAMBLE) {
536 switch (ZD_CS_RATE(modulation)) {
537 case ZD_CS_CCK_RATE_2M:
538 case ZD_CS_CCK_RATE_5_5M:
539 case ZD_CS_CCK_RATE_11M:
540 modulation |= ZD_CS_CCK_PREA_SHORT;
541 return modulation;
542 }
543 }
544 if (flags & R2M_11A) {
545 if (ZD_CS_TYPE(modulation) == ZD_CS_OFDM)
546 modulation |= ZD_CS_OFDM_MODE_11A;
547 }
548 return modulation;
549}
550
551static void cs_set_modulation(struct zd_mac *mac, struct zd_ctrlset *cs,
552 struct ieee80211_hdr_4addr *hdr)
553{
554 struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
555 u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl));
556 u8 rate, cs_rate;
557 int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0;
558
559 /* FIXME: 802.11a? short preamble? */
560 rate = ieee80211softmac_suggest_txrate(softmac,
561 is_multicast_ether_addr(hdr->addr1), is_mgt);
562
563 cs_rate = rate_to_cs_rate(rate);
564 cs->modulation = cs_rate_to_modulation(cs_rate, 0);
565}
566
567static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
568 struct ieee80211_hdr_4addr *header)
569{
570 unsigned int tx_length = le16_to_cpu(cs->tx_length);
571 u16 fctl = le16_to_cpu(header->frame_ctl);
572 u16 ftype = WLAN_FC_GET_TYPE(fctl);
573 u16 stype = WLAN_FC_GET_STYPE(fctl);
574
575 /*
576 * CONTROL:
577 * - start at 0x00
578 * - if fragment 0, enable bit 0
579 * - if backoff needed, enable bit 0
580 * - if burst (backoff not needed) disable bit 0
581 * - if multicast, enable bit 1
582 * - if PS-POLL frame, enable bit 2
583 * - if in INDEPENDENT_BSS mode and zd1205_DestPowerSave, then enable
584 * bit 4 (FIXME: wtf)
585	 * - if frag_len > RTS threshold, set bit 5 as long as it isn't
586	 *   multicast or mgmt
587 * - if bit 5 is set, and we are in OFDM mode, unset bit 5 and set bit
588 * 7
589 */
590
591 cs->control = 0;
592
593 /* First fragment */
594 if (WLAN_GET_SEQ_FRAG(le16_to_cpu(header->seq_ctl)) == 0)
595 cs->control |= ZD_CS_NEED_RANDOM_BACKOFF;
596
597 /* Multicast */
598 if (is_multicast_ether_addr(header->addr1))
599 cs->control |= ZD_CS_MULTICAST;
600
601 /* PS-POLL */
602 if (stype == IEEE80211_STYPE_PSPOLL)
603 cs->control |= ZD_CS_PS_POLL_FRAME;
604
605 if (!is_multicast_ether_addr(header->addr1) &&
606 ftype != IEEE80211_FTYPE_MGMT &&
607 tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
608 {
609 /* FIXME: check the logic */
610 if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM) {
611 /* 802.11g */
612 cs->control |= ZD_CS_SELF_CTS;
613 } else { /* 802.11b */
614 cs->control |= ZD_CS_RTS;
615 }
616 }
617
618 /* FIXME: Management frame? */
619}
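
As an illustration of the rules listed in the comment above (not part of the driver code): a first, unicast data fragment sent at an OFDM rate with tx_length above the RTS threshold ends up with control = ZD_CS_NEED_RANDOM_BACKOFF | ZD_CS_SELF_CTS = 0x01 | 0x80 = 0x81, while the same fragment at a CCK rate gets ZD_CS_NEED_RANDOM_BACKOFF | ZD_CS_RTS = 0x21.
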
620
621static int fill_ctrlset(struct zd_mac *mac,
622 struct ieee80211_txb *txb,
623 int frag_num)
624{
625 int r;
626 struct sk_buff *skb = txb->fragments[frag_num];
627 struct ieee80211_hdr_4addr *hdr =
628 (struct ieee80211_hdr_4addr *) skb->data;
629 unsigned int frag_len = skb->len + IEEE80211_FCS_LEN;
630 unsigned int next_frag_len;
631 unsigned int packet_length;
632 struct zd_ctrlset *cs = (struct zd_ctrlset *)
633 skb_push(skb, sizeof(struct zd_ctrlset));
634
635 if (frag_num+1 < txb->nr_frags) {
636 next_frag_len = txb->fragments[frag_num+1]->len +
637 IEEE80211_FCS_LEN;
638 } else {
639 next_frag_len = 0;
640 }
641 ZD_ASSERT(frag_len <= 0xffff);
642 ZD_ASSERT(next_frag_len <= 0xffff);
643
644 cs_set_modulation(mac, cs, hdr);
645
646 cs->tx_length = cpu_to_le16(frag_len);
647
648 cs_set_control(mac, cs, hdr);
649
650 packet_length = frag_len + sizeof(struct zd_ctrlset) + 10;
651 ZD_ASSERT(packet_length <= 0xffff);
652	/* ZD1211B: Computing the length difference this way gives us
653	 * flexibility to compute the packet length.
654	 */
655 cs->packet_length = cpu_to_le16(mac->chip.is_zd1211b ?
656 packet_length - frag_len : packet_length);
657
658 /*
659 * CURRENT LENGTH:
660 * - transmit frame length in microseconds
661 * - seems to be derived from frame length
662 * - see Cal_Us_Service() in zdinlinef.h
663 * - if macp->bTxBurstEnable is enabled, then multiply by 4
664 * - bTxBurstEnable is never set in the vendor driver
665 *
666 * SERVICE:
667 * - "for PLCP configuration"
668 * - always 0 except in some situations at 802.11b 11M
669 * - see line 53 of zdinlinef.h
670 */
671 cs->service = 0;
672 r = zd_calc_tx_length_us(&cs->service, ZD_CS_RATE(cs->modulation),
673 le16_to_cpu(cs->tx_length));
674 if (r < 0)
675 return r;
676 cs->current_length = cpu_to_le16(r);
677
678 if (next_frag_len == 0) {
679 cs->next_frame_length = 0;
680 } else {
681 r = zd_calc_tx_length_us(NULL, ZD_CS_RATE(cs->modulation),
682 next_frag_len);
683 if (r < 0)
684 return r;
685 cs->next_frame_length = cpu_to_le16(r);
686 }
687
688 return 0;
689}
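
A short numeric illustration (not part of the driver code, assuming the packed struct zd_ctrlset from zd_mac.h, which is 11 bytes): for a 1400-byte fragment including the FCS, packet_length becomes 1400 + 11 + 10 = 1421 on the ZD1211, whereas the ZD1211B variant stores only the difference, 11 + 10 = 21.
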
690
691static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri)
692{
693 int i, r;
694
695 for (i = 0; i < txb->nr_frags; i++) {
696 struct sk_buff *skb = txb->fragments[i];
697
698 r = fill_ctrlset(mac, txb, i);
699 if (r)
700 return r;
701 r = zd_usb_tx(&mac->chip.usb, skb->data, skb->len);
702 if (r)
703 return r;
704 }
705
706 /* FIXME: shouldn't this be handled by the upper layers? */
707 mac->netdev->trans_start = jiffies;
708
709 ieee80211_txb_free(txb);
710 return 0;
711}
712
713struct zd_rt_hdr {
714 struct ieee80211_radiotap_header rt_hdr;
715 u8 rt_flags;
716 u16 rt_channel;
717 u16 rt_chbitmask;
718 u16 rt_rate;
719};
720
721static void fill_rt_header(void *buffer, struct zd_mac *mac,
722 const struct ieee80211_rx_stats *stats,
723 const struct rx_status *status)
724{
725 struct zd_rt_hdr *hdr = buffer;
726
727 hdr->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
728 hdr->rt_hdr.it_pad = 0;
729 hdr->rt_hdr.it_len = cpu_to_le16(sizeof(struct zd_rt_hdr));
730 hdr->rt_hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
731 (1 << IEEE80211_RADIOTAP_CHANNEL) |
732 (1 << IEEE80211_RADIOTAP_RATE));
733
734 hdr->rt_flags = 0;
735 if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256))
736 hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP;
737
738 /* FIXME: 802.11a */
739 hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz(
740 _zd_chip_get_channel(&mac->chip)));
741 hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ |
742 ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) ==
743 ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK));
744
745 hdr->rt_rate = stats->rate / 5;
746}
747
748/* Returns 1 if the data packet is for us and 0 otherwise. */
749static int is_data_packet_for_us(struct ieee80211_device *ieee,
750 struct ieee80211_hdr_4addr *hdr)
751{
752 struct net_device *netdev = ieee->dev;
753 u16 fc = le16_to_cpu(hdr->frame_ctl);
754
755 ZD_ASSERT(WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA);
756
757 switch (ieee->iw_mode) {
758 case IW_MODE_ADHOC:
759 if ((fc & (IEEE80211_FCTL_TODS|IEEE80211_FCTL_FROMDS)) != 0 ||
760 memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) != 0)
761 return 0;
762 break;
763 case IW_MODE_AUTO:
764 case IW_MODE_INFRA:
765 if ((fc & (IEEE80211_FCTL_TODS|IEEE80211_FCTL_FROMDS)) !=
766 IEEE80211_FCTL_FROMDS ||
767 memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) != 0)
768 return 0;
769 break;
770 default:
771 ZD_ASSERT(ieee->iw_mode != IW_MODE_MONITOR);
772 return 0;
773 }
774
775 return memcmp(hdr->addr1, netdev->dev_addr, ETH_ALEN) == 0 ||
776 is_multicast_ether_addr(hdr->addr1) ||
777 (netdev->flags & IFF_PROMISC);
778}
779
780/* Filters received packets. Returns 1 if the packet should be handed to
781 * ieee80211_rx(), 0 if it should be dropped, and -EINVAL if an error is
782 * detected. Management frames are passed to ieee80211_rx_mgt() here.
783 *
784 * Based on ieee80211_rx_any().
785 */
786static int filter_rx(struct ieee80211_device *ieee,
787 const u8 *buffer, unsigned int length,
788 struct ieee80211_rx_stats *stats)
789{
790 struct ieee80211_hdr_4addr *hdr;
791 u16 fc;
792
793 if (ieee->iw_mode == IW_MODE_MONITOR)
794 return 1;
795
796 hdr = (struct ieee80211_hdr_4addr *)buffer;
797 fc = le16_to_cpu(hdr->frame_ctl);
798 if ((fc & IEEE80211_FCTL_VERS) != 0)
799 return -EINVAL;
800
801 switch (WLAN_FC_GET_TYPE(fc)) {
802 case IEEE80211_FTYPE_MGMT:
803 if (length < sizeof(struct ieee80211_hdr_3addr))
804 return -EINVAL;
805 ieee80211_rx_mgt(ieee, hdr, stats);
806 return 0;
807 case IEEE80211_FTYPE_CTL:
808 /* Ignore invalid short buffers */
809 return 0;
810 case IEEE80211_FTYPE_DATA:
811 if (length < sizeof(struct ieee80211_hdr_3addr))
812 return -EINVAL;
813 return is_data_packet_for_us(ieee, hdr);
814 }
815
816 return -EINVAL;
817}
818
819static void update_qual_rssi(struct zd_mac *mac, u8 qual_percent, u8 rssi)
820{
821 unsigned long flags;
822
823 spin_lock_irqsave(&mac->lock, flags);
824 mac->qual_average = (7 * mac->qual_average + qual_percent) / 8;
825 mac->rssi_average = (7 * mac->rssi_average + rssi) / 8;
826 spin_unlock_irqrestore(&mac->lock, flags);
827}
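
The update above is a simple exponential moving average with a weight of 1/8 on the newest sample: avg_new = (7 * avg_old + sample) / 8, using integer division. Both qual_average and rssi_average therefore track roughly the last handful of received frames (an illustrative note, not part of the patch).
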
828
829static int fill_rx_stats(struct ieee80211_rx_stats *stats,
830 const struct rx_status **pstatus,
831 struct zd_mac *mac,
832 const u8 *buffer, unsigned int length)
833{
834 const struct rx_status *status;
835
836 *pstatus = status = zd_tail(buffer, length, sizeof(struct rx_status));
837 if (status->frame_status & ZD_RX_ERROR) {
838 /* FIXME: update? */
839 return -EINVAL;
840 }
841 memset(stats, 0, sizeof(struct ieee80211_rx_stats));
842	stats->len = length - (ZD_PLCP_HEADER_SIZE + IEEE80211_FCS_LEN +
843			       sizeof(struct rx_status));
844 /* FIXME: 802.11a */
845 stats->freq = IEEE80211_24GHZ_BAND;
846 stats->received_channel = _zd_chip_get_channel(&mac->chip);
847 stats->rssi = zd_rx_strength_percent(status->signal_strength);
848 stats->signal = zd_rx_qual_percent(buffer,
849 length - sizeof(struct rx_status),
850 status);
851 stats->mask = IEEE80211_STATMASK_RSSI | IEEE80211_STATMASK_SIGNAL;
852 stats->rate = zd_rx_rate(buffer, status);
853 if (stats->rate)
854 stats->mask |= IEEE80211_STATMASK_RATE;
855
856 update_qual_rssi(mac, stats->signal, stats->rssi);
857 return 0;
858}
859
860int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length)
861{
862 int r;
863 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
864 struct ieee80211_rx_stats stats;
865 const struct rx_status *status;
866 struct sk_buff *skb;
867
868 if (length < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN +
869 IEEE80211_FCS_LEN + sizeof(struct rx_status))
870 return -EINVAL;
871
872 r = fill_rx_stats(&stats, &status, mac, buffer, length);
873 if (r)
874 return r;
875
876 length -= ZD_PLCP_HEADER_SIZE+IEEE80211_FCS_LEN+
877 sizeof(struct rx_status);
878 buffer += ZD_PLCP_HEADER_SIZE;
879
880 r = filter_rx(ieee, buffer, length, &stats);
881 if (r <= 0)
882 return r;
883
884 skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length);
885 if (!skb)
886 return -ENOMEM;
887 if (ieee->iw_mode == IW_MODE_MONITOR)
888 fill_rt_header(skb_put(skb, sizeof(struct zd_rt_hdr)), mac,
889 &stats, status);
890 memcpy(skb_put(skb, length), buffer, length);
891
892 r = ieee80211_rx(ieee, skb, &stats);
893 if (!r) {
894 ZD_ASSERT(in_irq());
895 dev_kfree_skb_irq(skb);
896 }
897 return 0;
898}
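
For orientation (an illustrative sketch implied by the code above, not part of the patch), each received USB frame is laid out as:

  [ 5-byte PLCP header ][ 802.11 frame incl. 4-byte FCS ][ 5-byte struct rx_status ]

zd_mac_rx() first reads the rx_status trailer via zd_tail(), then strips the PLCP header from the front and the FCS plus rx_status from the length, so filter_rx() and ieee80211_rx() see the bare 802.11 frame without the FCS.
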
899
900static int netdev_tx(struct ieee80211_txb *txb, struct net_device *netdev,
901 int pri)
902{
903 return zd_mac_tx(zd_netdev_mac(netdev), txb, pri);
904}
905
906static void set_security(struct net_device *netdev,
907 struct ieee80211_security *sec)
908{
909 struct ieee80211_device *ieee = zd_netdev_ieee80211(netdev);
910 struct ieee80211_security *secinfo = &ieee->sec;
911 int keyidx;
912
913 dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), "\n");
914
915 for (keyidx = 0; keyidx<WEP_KEYS; keyidx++)
916 if (sec->flags & (1<<keyidx)) {
917 secinfo->encode_alg[keyidx] = sec->encode_alg[keyidx];
918 secinfo->key_sizes[keyidx] = sec->key_sizes[keyidx];
919 memcpy(secinfo->keys[keyidx], sec->keys[keyidx],
920 SCM_KEY_LEN);
921 }
922
923 if (sec->flags & SEC_ACTIVE_KEY) {
924 secinfo->active_key = sec->active_key;
925 dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
926 " .active_key = %d\n", sec->active_key);
927 }
928 if (sec->flags & SEC_UNICAST_GROUP) {
929 secinfo->unicast_uses_group = sec->unicast_uses_group;
930 dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
931 " .unicast_uses_group = %d\n",
932 sec->unicast_uses_group);
933 }
934 if (sec->flags & SEC_LEVEL) {
935 secinfo->level = sec->level;
936 dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
937 " .level = %d\n", sec->level);
938 }
939 if (sec->flags & SEC_ENABLED) {
940 secinfo->enabled = sec->enabled;
941 dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
942 " .enabled = %d\n", sec->enabled);
943 }
944 if (sec->flags & SEC_ENCRYPT) {
945 secinfo->encrypt = sec->encrypt;
946 dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
947 " .encrypt = %d\n", sec->encrypt);
948 }
949 if (sec->flags & SEC_AUTH_MODE) {
950 secinfo->auth_mode = sec->auth_mode;
951 dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)),
952 " .auth_mode = %d\n", sec->auth_mode);
953 }
954}
955
956static void ieee_init(struct ieee80211_device *ieee)
957{
958 ieee->mode = IEEE_B | IEEE_G;
959 ieee->freq_band = IEEE80211_24GHZ_BAND;
960 ieee->modulation = IEEE80211_OFDM_MODULATION | IEEE80211_CCK_MODULATION;
961 ieee->tx_headroom = sizeof(struct zd_ctrlset);
962 ieee->set_security = set_security;
963 ieee->hard_start_xmit = netdev_tx;
964
965 /* Software encryption/decryption for now */
966 ieee->host_build_iv = 0;
967 ieee->host_encrypt = 1;
968 ieee->host_decrypt = 1;
969
970 /* FIXME: default to managed mode, until ieee80211 and zd1211rw can
971 * correctly support AUTO */
972 ieee->iw_mode = IW_MODE_INFRA;
973}
974
975static void softmac_init(struct ieee80211softmac_device *sm)
976{
977 sm->set_channel = set_channel;
978}
979
980struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
981{
982 struct zd_mac *mac = zd_netdev_mac(ndev);
983 struct iw_statistics *iw_stats = &mac->iw_stats;
984
985 memset(iw_stats, 0, sizeof(struct iw_statistics));
986 /* We are not setting the status, because ieee->state is not updated
987 * at all and this driver doesn't track authentication state.
988 */
989 spin_lock_irq(&mac->lock);
990 iw_stats->qual.qual = mac->qual_average;
991 iw_stats->qual.level = mac->rssi_average;
992 iw_stats->qual.updated = IW_QUAL_QUAL_UPDATED|IW_QUAL_LEVEL_UPDATED|
993 IW_QUAL_NOISE_INVALID;
994 spin_unlock_irq(&mac->lock);
995 /* TODO: update counter */
996 return iw_stats;
997}
998
999#ifdef DEBUG
1000static const char* decryption_types[] = {
1001 [ZD_RX_NO_WEP] = "none",
1002 [ZD_RX_WEP64] = "WEP64",
1003 [ZD_RX_TKIP] = "TKIP",
1004 [ZD_RX_AES] = "AES",
1005 [ZD_RX_WEP128] = "WEP128",
1006 [ZD_RX_WEP256] = "WEP256",
1007};
1008
1009static const char *decryption_type_string(u8 type)
1010{
1011 const char *s;
1012
1013 if (type < ARRAY_SIZE(decryption_types)) {
1014 s = decryption_types[type];
1015 } else {
1016 s = NULL;
1017 }
1018 return s ? s : "unknown";
1019}
1020
1021static int is_ofdm(u8 frame_status)
1022{
1023 return (frame_status & ZD_RX_OFDM);
1024}
1025
1026void zd_dump_rx_status(const struct rx_status *status)
1027{
1028 const char* modulation;
1029 u8 quality;
1030
1031 if (is_ofdm(status->frame_status)) {
1032 modulation = "ofdm";
1033 quality = status->signal_quality_ofdm;
1034 } else {
1035 modulation = "cck";
1036 quality = status->signal_quality_cck;
1037 }
1038 pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n",
1039 modulation, status->signal_strength, quality,
1040 decryption_type_string(status->decryption_type));
1041 if (status->frame_status & ZD_RX_ERROR) {
1042 pr_debug("rx error %s%s%s%s%s%s\n",
1043 (status->frame_status & ZD_RX_TIMEOUT_ERROR) ?
1044 "timeout " : "",
1045 (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ?
1046 "fifo " : "",
1047 (status->frame_status & ZD_RX_DECRYPTION_ERROR) ?
1048 "decryption " : "",
1049 (status->frame_status & ZD_RX_CRC32_ERROR) ?
1050 "crc32 " : "",
1051 (status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ?
1052 "addr1 " : "",
1053 (status->frame_status & ZD_RX_CRC16_ERROR) ?
1054 "crc16" : "");
1055 }
1056}
1057#endif /* DEBUG */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
new file mode 100644
index 000000000000..71e382c589ee
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -0,0 +1,190 @@
1/* zd_mac.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_MAC_H
19#define _ZD_MAC_H
20
21#include <linux/wireless.h>
22#include <linux/kernel.h>
23#include <net/ieee80211.h>
24#include <net/ieee80211softmac.h>
25
26#include "zd_chip.h"
27#include "zd_netdev.h"
28
29struct zd_ctrlset {
30 u8 modulation;
31 __le16 tx_length;
32 u8 control;
33 /* stores only the difference to tx_length on ZD1211B */
34 __le16 packet_length;
35 __le16 current_length;
36 u8 service;
37 __le16 next_frame_length;
38} __attribute__((packed));
39
40#define ZD_CS_RESERVED_SIZE 25
41
42/* zd_ctrlset field modulation */
43#define ZD_CS_RATE_MASK 0x0f
44#define ZD_CS_TYPE_MASK 0x10
45#define ZD_CS_RATE(modulation) ((modulation) & ZD_CS_RATE_MASK)
46#define ZD_CS_TYPE(modulation) ((modulation) & ZD_CS_TYPE_MASK)
47
48#define ZD_CS_CCK 0x00
49#define ZD_CS_OFDM 0x10
50
51#define ZD_CS_CCK_RATE_1M 0x00
52#define ZD_CS_CCK_RATE_2M 0x01
53#define ZD_CS_CCK_RATE_5_5M 0x02
54#define ZD_CS_CCK_RATE_11M 0x03
55/* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*.
56 */
57
58/* bit 5 is preamble (when in CCK mode), or a/g selection (when in OFDM mode) */
59#define ZD_CS_CCK_PREA_LONG 0x00
60#define ZD_CS_CCK_PREA_SHORT 0x20
61#define ZD_CS_OFDM_MODE_11G 0x00
62#define ZD_CS_OFDM_MODE_11A 0x20
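
A small illustration of how these bits combine (not part of the patch): a short-preamble CCK transmission at 11 Mbit/s uses modulation = ZD_CS_CCK | ZD_CS_CCK_RATE_11M | ZD_CS_CCK_PREA_SHORT = 0x00 | 0x03 | 0x20 = 0x23; ZD_CS_RATE(0x23) recovers the rate code 0x03 and ZD_CS_TYPE(0x23) yields ZD_CS_CCK.
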
63
64/* zd_ctrlset control field */
65#define ZD_CS_NEED_RANDOM_BACKOFF 0x01
66#define ZD_CS_MULTICAST 0x02
67
68#define ZD_CS_FRAME_TYPE_MASK 0x0c
69#define ZD_CS_DATA_FRAME 0x00
70#define ZD_CS_PS_POLL_FRAME 0x04
71#define ZD_CS_MANAGEMENT_FRAME 0x08
72#define ZD_CS_NO_SEQUENCE_CTL_FRAME 0x0c
73
74#define ZD_CS_WAKE_DESTINATION 0x10
75#define ZD_CS_RTS 0x20
76#define ZD_CS_ENCRYPT 0x40
77#define ZD_CS_SELF_CTS 0x80
78
79/* Incoming frames are prepended by a PLCP header */
80#define ZD_PLCP_HEADER_SIZE 5
81
82struct rx_length_info {
83 __le16 length[3];
84 __le16 tag;
85} __attribute__((packed));
86
87#define RX_LENGTH_INFO_TAG 0x697e
88
89struct rx_status {
90 /* rssi */
91 u8 signal_strength;
92 u8 signal_quality_cck;
93 u8 signal_quality_ofdm;
94 u8 decryption_type;
95 u8 frame_status;
96} __attribute__((packed));
97
98/* rx_status field decryption_type */
99#define ZD_RX_NO_WEP 0
100#define ZD_RX_WEP64 1
101#define ZD_RX_TKIP 2
102#define ZD_RX_AES 4
103#define ZD_RX_WEP128 5
104#define ZD_RX_WEP256 6
105
106/* rx_status field frame_status */
107#define ZD_RX_FRAME_MODULATION_MASK 0x01
108#define ZD_RX_CCK 0x00
109#define ZD_RX_OFDM 0x01
110
111#define ZD_RX_TIMEOUT_ERROR 0x02
112#define ZD_RX_FIFO_OVERRUN_ERROR 0x04
113#define ZD_RX_DECRYPTION_ERROR 0x08
114#define ZD_RX_CRC32_ERROR 0x10
115#define ZD_RX_NO_ADDR1_MATCH_ERROR 0x20
116#define ZD_RX_CRC16_ERROR 0x40
117#define ZD_RX_ERROR 0x80
118
119enum mac_flags {
120 MAC_FIXED_CHANNEL = 0x01,
121};
122
123struct zd_mac {
124 struct net_device *netdev;
125 struct zd_chip chip;
126 spinlock_t lock;
127 /* Unlocked reading possible */
128 struct iw_statistics iw_stats;
129 u8 qual_average;
130 u8 rssi_average;
131 u8 regdomain;
132 u8 default_regdomain;
133 u8 requested_channel;
134};
135
136static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac)
137{
138 return zd_netdev_ieee80211(mac->netdev);
139}
140
141static inline struct zd_mac *zd_netdev_mac(struct net_device *netdev)
142{
143 return ieee80211softmac_priv(netdev);
144}
145
146static inline struct zd_mac *zd_chip_to_mac(struct zd_chip *chip)
147{
148 return container_of(chip, struct zd_mac, chip);
149}
150
151static inline struct zd_mac *zd_usb_to_mac(struct zd_usb *usb)
152{
153 return zd_chip_to_mac(zd_usb_to_chip(usb));
154}
155
156#define zd_mac_dev(mac) (zd_chip_dev(&(mac)->chip))
157
158int zd_mac_init(struct zd_mac *mac,
159 struct net_device *netdev,
160 struct usb_interface *intf);
161void zd_mac_clear(struct zd_mac *mac);
162
163int zd_mac_init_hw(struct zd_mac *mac, u8 device_type);
164
165int zd_mac_open(struct net_device *netdev);
166int zd_mac_stop(struct net_device *netdev);
167int zd_mac_set_mac_address(struct net_device *dev, void *p);
168
169int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length);
170
171int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain);
172u8 zd_mac_get_regdomain(struct zd_mac *zd_mac);
173
174int zd_mac_request_channel(struct zd_mac *mac, u8 channel);
175int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags);
176
177int zd_mac_set_mode(struct zd_mac *mac, u32 mode);
178int zd_mac_get_mode(struct zd_mac *mac, u32 *mode);
179
180int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range);
181
182struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev);
183
184#ifdef DEBUG
185void zd_dump_rx_status(const struct rx_status *status);
186#else
187#define zd_dump_rx_status(status)
188#endif /* DEBUG */
189
190#endif /* _ZD_MAC_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c
new file mode 100644
index 000000000000..9df232c2c863
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_netdev.c
@@ -0,0 +1,267 @@
1/* zd_netdev.c
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/skbuff.h>
21#include <net/ieee80211.h>
22#include <net/ieee80211softmac.h>
23#include <net/ieee80211softmac_wx.h>
24#include <net/iw_handler.h>
25
26#include "zd_def.h"
27#include "zd_netdev.h"
28#include "zd_mac.h"
29#include "zd_ieee80211.h"
30
31/* Region 0 means reset regdomain to default. */
32static int zd_set_regdomain(struct net_device *netdev,
33 struct iw_request_info *info,
34 union iwreq_data *req, char *extra)
35{
36 const u8 *regdomain = (u8 *)req;
37 return zd_mac_set_regdomain(zd_netdev_mac(netdev), *regdomain);
38}
39
40static int zd_get_regdomain(struct net_device *netdev,
41 struct iw_request_info *info,
42 union iwreq_data *req, char *extra)
43{
44 u8 *regdomain = (u8 *)req;
45 if (!regdomain)
46 return -EINVAL;
47 *regdomain = zd_mac_get_regdomain(zd_netdev_mac(netdev));
48 return 0;
49}
50
51static const struct iw_priv_args zd_priv_args[] = {
52 {
53 .cmd = ZD_PRIV_SET_REGDOMAIN,
54 .set_args = IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
55 .name = "set_regdomain",
56 },
57 {
58 .cmd = ZD_PRIV_GET_REGDOMAIN,
59 .get_args = IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
60 .name = "get_regdomain",
61 },
62};
63
64#define PRIV_OFFSET(x) [(x)-SIOCIWFIRSTPRIV]
65
66static const iw_handler zd_priv_handler[] = {
67 PRIV_OFFSET(ZD_PRIV_SET_REGDOMAIN) = zd_set_regdomain,
68 PRIV_OFFSET(ZD_PRIV_GET_REGDOMAIN) = zd_get_regdomain,
69};
70
71static int iw_get_name(struct net_device *netdev,
72 struct iw_request_info *info,
73 union iwreq_data *req, char *extra)
74{
75	/* FIXME: check whether 802.11a will also be supported; also add
76	 * ZD1211B, if we support it.
77	 */
78 strlcpy(req->name, "802.11g zd1211", IFNAMSIZ);
79 return 0;
80}
81
82static int iw_set_freq(struct net_device *netdev,
83 struct iw_request_info *info,
84 union iwreq_data *req, char *extra)
85{
86 int r;
87 struct zd_mac *mac = zd_netdev_mac(netdev);
88 struct iw_freq *freq = &req->freq;
89 u8 channel;
90
91 r = zd_find_channel(&channel, freq);
92 if (r < 0)
93 return r;
94 r = zd_mac_request_channel(mac, channel);
95 return r;
96}
97
98static int iw_get_freq(struct net_device *netdev,
99 struct iw_request_info *info,
100 union iwreq_data *req, char *extra)
101{
102 int r;
103 struct zd_mac *mac = zd_netdev_mac(netdev);
104 struct iw_freq *freq = &req->freq;
105 u8 channel;
106 u8 flags;
107
108 r = zd_mac_get_channel(mac, &channel, &flags);
109 if (r)
110 return r;
111
112 freq->flags = (flags & MAC_FIXED_CHANNEL) ?
113 IW_FREQ_FIXED : IW_FREQ_AUTO;
114 dev_dbg_f(zd_mac_dev(mac), "channel %s\n",
115 (flags & MAC_FIXED_CHANNEL) ? "fixed" : "auto");
116 return zd_channel_to_freq(freq, channel);
117}
118
119static int iw_set_mode(struct net_device *netdev,
120 struct iw_request_info *info,
121 union iwreq_data *req, char *extra)
122{
123 return zd_mac_set_mode(zd_netdev_mac(netdev), req->mode);
124}
125
126static int iw_get_mode(struct net_device *netdev,
127 struct iw_request_info *info,
128 union iwreq_data *req, char *extra)
129{
130 return zd_mac_get_mode(zd_netdev_mac(netdev), &req->mode);
131}
132
133static int iw_get_range(struct net_device *netdev,
134 struct iw_request_info *info,
135 union iwreq_data *req, char *extra)
136{
137 struct iw_range *range = (struct iw_range *)extra;
138
139 dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), "\n");
140 req->data.length = sizeof(*range);
141 return zd_mac_get_range(zd_netdev_mac(netdev), range);
142}
143
144static int iw_set_encode(struct net_device *netdev,
145 struct iw_request_info *info,
146 union iwreq_data *data,
147 char *extra)
148{
149 return ieee80211_wx_set_encode(zd_netdev_ieee80211(netdev), info,
150 data, extra);
151}
152
153static int iw_get_encode(struct net_device *netdev,
154 struct iw_request_info *info,
155 union iwreq_data *data,
156 char *extra)
157{
158 return ieee80211_wx_get_encode(zd_netdev_ieee80211(netdev), info,
159 data, extra);
160}
161
162static int iw_set_encodeext(struct net_device *netdev,
163 struct iw_request_info *info,
164 union iwreq_data *data,
165 char *extra)
166{
167 return ieee80211_wx_set_encodeext(zd_netdev_ieee80211(netdev), info,
168 data, extra);
169}
170
171static int iw_get_encodeext(struct net_device *netdev,
172 struct iw_request_info *info,
173 union iwreq_data *data,
174 char *extra)
175{
176 return ieee80211_wx_get_encodeext(zd_netdev_ieee80211(netdev), info,
177 data, extra);
178}
179
180#define WX(x) [(x)-SIOCIWFIRST]
181
182static const iw_handler zd_standard_iw_handlers[] = {
183 WX(SIOCGIWNAME) = iw_get_name,
184 WX(SIOCSIWFREQ) = iw_set_freq,
185 WX(SIOCGIWFREQ) = iw_get_freq,
186 WX(SIOCSIWMODE) = iw_set_mode,
187 WX(SIOCGIWMODE) = iw_get_mode,
188 WX(SIOCGIWRANGE) = iw_get_range,
189 WX(SIOCSIWENCODE) = iw_set_encode,
190 WX(SIOCGIWENCODE) = iw_get_encode,
191 WX(SIOCSIWENCODEEXT) = iw_set_encodeext,
192 WX(SIOCGIWENCODEEXT) = iw_get_encodeext,
193 WX(SIOCSIWAUTH) = ieee80211_wx_set_auth,
194 WX(SIOCGIWAUTH) = ieee80211_wx_get_auth,
195 WX(SIOCSIWSCAN) = ieee80211softmac_wx_trigger_scan,
196 WX(SIOCGIWSCAN) = ieee80211softmac_wx_get_scan_results,
197 WX(SIOCSIWESSID) = ieee80211softmac_wx_set_essid,
198 WX(SIOCGIWESSID) = ieee80211softmac_wx_get_essid,
199 WX(SIOCSIWAP) = ieee80211softmac_wx_set_wap,
200 WX(SIOCGIWAP) = ieee80211softmac_wx_get_wap,
201 WX(SIOCSIWRATE) = ieee80211softmac_wx_set_rate,
202 WX(SIOCGIWRATE) = ieee80211softmac_wx_get_rate,
203 WX(SIOCSIWGENIE) = ieee80211softmac_wx_set_genie,
204 WX(SIOCGIWGENIE) = ieee80211softmac_wx_get_genie,
205 WX(SIOCSIWMLME) = ieee80211softmac_wx_set_mlme,
206};
207
208static const struct iw_handler_def iw_handler_def = {
209 .standard = zd_standard_iw_handlers,
210 .num_standard = ARRAY_SIZE(zd_standard_iw_handlers),
211 .private = zd_priv_handler,
212 .num_private = ARRAY_SIZE(zd_priv_handler),
213 .private_args = zd_priv_args,
214 .num_private_args = ARRAY_SIZE(zd_priv_args),
215 .get_wireless_stats = zd_mac_get_wireless_stats,
216};
217
218struct net_device *zd_netdev_alloc(struct usb_interface *intf)
219{
220 int r;
221 struct net_device *netdev;
222 struct zd_mac *mac;
223
224 netdev = alloc_ieee80211softmac(sizeof(struct zd_mac));
225 if (!netdev) {
226 dev_dbg_f(&intf->dev, "out of memory\n");
227 return NULL;
228 }
229
230 mac = zd_netdev_mac(netdev);
231 r = zd_mac_init(mac, netdev, intf);
232 if (r) {
233 usb_set_intfdata(intf, NULL);
234 free_ieee80211(netdev);
235 return NULL;
236 }
237
238 SET_MODULE_OWNER(netdev);
239 SET_NETDEV_DEV(netdev, &intf->dev);
240
241 dev_dbg_f(&intf->dev, "netdev->flags %#06hx\n", netdev->flags);
242 dev_dbg_f(&intf->dev, "netdev->features %#010lx\n", netdev->features);
243
244 netdev->open = zd_mac_open;
245 netdev->stop = zd_mac_stop;
246 /* netdev->get_stats = */
247 /* netdev->set_multicast_list = */
248 netdev->set_mac_address = zd_mac_set_mac_address;
249 netdev->wireless_handlers = &iw_handler_def;
250 /* netdev->ethtool_ops = */
251
252 return netdev;
253}
254
255void zd_netdev_free(struct net_device *netdev)
256{
257 if (!netdev)
258 return;
259
260 zd_mac_clear(zd_netdev_mac(netdev));
261 free_ieee80211(netdev);
262}
263
264void zd_netdev_disconnect(struct net_device *netdev)
265{
266 unregister_netdev(netdev);
267}
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.h b/drivers/net/wireless/zd1211rw/zd_netdev.h
new file mode 100644
index 000000000000..374a957073c1
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_netdev.h
@@ -0,0 +1,45 @@
1/* zd_netdev.h: Header for net device related functions.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_NETDEV_H
19#define _ZD_NETDEV_H
20
21#include <linux/usb.h>
22#include <linux/netdevice.h>
23#include <net/ieee80211.h>
24
25#define ZD_PRIV_SET_REGDOMAIN (SIOCIWFIRSTPRIV)
26#define ZD_PRIV_GET_REGDOMAIN (SIOCIWFIRSTPRIV+1)
27
28static inline struct ieee80211_device *zd_netdev_ieee80211(
29 struct net_device *ndev)
30{
31 return netdev_priv(ndev);
32}
33
34static inline struct net_device *zd_ieee80211_to_netdev(
35 struct ieee80211_device *ieee)
36{
37 return ieee->dev;
38}
39
40struct net_device *zd_netdev_alloc(struct usb_interface *intf);
41void zd_netdev_free(struct net_device *netdev);
42
43void zd_netdev_disconnect(struct net_device *netdev);
44
45#endif /* _ZD_NETDEV_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
new file mode 100644
index 000000000000..d3770d2c61bc
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -0,0 +1,151 @@
1/* zd_rf.c
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#include <linux/errno.h>
19#include <linux/string.h>
20
21#include "zd_def.h"
22#include "zd_rf.h"
23#include "zd_ieee80211.h"
24#include "zd_chip.h"
25
26static const char *rfs[] = {
27 [0] = "unknown RF0",
28 [1] = "unknown RF1",
29 [UW2451_RF] = "UW2451_RF",
30 [UCHIP_RF] = "UCHIP_RF",
31 [AL2230_RF] = "AL2230_RF",
32 [AL7230B_RF] = "AL7230B_RF",
33 [THETA_RF] = "THETA_RF",
34 [AL2210_RF] = "AL2210_RF",
35 [MAXIM_NEW_RF] = "MAXIM_NEW_RF",
36 [UW2453_RF] = "UW2453_RF",
37 [AL2230S_RF] = "AL2230S_RF",
38 [RALINK_RF] = "RALINK_RF",
39 [INTERSIL_RF] = "INTERSIL_RF",
40 [RF2959_RF] = "RF2959_RF",
41 [MAXIM_NEW2_RF] = "MAXIM_NEW2_RF",
42 [PHILIPS_RF] = "PHILIPS_RF",
43};
44
45const char *zd_rf_name(u8 type)
46{
47 if (type & 0xf0)
48 type = 0;
49 return rfs[type];
50}
51
52void zd_rf_init(struct zd_rf *rf)
53{
54 memset(rf, 0, sizeof(*rf));
55}
56
57void zd_rf_clear(struct zd_rf *rf)
58{
59 memset(rf, 0, sizeof(*rf));
60}
61
62int zd_rf_init_hw(struct zd_rf *rf, u8 type)
63{
64 int r, t;
65 struct zd_chip *chip = zd_rf_to_chip(rf);
66
67 ZD_ASSERT(mutex_is_locked(&chip->mutex));
68 switch (type) {
69 case RF2959_RF:
70 r = zd_rf_init_rf2959(rf);
71 if (r)
72 return r;
73 break;
74 case AL2230_RF:
75 r = zd_rf_init_al2230(rf);
76 if (r)
77 return r;
78 break;
79 default:
80 dev_err(zd_chip_dev(chip),
81 "RF %s %#x is not supported\n", zd_rf_name(type), type);
82 rf->type = 0;
83 return -ENODEV;
84 }
85
86 rf->type = type;
87
88 r = zd_chip_lock_phy_regs(chip);
89 if (r)
90 return r;
91 t = rf->init_hw(rf);
92 r = zd_chip_unlock_phy_regs(chip);
93 if (t)
94 r = t;
95 return r;
96}
97
98int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size)
99{
100 return scnprintf(buffer, size, "%s", zd_rf_name(rf->type));
101}
102
103int zd_rf_set_channel(struct zd_rf *rf, u8 channel)
104{
105 int r;
106
107 ZD_ASSERT(mutex_is_locked(&zd_rf_to_chip(rf)->mutex));
108 if (channel < MIN_CHANNEL24)
109 return -EINVAL;
110 if (channel > MAX_CHANNEL24)
111 return -EINVAL;
112 dev_dbg_f(zd_chip_dev(zd_rf_to_chip(rf)), "channel: %d\n", channel);
113
114 r = rf->set_channel(rf, channel);
115 if (r >= 0)
116 rf->channel = channel;
117 return r;
118}
119
120int zd_switch_radio_on(struct zd_rf *rf)
121{
122 int r, t;
123 struct zd_chip *chip = zd_rf_to_chip(rf);
124
125 ZD_ASSERT(mutex_is_locked(&chip->mutex));
126 r = zd_chip_lock_phy_regs(chip);
127 if (r)
128 return r;
129 t = rf->switch_radio_on(rf);
130 r = zd_chip_unlock_phy_regs(chip);
131 if (t)
132 r = t;
133 return r;
134}
135
136int zd_switch_radio_off(struct zd_rf *rf)
137{
138 int r, t;
139 struct zd_chip *chip = zd_rf_to_chip(rf);
140
141 /* TODO: move phy regs handling to zd_chip */
142 ZD_ASSERT(mutex_is_locked(&chip->mutex));
143 r = zd_chip_lock_phy_regs(chip);
144 if (r)
145 return r;
146 t = rf->switch_radio_off(rf);
147 r = zd_chip_unlock_phy_regs(chip);
148 if (t)
149 r = t;
150 return r;
151}
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
new file mode 100644
index 000000000000..ea30f693fcc8
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -0,0 +1,82 @@
1/* zd_rf.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_RF_H
19#define _ZD_RF_H
20
21#include "zd_types.h"
22
23#define UW2451_RF 0x2
24#define UCHIP_RF 0x3
25#define AL2230_RF 0x4
26#define AL7230B_RF 0x5 /* a,b,g */
27#define THETA_RF 0x6
28#define AL2210_RF 0x7
29#define MAXIM_NEW_RF 0x8
30#define UW2453_RF 0x9
31#define AL2230S_RF 0xa
32#define RALINK_RF 0xb
33#define INTERSIL_RF 0xc
34#define RF2959_RF 0xd
35#define MAXIM_NEW2_RF 0xe
36#define PHILIPS_RF 0xf
37
38#define RF_CHANNEL(ch) [(ch)-1]
39
40/* Provides functions of the RF transceiver. */
41
42enum {
43 RF_REG_BITS = 6,
44 RF_VALUE_BITS = 18,
45 RF_RV_BITS = RF_REG_BITS + RF_VALUE_BITS,
46};
47
48struct zd_rf {
49 u8 type;
50
51 u8 channel;
52 /*
53 * Whether this RF should patch the 6M band edge
54 * (assuming E2P_POD agrees)
55 */
56 u8 patch_6m_band_edge:1;
57
58 /* RF-specific functions */
59 int (*init_hw)(struct zd_rf *rf);
60 int (*set_channel)(struct zd_rf *rf, u8 channel);
61 int (*switch_radio_on)(struct zd_rf *rf);
62 int (*switch_radio_off)(struct zd_rf *rf);
63};
64
65const char *zd_rf_name(u8 type);
66void zd_rf_init(struct zd_rf *rf);
67void zd_rf_clear(struct zd_rf *rf);
68int zd_rf_init_hw(struct zd_rf *rf, u8 type);
69
70int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size);
71
72int zd_rf_set_channel(struct zd_rf *rf, u8 channel);
73
74int zd_switch_radio_on(struct zd_rf *rf);
75int zd_switch_radio_off(struct zd_rf *rf);
76
77/* Functions for individual RF chips */
78
79int zd_rf_init_rf2959(struct zd_rf *rf);
80int zd_rf_init_al2230(struct zd_rf *rf);
81
82#endif /* _ZD_RF_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
new file mode 100644
index 000000000000..0948b25f660d
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -0,0 +1,308 @@
1/* zd_rf_al2230.c: Functions for the AL2230 RF controller
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#include <linux/kernel.h>
19
20#include "zd_rf.h"
21#include "zd_usb.h"
22#include "zd_chip.h"
23
24static const u32 al2230_table[][3] = {
25 RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, },
26 RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, },
27 RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, },
28 RF_CHANNEL( 4) = { 0x03e790, 0x0b3331, 0x00000d, },
29 RF_CHANNEL( 5) = { 0x03f7a0, 0x033331, 0x00000d, },
30 RF_CHANNEL( 6) = { 0x03f7a0, 0x0b3331, 0x00000d, },
31 RF_CHANNEL( 7) = { 0x03e7a0, 0x033331, 0x00000d, },
32 RF_CHANNEL( 8) = { 0x03e7a0, 0x0b3331, 0x00000d, },
33 RF_CHANNEL( 9) = { 0x03f7b0, 0x033331, 0x00000d, },
34 RF_CHANNEL(10) = { 0x03f7b0, 0x0b3331, 0x00000d, },
35 RF_CHANNEL(11) = { 0x03e7b0, 0x033331, 0x00000d, },
36 RF_CHANNEL(12) = { 0x03e7b0, 0x0b3331, 0x00000d, },
37 RF_CHANNEL(13) = { 0x03f7c0, 0x033331, 0x00000d, },
38 RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, },
39};
40
41static int zd1211_al2230_init_hw(struct zd_rf *rf)
42{
43 int r;
44 struct zd_chip *chip = zd_rf_to_chip(rf);
45
46 static const struct zd_ioreq16 ioreqs[] = {
47 { CR15, 0x20 }, { CR23, 0x40 }, { CR24, 0x20 },
48 { CR26, 0x11 }, { CR28, 0x3e }, { CR29, 0x00 },
49 { CR44, 0x33 }, { CR106, 0x2a }, { CR107, 0x1a },
50 { CR109, 0x09 }, { CR110, 0x27 }, { CR111, 0x2b },
51 { CR112, 0x2b }, { CR119, 0x0a }, { CR10, 0x89 },
52		/* for newest (3rd cut) AL2230 */
53 { CR17, 0x28 },
54 { CR26, 0x93 }, { CR34, 0x30 },
55		/* for newest (3rd cut) AL2230 */
56 { CR35, 0x3e },
57 { CR41, 0x24 }, { CR44, 0x32 },
58		/* for newest (3rd cut) AL2230 */
59 { CR46, 0x96 },
60 { CR47, 0x1e }, { CR79, 0x58 }, { CR80, 0x30 },
61 { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 },
62 { CR92, 0x0a }, { CR99, 0x28 }, { CR100, 0x00 },
63 { CR101, 0x13 }, { CR102, 0x27 }, { CR106, 0x24 },
64 { CR107, 0x2a }, { CR109, 0x09 }, { CR110, 0x13 },
65 { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 },
66 { CR114, 0x27 },
67		/* for newest (3rd cut) AL2230 */
68 { CR115, 0x24 },
69 { CR116, 0x24 }, { CR117, 0xf4 }, { CR118, 0xfc },
70 { CR119, 0x10 }, { CR120, 0x4f }, { CR121, 0x77 },
71 { CR122, 0xe0 }, { CR137, 0x88 }, { CR252, 0xff },
72 { CR253, 0xff },
73
74		/* The following writes happen separately in the vendor driver */
75 { },
76
77 /* shdnb(PLL_ON)=0 */
78 { CR251, 0x2f },
79 /* shdnb(PLL_ON)=1 */
80 { CR251, 0x3f },
81 { CR138, 0x28 }, { CR203, 0x06 },
82 };
83
84 static const u32 rv[] = {
85 /* Channel 1 */
86 0x03f790,
87 0x033331,
88 0x00000d,
89
90 0x0b3331,
91 0x03b812,
92 0x00fff3,
93 0x000da4,
94 0x0f4dc5, /* fix freq shift, 0x04edc5 */
95 0x0805b6,
96 0x011687,
97 0x000688,
98 0x0403b9, /* external control TX power (CR31) */
99 0x00dbba,
100 0x00099b,
101 0x0bdffc,
102 0x00000d,
103 0x00500f,
104
105 /* These writes happen separately in the vendor driver */
106 0x00d00f,
107 0x004c0f,
108 0x00540f,
109 0x00700f,
110 0x00500f,
111 };
112
113 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
114 if (r)
115 return r;
116
117 r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
118 if (r)
119 return r;
120
121 return 0;
122}
123
124static int zd1211b_al2230_init_hw(struct zd_rf *rf)
125{
126 int r;
127 struct zd_chip *chip = zd_rf_to_chip(rf);
128
129 static const struct zd_ioreq16 ioreqs1[] = {
130 { CR10, 0x89 }, { CR15, 0x20 },
131 { CR17, 0x2B }, /* for newest(3rd cut) AL2230 */
132 { CR23, 0x40 }, { CR24, 0x20 }, { CR26, 0x93 },
133 { CR28, 0x3e }, { CR29, 0x00 },
134 { CR33, 0x28 }, /* 5621 */
135 { CR34, 0x30 },
136 { CR35, 0x3e }, /* for newest(3rd cut) AL2230 */
137 { CR41, 0x24 }, { CR44, 0x32 },
138 { CR46, 0x99 }, /* for newest(3rd cut) AL2230 */
139 { CR47, 0x1e },
140
141 /* ZD1211B 05.06.10 */
142 { CR48, 0x00 }, { CR49, 0x00 }, { CR51, 0x01 },
143 { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 },
144 { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 },
145 { CR69, 0x28 },
146
147 { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 },
148 { CR87, 0x0a }, { CR89, 0x04 },
149 { CR91, 0x00 }, /* 5621 */
150 { CR92, 0x0a },
151 { CR98, 0x8d }, /* 4804, for 1212 new algorithm */
152 { CR99, 0x00 }, /* 5621 */
153 { CR101, 0x13 }, { CR102, 0x27 },
154 { CR106, 0x24 }, /* for newest(3rd cut) AL2230 */
155 { CR107, 0x2a },
156 { CR109, 0x13 }, /* 4804, for 1212 new algorithm */
157 { CR110, 0x1f }, /* 4804, for 1212 new algorithm */
158 { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 },
159 { CR114, 0x27 },
160 { CR115, 0x26 }, /* 24->26 at 4902 for newest(3rd cut) AL2230 */
161 { CR116, 0x24 },
162 { CR117, 0xfa }, /* for 1211b */
163 { CR118, 0xfa }, /* for 1211b */
164 { CR119, 0x10 },
165 { CR120, 0x4f },
166 { CR121, 0x6c }, /* for 1211b */
167 { CR122, 0xfc }, /* E0->FC at 4902 */
168 { CR123, 0x57 }, /* 5623 */
169 { CR125, 0xad }, /* 4804, for 1212 new algorithm */
170 { CR126, 0x6c }, /* 5614 */
171 { CR127, 0x03 }, /* 4804, for 1212 new algorithm */
172 { CR137, 0x50 }, /* 5614 */
173 { CR138, 0xa8 },
174 { CR144, 0xac }, /* 5621 */
175 { CR150, 0x0d }, { CR252, 0x00 }, { CR253, 0x00 },
176 };
177
178 static const u32 rv1[] = {
179 /* channel 1 */
180 0x03f790,
181 0x033331,
182 0x00000d,
183
184 0x0b3331,
185 0x03b812,
186 0x00fff3,
187 0x0005a4,
188 0x0f4dc5, /* fix freq shift 0x044dc5 */
189 0x0805b6,
190 0x0146c7,
191 0x000688,
192 0x0403b9, /* External control TX power (CR31) */
193 0x00dbba,
194 0x00099b,
195 0x0bdffc,
196 0x00000d,
197 0x00580f,
198 };
199
200 static const struct zd_ioreq16 ioreqs2[] = {
201 { CR47, 0x1e }, { CR_RFCFG, 0x03 },
202 };
203
204 static const u32 rv2[] = {
205 0x00880f,
206 0x00080f,
207 };
208
209 static const struct zd_ioreq16 ioreqs3[] = {
210 { CR_RFCFG, 0x00 }, { CR47, 0x1e }, { CR251, 0x7f },
211 };
212
213 static const u32 rv3[] = {
214 0x00d80f,
215 0x00780f,
216 0x00580f,
217 };
218
219 static const struct zd_ioreq16 ioreqs4[] = {
220 { CR138, 0x28 }, { CR203, 0x06 },
221 };
222
223 r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1));
224 if (r)
225 return r;
226 r = zd_rfwritev_locked(chip, rv1, ARRAY_SIZE(rv1), RF_RV_BITS);
227 if (r)
228 return r;
229 r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2));
230 if (r)
231 return r;
232 r = zd_rfwritev_locked(chip, rv2, ARRAY_SIZE(rv2), RF_RV_BITS);
233 if (r)
234 return r;
235 r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3));
236 if (r)
237 return r;
238 r = zd_rfwritev_locked(chip, rv3, ARRAY_SIZE(rv3), RF_RV_BITS);
239 if (r)
240 return r;
241 return zd_iowrite16a_locked(chip, ioreqs4, ARRAY_SIZE(ioreqs4));
242}
243
244static int al2230_set_channel(struct zd_rf *rf, u8 channel)
245{
246 int r;
247 const u32 *rv = al2230_table[channel-1];
248 struct zd_chip *chip = zd_rf_to_chip(rf);
249 static const struct zd_ioreq16 ioreqs[] = {
250 { CR138, 0x28 },
251 { CR203, 0x06 },
252 };
253
254 r = zd_rfwritev_locked(chip, rv, 3, RF_RV_BITS);
255 if (r)
256 return r;
257 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
258}
259
260static int zd1211_al2230_switch_radio_on(struct zd_rf *rf)
261{
262 struct zd_chip *chip = zd_rf_to_chip(rf);
263 static const struct zd_ioreq16 ioreqs[] = {
264 { CR11, 0x00 },
265 { CR251, 0x3f },
266 };
267
268 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
269}
270
271static int zd1211b_al2230_switch_radio_on(struct zd_rf *rf)
272{
273 struct zd_chip *chip = zd_rf_to_chip(rf);
274 static const struct zd_ioreq16 ioreqs[] = {
275 { CR11, 0x00 },
276 { CR251, 0x7f },
277 };
278
279 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
280}
281
282static int al2230_switch_radio_off(struct zd_rf *rf)
283{
284 struct zd_chip *chip = zd_rf_to_chip(rf);
285 static const struct zd_ioreq16 ioreqs[] = {
286 { CR11, 0x04 },
287 { CR251, 0x2f },
288 };
289
290 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
291}
292
293int zd_rf_init_al2230(struct zd_rf *rf)
294{
295 struct zd_chip *chip = zd_rf_to_chip(rf);
296
297 rf->set_channel = al2230_set_channel;
298 rf->switch_radio_off = al2230_switch_radio_off;
299 if (chip->is_zd1211b) {
300 rf->init_hw = zd1211b_al2230_init_hw;
301 rf->switch_radio_on = zd1211b_al2230_switch_radio_on;
302 } else {
303 rf->init_hw = zd1211_al2230_init_hw;
304 rf->switch_radio_on = zd1211_al2230_switch_radio_on;
305 }
306 rf->patch_6m_band_edge = 1;
307 return 0;
308}
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
new file mode 100644
index 000000000000..58247271cc24
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -0,0 +1,279 @@
1/* zd_rf_rf2959.c: Functions for the RFMD RF2959 RF controller
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#include <linux/kernel.h>
19
20#include "zd_rf.h"
21#include "zd_usb.h"
22#include "zd_chip.h"
23
24static u32 rf2959_table[][2] = {
25 RF_CHANNEL( 1) = { 0x181979, 0x1e6666 },
26 RF_CHANNEL( 2) = { 0x181989, 0x1e6666 },
27 RF_CHANNEL( 3) = { 0x181999, 0x1e6666 },
28 RF_CHANNEL( 4) = { 0x1819a9, 0x1e6666 },
29 RF_CHANNEL( 5) = { 0x1819b9, 0x1e6666 },
30 RF_CHANNEL( 6) = { 0x1819c9, 0x1e6666 },
31 RF_CHANNEL( 7) = { 0x1819d9, 0x1e6666 },
32 RF_CHANNEL( 8) = { 0x1819e9, 0x1e6666 },
33 RF_CHANNEL( 9) = { 0x1819f9, 0x1e6666 },
34 RF_CHANNEL(10) = { 0x181a09, 0x1e6666 },
35 RF_CHANNEL(11) = { 0x181a19, 0x1e6666 },
36 RF_CHANNEL(12) = { 0x181a29, 0x1e6666 },
37 RF_CHANNEL(13) = { 0x181a39, 0x1e6666 },
38 RF_CHANNEL(14) = { 0x181a60, 0x1c0000 },
39};
40
41#if 0
42static int bits(u32 rw, int from, int to)
43{
44 rw &= ~(0xffffffffU << (to+1));
45 rw >>= from;
46 return rw;
47}
48
49static int bit(u32 rw, int bit)
50{
51 return bits(rw, bit, bit);
52}
53
54static void dump_regwrite(u32 rw)
55{
56 int reg = bits(rw, 18, 22);
57 int rw_flag = bits(rw, 23, 23);
58 PDEBUG("rf2959 %#010x reg %d rw %d", rw, reg, rw_flag);
59
60 switch (reg) {
61 case 0:
62		PDEBUG("reg0 CFG1 ref_sel %d hibernate %d rf_vco_reg_en %d"
63 " if_vco_reg_en %d if_vga_en %d",
64 bits(rw, 14, 15), bit(rw, 3), bit(rw, 2), bit(rw, 1),
65 bit(rw, 0));
66 break;
67 case 1:
68 PDEBUG("reg1 IFPLL1 pll_en1 %d kv_en1 %d vtc_en1 %d lpf1 %d"
69 " cpl1 %d pdp1 %d autocal_en1 %d ld_en1 %d ifloopr %d"
70 " ifloopc %d dac1 %d",
71 bit(rw, 17), bit(rw, 16), bit(rw, 15), bit(rw, 14),
72 bit(rw, 13), bit(rw, 12), bit(rw, 11), bit(rw, 10),
73 bits(rw, 7, 9), bits(rw, 4, 6), bits(rw, 0, 3));
74 break;
75 case 2:
76 PDEBUG("reg2 IFPLL2 n1 %d num1 %d",
77 bits(rw, 6, 17), bits(rw, 0, 5));
78 break;
79 case 3:
80 PDEBUG("reg3 IFPLL3 num %d", bits(rw, 0, 17));
81 break;
82 case 4:
83 PDEBUG("reg4 IFPLL4 dn1 %#04x ct_def1 %d kv_def1 %d",
84 bits(rw, 8, 16), bits(rw, 4, 7), bits(rw, 0, 3));
85 break;
86 case 5:
87 PDEBUG("reg5 RFPLL1 pll_en %d kv_en %d vtc_en %d lpf %d cpl %d"
88 " pdp %d autocal_en %d ld_en %d rfloopr %d rfloopc %d"
89 " dac %d",
90 bit(rw, 17), bit(rw, 16), bit(rw, 15), bit(rw, 14),
91 bit(rw, 13), bit(rw, 12), bit(rw, 11), bit(rw, 10),
92 bits(rw, 7, 9), bits(rw, 4, 6), bits(rw, 0,3));
93 break;
94 case 6:
95 PDEBUG("reg6 RFPLL2 n %d num %d",
96 bits(rw, 6, 17), bits(rw, 0, 5));
97 break;
98 case 7:
99 PDEBUG("reg7 RFPLL3 num2 %d", bits(rw, 0, 17));
100 break;
101 case 8:
102 PDEBUG("reg8 RFPLL4 dn %#06x ct_def %d kv_def %d",
103 bits(rw, 8, 16), bits(rw, 4, 7), bits(rw, 0, 3));
104 break;
105 case 9:
106 PDEBUG("reg9 CAL1 tvco %d tlock %d m_ct_value %d ld_window %d",
107 bits(rw, 13, 17), bits(rw, 8, 12), bits(rw, 3, 7),
108 bits(rw, 0, 2));
109 break;
110 case 10:
111 PDEBUG("reg10 TXRX1 rxdcfbbyps %d pcontrol %d txvgc %d"
112 " rxlpfbw %d txlpfbw %d txdiffmode %d txenmode %d"
113 " intbiasen %d tybypass %d",
114 bit(rw, 17), bits(rw, 15, 16), bits(rw, 10, 14),
115 bits(rw, 7, 9), bits(rw, 4, 6), bit(rw, 3), bit(rw, 2),
116 bit(rw, 1), bit(rw, 0));
117 break;
118 case 11:
119 PDEBUG("reg11 PCNT1 mid_bias %d p_desired %d pc_offset %d"
120 " tx_delay %d",
121 bits(rw, 15, 17), bits(rw, 9, 14), bits(rw, 3, 8),
122 bits(rw, 0, 2));
123 break;
124 case 12:
125 PDEBUG("reg12 PCNT2 max_power %d mid_power %d min_power %d",
126 bits(rw, 12, 17), bits(rw, 6, 11), bits(rw, 0, 5));
127 break;
128 case 13:
129 PDEBUG("reg13 VCOT1 rfpll vco comp %d ifpll vco comp %d"
130 " lobias %d if_biasbuf %d if_biasvco %d rf_biasbuf %d"
131 " rf_biasvco %d",
132 bit(rw, 17), bit(rw, 16), bit(rw, 15),
133 bits(rw, 8, 9), bits(rw, 5, 7), bits(rw, 3, 4),
134 bits(rw, 0, 2));
135 break;
136 case 14:
137 PDEBUG("reg14 IQCAL rx_acal %d rx_pcal %d"
138 " tx_acal %d tx_pcal %d",
139 bits(rw, 13, 17), bits(rw, 9, 12), bits(rw, 4, 8),
140 bits(rw, 0, 3));
141 break;
142 }
143}
144#endif /* 0 */
145
146static int rf2959_init_hw(struct zd_rf *rf)
147{
148 int r;
149 struct zd_chip *chip = zd_rf_to_chip(rf);
150
151 static const struct zd_ioreq16 ioreqs[] = {
152 { CR2, 0x1E }, { CR9, 0x20 }, { CR10, 0x89 },
153 { CR11, 0x00 }, { CR15, 0xD0 }, { CR17, 0x68 },
154 { CR19, 0x4a }, { CR20, 0x0c }, { CR21, 0x0E },
155 { CR23, 0x48 },
156 /* normal size for cca threshold */
157 { CR24, 0x14 },
158 /* { CR24, 0x20 }, */
159 { CR26, 0x90 }, { CR27, 0x30 }, { CR29, 0x20 },
160 { CR31, 0xb2 }, { CR32, 0x43 }, { CR33, 0x28 },
161 { CR38, 0x30 }, { CR34, 0x0f }, { CR35, 0xF0 },
162 { CR41, 0x2a }, { CR46, 0x7F }, { CR47, 0x1E },
163 { CR51, 0xc5 }, { CR52, 0xc5 }, { CR53, 0xc5 },
164 { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 },
165 { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 },
166 { CR85, 0x00 }, { CR86, 0x10 }, { CR87, 0x2A },
167 { CR88, 0x10 }, { CR89, 0x24 }, { CR90, 0x18 },
168 /* { CR91, 0x18 }, */
169		/* should solve continuous CTS frame problems */
170 { CR91, 0x00 },
171 { CR92, 0x0a }, { CR93, 0x00 }, { CR94, 0x01 },
172 { CR95, 0x00 }, { CR96, 0x40 }, { CR97, 0x37 },
173 { CR98, 0x05 }, { CR99, 0x28 }, { CR100, 0x00 },
174 { CR101, 0x13 }, { CR102, 0x27 }, { CR103, 0x27 },
175 { CR104, 0x18 }, { CR105, 0x12 },
176 /* normal size */
177 { CR106, 0x1a },
178 /* { CR106, 0x22 }, */
179 { CR107, 0x24 }, { CR108, 0x0a }, { CR109, 0x13 },
180 { CR110, 0x2F }, { CR111, 0x27 }, { CR112, 0x27 },
181 { CR113, 0x27 }, { CR114, 0x27 }, { CR115, 0x40 },
182 { CR116, 0x40 }, { CR117, 0xF0 }, { CR118, 0xF0 },
183 { CR119, 0x16 },
184 /* no TX continuation */
185 { CR122, 0x00 },
186 /* { CR122, 0xff }, */
187 { CR127, 0x03 }, { CR131, 0x08 }, { CR138, 0x28 },
188 { CR148, 0x44 }, { CR150, 0x10 }, { CR169, 0xBB },
189 { CR170, 0xBB },
190 };
191
192 static const u32 rv[] = {
193 0x000007, /* REG0(CFG1) */
194 0x07dd43, /* REG1(IFPLL1) */
195 0x080959, /* REG2(IFPLL2) */
196 0x0e6666,
197 0x116a57, /* REG4 */
198 0x17dd43, /* REG5 */
199 0x1819f9, /* REG6 */
200 0x1e6666,
201 0x214554,
202 0x25e7fa,
203 0x27fffa,
204 /* The Zydas driver somehow forgets to set this value. It's
205 * only set for Japan. We are using internal power control
206 * for now.
207 */
208 0x294128, /* internal power */
209 /* 0x28252c, */ /* External control TX power */
210 /* CR31_CCK, CR51_6-36M, CR52_48M, CR53_54M */
211 0x2c0000,
212 0x300000,
213 0x340000, /* REG13(0xD) */
214 0x381e0f, /* REG14(0xE) */
215		/* Bogus: the RF2959 data sheet does not document register 27, which is
216		 * what is actually referenced here. The commented 0x11 is 17 decimal.
217		 */
218 0x6c180f, /* REG27(0x11) */
219 };
220
221 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
222 if (r)
223 return r;
224
225 return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
226}
227
228static int rf2959_set_channel(struct zd_rf *rf, u8 channel)
229{
230 int i, r;
231 u32 *rv = rf2959_table[channel-1];
232 struct zd_chip *chip = zd_rf_to_chip(rf);
233
234 for (i = 0; i < 2; i++) {
235 r = zd_rfwrite_locked(chip, rv[i], RF_RV_BITS);
236 if (r)
237 return r;
238 }
239 return 0;
240}
241
242static int rf2959_switch_radio_on(struct zd_rf *rf)
243{
244 static const struct zd_ioreq16 ioreqs[] = {
245 { CR10, 0x89 },
246 { CR11, 0x00 },
247 };
248 struct zd_chip *chip = zd_rf_to_chip(rf);
249
250 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
251}
252
253static int rf2959_switch_radio_off(struct zd_rf *rf)
254{
255 static const struct zd_ioreq16 ioreqs[] = {
256 { CR10, 0x15 },
257 { CR11, 0x81 },
258 };
259 struct zd_chip *chip = zd_rf_to_chip(rf);
260
261 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
262}
263
264int zd_rf_init_rf2959(struct zd_rf *rf)
265{
266 struct zd_chip *chip = zd_rf_to_chip(rf);
267
268 if (chip->is_zd1211b) {
269 dev_err(zd_chip_dev(chip),
270 "RF2959 is currently not supported for ZD1211B"
271 " devices\n");
272 return -ENODEV;
273 }
274 rf->init_hw = rf2959_init_hw;
275 rf->set_channel = rf2959_set_channel;
276 rf->switch_radio_on = rf2959_switch_radio_on;
277 rf->switch_radio_off = rf2959_switch_radio_off;
278 return 0;
279}
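
Both RF drivers above expose the same ops-table pattern: the zd_rf_init_* functions only fill
in function pointers on struct zd_rf, and the chip layer invokes them later. The following is a
minimal sketch of how such an ops table might be driven; the wrapper function is hypothetical
and not part of the driver.

    #include "zd_rf.h"

    /* Sketch only: driving the zd_rf ops installed by zd_rf_init_rf2959() or
     * zd_rf_init_al2230(). The wrapper name is hypothetical.
     */
    static int example_rf_bring_up(struct zd_rf *rf, u8 channel)
    {
    	int r;

    	r = rf->init_hw(rf);		/* program CR and RF registers */
    	if (r)
    		return r;
    	r = rf->set_channel(rf, channel);
    	if (r)
    		return r;
    	return rf->switch_radio_on(rf);
    }
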
diff --git a/drivers/net/wireless/zd1211rw/zd_types.h b/drivers/net/wireless/zd1211rw/zd_types.h
new file mode 100644
index 000000000000..0155a1584ed3
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_types.h
@@ -0,0 +1,71 @@
1/* zd_types.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_TYPES_H
19#define _ZD_TYPES_H
20
21#include <linux/types.h>
22
23/* We have three register spaces mapped into the overall USB address space of
24 * 64K words (16-bit values). There is the control register space of
25 * double-word registers, the eeprom register space and the firmware register
26 * space. The control register space is byte mapped, the others are word
27 * mapped.
28 *
29 * For that reason, we are using byte offsets for control registers and word
30 * offsets for everything else.
31 */
32
33typedef u32 __nocast zd_addr_t;
34
35enum {
36 ADDR_BASE_MASK = 0xff000000,
37 ADDR_OFFSET_MASK = 0x0000ffff,
38 ADDR_ZERO_MASK = 0x00ff0000,
39 NULL_BASE = 0x00000000,
40 USB_BASE = 0x01000000,
41 CR_BASE = 0x02000000,
42 CR_MAX_OFFSET = 0x0b30,
43 E2P_BASE = 0x03000000,
44 E2P_MAX_OFFSET = 0x007e,
45 FW_BASE = 0x04000000,
46 FW_MAX_OFFSET = 0x0005,
47};
48
49#define ZD_ADDR_BASE(addr) ((u32)(addr) & ADDR_BASE_MASK)
50#define ZD_OFFSET(addr) ((u32)(addr) & ADDR_OFFSET_MASK)
51
52#define ZD_ADDR(base, offset) \
53 ((zd_addr_t)(((base) & ADDR_BASE_MASK) | ((offset) & ADDR_OFFSET_MASK)))
54
55#define ZD_NULL_ADDR ((zd_addr_t)0)
56#define USB_REG(offset) ZD_ADDR(USB_BASE, offset) /* word addressing */
57#define CTL_REG(offset) ZD_ADDR(CR_BASE, offset) /* byte addressing */
58#define E2P_REG(offset) ZD_ADDR(E2P_BASE, offset) /* word addressing */
59#define FW_REG(offset) ZD_ADDR(FW_BASE, offset) /* word addressing */
60
61static inline zd_addr_t zd_inc_word(zd_addr_t addr)
62{
63 u32 base = ZD_ADDR_BASE(addr);
64 u32 offset = ZD_OFFSET(addr);
65
66 offset += base == CR_BASE ? 2 : 1;
67
68 return base | offset;
69}
70
71#endif /* _ZD_TYPES_H */
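
The macros above encode the register space in the top byte of a zd_addr_t and the offset in the
low 16 bits; control registers are byte mapped while the other spaces are word mapped. A small
sketch, with made-up register offsets, of how addresses compose and how zd_inc_word() steps
through each space:

    #include "zd_types.h"

    /* Illustration only: the register offsets below are arbitrary. */
    static void example_addresses(void)
    {
    	zd_addr_t a = CTL_REG(0x0010);	/* control space, byte offsets */
    	zd_addr_t e = E2P_REG(0x0004);	/* eeprom space, word offsets  */

    	/* ZD_ADDR_BASE(a) == CR_BASE, ZD_OFFSET(a) == 0x0010 */
    	a = zd_inc_word(a);	/* byte-mapped CR space: offset becomes 0x0012 */
    	e = zd_inc_word(e);	/* word-mapped E2P space: offset becomes 0x0005 */
    	(void)a;
    	(void)e;
    }
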
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
new file mode 100644
index 000000000000..ce1cb2c6aa8d
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -0,0 +1,1316 @@
1/* zd_usb.c
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#include <asm/unaligned.h>
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/firmware.h>
22#include <linux/device.h>
23#include <linux/errno.h>
24#include <linux/skbuff.h>
25#include <linux/usb.h>
26#include <net/ieee80211.h>
27
28#include "zd_def.h"
29#include "zd_netdev.h"
30#include "zd_mac.h"
31#include "zd_usb.h"
32#include "zd_util.h"
33
34static struct usb_device_id usb_ids[] = {
35 /* ZD1211 */
36 { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
37 { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
38 { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
39 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
40 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
41 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
42 /* ZD1211B */
43 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
44 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
45 {}
46};
47
48MODULE_LICENSE("GPL");
49MODULE_DESCRIPTION("USB driver for devices with the ZD1211 chip.");
50MODULE_AUTHOR("Ulrich Kunitz");
51MODULE_AUTHOR("Daniel Drake");
52MODULE_VERSION("1.0");
53MODULE_DEVICE_TABLE(usb, usb_ids);
54
55#define FW_ZD1211_PREFIX "zd1211/zd1211_"
56#define FW_ZD1211B_PREFIX "zd1211/zd1211b_"
57
58/* register address handling */
59
60#ifdef DEBUG
61static int check_addr(struct zd_usb *usb, zd_addr_t addr)
62{
63 u32 base = ZD_ADDR_BASE(addr);
64 u32 offset = ZD_OFFSET(addr);
65
66 if ((u32)addr & ADDR_ZERO_MASK)
67 goto invalid_address;
68 switch (base) {
69 case USB_BASE:
70 break;
71 case CR_BASE:
72 if (offset > CR_MAX_OFFSET) {
73 dev_dbg(zd_usb_dev(usb),
74 "CR offset %#010x larger than"
75				" CR_MAX_OFFSET %#010x\n",
76 offset, CR_MAX_OFFSET);
77 goto invalid_address;
78 }
79 if (offset & 1) {
80 dev_dbg(zd_usb_dev(usb),
81 "CR offset %#010x is not a multiple of 2\n",
82 offset);
83 goto invalid_address;
84 }
85 break;
86 case E2P_BASE:
87 if (offset > E2P_MAX_OFFSET) {
88 dev_dbg(zd_usb_dev(usb),
89 "E2P offset %#010x larger than"
90 " E2P_MAX_OFFSET %#010x\n",
91 offset, E2P_MAX_OFFSET);
92 goto invalid_address;
93 }
94 break;
95 case FW_BASE:
96 if (!usb->fw_base_offset) {
97 dev_dbg(zd_usb_dev(usb),
98 "ERROR: fw base offset has not been set\n");
99 return -EAGAIN;
100 }
101 if (offset > FW_MAX_OFFSET) {
102 dev_dbg(zd_usb_dev(usb),
103				"FW offset %#010x is larger than"
104 " FW_MAX_OFFSET %#010x\n",
105 offset, FW_MAX_OFFSET);
106 goto invalid_address;
107 }
108 break;
109 default:
110 dev_dbg(zd_usb_dev(usb),
111 "address has unsupported base %#010x\n", addr);
112 goto invalid_address;
113 }
114
115 return 0;
116invalid_address:
117 dev_dbg(zd_usb_dev(usb),
118 "ERROR: invalid address: %#010x\n", addr);
119 return -EINVAL;
120}
121#endif /* DEBUG */
122
123static u16 usb_addr(struct zd_usb *usb, zd_addr_t addr)
124{
125 u32 base;
126 u16 offset;
127
128 base = ZD_ADDR_BASE(addr);
129 offset = ZD_OFFSET(addr);
130
131 ZD_ASSERT(check_addr(usb, addr) == 0);
132
133 switch (base) {
134 case CR_BASE:
135 offset += CR_BASE_OFFSET;
136 break;
137 case E2P_BASE:
138 offset += E2P_BASE_OFFSET;
139 break;
140 case FW_BASE:
141 offset += usb->fw_base_offset;
142 break;
143 }
144
145 return offset;
146}
147
148/* USB device initialization */
149
150static int request_fw_file(
151 const struct firmware **fw, const char *name, struct device *device)
152{
153 int r;
154
155 dev_dbg_f(device, "fw name %s\n", name);
156
157 r = request_firmware(fw, name, device);
158 if (r)
159 dev_err(device,
160 "Could not load firmware file %s. Error number %d\n",
161 name, r);
162 return r;
163}
164
165static inline u16 get_bcdDevice(const struct usb_device *udev)
166{
167 return le16_to_cpu(udev->descriptor.bcdDevice);
168}
169
170enum upload_code_flags {
171 REBOOT = 1,
172};
173
174/* Ensures that MAX_TRANSFER_SIZE is even. */
175#define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1)
176
177static int upload_code(struct usb_device *udev,
178 const u8 *data, size_t size, u16 code_offset, int flags)
179{
180 u8 *p;
181 int r;
182
183 /* USB request blocks need "kmalloced" buffers.
184 */
185 p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL);
186 if (!p) {
187 dev_err(&udev->dev, "out of memory\n");
188 r = -ENOMEM;
189 goto error;
190 }
191
192 size &= ~1;
193 while (size > 0) {
194 size_t transfer_size = size <= MAX_TRANSFER_SIZE ?
195 size : MAX_TRANSFER_SIZE;
196
197 dev_dbg_f(&udev->dev, "transfer size %zu\n", transfer_size);
198
199 memcpy(p, data, transfer_size);
200 r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
201 USB_REQ_FIRMWARE_DOWNLOAD,
202 USB_DIR_OUT | USB_TYPE_VENDOR,
203 code_offset, 0, p, transfer_size, 1000 /* ms */);
204 if (r < 0) {
205 dev_err(&udev->dev,
206 "USB control request for firmware upload"
207 " failed. Error number %d\n", r);
208 goto error;
209 }
210 transfer_size = r & ~1;
211
212 size -= transfer_size;
213 data += transfer_size;
214 code_offset += transfer_size/sizeof(u16);
215 }
216
217 if (flags & REBOOT) {
218 u8 ret;
219
220 r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
221 USB_REQ_FIRMWARE_CONFIRM,
222 USB_DIR_IN | USB_TYPE_VENDOR,
223 0, 0, &ret, sizeof(ret), 5000 /* ms */);
224 if (r != sizeof(ret)) {
225 dev_err(&udev->dev,
226				"control request firmware confirmation failed."
227 " Return value %d\n", r);
228 if (r >= 0)
229 r = -ENODEV;
230 goto error;
231 }
232 if (ret & 0x80) {
233 dev_err(&udev->dev,
234 "Internal error while downloading."
235 " Firmware confirm return value %#04x\n",
236 (unsigned int)ret);
237 r = -ENODEV;
238 goto error;
239 }
240 dev_dbg_f(&udev->dev, "firmware confirm return value %#04x\n",
241 (unsigned int)ret);
242 }
243
244 r = 0;
245error:
246 kfree(p);
247 return r;
248}
249
250static u16 get_word(const void *data, u16 offset)
251{
252 const __le16 *p = data;
253 return le16_to_cpu(p[offset]);
254}
255
256static char *get_fw_name(char *buffer, size_t size, u8 device_type,
257 const char* postfix)
258{
259 scnprintf(buffer, size, "%s%s",
260 device_type == DEVICE_ZD1211B ?
261 FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX,
262 postfix);
263 return buffer;
264}
265
266static int upload_firmware(struct usb_device *udev, u8 device_type)
267{
268 int r;
269 u16 fw_bcdDevice;
270 u16 bcdDevice;
271 const struct firmware *ub_fw = NULL;
272 const struct firmware *uph_fw = NULL;
273 char fw_name[128];
274
275 bcdDevice = get_bcdDevice(udev);
276
277 r = request_fw_file(&ub_fw,
278 get_fw_name(fw_name, sizeof(fw_name), device_type, "ub"),
279 &udev->dev);
280 if (r)
281 goto error;
282
283 fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET);
284
285 /* FIXME: do we have any reason to perform the kludge that the vendor
286 * driver does when there is a version mismatch? (their driver uploads
287	 * different firmware images in that case)
288 */
289 if (fw_bcdDevice != bcdDevice) {
290 dev_info(&udev->dev,
291 "firmware device id %#06x and actual device id "
292 "%#06x differ, continuing anyway\n",
293 fw_bcdDevice, bcdDevice);
294 } else {
295 dev_dbg_f(&udev->dev,
296 "firmware device id %#06x is equal to the "
297 "actual device id\n", fw_bcdDevice);
298 }
299
300
301 r = request_fw_file(&uph_fw,
302 get_fw_name(fw_name, sizeof(fw_name), device_type, "uphr"),
303 &udev->dev);
304 if (r)
305 goto error;
306
307 r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START_OFFSET,
308 REBOOT);
309 if (r) {
310 dev_err(&udev->dev,
311 "Could not upload firmware code uph. Error number %d\n",
312 r);
313 }
314
315 /* FALL-THROUGH */
316error:
317 release_firmware(ub_fw);
318 release_firmware(uph_fw);
319 return r;
320}
321
322static void disable_read_regs_int(struct zd_usb *usb)
323{
324 struct zd_usb_interrupt *intr = &usb->intr;
325
326 ZD_ASSERT(in_interrupt());
327 spin_lock(&intr->lock);
328 intr->read_regs_enabled = 0;
329 spin_unlock(&intr->lock);
330}
331
332#define urb_dev(urb) (&(urb)->dev->dev)
333
334static inline void handle_regs_int(struct urb *urb)
335{
336 struct zd_usb *usb = urb->context;
337 struct zd_usb_interrupt *intr = &usb->intr;
338 int len;
339
340 ZD_ASSERT(in_interrupt());
341 spin_lock(&intr->lock);
342
343 if (intr->read_regs_enabled) {
344 intr->read_regs.length = len = urb->actual_length;
345
346 if (len > sizeof(intr->read_regs.buffer))
347 len = sizeof(intr->read_regs.buffer);
348 memcpy(intr->read_regs.buffer, urb->transfer_buffer, len);
349 intr->read_regs_enabled = 0;
350 complete(&intr->read_regs.completion);
351 goto out;
352 }
353
354 dev_dbg_f(urb_dev(urb), "regs interrupt ignored\n");
355out:
356 spin_unlock(&intr->lock);
357}
358
359static inline void handle_retry_failed_int(struct urb *urb)
360{
361 dev_dbg_f(urb_dev(urb), "retry failed interrupt\n");
362}
363
364
365static void int_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
366{
367 int r;
368 struct usb_int_header *hdr;
369
370 switch (urb->status) {
371 case 0:
372 break;
373 case -ESHUTDOWN:
374 case -EINVAL:
375 case -ENODEV:
376 case -ENOENT:
377 case -ECONNRESET:
378 goto kfree;
379 case -EPIPE:
380 usb_clear_halt(urb->dev, EP_INT_IN);
381 /* FALL-THROUGH */
382 default:
383 goto resubmit;
384 }
385
386	if (urb->actual_length < sizeof(*hdr)) {
387		dev_dbg_f(urb_dev(urb), "error: urb %p too small\n", urb);
388 goto resubmit;
389 }
390
391 hdr = urb->transfer_buffer;
392 if (hdr->type != USB_INT_TYPE) {
393 dev_dbg_f(urb_dev(urb), "error: urb %p wrong type\n", urb);
394 goto resubmit;
395 }
396
397 switch (hdr->id) {
398 case USB_INT_ID_REGS:
399 handle_regs_int(urb);
400 break;
401 case USB_INT_ID_RETRY_FAILED:
402 handle_retry_failed_int(urb);
403 break;
404 default:
405 dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb,
406 (unsigned int)hdr->id);
407 goto resubmit;
408 }
409
410resubmit:
411 r = usb_submit_urb(urb, GFP_ATOMIC);
412 if (r) {
413		dev_dbg_f(urb_dev(urb), "error: resubmit urb %p failed\n", urb);
414 goto kfree;
415 }
416 return;
417kfree:
418 kfree(urb->transfer_buffer);
419}
420
421static inline int int_urb_interval(struct usb_device *udev)
422{
423 switch (udev->speed) {
424 case USB_SPEED_HIGH:
425 return 4;
426 case USB_SPEED_LOW:
427 return 10;
428 case USB_SPEED_FULL:
429 default:
430 return 1;
431 }
432}
433
434static inline int usb_int_enabled(struct zd_usb *usb)
435{
436 unsigned long flags;
437 struct zd_usb_interrupt *intr = &usb->intr;
438 struct urb *urb;
439
440 spin_lock_irqsave(&intr->lock, flags);
441 urb = intr->urb;
442 spin_unlock_irqrestore(&intr->lock, flags);
443 return urb != NULL;
444}
445
446int zd_usb_enable_int(struct zd_usb *usb)
447{
448 int r;
449 struct usb_device *udev;
450 struct zd_usb_interrupt *intr = &usb->intr;
451 void *transfer_buffer = NULL;
452 struct urb *urb;
453
454 dev_dbg_f(zd_usb_dev(usb), "\n");
455
456 urb = usb_alloc_urb(0, GFP_NOFS);
457 if (!urb) {
458 r = -ENOMEM;
459 goto out;
460 }
461
462 ZD_ASSERT(!irqs_disabled());
463 spin_lock_irq(&intr->lock);
464 if (intr->urb) {
465 spin_unlock_irq(&intr->lock);
466 r = 0;
467 goto error_free_urb;
468 }
469 intr->urb = urb;
470 spin_unlock_irq(&intr->lock);
471
472 /* TODO: make it a DMA buffer */
473 r = -ENOMEM;
474 transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_NOFS);
475 if (!transfer_buffer) {
476 dev_dbg_f(zd_usb_dev(usb),
477 "couldn't allocate transfer_buffer\n");
478 goto error_set_urb_null;
479 }
480
481 udev = zd_usb_to_usbdev(usb);
482 usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
483 transfer_buffer, USB_MAX_EP_INT_BUFFER,
484 int_urb_complete, usb,
485 intr->interval);
486
487 dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
488 r = usb_submit_urb(urb, GFP_NOFS);
489 if (r) {
490 dev_dbg_f(zd_usb_dev(usb),
491 "Couldn't submit urb. Error number %d\n", r);
492 goto error;
493 }
494
495 return 0;
496error:
497 kfree(transfer_buffer);
498error_set_urb_null:
499 spin_lock_irq(&intr->lock);
500 intr->urb = NULL;
501 spin_unlock_irq(&intr->lock);
502error_free_urb:
503 usb_free_urb(urb);
504out:
505 return r;
506}
507
508void zd_usb_disable_int(struct zd_usb *usb)
509{
510 unsigned long flags;
511 struct zd_usb_interrupt *intr = &usb->intr;
512 struct urb *urb;
513
514 spin_lock_irqsave(&intr->lock, flags);
515 urb = intr->urb;
516 if (!urb) {
517 spin_unlock_irqrestore(&intr->lock, flags);
518 return;
519 }
520 intr->urb = NULL;
521 spin_unlock_irqrestore(&intr->lock, flags);
522
523 usb_kill_urb(urb);
524 dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
525 usb_free_urb(urb);
526}
527
528static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
529 unsigned int length)
530{
531 int i;
532 struct zd_mac *mac = zd_usb_to_mac(usb);
533 const struct rx_length_info *length_info;
534
535 if (length < sizeof(struct rx_length_info)) {
536 /* It's not a complete packet anyhow. */
537 return;
538 }
539 length_info = (struct rx_length_info *)
540 (buffer + length - sizeof(struct rx_length_info));
541
542	/* Up to three frames might be merged into a single URB
543	 * transaction. We have to check for the length info tag.
544 *
545 * While testing we discovered that length_info might be unaligned,
546 * because if USB transactions are merged, the last packet will not
547 * be padded. Unaligned access might also happen if the length_info
548 * structure is not present.
549 */
550 if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) {
551 unsigned int l, k, n;
552 for (i = 0, l = 0;; i++) {
553 k = le16_to_cpu(get_unaligned(
554 &length_info->length[i]));
555 n = l+k;
556 if (n > length)
557 return;
558 zd_mac_rx(mac, buffer+l, k);
559 if (i >= 2)
560 return;
561 l = (n+3) & ~3;
562 }
563 } else {
564 zd_mac_rx(mac, buffer, length);
565 }
566}
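
As the comment in handle_rx_packet() notes, up to three frames can share one URB, each but the
last padded to a multiple of four bytes, with the rx_length_info trailer at the end. A worked
sketch of the offset arithmetic, using invented frame lengths:

    /* Illustration only: two merged frames of 70 and 121 bytes. The offsets
     * mirror the loop in handle_rx_packet() above.
     */
    static void example_merged_rx_offsets(void)
    {
    	unsigned int len[2] = { 70, 121 };
    	unsigned int i, l = 0, k, n;

    	for (i = 0; i < 2; i++) {
    		k = len[i];
    		n = l + k;		/* frame i occupies buffer[l..n-1] */
    		l = (n + 3) & ~3;	/* next frame starts 4-byte aligned:
    					 * 72 after frame 0, 196 after frame 1 */
    	}
    	(void)l;
    }
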
567
568static void rx_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
569{
570 struct zd_usb *usb;
571 struct zd_usb_rx *rx;
572 const u8 *buffer;
573 unsigned int length;
574
575 switch (urb->status) {
576 case 0:
577 break;
578 case -ESHUTDOWN:
579 case -EINVAL:
580 case -ENODEV:
581 case -ENOENT:
582 case -ECONNRESET:
583 return;
584 case -EPIPE:
585 usb_clear_halt(urb->dev, EP_DATA_IN);
586 /* FALL-THROUGH */
587 default:
588 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
589 goto resubmit;
590 }
591
592 buffer = urb->transfer_buffer;
593 length = urb->actual_length;
594 usb = urb->context;
595 rx = &usb->rx;
596
597 if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
598 /* If there is an old first fragment, we don't care. */
599 dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
600 ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
601 spin_lock(&rx->lock);
602 memcpy(rx->fragment, buffer, length);
603 rx->fragment_length = length;
604 spin_unlock(&rx->lock);
605 goto resubmit;
606 }
607
608 spin_lock(&rx->lock);
609 if (rx->fragment_length > 0) {
610 /* We are on a second fragment, we believe */
611 ZD_ASSERT(length + rx->fragment_length <=
612 ARRAY_SIZE(rx->fragment));
613 dev_dbg_f(urb_dev(urb), "*** second fragment ***\n");
614 memcpy(rx->fragment+rx->fragment_length, buffer, length);
615 handle_rx_packet(usb, rx->fragment,
616 rx->fragment_length + length);
617 rx->fragment_length = 0;
618 spin_unlock(&rx->lock);
619 } else {
620 spin_unlock(&rx->lock);
621 handle_rx_packet(usb, buffer, length);
622 }
623
624resubmit:
625 usb_submit_urb(urb, GFP_ATOMIC);
626}
627
628struct urb *alloc_urb(struct zd_usb *usb)
629{
630 struct usb_device *udev = zd_usb_to_usbdev(usb);
631 struct urb *urb;
632 void *buffer;
633
634 urb = usb_alloc_urb(0, GFP_NOFS);
635 if (!urb)
636 return NULL;
637 buffer = usb_buffer_alloc(udev, USB_MAX_RX_SIZE, GFP_NOFS,
638 &urb->transfer_dma);
639 if (!buffer) {
640 usb_free_urb(urb);
641 return NULL;
642 }
643
644 usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
645 buffer, USB_MAX_RX_SIZE,
646 rx_urb_complete, usb);
647 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
648
649 return urb;
650}
651
652void free_urb(struct urb *urb)
653{
654 if (!urb)
655 return;
656 usb_buffer_free(urb->dev, urb->transfer_buffer_length,
657 urb->transfer_buffer, urb->transfer_dma);
658 usb_free_urb(urb);
659}
660
661int zd_usb_enable_rx(struct zd_usb *usb)
662{
663 int i, r;
664 struct zd_usb_rx *rx = &usb->rx;
665 struct urb **urbs;
666
667 dev_dbg_f(zd_usb_dev(usb), "\n");
668
669 r = -ENOMEM;
670 urbs = kcalloc(URBS_COUNT, sizeof(struct urb *), GFP_NOFS);
671 if (!urbs)
672 goto error;
673 for (i = 0; i < URBS_COUNT; i++) {
674 urbs[i] = alloc_urb(usb);
675 if (!urbs[i])
676 goto error;
677 }
678
679 ZD_ASSERT(!irqs_disabled());
680 spin_lock_irq(&rx->lock);
681 if (rx->urbs) {
682 spin_unlock_irq(&rx->lock);
683 r = 0;
684 goto error;
685 }
686 rx->urbs = urbs;
687 rx->urbs_count = URBS_COUNT;
688 spin_unlock_irq(&rx->lock);
689
690 for (i = 0; i < URBS_COUNT; i++) {
691 r = usb_submit_urb(urbs[i], GFP_NOFS);
692 if (r)
693 goto error_submit;
694 }
695
696 return 0;
697error_submit:
698 for (i = 0; i < URBS_COUNT; i++) {
699 usb_kill_urb(urbs[i]);
700 }
701 spin_lock_irq(&rx->lock);
702 rx->urbs = NULL;
703 rx->urbs_count = 0;
704 spin_unlock_irq(&rx->lock);
705error:
706 if (urbs) {
707 for (i = 0; i < URBS_COUNT; i++)
708 free_urb(urbs[i]);
709 }
710 return r;
711}
712
713void zd_usb_disable_rx(struct zd_usb *usb)
714{
715 int i;
716 unsigned long flags;
717 struct urb **urbs;
718 unsigned int count;
719 struct zd_usb_rx *rx = &usb->rx;
720
721 spin_lock_irqsave(&rx->lock, flags);
722 urbs = rx->urbs;
723 count = rx->urbs_count;
724 spin_unlock_irqrestore(&rx->lock, flags);
725 if (!urbs)
726 return;
727
728 for (i = 0; i < count; i++) {
729 usb_kill_urb(urbs[i]);
730 free_urb(urbs[i]);
731 }
732 kfree(urbs);
733
734 spin_lock_irqsave(&rx->lock, flags);
735 rx->urbs = NULL;
736 rx->urbs_count = 0;
737 spin_unlock_irqrestore(&rx->lock, flags);
738}
739
740static void tx_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
741{
742 int r;
743
744 switch (urb->status) {
745 case 0:
746 break;
747 case -ESHUTDOWN:
748 case -EINVAL:
749 case -ENODEV:
750 case -ENOENT:
751 case -ECONNRESET:
752 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
753 break;
754 case -EPIPE:
755 usb_clear_halt(urb->dev, EP_DATA_OUT);
756 /* FALL-THROUGH */
757 default:
758 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
759 goto resubmit;
760 }
761free_urb:
762 usb_buffer_free(urb->dev, urb->transfer_buffer_length,
763 urb->transfer_buffer, urb->transfer_dma);
764 usb_free_urb(urb);
765 return;
766resubmit:
767 r = usb_submit_urb(urb, GFP_ATOMIC);
768 if (r) {
769 dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
770 goto free_urb;
771 }
772}
773
774/* Puts the frame on the USB endpoint. It doesn't wait for
775 * completion. The frame must contain the control set.
776 */
777int zd_usb_tx(struct zd_usb *usb, const u8 *frame, unsigned int length)
778{
779 int r;
780 struct usb_device *udev = zd_usb_to_usbdev(usb);
781 struct urb *urb;
782 void *buffer;
783
784 urb = usb_alloc_urb(0, GFP_ATOMIC);
785 if (!urb) {
786 r = -ENOMEM;
787 goto out;
788 }
789
790 buffer = usb_buffer_alloc(zd_usb_to_usbdev(usb), length, GFP_ATOMIC,
791 &urb->transfer_dma);
792 if (!buffer) {
793 r = -ENOMEM;
794 goto error_free_urb;
795 }
796 memcpy(buffer, frame, length);
797
798 usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
799 buffer, length, tx_urb_complete, NULL);
800 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
801
802 r = usb_submit_urb(urb, GFP_ATOMIC);
803 if (r)
804 goto error;
805 return 0;
806error:
807 usb_buffer_free(zd_usb_to_usbdev(usb), length, buffer,
808 urb->transfer_dma);
809error_free_urb:
810 usb_free_urb(urb);
811out:
812 return r;
813}
814
815static inline void init_usb_interrupt(struct zd_usb *usb)
816{
817 struct zd_usb_interrupt *intr = &usb->intr;
818
819 spin_lock_init(&intr->lock);
820 intr->interval = int_urb_interval(zd_usb_to_usbdev(usb));
821 init_completion(&intr->read_regs.completion);
822 intr->read_regs.cr_int_addr = cpu_to_le16(usb_addr(usb, CR_INTERRUPT));
823}
824
825static inline void init_usb_rx(struct zd_usb *usb)
826{
827 struct zd_usb_rx *rx = &usb->rx;
828 spin_lock_init(&rx->lock);
829 if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
830 rx->usb_packet_size = 512;
831 } else {
832 rx->usb_packet_size = 64;
833 }
834 ZD_ASSERT(rx->fragment_length == 0);
835}
836
837static inline void init_usb_tx(struct zd_usb *usb)
838{
839	/* FIXME: at this point we will allocate a fixed number of URBs for
840 * use in a cyclic scheme */
841}
842
843void zd_usb_init(struct zd_usb *usb, struct net_device *netdev,
844 struct usb_interface *intf)
845{
846 memset(usb, 0, sizeof(*usb));
847 usb->intf = usb_get_intf(intf);
848 usb_set_intfdata(usb->intf, netdev);
849 init_usb_interrupt(usb);
850 init_usb_tx(usb);
851 init_usb_rx(usb);
852}
853
854int zd_usb_init_hw(struct zd_usb *usb)
855{
856 int r;
857 struct zd_chip *chip = zd_usb_to_chip(usb);
858
859 ZD_ASSERT(mutex_is_locked(&chip->mutex));
860 r = zd_ioread16_locked(chip, &usb->fw_base_offset,
861 USB_REG((u16)FW_BASE_ADDR_OFFSET));
862 if (r)
863 return r;
864 dev_dbg_f(zd_usb_dev(usb), "fw_base_offset: %#06hx\n",
865 usb->fw_base_offset);
866
867 return 0;
868}
869
870void zd_usb_clear(struct zd_usb *usb)
871{
872 usb_set_intfdata(usb->intf, NULL);
873 usb_put_intf(usb->intf);
874 memset(usb, 0, sizeof(*usb));
875 /* FIXME: usb_interrupt, usb_tx, usb_rx? */
876}
877
878static const char *speed(enum usb_device_speed speed)
879{
880 switch (speed) {
881 case USB_SPEED_LOW:
882 return "low";
883 case USB_SPEED_FULL:
884 return "full";
885 case USB_SPEED_HIGH:
886 return "high";
887 default:
888 return "unknown speed";
889 }
890}
891
892static int scnprint_id(struct usb_device *udev, char *buffer, size_t size)
893{
894 return scnprintf(buffer, size, "%04hx:%04hx v%04hx %s",
895 le16_to_cpu(udev->descriptor.idVendor),
896 le16_to_cpu(udev->descriptor.idProduct),
897 get_bcdDevice(udev),
898 speed(udev->speed));
899}
900
901int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size)
902{
903 struct usb_device *udev = interface_to_usbdev(usb->intf);
904 return scnprint_id(udev, buffer, size);
905}
906
907#ifdef DEBUG
908static void print_id(struct usb_device *udev)
909{
910 char buffer[40];
911
912 scnprint_id(udev, buffer, sizeof(buffer));
913 buffer[sizeof(buffer)-1] = 0;
914 dev_dbg_f(&udev->dev, "%s\n", buffer);
915}
916#else
917#define print_id(udev) do { } while (0)
918#endif
919
920static int probe(struct usb_interface *intf, const struct usb_device_id *id)
921{
922 int r;
923 struct usb_device *udev = interface_to_usbdev(intf);
924 struct net_device *netdev = NULL;
925
926 print_id(udev);
927
928 switch (udev->speed) {
929 case USB_SPEED_LOW:
930 case USB_SPEED_FULL:
931 case USB_SPEED_HIGH:
932 break;
933 default:
934 dev_dbg_f(&intf->dev, "Unknown USB speed\n");
935 r = -ENODEV;
936 goto error;
937 }
938
939 netdev = zd_netdev_alloc(intf);
940 if (netdev == NULL) {
941 r = -ENOMEM;
942 goto error;
943 }
944
945 r = upload_firmware(udev, id->driver_info);
946 if (r) {
947 dev_err(&intf->dev,
948 "couldn't load firmware. Error number %d\n", r);
949 goto error;
950 }
951
952 r = usb_reset_configuration(udev);
953 if (r) {
954 dev_dbg_f(&intf->dev,
955 "couldn't reset configuration. Error number %d\n", r);
956 goto error;
957 }
958
959	/* The interrupt endpoint is not generally enabled at this point; we
960	 * save the USB bandwidth until the network device is opened. Note,
961	 * however, that initializing the MAC requires the interrupt endpoint
962	 * to be temporarily enabled.
963 */
964 r = zd_mac_init_hw(zd_netdev_mac(netdev), id->driver_info);
965 if (r) {
966 dev_dbg_f(&intf->dev,
967 "couldn't initialize mac. Error number %d\n", r);
968 goto error;
969 }
970
971 r = register_netdev(netdev);
972 if (r) {
973 dev_dbg_f(&intf->dev,
974 "couldn't register netdev. Error number %d\n", r);
975 goto error;
976 }
977
978 dev_dbg_f(&intf->dev, "successful\n");
979	dev_info(&intf->dev, "%s\n", netdev->name);
980 return 0;
981error:
982 usb_reset_device(interface_to_usbdev(intf));
983 zd_netdev_free(netdev);
984 return r;
985}
986
987static void disconnect(struct usb_interface *intf)
988{
989 struct net_device *netdev = zd_intf_to_netdev(intf);
990 struct zd_mac *mac = zd_netdev_mac(netdev);
991 struct zd_usb *usb = &mac->chip.usb;
992
993 dev_dbg_f(zd_usb_dev(usb), "\n");
994
995 zd_netdev_disconnect(netdev);
996
997 /* Just in case something has gone wrong! */
998 zd_usb_disable_rx(usb);
999 zd_usb_disable_int(usb);
1000
1001	/* If the disconnect was caused by removal of the driver module, this
1002	 * reset allows the driver to be reloaded. Without the reset here, the
1003	 * firmware upload in the probe function would fail when the driver is
1004	 * reloaded.
1005 */
1006 usb_reset_device(interface_to_usbdev(intf));
1007
1008 /* If somebody still waits on this lock now, this is an error. */
1009 zd_netdev_free(netdev);
1010 dev_dbg(&intf->dev, "disconnected\n");
1011}
1012
1013static struct usb_driver driver = {
1014 .name = "zd1211rw",
1015 .id_table = usb_ids,
1016 .probe = probe,
1017 .disconnect = disconnect,
1018};
1019
1020static int __init usb_init(void)
1021{
1022 int r;
1023
1024 pr_debug("usb_init()\n");
1025
1026 r = usb_register(&driver);
1027 if (r) {
1028 printk(KERN_ERR "usb_register() failed. Error number %d\n", r);
1029 return r;
1030 }
1031
1032 pr_debug("zd1211rw initialized\n");
1033 return 0;
1034}
1035
1036static void __exit usb_exit(void)
1037{
1038 pr_debug("usb_exit()\n");
1039 usb_deregister(&driver);
1040}
1041
1042module_init(usb_init);
1043module_exit(usb_exit);
1044
1045static int usb_int_regs_length(unsigned int count)
1046{
1047 return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
1048}
1049
1050static void prepare_read_regs_int(struct zd_usb *usb)
1051{
1052 struct zd_usb_interrupt *intr = &usb->intr;
1053
1054 spin_lock(&intr->lock);
1055 intr->read_regs_enabled = 1;
1056 INIT_COMPLETION(intr->read_regs.completion);
1057 spin_unlock(&intr->lock);
1058}
1059
1060static int get_results(struct zd_usb *usb, u16 *values,
1061 struct usb_req_read_regs *req, unsigned int count)
1062{
1063 int r;
1064 int i;
1065 struct zd_usb_interrupt *intr = &usb->intr;
1066 struct read_regs_int *rr = &intr->read_regs;
1067 struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
1068
1069 spin_lock(&intr->lock);
1070
1071 r = -EIO;
1072 /* The created block size seems to be larger than expected.
1073 * However results appear to be correct.
1074 */
1075 if (rr->length < usb_int_regs_length(count)) {
1076 dev_dbg_f(zd_usb_dev(usb),
1077 "error: actual length %d less than expected %d\n",
1078 rr->length, usb_int_regs_length(count));
1079 goto error_unlock;
1080 }
1081 if (rr->length > sizeof(rr->buffer)) {
1082 dev_dbg_f(zd_usb_dev(usb),
1083 "error: actual length %d exceeds buffer size %zu\n",
1084 rr->length, sizeof(rr->buffer));
1085 goto error_unlock;
1086 }
1087
1088 for (i = 0; i < count; i++) {
1089 struct reg_data *rd = &regs->regs[i];
1090 if (rd->addr != req->addr[i]) {
1091 dev_dbg_f(zd_usb_dev(usb),
1092 "rd[%d] addr %#06hx expected %#06hx\n", i,
1093 le16_to_cpu(rd->addr),
1094 le16_to_cpu(req->addr[i]));
1095 goto error_unlock;
1096 }
1097 values[i] = le16_to_cpu(rd->value);
1098 }
1099
1100 r = 0;
1101error_unlock:
1102 spin_unlock(&intr->lock);
1103 return r;
1104}
1105
1106int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1107 const zd_addr_t *addresses, unsigned int count)
1108{
1109 int r;
1110 int i, req_len, actual_req_len;
1111 struct usb_device *udev;
1112 struct usb_req_read_regs *req = NULL;
1113 unsigned long timeout;
1114
1115 if (count < 1) {
1116 dev_dbg_f(zd_usb_dev(usb), "error: count is zero\n");
1117 return -EINVAL;
1118 }
1119 if (count > USB_MAX_IOREAD16_COUNT) {
1120 dev_dbg_f(zd_usb_dev(usb),
1121 "error: count %u exceeds possible max %u\n",
1122 count, USB_MAX_IOREAD16_COUNT);
1123 return -EINVAL;
1124 }
1125 if (in_atomic()) {
1126 dev_dbg_f(zd_usb_dev(usb),
1127 "error: io in atomic context not supported\n");
1128 return -EWOULDBLOCK;
1129 }
1130 if (!usb_int_enabled(usb)) {
1131 dev_dbg_f(zd_usb_dev(usb),
1132 "error: usb interrupt not enabled\n");
1133 return -EWOULDBLOCK;
1134 }
1135
1136 req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
1137 req = kmalloc(req_len, GFP_NOFS);
1138 if (!req)
1139 return -ENOMEM;
1140 req->id = cpu_to_le16(USB_REQ_READ_REGS);
1141 for (i = 0; i < count; i++)
1142 req->addr[i] = cpu_to_le16(usb_addr(usb, addresses[i]));
1143
1144 udev = zd_usb_to_usbdev(usb);
1145 prepare_read_regs_int(usb);
1146 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
1147 req, req_len, &actual_req_len, 1000 /* ms */);
1148 if (r) {
1149 dev_dbg_f(zd_usb_dev(usb),
1150 "error in usb_bulk_msg(). Error number %d\n", r);
1151 goto error;
1152 }
1153 if (req_len != actual_req_len) {
1154 dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n"
1155 " req_len %d != actual_req_len %d\n",
1156 req_len, actual_req_len);
1157 r = -EIO;
1158 goto error;
1159 }
1160
1161 timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
1162 msecs_to_jiffies(1000));
1163 if (!timeout) {
1164 disable_read_regs_int(usb);
1165 dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
1166 r = -ETIMEDOUT;
1167 goto error;
1168 }
1169
1170 r = get_results(usb, values, req, count);
1171error:
1172 kfree(req);
1173 return r;
1174}
1175
1176int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1177 unsigned int count)
1178{
1179 int r;
1180 struct usb_device *udev;
1181 struct usb_req_write_regs *req = NULL;
1182 int i, req_len, actual_req_len;
1183
1184 if (count == 0)
1185 return 0;
1186 if (count > USB_MAX_IOWRITE16_COUNT) {
1187 dev_dbg_f(zd_usb_dev(usb),
1188 "error: count %u exceeds possible max %u\n",
1189 count, USB_MAX_IOWRITE16_COUNT);
1190 return -EINVAL;
1191 }
1192 if (in_atomic()) {
1193 dev_dbg_f(zd_usb_dev(usb),
1194 "error: io in atomic context not supported\n");
1195 return -EWOULDBLOCK;
1196 }
1197
1198 req_len = sizeof(struct usb_req_write_regs) +
1199 count * sizeof(struct reg_data);
1200 req = kmalloc(req_len, GFP_NOFS);
1201 if (!req)
1202 return -ENOMEM;
1203
1204 req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
1205 for (i = 0; i < count; i++) {
1206 struct reg_data *rw = &req->reg_writes[i];
1207 rw->addr = cpu_to_le16(usb_addr(usb, ioreqs[i].addr));
1208 rw->value = cpu_to_le16(ioreqs[i].value);
1209 }
1210
1211 udev = zd_usb_to_usbdev(usb);
1212 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
1213 req, req_len, &actual_req_len, 1000 /* ms */);
1214 if (r) {
1215 dev_dbg_f(zd_usb_dev(usb),
1216 "error in usb_bulk_msg(). Error number %d\n", r);
1217 goto error;
1218 }
1219 if (req_len != actual_req_len) {
1220 dev_dbg_f(zd_usb_dev(usb),
1221 "error in usb_bulk_msg()"
1222 " req_len %d != actual_req_len %d\n",
1223 req_len, actual_req_len);
1224 r = -EIO;
1225 goto error;
1226 }
1227
1228 /* FALL-THROUGH with r == 0 */
1229error:
1230 kfree(req);
1231 return r;
1232}
1233
1234int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1235{
1236 int r;
1237 struct usb_device *udev;
1238 struct usb_req_rfwrite *req = NULL;
1239 int i, req_len, actual_req_len;
1240 u16 bit_value_template;
1241
1242 if (in_atomic()) {
1243 dev_dbg_f(zd_usb_dev(usb),
1244 "error: io in atomic context not supported\n");
1245 return -EWOULDBLOCK;
1246 }
1247 if (bits < USB_MIN_RFWRITE_BIT_COUNT) {
1248 dev_dbg_f(zd_usb_dev(usb),
1249 "error: bits %d are smaller than"
1250 " USB_MIN_RFWRITE_BIT_COUNT %d\n",
1251 bits, USB_MIN_RFWRITE_BIT_COUNT);
1252 return -EINVAL;
1253 }
1254 if (bits > USB_MAX_RFWRITE_BIT_COUNT) {
1255 dev_dbg_f(zd_usb_dev(usb),
1256 "error: bits %d exceed USB_MAX_RFWRITE_BIT_COUNT %d\n",
1257 bits, USB_MAX_RFWRITE_BIT_COUNT);
1258 return -EINVAL;
1259 }
1260#ifdef DEBUG
1261 if (value & (~0UL << bits)) {
1262 dev_dbg_f(zd_usb_dev(usb),
1263 "error: value %#09x has bits >= %d set\n",
1264 value, bits);
1265 return -EINVAL;
1266 }
1267#endif /* DEBUG */
1268
1269 dev_dbg_f(zd_usb_dev(usb), "value %#09x bits %d\n", value, bits);
1270
1271 r = zd_usb_ioread16(usb, &bit_value_template, CR203);
1272 if (r) {
1273 dev_dbg_f(zd_usb_dev(usb),
1274 "error %d: Couldn't read CR203\n", r);
1275 goto out;
1276 }
1277 bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
1278
1279 req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
1280 req = kmalloc(req_len, GFP_NOFS);
1281 if (!req)
1282 return -ENOMEM;
1283
1284 req->id = cpu_to_le16(USB_REQ_WRITE_RF);
1285 /* 1: 3683a, but not used in ZYDAS driver */
1286 req->value = cpu_to_le16(2);
1287 req->bits = cpu_to_le16(bits);
1288
1289 for (i = 0; i < bits; i++) {
1290 u16 bv = bit_value_template;
1291 if (value & (1 << (bits-1-i)))
1292 bv |= RF_DATA;
1293 req->bit_values[i] = cpu_to_le16(bv);
1294 }
1295
1296 udev = zd_usb_to_usbdev(usb);
1297 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
1298 req, req_len, &actual_req_len, 1000 /* ms */);
1299 if (r) {
1300 dev_dbg_f(zd_usb_dev(usb),
1301 "error in usb_bulk_msg(). Error number %d\n", r);
1302 goto out;
1303 }
1304 if (req_len != actual_req_len) {
1305 dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()"
1306 " req_len %d != actual_req_len %d\n",
1307 req_len, actual_req_len);
1308 r = -EIO;
1309 goto out;
1310 }
1311
1312 /* FALL-THROUGH with r == 0 */
1313out:
1314 kfree(req);
1315 return r;
1316}
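
The exported helpers zd_usb_ioread16v() and zd_usb_iowrite16v() are the building blocks for
register access from process context. Below is a hedged sketch of a read-modify-write using
them; CR203 merely stands in for any control register defined in zd_chip.h, and the bit
argument has no particular meaning here.

    /* Sketch only: read-modify-write of a control register. Must not be
     * called from atomic context (see the in_atomic() checks above).
     */
    static int example_set_register_bit(struct zd_usb *usb, u16 bit)
    {
    	int r;
    	u16 value;
    	struct zd_ioreq16 ioreq;

    	r = zd_usb_ioread16(usb, &value, CR203);
    	if (r)
    		return r;

    	ioreq.addr = CR203;
    	ioreq.value = value | bit;
    	return zd_usb_iowrite16v(usb, &ioreq, 1);
    }
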
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
new file mode 100644
index 000000000000..d6420283bd5a
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -0,0 +1,240 @@
1/* zd_usb.h: Header for USB interface implemented by ZD1211 chip
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_USB_H
19#define _ZD_USB_H
20
21#include <linux/completion.h>
22#include <linux/netdevice.h>
23#include <linux/spinlock.h>
24#include <linux/skbuff.h>
25#include <linux/usb.h>
26
27#include "zd_def.h"
28#include "zd_types.h"
29
30enum devicetype {
31 DEVICE_ZD1211 = 0,
32 DEVICE_ZD1211B = 1,
33};
34
35enum endpoints {
36 EP_CTRL = 0,
37 EP_DATA_OUT = 1,
38 EP_DATA_IN = 2,
39 EP_INT_IN = 3,
40 EP_REGS_OUT = 4,
41};
42
43enum {
44 USB_MAX_TRANSFER_SIZE = 4096, /* bytes */
45	/* FIXME: The original driver uses this value. We have to check
46	 * whether MAX_TRANSFER_SIZE is sufficient and whether this is only
47	 * needed when one combined frame is split over two USB transactions.
48 */
49 USB_MAX_RX_SIZE = 4800, /* bytes */
50 USB_MAX_IOWRITE16_COUNT = 15,
51 USB_MAX_IOWRITE32_COUNT = USB_MAX_IOWRITE16_COUNT/2,
52 USB_MAX_IOREAD16_COUNT = 15,
53 USB_MAX_IOREAD32_COUNT = USB_MAX_IOREAD16_COUNT/2,
54 USB_MIN_RFWRITE_BIT_COUNT = 16,
55 USB_MAX_RFWRITE_BIT_COUNT = 28,
56 USB_MAX_EP_INT_BUFFER = 64,
57 USB_ZD1211B_BCD_DEVICE = 0x4810,
58};
59
60enum control_requests {
61 USB_REQ_WRITE_REGS = 0x21,
62 USB_REQ_READ_REGS = 0x22,
63 USB_REQ_WRITE_RF = 0x23,
64 USB_REQ_PROG_FLASH = 0x24,
65 USB_REQ_EEPROM_START = 0x0128, /* ? request is a byte */
66 USB_REQ_EEPROM_MID = 0x28,
67 USB_REQ_EEPROM_END = 0x0228, /* ? request is a byte */
68 USB_REQ_FIRMWARE_DOWNLOAD = 0x30,
69 USB_REQ_FIRMWARE_CONFIRM = 0x31,
70 USB_REQ_FIRMWARE_READ_DATA = 0x32,
71};
72
73struct usb_req_read_regs {
74 __le16 id;
75 __le16 addr[0];
76} __attribute__((packed));
77
78struct reg_data {
79 __le16 addr;
80 __le16 value;
81} __attribute__((packed));
82
83struct usb_req_write_regs {
84 __le16 id;
85 struct reg_data reg_writes[0];
86} __attribute__((packed));
87
88enum {
89 RF_IF_LE = 0x02,
90 RF_CLK = 0x04,
91 RF_DATA = 0x08,
92};
93
94struct usb_req_rfwrite {
95 __le16 id;
96 __le16 value;
97 /* 1: 3683a */
98 /* 2: other (default) */
99 __le16 bits;
100	/* RF2959: 24 */
101 __le16 bit_values[0];
102 /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
103} __attribute__((packed));
104
105/* USB interrupt */
106
107enum usb_int_id {
108 USB_INT_TYPE = 0x01,
109 USB_INT_ID_REGS = 0x90,
110 USB_INT_ID_RETRY_FAILED = 0xa0,
111};
112
113enum usb_int_flags {
114 USB_INT_READ_REGS_EN = 0x01,
115};
116
117struct usb_int_header {
118 u8 type; /* must always be 1 */
119 u8 id;
120} __attribute__((packed));
121
122struct usb_int_regs {
123 struct usb_int_header hdr;
124 struct reg_data regs[0];
125} __attribute__((packed));
126
127struct usb_int_retry_fail {
128 struct usb_int_header hdr;
129 u8 new_rate;
130 u8 _dummy;
131 u8 addr[ETH_ALEN];
132 u8 ibss_wakeup_dest;
133} __attribute__((packed));
134
135struct read_regs_int {
136 struct completion completion;
137	/* Stores the USB interrupt structure; before the request is issued it
138	 * contains the USB address of the first requested register.
139 */
140 u8 buffer[USB_MAX_EP_INT_BUFFER];
141 int length;
142 __le16 cr_int_addr;
143};
144
145struct zd_ioreq16 {
146 zd_addr_t addr;
147 u16 value;
148};
149
150struct zd_ioreq32 {
151 zd_addr_t addr;
152 u32 value;
153};
154
155struct zd_usb_interrupt {
156 struct read_regs_int read_regs;
157 spinlock_t lock;
158 struct urb *urb;
159 int interval;
160 u8 read_regs_enabled:1;
161};
162
163static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
164{
165 return (struct usb_int_regs *)intr->read_regs.buffer;
166}
167
168#define URBS_COUNT 5
169
170struct zd_usb_rx {
171 spinlock_t lock;
172 u8 fragment[2*USB_MAX_RX_SIZE];
173 unsigned int fragment_length;
174 unsigned int usb_packet_size;
175 struct urb **urbs;
176 int urbs_count;
177};
178
179struct zd_usb_tx {
180 spinlock_t lock;
181};
182
183/* Contains the USB parts. The structure doesn't require a lock, because intf
184 * and fw_base_offset will not be changed after initialization.
185 */
186struct zd_usb {
187 struct zd_usb_interrupt intr;
188 struct zd_usb_rx rx;
189 struct zd_usb_tx tx;
190 struct usb_interface *intf;
191 u16 fw_base_offset;
192};
193
194#define zd_usb_dev(usb) (&usb->intf->dev)
195
196static inline struct usb_device *zd_usb_to_usbdev(struct zd_usb *usb)
197{
198 return interface_to_usbdev(usb->intf);
199}
200
201static inline struct net_device *zd_intf_to_netdev(struct usb_interface *intf)
202{
203 return usb_get_intfdata(intf);
204}
205
206static inline struct net_device *zd_usb_to_netdev(struct zd_usb *usb)
207{
208 return zd_intf_to_netdev(usb->intf);
209}
210
211void zd_usb_init(struct zd_usb *usb, struct net_device *netdev,
212 struct usb_interface *intf);
213int zd_usb_init_hw(struct zd_usb *usb);
214void zd_usb_clear(struct zd_usb *usb);
215
216int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
217
218int zd_usb_enable_int(struct zd_usb *usb);
219void zd_usb_disable_int(struct zd_usb *usb);
220
221int zd_usb_enable_rx(struct zd_usb *usb);
222void zd_usb_disable_rx(struct zd_usb *usb);
223
224int zd_usb_tx(struct zd_usb *usb, const u8 *frame, unsigned int length);
225
226int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
227 const zd_addr_t *addresses, unsigned int count);
228
229static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
230 const zd_addr_t addr)
231{
232 return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
233}
234
235int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
236 unsigned int count);
237
238int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits);
239
240#endif /* _ZD_USB_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_util.c b/drivers/net/wireless/zd1211rw/zd_util.c
new file mode 100644
index 000000000000..d20036c15d11
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_util.c
@@ -0,0 +1,82 @@
1/* zd_util.c
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 * Utility functions
18 */
19
20#include "zd_def.h"
21#include "zd_util.h"
22
23#ifdef DEBUG
24static char hex(u8 v)
25{
26 v &= 0xf;
27 return (v < 10 ? '0' : 'a' - 10) + v;
28}
29
30static char hex_print(u8 c)
31{
32 return (0x20 <= c && c < 0x7f) ? c : '.';
33}
34
35static void dump_line(const u8 *bytes, size_t size)
36{
37 char c;
38 size_t i;
39
40 size = size <= 8 ? size : 8;
41 printk(KERN_DEBUG "zd1211 %p ", bytes);
42 for (i = 0; i < 8; i++) {
43 switch (i) {
44 case 1:
45 case 5:
46 c = '.';
47 break;
48 case 3:
49 c = ':';
50 break;
51 default:
52 c = ' ';
53 }
54 if (i < size) {
55 printk("%c%c%c", hex(bytes[i] >> 4), hex(bytes[i]), c);
56 } else {
57 printk(" %c", c);
58 }
59 }
60
61 for (i = 0; i < size; i++)
62 printk("%c", hex_print(bytes[i]));
63 printk("\n");
64}
65
66void zd_hexdump(const void *bytes, size_t size)
67{
68 size_t i = 0;
69
70 do {
71 dump_line((u8 *)bytes + i, size-i);
72 i += 8;
73 } while (i < size);
74}
75#endif /* DEBUG */
76
77void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size)
78{
79 if (buffer_size < tail_size)
80 return NULL;
81 return (u8 *)buffer + (buffer_size - tail_size);
82}
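
zd_tail() above returns a pointer to the last tail_size bytes of a buffer, or NULL if the
buffer is too short; the RX path uses it to peel fixed-size trailers off received frames. A
short usage sketch, assuming the driver's kernel context, with an invented trailer layout:

    /* Illustration only: the trailer structure is hypothetical. */
    struct example_trailer {
    	__le16 signal;
    	__le16 status;
    } __attribute__((packed));

    static int example_parse_trailer(const u8 *buffer, size_t length)
    {
    	const struct example_trailer *t;

    	t = zd_tail(buffer, length, sizeof(*t));
    	if (!t)
    		return -EINVAL;	/* buffer shorter than the trailer */
    	return le16_to_cpu(t->status);
    }
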
diff --git a/drivers/net/wireless/zd1211rw/zd_util.h b/drivers/net/wireless/zd1211rw/zd_util.h
new file mode 100644
index 000000000000..ce26f7adea92
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_util.h
@@ -0,0 +1,29 @@
1/* zd_util.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_UTIL_H
19#define _ZD_UTIL_H
20
21void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size);
22
23#ifdef DEBUG
24void zd_hexdump(const void *bytes, size_t size);
25#else
26#define zd_hexdump(bytes, size)
27#endif /* DEBUG */
28
29#endif /* _ZD_UTIL_H */
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index bbbf7e274a2a..8459a18254a4 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -19,37 +19,13 @@
19 19
20 Support and updates available at 20 Support and updates available at
21 http://www.scyld.com/network/yellowfin.html 21 http://www.scyld.com/network/yellowfin.html
22 [link no longer provides useful info -jgarzik]
22 23
23
24 Linux kernel changelog:
25 -----------------------
26
27 LK1.1.1 (jgarzik): Port to 2.4 kernel
28
29 LK1.1.2 (jgarzik):
30 * Merge in becker version 1.05
31
32 LK1.1.3 (jgarzik):
33 * Various cleanups
34 * Update yellowfin_timer to correctly calculate duplex.
35 (suggested by Manfred Spraul)
36
37 LK1.1.4 (val@nmt.edu):
38 * Fix three endian-ness bugs
39 * Support dual function SYM53C885E ethernet chip
40
41 LK1.1.5 (val@nmt.edu):
42 * Fix forced full-duplex bug I introduced
43
44 LK1.1.6 (val@nmt.edu):
45 * Only print warning on truly "oversized" packets
46 * Fix theoretical bug on gigabit cards - return to 1.1.3 behavior
47
48*/ 24*/
49 25
50#define DRV_NAME "yellowfin" 26#define DRV_NAME "yellowfin"
51#define DRV_VERSION "1.05+LK1.1.6" 27#define DRV_VERSION "2.0"
52#define DRV_RELDATE "Feb 11, 2002" 28#define DRV_RELDATE "Jun 27, 2006"
53 29
54#define PFX DRV_NAME ": " 30#define PFX DRV_NAME ": "
55 31
@@ -239,8 +215,11 @@ enum capability_flags {
239 HasMACAddrBug=32, /* Only on early revs. */ 215 HasMACAddrBug=32, /* Only on early revs. */
240 DontUseEeprom=64, /* Don't read the MAC from the EEPROm. */ 216 DontUseEeprom=64, /* Don't read the MAC from the EEPROm. */
241}; 217};
218
242/* The PCI I/O space extent. */ 219/* The PCI I/O space extent. */
243#define YELLOWFIN_SIZE 0x100 220enum {
221 YELLOWFIN_SIZE = 0x100,
222};
244 223
245struct pci_id_info { 224struct pci_id_info {
246 const char *name; 225 const char *name;
@@ -248,16 +227,14 @@ struct pci_id_info {
248 int pci, pci_mask, subsystem, subsystem_mask; 227 int pci, pci_mask, subsystem, subsystem_mask;
249 int revision, revision_mask; /* Only 8 bits. */ 228 int revision, revision_mask; /* Only 8 bits. */
250 } id; 229 } id;
251 int io_size; /* Needed for I/O region check or ioremap(). */
252 int drv_flags; /* Driver use, intended as capability flags. */ 230 int drv_flags; /* Driver use, intended as capability flags. */
253}; 231};
254 232
255static const struct pci_id_info pci_id_tbl[] = { 233static const struct pci_id_info pci_id_tbl[] = {
256 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, 234 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
257 YELLOWFIN_SIZE,
258 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, 235 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
259 {"Symbios SYM83C885", { 0x07011000, 0xffffffff}, 236 {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
260 YELLOWFIN_SIZE, HasMII | DontUseEeprom }, 237 HasMII | DontUseEeprom },
261 { } 238 { }
262}; 239};
263 240
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 15f6cd4279b7..77e7202a0eba 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -1052,7 +1052,7 @@ static void ahci_thaw(struct ata_port *ap)
1052 1052
1053static void ahci_error_handler(struct ata_port *ap) 1053static void ahci_error_handler(struct ata_port *ap)
1054{ 1054{
1055 if (!(ap->flags & ATA_FLAG_FROZEN)) { 1055 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1056 /* restart engine */ 1056 /* restart engine */
1057 ahci_stop_engine(ap); 1057 ahci_stop_engine(ap);
1058 ahci_start_engine(ap); 1058 ahci_start_engine(ap);
@@ -1323,6 +1323,17 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1323 if (!printed_version++) 1323 if (!printed_version++)
1324 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1324 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1325 1325
1326 /* JMicron-specific fixup: make sure we're in AHCI mode */
1327 /* This is protected from races with ata_jmicron by the pci probe
1328 locking */
1329 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1330 /* AHCI enable, AHCI on function 0 */
1331 pci_write_config_byte(pdev, 0x41, 0xa1);
1332 /* Function 1 is the PATA controller */
1333 if (PCI_FUNC(pdev->devfn))
1334 return -ENODEV;
1335 }
1336
1326 rc = pci_enable_device(pdev); 1337 rc = pci_enable_device(pdev);
1327 if (rc) 1338 if (rc)
1328 return rc; 1339 return rc;
@@ -1378,10 +1389,6 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1378 if (have_msi) 1389 if (have_msi)
1379 hpriv->flags |= AHCI_FLAG_MSI; 1390 hpriv->flags |= AHCI_FLAG_MSI;
1380 1391
1381 /* JMicron-specific fixup: make sure we're in AHCI mode */
1382 if (pdev->vendor == 0x197b)
1383 pci_write_config_byte(pdev, 0x41, 0xa1);
1384
1385 /* initialize adapter */ 1392 /* initialize adapter */
1386 rc = ahci_host_init(probe_ent); 1393 rc = ahci_host_init(probe_ent);
1387 if (rc) 1394 if (rc)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 1c960ac1617f..386e5f21e191 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,9 +61,9 @@
61#include "libata.h" 61#include "libata.h"
62 62
63/* debounce timing parameters in msecs { interval, duration, timeout } */ 63/* debounce timing parameters in msecs { interval, duration, timeout } */
64const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 }; 64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 }; 65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 }; 66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
67 67
68static unsigned int ata_dev_init_params(struct ata_device *dev, 68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors); 69 u16 heads, u16 sectors);
@@ -907,7 +907,7 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
907{ 907{
908 int rc; 908 int rc;
909 909
910 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK) 910 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
911 return; 911 return;
912 912
913 PREPARE_WORK(&ap->port_task, fn, data); 913 PREPARE_WORK(&ap->port_task, fn, data);
@@ -938,7 +938,7 @@ void ata_port_flush_task(struct ata_port *ap)
938 DPRINTK("ENTER\n"); 938 DPRINTK("ENTER\n");
939 939
940 spin_lock_irqsave(ap->lock, flags); 940 spin_lock_irqsave(ap->lock, flags);
941 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK; 941 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
942 spin_unlock_irqrestore(ap->lock, flags); 942 spin_unlock_irqrestore(ap->lock, flags);
943 943
944 DPRINTK("flush #1\n"); 944 DPRINTK("flush #1\n");
@@ -957,7 +957,7 @@ void ata_port_flush_task(struct ata_port *ap)
957 } 957 }
958 958
959 spin_lock_irqsave(ap->lock, flags); 959 spin_lock_irqsave(ap->lock, flags);
960 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK; 960 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
961 spin_unlock_irqrestore(ap->lock, flags); 961 spin_unlock_irqrestore(ap->lock, flags);
962 962
963 if (ata_msg_ctl(ap)) 963 if (ata_msg_ctl(ap))
@@ -1009,7 +1009,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
1009 spin_lock_irqsave(ap->lock, flags); 1009 spin_lock_irqsave(ap->lock, flags);
1010 1010
1011 /* no internal command while frozen */ 1011 /* no internal command while frozen */
1012 if (ap->flags & ATA_FLAG_FROZEN) { 1012 if (ap->pflags & ATA_PFLAG_FROZEN) {
1013 spin_unlock_irqrestore(ap->lock, flags); 1013 spin_unlock_irqrestore(ap->lock, flags);
1014 return AC_ERR_SYSTEM; 1014 return AC_ERR_SYSTEM;
1015 } 1015 }
@@ -1325,6 +1325,19 @@ static void ata_dev_config_ncq(struct ata_device *dev,
1325 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); 1325 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1326} 1326}
1327 1327
1328static void ata_set_port_max_cmd_len(struct ata_port *ap)
1329{
1330 int i;
1331
1332 if (ap->host) {
1333 ap->host->max_cmd_len = 0;
1334 for (i = 0; i < ATA_MAX_DEVICES; i++)
1335 ap->host->max_cmd_len = max_t(unsigned int,
1336 ap->host->max_cmd_len,
1337 ap->device[i].cdb_len);
1338 }
1339}
1340
1328/** 1341/**
1329 * ata_dev_configure - Configure the specified ATA/ATAPI device 1342 * ata_dev_configure - Configure the specified ATA/ATAPI device
1330 * @dev: Target device to configure 1343 * @dev: Target device to configure
@@ -1344,7 +1357,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1344 struct ata_port *ap = dev->ap; 1357 struct ata_port *ap = dev->ap;
1345 const u16 *id = dev->id; 1358 const u16 *id = dev->id;
1346 unsigned int xfer_mask; 1359 unsigned int xfer_mask;
1347 int i, rc; 1360 int rc;
1348 1361
1349 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 1362 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1350 ata_dev_printk(dev, KERN_INFO, 1363 ata_dev_printk(dev, KERN_INFO,
@@ -1404,7 +1417,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1404 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 1417 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1405 1418
1406 /* print device info to dmesg */ 1419 /* print device info to dmesg */
1407 if (ata_msg_info(ap)) 1420 if (ata_msg_drv(ap) && print_info)
1408 ata_dev_printk(dev, KERN_INFO, "ATA-%d, " 1421 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1409 "max %s, %Lu sectors: %s %s\n", 1422 "max %s, %Lu sectors: %s %s\n",
1410 ata_id_major_version(id), 1423 ata_id_major_version(id),
@@ -1427,7 +1440,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1427 } 1440 }
1428 1441
1429 /* print device info to dmesg */ 1442 /* print device info to dmesg */
1430 if (ata_msg_info(ap)) 1443 if (ata_msg_drv(ap) && print_info)
1431 ata_dev_printk(dev, KERN_INFO, "ATA-%d, " 1444 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1432 "max %s, %Lu sectors: CHS %u/%u/%u\n", 1445 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1433 ata_id_major_version(id), 1446 ata_id_major_version(id),
@@ -1439,7 +1452,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1439 1452
1440 if (dev->id[59] & 0x100) { 1453 if (dev->id[59] & 0x100) {
1441 dev->multi_count = dev->id[59] & 0xff; 1454 dev->multi_count = dev->id[59] & 0xff;
1442 if (ata_msg_info(ap)) 1455 if (ata_msg_drv(ap) && print_info)
1443 ata_dev_printk(dev, KERN_INFO, 1456 ata_dev_printk(dev, KERN_INFO,
1444 "ata%u: dev %u multi count %u\n", 1457 "ata%u: dev %u multi count %u\n",
1445 ap->id, dev->devno, dev->multi_count); 1458 ap->id, dev->devno, dev->multi_count);
@@ -1468,21 +1481,17 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1468 } 1481 }
1469 1482
1470 /* print device info to dmesg */ 1483 /* print device info to dmesg */
1471 if (ata_msg_info(ap)) 1484 if (ata_msg_drv(ap) && print_info)
1472 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n", 1485 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1473 ata_mode_string(xfer_mask), 1486 ata_mode_string(xfer_mask),
1474 cdb_intr_string); 1487 cdb_intr_string);
1475 } 1488 }
1476 1489
1477 ap->host->max_cmd_len = 0; 1490 ata_set_port_max_cmd_len(ap);
1478 for (i = 0; i < ATA_MAX_DEVICES; i++)
1479 ap->host->max_cmd_len = max_t(unsigned int,
1480 ap->host->max_cmd_len,
1481 ap->device[i].cdb_len);
1482 1491
1483 /* limit bridge transfers to udma5, 200 sectors */ 1492 /* limit bridge transfers to udma5, 200 sectors */
1484 if (ata_dev_knobble(dev)) { 1493 if (ata_dev_knobble(dev)) {
1485 if (ata_msg_info(ap)) 1494 if (ata_msg_drv(ap) && print_info)
1486 ata_dev_printk(dev, KERN_INFO, 1495 ata_dev_printk(dev, KERN_INFO,
1487 "applying bridge limits\n"); 1496 "applying bridge limits\n");
1488 dev->udma_mask &= ATA_UDMA5; 1497 dev->udma_mask &= ATA_UDMA5;
@@ -2137,7 +2146,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2137 * return error code and failing device on failure. 2146 * return error code and failing device on failure.
2138 */ 2147 */
2139 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2148 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2140 if (ata_dev_enabled(&ap->device[i])) { 2149 if (ata_dev_ready(&ap->device[i])) {
2141 ap->ops->set_mode(ap); 2150 ap->ops->set_mode(ap);
2142 break; 2151 break;
2143 } 2152 }
@@ -2203,7 +2212,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2203 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2212 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2204 dev = &ap->device[i]; 2213 dev = &ap->device[i];
2205 2214
2206 if (!ata_dev_enabled(dev)) 2215 /* don't update suspended devices' xfer mode */
2216 if (!ata_dev_ready(dev))
2207 continue; 2217 continue;
2208 2218
2209 rc = ata_dev_set_mode(dev); 2219 rc = ata_dev_set_mode(dev);
@@ -2579,7 +2589,7 @@ static void ata_wait_spinup(struct ata_port *ap)
2579 2589
2580 /* first, debounce phy if SATA */ 2590 /* first, debounce phy if SATA */
2581 if (ap->cbl == ATA_CBL_SATA) { 2591 if (ap->cbl == ATA_CBL_SATA) {
2582 rc = sata_phy_debounce(ap, sata_deb_timing_eh); 2592 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2583 2593
2584 /* if debounced successfully and offline, no need to wait */ 2594 /* if debounced successfully and offline, no need to wait */
2585 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap)) 2595 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
@@ -2615,16 +2625,17 @@ static void ata_wait_spinup(struct ata_port *ap)
2615int ata_std_prereset(struct ata_port *ap) 2625int ata_std_prereset(struct ata_port *ap)
2616{ 2626{
2617 struct ata_eh_context *ehc = &ap->eh_context; 2627 struct ata_eh_context *ehc = &ap->eh_context;
2618 const unsigned long *timing; 2628 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2619 int rc; 2629 int rc;
2620 2630
2621 /* hotplug? */ 2631 /* handle link resume & hotplug spinup */
2622 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) { 2632 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2623 if (ap->flags & ATA_FLAG_HRST_TO_RESUME) 2633 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2624 ehc->i.action |= ATA_EH_HARDRESET; 2634 ehc->i.action |= ATA_EH_HARDRESET;
2625 if (ap->flags & ATA_FLAG_SKIP_D2H_BSY) 2635
2626 ata_wait_spinup(ap); 2636 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2627 } 2637 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2638 ata_wait_spinup(ap);
2628 2639
2629 /* if we're about to do hardreset, nothing more to do */ 2640 /* if we're about to do hardreset, nothing more to do */
2630 if (ehc->i.action & ATA_EH_HARDRESET) 2641 if (ehc->i.action & ATA_EH_HARDRESET)
@@ -2632,11 +2643,6 @@ int ata_std_prereset(struct ata_port *ap)
2632 2643
2633 /* if SATA, resume phy */ 2644 /* if SATA, resume phy */
2634 if (ap->cbl == ATA_CBL_SATA) { 2645 if (ap->cbl == ATA_CBL_SATA) {
2635 if (ap->flags & ATA_FLAG_LOADING)
2636 timing = sata_deb_timing_boot;
2637 else
2638 timing = sata_deb_timing_eh;
2639
2640 rc = sata_phy_resume(ap, timing); 2646 rc = sata_phy_resume(ap, timing);
2641 if (rc && rc != -EOPNOTSUPP) { 2647 if (rc && rc != -EOPNOTSUPP) {
2642 /* phy resume failed */ 2648 /* phy resume failed */
@@ -2724,6 +2730,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2724 */ 2730 */
2725int sata_std_hardreset(struct ata_port *ap, unsigned int *class) 2731int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2726{ 2732{
2733 struct ata_eh_context *ehc = &ap->eh_context;
2734 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2727 u32 scontrol; 2735 u32 scontrol;
2728 int rc; 2736 int rc;
2729 2737
@@ -2761,7 +2769,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2761 msleep(1); 2769 msleep(1);
2762 2770
2763 /* bring phy back */ 2771 /* bring phy back */
2764 sata_phy_resume(ap, sata_deb_timing_eh); 2772 sata_phy_resume(ap, timing);
2765 2773
2766 /* TODO: phy layer with polling, timeouts, etc. */ 2774 /* TODO: phy layer with polling, timeouts, etc. */
2767 if (ata_port_offline(ap)) { 2775 if (ata_port_offline(ap)) {
@@ -4285,7 +4293,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4285 unsigned int i; 4293 unsigned int i;
4286 4294
4287 /* no command while frozen */ 4295 /* no command while frozen */
4288 if (unlikely(ap->flags & ATA_FLAG_FROZEN)) 4296 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4289 return NULL; 4297 return NULL;
4290 4298
4291 /* the last tag is reserved for internal command. */ 4299 /* the last tag is reserved for internal command. */
@@ -4407,7 +4415,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4407 * taken care of. 4415 * taken care of.
4408 */ 4416 */
4409 if (ap->ops->error_handler) { 4417 if (ap->ops->error_handler) {
4410 WARN_ON(ap->flags & ATA_FLAG_FROZEN); 4418 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4411 4419
4412 if (unlikely(qc->err_mask)) 4420 if (unlikely(qc->err_mask))
4413 qc->flags |= ATA_QCFLAG_FAILED; 4421 qc->flags |= ATA_QCFLAG_FAILED;
@@ -5001,86 +5009,120 @@ int ata_flush_cache(struct ata_device *dev)
5001 return 0; 5009 return 0;
5002} 5010}
5003 5011
5004static int ata_standby_drive(struct ata_device *dev) 5012static int ata_host_set_request_pm(struct ata_host_set *host_set,
5013 pm_message_t mesg, unsigned int action,
5014 unsigned int ehi_flags, int wait)
5005{ 5015{
5006 unsigned int err_mask; 5016 unsigned long flags;
5017 int i, rc;
5007 5018
5008 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); 5019 for (i = 0; i < host_set->n_ports; i++) {
5009 if (err_mask) { 5020 struct ata_port *ap = host_set->ports[i];
5010 ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
5011 "(err_mask=0x%x)\n", err_mask);
5012 return -EIO;
5013 }
5014 5021
5015 return 0; 5022 /* Previous resume operation might still be in
5016} 5023 * progress. Wait for PM_PENDING to clear.
5024 */
5025 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5026 ata_port_wait_eh(ap);
5027 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5028 }
5017 5029
5018static int ata_start_drive(struct ata_device *dev) 5030 /* request PM ops to EH */
5019{ 5031 spin_lock_irqsave(ap->lock, flags);
5020 unsigned int err_mask;
5021 5032
5022 err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE); 5033 ap->pm_mesg = mesg;
5023 if (err_mask) { 5034 if (wait) {
5024 ata_dev_printk(dev, KERN_ERR, "failed to start drive " 5035 rc = 0;
5025 "(err_mask=0x%x)\n", err_mask); 5036 ap->pm_result = &rc;
5026 return -EIO; 5037 }
5038
5039 ap->pflags |= ATA_PFLAG_PM_PENDING;
5040 ap->eh_info.action |= action;
5041 ap->eh_info.flags |= ehi_flags;
5042
5043 ata_port_schedule_eh(ap);
5044
5045 spin_unlock_irqrestore(ap->lock, flags);
5046
5047 /* wait and check result */
5048 if (wait) {
5049 ata_port_wait_eh(ap);
5050 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5051 if (rc)
5052 return rc;
5053 }
5027 } 5054 }
5028 5055
5029 return 0; 5056 return 0;
5030} 5057}
5031 5058
5032/** 5059/**
5033 * ata_device_resume - wakeup a previously suspended devices 5060 * ata_host_set_suspend - suspend host_set
5034 * @dev: the device to resume 5061 * @host_set: host_set to suspend
5062 * @mesg: PM message
5035 * 5063 *
5036 * Kick the drive back into action, by sending it an idle immediate 5064 * Suspend @host_set. Actual operation is performed by EH. This
5037 * command and making sure its transfer mode matches between drive 5065 * function requests EH to perform PM operations and waits for EH
5038 * and host. 5066 * to finish.
5039 * 5067 *
5068 * LOCKING:
5069 * Kernel thread context (may sleep).
5070 *
5071 * RETURNS:
5072 * 0 on success, -errno on failure.
5040 */ 5073 */
5041int ata_device_resume(struct ata_device *dev) 5074int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
5042{ 5075{
5043 struct ata_port *ap = dev->ap; 5076 int i, j, rc;
5044 5077
5045 if (ap->flags & ATA_FLAG_SUSPENDED) { 5078 rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
5046 struct ata_device *failed_dev; 5079 if (rc)
5080 goto fail;
5047 5081
5048 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 5082 /* EH is quiescent now. Fail if we have any ready device.
5049 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000); 5083 * This happens if hotplug occurs between completion of device
5084 * suspension and here.
5085 */
5086 for (i = 0; i < host_set->n_ports; i++) {
5087 struct ata_port *ap = host_set->ports[i];
5050 5088
5051 ap->flags &= ~ATA_FLAG_SUSPENDED; 5089 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5052 while (ata_set_mode(ap, &failed_dev)) 5090 struct ata_device *dev = &ap->device[j];
5053 ata_dev_disable(failed_dev); 5091
5092 if (ata_dev_ready(dev)) {
5093 ata_port_printk(ap, KERN_WARNING,
5094 "suspend failed, device %d "
5095 "still active\n", dev->devno);
5096 rc = -EBUSY;
5097 goto fail;
5098 }
5099 }
5054 } 5100 }
5055 if (!ata_dev_enabled(dev))
5056 return 0;
5057 if (dev->class == ATA_DEV_ATA)
5058 ata_start_drive(dev);
5059 5101
5102 host_set->dev->power.power_state = mesg;
5060 return 0; 5103 return 0;
5104
5105 fail:
5106 ata_host_set_resume(host_set);
5107 return rc;
5061} 5108}
5062 5109
5063/** 5110/**
5064 * ata_device_suspend - prepare a device for suspend 5111 * ata_host_set_resume - resume host_set
5065 * @dev: the device to suspend 5112 * @host_set: host_set to resume
5066 * @state: target power management state 5113 *
5114 * Resume @host_set. Actual operation is performed by EH. This
5115 * function requests EH to perform PM operations and returns.
5116 * Note that all resume operations are performed in parallel.
5067 * 5117 *
5068 * Flush the cache on the drive, if appropriate, then issue a 5118 * LOCKING:
5069 * standbynow command. 5119 * Kernel thread context (may sleep).
5070 */ 5120 */
5071int ata_device_suspend(struct ata_device *dev, pm_message_t state) 5121void ata_host_set_resume(struct ata_host_set *host_set)
5072{ 5122{
5073 struct ata_port *ap = dev->ap; 5123 ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
5074 5124 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5075 if (!ata_dev_enabled(dev)) 5125 host_set->dev->power.power_state = PMSG_ON;
5076 return 0;
5077 if (dev->class == ATA_DEV_ATA)
5078 ata_flush_cache(dev);
5079
5080 if (state.event != PM_EVENT_FREEZE)
5081 ata_standby_drive(dev);
5082 ap->flags |= ATA_FLAG_SUSPENDED;
5083 return 0;
5084} 5126}
5085 5127
5086/** 5128/**
@@ -5440,6 +5482,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
5440 } 5482 }
5441 5483
5442 if (ap->ops->error_handler) { 5484 if (ap->ops->error_handler) {
5485 struct ata_eh_info *ehi = &ap->eh_info;
5443 unsigned long flags; 5486 unsigned long flags;
5444 5487
5445 ata_port_probe(ap); 5488 ata_port_probe(ap);
@@ -5447,10 +5490,11 @@ int ata_device_add(const struct ata_probe_ent *ent)
5447 /* kick EH for boot probing */ 5490 /* kick EH for boot probing */
5448 spin_lock_irqsave(ap->lock, flags); 5491 spin_lock_irqsave(ap->lock, flags);
5449 5492
5450 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1; 5493 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5451 ap->eh_info.action |= ATA_EH_SOFTRESET; 5494 ehi->action |= ATA_EH_SOFTRESET;
5495 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5452 5496
5453 ap->flags |= ATA_FLAG_LOADING; 5497 ap->pflags |= ATA_PFLAG_LOADING;
5454 ata_port_schedule_eh(ap); 5498 ata_port_schedule_eh(ap);
5455 5499
5456 spin_unlock_irqrestore(ap->lock, flags); 5500 spin_unlock_irqrestore(ap->lock, flags);
@@ -5518,7 +5562,7 @@ void ata_port_detach(struct ata_port *ap)
5518 5562
5519 /* tell EH we're leaving & flush EH */ 5563 /* tell EH we're leaving & flush EH */
5520 spin_lock_irqsave(ap->lock, flags); 5564 spin_lock_irqsave(ap->lock, flags);
5521 ap->flags |= ATA_FLAG_UNLOADING; 5565 ap->pflags |= ATA_PFLAG_UNLOADING;
5522 spin_unlock_irqrestore(ap->lock, flags); 5566 spin_unlock_irqrestore(ap->lock, flags);
5523 5567
5524 ata_port_wait_eh(ap); 5568 ata_port_wait_eh(ap);
@@ -5723,20 +5767,55 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5723 return (tmp == bits->val) ? 1 : 0; 5767 return (tmp == bits->val) ? 1 : 0;
5724} 5768}
5725 5769
5726int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) 5770void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state)
5727{ 5771{
5728 pci_save_state(pdev); 5772 pci_save_state(pdev);
5729 pci_disable_device(pdev); 5773
5730 pci_set_power_state(pdev, PCI_D3hot); 5774 if (state.event == PM_EVENT_SUSPEND) {
5731 return 0; 5775 pci_disable_device(pdev);
5776 pci_set_power_state(pdev, PCI_D3hot);
5777 }
5732} 5778}
5733 5779
5734int ata_pci_device_resume(struct pci_dev *pdev) 5780void ata_pci_device_do_resume(struct pci_dev *pdev)
5735{ 5781{
5736 pci_set_power_state(pdev, PCI_D0); 5782 pci_set_power_state(pdev, PCI_D0);
5737 pci_restore_state(pdev); 5783 pci_restore_state(pdev);
5738 pci_enable_device(pdev); 5784 pci_enable_device(pdev);
5739 pci_set_master(pdev); 5785 pci_set_master(pdev);
5786}
5787
5788int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5789{
5790 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
5791 int rc = 0;
5792
5793 rc = ata_host_set_suspend(host_set, state);
5794 if (rc)
5795 return rc;
5796
5797 if (host_set->next) {
5798 rc = ata_host_set_suspend(host_set->next, state);
5799 if (rc) {
5800 ata_host_set_resume(host_set);
5801 return rc;
5802 }
5803 }
5804
5805 ata_pci_device_do_suspend(pdev, state);
5806
5807 return 0;
5808}
5809
5810int ata_pci_device_resume(struct pci_dev *pdev)
5811{
5812 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
5813
5814 ata_pci_device_do_resume(pdev);
5815 ata_host_set_resume(host_set);
5816 if (host_set->next)
5817 ata_host_set_resume(host_set->next);
5818
5740 return 0; 5819 return 0;
5741} 5820}
5742#endif /* CONFIG_PCI */ 5821#endif /* CONFIG_PCI */
@@ -5842,9 +5921,9 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5842 * Do not depend on ABI/API stability. 5921 * Do not depend on ABI/API stability.
5843 */ 5922 */
5844 5923
5845EXPORT_SYMBOL_GPL(sata_deb_timing_boot); 5924EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
5846EXPORT_SYMBOL_GPL(sata_deb_timing_eh); 5925EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
5847EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst); 5926EXPORT_SYMBOL_GPL(sata_deb_timing_long);
5848EXPORT_SYMBOL_GPL(ata_std_bios_param); 5927EXPORT_SYMBOL_GPL(ata_std_bios_param);
5849EXPORT_SYMBOL_GPL(ata_std_ports); 5928EXPORT_SYMBOL_GPL(ata_std_ports);
5850EXPORT_SYMBOL_GPL(ata_device_add); 5929EXPORT_SYMBOL_GPL(ata_device_add);
@@ -5916,6 +5995,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write);
5916EXPORT_SYMBOL_GPL(sata_scr_write_flush); 5995EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5917EXPORT_SYMBOL_GPL(ata_port_online); 5996EXPORT_SYMBOL_GPL(ata_port_online);
5918EXPORT_SYMBOL_GPL(ata_port_offline); 5997EXPORT_SYMBOL_GPL(ata_port_offline);
5998EXPORT_SYMBOL_GPL(ata_host_set_suspend);
5999EXPORT_SYMBOL_GPL(ata_host_set_resume);
5919EXPORT_SYMBOL_GPL(ata_id_string); 6000EXPORT_SYMBOL_GPL(ata_id_string);
5920EXPORT_SYMBOL_GPL(ata_id_c_string); 6001EXPORT_SYMBOL_GPL(ata_id_c_string);
5921EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6002EXPORT_SYMBOL_GPL(ata_scsi_simulate);
@@ -5930,14 +6011,14 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5930EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); 6011EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5931EXPORT_SYMBOL_GPL(ata_pci_init_one); 6012EXPORT_SYMBOL_GPL(ata_pci_init_one);
5932EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6013EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6014EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6015EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
5933EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6016EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5934EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6017EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5935EXPORT_SYMBOL_GPL(ata_pci_default_filter); 6018EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5936EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); 6019EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5937#endif /* CONFIG_PCI */ 6020#endif /* CONFIG_PCI */
5938 6021
5939EXPORT_SYMBOL_GPL(ata_device_suspend);
5940EXPORT_SYMBOL_GPL(ata_device_resume);
5941EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 6022EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5942EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 6023EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5943 6024
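
The host_set-level suspend/resume helpers exported above are meant to be consumed through a driver's PCI and SCSI glue. As a minimal, illustrative sketch (the foo_* names are placeholders, not part of this patch; the sata_sil hunk further down shows the real wiring), a libata low-level driver would hook the new entry points roughly like this:

#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

static struct scsi_host_template foo_sht = {
	/* ... the usual libata SHT fields ... */
	.suspend	= ata_scsi_device_suspend,	/* requests ATA_EH_SUSPEND for the device and waits for EH */
	.resume		= ata_scsi_device_resume,	/* requests ATA_EH_RESUME and returns immediately */
};

static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.id_table	= foo_pci_tbl,			/* driver's existing PCI ID table (placeholder) */
	.probe		= foo_init_one,			/* driver's existing probe routine (placeholder) */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,	/* suspend the host_set(s), then save PCI state and power down on PM_EVENT_SUSPEND */
	.resume		= ata_pci_device_resume,	/* power the PCI device back up, then resume the host_set(s) */
};
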
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index bf5a72aca8a4..4b6aa30f4d68 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -47,6 +47,8 @@
47 47
48static void __ata_port_freeze(struct ata_port *ap); 48static void __ata_port_freeze(struct ata_port *ap);
49static void ata_eh_finish(struct ata_port *ap); 49static void ata_eh_finish(struct ata_port *ap);
50static void ata_eh_handle_port_suspend(struct ata_port *ap);
51static void ata_eh_handle_port_resume(struct ata_port *ap);
50 52
51static void ata_ering_record(struct ata_ering *ering, int is_io, 53static void ata_ering_record(struct ata_ering *ering, int is_io,
52 unsigned int err_mask) 54 unsigned int err_mask)
@@ -190,7 +192,6 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
190void ata_scsi_error(struct Scsi_Host *host) 192void ata_scsi_error(struct Scsi_Host *host)
191{ 193{
192 struct ata_port *ap = ata_shost_to_port(host); 194 struct ata_port *ap = ata_shost_to_port(host);
193 spinlock_t *ap_lock = ap->lock;
194 int i, repeat_cnt = ATA_EH_MAX_REPEAT; 195 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
195 unsigned long flags; 196 unsigned long flags;
196 197
@@ -217,7 +218,7 @@ void ata_scsi_error(struct Scsi_Host *host)
217 struct scsi_cmnd *scmd, *tmp; 218 struct scsi_cmnd *scmd, *tmp;
218 int nr_timedout = 0; 219 int nr_timedout = 0;
219 220
220 spin_lock_irqsave(ap_lock, flags); 221 spin_lock_irqsave(ap->lock, flags);
221 222
222 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { 223 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
223 struct ata_queued_cmd *qc; 224 struct ata_queued_cmd *qc;
@@ -256,43 +257,49 @@ void ata_scsi_error(struct Scsi_Host *host)
256 if (nr_timedout) 257 if (nr_timedout)
257 __ata_port_freeze(ap); 258 __ata_port_freeze(ap);
258 259
259 spin_unlock_irqrestore(ap_lock, flags); 260 spin_unlock_irqrestore(ap->lock, flags);
260 } else 261 } else
261 spin_unlock_wait(ap_lock); 262 spin_unlock_wait(ap->lock);
262 263
263 repeat: 264 repeat:
264 /* invoke error handler */ 265 /* invoke error handler */
265 if (ap->ops->error_handler) { 266 if (ap->ops->error_handler) {
267 /* process port resume request */
268 ata_eh_handle_port_resume(ap);
269
266 /* fetch & clear EH info */ 270 /* fetch & clear EH info */
267 spin_lock_irqsave(ap_lock, flags); 271 spin_lock_irqsave(ap->lock, flags);
268 272
269 memset(&ap->eh_context, 0, sizeof(ap->eh_context)); 273 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
270 ap->eh_context.i = ap->eh_info; 274 ap->eh_context.i = ap->eh_info;
271 memset(&ap->eh_info, 0, sizeof(ap->eh_info)); 275 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
272 276
273 ap->flags |= ATA_FLAG_EH_IN_PROGRESS; 277 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
274 ap->flags &= ~ATA_FLAG_EH_PENDING; 278 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
275 279
276 spin_unlock_irqrestore(ap_lock, flags); 280 spin_unlock_irqrestore(ap->lock, flags);
277 281
278 /* invoke EH. if unloading, just finish failed qcs */ 282 /* invoke EH, skip if unloading or suspended */
279 if (!(ap->flags & ATA_FLAG_UNLOADING)) 283 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
280 ap->ops->error_handler(ap); 284 ap->ops->error_handler(ap);
281 else 285 else
282 ata_eh_finish(ap); 286 ata_eh_finish(ap);
283 287
288 /* process port suspend request */
289 ata_eh_handle_port_suspend(ap);
290
284 /* Exception might have happend after ->error_handler 291 /* Exception might have happend after ->error_handler
285 * recovered the port but before this point. Repeat 292 * recovered the port but before this point. Repeat
286 * EH in such case. 293 * EH in such case.
287 */ 294 */
288 spin_lock_irqsave(ap_lock, flags); 295 spin_lock_irqsave(ap->lock, flags);
289 296
290 if (ap->flags & ATA_FLAG_EH_PENDING) { 297 if (ap->pflags & ATA_PFLAG_EH_PENDING) {
291 if (--repeat_cnt) { 298 if (--repeat_cnt) {
292 ata_port_printk(ap, KERN_INFO, 299 ata_port_printk(ap, KERN_INFO,
293 "EH pending after completion, " 300 "EH pending after completion, "
294 "repeating EH (cnt=%d)\n", repeat_cnt); 301 "repeating EH (cnt=%d)\n", repeat_cnt);
295 spin_unlock_irqrestore(ap_lock, flags); 302 spin_unlock_irqrestore(ap->lock, flags);
296 goto repeat; 303 goto repeat;
297 } 304 }
298 ata_port_printk(ap, KERN_ERR, "EH pending after %d " 305 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
@@ -302,14 +309,14 @@ void ata_scsi_error(struct Scsi_Host *host)
302 /* this run is complete, make sure EH info is clear */ 309 /* this run is complete, make sure EH info is clear */
303 memset(&ap->eh_info, 0, sizeof(ap->eh_info)); 310 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
304 311
305 /* Clear host_eh_scheduled while holding ap_lock such 312 /* Clear host_eh_scheduled while holding ap->lock such
306 * that if exception occurs after this point but 313 * that if exception occurs after this point but
307 * before EH completion, SCSI midlayer will 314 * before EH completion, SCSI midlayer will
308 * re-initiate EH. 315 * re-initiate EH.
309 */ 316 */
310 host->host_eh_scheduled = 0; 317 host->host_eh_scheduled = 0;
311 318
312 spin_unlock_irqrestore(ap_lock, flags); 319 spin_unlock_irqrestore(ap->lock, flags);
313 } else { 320 } else {
314 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); 321 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
315 ap->ops->eng_timeout(ap); 322 ap->ops->eng_timeout(ap);
@@ -321,24 +328,23 @@ void ata_scsi_error(struct Scsi_Host *host)
321 scsi_eh_flush_done_q(&ap->eh_done_q); 328 scsi_eh_flush_done_q(&ap->eh_done_q);
322 329
323 /* clean up */ 330 /* clean up */
324 spin_lock_irqsave(ap_lock, flags); 331 spin_lock_irqsave(ap->lock, flags);
325 332
326 if (ap->flags & ATA_FLAG_LOADING) { 333 if (ap->pflags & ATA_PFLAG_LOADING)
327 ap->flags &= ~ATA_FLAG_LOADING; 334 ap->pflags &= ~ATA_PFLAG_LOADING;
328 } else { 335 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
329 if (ap->flags & ATA_FLAG_SCSI_HOTPLUG) 336 queue_work(ata_aux_wq, &ap->hotplug_task);
330 queue_work(ata_aux_wq, &ap->hotplug_task); 337
331 if (ap->flags & ATA_FLAG_RECOVERED) 338 if (ap->pflags & ATA_PFLAG_RECOVERED)
332 ata_port_printk(ap, KERN_INFO, "EH complete\n"); 339 ata_port_printk(ap, KERN_INFO, "EH complete\n");
333 }
334 340
335 ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED); 341 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
336 342
337 /* tell wait_eh that we're done */ 343 /* tell wait_eh that we're done */
338 ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS; 344 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
339 wake_up_all(&ap->eh_wait_q); 345 wake_up_all(&ap->eh_wait_q);
340 346
341 spin_unlock_irqrestore(ap_lock, flags); 347 spin_unlock_irqrestore(ap->lock, flags);
342 348
343 DPRINTK("EXIT\n"); 349 DPRINTK("EXIT\n");
344} 350}
@@ -360,7 +366,7 @@ void ata_port_wait_eh(struct ata_port *ap)
360 retry: 366 retry:
361 spin_lock_irqsave(ap->lock, flags); 367 spin_lock_irqsave(ap->lock, flags);
362 368
363 while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) { 369 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
364 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 370 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
365 spin_unlock_irqrestore(ap->lock, flags); 371 spin_unlock_irqrestore(ap->lock, flags);
366 schedule(); 372 schedule();
@@ -489,7 +495,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
489 WARN_ON(!ap->ops->error_handler); 495 WARN_ON(!ap->ops->error_handler);
490 496
491 qc->flags |= ATA_QCFLAG_FAILED; 497 qc->flags |= ATA_QCFLAG_FAILED;
492 qc->ap->flags |= ATA_FLAG_EH_PENDING; 498 qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
493 499
494 /* The following will fail if timeout has already expired. 500 /* The following will fail if timeout has already expired.
495 * ata_scsi_error() takes care of such scmds on EH entry. 501 * ata_scsi_error() takes care of such scmds on EH entry.
@@ -513,7 +519,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
513{ 519{
514 WARN_ON(!ap->ops->error_handler); 520 WARN_ON(!ap->ops->error_handler);
515 521
516 ap->flags |= ATA_FLAG_EH_PENDING; 522 ap->pflags |= ATA_PFLAG_EH_PENDING;
517 scsi_schedule_eh(ap->host); 523 scsi_schedule_eh(ap->host);
518 524
519 DPRINTK("port EH scheduled\n"); 525 DPRINTK("port EH scheduled\n");
@@ -578,7 +584,7 @@ static void __ata_port_freeze(struct ata_port *ap)
578 if (ap->ops->freeze) 584 if (ap->ops->freeze)
579 ap->ops->freeze(ap); 585 ap->ops->freeze(ap);
580 586
581 ap->flags |= ATA_FLAG_FROZEN; 587 ap->pflags |= ATA_PFLAG_FROZEN;
582 588
583 DPRINTK("ata%u port frozen\n", ap->id); 589 DPRINTK("ata%u port frozen\n", ap->id);
584} 590}
@@ -646,7 +652,7 @@ void ata_eh_thaw_port(struct ata_port *ap)
646 652
647 spin_lock_irqsave(ap->lock, flags); 653 spin_lock_irqsave(ap->lock, flags);
648 654
649 ap->flags &= ~ATA_FLAG_FROZEN; 655 ap->pflags &= ~ATA_PFLAG_FROZEN;
650 656
651 if (ap->ops->thaw) 657 if (ap->ops->thaw)
652 ap->ops->thaw(ap); 658 ap->ops->thaw(ap);
@@ -731,7 +737,7 @@ static void ata_eh_detach_dev(struct ata_device *dev)
731 737
732 if (ata_scsi_offline_dev(dev)) { 738 if (ata_scsi_offline_dev(dev)) {
733 dev->flags |= ATA_DFLAG_DETACHED; 739 dev->flags |= ATA_DFLAG_DETACHED;
734 ap->flags |= ATA_FLAG_SCSI_HOTPLUG; 740 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
735 } 741 }
736 742
737 /* clear per-dev EH actions */ 743 /* clear per-dev EH actions */
@@ -760,8 +766,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
760 unsigned long flags; 766 unsigned long flags;
761 767
762 spin_lock_irqsave(ap->lock, flags); 768 spin_lock_irqsave(ap->lock, flags);
769
763 ata_eh_clear_action(dev, &ap->eh_info, action); 770 ata_eh_clear_action(dev, &ap->eh_info, action);
764 ap->flags |= ATA_FLAG_RECOVERED; 771
772 if (!(ap->eh_context.i.flags & ATA_EHI_QUIET))
773 ap->pflags |= ATA_PFLAG_RECOVERED;
774
765 spin_unlock_irqrestore(ap->lock, flags); 775 spin_unlock_irqrestore(ap->lock, flags);
766} 776}
767 777
@@ -1027,7 +1037,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1027 int tag, rc; 1037 int tag, rc;
1028 1038
1029 /* if frozen, we can't do much */ 1039 /* if frozen, we can't do much */
1030 if (ap->flags & ATA_FLAG_FROZEN) 1040 if (ap->pflags & ATA_PFLAG_FROZEN)
1031 return; 1041 return;
1032 1042
1033 /* is it NCQ device error? */ 1043 /* is it NCQ device error? */
@@ -1275,6 +1285,9 @@ static void ata_eh_autopsy(struct ata_port *ap)
1275 1285
1276 DPRINTK("ENTER\n"); 1286 DPRINTK("ENTER\n");
1277 1287
1288 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1289 return;
1290
1278 /* obtain and analyze SError */ 1291 /* obtain and analyze SError */
1279 rc = sata_scr_read(ap, SCR_ERROR, &serror); 1292 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1280 if (rc == 0) { 1293 if (rc == 0) {
@@ -1327,7 +1340,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1327 } 1340 }
1328 1341
1329 /* enforce default EH actions */ 1342 /* enforce default EH actions */
1330 if (ap->flags & ATA_FLAG_FROZEN || 1343 if (ap->pflags & ATA_PFLAG_FROZEN ||
1331 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1344 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1332 action |= ATA_EH_SOFTRESET; 1345 action |= ATA_EH_SOFTRESET;
1333 else if (all_err_mask) 1346 else if (all_err_mask)
@@ -1346,7 +1359,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1346 1359
1347 /* record autopsy result */ 1360 /* record autopsy result */
1348 ehc->i.dev = failed_dev; 1361 ehc->i.dev = failed_dev;
1349 ehc->i.action = action; 1362 ehc->i.action |= action;
1350 1363
1351 DPRINTK("EXIT\n"); 1364 DPRINTK("EXIT\n");
1352} 1365}
@@ -1385,7 +1398,7 @@ static void ata_eh_report(struct ata_port *ap)
1385 return; 1398 return;
1386 1399
1387 frozen = ""; 1400 frozen = "";
1388 if (ap->flags & ATA_FLAG_FROZEN) 1401 if (ap->pflags & ATA_PFLAG_FROZEN)
1389 frozen = " frozen"; 1402 frozen = " frozen";
1390 1403
1391 if (ehc->i.dev) { 1404 if (ehc->i.dev) {
@@ -1465,7 +1478,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1465 struct ata_eh_context *ehc = &ap->eh_context; 1478 struct ata_eh_context *ehc = &ap->eh_context;
1466 unsigned int *classes = ehc->classes; 1479 unsigned int *classes = ehc->classes;
1467 int tries = ATA_EH_RESET_TRIES; 1480 int tries = ATA_EH_RESET_TRIES;
1468 int verbose = !(ap->flags & ATA_FLAG_LOADING); 1481 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
1469 unsigned int action; 1482 unsigned int action;
1470 ata_reset_fn_t reset; 1483 ata_reset_fn_t reset;
1471 int i, did_followup_srst, rc; 1484 int i, did_followup_srst, rc;
@@ -1605,7 +1618,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1605 dev = &ap->device[i]; 1618 dev = &ap->device[i];
1606 action = ata_eh_dev_action(dev); 1619 action = ata_eh_dev_action(dev);
1607 1620
1608 if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) { 1621 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
1609 if (ata_port_offline(ap)) { 1622 if (ata_port_offline(ap)) {
1610 rc = -EIO; 1623 rc = -EIO;
1611 break; 1624 break;
@@ -1636,7 +1649,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1636 } 1649 }
1637 1650
1638 spin_lock_irqsave(ap->lock, flags); 1651 spin_lock_irqsave(ap->lock, flags);
1639 ap->flags |= ATA_FLAG_SCSI_HOTPLUG; 1652 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1640 spin_unlock_irqrestore(ap->lock, flags); 1653 spin_unlock_irqrestore(ap->lock, flags);
1641 } 1654 }
1642 } 1655 }
@@ -1648,6 +1661,164 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1648 return rc; 1661 return rc;
1649} 1662}
1650 1663
1664/**
1665 * ata_eh_suspend - handle suspend EH action
1666 * @ap: target host port
1667 * @r_failed_dev: result parameter to indicate failing device
1668 *
1669 * Handle suspend EH action. Disk devices are spun down and
1670 * other types of devices are just marked suspended. Once
1671 * suspended, no EH action to the device is allowed until it is
1672 * resumed.
1673 *
1674 * LOCKING:
1675 * Kernel thread context (may sleep).
1676 *
1677 * RETURNS:
1678 * 0 on success, -errno otherwise
1679 */
1680static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
1681{
1682 struct ata_device *dev;
1683 int i, rc = 0;
1684
1685 DPRINTK("ENTER\n");
1686
1687 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1688 unsigned long flags;
1689 unsigned int action, err_mask;
1690
1691 dev = &ap->device[i];
1692 action = ata_eh_dev_action(dev);
1693
1694 if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
1695 continue;
1696
1697 WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
1698
1699 ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
1700
1701 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1702 /* flush cache */
1703 rc = ata_flush_cache(dev);
1704 if (rc)
1705 break;
1706
1707 /* spin down */
1708 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
1709 if (err_mask) {
1710 ata_dev_printk(dev, KERN_ERR, "failed to "
1711 "spin down (err_mask=0x%x)\n",
1712 err_mask);
1713 rc = -EIO;
1714 break;
1715 }
1716 }
1717
1718 spin_lock_irqsave(ap->lock, flags);
1719 dev->flags |= ATA_DFLAG_SUSPENDED;
1720 spin_unlock_irqrestore(ap->lock, flags);
1721
1722 ata_eh_done(ap, dev, ATA_EH_SUSPEND);
1723 }
1724
1725 if (rc)
1726 *r_failed_dev = dev;
1727
1728 DPRINTK("EXIT\n");
1729 return 0;
1730}
1731
1732/**
1733 * ata_eh_prep_resume - prep for resume EH action
1734 * @ap: target host port
1735 *
1736 * Clear SUSPENDED in preparation for scheduled resume actions.
1737 * This allows other parts of EH to access the devices being
1738 * resumed.
1739 *
1740 * LOCKING:
1741 * Kernel thread context (may sleep).
1742 */
1743static void ata_eh_prep_resume(struct ata_port *ap)
1744{
1745 struct ata_device *dev;
1746 unsigned long flags;
1747 int i;
1748
1749 DPRINTK("ENTER\n");
1750
1751 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1752 unsigned int action;
1753
1754 dev = &ap->device[i];
1755 action = ata_eh_dev_action(dev);
1756
1757 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1758 continue;
1759
1760 spin_lock_irqsave(ap->lock, flags);
1761 dev->flags &= ~ATA_DFLAG_SUSPENDED;
1762 spin_unlock_irqrestore(ap->lock, flags);
1763 }
1764
1765 DPRINTK("EXIT\n");
1766}
1767
1768/**
1769 * ata_eh_resume - handle resume EH action
1770 * @ap: target host port
1771 * @r_failed_dev: result parameter to indicate failing device
1772 *
1773 * Handle resume EH action. Target devices are already reset and
1774 * revalidated. Spinning up is the only operation left.
1775 *
1776 * LOCKING:
1777 * Kernel thread context (may sleep).
1778 *
1779 * RETURNS:
1780 * 0 on success, -errno otherwise
1781 */
1782static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
1783{
1784 struct ata_device *dev;
1785 int i, rc = 0;
1786
1787 DPRINTK("ENTER\n");
1788
1789 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1790 unsigned int action, err_mask;
1791
1792 dev = &ap->device[i];
1793 action = ata_eh_dev_action(dev);
1794
1795 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1796 continue;
1797
1798 ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
1799
1800 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1801 err_mask = ata_do_simple_cmd(dev,
1802 ATA_CMD_IDLEIMMEDIATE);
1803 if (err_mask) {
1804 ata_dev_printk(dev, KERN_ERR, "failed to "
1805 "spin up (err_mask=0x%x)\n",
1806 err_mask);
1807 rc = -EIO;
1808 break;
1809 }
1810 }
1811
1812 ata_eh_done(ap, dev, ATA_EH_RESUME);
1813 }
1814
1815 if (rc)
1816 *r_failed_dev = dev;
1817
1818 DPRINTK("EXIT\n");
1819 return 0;
1820}
1821
1651static int ata_port_nr_enabled(struct ata_port *ap) 1822static int ata_port_nr_enabled(struct ata_port *ap)
1652{ 1823{
1653 int i, cnt = 0; 1824 int i, cnt = 0;
@@ -1673,7 +1844,19 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
1673 struct ata_eh_context *ehc = &ap->eh_context; 1844 struct ata_eh_context *ehc = &ap->eh_context;
1674 int i; 1845 int i;
1675 1846
1676 if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap)) 1847 /* skip if all possible devices are suspended */
1848 for (i = 0; i < ata_port_max_devices(ap); i++) {
1849 struct ata_device *dev = &ap->device[i];
1850
1851 if (ata_dev_absent(dev) || ata_dev_ready(dev))
1852 break;
1853 }
1854
1855 if (i == ata_port_max_devices(ap))
1856 return 1;
1857
1858 /* always thaw frozen port and recover failed devices */
1859 if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap))
1677 return 0; 1860 return 0;
1678 1861
1679 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 1862 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
@@ -1744,9 +1927,12 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1744 rc = 0; 1927 rc = 0;
1745 1928
1746 /* if UNLOADING, finish immediately */ 1929 /* if UNLOADING, finish immediately */
1747 if (ap->flags & ATA_FLAG_UNLOADING) 1930 if (ap->pflags & ATA_PFLAG_UNLOADING)
1748 goto out; 1931 goto out;
1749 1932
1933 /* prep for resume */
1934 ata_eh_prep_resume(ap);
1935
1750 /* skip EH if possible. */ 1936 /* skip EH if possible. */
1751 if (ata_eh_skip_recovery(ap)) 1937 if (ata_eh_skip_recovery(ap))
1752 ehc->i.action = 0; 1938 ehc->i.action = 0;
@@ -1774,6 +1960,11 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1774 if (rc) 1960 if (rc)
1775 goto dev_fail; 1961 goto dev_fail;
1776 1962
1963 /* resume devices */
1964 rc = ata_eh_resume(ap, &dev);
1965 if (rc)
1966 goto dev_fail;
1967
1777 /* configure transfer mode if the port has been reset */ 1968 /* configure transfer mode if the port has been reset */
1778 if (ehc->i.flags & ATA_EHI_DID_RESET) { 1969 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1779 rc = ata_set_mode(ap, &dev); 1970 rc = ata_set_mode(ap, &dev);
@@ -1783,6 +1974,11 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1783 } 1974 }
1784 } 1975 }
1785 1976
1977 /* suspend devices */
1978 rc = ata_eh_suspend(ap, &dev);
1979 if (rc)
1980 goto dev_fail;
1981
1786 goto out; 1982 goto out;
1787 1983
1788 dev_fail: 1984 dev_fail:
@@ -1908,11 +2104,124 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
1908 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2104 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1909 ata_postreset_fn_t postreset) 2105 ata_postreset_fn_t postreset)
1910{ 2106{
1911 if (!(ap->flags & ATA_FLAG_LOADING)) { 2107 ata_eh_autopsy(ap);
1912 ata_eh_autopsy(ap); 2108 ata_eh_report(ap);
1913 ata_eh_report(ap);
1914 }
1915
1916 ata_eh_recover(ap, prereset, softreset, hardreset, postreset); 2109 ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
1917 ata_eh_finish(ap); 2110 ata_eh_finish(ap);
1918} 2111}
2112
2113/**
2114 * ata_eh_handle_port_suspend - perform port suspend operation
2115 * @ap: port to suspend
2116 *
2117 * Suspend @ap.
2118 *
2119 * LOCKING:
2120 * Kernel thread context (may sleep).
2121 */
2122static void ata_eh_handle_port_suspend(struct ata_port *ap)
2123{
2124 unsigned long flags;
2125 int rc = 0;
2126
2127 /* are we suspending? */
2128 spin_lock_irqsave(ap->lock, flags);
2129 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2130 ap->pm_mesg.event == PM_EVENT_ON) {
2131 spin_unlock_irqrestore(ap->lock, flags);
2132 return;
2133 }
2134 spin_unlock_irqrestore(ap->lock, flags);
2135
2136 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
2137
2138 /* suspend */
2139 ata_eh_freeze_port(ap);
2140
2141 if (ap->ops->port_suspend)
2142 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
2143
2144 /* report result */
2145 spin_lock_irqsave(ap->lock, flags);
2146
2147 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
2148 if (rc == 0)
2149 ap->pflags |= ATA_PFLAG_SUSPENDED;
2150 else
2151 ata_port_schedule_eh(ap);
2152
2153 if (ap->pm_result) {
2154 *ap->pm_result = rc;
2155 ap->pm_result = NULL;
2156 }
2157
2158 spin_unlock_irqrestore(ap->lock, flags);
2159
2160 return;
2161}
2162
2163/**
2164 * ata_eh_handle_port_resume - perform port resume operation
2165 * @ap: port to resume
2166 *
2167 * Resume @ap.
2168 *
2169 * This function also waits up to one second until all devices
2170 * hanging off this port request resume EH action. This is to
2171 * prevent invoking EH, and thus resetting, multiple times on resume.
2172 *
2173 * On DPM resume, where some devices might not be resumed
2174 * together, this may delay port resume by up to one second, but such
2175 * DPM resumes are rare and a one-second delay isn't too bad.
2176 *
2177 * LOCKING:
2178 * Kernel thread context (may sleep).
2179 */
2180static void ata_eh_handle_port_resume(struct ata_port *ap)
2181{
2182 unsigned long timeout;
2183 unsigned long flags;
2184 int i, rc = 0;
2185
2186 /* are we resuming? */
2187 spin_lock_irqsave(ap->lock, flags);
2188 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2189 ap->pm_mesg.event != PM_EVENT_ON) {
2190 spin_unlock_irqrestore(ap->lock, flags);
2191 return;
2192 }
2193 spin_unlock_irqrestore(ap->lock, flags);
2194
2195 /* spurious? */
2196 if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
2197 goto done;
2198
2199 if (ap->ops->port_resume)
2200 rc = ap->ops->port_resume(ap);
2201
2202 /* give devices time to request EH */
2203 timeout = jiffies + HZ; /* 1s max */
2204 while (1) {
2205 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2206 struct ata_device *dev = &ap->device[i];
2207 unsigned int action = ata_eh_dev_action(dev);
2208
2209 if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
2210 !(action & ATA_EH_RESUME))
2211 break;
2212 }
2213
2214 if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
2215 break;
2216 msleep(10);
2217 }
2218
2219 done:
2220 spin_lock_irqsave(ap->lock, flags);
2221 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
2222 if (ap->pm_result) {
2223 *ap->pm_result = rc;
2224 ap->pm_result = NULL;
2225 }
2226 spin_unlock_irqrestore(ap->lock, flags);
2227}
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 2915bca691e8..7ced41ecde86 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -397,20 +397,129 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
397 } 397 }
398} 398}
399 399
400int ata_scsi_device_resume(struct scsi_device *sdev) 400/**
401 * ata_scsi_device_suspend - suspend ATA device associated with sdev
402 * @sdev: the SCSI device to suspend
403 * @state: target power management state
404 *
405 * Request suspend EH action on the ATA device associated with
406 * @sdev and wait for the operation to complete.
407 *
408 * LOCKING:
409 * Kernel thread context (may sleep).
410 *
411 * RETURNS:
412 * 0 on success, -errno otherwise.
413 */
414int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
401{ 415{
402 struct ata_port *ap = ata_shost_to_port(sdev->host); 416 struct ata_port *ap = ata_shost_to_port(sdev->host);
403 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); 417 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
418 unsigned long flags;
419 unsigned int action;
420 int rc = 0;
421
422 if (!dev)
423 goto out;
424
425 spin_lock_irqsave(ap->lock, flags);
426
427 /* wait for the previous resume to complete */
428 while (dev->flags & ATA_DFLAG_SUSPENDED) {
429 spin_unlock_irqrestore(ap->lock, flags);
430 ata_port_wait_eh(ap);
431 spin_lock_irqsave(ap->lock, flags);
432 }
433
434 /* if @sdev is already detached, nothing to do */
435 if (sdev->sdev_state == SDEV_OFFLINE ||
436 sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
437 goto out_unlock;
438
439 /* request suspend */
440 action = ATA_EH_SUSPEND;
441 if (state.event != PM_EVENT_SUSPEND)
442 action |= ATA_EH_PM_FREEZE;
443 ap->eh_info.dev_action[dev->devno] |= action;
444 ap->eh_info.flags |= ATA_EHI_QUIET;
445 ata_port_schedule_eh(ap);
446
447 spin_unlock_irqrestore(ap->lock, flags);
448
449 /* wait for EH to do the job */
450 ata_port_wait_eh(ap);
451
452 spin_lock_irqsave(ap->lock, flags);
453
454 /* If @sdev is still attached but the associated ATA device
455 * isn't suspended, the operation failed.
456 */
457 if (sdev->sdev_state != SDEV_OFFLINE &&
458 sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
459 !(dev->flags & ATA_DFLAG_SUSPENDED))
460 rc = -EIO;
404 461
405 return ata_device_resume(dev); 462 out_unlock:
463 spin_unlock_irqrestore(ap->lock, flags);
464 out:
465 if (rc == 0)
466 sdev->sdev_gendev.power.power_state = state;
467 return rc;
406} 468}
407 469
408int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) 470/**
471 * ata_scsi_device_resume - resume ATA device associated with sdev
472 * @sdev: the SCSI device to resume
473 *
474 * Request resume EH action on the ATA device associated with
475 * @sdev and return immediately. This enables parallel
476 * wakeup/spinup of devices.
477 *
478 * LOCKING:
479 * Kernel thread context (may sleep).
480 *
481 * RETURNS:
482 * 0.
483 */
484int ata_scsi_device_resume(struct scsi_device *sdev)
409{ 485{
410 struct ata_port *ap = ata_shost_to_port(sdev->host); 486 struct ata_port *ap = ata_shost_to_port(sdev->host);
411 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); 487 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
488 struct ata_eh_info *ehi = &ap->eh_info;
489 unsigned long flags;
490 unsigned int action;
491
492 if (!dev)
493 goto out;
494
495 spin_lock_irqsave(ap->lock, flags);
496
497 /* if @sdev is already detached, nothing to do */
498 if (sdev->sdev_state == SDEV_OFFLINE ||
499 sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
500 goto out_unlock;
412 501
413 return ata_device_suspend(dev, state); 502 /* request resume */
503 action = ATA_EH_RESUME;
504 if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
505 __ata_ehi_hotplugged(ehi);
506 else
507 action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
508 ehi->dev_action[dev->devno] |= action;
509
510 /* We don't want autopsy and verbose EH messages. Disable
511 * those if we're the only device on this link.
512 */
513 if (ata_port_max_devices(ap) == 1)
514 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
515
516 ata_port_schedule_eh(ap);
517
518 out_unlock:
519 spin_unlock_irqrestore(ap->lock, flags);
520 out:
521 sdev->sdev_gendev.power.power_state = PMSG_ON;
522 return 0;
414} 523}
415 524
416/** 525/**
@@ -2930,7 +3039,7 @@ void ata_scsi_hotplug(void *data)
2930 struct ata_port *ap = data; 3039 struct ata_port *ap = data;
2931 int i; 3040 int i;
2932 3041
2933 if (ap->flags & ATA_FLAG_UNLOADING) { 3042 if (ap->pflags & ATA_PFLAG_UNLOADING) {
2934 DPRINTK("ENTER/EXIT - unloading\n"); 3043 DPRINTK("ENTER/EXIT - unloading\n");
2935 return; 3044 return;
2936 } 3045 }
@@ -3011,6 +3120,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3011 if (dev) { 3120 if (dev) {
3012 ap->eh_info.probe_mask |= 1 << dev->devno; 3121 ap->eh_info.probe_mask |= 1 << dev->devno;
3013 ap->eh_info.action |= ATA_EH_SOFTRESET; 3122 ap->eh_info.action |= ATA_EH_SOFTRESET;
3123 ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
3014 } else 3124 } else
3015 rc = -EINVAL; 3125 rc = -EINVAL;
3016 } 3126 }
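
Controllers that need chip-specific reinitialization after a suspend/resume cycle are expected to split the generic steps rather than use ata_pci_device_resume() wholesale; the sata_sil changes below register such a custom resume hook and factor the register setup into sil_init_controller() so it can be rerun. A rough sketch of that pattern (the foo_* names and the reinit call are placeholders, not taken from this patch):

static int foo_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);

	/* set the device to D0, restore PCI config, re-enable it and set bus mastering */
	ata_pci_device_do_resume(pdev);

	/* redo whatever controller setup probe did that was lost across D3 (placeholder) */
	foo_reinit_controller(pdev, host_set);

	/* ask EH to resume the devices hanging off each port */
	ata_host_set_resume(host_set);

	return 0;
}
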
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 7aabb45c35e5..d0a85073ebf7 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -109,6 +109,7 @@ enum {
109}; 109};
110 110
111static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 111static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
112static int sil_pci_device_resume(struct pci_dev *pdev);
112static void sil_dev_config(struct ata_port *ap, struct ata_device *dev); 113static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
113static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); 114static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
114static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 115static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
@@ -160,6 +161,8 @@ static struct pci_driver sil_pci_driver = {
160 .id_table = sil_pci_tbl, 161 .id_table = sil_pci_tbl,
161 .probe = sil_init_one, 162 .probe = sil_init_one,
162 .remove = ata_pci_remove_one, 163 .remove = ata_pci_remove_one,
164 .suspend = ata_pci_device_suspend,
165 .resume = sil_pci_device_resume,
163}; 166};
164 167
165static struct scsi_host_template sil_sht = { 168static struct scsi_host_template sil_sht = {
@@ -178,6 +181,8 @@ static struct scsi_host_template sil_sht = {
178 .slave_configure = ata_scsi_slave_config, 181 .slave_configure = ata_scsi_slave_config,
179 .slave_destroy = ata_scsi_slave_destroy, 182 .slave_destroy = ata_scsi_slave_destroy,
180 .bios_param = ata_std_bios_param, 183 .bios_param = ata_std_bios_param,
184 .suspend = ata_scsi_device_suspend,
185 .resume = ata_scsi_device_resume,
181}; 186};
182 187
183static const struct ata_port_operations sil_ops = { 188static const struct ata_port_operations sil_ops = {
@@ -370,7 +375,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
370 * during hardreset makes controllers with broken SIEN 375 * during hardreset makes controllers with broken SIEN
371 * repeat probing needlessly. 376 * repeat probing needlessly.
372 */ 377 */
373 if (!(ap->flags & ATA_FLAG_FROZEN)) { 378 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
374 ata_ehi_hotplugged(&ap->eh_info); 379 ata_ehi_hotplugged(&ap->eh_info);
375 ap->eh_info.serror |= serror; 380 ap->eh_info.serror |= serror;
376 } 381 }
@@ -561,6 +566,52 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
561 } 566 }
562} 567}
563 568
569static void sil_init_controller(struct pci_dev *pdev,
570 int n_ports, unsigned long host_flags,
571 void __iomem *mmio_base)
572{
573 u8 cls;
574 u32 tmp;
575 int i;
576
577 /* Initialize FIFO PCI bus arbitration */
578 cls = sil_get_device_cache_line(pdev);
579 if (cls) {
580 cls >>= 3;
581 cls++; /* cls = (line_size/8)+1 */
582 for (i = 0; i < n_ports; i++)
583 writew(cls << 8 | cls,
584 mmio_base + sil_port[i].fifo_cfg);
585 } else
586 dev_printk(KERN_WARNING, &pdev->dev,
587 "cache line size not set. Driver may not function\n");
588
589 /* Apply R_ERR on DMA activate FIS errata workaround */
590 if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
591 int cnt;
592
593 for (i = 0, cnt = 0; i < n_ports; i++) {
594 tmp = readl(mmio_base + sil_port[i].sfis_cfg);
595 if ((tmp & 0x3) != 0x01)
596 continue;
597 if (!cnt)
598 dev_printk(KERN_INFO, &pdev->dev,
599 "Applying R_ERR on DMA activate "
600 "FIS errata fix\n");
601 writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
602 cnt++;
603 }
604 }
605
606 if (n_ports == 4) {
607 /* flip the magic "make 4 ports work" bit */
608 tmp = readl(mmio_base + sil_port[2].bmdma);
609 if ((tmp & SIL_INTR_STEERING) == 0)
610 writel(tmp | SIL_INTR_STEERING,
611 mmio_base + sil_port[2].bmdma);
612 }
613}
614
564static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 615static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
565{ 616{
566 static int printed_version; 617 static int printed_version;
@@ -570,8 +621,6 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
570 int rc; 621 int rc;
571 unsigned int i; 622 unsigned int i;
572 int pci_dev_busy = 0; 623 int pci_dev_busy = 0;
573 u32 tmp;
574 u8 cls;
575 624
576 if (!printed_version++) 625 if (!printed_version++)
577 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 626 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -630,42 +679,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
630 ata_std_ports(&probe_ent->port[i]); 679 ata_std_ports(&probe_ent->port[i]);
631 } 680 }
632 681
633 /* Initialize FIFO PCI bus arbitration */ 682 sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
634 cls = sil_get_device_cache_line(pdev); 683 mmio_base);
635 if (cls) {
636 cls >>= 3;
637 cls++; /* cls = (line_size/8)+1 */
638 for (i = 0; i < probe_ent->n_ports; i++)
639 writew(cls << 8 | cls,
640 mmio_base + sil_port[i].fifo_cfg);
641 } else
642 dev_printk(KERN_WARNING, &pdev->dev,
643 "cache line size not set. Driver may not function\n");
644
645 /* Apply R_ERR on DMA activate FIS errata workaround */
646 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
647 int cnt;
648
649 for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) {
650 tmp = readl(mmio_base + sil_port[i].sfis_cfg);
651 if ((tmp & 0x3) != 0x01)
652 continue;
653 if (!cnt)
654 dev_printk(KERN_INFO, &pdev->dev,
655 "Applying R_ERR on DMA activate "
656 "FIS errata fix\n");
657 writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
658 cnt++;
659 }
660 }
661
662 if (ent->driver_data == sil_3114) {
663 /* flip the magic "make 4 ports work" bit */
664 tmp = readl(mmio_base + sil_port[2].bmdma);
665 if ((tmp & SIL_INTR_STEERING) == 0)
666 writel(tmp | SIL_INTR_STEERING,
667 mmio_base + sil_port[2].bmdma);
668 }
669 684
670 pci_set_master(pdev); 685 pci_set_master(pdev);
671 686
@@ -685,6 +700,18 @@ err_out:
685 return rc; 700 return rc;
686} 701}
687 702
703static int sil_pci_device_resume(struct pci_dev *pdev)
704{
705 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
706
707 ata_pci_device_do_resume(pdev);
708 sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
709 host_set->mmio_base);
710 ata_host_set_resume(host_set);
711
712 return 0;
713}
714
688static int __init sil_init(void) 715static int __init sil_init(void)
689{ 716{
690 return pci_module_init(&sil_pci_driver); 717 return pci_module_init(&sil_pci_driver);
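The new resume hook only works because the controller bring-up was first pulled out of sil_init_one() into sil_init_controller(), so probe and resume run the same code. A small stand-alone sketch of that shape; the FIFO word mirrors the (line_size/8)+1 arithmetic in the hunk above, everything else is invented:

/* Illustrative only: one bring-up helper shared by probe and resume. */
#include <stdint.h>
#include <stdio.h>

static uint16_t fifo_word_from_cacheline(uint8_t line_size)
{
        uint8_t cls = (uint8_t)((line_size >> 3) + 1);   /* cls = (line_size/8)+1 */
        return (uint16_t)(cls << 8 | cls);               /* same value for both thresholds */
}

static void controller_init(int n_ports, uint8_t line_size)
{
        uint16_t w = fifo_word_from_cacheline(line_size);

        for (int i = 0; i < n_ports; i++)
                printf("port %d: fifo_cfg <- 0x%04x\n", i, w);
}

int main(void)
{
        controller_init(4, 8);    /* probe-time bring-up */
        controller_init(4, 8);    /* resume re-runs exactly the same routine */
        return 0;
}

In the real driver a line size of zero means "not configured" and only produces a warning; the sketch skips that case.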
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 07a1c6a8a414..2e0f4a4076af 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -92,6 +92,7 @@ enum {
92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */ 92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */ 93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */ 94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
95 HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */
95 96
96 /* 97 /*
97 * Port registers 98 * Port registers
@@ -338,6 +339,7 @@ static int sil24_port_start(struct ata_port *ap);
338static void sil24_port_stop(struct ata_port *ap); 339static void sil24_port_stop(struct ata_port *ap);
339static void sil24_host_stop(struct ata_host_set *host_set); 340static void sil24_host_stop(struct ata_host_set *host_set);
340static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 341static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
342static int sil24_pci_device_resume(struct pci_dev *pdev);
341 343
342static const struct pci_device_id sil24_pci_tbl[] = { 344static const struct pci_device_id sil24_pci_tbl[] = {
343 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, 345 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
@@ -353,6 +355,8 @@ static struct pci_driver sil24_pci_driver = {
353 .id_table = sil24_pci_tbl, 355 .id_table = sil24_pci_tbl,
354 .probe = sil24_init_one, 356 .probe = sil24_init_one,
355 .remove = ata_pci_remove_one, /* safe? */ 357 .remove = ata_pci_remove_one, /* safe? */
358 .suspend = ata_pci_device_suspend,
359 .resume = sil24_pci_device_resume,
356}; 360};
357 361
358static struct scsi_host_template sil24_sht = { 362static struct scsi_host_template sil24_sht = {
@@ -372,6 +376,8 @@ static struct scsi_host_template sil24_sht = {
372 .slave_configure = ata_scsi_slave_config, 376 .slave_configure = ata_scsi_slave_config,
373 .slave_destroy = ata_scsi_slave_destroy, 377 .slave_destroy = ata_scsi_slave_destroy,
374 .bios_param = ata_std_bios_param, 378 .bios_param = ata_std_bios_param,
379 .suspend = ata_scsi_device_suspend,
380 .resume = ata_scsi_device_resume,
375}; 381};
376 382
377static const struct ata_port_operations sil24_ops = { 383static const struct ata_port_operations sil24_ops = {
@@ -607,7 +613,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
607 /* SStatus oscillates between zero and valid status after 613 /* SStatus oscillates between zero and valid status after
608 * DEV_RST, debounce it. 614 * DEV_RST, debounce it.
609 */ 615 */
610 rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst); 616 rc = sata_phy_debounce(ap, sata_deb_timing_long);
611 if (rc) { 617 if (rc) {
612 reason = "PHY debouncing failed"; 618 reason = "PHY debouncing failed";
613 goto err; 619 goto err;
@@ -988,6 +994,64 @@ static void sil24_host_stop(struct ata_host_set *host_set)
988 kfree(hpriv); 994 kfree(hpriv);
989} 995}
990 996
997static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
998 unsigned long host_flags,
999 void __iomem *host_base,
1000 void __iomem *port_base)
1001{
1002 u32 tmp;
1003 int i;
1004
1005 /* GPIO off */
1006 writel(0, host_base + HOST_FLASH_CMD);
1007
1008 /* clear global reset & mask interrupts during initialization */
1009 writel(0, host_base + HOST_CTRL);
1010
1011 /* init ports */
1012 for (i = 0; i < n_ports; i++) {
1013 void __iomem *port = port_base + i * PORT_REGS_SIZE;
1014
1015 /* Initial PHY setting */
1016 writel(0x20c, port + PORT_PHY_CFG);
1017
1018 /* Clear port RST */
1019 tmp = readl(port + PORT_CTRL_STAT);
1020 if (tmp & PORT_CS_PORT_RST) {
1021 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
1022 tmp = ata_wait_register(port + PORT_CTRL_STAT,
1023 PORT_CS_PORT_RST,
1024 PORT_CS_PORT_RST, 10, 100);
1025 if (tmp & PORT_CS_PORT_RST)
1026 dev_printk(KERN_ERR, &pdev->dev,
1027 "failed to clear port RST\n");
1028 }
1029
1030 /* Configure IRQ WoC */
1031 if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1032 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1033 else
1034 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1035
1036 /* Zero error counters. */
1037 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
1038 writel(0x8000, port + PORT_CRC_ERR_THRESH);
1039 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
1040 writel(0x0000, port + PORT_DECODE_ERR_CNT);
1041 writel(0x0000, port + PORT_CRC_ERR_CNT);
1042 writel(0x0000, port + PORT_HSHK_ERR_CNT);
1043
1044 /* Always use 64bit activation */
1045 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1046
1047 /* Clear port multiplier enable and resume bits */
1048 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
1049 }
1050
1051 /* Turn on interrupts */
1052 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
1053}
1054
991static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1055static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
992{ 1056{
993 static int printed_version = 0; 1057 static int printed_version = 0;
@@ -1076,9 +1140,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1076 } 1140 }
1077 } 1141 }
1078 1142
1079 /* GPIO off */
1080 writel(0, host_base + HOST_FLASH_CMD);
1081
1082 /* Apply workaround for completion IRQ loss on PCI-X errata */ 1143 /* Apply workaround for completion IRQ loss on PCI-X errata */
1083 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) { 1144 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1084 tmp = readl(host_base + HOST_CTRL); 1145 tmp = readl(host_base + HOST_CTRL);
@@ -1090,56 +1151,18 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1090 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; 1151 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1091 } 1152 }
1092 1153
1093 /* clear global reset & mask interrupts during initialization */
1094 writel(0, host_base + HOST_CTRL);
1095
1096 for (i = 0; i < probe_ent->n_ports; i++) { 1154 for (i = 0; i < probe_ent->n_ports; i++) {
1097 void __iomem *port = port_base + i * PORT_REGS_SIZE; 1155 unsigned long portu =
1098 unsigned long portu = (unsigned long)port; 1156 (unsigned long)port_base + i * PORT_REGS_SIZE;
1099 1157
1100 probe_ent->port[i].cmd_addr = portu; 1158 probe_ent->port[i].cmd_addr = portu;
1101 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; 1159 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
1102 1160
1103 ata_std_ports(&probe_ent->port[i]); 1161 ata_std_ports(&probe_ent->port[i]);
1104
1105 /* Initial PHY setting */
1106 writel(0x20c, port + PORT_PHY_CFG);
1107
1108 /* Clear port RST */
1109 tmp = readl(port + PORT_CTRL_STAT);
1110 if (tmp & PORT_CS_PORT_RST) {
1111 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
1112 tmp = ata_wait_register(port + PORT_CTRL_STAT,
1113 PORT_CS_PORT_RST,
1114 PORT_CS_PORT_RST, 10, 100);
1115 if (tmp & PORT_CS_PORT_RST)
1116 dev_printk(KERN_ERR, &pdev->dev,
1117 "failed to clear port RST\n");
1118 }
1119
1120 /* Configure IRQ WoC */
1121 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1122 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1123 else
1124 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1125
1126 /* Zero error counters. */
1127 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
1128 writel(0x8000, port + PORT_CRC_ERR_THRESH);
1129 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
1130 writel(0x0000, port + PORT_DECODE_ERR_CNT);
1131 writel(0x0000, port + PORT_CRC_ERR_CNT);
1132 writel(0x0000, port + PORT_HSHK_ERR_CNT);
1133
1134 /* Always use 64bit activation */
1135 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1136
1137 /* Clear port multiplier enable and resume bits */
1138 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
1139 } 1162 }
1140 1163
1141 /* Turn on interrupts */ 1164 sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
1142 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL); 1165 host_base, port_base);
1143 1166
1144 pci_set_master(pdev); 1167 pci_set_master(pdev);
1145 1168
@@ -1162,6 +1185,25 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1162 return rc; 1185 return rc;
1163} 1186}
1164 1187
1188static int sil24_pci_device_resume(struct pci_dev *pdev)
1189{
1190 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
1191 struct sil24_host_priv *hpriv = host_set->private_data;
1192
1193 ata_pci_device_do_resume(pdev);
1194
1195 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
1196 writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
1197
1198 sil24_init_controller(pdev, host_set->n_ports,
1199 host_set->ports[0]->flags,
1200 hpriv->host_base, hpriv->port_base);
1201
1202 ata_host_set_resume(host_set);
1203
1204 return 0;
1205}
1206
1165static int __init sil24_init(void) 1207static int __init sil24_init(void)
1166{ 1208{
1167 return pci_module_init(&sil24_pci_driver); 1209 return pci_module_init(&sil24_pci_driver);
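sil24_init_controller() clears port reset by writing the bit and then polling the status register until it drops, with an interval and a timeout (the ata_wait_register() call above). A self-contained imitation of that wait loop, with a faked status word standing in for the hardware:

/* Illustrative stand-in for the "poll a status bit with a timeout" pattern. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_port_status;           /* models PORT_CTRL_STAT */

static uint32_t read_status(void)
{
        static int reads;
        if (++reads == 3)                    /* pretend the hardware finishes... */
                fake_port_status &= ~0x1u;   /* ...by clearing the RST bit */
        return fake_port_status;
}

/* Poll until (status & mask) != val, or until timeout_ms expires.
 * Returns the last value observed, like the kernel helper it imitates. */
static uint32_t wait_status(uint32_t mask, uint32_t val,
                            unsigned interval_ms, unsigned timeout_ms)
{
        uint32_t tmp = read_status();
        unsigned waited = 0;

        while ((tmp & mask) == val && waited < timeout_ms) {
                struct timespec ts = { 0, (long)interval_ms * 1000000L };
                nanosleep(&ts, NULL);
                waited += interval_ms;
                tmp = read_status();
        }
        return tmp;
}

int main(void)
{
        fake_port_status = 0x1;                          /* RST asserted */
        uint32_t tmp = wait_status(0x1, 0x1, 10, 100);   /* wait for RST to clear */
        if (tmp & 0x1)
                fprintf(stderr, "failed to clear port RST\n");
        else
                printf("port reset cleared\n");
        return 0;
}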
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 916fe6fba756..ad37871594f5 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -297,7 +297,7 @@ static const struct ata_port_operations vsc_sata_ops = {
297 .bmdma_status = ata_bmdma_status, 297 .bmdma_status = ata_bmdma_status,
298 .qc_prep = ata_qc_prep, 298 .qc_prep = ata_qc_prep,
299 .qc_issue = ata_qc_issue_prot, 299 .qc_issue = ata_qc_issue_prot,
300 .data_xfer = ata_pio_data_xfer, 300 .data_xfer = ata_mmio_data_xfer,
301 .freeze = ata_bmdma_freeze, 301 .freeze = ata_bmdma_freeze,
302 .thaw = ata_bmdma_thaw, 302 .thaw = ata_bmdma_thaw,
303 .error_handler = ata_bmdma_error_handler, 303 .error_handler = ata_bmdma_error_handler,
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
index a7d664383dae..54c6b2adf7b7 100644
--- a/drivers/serial/at91_serial.c
+++ b/drivers/serial/at91_serial.c
@@ -41,6 +41,7 @@
41#include <asm/mach/serial_at91.h> 41#include <asm/mach/serial_at91.h>
42#include <asm/arch/board.h> 42#include <asm/arch/board.h>
43#include <asm/arch/system.h> 43#include <asm/arch/system.h>
44#include <asm/arch/gpio.h>
44 45
45#if defined(CONFIG_SERIAL_AT91_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 46#if defined(CONFIG_SERIAL_AT91_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
46#define SUPPORT_SYSRQ 47#define SUPPORT_SYSRQ
@@ -140,9 +141,9 @@ static void at91_set_mctrl(struct uart_port *port, u_int mctrl)
140 */ 141 */
141 if (port->mapbase == AT91_BASE_US0) { 142 if (port->mapbase == AT91_BASE_US0) {
142 if (mctrl & TIOCM_RTS) 143 if (mctrl & TIOCM_RTS)
143 at91_sys_write(AT91_PIOA + PIO_CODR, AT91_PA21_RTS0); 144 at91_set_gpio_value(AT91_PIN_PA21, 0);
144 else 145 else
145 at91_sys_write(AT91_PIOA + PIO_SODR, AT91_PA21_RTS0); 146 at91_set_gpio_value(AT91_PIN_PA21, 1);
146 } 147 }
147 } 148 }
148 149
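The serial driver now drives RTS through at91_set_gpio_value() instead of poking the PIO set/clear registers directly. A sketch of what such a helper hides, with invented register names standing in for the real PIO block:

/* Illustrative only: a set-value helper hiding a set/clear register pair. */
#include <stdint.h>
#include <stdio.h>

struct pio_bank { uint32_t sodr; uint32_t codr; };   /* "set" / "clear" output data */

static struct pio_bank fake_pio;                     /* stands in for memory-mapped I/O */

static void gpio_set_value(struct pio_bank *bank, unsigned pin, int value)
{
        if (value)
                bank->sodr = 1u << pin;   /* drive the line high */
        else
                bank->codr = 1u << pin;   /* drive the line low */
}

int main(void)
{
        gpio_set_value(&fake_pio, 21, 0);   /* assert RTS (driven low), as in the hunk */
        gpio_set_value(&fake_pio, 21, 1);   /* deassert RTS */
        printf("last writes: sodr=0x%08x codr=0x%08x\n", fake_pio.sodr, fake_pio.codr);
        return 0;
}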
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 5980c45998cc..89ba0df14c22 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -454,7 +454,7 @@ static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *ho
454 fl->fl_ops = &nlmclnt_lock_ops; 454 fl->fl_ops = &nlmclnt_lock_ops;
455} 455}
456 456
457static void do_vfs_lock(struct file_lock *fl) 457static int do_vfs_lock(struct file_lock *fl)
458{ 458{
459 int res = 0; 459 int res = 0;
460 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 460 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
@@ -467,9 +467,7 @@ static void do_vfs_lock(struct file_lock *fl)
467 default: 467 default:
468 BUG(); 468 BUG();
469 } 469 }
470 if (res < 0) 470 return res;
471 printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
472 __FUNCTION__);
473} 471}
474 472
475/* 473/*
@@ -498,6 +496,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
498 struct nlm_host *host = req->a_host; 496 struct nlm_host *host = req->a_host;
499 struct nlm_res *resp = &req->a_res; 497 struct nlm_res *resp = &req->a_res;
500 struct nlm_wait *block = NULL; 498 struct nlm_wait *block = NULL;
499 unsigned char fl_flags = fl->fl_flags;
501 int status = -ENOLCK; 500 int status = -ENOLCK;
502 501
503 if (!host->h_monitored && nsm_monitor(host) < 0) { 502 if (!host->h_monitored && nsm_monitor(host) < 0) {
@@ -505,6 +504,10 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
505 host->h_name); 504 host->h_name);
506 goto out; 505 goto out;
507 } 506 }
507 fl->fl_flags |= FL_ACCESS;
508 status = do_vfs_lock(fl);
509 if (status < 0)
510 goto out;
508 511
509 block = nlmclnt_prepare_block(host, fl); 512 block = nlmclnt_prepare_block(host, fl);
510again: 513again:
@@ -539,9 +542,10 @@ again:
539 up_read(&host->h_rwsem); 542 up_read(&host->h_rwsem);
540 goto again; 543 goto again;
541 } 544 }
542 fl->fl_flags |= FL_SLEEP;
543 /* Ensure the resulting lock will get added to granted list */ 545 /* Ensure the resulting lock will get added to granted list */
544 do_vfs_lock(fl); 546 fl->fl_flags = fl_flags | FL_SLEEP;
547 if (do_vfs_lock(fl) < 0)
548 printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
545 up_read(&host->h_rwsem); 549 up_read(&host->h_rwsem);
546 } 550 }
547 status = nlm_stat_to_errno(resp->status); 551 status = nlm_stat_to_errno(resp->status);
@@ -552,6 +556,7 @@ out_unblock:
552 nlmclnt_cancel(host, req->a_args.block, fl); 556 nlmclnt_cancel(host, req->a_args.block, fl);
553out: 557out:
554 nlm_release_call(req); 558 nlm_release_call(req);
559 fl->fl_flags = fl_flags;
555 return status; 560 return status;
556} 561}
557 562
@@ -606,15 +611,19 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
606{ 611{
607 struct nlm_host *host = req->a_host; 612 struct nlm_host *host = req->a_host;
608 struct nlm_res *resp = &req->a_res; 613 struct nlm_res *resp = &req->a_res;
609 int status; 614 int status = 0;
610 615
611 /* 616 /*
612 * Note: the server is supposed to either grant us the unlock 617 * Note: the server is supposed to either grant us the unlock
613 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either 618 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
614 * case, we want to unlock. 619 * case, we want to unlock.
615 */ 620 */
621 fl->fl_flags |= FL_EXISTS;
616 down_read(&host->h_rwsem); 622 down_read(&host->h_rwsem);
617 do_vfs_lock(fl); 623 if (do_vfs_lock(fl) == -ENOENT) {
624 up_read(&host->h_rwsem);
625 goto out;
626 }
618 up_read(&host->h_rwsem); 627 up_read(&host->h_rwsem);
619 628
620 if (req->a_flags & RPC_TASK_ASYNC) 629 if (req->a_flags & RPC_TASK_ASYNC)
@@ -624,7 +633,6 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
624 if (status < 0) 633 if (status < 0)
625 goto out; 634 goto out;
626 635
627 status = 0;
628 if (resp->status == NLM_LCK_GRANTED) 636 if (resp->status == NLM_LCK_GRANTED)
629 goto out; 637 goto out;
630 638
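The locking path above now probes the local lock table with FL_ACCESS before involving the server, and restores the caller's fl_flags on every exit. A compact user-space sketch of that flow; the flag values and the one-entry "lock table" are made up:

/* Sketch of the flow above (not the NLM client itself): probe locally for a
 * conflict before talking to the server, and restore the caller's flags. */
#include <stdio.h>

#define FL_ACCESS  0x1   /* hypothetical: test only, record nothing */
#define FL_SLEEP   0x2   /* hypothetical: willing to block */

struct lockreq { unsigned flags; long start, end; };

/* One pre-existing lock stands in for the local lock table. */
static const struct lockreq existing = { 0, 100, 199 };

static int local_lock(struct lockreq *rq)
{
        int conflict = !(rq->end < existing.start || rq->start > existing.end);

        if (conflict)
                return -1;                   /* would have to wait */
        if (rq->flags & FL_ACCESS)
                return 0;                    /* probe only: do not record */
        /* ... record the lock locally ... */
        return 0;
}

static int client_lock(struct lockreq *rq)
{
        unsigned saved = rq->flags;
        int status;

        rq->flags |= FL_ACCESS;              /* fail fast on local conflicts */
        status = local_lock(rq);
        if (status < 0)
                goto out;

        /* ... the server grants the lock here ... */

        rq->flags = saved | FL_SLEEP;        /* now record it for real */
        if (local_lock(rq) < 0)
                fprintf(stderr, "local state out of sync with lock manager\n");
        status = 0;
out:
        rq->flags = saved;                   /* always restore the caller's flags */
        return status;
}

int main(void)
{
        struct lockreq rq = { 0, 200, 250 };
        printf("lock %s\n", client_lock(&rq) == 0 ? "granted" : "denied");
        return 0;
}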
diff --git a/fs/locks.c b/fs/locks.c
index 1ad29c9b6252..b0b41a64e10b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -725,6 +725,10 @@ next_task:
725/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks 725/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
726 * at the head of the list, but that's secret knowledge known only to 726 * at the head of the list, but that's secret knowledge known only to
727 * flock_lock_file and posix_lock_file. 727 * flock_lock_file and posix_lock_file.
728 *
729 * Note that if called with an FL_EXISTS argument, the caller may determine
730 * whether or not a lock was successfully freed by testing the return
731 * value for -ENOENT.
728 */ 732 */
729static int flock_lock_file(struct file *filp, struct file_lock *request) 733static int flock_lock_file(struct file *filp, struct file_lock *request)
730{ 734{
@@ -735,6 +739,8 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
735 int found = 0; 739 int found = 0;
736 740
737 lock_kernel(); 741 lock_kernel();
742 if (request->fl_flags & FL_ACCESS)
743 goto find_conflict;
738 for_each_lock(inode, before) { 744 for_each_lock(inode, before) {
739 struct file_lock *fl = *before; 745 struct file_lock *fl = *before;
740 if (IS_POSIX(fl)) 746 if (IS_POSIX(fl))
@@ -750,8 +756,11 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
750 break; 756 break;
751 } 757 }
752 758
753 if (request->fl_type == F_UNLCK) 759 if (request->fl_type == F_UNLCK) {
760 if ((request->fl_flags & FL_EXISTS) && !found)
761 error = -ENOENT;
754 goto out; 762 goto out;
763 }
755 764
756 error = -ENOMEM; 765 error = -ENOMEM;
757 new_fl = locks_alloc_lock(); 766 new_fl = locks_alloc_lock();
@@ -764,6 +773,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
764 if (found) 773 if (found)
765 cond_resched(); 774 cond_resched();
766 775
776find_conflict:
767 for_each_lock(inode, before) { 777 for_each_lock(inode, before) {
768 struct file_lock *fl = *before; 778 struct file_lock *fl = *before;
769 if (IS_POSIX(fl)) 779 if (IS_POSIX(fl))
@@ -777,6 +787,8 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
777 locks_insert_block(fl, request); 787 locks_insert_block(fl, request);
778 goto out; 788 goto out;
779 } 789 }
790 if (request->fl_flags & FL_ACCESS)
791 goto out;
780 locks_copy_lock(new_fl, request); 792 locks_copy_lock(new_fl, request);
781 locks_insert_lock(&inode->i_flock, new_fl); 793 locks_insert_lock(&inode->i_flock, new_fl);
782 new_fl = NULL; 794 new_fl = NULL;
@@ -948,8 +960,11 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
948 960
949 error = 0; 961 error = 0;
950 if (!added) { 962 if (!added) {
951 if (request->fl_type == F_UNLCK) 963 if (request->fl_type == F_UNLCK) {
964 if (request->fl_flags & FL_EXISTS)
965 error = -ENOENT;
952 goto out; 966 goto out;
967 }
953 968
954 if (!new_fl) { 969 if (!new_fl) {
955 error = -ENOLCK; 970 error = -ENOLCK;
@@ -996,6 +1011,10 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
996 * Add a POSIX style lock to a file. 1011 * Add a POSIX style lock to a file.
997 * We merge adjacent & overlapping locks whenever possible. 1012 * We merge adjacent & overlapping locks whenever possible.
998 * POSIX locks are sorted by owner task, then by starting address 1013 * POSIX locks are sorted by owner task, then by starting address
1014 *
1015 * Note that if called with an FL_EXISTS argument, the caller may determine
1016 * whether or not a lock was successfully freed by testing the return
1017 * value for -ENOENT.
999 */ 1018 */
1000int posix_lock_file(struct file *filp, struct file_lock *fl) 1019int posix_lock_file(struct file *filp, struct file_lock *fl)
1001{ 1020{
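The new FL_EXISTS convention lets an unlock caller distinguish "freed a lock" from "there was nothing to free" by checking for -ENOENT, which is what the lockd and NFSv4 callers rely on. A toy lock table showing the same return convention:

/* Toy model of the FL_EXISTS idea: an unlock that removed nothing reports
 * -ENOENT, so callers can skip follow-up work (all names are made up). */
#include <errno.h>
#include <stdio.h>

#define NLOCKS 4

struct table { int owner[NLOCKS]; int used[NLOCKS]; };

static int unlock_owner(struct table *t, int owner, int want_exists)
{
        int found = 0;

        for (int i = 0; i < NLOCKS; i++) {
                if (t->used[i] && t->owner[i] == owner) {
                        t->used[i] = 0;
                        found = 1;
                }
        }
        if (!found && want_exists)
                return -ENOENT;              /* nothing was actually freed */
        return 0;
}

int main(void)
{
        struct table t = { { 1 }, { 1 } };   /* one lock, held by owner 1 */

        if (unlock_owner(&t, 2, 1) == -ENOENT)
                printf("owner 2 held nothing: no RPC needed\n");
        if (unlock_owner(&t, 1, 1) == 0)
                printf("owner 1's lock freed: tell the server\n");
        return 0;
}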
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 3ddda6f7ecc2..e7ffb4deb3e5 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -690,7 +690,9 @@ int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
690 goto out_force; 690 goto out_force;
691 /* This is an open(2) */ 691 /* This is an open(2) */
692 if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 && 692 if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 &&
693 !(server->flags & NFS_MOUNT_NOCTO)) 693 !(server->flags & NFS_MOUNT_NOCTO) &&
694 (S_ISREG(inode->i_mode) ||
695 S_ISDIR(inode->i_mode)))
694 goto out_force; 696 goto out_force;
695 } 697 }
696 return nfs_revalidate_inode(server, inode); 698 return nfs_revalidate_inode(server, inode);
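The dir.c change restricts the forced open(2) revalidation to regular files and directories, and still honors the nocto mount option. A tiny stand-alone version of that decision; the mount-flag name is a placeholder, only the S_ISREG/S_ISDIR test comes from the hunk:

/* Sketch of the open-time revalidation decision above. */
#include <stdio.h>
#include <sys/stat.h>

#define MOUNT_NOCTO 0x1     /* hypothetical "no close-to-open" mount flag */

static int force_revalidate_on_open(mode_t mode, unsigned mount_flags)
{
        if (mount_flags & MOUNT_NOCTO)
                return 0;                       /* admin opted out of CTO checks */
        return S_ISREG(mode) || S_ISDIR(mode);  /* only plain files and directories */
}

int main(void)
{
        printf("regular file: %d\n", force_revalidate_on_open(S_IFREG | 0644, 0));
        printf("device node:  %d\n", force_revalidate_on_open(S_IFCHR | 0600, 0));
        printf("nocto mount:  %d\n", force_revalidate_on_open(S_IFREG | 0644, MOUNT_NOCTO));
        return 0;
}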
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 4cdd1b499e35..fecd3b095deb 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -67,25 +67,19 @@ struct nfs_direct_req {
67 struct kref kref; /* release manager */ 67 struct kref kref; /* release manager */
68 68
69 /* I/O parameters */ 69 /* I/O parameters */
70 struct list_head list, /* nfs_read/write_data structs */
71 rewrite_list; /* saved nfs_write_data structs */
72 struct nfs_open_context *ctx; /* file open context info */ 70 struct nfs_open_context *ctx; /* file open context info */
73 struct kiocb * iocb; /* controlling i/o request */ 71 struct kiocb * iocb; /* controlling i/o request */
74 struct inode * inode; /* target file of i/o */ 72 struct inode * inode; /* target file of i/o */
75 unsigned long user_addr; /* location of user's buffer */
76 size_t user_count; /* total bytes to move */
77 loff_t pos; /* starting offset in file */
78 struct page ** pages; /* pages in our buffer */
79 unsigned int npages; /* count of pages */
80 73
81 /* completion state */ 74 /* completion state */
75 atomic_t io_count; /* i/os we're waiting for */
82 spinlock_t lock; /* protect completion state */ 76 spinlock_t lock; /* protect completion state */
83 int outstanding; /* i/os we're waiting for */
84 ssize_t count, /* bytes actually processed */ 77 ssize_t count, /* bytes actually processed */
85 error; /* any reported error */ 78 error; /* any reported error */
86 struct completion completion; /* wait for i/o completion */ 79 struct completion completion; /* wait for i/o completion */
87 80
88 /* commit state */ 81 /* commit state */
82 struct list_head rewrite_list; /* saved nfs_write_data structs */
89 struct nfs_write_data * commit_data; /* special write_data for commits */ 83 struct nfs_write_data * commit_data; /* special write_data for commits */
90 int flags; 84 int flags;
91#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */ 85#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
@@ -93,8 +87,37 @@ struct nfs_direct_req {
93 struct nfs_writeverf verf; /* unstable write verifier */ 87 struct nfs_writeverf verf; /* unstable write verifier */
94}; 88};
95 89
96static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
97static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode); 90static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
91static const struct rpc_call_ops nfs_write_direct_ops;
92
93static inline void get_dreq(struct nfs_direct_req *dreq)
94{
95 atomic_inc(&dreq->io_count);
96}
97
98static inline int put_dreq(struct nfs_direct_req *dreq)
99{
100 return atomic_dec_and_test(&dreq->io_count);
101}
102
103/*
104 * "size" is never larger than rsize or wsize.
105 */
106static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
107{
108 int page_count;
109
110 page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
111 page_count -= user_addr >> PAGE_SHIFT;
112 BUG_ON(page_count < 0);
113
114 return page_count;
115}
116
117static inline unsigned int nfs_max_pages(unsigned int size)
118{
119 return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
120}
98 121
99/** 122/**
100 * nfs_direct_IO - NFS address space operation for direct I/O 123 * nfs_direct_IO - NFS address space operation for direct I/O
@@ -118,50 +141,21 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
118 return -EINVAL; 141 return -EINVAL;
119} 142}
120 143
121static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty) 144static void nfs_direct_dirty_pages(struct page **pages, int npages)
122{ 145{
123 int i; 146 int i;
124 for (i = 0; i < npages; i++) { 147 for (i = 0; i < npages; i++) {
125 struct page *page = pages[i]; 148 struct page *page = pages[i];
126 if (do_dirty && !PageCompound(page)) 149 if (!PageCompound(page))
127 set_page_dirty_lock(page); 150 set_page_dirty_lock(page);
128 page_cache_release(page);
129 } 151 }
130 kfree(pages);
131} 152}
132 153
133static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages) 154static void nfs_direct_release_pages(struct page **pages, int npages)
134{ 155{
135 int result = -ENOMEM; 156 int i;
136 unsigned long page_count; 157 for (i = 0; i < npages; i++)
137 size_t array_size; 158 page_cache_release(pages[i]);
138
139 page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
140 page_count -= user_addr >> PAGE_SHIFT;
141
142 array_size = (page_count * sizeof(struct page *));
143 *pages = kmalloc(array_size, GFP_KERNEL);
144 if (*pages) {
145 down_read(&current->mm->mmap_sem);
146 result = get_user_pages(current, current->mm, user_addr,
147 page_count, (rw == READ), 0,
148 *pages, NULL);
149 up_read(&current->mm->mmap_sem);
150 if (result != page_count) {
151 /*
152 * If we got fewer pages than expected from
153 * get_user_pages(), the user buffer runs off the
154 * end of a mapping; return EFAULT.
155 */
156 if (result >= 0) {
157 nfs_free_user_pages(*pages, result, 0);
158 result = -EFAULT;
159 } else
160 kfree(*pages);
161 *pages = NULL;
162 }
163 }
164 return result;
165} 159}
166 160
167static inline struct nfs_direct_req *nfs_direct_req_alloc(void) 161static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
@@ -173,13 +167,13 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
173 return NULL; 167 return NULL;
174 168
175 kref_init(&dreq->kref); 169 kref_init(&dreq->kref);
170 kref_get(&dreq->kref);
176 init_completion(&dreq->completion); 171 init_completion(&dreq->completion);
177 INIT_LIST_HEAD(&dreq->list);
178 INIT_LIST_HEAD(&dreq->rewrite_list); 172 INIT_LIST_HEAD(&dreq->rewrite_list);
179 dreq->iocb = NULL; 173 dreq->iocb = NULL;
180 dreq->ctx = NULL; 174 dreq->ctx = NULL;
181 spin_lock_init(&dreq->lock); 175 spin_lock_init(&dreq->lock);
182 dreq->outstanding = 0; 176 atomic_set(&dreq->io_count, 0);
183 dreq->count = 0; 177 dreq->count = 0;
184 dreq->error = 0; 178 dreq->error = 0;
185 dreq->flags = 0; 179 dreq->flags = 0;
@@ -220,18 +214,11 @@ out:
220} 214}
221 215
222/* 216/*
223 * We must hold a reference to all the pages in this direct read request 217 * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
224 * until the RPCs complete. This could be long *after* we are woken up in 218 * the iocb is still valid here if this is a synchronous request.
225 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
226 *
227 * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
228 * can't trust the iocb is still valid here if this is a synchronous
229 * request. If the waiter is woken prematurely, the iocb is long gone.
230 */ 219 */
231static void nfs_direct_complete(struct nfs_direct_req *dreq) 220static void nfs_direct_complete(struct nfs_direct_req *dreq)
232{ 221{
233 nfs_free_user_pages(dreq->pages, dreq->npages, 1);
234
235 if (dreq->iocb) { 222 if (dreq->iocb) {
236 long res = (long) dreq->error; 223 long res = (long) dreq->error;
237 if (!res) 224 if (!res)
@@ -244,48 +231,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
244} 231}
245 232
246/* 233/*
247 * Note we also set the number of requests we have in the dreq when we are 234 * We must hold a reference to all the pages in this direct read request
248 * done. This prevents races with I/O completion so we will always wait 235 * until the RPCs complete. This could be long *after* we are woken up in
249 * until all requests have been dispatched and completed. 236 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
250 */ 237 */
251static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
252{
253 struct list_head *list;
254 struct nfs_direct_req *dreq;
255 unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
256
257 dreq = nfs_direct_req_alloc();
258 if (!dreq)
259 return NULL;
260
261 list = &dreq->list;
262 for(;;) {
263 struct nfs_read_data *data = nfs_readdata_alloc(rpages);
264
265 if (unlikely(!data)) {
266 while (!list_empty(list)) {
267 data = list_entry(list->next,
268 struct nfs_read_data, pages);
269 list_del(&data->pages);
270 nfs_readdata_free(data);
271 }
272 kref_put(&dreq->kref, nfs_direct_req_release);
273 return NULL;
274 }
275
276 INIT_LIST_HEAD(&data->pages);
277 list_add(&data->pages, list);
278
279 data->req = (struct nfs_page *) dreq;
280 dreq->outstanding++;
281 if (nbytes <= rsize)
282 break;
283 nbytes -= rsize;
284 }
285 kref_get(&dreq->kref);
286 return dreq;
287}
288
289static void nfs_direct_read_result(struct rpc_task *task, void *calldata) 238static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
290{ 239{
291 struct nfs_read_data *data = calldata; 240 struct nfs_read_data *data = calldata;
@@ -294,6 +243,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
294 if (nfs_readpage_result(task, data) != 0) 243 if (nfs_readpage_result(task, data) != 0)
295 return; 244 return;
296 245
246 nfs_direct_dirty_pages(data->pagevec, data->npages);
247 nfs_direct_release_pages(data->pagevec, data->npages);
248
297 spin_lock(&dreq->lock); 249 spin_lock(&dreq->lock);
298 250
299 if (likely(task->tk_status >= 0)) 251 if (likely(task->tk_status >= 0))
@@ -301,13 +253,10 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
301 else 253 else
302 dreq->error = task->tk_status; 254 dreq->error = task->tk_status;
303 255
304 if (--dreq->outstanding) {
305 spin_unlock(&dreq->lock);
306 return;
307 }
308
309 spin_unlock(&dreq->lock); 256 spin_unlock(&dreq->lock);
310 nfs_direct_complete(dreq); 257
258 if (put_dreq(dreq))
259 nfs_direct_complete(dreq);
311} 260}
312 261
313static const struct rpc_call_ops nfs_read_direct_ops = { 262static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -316,41 +265,60 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
316}; 265};
317 266
318/* 267/*
319 * For each nfs_read_data struct that was allocated on the list, dispatch 268 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
320 * an NFS READ operation 269 * operation. If nfs_readdata_alloc() or get_user_pages() fails,
270 * bail and stop sending more reads. Read length accounting is
271 * handled automatically by nfs_direct_read_result(). Otherwise, if
272 * no requests have been sent, just return an error.
321 */ 273 */
322static void nfs_direct_read_schedule(struct nfs_direct_req *dreq) 274static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
323{ 275{
324 struct nfs_open_context *ctx = dreq->ctx; 276 struct nfs_open_context *ctx = dreq->ctx;
325 struct inode *inode = ctx->dentry->d_inode; 277 struct inode *inode = ctx->dentry->d_inode;
326 struct list_head *list = &dreq->list;
327 struct page **pages = dreq->pages;
328 size_t count = dreq->user_count;
329 loff_t pos = dreq->pos;
330 size_t rsize = NFS_SERVER(inode)->rsize; 278 size_t rsize = NFS_SERVER(inode)->rsize;
331 unsigned int curpage, pgbase; 279 unsigned int rpages = nfs_max_pages(rsize);
280 unsigned int pgbase;
281 int result;
282 ssize_t started = 0;
283
284 get_dreq(dreq);
332 285
333 curpage = 0; 286 pgbase = user_addr & ~PAGE_MASK;
334 pgbase = dreq->user_addr & ~PAGE_MASK;
335 do { 287 do {
336 struct nfs_read_data *data; 288 struct nfs_read_data *data;
337 size_t bytes; 289 size_t bytes;
338 290
291 result = -ENOMEM;
292 data = nfs_readdata_alloc(rpages);
293 if (unlikely(!data))
294 break;
295
339 bytes = rsize; 296 bytes = rsize;
340 if (count < rsize) 297 if (count < rsize)
341 bytes = count; 298 bytes = count;
342 299
343 BUG_ON(list_empty(list)); 300 data->npages = nfs_direct_count_pages(user_addr, bytes);
344 data = list_entry(list->next, struct nfs_read_data, pages); 301 down_read(&current->mm->mmap_sem);
345 list_del_init(&data->pages); 302 result = get_user_pages(current, current->mm, user_addr,
303 data->npages, 1, 0, data->pagevec, NULL);
304 up_read(&current->mm->mmap_sem);
305 if (unlikely(result < data->npages)) {
306 if (result > 0)
307 nfs_direct_release_pages(data->pagevec, result);
308 nfs_readdata_release(data);
309 break;
310 }
311
312 get_dreq(dreq);
346 313
314 data->req = (struct nfs_page *) dreq;
347 data->inode = inode; 315 data->inode = inode;
348 data->cred = ctx->cred; 316 data->cred = ctx->cred;
349 data->args.fh = NFS_FH(inode); 317 data->args.fh = NFS_FH(inode);
350 data->args.context = ctx; 318 data->args.context = ctx;
351 data->args.offset = pos; 319 data->args.offset = pos;
352 data->args.pgbase = pgbase; 320 data->args.pgbase = pgbase;
353 data->args.pages = &pages[curpage]; 321 data->args.pages = data->pagevec;
354 data->args.count = bytes; 322 data->args.count = bytes;
355 data->res.fattr = &data->fattr; 323 data->res.fattr = &data->fattr;
356 data->res.eof = 0; 324 data->res.eof = 0;
@@ -373,33 +341,35 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
373 bytes, 341 bytes,
374 (unsigned long long)data->args.offset); 342 (unsigned long long)data->args.offset);
375 343
344 started += bytes;
345 user_addr += bytes;
376 pos += bytes; 346 pos += bytes;
377 pgbase += bytes; 347 pgbase += bytes;
378 curpage += pgbase >> PAGE_SHIFT;
379 pgbase &= ~PAGE_MASK; 348 pgbase &= ~PAGE_MASK;
380 349
381 count -= bytes; 350 count -= bytes;
382 } while (count != 0); 351 } while (count != 0);
383 BUG_ON(!list_empty(list)); 352
353 if (put_dreq(dreq))
354 nfs_direct_complete(dreq);
355
356 if (started)
357 return 0;
358 return result < 0 ? (ssize_t) result : -EFAULT;
384} 359}
385 360
386static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages) 361static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
387{ 362{
388 ssize_t result; 363 ssize_t result = 0;
389 sigset_t oldset; 364 sigset_t oldset;
390 struct inode *inode = iocb->ki_filp->f_mapping->host; 365 struct inode *inode = iocb->ki_filp->f_mapping->host;
391 struct rpc_clnt *clnt = NFS_CLIENT(inode); 366 struct rpc_clnt *clnt = NFS_CLIENT(inode);
392 struct nfs_direct_req *dreq; 367 struct nfs_direct_req *dreq;
393 368
394 dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize); 369 dreq = nfs_direct_req_alloc();
395 if (!dreq) 370 if (!dreq)
396 return -ENOMEM; 371 return -ENOMEM;
397 372
398 dreq->user_addr = user_addr;
399 dreq->user_count = count;
400 dreq->pos = pos;
401 dreq->pages = pages;
402 dreq->npages = nr_pages;
403 dreq->inode = inode; 373 dreq->inode = inode;
404 dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data); 374 dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
405 if (!is_sync_kiocb(iocb)) 375 if (!is_sync_kiocb(iocb))
@@ -407,8 +377,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
407 377
408 nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count); 378 nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
409 rpc_clnt_sigmask(clnt, &oldset); 379 rpc_clnt_sigmask(clnt, &oldset);
410 nfs_direct_read_schedule(dreq); 380 result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
411 result = nfs_direct_wait(dreq); 381 if (!result)
382 result = nfs_direct_wait(dreq);
412 rpc_clnt_sigunmask(clnt, &oldset); 383 rpc_clnt_sigunmask(clnt, &oldset);
413 384
414 return result; 385 return result;
@@ -416,10 +387,10 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
416 387
417static void nfs_direct_free_writedata(struct nfs_direct_req *dreq) 388static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
418{ 389{
419 list_splice_init(&dreq->rewrite_list, &dreq->list); 390 while (!list_empty(&dreq->rewrite_list)) {
420 while (!list_empty(&dreq->list)) { 391 struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
421 struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
422 list_del(&data->pages); 392 list_del(&data->pages);
393 nfs_direct_release_pages(data->pagevec, data->npages);
423 nfs_writedata_release(data); 394 nfs_writedata_release(data);
424 } 395 }
425} 396}
@@ -427,14 +398,51 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
427#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 398#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
428static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) 399static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
429{ 400{
430 struct list_head *pos; 401 struct inode *inode = dreq->inode;
402 struct list_head *p;
403 struct nfs_write_data *data;
431 404
432 list_splice_init(&dreq->rewrite_list, &dreq->list);
433 list_for_each(pos, &dreq->list)
434 dreq->outstanding++;
435 dreq->count = 0; 405 dreq->count = 0;
406 get_dreq(dreq);
407
408 list_for_each(p, &dreq->rewrite_list) {
409 data = list_entry(p, struct nfs_write_data, pages);
410
411 get_dreq(dreq);
412
413 /*
414 * Reset data->res.
415 */
416 nfs_fattr_init(&data->fattr);
417 data->res.count = data->args.count;
418 memset(&data->verf, 0, sizeof(data->verf));
419
420 /*
421 * Reuse data->task; data->args should not have changed
422 * since the original request was sent.
423 */
424 rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
425 &nfs_write_direct_ops, data);
426 NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
427
428 data->task.tk_priority = RPC_PRIORITY_NORMAL;
429 data->task.tk_cookie = (unsigned long) inode;
430
431 /*
432 * We're called via an RPC callback, so BKL is already held.
433 */
434 rpc_execute(&data->task);
435
436 dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
437 data->task.tk_pid,
438 inode->i_sb->s_id,
439 (long long)NFS_FILEID(inode),
440 data->args.count,
441 (unsigned long long)data->args.offset);
442 }
436 443
437 nfs_direct_write_schedule(dreq, FLUSH_STABLE); 444 if (put_dreq(dreq))
445 nfs_direct_write_complete(dreq, inode);
438} 446}
439 447
440static void nfs_direct_commit_result(struct rpc_task *task, void *calldata) 448static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
@@ -471,8 +479,8 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
471 data->cred = dreq->ctx->cred; 479 data->cred = dreq->ctx->cred;
472 480
473 data->args.fh = NFS_FH(data->inode); 481 data->args.fh = NFS_FH(data->inode);
474 data->args.offset = dreq->pos; 482 data->args.offset = 0;
475 data->args.count = dreq->user_count; 483 data->args.count = 0;
476 data->res.count = 0; 484 data->res.count = 0;
477 data->res.fattr = &data->fattr; 485 data->res.fattr = &data->fattr;
478 data->res.verf = &data->verf; 486 data->res.verf = &data->verf;
@@ -534,47 +542,6 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
534} 542}
535#endif 543#endif
536 544
537static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
538{
539 struct list_head *list;
540 struct nfs_direct_req *dreq;
541 unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
542
543 dreq = nfs_direct_req_alloc();
544 if (!dreq)
545 return NULL;
546
547 list = &dreq->list;
548 for(;;) {
549 struct nfs_write_data *data = nfs_writedata_alloc(wpages);
550
551 if (unlikely(!data)) {
552 while (!list_empty(list)) {
553 data = list_entry(list->next,
554 struct nfs_write_data, pages);
555 list_del(&data->pages);
556 nfs_writedata_free(data);
557 }
558 kref_put(&dreq->kref, nfs_direct_req_release);
559 return NULL;
560 }
561
562 INIT_LIST_HEAD(&data->pages);
563 list_add(&data->pages, list);
564
565 data->req = (struct nfs_page *) dreq;
566 dreq->outstanding++;
567 if (nbytes <= wsize)
568 break;
569 nbytes -= wsize;
570 }
571
572 nfs_alloc_commit_data(dreq);
573
574 kref_get(&dreq->kref);
575 return dreq;
576}
577
578static void nfs_direct_write_result(struct rpc_task *task, void *calldata) 545static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
579{ 546{
580 struct nfs_write_data *data = calldata; 547 struct nfs_write_data *data = calldata;
@@ -604,8 +571,6 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
604 } 571 }
605 } 572 }
606 } 573 }
607 /* In case we have to resend */
608 data->args.stable = NFS_FILE_SYNC;
609 574
610 spin_unlock(&dreq->lock); 575 spin_unlock(&dreq->lock);
611} 576}
@@ -619,14 +584,8 @@ static void nfs_direct_write_release(void *calldata)
619 struct nfs_write_data *data = calldata; 584 struct nfs_write_data *data = calldata;
620 struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req; 585 struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
621 586
622 spin_lock(&dreq->lock); 587 if (put_dreq(dreq))
623 if (--dreq->outstanding) { 588 nfs_direct_write_complete(dreq, data->inode);
624 spin_unlock(&dreq->lock);
625 return;
626 }
627 spin_unlock(&dreq->lock);
628
629 nfs_direct_write_complete(dreq, data->inode);
630} 589}
631 590
632static const struct rpc_call_ops nfs_write_direct_ops = { 591static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -635,41 +594,62 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
635}; 594};
636 595
637/* 596/*
638 * For each nfs_write_data struct that was allocated on the list, dispatch 597 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
639 * an NFS WRITE operation 598 * operation. If nfs_writedata_alloc() or get_user_pages() fails,
599 * bail and stop sending more writes. Write length accounting is
600 * handled automatically by nfs_direct_write_result(). Otherwise, if
601 * no requests have been sent, just return an error.
640 */ 602 */
641static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync) 603static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
642{ 604{
643 struct nfs_open_context *ctx = dreq->ctx; 605 struct nfs_open_context *ctx = dreq->ctx;
644 struct inode *inode = ctx->dentry->d_inode; 606 struct inode *inode = ctx->dentry->d_inode;
645 struct list_head *list = &dreq->list;
646 struct page **pages = dreq->pages;
647 size_t count = dreq->user_count;
648 loff_t pos = dreq->pos;
649 size_t wsize = NFS_SERVER(inode)->wsize; 607 size_t wsize = NFS_SERVER(inode)->wsize;
650 unsigned int curpage, pgbase; 608 unsigned int wpages = nfs_max_pages(wsize);
609 unsigned int pgbase;
610 int result;
611 ssize_t started = 0;
651 612
652 curpage = 0; 613 get_dreq(dreq);
653 pgbase = dreq->user_addr & ~PAGE_MASK; 614
615 pgbase = user_addr & ~PAGE_MASK;
654 do { 616 do {
655 struct nfs_write_data *data; 617 struct nfs_write_data *data;
656 size_t bytes; 618 size_t bytes;
657 619
620 result = -ENOMEM;
621 data = nfs_writedata_alloc(wpages);
622 if (unlikely(!data))
623 break;
624
658 bytes = wsize; 625 bytes = wsize;
659 if (count < wsize) 626 if (count < wsize)
660 bytes = count; 627 bytes = count;
661 628
662 BUG_ON(list_empty(list)); 629 data->npages = nfs_direct_count_pages(user_addr, bytes);
663 data = list_entry(list->next, struct nfs_write_data, pages); 630 down_read(&current->mm->mmap_sem);
631 result = get_user_pages(current, current->mm, user_addr,
632 data->npages, 0, 0, data->pagevec, NULL);
633 up_read(&current->mm->mmap_sem);
634 if (unlikely(result < data->npages)) {
635 if (result > 0)
636 nfs_direct_release_pages(data->pagevec, result);
637 nfs_writedata_release(data);
638 break;
639 }
640
641 get_dreq(dreq);
642
664 list_move_tail(&data->pages, &dreq->rewrite_list); 643 list_move_tail(&data->pages, &dreq->rewrite_list);
665 644
645 data->req = (struct nfs_page *) dreq;
666 data->inode = inode; 646 data->inode = inode;
667 data->cred = ctx->cred; 647 data->cred = ctx->cred;
668 data->args.fh = NFS_FH(inode); 648 data->args.fh = NFS_FH(inode);
669 data->args.context = ctx; 649 data->args.context = ctx;
670 data->args.offset = pos; 650 data->args.offset = pos;
671 data->args.pgbase = pgbase; 651 data->args.pgbase = pgbase;
672 data->args.pages = &pages[curpage]; 652 data->args.pages = data->pagevec;
673 data->args.count = bytes; 653 data->args.count = bytes;
674 data->res.fattr = &data->fattr; 654 data->res.fattr = &data->fattr;
675 data->res.count = bytes; 655 data->res.count = bytes;
@@ -693,19 +673,26 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
693 bytes, 673 bytes,
694 (unsigned long long)data->args.offset); 674 (unsigned long long)data->args.offset);
695 675
676 started += bytes;
677 user_addr += bytes;
696 pos += bytes; 678 pos += bytes;
697 pgbase += bytes; 679 pgbase += bytes;
698 curpage += pgbase >> PAGE_SHIFT;
699 pgbase &= ~PAGE_MASK; 680 pgbase &= ~PAGE_MASK;
700 681
701 count -= bytes; 682 count -= bytes;
702 } while (count != 0); 683 } while (count != 0);
703 BUG_ON(!list_empty(list)); 684
685 if (put_dreq(dreq))
686 nfs_direct_write_complete(dreq, inode);
687
688 if (started)
689 return 0;
690 return result < 0 ? (ssize_t) result : -EFAULT;
704} 691}
705 692
706static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages) 693static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
707{ 694{
708 ssize_t result; 695 ssize_t result = 0;
709 sigset_t oldset; 696 sigset_t oldset;
710 struct inode *inode = iocb->ki_filp->f_mapping->host; 697 struct inode *inode = iocb->ki_filp->f_mapping->host;
711 struct rpc_clnt *clnt = NFS_CLIENT(inode); 698 struct rpc_clnt *clnt = NFS_CLIENT(inode);
@@ -713,17 +700,14 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
713 size_t wsize = NFS_SERVER(inode)->wsize; 700 size_t wsize = NFS_SERVER(inode)->wsize;
714 int sync = 0; 701 int sync = 0;
715 702
716 dreq = nfs_direct_write_alloc(count, wsize); 703 dreq = nfs_direct_req_alloc();
717 if (!dreq) 704 if (!dreq)
718 return -ENOMEM; 705 return -ENOMEM;
706 nfs_alloc_commit_data(dreq);
707
719 if (dreq->commit_data == NULL || count < wsize) 708 if (dreq->commit_data == NULL || count < wsize)
720 sync = FLUSH_STABLE; 709 sync = FLUSH_STABLE;
721 710
722 dreq->user_addr = user_addr;
723 dreq->user_count = count;
724 dreq->pos = pos;
725 dreq->pages = pages;
726 dreq->npages = nr_pages;
727 dreq->inode = inode; 711 dreq->inode = inode;
728 dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data); 712 dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
729 if (!is_sync_kiocb(iocb)) 713 if (!is_sync_kiocb(iocb))
@@ -734,8 +718,9 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
734 nfs_begin_data_update(inode); 718 nfs_begin_data_update(inode);
735 719
736 rpc_clnt_sigmask(clnt, &oldset); 720 rpc_clnt_sigmask(clnt, &oldset);
737 nfs_direct_write_schedule(dreq, sync); 721 result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
738 result = nfs_direct_wait(dreq); 722 if (!result)
723 result = nfs_direct_wait(dreq);
739 rpc_clnt_sigunmask(clnt, &oldset); 724 rpc_clnt_sigunmask(clnt, &oldset);
740 725
741 return result; 726 return result;
@@ -765,8 +750,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
765ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) 750ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
766{ 751{
767 ssize_t retval = -EINVAL; 752 ssize_t retval = -EINVAL;
768 int page_count;
769 struct page **pages;
770 struct file *file = iocb->ki_filp; 753 struct file *file = iocb->ki_filp;
771 struct address_space *mapping = file->f_mapping; 754 struct address_space *mapping = file->f_mapping;
772 755
@@ -788,14 +771,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
788 if (retval) 771 if (retval)
789 goto out; 772 goto out;
790 773
791 retval = nfs_get_user_pages(READ, (unsigned long) buf, 774 retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
792 count, &pages);
793 if (retval < 0)
794 goto out;
795 page_count = retval;
796
797 retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
798 pages, page_count);
799 if (retval > 0) 775 if (retval > 0)
800 iocb->ki_pos = pos + retval; 776 iocb->ki_pos = pos + retval;
801 777
@@ -831,8 +807,6 @@ out:
831ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos) 807ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
832{ 808{
833 ssize_t retval; 809 ssize_t retval;
834 int page_count;
835 struct page **pages;
836 struct file *file = iocb->ki_filp; 810 struct file *file = iocb->ki_filp;
837 struct address_space *mapping = file->f_mapping; 811 struct address_space *mapping = file->f_mapping;
838 812
@@ -860,14 +834,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
860 if (retval) 834 if (retval)
861 goto out; 835 goto out;
862 836
863 retval = nfs_get_user_pages(WRITE, (unsigned long) buf, 837 retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
864 count, &pages);
865 if (retval < 0)
866 goto out;
867 page_count = retval;
868
869 retval = nfs_direct_write(iocb, (unsigned long) buf, count,
870 pos, pages, page_count);
871 838
872 /* 839 /*
873 * XXX: nfs_end_data_update() already ensures this file's 840 * XXX: nfs_end_data_update() already ensures this file's
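The rewritten direct-I/O path replaces the preallocated request list with get_dreq()/put_dreq() reference counting: the scheduler takes one reference up front and one per dispatched chunk, and whoever drops the count to zero completes the request. A single-threaded model of that gating (in the kernel the chunk completions run from RPC callbacks):

/* Single-threaded model of the get_dreq()/put_dreq() pattern introduced above:
 * the scheduler holds its own reference, so the request can only complete once
 * every dispatched chunk and the scheduler itself have dropped theirs. */
#include <stdatomic.h>
#include <stdio.h>

struct direct_req { atomic_int io_count; long bytes_done; };

static void complete(struct direct_req *dreq)
{
        printf("complete: %ld bytes\n", dreq->bytes_done);
}

static void get_dreq(struct direct_req *dreq) { atomic_fetch_add(&dreq->io_count, 1); }

static int put_dreq(struct direct_req *dreq)
{
        return atomic_fetch_sub(&dreq->io_count, 1) == 1;   /* true when it hits zero */
}

/* Stands in for one RPC finishing. */
static void chunk_done(struct direct_req *dreq, long bytes)
{
        dreq->bytes_done += bytes;
        if (put_dreq(dreq))
                complete(dreq);
}

int main(void)
{
        struct direct_req dreq = { .io_count = 1, .bytes_done = 0 };  /* scheduler's ref */

        for (int i = 0; i < 3; i++) {
                get_dreq(&dreq);          /* one reference per dispatched chunk */
                chunk_done(&dreq, 4096);  /* here each chunk "completes" immediately */
        }
        if (put_dreq(&dreq))              /* scheduler drops its reference last */
                complete(&dreq);
        return 0;
}

With real asynchronous completions the last put may come either from the final callback or from the scheduler, which is exactly why the scheduler needs its own reference.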
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b4916b092194..e6ee97f19d81 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3144,9 +3144,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
3144 default: 3144 default:
3145 BUG(); 3145 BUG();
3146 } 3146 }
3147 if (res < 0)
3148 printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
3149 __FUNCTION__);
3150 return res; 3147 return res;
3151} 3148}
3152 3149
@@ -3258,8 +3255,6 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
3258 return ERR_PTR(-ENOMEM); 3255 return ERR_PTR(-ENOMEM);
3259 } 3256 }
3260 3257
3261 /* Unlock _before_ we do the RPC call */
3262 do_vfs_lock(fl->fl_file, fl);
3263 return rpc_run_task(NFS_CLIENT(lsp->ls_state->inode), RPC_TASK_ASYNC, &nfs4_locku_ops, data); 3258 return rpc_run_task(NFS_CLIENT(lsp->ls_state->inode), RPC_TASK_ASYNC, &nfs4_locku_ops, data);
3264} 3259}
3265 3260
@@ -3270,30 +3265,28 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
3270 struct rpc_task *task; 3265 struct rpc_task *task;
3271 int status = 0; 3266 int status = 0;
3272 3267
3273 /* Is this a delegated lock? */
3274 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
3275 goto out_unlock;
3276 /* Is this open_owner holding any locks on the server? */
3277 if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
3278 goto out_unlock;
3279
3280 status = nfs4_set_lock_state(state, request); 3268 status = nfs4_set_lock_state(state, request);
3269 /* Unlock _before_ we do the RPC call */
3270 request->fl_flags |= FL_EXISTS;
3271 if (do_vfs_lock(request->fl_file, request) == -ENOENT)
3272 goto out;
3281 if (status != 0) 3273 if (status != 0)
3282 goto out_unlock; 3274 goto out;
3275 /* Is this a delegated lock? */
3276 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
3277 goto out;
3283 lsp = request->fl_u.nfs4_fl.owner; 3278 lsp = request->fl_u.nfs4_fl.owner;
3284 status = -ENOMEM;
3285 seqid = nfs_alloc_seqid(&lsp->ls_seqid); 3279 seqid = nfs_alloc_seqid(&lsp->ls_seqid);
3280 status = -ENOMEM;
3286 if (seqid == NULL) 3281 if (seqid == NULL)
3287 goto out_unlock; 3282 goto out;
3288 task = nfs4_do_unlck(request, request->fl_file->private_data, lsp, seqid); 3283 task = nfs4_do_unlck(request, request->fl_file->private_data, lsp, seqid);
3289 status = PTR_ERR(task); 3284 status = PTR_ERR(task);
3290 if (IS_ERR(task)) 3285 if (IS_ERR(task))
3291 goto out_unlock; 3286 goto out;
3292 status = nfs4_wait_for_completion_rpc_task(task); 3287 status = nfs4_wait_for_completion_rpc_task(task);
3293 rpc_release_task(task); 3288 rpc_release_task(task);
3294 return status; 3289out:
3295out_unlock:
3296 do_vfs_lock(request->fl_file, request);
3297 return status; 3290 return status;
3298} 3291}
3299 3292
@@ -3461,10 +3454,10 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
3461 struct nfs4_exception exception = { }; 3454 struct nfs4_exception exception = { };
3462 int err; 3455 int err;
3463 3456
3464 /* Cache the lock if possible... */
3465 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
3466 return 0;
3467 do { 3457 do {
3458 /* Cache the lock if possible... */
3459 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
3460 return 0;
3468 err = _nfs4_do_setlk(state, F_SETLK, request, 1); 3461 err = _nfs4_do_setlk(state, F_SETLK, request, 1);
3469 if (err != -NFS4ERR_DELAY) 3462 if (err != -NFS4ERR_DELAY)
3470 break; 3463 break;
@@ -3483,6 +3476,8 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
3483 if (err != 0) 3476 if (err != 0)
3484 return err; 3477 return err;
3485 do { 3478 do {
3479 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
3480 return 0;
3486 err = _nfs4_do_setlk(state, F_SETLK, request, 0); 3481 err = _nfs4_do_setlk(state, F_SETLK, request, 0);
3487 if (err != -NFS4ERR_DELAY) 3482 if (err != -NFS4ERR_DELAY)
3488 break; 3483 break;
@@ -3494,29 +3489,42 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
3494static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 3489static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
3495{ 3490{
3496 struct nfs4_client *clp = state->owner->so_client; 3491 struct nfs4_client *clp = state->owner->so_client;
3492 unsigned char fl_flags = request->fl_flags;
3497 int status; 3493 int status;
3498 3494
3499 /* Is this a delegated open? */ 3495 /* Is this a delegated open? */
3500 if (NFS_I(state->inode)->delegation_state != 0) {
3501 /* Yes: cache locks! */
3502 status = do_vfs_lock(request->fl_file, request);
3503 /* ...but avoid races with delegation recall... */
3504 if (status < 0 || test_bit(NFS_DELEGATED_STATE, &state->flags))
3505 return status;
3506 }
3507 down_read(&clp->cl_sem);
3508 status = nfs4_set_lock_state(state, request); 3496 status = nfs4_set_lock_state(state, request);
3509 if (status != 0) 3497 if (status != 0)
3510 goto out; 3498 goto out;
3499 request->fl_flags |= FL_ACCESS;
3500 status = do_vfs_lock(request->fl_file, request);
3501 if (status < 0)
3502 goto out;
3503 down_read(&clp->cl_sem);
3504 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
3505 struct nfs_inode *nfsi = NFS_I(state->inode);
3506 /* Yes: cache locks! */
3507 down_read(&nfsi->rwsem);
3508 /* ...but avoid races with delegation recall... */
3509 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
3510 request->fl_flags = fl_flags & ~FL_SLEEP;
3511 status = do_vfs_lock(request->fl_file, request);
3512 up_read(&nfsi->rwsem);
3513 goto out_unlock;
3514 }
3515 up_read(&nfsi->rwsem);
3516 }
3511 status = _nfs4_do_setlk(state, cmd, request, 0); 3517 status = _nfs4_do_setlk(state, cmd, request, 0);
3512 if (status != 0) 3518 if (status != 0)
3513 goto out; 3519 goto out_unlock;
3514 /* Note: we always want to sleep here! */ 3520 /* Note: we always want to sleep here! */
3515 request->fl_flags |= FL_SLEEP; 3521 request->fl_flags = fl_flags | FL_SLEEP;
3516 if (do_vfs_lock(request->fl_file, request) < 0) 3522 if (do_vfs_lock(request->fl_file, request) < 0)
3517 printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__); 3523 printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
3518out: 3524out_unlock:
3519 up_read(&clp->cl_sem); 3525 up_read(&clp->cl_sem);
3526out:
3527 request->fl_flags = fl_flags;
3520 return status; 3528 return status;
3521} 3529}
3522 3530
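
The nfs4proc.c changes above follow a save/modify/restore discipline on the lock flags: _nfs4_proc_setlk() records the caller's fl_flags on entry, temporarily ORs in FL_ACCESS (probe only) or FL_SLEEP (blocking) around individual do_vfs_lock() calls, and restores the saved value on every exit path, while nfs4_proc_unlck() sets FL_EXISTS so the VFS unlock can report whether a lock was actually held before the RPC is sent. A minimal user-space sketch of the save-and-restore part, with invented names standing in for the kernel types (not the NFS code itself):

    #include <stdio.h>

    #define FL_ACCESS (1 << 3)    /* probe only, do not record the lock */
    #define FL_SLEEP  (1 << 7)    /* blocking request */

    struct lock_request {
        unsigned char fl_flags;
        /* other fields elided */
    };

    /* Stand-in for do_vfs_lock(): just reports which flags it saw. */
    static int try_lock(struct lock_request *req)
    {
        printf("try_lock called with flags 0x%02x\n", (unsigned)req->fl_flags);
        return 0;
    }

    static int set_lock(struct lock_request *req)
    {
        unsigned char saved = req->fl_flags;    /* save caller's flags */
        int status;

        req->fl_flags |= FL_ACCESS;             /* probe first */
        status = try_lock(req);
        if (status < 0)
            goto out;

        req->fl_flags = saved | FL_SLEEP;       /* then take it, blocking */
        status = try_lock(req);
    out:
        req->fl_flags = saved;                  /* restore on every exit */
        return status;
    }

    int main(void)
    {
        struct lock_request req = { .fl_flags = 0 };
        return set_lock(&req) < 0;
    }
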
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index bca5734ca9fb..86bac6a5008e 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -578,7 +578,7 @@ static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, un
578 return ret; 578 return ret;
579} 579}
580 580
581static void nfs_cancel_requests(struct list_head *head) 581static void nfs_cancel_dirty_list(struct list_head *head)
582{ 582{
583 struct nfs_page *req; 583 struct nfs_page *req;
584 while(!list_empty(head)) { 584 while(!list_empty(head)) {
@@ -589,6 +589,19 @@ static void nfs_cancel_requests(struct list_head *head)
589 } 589 }
590} 590}
591 591
592static void nfs_cancel_commit_list(struct list_head *head)
593{
594 struct nfs_page *req;
595
596 while(!list_empty(head)) {
597 req = nfs_list_entry(head->next);
598 nfs_list_remove_request(req);
599 nfs_inode_remove_request(req);
600 nfs_clear_page_writeback(req);
601 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
602 }
603}
604
592/* 605/*
593 * nfs_scan_dirty - Scan an inode for dirty requests 606 * nfs_scan_dirty - Scan an inode for dirty requests
594 * @inode: NFS inode to scan 607 * @inode: NFS inode to scan
@@ -1381,6 +1394,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1381 nfs_list_remove_request(req); 1394 nfs_list_remove_request(req);
1382 nfs_mark_request_commit(req); 1395 nfs_mark_request_commit(req);
1383 nfs_clear_page_writeback(req); 1396 nfs_clear_page_writeback(req);
1397 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1384 } 1398 }
1385 return -ENOMEM; 1399 return -ENOMEM;
1386} 1400}
@@ -1499,7 +1513,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
1499 if (pages != 0) { 1513 if (pages != 0) {
1500 spin_unlock(&nfsi->req_lock); 1514 spin_unlock(&nfsi->req_lock);
1501 if (how & FLUSH_INVALIDATE) 1515 if (how & FLUSH_INVALIDATE)
1502 nfs_cancel_requests(&head); 1516 nfs_cancel_dirty_list(&head);
1503 else 1517 else
1504 ret = nfs_flush_list(inode, &head, pages, how); 1518 ret = nfs_flush_list(inode, &head, pages, how);
1505 spin_lock(&nfsi->req_lock); 1519 spin_lock(&nfsi->req_lock);
@@ -1512,7 +1526,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
1512 break; 1526 break;
1513 if (how & FLUSH_INVALIDATE) { 1527 if (how & FLUSH_INVALIDATE) {
1514 spin_unlock(&nfsi->req_lock); 1528 spin_unlock(&nfsi->req_lock);
1515 nfs_cancel_requests(&head); 1529 nfs_cancel_commit_list(&head);
1516 spin_lock(&nfsi->req_lock); 1530 spin_lock(&nfsi->req_lock);
1517 continue; 1531 continue;
1518 } 1532 }
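
The write.c split matters because requests sitting on the commit list are also counted as unstable pages, so the new nfs_cancel_commit_list() has to decrement NR_UNSTABLE_NFS for every request it drops (and nfs_commit_list()'s error path now does the same). The bookkeeping pattern, reduced to a plain linked list with made-up names:

    #include <stdio.h>
    #include <stdlib.h>

    struct request {
        struct request *next;
    };

    static long nr_unstable;    /* stands in for the NR_UNSTABLE_NFS counter */

    static void cancel_commit_list(struct request **head)
    {
        while (*head) {
            struct request *req = *head;

            *head = req->next;    /* unlink from the list */
            nr_unstable--;        /* keep the counter in sync */
            free(req);
        }
    }

    int main(void)
    {
        struct request *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct request *req = calloc(1, sizeof(*req));
            req->next = head;
            head = req;
            nr_unstable++;
        }
        cancel_commit_list(&head);
        printf("unstable pages left: %ld\n", nr_unstable);    /* 0 */
        return 0;
    }
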
diff --git a/include/asm-arm/arch-at91rm9200/irqs.h b/include/asm-arm/arch-at91rm9200/irqs.h
index 2dc93b174a8f..f63842c2c093 100644
--- a/include/asm-arm/arch-at91rm9200/irqs.h
+++ b/include/asm-arm/arch-at91rm9200/irqs.h
@@ -39,12 +39,4 @@
39 */ 39 */
40#define NR_IRQS (NR_AIC_IRQS + (4 * 32)) 40#define NR_IRQS (NR_AIC_IRQS + (4 * 32))
41 41
42
43#ifndef __ASSEMBLY__
44/*
45 * Initialize the IRQ controller.
46 */
47extern void at91rm9200_init_irq(unsigned int priority[]);
48#endif
49
50#endif 42#endif
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
index a21185d47883..310804485208 100644
--- a/include/asm-powerpc/cputime.h
+++ b/include/asm-powerpc/cputime.h
@@ -43,6 +43,7 @@ typedef u64 cputime64_t;
43 43
44#define cputime64_zero ((cputime64_t)0) 44#define cputime64_zero ((cputime64_t)0)
45#define cputime64_add(__a, __b) ((__a) + (__b)) 45#define cputime64_add(__a, __b) ((__a) + (__b))
46#define cputime64_sub(__a, __b) ((__a) - (__b))
46#define cputime_to_cputime64(__ct) (__ct) 47#define cputime_to_cputime64(__ct) (__ct)
47 48
48#ifdef __KERNEL__ 49#ifdef __KERNEL__
@@ -74,6 +75,23 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
74 return ct; 75 return ct;
75} 76}
76 77
78static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
79{
80 cputime_t ct;
81 u64 sec;
82
83 /* have to be a little careful about overflow */
84 ct = jif % HZ;
85 sec = jif / HZ;
86 if (ct) {
87 ct *= tb_ticks_per_sec;
88 do_div(ct, HZ);
89 }
90 if (sec)
91 ct += (cputime_t) sec * tb_ticks_per_sec;
92 return ct;
93}
94
77static inline u64 cputime64_to_jiffies64(const cputime_t ct) 95static inline u64 cputime64_to_jiffies64(const cputime_t ct)
78{ 96{
79 return mulhdu(ct, __cputime_jiffies_factor); 97 return mulhdu(ct, __cputime_jiffies_factor);
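
The new jiffies64_to_cputime64() splits the jiffies value into whole seconds and a sub-second remainder so that only the small remainder is multiplied before the divide; scaling the full 64-bit jiffies count by tb_ticks_per_sec in one step could overflow the intermediate product. The same idea as a standalone sketch (the HZ and timebase values below are examples, not the kernel's):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Convert jiffies to timebase ticks without overflowing the
     * intermediate product: scale the sub-second remainder and the
     * whole seconds separately, then add the results. */
    static uint64_t jiffies_to_ticks(uint64_t jif, uint64_t hz,
                                     uint64_t ticks_per_sec)
    {
        uint64_t rem = jif % hz;
        uint64_t sec = jif / hz;

        return sec * ticks_per_sec + rem * ticks_per_sec / hz;
    }

    int main(void)
    {
        /* e.g. HZ=250 and a 512 MHz timebase */
        printf("%" PRIu64 " ticks\n",
               jiffies_to_ticks(10000000000ULL, 250, 512000000ULL));
        return 0;
    }
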
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 0f5b89c9323b..27c46fbeebd6 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -160,6 +160,20 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
160 BUG(); 160 BUG();
161} 161}
162 162
163static inline void
164dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
165 enum dma_data_direction direction)
166{
167 BUG();
168}
169
170static inline void
171dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
172 enum dma_data_direction direction)
173{
174 BUG();
175}
176
163#endif /* PCI */ 177#endif /* PCI */
164 178
165 179
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8f3ab56765a5..b04eab2cc663 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -716,6 +716,7 @@ extern spinlock_t files_lock;
716#define FL_POSIX 1 716#define FL_POSIX 1
717#define FL_FLOCK 2 717#define FL_FLOCK 2
718#define FL_ACCESS 8 /* not trying to lock, just looking */ 718#define FL_ACCESS 8 /* not trying to lock, just looking */
719#define FL_EXISTS 16 /* when unlocking, test for existence */
719#define FL_LEASE 32 /* lease held on this file */ 720#define FL_LEASE 32 /* lease held on this file */
720#define FL_CLOSE 64 /* unlock on close */ 721#define FL_CLOSE 64 /* unlock on close */
721#define FL_SLEEP 128 /* A blocking lock */ 722#define FL_SLEEP 128 /* A blocking lock */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f4284bf89758..6cc497a2b6da 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -131,6 +131,7 @@ enum {
131 ATA_DFLAG_CFG_MASK = (1 << 8) - 1, 131 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
132 132
133 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ 133 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
134 ATA_DFLAG_SUSPENDED = (1 << 9), /* device suspended */
134 ATA_DFLAG_INIT_MASK = (1 << 16) - 1, 135 ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
135 136
136 ATA_DFLAG_DETACH = (1 << 16), 137 ATA_DFLAG_DETACH = (1 << 16),
@@ -160,22 +161,28 @@ enum {
160 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */ 161 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */
161 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H 162 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
162 * Register FIS clearing BSY */ 163 * Register FIS clearing BSY */
163
164 ATA_FLAG_DEBUGMSG = (1 << 13), 164 ATA_FLAG_DEBUGMSG = (1 << 13),
165 ATA_FLAG_FLUSH_PORT_TASK = (1 << 14), /* flush port task */
166 165
167 ATA_FLAG_EH_PENDING = (1 << 15), /* EH pending */ 166 /* The following flag belongs to ap->pflags but is kept in
168 ATA_FLAG_EH_IN_PROGRESS = (1 << 16), /* EH in progress */ 167 * ap->flags because it's referenced in many LLDs and will be
169 ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */ 168 * removed in not-too-distant future.
170 ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */ 169 */
171 ATA_FLAG_LOADING = (1 << 19), /* boot/loading probe */ 170 ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */
172 ATA_FLAG_UNLOADING = (1 << 20), /* module is unloading */ 171
173 ATA_FLAG_SCSI_HOTPLUG = (1 << 21), /* SCSI hotplug scheduled */ 172 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
174 173
175 ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */ 174 /* struct ata_port pflags */
176 ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */ 175 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
176 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
177 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
178 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
179 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
180 ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
181 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
177 182
178 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */ 183 ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
184 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
185 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
179 186
180 /* struct ata_queued_cmd flags */ 187 /* struct ata_queued_cmd flags */
181 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ 188 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
@@ -248,12 +255,19 @@ enum {
248 ATA_EH_REVALIDATE = (1 << 0), 255 ATA_EH_REVALIDATE = (1 << 0),
249 ATA_EH_SOFTRESET = (1 << 1), 256 ATA_EH_SOFTRESET = (1 << 1),
250 ATA_EH_HARDRESET = (1 << 2), 257 ATA_EH_HARDRESET = (1 << 2),
258 ATA_EH_SUSPEND = (1 << 3),
259 ATA_EH_RESUME = (1 << 4),
260 ATA_EH_PM_FREEZE = (1 << 5),
251 261
252 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, 262 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
253 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, 263 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
264 ATA_EH_RESUME | ATA_EH_PM_FREEZE,
254 265
255 /* ata_eh_info->flags */ 266 /* ata_eh_info->flags */
256 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
268 ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */
269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
270 ATA_EHI_QUIET = (1 << 3), /* be quiet */
257 271
258 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ 272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
259 273
@@ -486,6 +500,7 @@ struct ata_port {
486 const struct ata_port_operations *ops; 500 const struct ata_port_operations *ops;
487 spinlock_t *lock; 501 spinlock_t *lock;
488 unsigned long flags; /* ATA_FLAG_xxx */ 502 unsigned long flags; /* ATA_FLAG_xxx */
503 unsigned int pflags; /* ATA_PFLAG_xxx */
489 unsigned int id; /* unique id req'd by scsi midlyr */ 504 unsigned int id; /* unique id req'd by scsi midlyr */
490 unsigned int port_no; /* unique port #; from zero */ 505 unsigned int port_no; /* unique port #; from zero */
491 unsigned int hard_port_no; /* hardware port #; from zero */ 506 unsigned int hard_port_no; /* hardware port #; from zero */
@@ -535,6 +550,9 @@ struct ata_port {
535 struct list_head eh_done_q; 550 struct list_head eh_done_q;
536 wait_queue_head_t eh_wait_q; 551 wait_queue_head_t eh_wait_q;
537 552
553 pm_message_t pm_mesg;
554 int *pm_result;
555
538 void *private_data; 556 void *private_data;
539 557
540 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ 558 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
@@ -589,6 +607,9 @@ struct ata_port_operations {
589 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg, 607 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
590 u32 val); 608 u32 val);
591 609
610 int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
611 int (*port_resume) (struct ata_port *ap);
612
592 int (*port_start) (struct ata_port *ap); 613 int (*port_start) (struct ata_port *ap);
593 void (*port_stop) (struct ata_port *ap); 614 void (*port_stop) (struct ata_port *ap);
594 615
@@ -622,9 +643,18 @@ struct ata_timing {
622 643
623#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) 644#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
624 645
625extern const unsigned long sata_deb_timing_boot[]; 646extern const unsigned long sata_deb_timing_normal[];
626extern const unsigned long sata_deb_timing_eh[]; 647extern const unsigned long sata_deb_timing_hotplug[];
627extern const unsigned long sata_deb_timing_before_fsrst[]; 648extern const unsigned long sata_deb_timing_long[];
649
650static inline const unsigned long *
651sata_ehc_deb_timing(struct ata_eh_context *ehc)
652{
653 if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
654 return sata_deb_timing_hotplug;
655 else
656 return sata_deb_timing_normal;
657}
628 658
629extern void ata_port_probe(struct ata_port *); 659extern void ata_port_probe(struct ata_port *);
630extern void __sata_phy_reset(struct ata_port *ap); 660extern void __sata_phy_reset(struct ata_port *ap);
@@ -644,6 +674,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr);
644extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 674extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
645 unsigned int n_ports); 675 unsigned int n_ports);
646extern void ata_pci_remove_one (struct pci_dev *pdev); 676extern void ata_pci_remove_one (struct pci_dev *pdev);
677extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state);
678extern void ata_pci_device_do_resume(struct pci_dev *pdev);
647extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state); 679extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
648extern int ata_pci_device_resume(struct pci_dev *pdev); 680extern int ata_pci_device_resume(struct pci_dev *pdev);
649extern int ata_pci_clear_simplex(struct pci_dev *pdev); 681extern int ata_pci_clear_simplex(struct pci_dev *pdev);
@@ -664,8 +696,9 @@ extern int ata_port_online(struct ata_port *ap);
664extern int ata_port_offline(struct ata_port *ap); 696extern int ata_port_offline(struct ata_port *ap);
665extern int ata_scsi_device_resume(struct scsi_device *); 697extern int ata_scsi_device_resume(struct scsi_device *);
666extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state); 698extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
667extern int ata_device_resume(struct ata_device *); 699extern int ata_host_set_suspend(struct ata_host_set *host_set,
668extern int ata_device_suspend(struct ata_device *, pm_message_t state); 700 pm_message_t mesg);
701extern void ata_host_set_resume(struct ata_host_set *host_set);
669extern int ata_ratelimit(void); 702extern int ata_ratelimit(void);
670extern unsigned int ata_busy_sleep(struct ata_port *ap, 703extern unsigned int ata_busy_sleep(struct ata_port *ap,
671 unsigned long timeout_pat, 704 unsigned long timeout_pat,
@@ -825,19 +858,24 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
825 (ehi)->desc_len = 0; \ 858 (ehi)->desc_len = 0; \
826} while (0) 859} while (0)
827 860
828static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) 861static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
829{ 862{
830 if (ehi->flags & ATA_EHI_HOTPLUGGED) 863 if (ehi->flags & ATA_EHI_HOTPLUGGED)
831 return; 864 return;
832 865
833 ehi->flags |= ATA_EHI_HOTPLUGGED; 866 ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
834 ehi->hotplug_timestamp = jiffies; 867 ehi->hotplug_timestamp = jiffies;
835 868
836 ehi->err_mask |= AC_ERR_ATA_BUS;
837 ehi->action |= ATA_EH_SOFTRESET; 869 ehi->action |= ATA_EH_SOFTRESET;
838 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; 870 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
839} 871}
840 872
873static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
874{
875 __ata_ehi_hotplugged(ehi);
876 ehi->err_mask |= AC_ERR_ATA_BUS;
877}
878
841/* 879/*
842 * qc helpers 880 * qc helpers
843 */ 881 */
@@ -921,6 +959,11 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
921 return ata_class_absent(dev->class); 959 return ata_class_absent(dev->class);
922} 960}
923 961
962static inline unsigned int ata_dev_ready(const struct ata_device *dev)
963{
964 return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
965}
966
924/* 967/*
925 * port helpers 968 * port helpers
926 */ 969 */
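
The libata rework above splits the single ap->flags namespace in two: configuration bits that low-level drivers set stay ATA_FLAG_* in ap->flags, while run-time state owned by the core (EH pending, frozen, suspended, PM pending, ...) moves to ATA_PFLAG_* in a new ap->pflags word, and ata_dev_ready() now means "enabled and not suspended". A toy version of that layout with illustrative names (not the libata definitions):

    #include <stdio.h>

    enum {    /* configuration flags, visible to drivers */
        PORT_FLAG_SATA      = 1 << 0,
        PORT_FLAG_NO_LEGACY = 1 << 1,
    };

    enum {    /* private run-time flags, core only */
        PORT_PFLAG_EH_PENDING = 1 << 0,
        PORT_PFLAG_FROZEN     = 1 << 2,
        PORT_PFLAG_SUSPENDED  = 1 << 17,
    };

    struct port {
        unsigned long flags;     /* PORT_FLAG_*  */
        unsigned int  pflags;    /* PORT_PFLAG_* */
    };

    static int port_ready(const struct port *p)
    {
        /* usable only if not suspended and no error handling outstanding */
        return !(p->pflags & (PORT_PFLAG_SUSPENDED | PORT_PFLAG_EH_PENDING));
    }

    int main(void)
    {
        struct port p = { .flags = PORT_FLAG_SATA, .pflags = 0 };

        printf("ready: %d\n", port_ready(&p));    /* 1 */
        p.pflags |= PORT_PFLAG_SUSPENDED;
        printf("ready: %d\n", port_ready(&p));    /* 0 */
        return 0;
    }
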
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7c7320fa51aa..2d3fb6416d91 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -729,6 +729,7 @@ struct nfs_read_data {
729 struct list_head pages; /* Coalesced read requests */ 729 struct list_head pages; /* Coalesced read requests */
730 struct nfs_page *req; /* multi ops per nfs_page */ 730 struct nfs_page *req; /* multi ops per nfs_page */
731 struct page **pagevec; 731 struct page **pagevec;
732 unsigned int npages; /* active pages in pagevec */
732 struct nfs_readargs args; 733 struct nfs_readargs args;
733 struct nfs_readres res; 734 struct nfs_readres res;
734#ifdef CONFIG_NFS_V4 735#ifdef CONFIG_NFS_V4
@@ -747,6 +748,7 @@ struct nfs_write_data {
747 struct list_head pages; /* Coalesced requests we wish to flush */ 748 struct list_head pages; /* Coalesced requests we wish to flush */
748 struct nfs_page *req; /* multi ops per nfs_page */ 749 struct nfs_page *req; /* multi ops per nfs_page */
749 struct page **pagevec; 750 struct page **pagevec;
751 unsigned int npages; /* active pages in pagevec */
750 struct nfs_writeargs args; /* argument struct */ 752 struct nfs_writeargs args; /* argument struct */
751 struct nfs_writeres res; /* result struct */ 753 struct nfs_writeres res; /* result struct */
752#ifdef CONFIG_NFS_V4 754#ifdef CONFIG_NFS_V4
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 685081c01342..c09396d2c77b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2019,6 +2019,13 @@
2019#define PCI_VENDOR_ID_TDI 0x192E 2019#define PCI_VENDOR_ID_TDI 0x192E
2020#define PCI_DEVICE_ID_TDI_EHCI 0x0101 2020#define PCI_DEVICE_ID_TDI_EHCI 0x0101
2021 2021
2022#define PCI_VENDOR_ID_JMICRON 0x197B
2023#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
2024#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
2025#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
2026#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
2027#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
2028#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
2022 2029
2023#define PCI_VENDOR_ID_TEKRAM 0x1de1 2030#define PCI_VENDOR_ID_TEKRAM 0x1de1
2024#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 2031#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index 7a483ab4022f..00ad810eb883 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -104,6 +104,7 @@ struct ieee80211softmac_assoc_info {
104 */ 104 */
105 u8 static_essid:1, 105 u8 static_essid:1,
106 associating:1, 106 associating:1,
107 assoc_wait:1,
107 bssvalid:1, 108 bssvalid:1,
108 bssfixed:1; 109 bssfixed:1;
109 110
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 47ccf159372c..72d4d4e04d42 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -368,6 +368,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
368 368
369 /* Put this code here so that we avoid duplicating it in all 369 /* Put this code here so that we avoid duplicating it in all
370 * Rx paths. - Jean II */ 370 * Rx paths. - Jean II */
371#ifdef CONFIG_WIRELESS_EXT
371#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ 372#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
372 /* If spy monitoring on */ 373 /* If spy monitoring on */
373 if (ieee->spy_data.spy_number > 0) { 374 if (ieee->spy_data.spy_number > 0) {
@@ -396,15 +397,16 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
396 wireless_spy_update(ieee->dev, hdr->addr2, &wstats); 397 wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
397 } 398 }
398#endif /* IW_WIRELESS_SPY */ 399#endif /* IW_WIRELESS_SPY */
400#endif /* CONFIG_WIRELESS_EXT */
399 401
400#ifdef NOT_YET 402#ifdef NOT_YET
401 hostap_update_rx_stats(local->ap, hdr, rx_stats); 403 hostap_update_rx_stats(local->ap, hdr, rx_stats);
402#endif 404#endif
403 405
404 if (ieee->iw_mode == IW_MODE_MONITOR) { 406 if (ieee->iw_mode == IW_MODE_MONITOR) {
405 ieee80211_monitor_rx(ieee, skb, rx_stats);
406 stats->rx_packets++; 407 stats->rx_packets++;
407 stats->rx_bytes += skb->len; 408 stats->rx_bytes += skb->len;
409 ieee80211_monitor_rx(ieee, skb, rx_stats);
408 return 1; 410 return 1;
409 } 411 }
410 412
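
In the monitor-mode hunk the rx_packets/rx_bytes counters are now bumped before ieee80211_monitor_rx() runs, apparently because that call passes the skb up the stack and the buffer must not be touched afterwards. The general rule, read whatever you still need and then hand off ownership, in a tiny sketch with made-up names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
        size_t len;
        unsigned char *data;
    };

    /* Consumes the buffer: after this call the caller must not touch it. */
    static void deliver(struct buf *b)
    {
        free(b->data);
        free(b);
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        unsigned long rx_bytes = 0, rx_packets = 0;

        b->len = 42;
        b->data = malloc(b->len);
        memset(b->data, 0, b->len);

        rx_packets++;           /* account first, while b is still ours */
        rx_bytes += b->len;
        deliver(b);             /* then hand off ownership */

        printf("%lu packets, %lu bytes\n", rx_packets, rx_bytes);
        return 0;
    }
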
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index de148ae594f3..bf042139c7ab 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -562,10 +562,13 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
562 struct net_device_stats *stats = &ieee->stats; 562 struct net_device_stats *stats = &ieee->stats;
563 struct sk_buff *skb_frag; 563 struct sk_buff *skb_frag;
564 int priority = -1; 564 int priority = -1;
565 int fraglen = total_len;
566 int headroom = ieee->tx_headroom;
567 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
565 568
566 spin_lock_irqsave(&ieee->lock, flags); 569 spin_lock_irqsave(&ieee->lock, flags);
567 570
568 if (encrypt_mpdu && !ieee->sec.encrypt) 571 if (encrypt_mpdu && (!ieee->sec.encrypt || !crypt))
569 encrypt_mpdu = 0; 572 encrypt_mpdu = 0;
570 573
571 /* If there is no driver handler to take the TXB, dont' bother 574 /* If there is no driver handler to take the TXB, dont' bother
@@ -581,20 +584,24 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
581 goto success; 584 goto success;
582 } 585 }
583 586
584 if (encrypt_mpdu) 587 if (encrypt_mpdu) {
585 frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 588 frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
589 fraglen += crypt->ops->extra_mpdu_prefix_len +
590 crypt->ops->extra_mpdu_postfix_len;
591 headroom += crypt->ops->extra_mpdu_prefix_len;
592 }
586 593
587 /* When we allocate the TXB we allocate enough space for the reserve 594 /* When we allocate the TXB we allocate enough space for the reserve
588 * and full fragment bytes (bytes_per_frag doesn't include prefix, 595 * and full fragment bytes (bytes_per_frag doesn't include prefix,
589 * postfix, header, FCS, etc.) */ 596 * postfix, header, FCS, etc.) */
590 txb = ieee80211_alloc_txb(1, total_len, ieee->tx_headroom, GFP_ATOMIC); 597 txb = ieee80211_alloc_txb(1, fraglen, headroom, GFP_ATOMIC);
591 if (unlikely(!txb)) { 598 if (unlikely(!txb)) {
592 printk(KERN_WARNING "%s: Could not allocate TXB\n", 599 printk(KERN_WARNING "%s: Could not allocate TXB\n",
593 ieee->dev->name); 600 ieee->dev->name);
594 goto failed; 601 goto failed;
595 } 602 }
596 txb->encrypted = 0; 603 txb->encrypted = 0;
597 txb->payload_size = total_len; 604 txb->payload_size = fraglen;
598 605
599 skb_frag = txb->fragments[0]; 606 skb_frag = txb->fragments[0];
600 607
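
ieee80211_tx_frame() now grows both the fragment length and the headroom by the cipher's per-MPDU prefix and postfix before allocating the TXB, instead of sizing the buffer for the bare payload. A sketch of that sizing step (the 4-byte overheads and function names are invented for the example):

    #include <stdio.h>
    #include <stdlib.h>

    struct crypt_ops {
        size_t extra_prefix_len;
        size_t extra_postfix_len;
    };

    /* Allocate one buffer big enough for headroom + payload + crypto
     * overhead, so nothing has to be reallocated or shifted later. */
    static unsigned char *alloc_frame(size_t payload, size_t headroom,
                                      const struct crypt_ops *crypt,
                                      size_t *total_out)
    {
        size_t fraglen = payload;

        if (crypt) {
            fraglen  += crypt->extra_prefix_len + crypt->extra_postfix_len;
            headroom += crypt->extra_prefix_len;
        }
        *total_out = headroom + fraglen;
        return malloc(*total_out);
    }

    int main(void)
    {
        struct crypt_ops wep = { .extra_prefix_len = 4, .extra_postfix_len = 4 };
        size_t total;
        unsigned char *frame = alloc_frame(100, 16, &wep, &total);

        printf("allocated %zu bytes\n", total);    /* 20 headroom + 108 fragment */
        free(frame);
        return 0;
    }
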
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index 5e9a90651d04..44215ce64d4e 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -47,9 +47,7 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
47 47
48 dprintk(KERN_INFO PFX "sent association request!\n"); 48 dprintk(KERN_INFO PFX "sent association request!\n");
49 49
50 /* Change the state to associating */
51 spin_lock_irqsave(&mac->lock, flags); 50 spin_lock_irqsave(&mac->lock, flags);
52 mac->associnfo.associating = 1;
53 mac->associated = 0; /* just to make sure */ 51 mac->associated = 0; /* just to make sure */
54 52
55 /* Set a timer for timeout */ 53 /* Set a timer for timeout */
@@ -63,6 +61,7 @@ void
63ieee80211softmac_assoc_timeout(void *d) 61ieee80211softmac_assoc_timeout(void *d)
64{ 62{
65 struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; 63 struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
64 struct ieee80211softmac_network *n;
66 unsigned long flags; 65 unsigned long flags;
67 66
68 spin_lock_irqsave(&mac->lock, flags); 67 spin_lock_irqsave(&mac->lock, flags);
@@ -75,11 +74,12 @@ ieee80211softmac_assoc_timeout(void *d)
75 mac->associnfo.associating = 0; 74 mac->associnfo.associating = 0;
76 mac->associnfo.bssvalid = 0; 75 mac->associnfo.bssvalid = 0;
77 mac->associated = 0; 76 mac->associated = 0;
77
78 n = ieee80211softmac_get_network_by_bssid_locked(mac, mac->associnfo.bssid);
78 spin_unlock_irqrestore(&mac->lock, flags); 79 spin_unlock_irqrestore(&mac->lock, flags);
79 80
80 dprintk(KERN_INFO PFX "assoc request timed out!\n"); 81 dprintk(KERN_INFO PFX "assoc request timed out!\n");
81 /* FIXME: we need to know the network here. that requires a bit of restructuring */ 82 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, n);
82 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, NULL);
83} 83}
84 84
85void 85void
@@ -203,6 +203,10 @@ ieee80211softmac_assoc_work(void *d)
203 if (mac->associated) 203 if (mac->associated)
204 ieee80211softmac_send_disassoc_req(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT); 204 ieee80211softmac_send_disassoc_req(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
205 205
206 spin_lock_irqsave(&mac->lock, flags);
207 mac->associnfo.associating = 1;
208 spin_unlock_irqrestore(&mac->lock, flags);
209
206 /* try to find the requested network in our list, if we found one already */ 210 /* try to find the requested network in our list, if we found one already */
207 if (bssvalid || mac->associnfo.bssfixed) 211 if (bssvalid || mac->associnfo.bssfixed)
208 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); 212 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
@@ -295,19 +299,32 @@ ieee80211softmac_assoc_work(void *d)
295 memcpy(mac->associnfo.associate_essid.data, found->essid.data, IW_ESSID_MAX_SIZE + 1); 299 memcpy(mac->associnfo.associate_essid.data, found->essid.data, IW_ESSID_MAX_SIZE + 1);
296 300
297 /* we found a network! authenticate (if necessary) and associate to it. */ 301 /* we found a network! authenticate (if necessary) and associate to it. */
298 if (!found->authenticated) { 302 if (found->authenticating) {
303 dprintk(KERN_INFO PFX "Already requested authentication, waiting...\n");
304 if(!mac->associnfo.assoc_wait) {
305 mac->associnfo.assoc_wait = 1;
306 ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
307 }
308 return;
309 }
310 if (!found->authenticated && !found->authenticating) {
299 /* This relies on the fact that _auth_req only queues the work, 311 /* This relies on the fact that _auth_req only queues the work,
300 * otherwise adding the notification would be racy. */ 312 * otherwise adding the notification would be racy. */
301 if (!ieee80211softmac_auth_req(mac, found)) { 313 if (!ieee80211softmac_auth_req(mac, found)) {
302 dprintk(KERN_INFO PFX "cannot associate without being authenticated, requested authentication\n"); 314 if(!mac->associnfo.assoc_wait) {
303 ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL); 315 dprintk(KERN_INFO PFX "Cannot associate without being authenticated, requested authentication\n");
316 mac->associnfo.assoc_wait = 1;
317 ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
318 }
304 } else { 319 } else {
305 printkl(KERN_WARNING PFX "Not authenticated, but requesting authentication failed. Giving up to associate\n"); 320 printkl(KERN_WARNING PFX "Not authenticated, but requesting authentication failed. Giving up to associate\n");
321 mac->associnfo.assoc_wait = 0;
306 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, found); 322 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, found);
307 } 323 }
308 return; 324 return;
309 } 325 }
310 /* finally! now we can start associating */ 326 /* finally! now we can start associating */
327 mac->associnfo.assoc_wait = 0;
311 ieee80211softmac_assoc(mac, found); 328 ieee80211softmac_assoc(mac, found);
312} 329}
313 330
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 90b8484e509b..ebc33ca6e692 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -36,8 +36,9 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
36 struct ieee80211softmac_auth_queue_item *auth; 36 struct ieee80211softmac_auth_queue_item *auth;
37 unsigned long flags; 37 unsigned long flags;
38 38
39 if (net->authenticating) 39 if (net->authenticating || net->authenticated)
40 return 0; 40 return 0;
41 net->authenticating = 1;
41 42
42 /* Add the network if it's not already added */ 43 /* Add the network if it's not already added */
43 ieee80211softmac_add_network(mac, net); 44 ieee80211softmac_add_network(mac, net);
@@ -92,7 +93,6 @@ ieee80211softmac_auth_queue(void *data)
92 return; 93 return;
93 } 94 }
94 net->authenticated = 0; 95 net->authenticated = 0;
95 net->authenticating = 1;
96 /* add a timeout call so we eventually give up waiting for an auth reply */ 96 /* add a timeout call so we eventually give up waiting for an auth reply */
97 schedule_delayed_work(&auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT); 97 schedule_delayed_work(&auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT);
98 auth->retry--; 98 auth->retry--;
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index 09541611e48c..8cc8b20f5cda 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -229,6 +229,9 @@ ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt,
229 return 0; 229 return 0;
230 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_ASSOC_REQ, net->bssid, net->bssid); 230 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_ASSOC_REQ, net->bssid, net->bssid);
231 231
232 /* Fill in the capabilities */
233 (*pkt)->capability = ieee80211softmac_capabilities(mac, net);
234
232 /* Fill in Listen Interval (?) */ 235 /* Fill in Listen Interval (?) */
233 (*pkt)->listen_interval = cpu_to_le16(10); 236 (*pkt)->listen_interval = cpu_to_le16(10);
234 237
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 0e65ff4e33fc..75320b6842ab 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -70,12 +70,44 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
70 char *extra) 70 char *extra)
71{ 71{
72 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); 72 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
73 struct ieee80211softmac_network *n;
74 struct ieee80211softmac_auth_queue_item *authptr;
73 int length = 0; 75 int length = 0;
74 unsigned long flags; 76 unsigned long flags;
75 77
78 /* Check if we're already associating to this or another network
79 * If it's another network, cancel and start over with our new network
80 * If it's our network, ignore the change, we're already doing it!
81 */
82 if((sm->associnfo.associating || sm->associated) &&
83 (data->essid.flags && data->essid.length && extra)) {
84 /* Get the associating network */
85 n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
86 if(n && n->essid.len == (data->essid.length - 1) &&
87 !memcmp(n->essid.data, extra, n->essid.len)) {
88 dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
89 MAC_ARG(sm->associnfo.bssid));
90 return 0;
91 } else {
92 dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
93 spin_lock_irqsave(&sm->lock,flags);
94 /* Cancel assoc work */
95 cancel_delayed_work(&sm->associnfo.work);
96 /* We don't have to do this, but it's a little cleaner */
97 list_for_each_entry(authptr, &sm->auth_queue, list)
98 cancel_delayed_work(&authptr->work);
99 sm->associnfo.bssvalid = 0;
100 sm->associnfo.bssfixed = 0;
101 spin_unlock_irqrestore(&sm->lock,flags);
102 flush_scheduled_work();
103 }
104 }
105
106
76 spin_lock_irqsave(&sm->lock, flags); 107 spin_lock_irqsave(&sm->lock, flags);
77 108
78 sm->associnfo.static_essid = 0; 109 sm->associnfo.static_essid = 0;
110 sm->associnfo.assoc_wait = 0;
79 111
80 if (data->essid.flags && data->essid.length && extra /*required?*/) { 112 if (data->essid.flags && data->essid.length && extra /*required?*/) {
81 length = min(data->essid.length - 1, IW_ESSID_MAX_SIZE); 113 length = min(data->essid.length - 1, IW_ESSID_MAX_SIZE);
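
The softmac changes close two double-submission races: ieee80211softmac_auth_req() now refuses networks that are already authenticated or authenticating and sets net->authenticating before the delayed work is queued (rather than inside the queued work), and the new assoc_wait bit makes sure only one "associate once auth finishes" notification is ever registered. The flag-before-queue idea in a condensed, single-threaded sketch (names invented):

    #include <stdio.h>

    struct network {
        int authenticating;
        int authenticated;
    };

    /* Stand-in for scheduling the delayed authentication work. */
    static void queue_auth_work(struct network *net)
    {
        (void)net;
        printf("auth work queued\n");
    }

    /* Returns 0 when a request is, or already was, in flight. */
    static int auth_req(struct network *net)
    {
        if (net->authenticating || net->authenticated)
            return 0;               /* no duplicate work */
        net->authenticating = 1;    /* mark busy *before* queueing */
        queue_auth_work(net);
        return 0;
    }

    int main(void)
    {
        struct network net = { 0, 0 };

        auth_req(&net);    /* queues once */
        auth_req(&net);    /* second call is a no-op */
        return 0;
    }
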
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5b9397b33238..599423cc9d0d 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -250,15 +250,17 @@ tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
250 RTA_PUT(skb, a->order, 0, NULL); 250 RTA_PUT(skb, a->order, 0, NULL);
251 err = tcf_action_dump_1(skb, a, bind, ref); 251 err = tcf_action_dump_1(skb, a, bind, ref);
252 if (err < 0) 252 if (err < 0)
253 goto rtattr_failure; 253 goto errout;
254 r->rta_len = skb->tail - (u8*)r; 254 r->rta_len = skb->tail - (u8*)r;
255 } 255 }
256 256
257 return 0; 257 return 0;
258 258
259rtattr_failure: 259rtattr_failure:
260 err = -EINVAL;
261errout:
260 skb_trim(skb, b - skb->data); 262 skb_trim(skb, b - skb->data);
261 return -err; 263 return err;
262} 264}
263 265
264struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, 266struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
@@ -305,6 +307,7 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
305 goto err_mod; 307 goto err_mod;
306 } 308 }
307#endif 309#endif
310 *err = -ENOENT;
308 goto err_out; 311 goto err_out;
309 } 312 }
310 313
@@ -776,7 +779,7 @@ replay:
776 return ret; 779 return ret;
777} 780}
778 781
779static char * 782static struct rtattr *
780find_dump_kind(struct nlmsghdr *n) 783find_dump_kind(struct nlmsghdr *n)
781{ 784{
782 struct rtattr *tb1, *tb2[TCA_ACT_MAX+1]; 785 struct rtattr *tb1, *tb2[TCA_ACT_MAX+1];
@@ -804,7 +807,7 @@ find_dump_kind(struct nlmsghdr *n)
804 return NULL; 807 return NULL;
805 kind = tb2[TCA_ACT_KIND-1]; 808 kind = tb2[TCA_ACT_KIND-1];
806 809
807 return (char *) RTA_DATA(kind); 810 return kind;
808} 811}
809 812
810static int 813static int
@@ -817,16 +820,15 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
817 struct tc_action a; 820 struct tc_action a;
818 int ret = 0; 821 int ret = 0;
819 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); 822 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
820 char *kind = find_dump_kind(cb->nlh); 823 struct rtattr *kind = find_dump_kind(cb->nlh);
821 824
822 if (kind == NULL) { 825 if (kind == NULL) {
823 printk("tc_dump_action: action bad kind\n"); 826 printk("tc_dump_action: action bad kind\n");
824 return 0; 827 return 0;
825 } 828 }
826 829
827 a_o = tc_lookup_action_n(kind); 830 a_o = tc_lookup_action(kind);
828 if (a_o == NULL) { 831 if (a_o == NULL) {
829 printk("failed to find %s\n", kind);
830 return 0; 832 return 0;
831 } 833 }
832 834
@@ -834,7 +836,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
834 a.ops = a_o; 836 a.ops = a_o;
835 837
836 if (a_o->walk == NULL) { 838 if (a_o->walk == NULL) {
837 printk("tc_dump_action: %s !capable of dumping table\n", kind); 839 printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind);
838 goto rtattr_failure; 840 goto rtattr_failure;
839 } 841 }
840 842
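
The act_api.c fix replaces "return -err", which flipped the sign of an errno that was already negative, with two labels: rtattr_failure picks -EINVAL and falls through to errout, which trims the partly built message and returns err unchanged. The label layout in a standalone form (build_msg and its failure modes are invented):

    #include <errno.h>
    #include <stdio.h>

    static int build_part(int fail_mode)
    {
        return fail_mode == 1 ? -ENOMEM : 0;    /* callee picks its own errno */
    }

    static int build_msg(int fail_mode)
    {
        int err;

        err = build_part(fail_mode);
        if (err < 0)
            goto errout;            /* keep the callee's error code */
        if (fail_mode == 2)
            goto attr_failure;      /* local formatting problem */
        return 0;

    attr_failure:
        err = -EINVAL;              /* pick a code, then share the cleanup */
    errout:
        /* roll back any partially built message here */
        return err;                 /* negative errno, sign preserved */
    }

    int main(void)
    {
        printf("%d %d %d\n", build_msg(0), build_msg(1), build_msg(2));
        return 0;
    }
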
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 49174f0d0a3e..6ac45103a272 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -191,7 +191,6 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
191 do { 191 do {
192 /* Are any pointers crossing a page boundary? */ 192 /* Are any pointers crossing a page boundary? */
193 if (pgto_base == 0) { 193 if (pgto_base == 0) {
194 flush_dcache_page(*pgto);
195 pgto_base = PAGE_CACHE_SIZE; 194 pgto_base = PAGE_CACHE_SIZE;
196 pgto--; 195 pgto--;
197 } 196 }
@@ -211,11 +210,11 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
211 vto = kmap_atomic(*pgto, KM_USER0); 210 vto = kmap_atomic(*pgto, KM_USER0);
212 vfrom = kmap_atomic(*pgfrom, KM_USER1); 211 vfrom = kmap_atomic(*pgfrom, KM_USER1);
213 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 212 memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
213 flush_dcache_page(*pgto);
214 kunmap_atomic(vfrom, KM_USER1); 214 kunmap_atomic(vfrom, KM_USER1);
215 kunmap_atomic(vto, KM_USER0); 215 kunmap_atomic(vto, KM_USER0);
216 216
217 } while ((len -= copy) != 0); 217 } while ((len -= copy) != 0);
218 flush_dcache_page(*pgto);
219} 218}
220 219
221/* 220/*
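
The xdr.c hunk flushes the destination page right after the memmove that dirtied it, while it is still the loop's current page, instead of flushing when the loop steps back a page plus once more at the end. The shape of that "write a chunk, then flush that chunk" loop, with flush_chunk() as a do-nothing stand-in for flush_dcache_page():

    #include <stdio.h>
    #include <string.h>

    #define CHUNK 8

    /* Stand-in for flush_dcache_page(); here it only reports the chunk. */
    static void flush_chunk(int idx)
    {
        printf("flushed chunk %d\n", idx);
    }

    int main(void)
    {
        char src[32], dst[32];
        size_t len = sizeof(src);

        memset(src, 'x', sizeof(src));

        for (size_t off = 0; off < len; off += CHUNK) {
            memcpy(dst + off, src + off, CHUNK);
            flush_chunk((int)(off / CHUNK));    /* flush right after writing */
        }
        return 0;
    }
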