author     Linus Torvalds <torvalds@woody.osdl.org>   2006-12-02 18:08:32 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-02 18:08:32 -0500
commit     97be852f81c5bb114aab31974af2c061eb86a6de (patch)
tree       701a9c88eef7fc3692150f5dd7edb226a6089173
parent     cdb54fac35812a21943f0e506e8e3b94b469a77c (diff)
parent     aae343d493df965ac3abec1bd97cccfe44a7d920 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (118 commits)
[netdrvr] skge: build fix
[PATCH] NetXen: driver cleanup, removed unnecessary __iomem type casts
[PATCH] PHY: Add support for configuring the PHY connection interface
[PATCH] chelsio: transmit locking (plus bug fix).
[PATCH] chelsio: statistics improvement
[PATCH] chelsio: add MSI support
[PATCH] chelsio: use standard CRC routines
[PATCH] chelsio: cleanup pm3393 code
[PATCH] chelsio: add 1G swcixw support
[PATCH] chelsio: add support for other 10G boards
[PATCH] chelsio: remove unused mutex
[PATCH] chelsio: use kzalloc
[PATCH] chelsio: whitespace fixes
[PATCH] amd8111e use standard CRC lib
[PATCH] sky2: msi enhancements.
[PATCH] sky2: kfree_skb_any needed
[PATCH] sky2: fixes for Yukon EC_U chip revisions
[PATCH] sky2: add Dlink 560SX id
[PATCH] sky2: receive error handling fix
[PATCH] skge: don't clear MC state on link down
...
137 files changed, 24511 insertions, 3156 deletions
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index 5c0a5cc03998..61b171cf5313 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -1,7 +1,7 @@
1 | Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters | 1 | Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters |
2 | =============================================================== | 2 | =============================================================== |
3 | 3 | ||
4 | November 15, 2005 | 4 | September 26, 2006 |
5 | 5 | ||
6 | 6 | ||
7 | Contents | 7 | Contents |
@@ -9,6 +9,7 @@ Contents | |||
9 | 9 | ||
10 | - In This Release | 10 | - In This Release |
11 | - Identifying Your Adapter | 11 | - Identifying Your Adapter |
12 | - Building and Installation | ||
12 | - Command Line Parameters | 13 | - Command Line Parameters |
13 | - Speed and Duplex Configuration | 14 | - Speed and Duplex Configuration |
14 | - Additional Configurations | 15 | - Additional Configurations |
@@ -41,6 +42,9 @@ or later), lspci, and ifconfig to obtain the same information. | |||
41 | Instructions on updating ethtool can be found in the section "Additional | 42 | Instructions on updating ethtool can be found in the section "Additional |
42 | Configurations" later in this document. | 43 | Configurations" later in this document. |
43 | 44 | ||
45 | NOTE: The Intel(R) 82562v 10/100 Network Connection only provides 10/100 | ||
46 | support. | ||
47 | |||
44 | 48 | ||
45 | Identifying Your Adapter | 49 | Identifying Your Adapter |
46 | ======================== | 50 | ======================== |
@@ -51,28 +55,27 @@ Driver ID Guide at: | |||
51 | http://support.intel.com/support/network/adapter/pro100/21397.htm | 55 | http://support.intel.com/support/network/adapter/pro100/21397.htm |
52 | 56 | ||
53 | For the latest Intel network drivers for Linux, refer to the following | 57 | For the latest Intel network drivers for Linux, refer to the following |
54 | website. In the search field, enter your adapter name or type, or use the | 58 | website. In the search field, enter your adapter name or type, or use the |
55 | networking link on the left to search for your adapter: | 59 | networking link on the left to search for your adapter: |
56 | 60 | ||
57 | http://downloadfinder.intel.com/scripts-df/support_intel.asp | 61 | http://downloadfinder.intel.com/scripts-df/support_intel.asp |
58 | 62 | ||
59 | 63 | ||
60 | Command Line Parameters ======================= | 64 | Command Line Parameters |
65 | ======================= | ||
61 | 66 | ||
62 | If the driver is built as a module, the following optional parameters | 67 | If the driver is built as a module, the following optional parameters |
63 | are used by entering them on the command line with the modprobe or insmod | 68 | are used by entering them on the command line with the modprobe command |
64 | command using this syntax: | 69 | using this syntax: |
65 | 70 | ||
66 | modprobe e1000 [<option>=<VAL1>,<VAL2>,...] | 71 | modprobe e1000 [<option>=<VAL1>,<VAL2>,...] |
67 | 72 | ||
68 | insmod e1000 [<option>=<VAL1>,<VAL2>,...] | ||
69 | |||
70 | For example, with two PRO/1000 PCI adapters, entering: | 73 | For example, with two PRO/1000 PCI adapters, entering: |
71 | 74 | ||
72 | insmod e1000 TxDescriptors=80,128 | 75 | modprobe e1000 TxDescriptors=80,128 |
73 | 76 | ||
74 | loads the e1000 driver with 80 TX descriptors for the first adapter and 128 | 77 | loads the e1000 driver with 80 TX descriptors for the first adapter and |
75 | TX descriptors for the second adapter. | 78 | 128 TX descriptors for the second adapter. |
76 | 79 | ||
77 | The default value for each parameter is generally the recommended setting, | 80 | The default value for each parameter is generally the recommended setting, |
78 | unless otherwise noted. | 81 | unless otherwise noted. |
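Several parameters can also be combined in a single modprobe invocation, with one comma-separated list per adapter. A sketch only; the values below are illustrative, not recommendations:

   modprobe e1000 TxDescriptors=1024,256 RxDescriptors=1024,256

This would give the first adapter 1024 transmit and receive descriptors and the second adapter 256 of each.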
@@ -87,7 +90,7 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed | |||
87 | http://www.intel.com/design/network/applnots/ap450.htm | 90 | http://www.intel.com/design/network/applnots/ap450.htm |
88 | 91 | ||
89 | A descriptor describes a data buffer and attributes related to | 92 | A descriptor describes a data buffer and attributes related to |
90 | the data buffer. This information is accessed by the hardware. | 93 | the data buffer. This information is accessed by the hardware. |
91 | 94 | ||
92 | 95 | ||
93 | AutoNeg | 96 | AutoNeg |
@@ -96,9 +99,9 @@ AutoNeg | |||
96 | Valid Range: 0x01-0x0F, 0x20-0x2F | 99 | Valid Range: 0x01-0x0F, 0x20-0x2F |
97 | Default Value: 0x2F | 100 | Default Value: 0x2F |
98 | 101 | ||
99 | This parameter is a bit mask that specifies which speed and duplex | 102 | This parameter is a bit-mask that specifies the speed and duplex settings |
100 | settings the board advertises. When this parameter is used, the Speed | 103 | advertised by the adapter. When this parameter is used, the Speed and |
101 | and Duplex parameters must not be specified. | 104 | Duplex parameters must not be specified. |
102 | 105 | ||
103 | NOTE: Refer to the Speed and Duplex section of this readme for more | 106 | NOTE: Refer to the Speed and Duplex section of this readme for more |
104 | information on the AutoNeg parameter. | 107 | information on the AutoNeg parameter. |
@@ -110,14 +113,15 @@ Duplex | |||
110 | Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) | 113 | Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) |
111 | Default Value: 0 | 114 | Default Value: 0 |
112 | 115 | ||
113 | Defines the direction in which data is allowed to flow. Can be either | 116 | This defines the direction in which data is allowed to flow. Can be |
114 | one or two-directional. If both Duplex and the link partner are set to | 117 | either one or two-directional. If both Duplex and the link partner are |
115 | auto-negotiate, the board auto-detects the correct duplex. If the link | 118 | set to auto-negotiate, the board auto-detects the correct duplex. If the |
116 | partner is forced (either full or half), Duplex defaults to half-duplex. | 119 | link partner is forced (either full or half), Duplex defaults to half- |
120 | duplex. | ||
117 | 121 | ||
118 | 122 | ||
119 | FlowControl | 123 | FlowControl |
120 | ---------- | 124 | ----------- |
121 | Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) | 125 | Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) |
122 | Default Value: Reads flow control settings from the EEPROM | 126 | Default Value: Reads flow control settings from the EEPROM |
123 | 127 | ||
@@ -127,57 +131,107 @@ to Ethernet PAUSE frames. | |||
127 | 131 | ||
128 | InterruptThrottleRate | 132 | InterruptThrottleRate |
129 | --------------------- | 133 | --------------------- |
130 | (not supported on Intel 82542, 82543 or 82544-based adapters) | 134 | (not supported on Intel(R) 82542, 82543 or 82544-based adapters) |
131 | Valid Range: 100-100000 (0=off, 1=dynamic) | 135 | Valid Range: 0,1,3,100-100000 (0=off, 1=dynamic, 3=dynamic conservative) |
132 | Default Value: 8000 | 136 | Default Value: 3 |
133 | 137 | ||
134 | This value represents the maximum number of interrupts per second the | 138 | The driver can limit the amount of interrupts per second that the adapter |
135 | controller generates. InterruptThrottleRate is another setting used in | 139 | will generate for incoming packets. It does this by writing a value to the |
136 | interrupt moderation. Dynamic mode uses a heuristic algorithm to adjust | 140 | adapter that is based on the maximum amount of interrupts that the adapter |
137 | InterruptThrottleRate based on the current traffic load. | 141 | will generate per second. |
142 | |||
143 | Setting InterruptThrottleRate to a value greater or equal to 100 | ||
144 | will program the adapter to send out a maximum of that many interrupts | ||
145 | per second, even if more packets have come in. This reduces interrupt | ||
146 | load on the system and can lower CPU utilization under heavy load, | ||
147 | but will increase latency as packets are not processed as quickly. | ||
148 | |||
149 | The default behaviour of the driver previously assumed a static | ||
150 | InterruptThrottleRate value of 8000, providing a good fallback value for | ||
151 | all traffic types, but lacking in small packet performance and latency. | ||
152 | The hardware can handle many more small packets per second however, and | ||
153 | for this reason an adaptive interrupt moderation algorithm was implemented. | ||
154 | |||
155 | Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which | ||
156 | it dynamically adjusts the InterruptThrottleRate value based on the traffic | ||
157 | that it receives. After determining the type of incoming traffic in the last | ||
158 | timeframe, it will adjust the InterruptThrottleRate to an appropriate value | ||
159 | for that traffic. | ||
160 | |||
161 | The algorithm classifies the incoming traffic every interval into | ||
162 | classes. Once the class is determined, the InterruptThrottleRate value is | ||
163 | adjusted to suit that traffic type the best. There are three classes defined: | ||
164 | "Bulk traffic", for large amounts of packets of normal size; "Low latency", | ||
165 | for small amounts of traffic and/or a significant percentage of small | ||
166 | packets; and "Lowest latency", for almost completely small packets or | ||
167 | minimal traffic. | ||
168 | |||
169 | In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 | ||
170 | for traffic that falls in class "Bulk traffic". If traffic falls in the "Low | ||
171 | latency" or "Lowest latency" class, the InterruptThrottleRate is increased | ||
172 | stepwise to 20000. This default mode is suitable for most applications. | ||
173 | |||
174 | For situations where low latency is vital such as cluster or | ||
175 | grid computing, the algorithm can reduce latency even more when | ||
176 | InterruptThrottleRate is set to mode 1. In this mode, which operates | ||
177 | the same as mode 3, the InterruptThrottleRate will be increased stepwise to | ||
178 | 70000 for traffic in class "Lowest latency". | ||
179 | |||
180 | Setting InterruptThrottleRate to 0 turns off any interrupt moderation | ||
181 | and may improve small packet latency, but is generally not suitable | ||
182 | for bulk throughput traffic. | ||
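As a sketch of the tuning described above (values illustrative): a latency-sensitive host could load the driver in dynamic mode 1, while a host that only cares about bulk throughput at low CPU cost could pin the rate instead:

   modprobe e1000 InterruptThrottleRate=1
   modprobe e1000 InterruptThrottleRate=8000

The resulting interrupt load can be observed in /proc/interrupts.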
138 | 183 | ||
139 | NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and | 184 | NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and |
140 | RxAbsIntDelay parameters. In other words, minimizing the receive | 185 | RxAbsIntDelay parameters. In other words, minimizing the receive |
141 | and/or transmit absolute delays does not force the controller to | 186 | and/or transmit absolute delays does not force the controller to |
142 | generate more interrupts than what the Interrupt Throttle Rate | 187 | generate more interrupts than what the Interrupt Throttle Rate |
143 | allows. | 188 | allows. |
144 | 189 | ||
145 | CAUTION: If you are using the Intel PRO/1000 CT Network Connection | 190 | CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection |
146 | (controller 82547), setting InterruptThrottleRate to a value | 191 | (controller 82547), setting InterruptThrottleRate to a value |
147 | greater than 75,000, may hang (stop transmitting) adapters | 192 | greater than 75,000, may hang (stop transmitting) adapters |
148 | under certain network conditions. If this occurs a NETDEV | 193 | under certain network conditions. If this occurs a NETDEV |
149 | WATCHDOG message is logged in the system event log. In | 194 | WATCHDOG message is logged in the system event log. In |
150 | addition, the controller is automatically reset, restoring | 195 | addition, the controller is automatically reset, restoring |
151 | the network connection. To eliminate the potential for the | 196 | the network connection. To eliminate the potential for the |
152 | hang, ensure that InterruptThrottleRate is set no greater | 197 | hang, ensure that InterruptThrottleRate is set no greater |
153 | than 75,000 and is not set to 0. | 198 | than 75,000 and is not set to 0. |
154 | 199 | ||
155 | NOTE: When e1000 is loaded with default settings and multiple adapters | 200 | NOTE: When e1000 is loaded with default settings and multiple adapters |
156 | are in use simultaneously, the CPU utilization may increase non- | 201 | are in use simultaneously, the CPU utilization may increase non- |
157 | linearly. In order to limit the CPU utilization without impacting | 202 | linearly. In order to limit the CPU utilization without impacting |
158 | the overall throughput, we recommend that you load the driver as | 203 | the overall throughput, we recommend that you load the driver as |
159 | follows: | 204 | follows: |
160 | 205 | ||
161 | insmod e1000.o InterruptThrottleRate=3000,3000,3000 | 206 | modprobe e1000 InterruptThrottleRate=3000,3000,3000 |
162 | 207 | ||
163 | This sets the InterruptThrottleRate to 3000 interrupts/sec for | 208 | This sets the InterruptThrottleRate to 3000 interrupts/sec for |
164 | the first, second, and third instances of the driver. The range | 209 | the first, second, and third instances of the driver. The range |
165 | of 2000 to 3000 interrupts per second works on a majority of | 210 | of 2000 to 3000 interrupts per second works on a majority of |
166 | systems and is a good starting point, but the optimal value will | 211 | systems and is a good starting point, but the optimal value will |
167 | be platform-specific. If CPU utilization is not a concern, use | 212 | be platform-specific. If CPU utilization is not a concern, use |
168 | RX_POLLING (NAPI) and default driver settings. | 213 | RX_POLLING (NAPI) and default driver settings. |
169 | 214 | ||
170 | 215 | ||
216 | |||
171 | RxDescriptors | 217 | RxDescriptors |
172 | ------------- | 218 | ------------- |
173 | Valid Range: 80-256 for 82542 and 82543-based adapters | 219 | Valid Range: 80-256 for 82542 and 82543-based adapters |
174 | 80-4096 for all other supported adapters | 220 | 80-4096 for all other supported adapters |
175 | Default Value: 256 | 221 | Default Value: 256 |
176 | 222 | ||
177 | This value specifies the number of receive descriptors allocated by the | 223 | This value specifies the number of receive buffer descriptors allocated |
178 | driver. Increasing this value allows the driver to buffer more incoming | 224 | by the driver. Increasing this value allows the driver to buffer more |
179 | packets. Each descriptor is 16 bytes. A receive buffer is also | 225 | incoming packets, at the expense of increased system memory utilization. |
180 | allocated for each descriptor and is 2048. | 226 | |
227 | Each descriptor is 16 bytes. A receive buffer is also allocated for each | ||
228 | descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending | ||
229 | on the MTU setting. The maximum MTU size is 16110. | ||
230 | |||
231 | NOTE: MTU designates the frame size. It only needs to be set for Jumbo | ||
232 | Frames. Depending on the available system resources, the request | ||
233 | for a higher number of receive descriptors may be denied. In this | ||
234 | case, use a lower number. | ||
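A sketch of adjusting the ring sizes described above (values illustrative; the runtime resize assumes a driver and ethtool version that support the -G option):

   modprobe e1000 RxDescriptors=4096 TxDescriptors=4096
   ethtool -G eth0 rx 4096 tx 4096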
181 | 235 | ||
182 | 236 | ||
183 | RxIntDelay | 237 | RxIntDelay |
@@ -187,17 +241,17 @@ Default Value: 0 | |||
187 | 241 | ||
188 | This value delays the generation of receive interrupts in units of 1.024 | 242 | This value delays the generation of receive interrupts in units of 1.024 |
189 | microseconds. Receive interrupt reduction can improve CPU efficiency if | 243 | microseconds. Receive interrupt reduction can improve CPU efficiency if |
190 | properly tuned for specific network traffic. Increasing this value adds | 244 | properly tuned for specific network traffic. Increasing this value adds |
191 | extra latency to frame reception and can end up decreasing the throughput | 245 | extra latency to frame reception and can end up decreasing the throughput |
192 | of TCP traffic. If the system is reporting dropped receives, this value | 246 | of TCP traffic. If the system is reporting dropped receives, this value |
193 | may be set too high, causing the driver to run out of available receive | 247 | may be set too high, causing the driver to run out of available receive |
194 | descriptors. | 248 | descriptors. |
195 | 249 | ||
196 | CAUTION: When setting RxIntDelay to a value other than 0, adapters may | 250 | CAUTION: When setting RxIntDelay to a value other than 0, adapters may |
197 | hang (stop transmitting) under certain network conditions. If | 251 | hang (stop transmitting) under certain network conditions. If |
198 | this occurs a NETDEV WATCHDOG message is logged in the system | 252 | this occurs a NETDEV WATCHDOG message is logged in the system |
199 | event log. In addition, the controller is automatically reset, | 253 | event log. In addition, the controller is automatically reset, |
200 | restoring the network connection. To eliminate the potential | 254 | restoring the network connection. To eliminate the potential |
201 | for the hang ensure that RxIntDelay is set to 0. | 255 | for the hang ensure that RxIntDelay is set to 0. |
202 | 256 | ||
203 | 257 | ||
@@ -208,7 +262,7 @@ Valid Range: 0-65535 (0=off) | |||
208 | Default Value: 128 | 262 | Default Value: 128 |
209 | 263 | ||
210 | This value, in units of 1.024 microseconds, limits the delay in which a | 264 | This value, in units of 1.024 microseconds, limits the delay in which a |
211 | receive interrupt is generated. Useful only if RxIntDelay is non-zero, | 265 | receive interrupt is generated. Useful only if RxIntDelay is non-zero, |
212 | this value ensures that an interrupt is generated after the initial | 266 | this value ensures that an interrupt is generated after the initial |
213 | packet is received within the set amount of time. Proper tuning, | 267 | packet is received within the set amount of time. Proper tuning, |
214 | along with RxIntDelay, may improve traffic throughput in specific network | 268 | along with RxIntDelay, may improve traffic throughput in specific network |
@@ -222,9 +276,9 @@ Valid Settings: 0, 10, 100, 1000 | |||
222 | Default Value: 0 (auto-negotiate at all supported speeds) | 276 | Default Value: 0 (auto-negotiate at all supported speeds) |
223 | 277 | ||
224 | Speed forces the line speed to the specified value in megabits per second | 278 | Speed forces the line speed to the specified value in megabits per second |
225 | (Mbps). If this parameter is not specified or is set to 0 and the link | 279 | (Mbps). If this parameter is not specified or is set to 0 and the link |
226 | partner is set to auto-negotiate, the board will auto-detect the correct | 280 | partner is set to auto-negotiate, the board will auto-detect the correct |
227 | speed. Duplex should also be set when Speed is set to either 10 or 100. | 281 | speed. Duplex should also be set when Speed is set to either 10 or 100. |
228 | 282 | ||
229 | 283 | ||
230 | TxDescriptors | 284 | TxDescriptors |
@@ -234,7 +288,7 @@ Valid Range: 80-256 for 82542 and 82543-based adapters | |||
234 | Default Value: 256 | 288 | Default Value: 256 |
235 | 289 | ||
236 | This value is the number of transmit descriptors allocated by the driver. | 290 | This value is the number of transmit descriptors allocated by the driver. |
237 | Increasing this value allows the driver to queue more transmits. Each | 291 | Increasing this value allows the driver to queue more transmits. Each |
238 | descriptor is 16 bytes. | 292 | descriptor is 16 bytes. |
239 | 293 | ||
240 | NOTE: Depending on the available system resources, the request for a | 294 | NOTE: Depending on the available system resources, the request for a |
@@ -248,8 +302,8 @@ Valid Range: 0-65535 (0=off) | |||
248 | Default Value: 64 | 302 | Default Value: 64 |
249 | 303 | ||
250 | This value delays the generation of transmit interrupts in units of | 304 | This value delays the generation of transmit interrupts in units of |
251 | 1.024 microseconds. Transmit interrupt reduction can improve CPU | 305 | 1.024 microseconds. Transmit interrupt reduction can improve CPU |
252 | efficiency if properly tuned for specific network traffic. If the | 306 | efficiency if properly tuned for specific network traffic. If the |
253 | system is reporting dropped transmits, this value may be set too high | 307 | system is reporting dropped transmits, this value may be set too high |
254 | causing the driver to run out of available transmit descriptors. | 308 | causing the driver to run out of available transmit descriptors. |
255 | 309 | ||
@@ -261,7 +315,7 @@ Valid Range: 0-65535 (0=off) | |||
261 | Default Value: 64 | 315 | Default Value: 64 |
262 | 316 | ||
263 | This value, in units of 1.024 microseconds, limits the delay in which a | 317 | This value, in units of 1.024 microseconds, limits the delay in which a |
264 | transmit interrupt is generated. Useful only if TxIntDelay is non-zero, | 318 | transmit interrupt is generated. Useful only if TxIntDelay is non-zero, |
265 | this value ensures that an interrupt is generated after the initial | 319 | this value ensures that an interrupt is generated after the initial |
266 | packet is sent on the wire within the set amount of time. Proper tuning, | 320 | packet is sent on the wire within the set amount of time. Proper tuning, |
267 | along with TxIntDelay, may improve traffic throughput in specific | 321 | along with TxIntDelay, may improve traffic throughput in specific |
@@ -288,15 +342,15 @@ fiber interface board only links at 1000 Mbps full-duplex. | |||
288 | 342 | ||
289 | For copper-based boards, the keywords interact as follows: | 343 | For copper-based boards, the keywords interact as follows: |
290 | 344 | ||
291 | The default operation is auto-negotiate. The board advertises all | 345 | The default operation is auto-negotiate. The board advertises all |
292 | supported speed and duplex combinations, and it links at the highest | 346 | supported speed and duplex combinations, and it links at the highest |
293 | common speed and duplex mode IF the link partner is set to auto-negotiate. | 347 | common speed and duplex mode IF the link partner is set to auto-negotiate. |
294 | 348 | ||
295 | If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps | 349 | If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps |
296 | is advertised (The 1000BaseT spec requires auto-negotiation.) | 350 | is advertised (The 1000BaseT spec requires auto-negotiation.) |
297 | 351 | ||
298 | If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- | 352 | If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- |
299 | negotiation is disabled, and the AutoNeg parameter is ignored. Partner | 353 | negotiation is disabled, and the AutoNeg parameter is ignored. Partner |
300 | SHOULD also be forced. | 354 | SHOULD also be forced. |
301 | 355 | ||
302 | The AutoNeg parameter is used when more control is required over the | 356 | The AutoNeg parameter is used when more control is required over the |
@@ -304,7 +358,7 @@ auto-negotiation process. It should be used when you wish to control which | |||
304 | speed and duplex combinations are advertised during the auto-negotiation | 358 | speed and duplex combinations are advertised during the auto-negotiation |
305 | process. | 359 | process. |
306 | 360 | ||
307 | The parameter may be specified as either a decimal or hexidecimal value as | 361 | The parameter may be specified as either a decimal or hexadecimal value as |
308 | determined by the bitmap below. | 362 | determined by the bitmap below. |
309 | 363 | ||
310 | Bit position 7 6 5 4 3 2 1 0 | 364 | Bit position 7 6 5 4 3 2 1 0 |
@@ -337,20 +391,19 @@ Additional Configurations | |||
337 | 391 | ||
338 | Configuring the Driver on Different Distributions | 392 | Configuring the Driver on Different Distributions |
339 | ------------------------------------------------- | 393 | ------------------------------------------------- |
340 | |||
341 | Configuring a network driver to load properly when the system is started | 394 | Configuring a network driver to load properly when the system is started |
342 | is distribution dependent. Typically, the configuration process involves | 395 | is distribution dependent. Typically, the configuration process involves |
343 | adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well | 396 | adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well |
344 | as editing other system startup scripts and/or configuration files. Many | 397 | as editing other system startup scripts and/or configuration files. Many |
345 | popular Linux distributions ship with tools to make these changes for you. | 398 | popular Linux distributions ship with tools to make these changes for you. |
346 | To learn the proper way to configure a network device for your system, | 399 | To learn the proper way to configure a network device for your system, |
347 | refer to your distribution documentation. If during this process you are | 400 | refer to your distribution documentation. If during this process you are |
348 | asked for the driver or module name, the name for the Linux Base Driver | 401 | asked for the driver or module name, the name for the Linux Base Driver |
349 | for the Intel PRO/1000 Family of Adapters is e1000. | 402 | for the Intel(R) PRO/1000 Family of Adapters is e1000. |
350 | 403 | ||
351 | As an example, if you install the e1000 driver for two PRO/1000 adapters | 404 | As an example, if you install the e1000 driver for two PRO/1000 adapters |
352 | (eth0 and eth1) and set the speed and duplex to 10full and 100half, add | 405 | (eth0 and eth1) and set the speed and duplex to 10full and 100half, add |
353 | the following to modules.conf or modprobe.conf: | 406 | the following to modules.conf or modprobe.conf: |
354 | 407 | ||
355 | alias eth0 e1000 | 408 | alias eth0 e1000 |
356 | alias eth1 e1000 | 409 | alias eth1 e1000 |
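The alias lines are normally paired with an options line carrying the per-interface parameters; the remainder of this example falls outside the quoted hunk, so treat the following as an assumption matching the 10full/100half case above:

   options e1000 Speed=10,100 Duplex=2,1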
@@ -358,9 +411,8 @@ Additional Configurations | |||
358 | 411 | ||
359 | Viewing Link Messages | 412 | Viewing Link Messages |
360 | --------------------- | 413 | --------------------- |
361 | |||
362 | Link messages will not be displayed to the console if the distribution is | 414 | Link messages will not be displayed to the console if the distribution is |
363 | restricting system messages. In order to see network driver link messages | 415 | restricting system messages. In order to see network driver link messages |
364 | on your console, set dmesg to eight by entering the following: | 416 | on your console, set dmesg to eight by entering the following: |
365 | 417 | ||
366 | dmesg -n 8 | 418 | dmesg -n 8 |
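Link messages that have already been emitted can also be reviewed afterwards, for example with:

   dmesg | grep -i e1000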
@@ -369,11 +421,9 @@ Additional Configurations | |||
369 | 421 | ||
370 | Jumbo Frames | 422 | Jumbo Frames |
371 | ------------ | 423 | ------------ |
372 | 424 | Jumbo Frames support is enabled by changing the MTU to a value larger than | |
373 | The driver supports Jumbo Frames for all adapters except 82542 and | 425 | the default of 1500. Use the ifconfig command to increase the MTU size. |
374 | 82573-based adapters. Jumbo Frames support is enabled by changing the | 426 | For example: |
375 | MTU to a value larger than the default of 1500. Use the ifconfig command | ||
376 | to increase the MTU size. For example: | ||
377 | 427 | ||
378 | ifconfig eth<x> mtu 9000 up | 428 | ifconfig eth<x> mtu 9000 up |
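On systems with iproute2 installed, an equivalent sketch is (interface name illustrative):

   ip link set dev eth<x> mtu 9000 up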
379 | 429 | ||
@@ -390,26 +440,49 @@ Additional Configurations | |||
390 | 440 | ||
391 | - To enable Jumbo Frames, increase the MTU size on the interface beyond | 441 | - To enable Jumbo Frames, increase the MTU size on the interface beyond |
392 | 1500. | 442 | 1500. |
393 | - The maximum MTU setting for Jumbo Frames is 16110. This value coincides | 443 | |
444 | - The maximum MTU setting for Jumbo Frames is 16110. This value coincides | ||
394 | with the maximum Jumbo Frames size of 16128. | 445 | with the maximum Jumbo Frames size of 16128. |
446 | |||
395 | - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or | 447 | - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or |
396 | loss of link. | 448 | loss of link. |
449 | |||
397 | - Some Intel gigabit adapters that support Jumbo Frames have a frame size | 450 | - Some Intel gigabit adapters that support Jumbo Frames have a frame size |
398 | limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes. | 451 | limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes. |
399 | The adapters with this limitation are based on the Intel 82571EB and | 452 | The adapters with this limitation are based on the Intel(R) 82571EB, |
400 | 82572EI controllers, which correspond to these product names: | 453 | 82572EI, 82573L and 80003ES2LAN controller. These correspond to the |
401 | Intel® PRO/1000 PT Dual Port Server Adapter | 454 | following product names: |
402 | Intel® PRO/1000 PF Dual Port Server Adapter | 455 | Intel(R) PRO/1000 PT Server Adapter |
403 | Intel® PRO/1000 PT Server Adapter | 456 | Intel(R) PRO/1000 PT Desktop Adapter |
404 | Intel® PRO/1000 PT Desktop Adapter | 457 | Intel(R) PRO/1000 PT Network Connection |
405 | Intel® PRO/1000 PF Server Adapter | 458 | Intel(R) PRO/1000 PT Dual Port Server Adapter |
406 | 459 | Intel(R) PRO/1000 PT Dual Port Network Connection | |
407 | - The Intel PRO/1000 PM Network Connection does not support jumbo frames. | 460 | Intel(R) PRO/1000 PF Server Adapter |
461 | Intel(R) PRO/1000 PF Network Connection | ||
462 | Intel(R) PRO/1000 PF Dual Port Server Adapter | ||
463 | Intel(R) PRO/1000 PB Server Connection | ||
464 | Intel(R) PRO/1000 PL Network Connection | ||
465 | Intel(R) PRO/1000 EB Network Connection with I/O Acceleration | ||
466 | Intel(R) PRO/1000 EB Backplane Connection with I/O Acceleration | ||
467 | Intel(R) PRO/1000 PT Quad Port Server Adapter | ||
468 | |||
469 | - Adapters based on the Intel(R) 82542 and 82573V/E controller do not | ||
470 | support Jumbo Frames. These correspond to the following product names: | ||
471 | Intel(R) PRO/1000 Gigabit Server Adapter | ||
472 | Intel(R) PRO/1000 PM Network Connection | ||
473 | |||
474 | - The following adapters do not support Jumbo Frames: | ||
475 | Intel(R) 82562V 10/100 Network Connection | ||
476 | Intel(R) 82566DM Gigabit Network Connection | ||
477 | Intel(R) 82566DC Gigabit Network Connection | ||
478 | Intel(R) 82566MM Gigabit Network Connection | ||
479 | Intel(R) 82566MC Gigabit Network Connection | ||
480 | Intel(R) 82562GT 10/100 Network Connection | ||
481 | Intel(R) 82562G 10/100 Network Connection | ||
408 | 482 | ||
409 | 483 | ||
410 | Ethtool | 484 | Ethtool |
411 | ------- | 485 | ------- |
412 | |||
413 | The driver utilizes the ethtool interface for driver configuration and | 486 | The driver utilizes the ethtool interface for driver configuration and |
414 | diagnostics, as well as displaying statistical information. Ethtool | 487 | diagnostics, as well as displaying statistical information. Ethtool |
415 | version 1.6 or later is required for this functionality. | 488 | version 1.6 or later is required for this functionality. |
@@ -417,15 +490,14 @@ Additional Configurations | |||
417 | The latest release of ethtool can be found from | 490 | The latest release of ethtool can be found from |
418 | http://sourceforge.net/projects/gkernel. | 491 | http://sourceforge.net/projects/gkernel. |
419 | 492 | ||
420 | NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support | 493 | NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support |
421 | for a more complete ethtool feature set can be enabled by upgrading | 494 | for a more complete ethtool feature set can be enabled by upgrading |
422 | ethtool to ethtool-1.8.1. | 495 | ethtool to ethtool-1.8.1. |
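Typical invocations, as a sketch (interface name illustrative; the statistics option assumes a sufficiently recent ethtool):

   ethtool -i eth<x>     (driver name and version)
   ethtool -S eth<x>     (adapter statistics)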
423 | 496 | ||
424 | Enabling Wake on LAN* (WoL) | 497 | Enabling Wake on LAN* (WoL) |
425 | --------------------------- | 498 | --------------------------- |
426 | 499 | WoL is configured through the Ethtool* utility. Ethtool is included with | |
427 | WoL is configured through the Ethtool* utility. Ethtool is included with | 500 | all versions of Red Hat after Red Hat 7.2. For other Linux distributions, |
428 | all versions of Red Hat after Red Hat 7.2. For other Linux distributions, | ||
429 | download and install Ethtool from the following website: | 501 | download and install Ethtool from the following website: |
430 | http://sourceforge.net/projects/gkernel. | 502 | http://sourceforge.net/projects/gkernel. |
431 | 503 | ||
@@ -436,11 +508,17 @@ Additional Configurations | |||
436 | For this driver version, in order to enable WoL, the e1000 driver must be | 508 | For this driver version, in order to enable WoL, the e1000 driver must be |
437 | loaded when shutting down or rebooting the system. | 509 | loaded when shutting down or rebooting the system. |
438 | 510 | ||
511 | Wake On LAN is only supported on port A for the following devices: | ||
512 | Intel(R) PRO/1000 PT Dual Port Network Connection | ||
513 | Intel(R) PRO/1000 PT Dual Port Server Connection | ||
514 | Intel(R) PRO/1000 PT Dual Port Server Adapter | ||
515 | Intel(R) PRO/1000 PF Dual Port Server Adapter | ||
516 | Intel(R) PRO/1000 PT Quad Port Server Adapter | ||
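A sketch of the ethtool-based WoL configuration described above (interface name illustrative; "g" requests magic-packet wake where the adapter supports it):

   ethtool -s eth0 wol g
   ethtool eth0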
517 | |||
439 | NAPI | 518 | NAPI |
440 | ---- | 519 | ---- |
441 | 520 | NAPI (Rx polling mode) is supported in the e1000 driver. NAPI is enabled | |
442 | NAPI (Rx polling mode) is supported in the e1000 driver. NAPI is enabled | 521 | or disabled based on the configuration of the kernel. To override |
443 | or disabled based on the configuration of the kernel. To override | ||
444 | the default, use the following compile-time flags. | 522 | the default, use the following compile-time flags. |
445 | 523 | ||
446 | To enable NAPI, compile the driver module, passing in a configuration option: | 524 | To enable NAPI, compile the driver module, passing in a configuration option: |
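The configuration option itself falls outside the quoted hunk; as an assumption based on e1000 releases of this era, it is typically the E1000_NAPI define, e.g.:

   make CFLAGS_EXTRA=-DE1000_NAPI install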
@@ -457,88 +535,105 @@ Additional Configurations | |||
457 | Known Issues | 535 | Known Issues |
458 | ============ | 536 | ============ |
459 | 537 | ||
460 | Jumbo Frames System Requirement | 538 | Dropped Receive Packets on Half-duplex 10/100 Networks |
461 | ------------------------------- | 539 | ------------------------------------------------------ |
462 | 540 | If you have an Intel PCI Express adapter running at 10mbps or 100mbps, half- | |
463 | Memory allocation failures have been observed on Linux systems with 64 MB | 541 | duplex, you may observe occasional dropped receive packets. There are no |
464 | of RAM or less that are running Jumbo Frames. If you are using Jumbo | 542 | workarounds for this problem in this network configuration. The network must |
465 | Frames, your system may require more than the advertised minimum | 543 | be updated to operate in full-duplex, and/or 1000mbps only. |
466 | requirement of 64 MB of system memory. | 544 | |
467 | 545 | Jumbo Frames System Requirement | |
468 | Performance Degradation with Jumbo Frames | 546 | ------------------------------- |
469 | ----------------------------------------- | 547 | Memory allocation failures have been observed on Linux systems with 64 MB |
470 | 548 | of RAM or less that are running Jumbo Frames. If you are using Jumbo | |
471 | Degradation in throughput performance may be observed in some Jumbo frames | 549 | Frames, your system may require more than the advertised minimum |
472 | environments. If this is observed, increasing the application's socket | 550 | requirement of 64 MB of system memory. |
473 | buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values | 551 | |
474 | may help. See the specific application manual and | 552 | Performance Degradation with Jumbo Frames |
475 | /usr/src/linux*/Documentation/ | 553 | ----------------------------------------- |
476 | networking/ip-sysctl.txt for more details. | 554 | Degradation in throughput performance may be observed in some Jumbo frames |
477 | 555 | environments. If this is observed, increasing the application's socket | |
478 | Jumbo frames on Foundry BigIron 8000 switch | 556 | buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values |
479 | ------------------------------------------- | 557 | may help. See the specific application manual and |
480 | There is a known issue using Jumbo frames when connected to a Foundry | 558 | /usr/src/linux*/Documentation/ |
481 | BigIron 8000 switch. This is a 3rd party limitation. If you experience | 559 | networking/ip-sysctl.txt for more details. |
482 | loss of packets, lower the MTU size. | 560 | |
483 | 561 | Jumbo Frames on Foundry BigIron 8000 switch | |
484 | Multiple Interfaces on Same Ethernet Broadcast Network | 562 | ------------------------------------------- |
485 | ------------------------------------------------------ | 563 | There is a known issue using Jumbo frames when connected to a Foundry |
486 | 564 | BigIron 8000 switch. This is a 3rd party limitation. If you experience | |
487 | Due to the default ARP behavior on Linux, it is not possible to have | 565 | loss of packets, lower the MTU size. |
488 | one system on two IP networks in the same Ethernet broadcast domain | 566 | |
489 | (non-partitioned switch) behave as expected. All Ethernet interfaces | 567 | Allocating Rx Buffers when Using Jumbo Frames |
490 | will respond to IP traffic for any IP address assigned to the system. | 568 | --------------------------------------------- |
491 | This results in unbalanced receive traffic. | 569 | Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if |
492 | 570 | the available memory is heavily fragmented. This issue may be seen with PCI-X | |
493 | If you have multiple interfaces in a server, either turn on ARP | 571 | adapters or with packet split disabled. This can be reduced or eliminated |
494 | filtering by entering: | 572 | by changing the amount of available memory for receive buffer allocation, by |
495 | 573 | increasing /proc/sys/vm/min_free_kbytes. | |
496 | echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter | 574 | |
497 | (this only works if your kernel's version is higher than 2.4.5), | 575 | Multiple Interfaces on Same Ethernet Broadcast Network |
498 | 576 | ------------------------------------------------------ | |
499 | NOTE: This setting is not saved across reboots. The configuration | 577 | Due to the default ARP behavior on Linux, it is not possible to have |
500 | change can be made permanent by adding the line: | 578 | one system on two IP networks in the same Ethernet broadcast domain |
501 | net.ipv4.conf.all.arp_filter = 1 | 579 | (non-partitioned switch) behave as expected. All Ethernet interfaces |
502 | to the file /etc/sysctl.conf | 580 | will respond to IP traffic for any IP address assigned to the system. |
503 | 581 | This results in unbalanced receive traffic. | |
504 | or, | 582 | |
505 | 583 | If you have multiple interfaces in a server, either turn on ARP | |
506 | install the interfaces in separate broadcast domains (either in | 584 | filtering by entering: |
507 | different switches or in a switch partitioned to VLANs). | 585 | |
508 | 586 | echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter | |
509 | 82541/82547 can't link or are slow to link with some link partners | 587 | (this only works if your kernel's version is higher than 2.4.5), |
510 | ----------------------------------------------------------------- | 588 | |
511 | 589 | NOTE: This setting is not saved across reboots. The configuration | |
512 | There is a known compatibility issue with 82541/82547 and some | 590 | change can be made permanent by adding the line: |
513 | low-end switches where the link will not be established, or will | 591 | net.ipv4.conf.all.arp_filter = 1 |
514 | be slow to establish. In particular, these switches are known to | 592 | to the file /etc/sysctl.conf |
515 | be incompatible with 82541/82547: | 593 | |
516 | 594 | or, | |
517 | Planex FXG-08TE | 595 | |
518 | I-O Data ETG-SH8 | 596 | install the interfaces in separate broadcast domains (either in |
519 | 597 | different switches or in a switch partitioned to VLANs). | |
520 | To workaround this issue, the driver can be compiled with an override | 598 | |
521 | of the PHY's master/slave setting. Forcing master or forcing slave | 599 | 82541/82547 can't link or are slow to link with some link partners |
522 | mode will improve time-to-link. | 600 | ----------------------------------------------------------------- |
523 | 601 | There is a known compatibility issue with 82541/82547 and some | |
524 | # make EXTRA_CFLAGS=-DE1000_MASTER_SLAVE=<n> | 602 | low-end switches where the link will not be established, or will |
525 | 603 | be slow to establish. In particular, these switches are known to | |
526 | Where <n> is: | 604 | be incompatible with 82541/82547: |
527 | 605 | ||
528 | 0 = Hardware default | 606 | Planex FXG-08TE |
529 | 1 = Master mode | 607 | I-O Data ETG-SH8 |
530 | 2 = Slave mode | 608 | |
531 | 3 = Auto master/slave | 609 | To workaround this issue, the driver can be compiled with an override |
532 | 610 | of the PHY's master/slave setting. Forcing master or forcing slave | |
533 | Disable rx flow control with ethtool | 611 | mode will improve time-to-link. |
534 | ------------------------------------ | 612 | |
535 | 613 | # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=<n> | |
536 | In order to disable receive flow control using ethtool, you must turn | 614 | |
537 | off auto-negotiation on the same command line. | 615 | Where <n> is: |
538 | 616 | ||
539 | For example: | 617 | 0 = Hardware default |
540 | 618 | 1 = Master mode | |
541 | ethtool -A eth? autoneg off rx off | 619 | 2 = Slave mode |
620 | 3 = Auto master/slave | ||
621 | |||
622 | Disable rx flow control with ethtool | ||
623 | ------------------------------------ | ||
624 | In order to disable receive flow control using ethtool, you must turn | ||
625 | off auto-negotiation on the same command line. | ||
626 | |||
627 | For example: | ||
628 | |||
629 | ethtool -A eth? autoneg off rx off | ||
630 | |||
631 | Unplugging network cable while ethtool -p is running | ||
632 | ---------------------------------------------------- | ||
633 | In kernel versions 2.5.50 and later (including 2.6 kernel), unplugging | ||
634 | the network cable while ethtool -p is running will cause the system to | ||
635 | become unresponsive to keyboard commands, except for control-alt-delete. | ||
636 | Restarting the system appears to be the only remedy. | ||
542 | 637 | ||
543 | 638 | ||
544 | Support | 639 | Support |
@@ -548,24 +643,10 @@ For general information, go to the Intel support website at: | |||
548 | 643 | ||
549 | http://support.intel.com | 644 | http://support.intel.com |
550 | 645 | ||
551 | or the Intel Wired Networking project hosted by Sourceforge at: | 646 | or the Intel Wired Networking project hosted by Sourceforge at: |
552 | 647 | ||
553 | http://sourceforge.net/projects/e1000 | 648 | http://sourceforge.net/projects/e1000 |
554 | 649 | ||
555 | If an issue is identified with the released source code on the supported | 650 | If an issue is identified with the released source code on the supported |
556 | kernel with a supported adapter, email the specific information related | 651 | kernel with a supported adapter, email the specific information related |
557 | to the issue to e1000-devel@lists.sourceforge.net | 652 | to the issue to e1000-devel@lists.sf.net |
558 | |||
559 | |||
560 | License | ||
561 | ======= | ||
562 | |||
563 | This software program is released under the terms of a license agreement | ||
564 | between you ('Licensee') and Intel. Do not use or load this software or any | ||
565 | associated materials (collectively, the 'Software') until you have carefully | ||
566 | read the full terms and conditions of the file COPYING located in this software | ||
567 | package. By loading or using the Software, you agree to the terms of this | ||
568 | Agreement. If you do not agree with the terms of this Agreement, do not | ||
569 | install or use the Software. | ||
570 | |||
571 | * Other names and brands may be claimed as the property of others. | ||
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 29ccae409031..0bc95eab1512 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -1,7 +1,7 @@
1 | 1 | ||
2 | ------- | 2 | ------- |
3 | PHY Abstraction Layer | 3 | PHY Abstraction Layer |
4 | (Updated 2005-07-21) | 4 | (Updated 2006-11-30) |
5 | 5 | ||
6 | Purpose | 6 | Purpose |
7 | 7 | ||
@@ -97,11 +97,12 @@ Letting the PHY Abstraction Layer do Everything | |||
97 | 97 | ||
98 | Next, you need to know the device name of the PHY connected to this device. | 98 | Next, you need to know the device name of the PHY connected to this device. |
99 | The name will look something like, "phy0:0", where the first number is the | 99 | The name will look something like, "phy0:0", where the first number is the |
100 | bus id, and the second is the PHY's address on that bus. | 100 | bus id, and the second is the PHY's address on that bus. Typically, |
101 | the bus is responsible for making its ID unique. | ||
101 | 102 | ||
102 | Now, to connect, just call this function: | 103 | Now, to connect, just call this function: |
103 | 104 | ||
104 | phydev = phy_connect(dev, phy_name, &adjust_link, flags); | 105 | phydev = phy_connect(dev, phy_name, &adjust_link, flags, interface); |
105 | 106 | ||
106 | phydev is a pointer to the phy_device structure which represents the PHY. If | 107 | phydev is a pointer to the phy_device structure which represents the PHY. If |
107 | phy_connect is successful, it will return the pointer. dev, here, is the | 108 | phy_connect is successful, it will return the pointer. dev, here, is the |
@@ -115,6 +116,10 @@ Letting the PHY Abstraction Layer do Everything | |||
115 | This is useful if the system has put hardware restrictions on | 116 | This is useful if the system has put hardware restrictions on |
116 | the PHY/controller, of which the PHY needs to be aware. | 117 | the PHY/controller, of which the PHY needs to be aware. |
117 | 118 | ||
119 | interface is a u32 which specifies the connection type used | ||
120 | between the controller and the PHY. Examples are GMII, MII, | ||
121 | RGMII, and SGMII. For a full list, see include/linux/phy.h | ||
122 | |||
118 | Now just make sure that phydev->supported and phydev->advertising have any | 123 | Now just make sure that phydev->supported and phydev->advertising have any |
119 | values pruned from them which don't make sense for your controller (a 10/100 | 124 | values pruned from them which don't make sense for your controller (a 10/100 |
120 | controller may be connected to a gigabit capable PHY, so you would need to | 125 | controller may be connected to a gigabit capable PHY, so you would need to |
@@ -191,7 +196,7 @@ Doing it all yourself | |||
191 | start, or disables then frees them for stop. | 196 | start, or disables then frees them for stop. |
192 | 197 | ||
193 | struct phy_device * phy_attach(struct net_device *dev, const char *phy_id, | 198 | struct phy_device * phy_attach(struct net_device *dev, const char *phy_id, |
194 | u32 flags); | 199 | u32 flags, phy_interface_t interface); |
195 | 200 | ||
196 | Attaches a network device to a particular PHY, binding the PHY to a generic | 201 | Attaches a network device to a particular PHY, binding the PHY to a generic |
197 | driver if none was found during bus initialization. Passes in | 202 | driver if none was found during bus initialization. Passes in |
diff --git a/MAINTAINERS b/MAINTAINERS
index 846e77a78710..45df5d4e2ab3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -432,6 +432,13 @@ L: linux-atm-general@lists.sourceforge.net (subscribers-only)
432 | W: http://linux-atm.sourceforge.net | 432 | W: http://linux-atm.sourceforge.net |
433 | S: Maintained | 433 | S: Maintained |
434 | 434 | ||
435 | ATMEL MACB ETHERNET DRIVER | ||
436 | P: Atmel AVR32 Support Team | ||
437 | M: avr32@atmel.com | ||
438 | P: Haavard Skinnemoen | ||
439 | M: hskinnemoen@atmel.com | ||
440 | S: Supported | ||
441 | |||
435 | ATMEL WIRELESS DRIVER | 442 | ATMEL WIRELESS DRIVER |
436 | P: Simon Kelley | 443 | P: Simon Kelley |
437 | M: simon@thekelleys.org.uk | 444 | M: simon@thekelleys.org.uk |
@@ -2132,6 +2139,13 @@ L: netdev@vger.kernel.org | |||
2132 | T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git | 2139 | T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git |
2133 | S: Maintained | 2140 | S: Maintained |
2134 | 2141 | ||
2142 | NETXEN (1/10) GbE SUPPORT | ||
2143 | P: Amit S. Kale | ||
2144 | M: amitkale@netxen.com | ||
2145 | L: netdev@vger.kernel.org | ||
2146 | W: http://www.netxen.com | ||
2147 | S: Supported | ||
2148 | |||
2135 | IPVS | 2149 | IPVS |
2136 | P: Wensong Zhang | 2150 | P: Wensong Zhang |
2137 | M: wensong@linux-vs.org | 2151 | M: wensong@linux-vs.org |
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 3d1c599ac3cb..a82807641dcf 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -1,1104 +1,40 @@
1 | /* 8390.c: A general NS8390 ethernet driver core for linux. */ | 1 | /* 8390 core for usual drivers */ |
2 | /* | ||
3 | Written 1992-94 by Donald Becker. | ||
4 | |||
5 | Copyright 1993 United States Government as represented by the | ||
6 | Director, National Security Agency. | ||
7 | |||
8 | This software may be used and distributed according to the terms | ||
9 | of the GNU General Public License, incorporated herein by reference. | ||
10 | |||
11 | The author may be reached as becker@scyld.com, or C/O | ||
12 | Scyld Computing Corporation | ||
13 | 410 Severn Ave., Suite 210 | ||
14 | Annapolis MD 21403 | ||
15 | |||
16 | |||
17 | This is the chip-specific code for many 8390-based ethernet adaptors. | ||
18 | This is not a complete driver, it must be combined with board-specific | ||
19 | code such as ne.c, wd.c, 3c503.c, etc. | ||
20 | |||
21 | Seeing how at least eight drivers use this code, (not counting the | ||
22 | PCMCIA ones either) it is easy to break some card by what seems like | ||
23 | a simple innocent change. Please contact me or Donald if you think | ||
24 | you have found something that needs changing. -- PG | ||
25 | |||
26 | |||
27 | Changelog: | ||
28 | |||
29 | Paul Gortmaker : remove set_bit lock, other cleanups. | ||
30 | Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to | ||
31 | ei_block_input() for eth_io_copy_and_sum(). | ||
32 | Paul Gortmaker : exchange static int ei_pingpong for a #define, | ||
33 | also add better Tx error handling. | ||
34 | Paul Gortmaker : rewrite Rx overrun handling as per NS specs. | ||
35 | Alexey Kuznetsov : use the 8390's six bit hash multicast filter. | ||
36 | Paul Gortmaker : tweak ANK's above multicast changes a bit. | ||
37 | Paul Gortmaker : update packet statistics for v2.1.x | ||
38 | Alan Cox : support arbitary stupid port mappings on the | ||
39 | 68K Macintosh. Support >16bit I/O spaces | ||
40 | Paul Gortmaker : add kmod support for auto-loading of the 8390 | ||
41 | module by all drivers that require it. | ||
42 | Alan Cox : Spinlocking work, added 'BUG_83C690' | ||
43 | Paul Gortmaker : Separate out Tx timeout code from Tx path. | ||
44 | Paul Gortmaker : Remove old unused single Tx buffer code. | ||
45 | Hayato Fujiwara : Add m32r support. | ||
46 | Paul Gortmaker : use skb_padto() instead of stack scratch area | ||
47 | |||
48 | Sources: | ||
49 | The National Semiconductor LAN Databook, and the 3Com 3c503 databook. | ||
50 | |||
51 | */ | ||
52 | 2 | ||
53 | static const char version[] = | 3 | static const char version[] = |
54 | "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; | 4 | "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; |
55 | 5 | ||
56 | #include <linux/module.h> | 6 | #include "lib8390.c" |
57 | #include <linux/kernel.h> | ||
58 | #include <linux/jiffies.h> | ||
59 | #include <linux/fs.h> | ||
60 | #include <linux/types.h> | ||
61 | #include <linux/string.h> | ||
62 | #include <linux/bitops.h> | ||
63 | #include <asm/system.h> | ||
64 | #include <asm/uaccess.h> | ||
65 | #include <asm/io.h> | ||
66 | #include <asm/irq.h> | ||
67 | #include <linux/delay.h> | ||
68 | #include <linux/errno.h> | ||
69 | #include <linux/fcntl.h> | ||
70 | #include <linux/in.h> | ||
71 | #include <linux/interrupt.h> | ||
72 | #include <linux/init.h> | ||
73 | #include <linux/crc32.h> | ||
74 | |||
75 | #include <linux/netdevice.h> | ||
76 | #include <linux/etherdevice.h> | ||
77 | |||
78 | #define NS8390_CORE | ||
79 | #include "8390.h" | ||
80 | |||
81 | #define BUG_83C690 | ||
82 | |||
83 | /* These are the operational function interfaces to board-specific | ||
84 | routines. | ||
85 | void reset_8390(struct net_device *dev) | ||
86 | Resets the board associated with DEV, including a hardware reset of | ||
87 | the 8390. This is only called when there is a transmit timeout, and | ||
88 | it is always followed by 8390_init(). | ||
89 | void block_output(struct net_device *dev, int count, const unsigned char *buf, | ||
90 | int start_page) | ||
91 | Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The | ||
92 | "page" value uses the 8390's 256-byte pages. | ||
93 | void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) | ||
94 | Read the 4 byte, page aligned 8390 header. *If* there is a | ||
95 | subsequent read, it will be of the rest of the packet. | ||
96 | void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) | ||
97 | Read COUNT bytes from the packet buffer into the skb data area. Start | ||
98 | reading from RING_OFFSET, the address as the 8390 sees it. This will always | ||
99 | follow the read of the 8390 header. | ||
100 | */ | ||
101 | #define ei_reset_8390 (ei_local->reset_8390) | ||
102 | #define ei_block_output (ei_local->block_output) | ||
103 | #define ei_block_input (ei_local->block_input) | ||
104 | #define ei_get_8390_hdr (ei_local->get_8390_hdr) | ||
105 | |||
106 | /* use 0 for production, 1 for verification, >2 for debug */ | ||
107 | #ifndef ei_debug | ||
108 | int ei_debug = 1; | ||
109 | #endif | ||
110 | |||
111 | /* Index to functions. */ | ||
112 | static void ei_tx_intr(struct net_device *dev); | ||
113 | static void ei_tx_err(struct net_device *dev); | ||
114 | static void ei_tx_timeout(struct net_device *dev); | ||
115 | static void ei_receive(struct net_device *dev); | ||
116 | static void ei_rx_overrun(struct net_device *dev); | ||
117 | |||
118 | /* Routines generic to NS8390-based boards. */ | ||
119 | static void NS8390_trigger_send(struct net_device *dev, unsigned int length, | ||
120 | int start_page); | ||
121 | static void set_multicast_list(struct net_device *dev); | ||
122 | static void do_set_multicast_list(struct net_device *dev); | ||
123 | |||
124 | /* | ||
125 | * SMP and the 8390 setup. | ||
126 | * | ||
127 | * The 8390 isnt exactly designed to be multithreaded on RX/TX. There is | ||
128 | * a page register that controls bank and packet buffer access. We guard | ||
129 | * this with ei_local->page_lock. Nobody should assume or set the page other | ||
130 | * than zero when the lock is not held. Lock holders must restore page 0 | ||
131 | * before unlocking. Even pure readers must take the lock so that the | ||
132 | * chip is guaranteed to stay on page 0 while they access it. | ||
133 | * | ||
134 | * To make life difficult the chip can also be very slow. We therefore can't | ||
135 | * just use spinlocks. For the longer lockups we disable the irq the device | ||
136 | * sits on and hold the lock. We must hold the lock because there is a dual | ||
137 | * processor case other than interrupts (get stats/set multicast list in | ||
138 | * parallel with each other and transmit). | ||
139 | * | ||
140 | * Note: in theory we can just disable the irq on the card _but_ there is | ||
141 | * a latency on SMP irq delivery. So we can easily go "disable irq", "sync irqs", | ||
142 | * enter the lock, and still take the queued irq. So we waddle instead of flying. | ||
143 | * | ||
144 | * Finally, by special arrangement and for the purpose of being generally | ||
145 | * annoying, the transmit function is called bh-atomic. That places | ||
146 | * restrictions on the user-context callers, as disable_irq won't save | ||
147 | * them. | ||
148 | */ | ||
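Condensed, the slow-path discipline this comment describes, and which ei_tx_timeout() and ei_start_xmit() below follow, is roughly the following (a pattern sketch, not a real function):

    /* Sketch of the slow-path locking pattern described above. */
    static void slow_8390_operation(struct net_device *dev)
    {
            struct ei_device *ei_local = netdev_priv(dev);

            disable_irq_nosync_lockdep(dev->irq);  /* keep our own IRQ out of the way       */
            spin_lock(&ei_local->page_lock);       /* serialise against IRQ handler and BHs */

            /* ... slow 8390 register work, always leaving the chip on page 0 ... */

            spin_unlock(&ei_local->page_lock);
            enable_irq_lockdep(dev->irq);          /* any queued IRQ is delivered only now  */
    }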
149 | |||
150 | |||
151 | 7 | ||
152 | /** | ||
153 | * ei_open - Open/initialize the board. | ||
154 | * @dev: network device to initialize | ||
155 | * | ||
156 | * This routine goes all-out, setting everything | ||
157 | * up anew at each open, even though many of these registers should only | ||
158 | * need to be set once at boot. | ||
159 | */ | ||
160 | int ei_open(struct net_device *dev) | 8 | int ei_open(struct net_device *dev) |
161 | { | 9 | { |
162 | unsigned long flags; | 10 | return __ei_open(dev); |
163 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
164 | |||
165 | /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout | ||
166 | wrapper that does e.g. media check & then calls ei_tx_timeout. */ | ||
167 | if (dev->tx_timeout == NULL) | ||
168 | dev->tx_timeout = ei_tx_timeout; | ||
169 | if (dev->watchdog_timeo <= 0) | ||
170 | dev->watchdog_timeo = TX_TIMEOUT; | ||
171 | |||
172 | /* | ||
173 | * Grab the page lock so we own the register set, then call | ||
174 | * the init function. | ||
175 | */ | ||
176 | |||
177 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
178 | NS8390_init(dev, 1); | ||
179 | /* Set the flag before we drop the lock. That way the IRQ arrives | ||
180 | after it's set and we get no silly warnings */ | ||
181 | netif_start_queue(dev); | ||
182 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
183 | ei_local->irqlock = 0; | ||
184 | return 0; | ||
185 | } | 11 | } |
186 | 12 | ||
187 | /** | ||
188 | * ei_close - shut down network device | ||
189 | * @dev: network device to close | ||
190 | * | ||
191 | * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done. | ||
192 | */ | ||
193 | int ei_close(struct net_device *dev) | 13 | int ei_close(struct net_device *dev) |
194 | { | 14 | { |
195 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | 15 | return __ei_close(dev); |
196 | unsigned long flags; | ||
197 | |||
198 | /* | ||
199 | * Hold the page lock during close | ||
200 | */ | ||
201 | |||
202 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
203 | NS8390_init(dev, 0); | ||
204 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
205 | netif_stop_queue(dev); | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * ei_tx_timeout - handle transmit time out condition | ||
211 | * @dev: network device which has apparently fallen asleep | ||
212 | * | ||
213 | * Called by kernel when device never acknowledges a transmit has | ||
214 | * completed (or failed) - i.e. never posted a Tx related interrupt. | ||
215 | */ | ||
216 | |||
217 | void ei_tx_timeout(struct net_device *dev) | ||
218 | { | ||
219 | long e8390_base = dev->base_addr; | ||
220 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
221 | int txsr, isr, tickssofar = jiffies - dev->trans_start; | ||
222 | unsigned long flags; | ||
223 | |||
224 | #if defined(CONFIG_M32R) && defined(CONFIG_SMP) | ||
225 | unsigned long icucr; | ||
226 | |||
227 | local_irq_save(flags); | ||
228 | icucr = inl(M32R_ICU_CR1_PORTL); | ||
229 | icucr |= M32R_ICUCR_ISMOD11; | ||
230 | outl(icucr, M32R_ICU_CR1_PORTL); | ||
231 | local_irq_restore(flags); | ||
232 | #endif | ||
233 | ei_local->stat.tx_errors++; | ||
234 | |||
235 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
236 | txsr = inb(e8390_base+EN0_TSR); | ||
237 | isr = inb(e8390_base+EN0_ISR); | ||
238 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
239 | |||
240 | printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n", | ||
241 | dev->name, (txsr & ENTSR_ABT) ? "excess collisions." : | ||
242 | (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar); | ||
243 | |||
244 | if (!isr && !ei_local->stat.tx_packets) | ||
245 | { | ||
246 | /* The 8390 probably hasn't gotten on the cable yet. */ | ||
247 | ei_local->interface_num ^= 1; /* Try a different xcvr. */ | ||
248 | } | ||
249 | |||
250 | /* Ugly but a reset can be slow, yet must be protected */ | ||
251 | |||
252 | disable_irq_nosync_lockdep(dev->irq); | ||
253 | spin_lock(&ei_local->page_lock); | ||
254 | |||
255 | /* Try to restart the card. Perhaps the user has fixed something. */ | ||
256 | ei_reset_8390(dev); | ||
257 | NS8390_init(dev, 1); | ||
258 | |||
259 | spin_unlock(&ei_local->page_lock); | ||
260 | enable_irq_lockdep(dev->irq); | ||
261 | netif_wake_queue(dev); | ||
262 | } | ||
263 | |||
264 | /** | ||
265 | * ei_start_xmit - begin packet transmission | ||
266 | * @skb: packet to be sent | ||
267 | * @dev: network device to which packet is sent | ||
268 | * | ||
269 | * Sends a packet to an 8390 network device. | ||
270 | */ | ||
271 | |||
272 | static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
273 | { | ||
274 | long e8390_base = dev->base_addr; | ||
275 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
276 | int send_length = skb->len, output_page; | ||
277 | unsigned long flags; | ||
278 | char buf[ETH_ZLEN]; | ||
279 | char *data = skb->data; | ||
280 | |||
281 | if (skb->len < ETH_ZLEN) { | ||
282 | memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */ | ||
283 | memcpy(buf, data, skb->len); | ||
284 | send_length = ETH_ZLEN; | ||
285 | data = buf; | ||
286 | } | ||
287 | |||
288 | /* Mask interrupts from the ethercard. | ||
289 | SMP: We have to grab the lock here, otherwise the IRQ handler | ||
290 | on another CPU can flip the register window and race the IRQ mask set. | ||
291 | Without the lock we end up trashing the mcast filter instead of disabling irqs */ | ||
292 | |||
293 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
294 | outb_p(0x00, e8390_base + EN0_IMR); | ||
295 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
296 | |||
297 | |||
298 | /* | ||
299 | * Slow phase with lock held. | ||
300 | */ | ||
301 | |||
302 | disable_irq_nosync_lockdep_irqsave(dev->irq, &flags); | ||
303 | |||
304 | spin_lock(&ei_local->page_lock); | ||
305 | |||
306 | ei_local->irqlock = 1; | ||
307 | |||
308 | /* | ||
309 | * We have two Tx slots available for use. Find the first free | ||
310 | * slot, and then perform some sanity checks. With two Tx bufs, | ||
311 | * you get very close to transmitting back-to-back packets. With | ||
312 | * only one Tx buf, the transmitter sits idle while you reload the | ||
313 | * card, leaving a substantial gap between each transmitted packet. | ||
314 | */ | ||
315 | |||
316 | if (ei_local->tx1 == 0) | ||
317 | { | ||
318 | output_page = ei_local->tx_start_page; | ||
319 | ei_local->tx1 = send_length; | ||
320 | if (ei_debug && ei_local->tx2 > 0) | ||
321 | printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n", | ||
322 | dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing); | ||
323 | } | ||
324 | else if (ei_local->tx2 == 0) | ||
325 | { | ||
326 | output_page = ei_local->tx_start_page + TX_PAGES/2; | ||
327 | ei_local->tx2 = send_length; | ||
328 | if (ei_debug && ei_local->tx1 > 0) | ||
329 | printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n", | ||
330 | dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing); | ||
331 | } | ||
332 | else | ||
333 | { /* We should never get here. */ | ||
334 | if (ei_debug) | ||
335 | printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n", | ||
336 | dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx); | ||
337 | ei_local->irqlock = 0; | ||
338 | netif_stop_queue(dev); | ||
339 | outb_p(ENISR_ALL, e8390_base + EN0_IMR); | ||
340 | spin_unlock(&ei_local->page_lock); | ||
341 | enable_irq_lockdep_irqrestore(dev->irq, &flags); | ||
342 | ei_local->stat.tx_errors++; | ||
343 | return 1; | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * Okay, now upload the packet and trigger a send if the transmitter | ||
348 | * isn't already sending. If it is busy, the interrupt handler will | ||
349 | * trigger the send later, upon receiving a Tx done interrupt. | ||
350 | */ | ||
351 | |||
352 | ei_block_output(dev, send_length, data, output_page); | ||
353 | |||
354 | if (! ei_local->txing) | ||
355 | { | ||
356 | ei_local->txing = 1; | ||
357 | NS8390_trigger_send(dev, send_length, output_page); | ||
358 | dev->trans_start = jiffies; | ||
359 | if (output_page == ei_local->tx_start_page) | ||
360 | { | ||
361 | ei_local->tx1 = -1; | ||
362 | ei_local->lasttx = -1; | ||
363 | } | ||
364 | else | ||
365 | { | ||
366 | ei_local->tx2 = -1; | ||
367 | ei_local->lasttx = -2; | ||
368 | } | ||
369 | } | ||
370 | else ei_local->txqueue++; | ||
371 | |||
372 | if (ei_local->tx1 && ei_local->tx2) | ||
373 | netif_stop_queue(dev); | ||
374 | else | ||
375 | netif_start_queue(dev); | ||
376 | |||
377 | /* Turn 8390 interrupts back on. */ | ||
378 | ei_local->irqlock = 0; | ||
379 | outb_p(ENISR_ALL, e8390_base + EN0_IMR); | ||
380 | |||
381 | spin_unlock(&ei_local->page_lock); | ||
382 | enable_irq_lockdep_irqrestore(dev->irq, &flags); | ||
383 | |||
384 | dev_kfree_skb (skb); | ||
385 | ei_local->stat.tx_bytes += send_length; | ||
386 | |||
387 | return 0; | ||
388 | } | 16 | } |
389 | 17 | ||
390 | /** | ||
391 | * ei_interrupt - handle the interrupts from an 8390 | ||
392 | * @irq: interrupt number | ||
393 | * @dev_id: a pointer to the net_device | ||
394 | * | ||
395 | * Handle the ether interface interrupts. We pull packets from | ||
396 | * the 8390 via the card specific functions and fire them at the networking | ||
397 | * stack. We also handle transmit completions and wake the transmit path if | ||
398 | * necessary, update the counters, and do other housekeeping as | ||
399 | * needed. | ||
400 | */ | ||
401 | |||
402 | irqreturn_t ei_interrupt(int irq, void *dev_id) | 18 | irqreturn_t ei_interrupt(int irq, void *dev_id) |
403 | { | 19 | { |
404 | struct net_device *dev = dev_id; | 20 | return __ei_interrupt(irq, dev_id); |
405 | long e8390_base; | ||
406 | int interrupts, nr_serviced = 0; | ||
407 | struct ei_device *ei_local; | ||
408 | |||
409 | e8390_base = dev->base_addr; | ||
410 | ei_local = netdev_priv(dev); | ||
411 | |||
412 | /* | ||
413 | * Protect the irq test too. | ||
414 | */ | ||
415 | |||
416 | spin_lock(&ei_local->page_lock); | ||
417 | |||
418 | if (ei_local->irqlock) | ||
419 | { | ||
420 | #if 1 /* This might just be an interrupt for a PCI device sharing this line */ | ||
421 | /* The "irqlock" check is only for testing. */ | ||
422 | printk(ei_local->irqlock | ||
423 | ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n" | ||
424 | : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n", | ||
425 | dev->name, inb_p(e8390_base + EN0_ISR), | ||
426 | inb_p(e8390_base + EN0_IMR)); | ||
427 | #endif | ||
428 | spin_unlock(&ei_local->page_lock); | ||
429 | return IRQ_NONE; | ||
430 | } | ||
431 | |||
432 | /* Change to page 0 and read the intr status reg. */ | ||
433 | outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); | ||
434 | if (ei_debug > 3) | ||
435 | printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name, | ||
436 | inb_p(e8390_base + EN0_ISR)); | ||
437 | |||
438 | /* !!Assumption!! -- we stay in page 0. Don't break this. */ | ||
439 | while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 | ||
440 | && ++nr_serviced < MAX_SERVICE) | ||
441 | { | ||
442 | if (!netif_running(dev)) { | ||
443 | printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name); | ||
444 | /* rmk - acknowledge the interrupts */ | ||
445 | outb_p(interrupts, e8390_base + EN0_ISR); | ||
446 | interrupts = 0; | ||
447 | break; | ||
448 | } | ||
449 | if (interrupts & ENISR_OVER) | ||
450 | ei_rx_overrun(dev); | ||
451 | else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) | ||
452 | { | ||
453 | /* Got a good (?) packet. */ | ||
454 | ei_receive(dev); | ||
455 | } | ||
456 | /* Push the next to-transmit packet through. */ | ||
457 | if (interrupts & ENISR_TX) | ||
458 | ei_tx_intr(dev); | ||
459 | else if (interrupts & ENISR_TX_ERR) | ||
460 | ei_tx_err(dev); | ||
461 | |||
462 | if (interrupts & ENISR_COUNTERS) | ||
463 | { | ||
464 | ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0); | ||
465 | ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1); | ||
466 | ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2); | ||
467 | outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */ | ||
468 | } | ||
469 | |||
470 | /* Ignore any RDC interrupts that make it back to here. */ | ||
471 | if (interrupts & ENISR_RDC) | ||
472 | { | ||
473 | outb_p(ENISR_RDC, e8390_base + EN0_ISR); | ||
474 | } | ||
475 | |||
476 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); | ||
477 | } | ||
478 | |||
479 | if (interrupts && ei_debug) | ||
480 | { | ||
481 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); | ||
482 | if (nr_serviced >= MAX_SERVICE) | ||
483 | { | ||
484 | /* 0xFF is valid for a card removal */ | ||
485 | if(interrupts!=0xFF) | ||
486 | printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n", | ||
487 | dev->name, interrupts); | ||
488 | outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ | ||
489 | } else { | ||
490 | printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts); | ||
491 | outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ | ||
492 | } | ||
493 | } | ||
494 | spin_unlock(&ei_local->page_lock); | ||
495 | return IRQ_RETVAL(nr_serviced > 0); | ||
496 | } | 21 | } |
497 | 22 | ||
498 | #ifdef CONFIG_NET_POLL_CONTROLLER | 23 | #ifdef CONFIG_NET_POLL_CONTROLLER |
499 | void ei_poll(struct net_device *dev) | 24 | void ei_poll(struct net_device *dev) |
500 | { | 25 | { |
501 | disable_irq_lockdep(dev->irq); | 26 | __ei_poll(dev); |
502 | ei_interrupt(dev->irq, dev); | ||
503 | enable_irq_lockdep(dev->irq); | ||
504 | } | 27 | } |
505 | #endif | 28 | #endif |
506 | 29 | ||
507 | /** | ||
508 | * ei_tx_err - handle transmitter error | ||
509 | * @dev: network device which threw the exception | ||
510 | * | ||
511 | * A transmitter error has happened. Most likely excess collisions (which | ||
512 | * is a fairly normal condition). If the error is one where the Tx will | ||
513 | * have been aborted, we try and send another one right away, instead of | ||
514 | * letting the failed packet sit and collect dust in the Tx buffer. This | ||
515 | * is a much better solution as it avoids kernel based Tx timeouts, and | ||
516 | * an unnecessary card reset. | ||
517 | * | ||
518 | * Called with lock held. | ||
519 | */ | ||
520 | |||
521 | static void ei_tx_err(struct net_device *dev) | ||
522 | { | ||
523 | long e8390_base = dev->base_addr; | ||
524 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
525 | unsigned char txsr = inb_p(e8390_base+EN0_TSR); | ||
526 | unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); | ||
527 | |||
528 | #ifdef VERBOSE_ERROR_DUMP | ||
529 | printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr); | ||
530 | if (txsr & ENTSR_ABT) | ||
531 | printk("excess-collisions "); | ||
532 | if (txsr & ENTSR_ND) | ||
533 | printk("non-deferral "); | ||
534 | if (txsr & ENTSR_CRS) | ||
535 | printk("lost-carrier "); | ||
536 | if (txsr & ENTSR_FU) | ||
537 | printk("FIFO-underrun "); | ||
538 | if (txsr & ENTSR_CDH) | ||
539 | printk("lost-heartbeat "); | ||
540 | printk("\n"); | ||
541 | #endif | ||
542 | |||
543 | outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */ | ||
544 | |||
545 | if (tx_was_aborted) | ||
546 | ei_tx_intr(dev); | ||
547 | else | ||
548 | { | ||
549 | ei_local->stat.tx_errors++; | ||
550 | if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++; | ||
551 | if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++; | ||
552 | if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++; | ||
553 | } | ||
554 | } | ||
555 | |||
556 | /** | ||
557 | * ei_tx_intr - transmit interrupt handler | ||
558 | * @dev: network device for which tx intr is handled | ||
559 | * | ||
560 | * We have finished a transmit: check for errors and then trigger the next | ||
561 | * packet to be sent. Called with lock held. | ||
562 | */ | ||
563 | |||
564 | static void ei_tx_intr(struct net_device *dev) | ||
565 | { | ||
566 | long e8390_base = dev->base_addr; | ||
567 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
568 | int status = inb(e8390_base + EN0_TSR); | ||
569 | |||
570 | outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ | ||
571 | |||
572 | /* | ||
573 | * There are two Tx buffers; see which one finished, and trigger | ||
574 | * the send of the other one if a packet is waiting in it. | ||
575 | */ | ||
576 | ei_local->txqueue--; | ||
577 | |||
578 | if (ei_local->tx1 < 0) | ||
579 | { | ||
580 | if (ei_local->lasttx != 1 && ei_local->lasttx != -1) | ||
581 | printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n", | ||
582 | ei_local->name, ei_local->lasttx, ei_local->tx1); | ||
583 | ei_local->tx1 = 0; | ||
584 | if (ei_local->tx2 > 0) | ||
585 | { | ||
586 | ei_local->txing = 1; | ||
587 | NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); | ||
588 | dev->trans_start = jiffies; | ||
589 | ei_local->tx2 = -1, | ||
590 | ei_local->lasttx = 2; | ||
591 | } | ||
592 | else ei_local->lasttx = 20, ei_local->txing = 0; | ||
593 | } | ||
594 | else if (ei_local->tx2 < 0) | ||
595 | { | ||
596 | if (ei_local->lasttx != 2 && ei_local->lasttx != -2) | ||
597 | printk("%s: bogus last_tx_buffer %d, tx2=%d.\n", | ||
598 | ei_local->name, ei_local->lasttx, ei_local->tx2); | ||
599 | ei_local->tx2 = 0; | ||
600 | if (ei_local->tx1 > 0) | ||
601 | { | ||
602 | ei_local->txing = 1; | ||
603 | NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); | ||
604 | dev->trans_start = jiffies; | ||
605 | ei_local->tx1 = -1; | ||
606 | ei_local->lasttx = 1; | ||
607 | } | ||
608 | else | ||
609 | ei_local->lasttx = 10, ei_local->txing = 0; | ||
610 | } | ||
611 | // else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n", | ||
612 | // dev->name, ei_local->lasttx); | ||
613 | |||
614 | /* Minimize Tx latency: update the statistics after we restart TXing. */ | ||
615 | if (status & ENTSR_COL) | ||
616 | ei_local->stat.collisions++; | ||
617 | if (status & ENTSR_PTX) | ||
618 | ei_local->stat.tx_packets++; | ||
619 | else | ||
620 | { | ||
621 | ei_local->stat.tx_errors++; | ||
622 | if (status & ENTSR_ABT) | ||
623 | { | ||
624 | ei_local->stat.tx_aborted_errors++; | ||
625 | ei_local->stat.collisions += 16; | ||
626 | } | ||
627 | if (status & ENTSR_CRS) | ||
628 | ei_local->stat.tx_carrier_errors++; | ||
629 | if (status & ENTSR_FU) | ||
630 | ei_local->stat.tx_fifo_errors++; | ||
631 | if (status & ENTSR_CDH) | ||
632 | ei_local->stat.tx_heartbeat_errors++; | ||
633 | if (status & ENTSR_OWC) | ||
634 | ei_local->stat.tx_window_errors++; | ||
635 | } | ||
636 | netif_wake_queue(dev); | ||
637 | } | ||
638 | |||
639 | /** | ||
640 | * ei_receive - receive some packets | ||
641 | * @dev: network device with which receive will be run | ||
642 | * | ||
643 | * We have a good packet(s), get it/them out of the buffers. | ||
644 | * Called with lock held. | ||
645 | */ | ||
646 | |||
647 | static void ei_receive(struct net_device *dev) | ||
648 | { | ||
649 | long e8390_base = dev->base_addr; | ||
650 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
651 | unsigned char rxing_page, this_frame, next_frame; | ||
652 | unsigned short current_offset; | ||
653 | int rx_pkt_count = 0; | ||
654 | struct e8390_pkt_hdr rx_frame; | ||
655 | int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page; | ||
656 | |||
657 | while (++rx_pkt_count < 10) | ||
658 | { | ||
659 | int pkt_len, pkt_stat; | ||
660 | |||
661 | /* Get the rx page (incoming packet pointer). */ | ||
662 | outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD); | ||
663 | rxing_page = inb_p(e8390_base + EN1_CURPAG); | ||
664 | outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); | ||
665 | |||
666 | /* Remove one frame from the ring. Boundary is always a page behind. */ | ||
667 | this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1; | ||
668 | if (this_frame >= ei_local->stop_page) | ||
669 | this_frame = ei_local->rx_start_page; | ||
670 | |||
671 | /* Someday we'll omit the previous, iff we never get this message. | ||
672 | (There is at least one clone that is claimed to have a problem.) | ||
673 | |||
674 | Keep quiet if it looks like a card removal. One problem here | ||
675 | is that some clones crash in roughly the same way. | ||
676 | */ | ||
677 | if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF)) | ||
678 | printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n", | ||
679 | dev->name, this_frame, ei_local->current_page); | ||
680 | |||
681 | if (this_frame == rxing_page) /* Read all the frames? */ | ||
682 | break; /* Done for now */ | ||
683 | |||
684 | current_offset = this_frame << 8; | ||
685 | ei_get_8390_hdr(dev, &rx_frame, this_frame); | ||
686 | |||
687 | pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); | ||
688 | pkt_stat = rx_frame.status; | ||
689 | |||
690 | next_frame = this_frame + 1 + ((pkt_len+4)>>8); | ||
691 | |||
692 | /* Check for the bogosity warned about in the 3c503 book: the status byte is never | ||
693 | written. This happened a lot during testing! This code should be | ||
694 | cleaned up someday. */ | ||
695 | if (rx_frame.next != next_frame | ||
696 | && rx_frame.next != next_frame + 1 | ||
697 | && rx_frame.next != next_frame - num_rx_pages | ||
698 | && rx_frame.next != next_frame + 1 - num_rx_pages) { | ||
699 | ei_local->current_page = rxing_page; | ||
700 | outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY); | ||
701 | ei_local->stat.rx_errors++; | ||
702 | continue; | ||
703 | } | ||
704 | |||
705 | if (pkt_len < 60 || pkt_len > 1518) | ||
706 | { | ||
707 | if (ei_debug) | ||
708 | printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n", | ||
709 | dev->name, rx_frame.count, rx_frame.status, | ||
710 | rx_frame.next); | ||
711 | ei_local->stat.rx_errors++; | ||
712 | ei_local->stat.rx_length_errors++; | ||
713 | } | ||
714 | else if ((pkt_stat & 0x0F) == ENRSR_RXOK) | ||
715 | { | ||
716 | struct sk_buff *skb; | ||
717 | |||
718 | skb = dev_alloc_skb(pkt_len+2); | ||
719 | if (skb == NULL) | ||
720 | { | ||
721 | if (ei_debug > 1) | ||
722 | printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n", | ||
723 | dev->name, pkt_len); | ||
724 | ei_local->stat.rx_dropped++; | ||
725 | break; | ||
726 | } | ||
727 | else | ||
728 | { | ||
729 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ | ||
730 | skb->dev = dev; | ||
731 | skb_put(skb, pkt_len); /* Make room */ | ||
732 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); | ||
733 | skb->protocol=eth_type_trans(skb,dev); | ||
734 | netif_rx(skb); | ||
735 | dev->last_rx = jiffies; | ||
736 | ei_local->stat.rx_packets++; | ||
737 | ei_local->stat.rx_bytes += pkt_len; | ||
738 | if (pkt_stat & ENRSR_PHY) | ||
739 | ei_local->stat.multicast++; | ||
740 | } | ||
741 | } | ||
742 | else | ||
743 | { | ||
744 | if (ei_debug) | ||
745 | printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n", | ||
746 | dev->name, rx_frame.status, rx_frame.next, | ||
747 | rx_frame.count); | ||
748 | ei_local->stat.rx_errors++; | ||
749 | /* NB: The NIC counts CRC, frame and missed errors. */ | ||
750 | if (pkt_stat & ENRSR_FO) | ||
751 | ei_local->stat.rx_fifo_errors++; | ||
752 | } | ||
753 | next_frame = rx_frame.next; | ||
754 | |||
755 | /* This _should_ never happen: it's here for avoiding bad clones. */ | ||
756 | if (next_frame >= ei_local->stop_page) { | ||
757 | printk("%s: next frame inconsistency, %#2x\n", dev->name, | ||
758 | next_frame); | ||
759 | next_frame = ei_local->rx_start_page; | ||
760 | } | ||
761 | ei_local->current_page = next_frame; | ||
762 | outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); | ||
763 | } | ||
764 | |||
765 | /* We used to also ack ENISR_OVER here, but that would sometimes mask | ||
766 | a real overrun, leaving the 8390 in a stopped state with rec'vr off. */ | ||
767 | outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR); | ||
768 | return; | ||
769 | } | ||
770 | |||
771 | /** | ||
772 | * ei_rx_overrun - handle receiver overrun | ||
773 | * @dev: network device which threw exception | ||
774 | * | ||
775 | * We have a receiver overrun: we have to kick the 8390 to get it started | ||
776 | * again. Problem is that you have to kick it exactly as NS prescribes in | ||
777 | * the updated datasheets, or "the NIC may act in an unpredictable manner." | ||
778 | * This includes causing "the NIC to defer indefinitely when it is stopped | ||
779 | * on a busy network." Ugh. | ||
780 | * Called with lock held. Don't call this with the interrupts off or your | ||
781 | * computer will hate you - it takes 10ms or so. | ||
782 | */ | ||
783 | |||
784 | static void ei_rx_overrun(struct net_device *dev) | ||
785 | { | ||
786 | long e8390_base = dev->base_addr; | ||
787 | unsigned char was_txing, must_resend = 0; | ||
788 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
789 | |||
790 | /* | ||
791 | * Record whether a Tx was in progress and then issue the | ||
792 | * stop command. | ||
793 | */ | ||
794 | was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS; | ||
795 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); | ||
796 | |||
797 | if (ei_debug > 1) | ||
798 | printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name); | ||
799 | ei_local->stat.rx_over_errors++; | ||
800 | |||
801 | /* | ||
802 | * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. | ||
803 | * Early datasheets said to poll the reset bit, but now they say that | ||
804 | * it "is not a reliable indicator and subsequently should be ignored." | ||
805 | * We wait at least 10ms. | ||
806 | */ | ||
807 | |||
808 | mdelay(10); | ||
809 | |||
810 | /* | ||
811 | * Reset RBCR[01] back to zero as per magic incantation. | ||
812 | */ | ||
813 | outb_p(0x00, e8390_base+EN0_RCNTLO); | ||
814 | outb_p(0x00, e8390_base+EN0_RCNTHI); | ||
815 | |||
816 | /* | ||
817 | * See if any Tx was interrupted or not. According to NS, this | ||
818 | * step is vital, and skipping it will cause no end of havoc. | ||
819 | */ | ||
820 | |||
821 | if (was_txing) | ||
822 | { | ||
823 | unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); | ||
824 | if (!tx_completed) | ||
825 | must_resend = 1; | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * Have to enter loopback mode and then restart the NIC before | ||
830 | * you are allowed to slurp packets up off the ring. | ||
831 | */ | ||
832 | outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); | ||
833 | outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); | ||
834 | |||
835 | /* | ||
836 | * Clear the Rx ring of all the debris, and ack the interrupt. | ||
837 | */ | ||
838 | ei_receive(dev); | ||
839 | outb_p(ENISR_OVER, e8390_base+EN0_ISR); | ||
840 | |||
841 | /* | ||
842 | * Leave loopback mode, and resend any packet that got stopped. | ||
843 | */ | ||
844 | outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); | ||
845 | if (must_resend) | ||
846 | outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); | ||
847 | } | ||
848 | |||
849 | /* | ||
850 | * Collect the stats. This is called unlocked and from several contexts. | ||
851 | */ | ||
852 | |||
853 | static struct net_device_stats *get_stats(struct net_device *dev) | ||
854 | { | ||
855 | long ioaddr = dev->base_addr; | ||
856 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
857 | unsigned long flags; | ||
858 | |||
859 | /* If the card is stopped, just return the present stats. */ | ||
860 | if (!netif_running(dev)) | ||
861 | return &ei_local->stat; | ||
862 | |||
863 | spin_lock_irqsave(&ei_local->page_lock,flags); | ||
864 | /* Read the counter registers, assuming we are in page 0. */ | ||
865 | ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0); | ||
866 | ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1); | ||
867 | ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2); | ||
868 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
869 | |||
870 | return &ei_local->stat; | ||
871 | } | ||
872 | |||
873 | /* | ||
874 | * Form the 64 bit 8390 multicast table from the linked list of addresses | ||
875 | * associated with this dev structure. | ||
876 | */ | ||
877 | |||
878 | static inline void make_mc_bits(u8 *bits, struct net_device *dev) | ||
879 | { | ||
880 | struct dev_mc_list *dmi; | ||
881 | |||
882 | for (dmi=dev->mc_list; dmi; dmi=dmi->next) | ||
883 | { | ||
884 | u32 crc; | ||
885 | if (dmi->dmi_addrlen != ETH_ALEN) | ||
886 | { | ||
887 | printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name); | ||
888 | continue; | ||
889 | } | ||
890 | crc = ether_crc(ETH_ALEN, dmi->dmi_addr); | ||
891 | /* | ||
892 | * The 8390 uses the 6 most significant bits of the | ||
893 | * CRC to index the multicast table. | ||
894 | */ | ||
895 | bits[crc>>29] |= (1<<((crc>>26)&7)); | ||
896 | } | ||
897 | } | ||
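The index arithmetic above packs the six most significant CRC bits into a byte/bit pair: bits 31-29 select one of the eight filter bytes and bits 28-26 select the bit within that byte. Written out as a stand-alone helper (illustrative only, equivalent to the expression in make_mc_bits()):

    /* Illustrative helper: same hash-bit selection as make_mc_bits() above. */
    static void set_8390_hash_bit(u8 bits[8], u32 crc)
    {
            unsigned int index = crc >> 26;        /* top 6 bits of the CRC: 0..63               */

            bits[index >> 3] |= 1 << (index & 7);  /* same as bits[crc>>29] |= 1<<((crc>>26)&7)  */
    }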
898 | |||
899 | /** | ||
900 | * do_set_multicast_list - set/clear multicast filter | ||
901 | * @dev: net device for which multicast filter is adjusted | ||
902 | * | ||
903 | * Set or clear the multicast filter for this adaptor. May be called | ||
904 | * from a BH in 2.1.x. Must be called with lock held. | ||
905 | */ | ||
906 | |||
907 | static void do_set_multicast_list(struct net_device *dev) | ||
908 | { | ||
909 | long e8390_base = dev->base_addr; | ||
910 | int i; | ||
911 | struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); | ||
912 | |||
913 | if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) | ||
914 | { | ||
915 | memset(ei_local->mcfilter, 0, 8); | ||
916 | if (dev->mc_list) | ||
917 | make_mc_bits(ei_local->mcfilter, dev); | ||
918 | } | ||
919 | else | ||
920 | memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */ | ||
921 | |||
922 | /* | ||
923 | * DP8390 manuals don't specify any magic sequence for altering | ||
924 | * the multicast regs on an already running card. To be safe, we | ||
925 | * ensure multicast mode is off prior to loading up the new hash | ||
926 | * table. If this proves not to be enough, we can always resort | ||
927 | * to stopping the NIC, loading the table and then restarting. | ||
928 | * | ||
929 | * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC | ||
930 | * Elite16) appear to be write-only. The NS 8390 data sheet lists | ||
931 | * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and | ||
932 | * Ultra32 EISA) appears to have this bug fixed. | ||
933 | */ | ||
934 | |||
935 | if (netif_running(dev)) | ||
936 | outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); | ||
937 | outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); | ||
938 | for(i = 0; i < 8; i++) | ||
939 | { | ||
940 | outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); | ||
941 | #ifndef BUG_83C690 | ||
942 | if(inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i]) | ||
943 | printk(KERN_ERR "Multicast filter read/write mismap %d\n",i); | ||
944 | #endif | ||
945 | } | ||
946 | outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); | ||
947 | |||
948 | if(dev->flags&IFF_PROMISC) | ||
949 | outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); | ||
950 | else if(dev->flags&IFF_ALLMULTI || dev->mc_list) | ||
951 | outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); | ||
952 | else | ||
953 | outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); | ||
954 | } | ||
955 | |||
956 | /* | ||
957 | * Called without lock held. This is invoked from user context and may | ||
958 | * be parallel to just about everything else. It's also fairly quick and | ||
959 | * not called too often. Must protect against both bh and irq users. | ||
960 | */ | ||
961 | |||
962 | static void set_multicast_list(struct net_device *dev) | ||
963 | { | ||
964 | unsigned long flags; | ||
965 | struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); | ||
966 | |||
967 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
968 | do_set_multicast_list(dev); | ||
969 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
970 | } | ||
971 | |||
972 | /** | ||
973 | * ethdev_setup - init rest of 8390 device struct | ||
974 | * @dev: network device structure to init | ||
975 | * | ||
976 | * Initialize the rest of the 8390 device structure. Do NOT __init | ||
977 | * this, as it is used by 8390 based modular drivers too. | ||
978 | */ | ||
979 | |||
980 | static void ethdev_setup(struct net_device *dev) | ||
981 | { | ||
982 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
983 | if (ei_debug > 1) | ||
984 | printk(version); | ||
985 | |||
986 | dev->hard_start_xmit = &ei_start_xmit; | ||
987 | dev->get_stats = get_stats; | ||
988 | dev->set_multicast_list = &set_multicast_list; | ||
989 | |||
990 | ether_setup(dev); | ||
991 | |||
992 | spin_lock_init(&ei_local->page_lock); | ||
993 | } | ||
994 | |||
995 | /** | ||
996 | * alloc_ei_netdev - alloc_etherdev counterpart for 8390 | ||
997 | * @size: extra bytes to allocate | ||
998 | * | ||
999 | * Allocate 8390-specific net_device. | ||
1000 | */ | ||
1001 | struct net_device *__alloc_ei_netdev(int size) | 30 | struct net_device *__alloc_ei_netdev(int size) |
1002 | { | 31 | { |
1003 | return alloc_netdev(sizeof(struct ei_device) + size, "eth%d", | 32 | return ____alloc_ei_netdev(size); |
1004 | ethdev_setup); | ||
1005 | } | 33 | } |
1006 | 34 | ||
1007 | |||
1008 | |||
1009 | |||
1010 | /* This page of functions should be 8390 generic */ | ||
1011 | /* Follow National Semi's recommendations for initializing the "NIC". */ | ||
1012 | |||
1013 | /** | ||
1014 | * NS8390_init - initialize 8390 hardware | ||
1015 | * @dev: network device to initialize | ||
1016 | * @startp: boolean. non-zero value to initiate chip processing | ||
1017 | * | ||
1018 | * Must be called with lock held. | ||
1019 | */ | ||
1020 | |||
1021 | void NS8390_init(struct net_device *dev, int startp) | 35 | void NS8390_init(struct net_device *dev, int startp) |
1022 | { | 36 | { |
1023 | long e8390_base = dev->base_addr; | 37 | return __NS8390_init(dev, startp); |
1024 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
1025 | int i; | ||
1026 | int endcfg = ei_local->word16 | ||
1027 | ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) | ||
1028 | : 0x48; | ||
1029 | |||
1030 | if(sizeof(struct e8390_pkt_hdr)!=4) | ||
1031 | panic("8390.c: header struct mispacked\n"); | ||
1032 | /* Follow National Semi's recommendations for initing the DP83902. */ | ||
1033 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ | ||
1034 | outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ | ||
1035 | /* Clear the remote byte count registers. */ | ||
1036 | outb_p(0x00, e8390_base + EN0_RCNTLO); | ||
1037 | outb_p(0x00, e8390_base + EN0_RCNTHI); | ||
1038 | /* Set to monitor and loopback mode -- this is vital! */ | ||
1039 | outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */ | ||
1040 | outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ | ||
1041 | /* Set the transmit page and receive ring. */ | ||
1042 | outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); | ||
1043 | ei_local->tx1 = ei_local->tx2 = 0; | ||
1044 | outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); | ||
1045 | outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS says 0x26 */ | ||
1046 | ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ | ||
1047 | outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); | ||
1048 | /* Clear the pending interrupts and mask. */ | ||
1049 | outb_p(0xFF, e8390_base + EN0_ISR); | ||
1050 | outb_p(0x00, e8390_base + EN0_IMR); | ||
1051 | |||
1052 | /* Copy the station address into the DS8390 registers. */ | ||
1053 | |||
1054 | outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ | ||
1055 | for(i = 0; i < 6; i++) | ||
1056 | { | ||
1057 | outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); | ||
1058 | if (ei_debug > 1 && inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i]) | ||
1059 | printk(KERN_ERR "Hw. address read/write mismap %d\n",i); | ||
1060 | } | ||
1061 | |||
1062 | outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); | ||
1063 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); | ||
1064 | |||
1065 | netif_start_queue(dev); | ||
1066 | ei_local->tx1 = ei_local->tx2 = 0; | ||
1067 | ei_local->txing = 0; | ||
1068 | |||
1069 | if (startp) | ||
1070 | { | ||
1071 | outb_p(0xff, e8390_base + EN0_ISR); | ||
1072 | outb_p(ENISR_ALL, e8390_base + EN0_IMR); | ||
1073 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); | ||
1074 | outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */ | ||
1075 | /* 3c503 TechMan says rxconfig only after the NIC is started. */ | ||
1076 | outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */ | ||
1077 | do_set_multicast_list(dev); /* (re)load the mcast table */ | ||
1078 | } | ||
1079 | } | ||
1080 | |||
1081 | /* Trigger a transmit start, assuming the length is valid. | ||
1082 | Always called with the page lock held */ | ||
1083 | |||
1084 | static void NS8390_trigger_send(struct net_device *dev, unsigned int length, | ||
1085 | int start_page) | ||
1086 | { | ||
1087 | long e8390_base = dev->base_addr; | ||
1088 | struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); | ||
1089 | |||
1090 | outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); | ||
1091 | |||
1092 | if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS) | ||
1093 | { | ||
1094 | printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n", | ||
1095 | dev->name); | ||
1096 | return; | ||
1097 | } | ||
1098 | outb_p(length & 0xff, e8390_base + EN0_TCNTLO); | ||
1099 | outb_p(length >> 8, e8390_base + EN0_TCNTHI); | ||
1100 | outb_p(start_page, e8390_base + EN0_TPSR); | ||
1101 | outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); | ||
1102 | } | 38 | } |
1103 | 39 | ||
1104 | EXPORT_SYMBOL(ei_open); | 40 | EXPORT_SYMBOL(ei_open); |
diff --git a/drivers/net/8390.h b/drivers/net/8390.h index f44f1220b3a5..414de5bd228f 100644 --- a/drivers/net/8390.h +++ b/drivers/net/8390.h | |||
@@ -107,35 +107,14 @@ struct ei_device { | |||
107 | * - removed AMIGA_PCMCIA from this list, handled as ISA io now | 107 | * - removed AMIGA_PCMCIA from this list, handled as ISA io now |
108 | */ | 108 | */ |
109 | 109 | ||
110 | #if defined(CONFIG_MAC) || \ | 110 | #ifndef ei_inb |
111 | defined(CONFIG_ZORRO8390) || defined(CONFIG_ZORRO8390_MODULE) || \ | 111 | #define ei_inb(_p) inb(_p) |
112 | defined(CONFIG_HYDRA) || defined(CONFIG_HYDRA_MODULE) | 112 | #define ei_outb(_v,_p) outb(_v,_p) |
113 | #define EI_SHIFT(x) (ei_local->reg_offset[x]) | 113 | #define ei_inb_p(_p) inb_p(_p) |
114 | #undef inb | 114 | #define ei_outb_p(_v,_p) outb_p(_v,_p) |
115 | #undef inb_p | 115 | #endif |
116 | #undef outb | 116 | |
117 | #undef outb_p | 117 | #ifndef EI_SHIFT |
118 | |||
119 | #define inb(port) in_8(port) | ||
120 | #define outb(val,port) out_8(port,val) | ||
121 | #define inb_p(port) in_8(port) | ||
122 | #define outb_p(val,port) out_8(port,val) | ||
123 | |||
124 | #elif defined(CONFIG_ARM_ETHERH) || defined(CONFIG_ARM_ETHERH_MODULE) | ||
125 | #define EI_SHIFT(x) (ei_local->reg_offset[x]) | ||
126 | #undef inb | ||
127 | #undef inb_p | ||
128 | #undef outb | ||
129 | #undef outb_p | ||
130 | |||
131 | #define inb(_p) readb(_p) | ||
132 | #define outb(_v,_p) writeb(_v,_p) | ||
133 | #define inb_p(_p) inb(_p) | ||
134 | #define outb_p(_v,_p) outb(_v,_p) | ||
135 | |||
136 | #elif defined(CONFIG_NE_H8300) || defined(CONFIG_NE_H8300_MODULE) | ||
137 | #define EI_SHIFT(x) (ei_local->reg_offset[x]) | ||
138 | #else | ||
139 | #define EI_SHIFT(x) (x) | 118 | #define EI_SHIFT(x) (x) |
140 | #endif | 119 | #endif |
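With ei_inb()/ei_outb() and EI_SHIFT() now only defaulted here, a board driver whose 8390 sits behind memory-mapped or strided registers overrides them before pulling in lib8390.c, rather than relying on a specially compiled 8390.o; the etherh.c hunk below does exactly this. A sketch of the pattern (driver-specific details are illustrative):

    /* Hypothetical MMIO-attached 8390 driver -- pattern only. */
    #define EI_SHIFT(x)       (ei_local->reg_offset[x])
    #define ei_inb(_p)        readb((void __iomem *)(_p))
    #define ei_outb(_v,_p)    writeb(_v, (void __iomem *)(_p))
    #define ei_inb_p(_p)      ei_inb(_p)
    #define ei_outb_p(_v,_p)  ei_outb(_v,_p)

    #include "lib8390.c"      /* provides __ei_open, __ei_close, __ei_interrupt, ... */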
141 | 120 | ||
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index d3abf80ea3e2..9de0eed6755b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -188,6 +188,17 @@ config MII | |||
188 | or internal device. It is safe to say Y or M here even if your | 188 | or internal device. It is safe to say Y or M here even if your |
189 | ethernet card lacks MII. | 189 | ethernet card lacks MII. |
190 | 190 | ||
191 | config MACB | ||
192 | tristate "Atmel MACB support" | ||
193 | depends on NET_ETHERNET && AVR32 | ||
194 | select MII | ||
195 | help | ||
196 | The Atmel MACB ethernet interface is found on many AT32 and AT91 | ||
197 | parts. Say Y to include support for the MACB chip. | ||
198 | |||
199 | To compile this driver as a module, choose M here: the module | ||
200 | will be called macb. | ||
201 | |||
191 | source "drivers/net/arm/Kconfig" | 202 | source "drivers/net/arm/Kconfig" |
192 | 203 | ||
193 | config MACE | 204 | config MACE |
@@ -2251,6 +2262,14 @@ config SPIDER_NET | |||
2251 | This driver supports the Gigabit Ethernet chips present on the | 2262 | This driver supports the Gigabit Ethernet chips present on the |
2252 | Cell Processor-Based Blades from IBM. | 2263 | Cell Processor-Based Blades from IBM. |
2253 | 2264 | ||
2265 | config TSI108_ETH | ||
2266 | tristate "Tundra TSI108 gigabit Ethernet support" | ||
2267 | depends on TSI108_BRIDGE | ||
2268 | help | ||
2269 | This driver supports Tundra TSI108 gigabit Ethernet ports. | ||
2270 | To compile this driver as a module, choose M here: the module | ||
2271 | will be called tsi108_eth. | ||
2272 | |||
2254 | config GIANFAR | 2273 | config GIANFAR |
2255 | tristate "Gianfar Ethernet" | 2274 | tristate "Gianfar Ethernet" |
2256 | depends on 85xx || 83xx || PPC_86xx | 2275 | depends on 85xx || 83xx || PPC_86xx |
@@ -2341,10 +2360,11 @@ menu "Ethernet (10000 Mbit)" | |||
2341 | config CHELSIO_T1 | 2360 | config CHELSIO_T1 |
2342 | tristate "Chelsio 10Gb Ethernet support" | 2361 | tristate "Chelsio 10Gb Ethernet support" |
2343 | depends on PCI | 2362 | depends on PCI |
2363 | select CRC32 | ||
2344 | help | 2364 | help |
2345 | This driver supports Chelsio N110 and N210 models 10Gb Ethernet | 2365 | This driver supports Chelsio gigabit and 10-gigabit |
2346 | cards. More information about adapter features and performance | 2366 | Ethernet cards. More information about adapter features and |
2347 | tuning is in <file:Documentation/networking/cxgb.txt>. | 2367 | performance tuning is in <file:Documentation/networking/cxgb.txt>. |
2348 | 2368 | ||
2349 | For general information about Chelsio and our products, visit | 2369 | For general information about Chelsio and our products, visit |
2350 | our website at <http://www.chelsio.com>. | 2370 | our website at <http://www.chelsio.com>. |
@@ -2357,6 +2377,13 @@ config CHELSIO_T1 | |||
2357 | To compile this driver as a module, choose M here: the module | 2377 | To compile this driver as a module, choose M here: the module |
2358 | will be called cxgb. | 2378 | will be called cxgb. |
2359 | 2379 | ||
2380 | config CHELSIO_T1_1G | ||
2381 | bool "Chelsio gigabit Ethernet support" | ||
2382 | depends on CHELSIO_T1 | ||
2383 | help | ||
2384 | Enables support for Chelsio's gigabit Ethernet PCI cards. If you | ||
2385 | are using only 10G cards say 'N' here. | ||
2386 | |||
2360 | config EHEA | 2387 | config EHEA |
2361 | tristate "eHEA Ethernet support" | 2388 | tristate "eHEA Ethernet support" |
2362 | depends on IBMEBUS | 2389 | depends on IBMEBUS |
@@ -2447,6 +2474,12 @@ config MYRI10GE | |||
2447 | <file:Documentation/networking/net-modules.txt>. The module | 2474 | <file:Documentation/networking/net-modules.txt>. The module |
2448 | will be called myri10ge. | 2475 | will be called myri10ge. |
2449 | 2476 | ||
2477 | config NETXEN_NIC | ||
2478 | tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" | ||
2479 | depends on PCI | ||
2480 | help | ||
2481 | This enables support for NetXen's Gigabit Ethernet card. | ||
2482 | |||
2450 | endmenu | 2483 | endmenu |
2451 | 2484 | ||
2452 | source "drivers/net/tokenring/Kconfig" | 2485 | source "drivers/net/tokenring/Kconfig" |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index f270bc49e571..4c0d4e5ce42b 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -82,7 +82,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o | |||
82 | obj-$(CONFIG_NET) += Space.o loopback.o | 82 | obj-$(CONFIG_NET) += Space.o loopback.o |
83 | obj-$(CONFIG_SEEQ8005) += seeq8005.o | 83 | obj-$(CONFIG_SEEQ8005) += seeq8005.o |
84 | obj-$(CONFIG_NET_SB1000) += sb1000.o | 84 | obj-$(CONFIG_NET_SB1000) += sb1000.o |
85 | obj-$(CONFIG_MAC8390) += mac8390.o 8390.o | 85 | obj-$(CONFIG_MAC8390) += mac8390.o |
86 | obj-$(CONFIG_APNE) += apne.o 8390.o | 86 | obj-$(CONFIG_APNE) += apne.o 8390.o |
87 | obj-$(CONFIG_PCMCIA_PCNET) += 8390.o | 87 | obj-$(CONFIG_PCMCIA_PCNET) += 8390.o |
88 | obj-$(CONFIG_SHAPER) += shaper.o | 88 | obj-$(CONFIG_SHAPER) += shaper.o |
@@ -90,7 +90,6 @@ obj-$(CONFIG_HP100) += hp100.o | |||
90 | obj-$(CONFIG_SMC9194) += smc9194.o | 90 | obj-$(CONFIG_SMC9194) += smc9194.o |
91 | obj-$(CONFIG_FEC) += fec.o | 91 | obj-$(CONFIG_FEC) += fec.o |
92 | obj-$(CONFIG_68360_ENET) += 68360enet.o | 92 | obj-$(CONFIG_68360_ENET) += 68360enet.o |
93 | obj-$(CONFIG_ARM_ETHERH) += 8390.o | ||
94 | obj-$(CONFIG_WD80x3) += wd.o 8390.o | 93 | obj-$(CONFIG_WD80x3) += wd.o 8390.o |
95 | obj-$(CONFIG_EL2) += 3c503.o 8390.o | 94 | obj-$(CONFIG_EL2) += 3c503.o 8390.o |
96 | obj-$(CONFIG_NE2000) += ne.o 8390.o | 95 | obj-$(CONFIG_NE2000) += ne.o 8390.o |
@@ -107,8 +106,9 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o | |||
107 | obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o | 106 | obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o |
108 | obj-$(CONFIG_B44) += b44.o | 107 | obj-$(CONFIG_B44) += b44.o |
109 | obj-$(CONFIG_FORCEDETH) += forcedeth.o | 108 | obj-$(CONFIG_FORCEDETH) += forcedeth.o |
110 | obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o | 109 | obj-$(CONFIG_NE_H8300) += ne-h8300.o |
111 | 110 | ||
111 | obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o | ||
112 | obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o | 112 | obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o |
113 | obj-$(CONFIG_QLA3XXX) += qla3xxx.o | 113 | obj-$(CONFIG_QLA3XXX) += qla3xxx.o |
114 | 114 | ||
@@ -165,7 +165,7 @@ obj-$(CONFIG_BVME6000_NET) += 82596.o | |||
165 | obj-$(CONFIG_LP486E) += lp486e.o | 165 | obj-$(CONFIG_LP486E) += lp486e.o |
166 | 166 | ||
167 | obj-$(CONFIG_ETH16I) += eth16i.o | 167 | obj-$(CONFIG_ETH16I) += eth16i.o |
168 | obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o | 168 | obj-$(CONFIG_ZORRO8390) += zorro8390.o |
169 | obj-$(CONFIG_HPLANCE) += hplance.o 7990.o | 169 | obj-$(CONFIG_HPLANCE) += hplance.o 7990.o |
170 | obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o | 170 | obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o |
171 | obj-$(CONFIG_EQUALIZER) += eql.o | 171 | obj-$(CONFIG_EQUALIZER) += eql.o |
@@ -178,7 +178,7 @@ obj-$(CONFIG_ATARILANCE) += atarilance.o | |||
178 | obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o | 178 | obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o |
179 | obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o | 179 | obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o |
180 | obj-$(CONFIG_A2065) += a2065.o | 180 | obj-$(CONFIG_A2065) += a2065.o |
181 | obj-$(CONFIG_HYDRA) += hydra.o 8390.o | 181 | obj-$(CONFIG_HYDRA) += hydra.o |
182 | obj-$(CONFIG_ARIADNE) += ariadne.o | 182 | obj-$(CONFIG_ARIADNE) += ariadne.o |
183 | obj-$(CONFIG_CS89x0) += cs89x0.o | 183 | obj-$(CONFIG_CS89x0) += cs89x0.o |
184 | obj-$(CONFIG_MACSONIC) += macsonic.o | 184 | obj-$(CONFIG_MACSONIC) += macsonic.o |
@@ -197,6 +197,8 @@ obj-$(CONFIG_SMC911X) += smc911x.o | |||
197 | obj-$(CONFIG_DM9000) += dm9000.o | 197 | obj-$(CONFIG_DM9000) += dm9000.o |
198 | obj-$(CONFIG_FEC_8XX) += fec_8xx/ | 198 | obj-$(CONFIG_FEC_8XX) += fec_8xx/ |
199 | 199 | ||
200 | obj-$(CONFIG_MACB) += macb.o | ||
201 | |||
200 | obj-$(CONFIG_ARM) += arm/ | 202 | obj-$(CONFIG_ARM) += arm/ |
201 | obj-$(CONFIG_DEV_APPLETALK) += appletalk/ | 203 | obj-$(CONFIG_DEV_APPLETALK) += appletalk/ |
202 | obj-$(CONFIG_TR) += tokenring/ | 204 | obj-$(CONFIG_TR) += tokenring/ |
@@ -214,3 +216,4 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o | |||
214 | 216 | ||
215 | obj-$(CONFIG_FS_ENET) += fs_enet/ | 217 | obj-$(CONFIG_FS_ENET) += fs_enet/ |
216 | 218 | ||
219 | obj-$(CONFIG_NETXEN_NIC) += netxen/ | ||
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index ef65e5917c8f..18896f24d407 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c | |||
@@ -1490,32 +1490,7 @@ static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf) | |||
1490 | buf[12] = readl(mmio + STAT0); | 1490 | buf[12] = readl(mmio + STAT0); |
1491 | } | 1491 | } |
1492 | 1492 | ||
1493 | /* | ||
1494 | amd8111e crc generator implementation is different from the kernel | ||
1495 | ether_crc() function. | ||
1496 | */ | ||
1497 | static int amd8111e_ether_crc(int len, char* mac_addr) | ||
1498 | { | ||
1499 | int i,byte; | ||
1500 | unsigned char octet; | ||
1501 | u32 crc= INITCRC; | ||
1502 | |||
1503 | for(byte=0; byte < len; byte++){ | ||
1504 | octet = mac_addr[byte]; | ||
1505 | for( i=0;i < 8; i++){ | ||
1506 | /* If the next bit from the input stream is 1, subtract the divisor (CRC32) from the dividend (crc). */ | ||
1507 | if( (octet & 0x1) ^ (crc & 0x1) ){ | ||
1508 | crc >>= 1; | ||
1509 | crc ^= CRC32; | ||
1510 | } | ||
1511 | else | ||
1512 | crc >>= 1; | ||
1513 | 1493 | ||
1514 | octet >>= 1; | ||
1515 | } | ||
1516 | } | ||
1517 | return crc; | ||
1518 | } | ||
1519 | /* | 1494 | /* |
1520 | This function sets promiscuous mode, all-multi mode or the multicast address | 1495 | This function sets promiscuous mode, all-multi mode or the multicast address |
1521 | list to the device. | 1496 | list to the device. |
@@ -1556,7 +1531,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev) | |||
1556 | mc_filter[1] = mc_filter[0] = 0; | 1531 | mc_filter[1] = mc_filter[0] = 0; |
1557 | for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count; | 1532 | for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count; |
1558 | i++, mc_ptr = mc_ptr->next) { | 1533 | i++, mc_ptr = mc_ptr->next) { |
1559 | bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr) >> 26 ) & 0x3f; | 1534 | bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f; |
1560 | mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); | 1535 | mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); |
1561 | } | 1536 | } |
1562 | amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); | 1537 | amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); |
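The hand-rolled amd8111e_ether_crc() removed above was a bit-serial CRC-32 over the address bytes; the replacement simply reuses the kernel's ether_crc_le() from <linux/crc32.h> and keeps the same logical-address-filter bit selection. Pulled out as a stand-alone sketch (the helper name is illustrative):

    /* Illustrative: LADRF bit selection now built on the shared CRC helper. */
    #include <linux/crc32.h>
    #include <linux/if_ether.h>

    static void amd8111e_set_ladrf_bit(u32 mc_filter[2], const u8 *addr)
    {
            int bit_num = (ether_crc_le(ETH_ALEN, addr) >> 26) & 0x3f; /* top 6 CRC bits         */

            mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);            /* 2 x 32-bit LADRF words */
    }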
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h index 7727d328f65e..2007510c4eb6 100644 --- a/drivers/net/amd8111e.h +++ b/drivers/net/amd8111e.h | |||
@@ -651,10 +651,6 @@ typedef enum { | |||
651 | /* driver ioctl parameters */ | 651 | /* driver ioctl parameters */ |
652 | #define AMD8111E_REG_DUMP_LEN 13*sizeof(u32) | 652 | #define AMD8111E_REG_DUMP_LEN 13*sizeof(u32) |
653 | 653 | ||
654 | /* crc generator constants */ | ||
655 | #define CRC32 0xedb88320 | ||
656 | #define INITCRC 0xFFFFFFFF | ||
657 | |||
658 | /* amd8111e desriptor format */ | 654 | /* amd8111e desriptor format */ |
659 | 655 | ||
660 | struct amd8111e_tx_dr{ | 656 | struct amd8111e_tx_dr{ |
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 4ae98970b282..f3faa4fe58e7 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c | |||
@@ -52,7 +52,12 @@ | |||
52 | #include <asm/ecard.h> | 52 | #include <asm/ecard.h> |
53 | #include <asm/io.h> | 53 | #include <asm/io.h> |
54 | 54 | ||
55 | #include "../8390.h" | 55 | #define EI_SHIFT(x) (ei_local->reg_offset[x]) |
56 | |||
57 | #define ei_inb(_p) readb((void __iomem *)_p) | ||
58 | #define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p) | ||
59 | #define ei_inb_p(_p) readb((void __iomem *)_p) | ||
60 | #define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p) | ||
56 | 61 | ||
57 | #define NET_DEBUG 0 | 62 | #define NET_DEBUG 0 |
58 | #define DEBUG_INIT 2 | 63 | #define DEBUG_INIT 2 |
@@ -60,6 +65,11 @@ | |||
60 | #define DRV_NAME "etherh" | 65 | #define DRV_NAME "etherh" |
61 | #define DRV_VERSION "1.11" | 66 | #define DRV_VERSION "1.11" |
62 | 67 | ||
68 | static char version[] __initdata = | ||
69 | "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n"; | ||
70 | |||
71 | #include "../lib8390.c" | ||
72 | |||
63 | static unsigned int net_debug = NET_DEBUG; | 73 | static unsigned int net_debug = NET_DEBUG; |
64 | 74 | ||
65 | struct etherh_priv { | 75 | struct etherh_priv { |
@@ -87,9 +97,6 @@ MODULE_AUTHOR("Russell King"); | |||
87 | MODULE_DESCRIPTION("EtherH/EtherM driver"); | 97 | MODULE_DESCRIPTION("EtherH/EtherM driver"); |
88 | MODULE_LICENSE("GPL"); | 98 | MODULE_LICENSE("GPL"); |
89 | 99 | ||
90 | static char version[] __initdata = | ||
91 | "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n"; | ||
92 | |||
93 | #define ETHERH500_DATAPORT 0x800 /* MEMC */ | 100 | #define ETHERH500_DATAPORT 0x800 /* MEMC */ |
94 | #define ETHERH500_NS8390 0x000 /* MEMC */ | 101 | #define ETHERH500_NS8390 0x000 /* MEMC */ |
95 | #define ETHERH500_CTRLPORT 0x800 /* IOC */ | 102 | #define ETHERH500_CTRLPORT 0x800 /* IOC */ |
@@ -177,7 +184,7 @@ etherh_setif(struct net_device *dev) | |||
177 | switch (etherh_priv(dev)->id) { | 184 | switch (etherh_priv(dev)->id) { |
178 | case PROD_I3_ETHERLAN600: | 185 | case PROD_I3_ETHERLAN600: |
179 | case PROD_I3_ETHERLAN600A: | 186 | case PROD_I3_ETHERLAN600A: |
180 | addr = (void *)dev->base_addr + EN0_RCNTHI; | 187 | addr = (void __iomem *)dev->base_addr + EN0_RCNTHI; |
181 | 188 | ||
182 | switch (dev->if_port) { | 189 | switch (dev->if_port) { |
183 | case IF_PORT_10BASE2: | 190 | case IF_PORT_10BASE2: |
@@ -218,7 +225,7 @@ etherh_getifstat(struct net_device *dev) | |||
218 | switch (etherh_priv(dev)->id) { | 225 | switch (etherh_priv(dev)->id) { |
219 | case PROD_I3_ETHERLAN600: | 226 | case PROD_I3_ETHERLAN600: |
220 | case PROD_I3_ETHERLAN600A: | 227 | case PROD_I3_ETHERLAN600A: |
221 | addr = (void *)dev->base_addr + EN0_RCNTHI; | 228 | addr = (void __iomem *)dev->base_addr + EN0_RCNTHI; |
222 | switch (dev->if_port) { | 229 | switch (dev->if_port) { |
223 | case IF_PORT_10BASE2: | 230 | case IF_PORT_10BASE2: |
224 | stat = 1; | 231 | stat = 1; |
@@ -281,7 +288,7 @@ static void | |||
281 | etherh_reset(struct net_device *dev) | 288 | etherh_reset(struct net_device *dev) |
282 | { | 289 | { |
283 | struct ei_device *ei_local = netdev_priv(dev); | 290 | struct ei_device *ei_local = netdev_priv(dev); |
284 | void __iomem *addr = (void *)dev->base_addr; | 291 | void __iomem *addr = (void __iomem *)dev->base_addr; |
285 | 292 | ||
286 | writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr); | 293 | writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr); |
287 | 294 | ||
@@ -327,7 +334,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf | |||
327 | 334 | ||
328 | ei_local->dmaing = 1; | 335 | ei_local->dmaing = 1; |
329 | 336 | ||
330 | addr = (void *)dev->base_addr; | 337 | addr = (void __iomem *)dev->base_addr; |
331 | dma_base = etherh_priv(dev)->dma_base; | 338 | dma_base = etherh_priv(dev)->dma_base; |
332 | 339 | ||
333 | count = (count + 1) & ~1; | 340 | count = (count + 1) & ~1; |
@@ -360,7 +367,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf | |||
360 | printk(KERN_ERR "%s: timeout waiting for TX RDC\n", | 367 | printk(KERN_ERR "%s: timeout waiting for TX RDC\n", |
361 | dev->name); | 368 | dev->name); |
362 | etherh_reset (dev); | 369 | etherh_reset (dev); |
363 | NS8390_init (dev, 1); | 370 | __NS8390_init (dev, 1); |
364 | break; | 371 | break; |
365 | } | 372 | } |
366 | 373 | ||
@@ -387,7 +394,7 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int | |||
387 | 394 | ||
388 | ei_local->dmaing = 1; | 395 | ei_local->dmaing = 1; |
389 | 396 | ||
390 | addr = (void *)dev->base_addr; | 397 | addr = (void __iomem *)dev->base_addr; |
391 | dma_base = etherh_priv(dev)->dma_base; | 398 | dma_base = etherh_priv(dev)->dma_base; |
392 | 399 | ||
393 | buf = skb->data; | 400 | buf = skb->data; |
@@ -427,7 +434,7 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p | |||
427 | 434 | ||
428 | ei_local->dmaing = 1; | 435 | ei_local->dmaing = 1; |
429 | 436 | ||
430 | addr = (void *)dev->base_addr; | 437 | addr = (void __iomem *)dev->base_addr; |
431 | dma_base = etherh_priv(dev)->dma_base; | 438 | dma_base = etherh_priv(dev)->dma_base; |
432 | 439 | ||
433 | writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); | 440 | writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); |
@@ -465,7 +472,7 @@ etherh_open(struct net_device *dev) | |||
465 | return -EINVAL; | 472 | return -EINVAL; |
466 | } | 473 | } |
467 | 474 | ||
468 | if (request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)) | 475 | if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev)) |
469 | return -EAGAIN; | 476 | return -EAGAIN; |
470 | 477 | ||
471 | /* | 478 | /* |
@@ -491,7 +498,7 @@ etherh_open(struct net_device *dev) | |||
491 | etherh_setif(dev); | 498 | etherh_setif(dev); |
492 | 499 | ||
493 | etherh_reset(dev); | 500 | etherh_reset(dev); |
494 | ei_open(dev); | 501 | __ei_open(dev); |
495 | 502 | ||
496 | return 0; | 503 | return 0; |
497 | } | 504 | } |
@@ -502,7 +509,7 @@ etherh_open(struct net_device *dev) | |||
502 | static int | 509 | static int |
503 | etherh_close(struct net_device *dev) | 510 | etherh_close(struct net_device *dev) |
504 | { | 511 | { |
505 | ei_close (dev); | 512 | __ei_close (dev); |
506 | free_irq (dev->irq, dev); | 513 | free_irq (dev->irq, dev); |
507 | return 0; | 514 | return 0; |
508 | } | 515 | } |
@@ -650,7 +657,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
650 | if (ret) | 657 | if (ret) |
651 | goto out; | 658 | goto out; |
652 | 659 | ||
653 | dev = __alloc_ei_netdev(sizeof(struct etherh_priv)); | 660 | dev = ____alloc_ei_netdev(sizeof(struct etherh_priv)); |
654 | if (!dev) { | 661 | if (!dev) { |
655 | ret = -ENOMEM; | 662 | ret = -ENOMEM; |
656 | goto release; | 663 | goto release; |
@@ -736,7 +743,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
736 | ei_local->interface_num = 0; | 743 | ei_local->interface_num = 0; |
737 | 744 | ||
738 | etherh_reset(dev); | 745 | etherh_reset(dev); |
739 | NS8390_init(dev, 0); | 746 | __NS8390_init(dev, 0); |
740 | 747 | ||
741 | ret = register_netdev(dev); | 748 | ret = register_netdev(dev); |
742 | if (ret) | 749 | if (ret) |
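The etherh changes above follow the lib8390 conversion pattern: the driver defines its own register accessors (the ei_inb_p/ei_outb_p macros at the top of the file), includes ../lib8390.c so the 8390 core is compiled against them, and then calls the double-underscore entry points (__ei_open, __ei_close, __ei_interrupt, __NS8390_init, ____alloc_ei_netdev) instead of the shared 8390 module's symbols. A minimal sketch of the same pattern for a hypothetical board driver (names and ordering are illustrative, not taken from etherh):

/* Hypothetical 8390 driver using the lib8390 inclusion pattern: accessors
 * must be defined before lib8390.c is pulled in. */
#define ei_inb(p)       readb((void __iomem *)(p))
#define ei_outb(v, p)   writeb((v), (void __iomem *)(p))
#define ei_inb_p(p)     readb((void __iomem *)(p))
#define ei_outb_p(v, p) writeb((v), (void __iomem *)(p))

#include "../lib8390.c"   /* provides __ei_open, __ei_close, __ei_interrupt,
                           * __NS8390_init, ____alloc_ei_netdev */

static int example_open(struct net_device *dev)
{
        /* use the locally built core entry points, as etherh now does */
        if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
                return -EAGAIN;
        __NS8390_init(dev, 0);
        return __ei_open(dev);
}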
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 7db3c8af0894..f0b6879a1c7d 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -360,7 +360,8 @@ static int mii_probe (struct net_device *dev) | |||
360 | BUG_ON(!phydev); | 360 | BUG_ON(!phydev); |
361 | BUG_ON(phydev->attached_dev); | 361 | BUG_ON(phydev->attached_dev); |
362 | 362 | ||
363 | phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0); | 363 | phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0, |
364 | PHY_INTERFACE_MODE_MII); | ||
364 | 365 | ||
365 | if (IS_ERR(phydev)) { | 366 | if (IS_ERR(phydev)) { |
366 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 367 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); |
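The au1000_eth hunk reflects the extra interface-mode argument now taken by phy_connect(): callers pass a phy_interface_t value such as PHY_INTERFACE_MODE_MII in addition to the bus id, adjust-link callback and flags. A hedged sketch of a caller under this five-argument signature (the adjust-link callback and helper name are placeholders, not from this patch):

static void example_adjust_link(struct net_device *dev)
{
        /* react to link changes reported by the PHY layer */
}

static int example_attach_phy(struct net_device *dev, const char *bus_id)
{
        struct phy_device *phydev;

        phydev = phy_connect(dev, bus_id, &example_adjust_link, 0,
                             PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }
        return 0;
}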
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile index 54c78d94f48b..382d23f810ab 100644 --- a/drivers/net/chelsio/Makefile +++ b/drivers/net/chelsio/Makefile | |||
@@ -1,11 +1,11 @@ | |||
1 | # | 1 | # |
2 | # Chelsio 10Gb NIC driver for Linux. | 2 | # Chelsio T1 driver |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_CHELSIO_T1) += cxgb.o | 5 | obj-$(CONFIG_CHELSIO_T1) += cxgb.o |
6 | 6 | ||
7 | EXTRA_CFLAGS += -Idrivers/net/chelsio $(DEBUG_FLAGS) | 7 | cxgb-$(CONFIG_CHELSIO_T1_1G) += ixf1010.o mac.o mv88e1xxx.o vsc7326.o vsc8244.o |
8 | cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \ | ||
9 | mv88x201x.o my3126.o $(cxgb-y) | ||
8 | 10 | ||
9 | 11 | ||
10 | cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o | ||
11 | |||
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index 5d9dd14427c5..b265941e1372 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/delay.h> | 45 | #include <linux/delay.h> |
46 | #include <linux/pci.h> | 46 | #include <linux/pci.h> |
47 | #include <linux/ethtool.h> | 47 | #include <linux/ethtool.h> |
48 | #include <linux/if_vlan.h> | ||
48 | #include <linux/mii.h> | 49 | #include <linux/mii.h> |
49 | #include <linux/crc32.h> | 50 | #include <linux/crc32.h> |
50 | #include <linux/init.h> | 51 | #include <linux/init.h> |
@@ -53,13 +54,30 @@ | |||
53 | 54 | ||
54 | #define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver" | 55 | #define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver" |
55 | #define DRV_NAME "cxgb" | 56 | #define DRV_NAME "cxgb" |
56 | #define DRV_VERSION "2.1.1" | 57 | #define DRV_VERSION "2.2" |
57 | #define PFX DRV_NAME ": " | 58 | #define PFX DRV_NAME ": " |
58 | 59 | ||
59 | #define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__) | 60 | #define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__) |
60 | #define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__) | 61 | #define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__) |
61 | #define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__) | 62 | #define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__) |
62 | 63 | ||
64 | /* | ||
65 | * More powerful macro that selectively prints messages based on msg_enable. | ||
66 | * For info and debugging messages. | ||
67 | */ | ||
68 | #define CH_MSG(adapter, level, category, fmt, ...) do { \ | ||
69 | if ((adapter)->msg_enable & NETIF_MSG_##category) \ | ||
70 | printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \ | ||
71 | ## __VA_ARGS__); \ | ||
72 | } while (0) | ||
73 | |||
74 | #ifdef DEBUG | ||
75 | # define CH_DBG(adapter, category, fmt, ...) \ | ||
76 | CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__) | ||
77 | #else | ||
78 | # define CH_DBG(fmt, ...) | ||
79 | #endif | ||
80 | |||
63 | #define CH_DEVICE(devid, ssid, idx) \ | 81 | #define CH_DEVICE(devid, ssid, idx) \ |
64 | { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } | 82 | { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } |
65 | 83 | ||
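CH_MSG prints only when the corresponding NETIF_MSG_* bit is set in adapter->msg_enable, and CH_DBG compiles away entirely unless DEBUG is defined. An illustrative usage sketch (the function, port argument and message text are assumptions, not from this patch):

/* Illustrative only: a link message gated on NETIF_MSG_LINK, plus a debug
 * message that disappears in non-DEBUG builds. */
static void example_report(adapter_t *adapter, int port, int up)
{
        CH_MSG(adapter, INFO, LINK, "port %d link %s\n",
               port, up ? "up" : "down");
        CH_DBG(adapter, INTR, "slow interrupt path taken on port %d\n", port);
}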
@@ -71,10 +89,6 @@ | |||
71 | 89 | ||
72 | typedef struct adapter adapter_t; | 90 | typedef struct adapter adapter_t; |
73 | 91 | ||
74 | void t1_elmer0_ext_intr(adapter_t *adapter); | ||
75 | void t1_link_changed(adapter_t *adapter, int port_id, int link_status, | ||
76 | int speed, int duplex, int fc); | ||
77 | |||
78 | struct t1_rx_mode { | 92 | struct t1_rx_mode { |
79 | struct net_device *dev; | 93 | struct net_device *dev; |
80 | u32 idx; | 94 | u32 idx; |
@@ -97,26 +111,53 @@ static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm) | |||
97 | } | 111 | } |
98 | 112 | ||
99 | #define MAX_NPORTS 4 | 113 | #define MAX_NPORTS 4 |
114 | #define PORT_MASK ((1 << MAX_NPORTS) - 1) | ||
115 | #define NMTUS 8 | ||
116 | #define TCB_SIZE 128 | ||
100 | 117 | ||
101 | #define SPEED_INVALID 0xffff | 118 | #define SPEED_INVALID 0xffff |
102 | #define DUPLEX_INVALID 0xff | 119 | #define DUPLEX_INVALID 0xff |
103 | 120 | ||
104 | enum { | 121 | enum { |
105 | CHBT_BOARD_N110, | 122 | CHBT_BOARD_N110, |
106 | CHBT_BOARD_N210 | 123 | CHBT_BOARD_N210, |
124 | CHBT_BOARD_7500, | ||
125 | CHBT_BOARD_8000, | ||
126 | CHBT_BOARD_CHT101, | ||
127 | CHBT_BOARD_CHT110, | ||
128 | CHBT_BOARD_CHT210, | ||
129 | CHBT_BOARD_CHT204, | ||
130 | CHBT_BOARD_CHT204V, | ||
131 | CHBT_BOARD_CHT204E, | ||
132 | CHBT_BOARD_CHN204, | ||
133 | CHBT_BOARD_COUGAR, | ||
134 | CHBT_BOARD_6800, | ||
135 | CHBT_BOARD_SIMUL, | ||
107 | }; | 136 | }; |
108 | 137 | ||
109 | enum { | 138 | enum { |
139 | CHBT_TERM_FPGA, | ||
110 | CHBT_TERM_T1, | 140 | CHBT_TERM_T1, |
111 | CHBT_TERM_T2 | 141 | CHBT_TERM_T2, |
142 | CHBT_TERM_T3 | ||
112 | }; | 143 | }; |
113 | 144 | ||
114 | enum { | 145 | enum { |
146 | CHBT_MAC_CHELSIO_A, | ||
147 | CHBT_MAC_IXF1010, | ||
115 | CHBT_MAC_PM3393, | 148 | CHBT_MAC_PM3393, |
149 | CHBT_MAC_VSC7321, | ||
150 | CHBT_MAC_DUMMY | ||
116 | }; | 151 | }; |
117 | 152 | ||
118 | enum { | 153 | enum { |
154 | CHBT_PHY_88E1041, | ||
155 | CHBT_PHY_88E1111, | ||
119 | CHBT_PHY_88X2010, | 156 | CHBT_PHY_88X2010, |
157 | CHBT_PHY_XPAK, | ||
158 | CHBT_PHY_MY3126, | ||
159 | CHBT_PHY_8244, | ||
160 | CHBT_PHY_DUMMY | ||
120 | }; | 161 | }; |
121 | 162 | ||
122 | enum { | 163 | enum { |
@@ -150,16 +191,44 @@ struct chelsio_pci_params { | |||
150 | unsigned char is_pcix; | 191 | unsigned char is_pcix; |
151 | }; | 192 | }; |
152 | 193 | ||
194 | struct tp_params { | ||
195 | unsigned int pm_size; | ||
196 | unsigned int cm_size; | ||
197 | unsigned int pm_rx_base; | ||
198 | unsigned int pm_tx_base; | ||
199 | unsigned int pm_rx_pg_size; | ||
200 | unsigned int pm_tx_pg_size; | ||
201 | unsigned int pm_rx_num_pgs; | ||
202 | unsigned int pm_tx_num_pgs; | ||
203 | unsigned int rx_coalescing_size; | ||
204 | unsigned int use_5tuple_mode; | ||
205 | }; | ||
206 | |||
207 | struct mc5_params { | ||
208 | unsigned int mode; /* selects MC5 width */ | ||
209 | unsigned int nservers; /* size of server region */ | ||
210 | unsigned int nroutes; /* size of routing region */ | ||
211 | }; | ||
212 | |||
213 | /* Default MC5 region sizes */ | ||
214 | #define DEFAULT_SERVER_REGION_LEN 256 | ||
215 | #define DEFAULT_RT_REGION_LEN 1024 | ||
216 | |||
153 | struct adapter_params { | 217 | struct adapter_params { |
154 | struct sge_params sge; | 218 | struct sge_params sge; |
219 | struct mc5_params mc5; | ||
220 | struct tp_params tp; | ||
155 | struct chelsio_pci_params pci; | 221 | struct chelsio_pci_params pci; |
156 | 222 | ||
157 | const struct board_info *brd_info; | 223 | const struct board_info *brd_info; |
158 | 224 | ||
225 | unsigned short mtus[NMTUS]; | ||
159 | unsigned int nports; /* # of ethernet ports */ | 226 | unsigned int nports; /* # of ethernet ports */ |
160 | unsigned int stats_update_period; | 227 | unsigned int stats_update_period; |
161 | unsigned short chip_revision; | 228 | unsigned short chip_revision; |
162 | unsigned char chip_version; | 229 | unsigned char chip_version; |
230 | unsigned char is_asic; | ||
231 | unsigned char has_msi; | ||
163 | }; | 232 | }; |
164 | 233 | ||
165 | struct link_config { | 234 | struct link_config { |
@@ -207,17 +276,20 @@ struct adapter { | |||
207 | /* Terminator modules. */ | 276 | /* Terminator modules. */ |
208 | struct sge *sge; | 277 | struct sge *sge; |
209 | struct peespi *espi; | 278 | struct peespi *espi; |
279 | struct petp *tp; | ||
210 | 280 | ||
211 | struct port_info port[MAX_NPORTS]; | 281 | struct port_info port[MAX_NPORTS]; |
212 | struct work_struct stats_update_task; | 282 | struct work_struct stats_update_task; |
213 | struct timer_list stats_update_timer; | 283 | struct timer_list stats_update_timer; |
214 | 284 | ||
215 | struct semaphore mib_mutex; | ||
216 | spinlock_t tpi_lock; | 285 | spinlock_t tpi_lock; |
217 | spinlock_t work_lock; | 286 | spinlock_t work_lock; |
287 | spinlock_t mac_lock; | ||
288 | |||
218 | /* guards async operations */ | 289 | /* guards async operations */ |
219 | spinlock_t async_lock ____cacheline_aligned; | 290 | spinlock_t async_lock ____cacheline_aligned; |
220 | u32 slow_intr_mask; | 291 | u32 slow_intr_mask; |
292 | int t1powersave; | ||
221 | }; | 293 | }; |
222 | 294 | ||
223 | enum { /* adapter flags */ | 295 | enum { /* adapter flags */ |
@@ -256,6 +328,11 @@ struct board_info { | |||
256 | const char *desc; | 328 | const char *desc; |
257 | }; | 329 | }; |
258 | 330 | ||
331 | static inline int t1_is_asic(const adapter_t *adapter) | ||
332 | { | ||
333 | return adapter->params.is_asic; | ||
334 | } | ||
335 | |||
259 | extern struct pci_device_id t1_pci_tbl[]; | 336 | extern struct pci_device_id t1_pci_tbl[]; |
260 | 337 | ||
261 | static inline int adapter_matches_type(const adapter_t *adapter, | 338 | static inline int adapter_matches_type(const adapter_t *adapter, |
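t1_is_asic() simply reports the new is_asic flag cached in adapter_params, letting callers skip ASIC-only features on FPGA boards. A short sketch mirroring the get_eeprom_len() change later in this patch (the helper name is illustrative):

static int example_eeprom_len(adapter_t *adapter)
{
        /* FPGA boards report no EEPROM */
        return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}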
@@ -285,13 +362,15 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap) | |||
285 | return board_info(adap)->clock_core / 1000000; | 362 | return board_info(adap)->clock_core / 1000000; |
286 | } | 363 | } |
287 | 364 | ||
365 | extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp); | ||
366 | extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); | ||
288 | extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); | 367 | extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); |
289 | extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); | 368 | extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); |
290 | 369 | ||
291 | extern void t1_interrupts_enable(adapter_t *adapter); | 370 | extern void t1_interrupts_enable(adapter_t *adapter); |
292 | extern void t1_interrupts_disable(adapter_t *adapter); | 371 | extern void t1_interrupts_disable(adapter_t *adapter); |
293 | extern void t1_interrupts_clear(adapter_t *adapter); | 372 | extern void t1_interrupts_clear(adapter_t *adapter); |
294 | extern int elmer0_ext_intr_handler(adapter_t *adapter); | 373 | extern int t1_elmer0_ext_intr_handler(adapter_t *adapter); |
295 | extern int t1_slow_intr_handler(adapter_t *adapter); | 374 | extern int t1_slow_intr_handler(adapter_t *adapter); |
296 | 375 | ||
297 | extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); | 376 | extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); |
@@ -305,9 +384,7 @@ extern int t1_init_hw_modules(adapter_t *adapter); | |||
305 | extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); | 384 | extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); |
306 | extern void t1_free_sw_modules(adapter_t *adapter); | 385 | extern void t1_free_sw_modules(adapter_t *adapter); |
307 | extern void t1_fatal_err(adapter_t *adapter); | 386 | extern void t1_fatal_err(adapter_t *adapter); |
308 | 387 | extern void t1_link_changed(adapter_t *adapter, int port_id); | |
309 | extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable); | 388 | extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat, |
310 | extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable); | 389 | int speed, int duplex, int pause); |
311 | extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable); | ||
312 | |||
313 | #endif /* _CXGB_COMMON_H_ */ | 390 | #endif /* _CXGB_COMMON_H_ */ |
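The new __t1_tpi_read()/__t1_tpi_write() declarations sit alongside the existing locked t1_tpi_* entry points, following the usual kernel split: the double-underscore variant assumes the caller already holds tpi_lock, while the plain wrapper takes the lock itself. A sketch of that wrapper shape (the lock flavour shown is illustrative; the driver's actual choice is not taken from this hunk):

static int example_tpi_write(adapter_t *adapter, u32 addr, u32 value)
{
        int ret;

        spin_lock(&adapter->tpi_lock);
        ret = __t1_tpi_write(adapter, addr, value);  /* lock already held */
        spin_unlock(&adapter->tpi_lock);
        return ret;
}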
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h index 3412342f7345..60901f25014e 100644 --- a/drivers/net/chelsio/cphy.h +++ b/drivers/net/chelsio/cphy.h | |||
@@ -52,7 +52,14 @@ struct mdio_ops { | |||
52 | /* PHY interrupt types */ | 52 | /* PHY interrupt types */ |
53 | enum { | 53 | enum { |
54 | cphy_cause_link_change = 0x1, | 54 | cphy_cause_link_change = 0x1, |
55 | cphy_cause_error = 0x2 | 55 | cphy_cause_error = 0x2, |
56 | cphy_cause_fifo_error = 0x3 | ||
57 | }; | ||
58 | |||
59 | enum { | ||
60 | PHY_LINK_UP = 0x1, | ||
61 | PHY_AUTONEG_RDY = 0x2, | ||
62 | PHY_AUTONEG_EN = 0x4 | ||
56 | }; | 63 | }; |
57 | 64 | ||
58 | struct cphy; | 65 | struct cphy; |
@@ -81,7 +88,18 @@ struct cphy_ops { | |||
81 | /* A PHY instance */ | 88 | /* A PHY instance */ |
82 | struct cphy { | 89 | struct cphy { |
83 | int addr; /* PHY address */ | 90 | int addr; /* PHY address */ |
91 | int state; /* Link status state machine */ | ||
84 | adapter_t *adapter; /* associated adapter */ | 92 | adapter_t *adapter; /* associated adapter */ |
93 | |||
94 | struct work_struct phy_update; | ||
95 | |||
96 | u16 bmsr; | ||
97 | int count; | ||
98 | int act_count; | ||
99 | int act_on; | ||
100 | |||
101 | u32 elmer_gpo; | ||
102 | |||
85 | struct cphy_ops *ops; /* PHY operations */ | 103 | struct cphy_ops *ops; /* PHY operations */ |
86 | int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr, | 104 | int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr, |
87 | int reg_addr, unsigned int *val); | 105 | int reg_addr, unsigned int *val); |
@@ -142,6 +160,10 @@ struct gphy { | |||
142 | int (*reset)(adapter_t *adapter); | 160 | int (*reset)(adapter_t *adapter); |
143 | }; | 161 | }; |
144 | 162 | ||
163 | extern struct gphy t1_my3126_ops; | ||
164 | extern struct gphy t1_mv88e1xxx_ops; | ||
165 | extern struct gphy t1_vsc8244_ops; | ||
166 | extern struct gphy t1_xpak_ops; | ||
145 | extern struct gphy t1_mv88x201x_ops; | 167 | extern struct gphy t1_mv88x201x_ops; |
146 | extern struct gphy t1_dummy_phy_ops; | 168 | extern struct gphy t1_dummy_phy_ops; |
147 | 169 | ||
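The cphy structure now carries a small link-status state machine (the state field plus the PHY_LINK_UP / PHY_AUTONEG_RDY / PHY_AUTONEG_EN flags added above). A hedged sketch of how a PHY driver might maintain that bitmask (the helper and its arguments are hypothetical):

static void example_update_link_state(struct cphy *cphy, int link_ok,
                                      int autoneg_done)
{
        if (link_ok)
                cphy->state |= PHY_LINK_UP;
        else
                cphy->state &= ~PHY_LINK_UP;

        if (autoneg_done)
                cphy->state |= PHY_AUTONEG_RDY;
        else
                cphy->state &= ~PHY_AUTONEG_RDY;
}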
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h index 5b357d9e88d6..35f565be4fd3 100644 --- a/drivers/net/chelsio/cpl5_cmd.h +++ b/drivers/net/chelsio/cpl5_cmd.h | |||
@@ -46,24 +46,385 @@ | |||
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | enum CPL_opcode { | 48 | enum CPL_opcode { |
49 | CPL_PASS_OPEN_REQ = 0x1, | ||
50 | CPL_PASS_OPEN_RPL = 0x2, | ||
51 | CPL_PASS_ESTABLISH = 0x3, | ||
52 | CPL_PASS_ACCEPT_REQ = 0xE, | ||
53 | CPL_PASS_ACCEPT_RPL = 0x4, | ||
54 | CPL_ACT_OPEN_REQ = 0x5, | ||
55 | CPL_ACT_OPEN_RPL = 0x6, | ||
56 | CPL_CLOSE_CON_REQ = 0x7, | ||
57 | CPL_CLOSE_CON_RPL = 0x8, | ||
58 | CPL_CLOSE_LISTSRV_REQ = 0x9, | ||
59 | CPL_CLOSE_LISTSRV_RPL = 0xA, | ||
60 | CPL_ABORT_REQ = 0xB, | ||
61 | CPL_ABORT_RPL = 0xC, | ||
62 | CPL_PEER_CLOSE = 0xD, | ||
63 | CPL_ACT_ESTABLISH = 0x17, | ||
64 | |||
65 | CPL_GET_TCB = 0x24, | ||
66 | CPL_GET_TCB_RPL = 0x25, | ||
67 | CPL_SET_TCB = 0x26, | ||
68 | CPL_SET_TCB_FIELD = 0x27, | ||
69 | CPL_SET_TCB_RPL = 0x28, | ||
70 | CPL_PCMD = 0x29, | ||
71 | |||
72 | CPL_PCMD_READ = 0x31, | ||
73 | CPL_PCMD_READ_RPL = 0x32, | ||
74 | |||
75 | |||
76 | CPL_RX_DATA = 0xA0, | ||
77 | CPL_RX_DATA_DDP = 0xA1, | ||
78 | CPL_RX_DATA_ACK = 0xA3, | ||
49 | CPL_RX_PKT = 0xAD, | 79 | CPL_RX_PKT = 0xAD, |
80 | CPL_RX_ISCSI_HDR = 0xAF, | ||
81 | CPL_TX_DATA_ACK = 0xB0, | ||
82 | CPL_TX_DATA = 0xB1, | ||
50 | CPL_TX_PKT = 0xB2, | 83 | CPL_TX_PKT = 0xB2, |
51 | CPL_TX_PKT_LSO = 0xB6, | 84 | CPL_TX_PKT_LSO = 0xB6, |
85 | |||
86 | CPL_RTE_DELETE_REQ = 0xC0, | ||
87 | CPL_RTE_DELETE_RPL = 0xC1, | ||
88 | CPL_RTE_WRITE_REQ = 0xC2, | ||
89 | CPL_RTE_WRITE_RPL = 0xD3, | ||
90 | CPL_RTE_READ_REQ = 0xC3, | ||
91 | CPL_RTE_READ_RPL = 0xC4, | ||
92 | CPL_L2T_WRITE_REQ = 0xC5, | ||
93 | CPL_L2T_WRITE_RPL = 0xD4, | ||
94 | CPL_L2T_READ_REQ = 0xC6, | ||
95 | CPL_L2T_READ_RPL = 0xC7, | ||
96 | CPL_SMT_WRITE_REQ = 0xC8, | ||
97 | CPL_SMT_WRITE_RPL = 0xD5, | ||
98 | CPL_SMT_READ_REQ = 0xC9, | ||
99 | CPL_SMT_READ_RPL = 0xCA, | ||
100 | CPL_ARP_MISS_REQ = 0xCD, | ||
101 | CPL_ARP_MISS_RPL = 0xCE, | ||
102 | CPL_MIGRATE_C2T_REQ = 0xDC, | ||
103 | CPL_MIGRATE_C2T_RPL = 0xDD, | ||
104 | CPL_ERROR = 0xD7, | ||
105 | |||
106 | /* internal: driver -> TOM */ | ||
107 | CPL_MSS_CHANGE = 0xE1 | ||
52 | }; | 108 | }; |
53 | 109 | ||
54 | enum { /* TX_PKT_LSO ethernet types */ | 110 | #define NUM_CPL_CMDS 256 |
111 | |||
112 | enum CPL_error { | ||
113 | CPL_ERR_NONE = 0, | ||
114 | CPL_ERR_TCAM_PARITY = 1, | ||
115 | CPL_ERR_TCAM_FULL = 3, | ||
116 | CPL_ERR_CONN_RESET = 20, | ||
117 | CPL_ERR_CONN_EXIST = 22, | ||
118 | CPL_ERR_ARP_MISS = 23, | ||
119 | CPL_ERR_BAD_SYN = 24, | ||
120 | CPL_ERR_CONN_TIMEDOUT = 30, | ||
121 | CPL_ERR_XMIT_TIMEDOUT = 31, | ||
122 | CPL_ERR_PERSIST_TIMEDOUT = 32, | ||
123 | CPL_ERR_FINWAIT2_TIMEDOUT = 33, | ||
124 | CPL_ERR_KEEPALIVE_TIMEDOUT = 34, | ||
125 | CPL_ERR_ABORT_FAILED = 42, | ||
126 | CPL_ERR_GENERAL = 99 | ||
127 | }; | ||
128 | |||
129 | enum { | ||
130 | CPL_CONN_POLICY_AUTO = 0, | ||
131 | CPL_CONN_POLICY_ASK = 1, | ||
132 | CPL_CONN_POLICY_DENY = 3 | ||
133 | }; | ||
134 | |||
135 | enum { | ||
136 | ULP_MODE_NONE = 0, | ||
137 | ULP_MODE_TCPDDP = 1, | ||
138 | ULP_MODE_ISCSI = 2, | ||
139 | ULP_MODE_IWARP = 3, | ||
140 | ULP_MODE_SSL = 4 | ||
141 | }; | ||
142 | |||
143 | enum { | ||
144 | CPL_PASS_OPEN_ACCEPT, | ||
145 | CPL_PASS_OPEN_REJECT | ||
146 | }; | ||
147 | |||
148 | enum { | ||
149 | CPL_ABORT_SEND_RST = 0, | ||
150 | CPL_ABORT_NO_RST, | ||
151 | CPL_ABORT_POST_CLOSE_REQ = 2 | ||
152 | }; | ||
153 | |||
154 | enum { // TX_PKT_LSO ethernet types | ||
55 | CPL_ETH_II, | 155 | CPL_ETH_II, |
56 | CPL_ETH_II_VLAN, | 156 | CPL_ETH_II_VLAN, |
57 | CPL_ETH_802_3, | 157 | CPL_ETH_802_3, |
58 | CPL_ETH_802_3_VLAN | 158 | CPL_ETH_802_3_VLAN |
59 | }; | 159 | }; |
60 | 160 | ||
61 | struct cpl_rx_data { | 161 | union opcode_tid { |
162 | u32 opcode_tid; | ||
163 | u8 opcode; | ||
164 | }; | ||
165 | |||
166 | #define S_OPCODE 24 | ||
167 | #define V_OPCODE(x) ((x) << S_OPCODE) | ||
168 | #define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) | ||
169 | #define G_TID(x) ((x) & 0xFFFFFF) | ||
170 | |||
171 | /* tid is assumed to be 24-bits */ | ||
172 | #define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) | ||
173 | |||
174 | #define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) | ||
175 | |||
176 | /* extract the TID from a CPL command */ | ||
177 | #define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd)))) | ||
178 | |||
179 | struct tcp_options { | ||
180 | u16 mss; | ||
181 | u8 wsf; | ||
182 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
183 | u8 rsvd:4; | ||
184 | u8 ecn:1; | ||
185 | u8 sack:1; | ||
186 | u8 tstamp:1; | ||
187 | #else | ||
188 | u8 tstamp:1; | ||
189 | u8 sack:1; | ||
190 | u8 ecn:1; | ||
191 | u8 rsvd:4; | ||
192 | #endif | ||
193 | }; | ||
194 | |||
195 | struct cpl_pass_open_req { | ||
196 | union opcode_tid ot; | ||
197 | u16 local_port; | ||
198 | u16 peer_port; | ||
199 | u32 local_ip; | ||
200 | u32 peer_ip; | ||
201 | u32 opt0h; | ||
202 | u32 opt0l; | ||
203 | u32 peer_netmask; | ||
204 | u32 opt1; | ||
205 | }; | ||
206 | |||
207 | struct cpl_pass_open_rpl { | ||
208 | union opcode_tid ot; | ||
209 | u16 local_port; | ||
210 | u16 peer_port; | ||
211 | u32 local_ip; | ||
212 | u32 peer_ip; | ||
213 | u8 resvd[7]; | ||
214 | u8 status; | ||
215 | }; | ||
216 | |||
217 | struct cpl_pass_establish { | ||
218 | union opcode_tid ot; | ||
219 | u16 local_port; | ||
220 | u16 peer_port; | ||
221 | u32 local_ip; | ||
222 | u32 peer_ip; | ||
223 | u32 tos_tid; | ||
224 | u8 l2t_idx; | ||
225 | u8 rsvd[3]; | ||
226 | u32 snd_isn; | ||
227 | u32 rcv_isn; | ||
228 | }; | ||
229 | |||
230 | struct cpl_pass_accept_req { | ||
231 | union opcode_tid ot; | ||
232 | u16 local_port; | ||
233 | u16 peer_port; | ||
234 | u32 local_ip; | ||
235 | u32 peer_ip; | ||
236 | u32 tos_tid; | ||
237 | struct tcp_options tcp_options; | ||
238 | u8 dst_mac[6]; | ||
239 | u16 vlan_tag; | ||
240 | u8 src_mac[6]; | ||
241 | u8 rsvd[2]; | ||
242 | u32 rcv_isn; | ||
243 | u32 unknown_tcp_options; | ||
244 | }; | ||
245 | |||
246 | struct cpl_pass_accept_rpl { | ||
247 | union opcode_tid ot; | ||
248 | u32 rsvd0; | ||
249 | u32 rsvd1; | ||
250 | u32 peer_ip; | ||
251 | u32 opt0h; | ||
252 | union { | ||
253 | u32 opt0l; | ||
254 | struct { | ||
255 | u8 rsvd[3]; | ||
256 | u8 status; | ||
257 | }; | ||
258 | }; | ||
259 | }; | ||
260 | |||
261 | struct cpl_act_open_req { | ||
262 | union opcode_tid ot; | ||
263 | u16 local_port; | ||
264 | u16 peer_port; | ||
265 | u32 local_ip; | ||
266 | u32 peer_ip; | ||
267 | u32 opt0h; | ||
268 | u32 opt0l; | ||
269 | u32 iff_vlantag; | ||
270 | u32 rsvd; | ||
271 | }; | ||
272 | |||
273 | struct cpl_act_open_rpl { | ||
274 | union opcode_tid ot; | ||
275 | u16 local_port; | ||
276 | u16 peer_port; | ||
277 | u32 local_ip; | ||
278 | u32 peer_ip; | ||
279 | u32 new_tid; | ||
280 | u8 rsvd[3]; | ||
281 | u8 status; | ||
282 | }; | ||
283 | |||
284 | struct cpl_act_establish { | ||
285 | union opcode_tid ot; | ||
286 | u16 local_port; | ||
287 | u16 peer_port; | ||
288 | u32 local_ip; | ||
289 | u32 peer_ip; | ||
290 | u32 tos_tid; | ||
291 | u32 rsvd; | ||
292 | u32 snd_isn; | ||
293 | u32 rcv_isn; | ||
294 | }; | ||
295 | |||
296 | struct cpl_get_tcb { | ||
297 | union opcode_tid ot; | ||
298 | u32 rsvd; | ||
299 | }; | ||
300 | |||
301 | struct cpl_get_tcb_rpl { | ||
302 | union opcode_tid ot; | ||
303 | u16 len; | ||
304 | u8 rsvd; | ||
305 | u8 status; | ||
306 | }; | ||
307 | |||
308 | struct cpl_set_tcb { | ||
309 | union opcode_tid ot; | ||
310 | u16 len; | ||
311 | u16 rsvd; | ||
312 | }; | ||
313 | |||
314 | struct cpl_set_tcb_field { | ||
315 | union opcode_tid ot; | ||
316 | u8 rsvd[3]; | ||
317 | u8 offset; | ||
318 | u32 mask; | ||
319 | u32 val; | ||
320 | }; | ||
321 | |||
322 | struct cpl_set_tcb_rpl { | ||
323 | union opcode_tid ot; | ||
324 | u8 rsvd[3]; | ||
325 | u8 status; | ||
326 | }; | ||
327 | |||
328 | struct cpl_pcmd { | ||
329 | union opcode_tid ot; | ||
330 | u16 dlen_in; | ||
331 | u16 dlen_out; | ||
332 | u32 pcmd_parm[2]; | ||
333 | }; | ||
334 | |||
335 | struct cpl_pcmd_read { | ||
336 | union opcode_tid ot; | ||
337 | u32 rsvd1; | ||
338 | u16 rsvd2; | ||
339 | u32 addr; | ||
340 | u16 len; | ||
341 | }; | ||
342 | |||
343 | struct cpl_pcmd_read_rpl { | ||
344 | union opcode_tid ot; | ||
345 | u16 len; | ||
346 | }; | ||
347 | |||
348 | struct cpl_close_con_req { | ||
349 | union opcode_tid ot; | ||
350 | u32 rsvd; | ||
351 | }; | ||
352 | |||
353 | struct cpl_close_con_rpl { | ||
354 | union opcode_tid ot; | ||
355 | u8 rsvd[3]; | ||
356 | u8 status; | ||
357 | u32 snd_nxt; | ||
358 | u32 rcv_nxt; | ||
359 | }; | ||
360 | |||
361 | struct cpl_close_listserv_req { | ||
362 | union opcode_tid ot; | ||
363 | u32 rsvd; | ||
364 | }; | ||
365 | |||
366 | struct cpl_close_listserv_rpl { | ||
367 | union opcode_tid ot; | ||
368 | u8 rsvd[3]; | ||
369 | u8 status; | ||
370 | }; | ||
371 | |||
372 | struct cpl_abort_req { | ||
373 | union opcode_tid ot; | ||
62 | u32 rsvd0; | 374 | u32 rsvd0; |
375 | u8 rsvd1; | ||
376 | u8 cmd; | ||
377 | u8 rsvd2[6]; | ||
378 | }; | ||
379 | |||
380 | struct cpl_abort_rpl { | ||
381 | union opcode_tid ot; | ||
382 | u32 rsvd0; | ||
383 | u8 rsvd1; | ||
384 | u8 status; | ||
385 | u8 rsvd2[6]; | ||
386 | }; | ||
387 | |||
388 | struct cpl_peer_close { | ||
389 | union opcode_tid ot; | ||
390 | u32 rsvd; | ||
391 | }; | ||
392 | |||
393 | struct cpl_tx_data { | ||
394 | union opcode_tid ot; | ||
395 | u32 len; | ||
396 | u32 rsvd0; | ||
397 | u16 urg; | ||
398 | u16 flags; | ||
399 | }; | ||
400 | |||
401 | struct cpl_tx_data_ack { | ||
402 | union opcode_tid ot; | ||
403 | u32 ack_seq; | ||
404 | }; | ||
405 | |||
406 | struct cpl_rx_data { | ||
407 | union opcode_tid ot; | ||
63 | u32 len; | 408 | u32 len; |
64 | u32 seq; | 409 | u32 seq; |
65 | u16 urg; | 410 | u16 urg; |
66 | u8 rsvd1; | 411 | u8 rsvd; |
412 | u8 status; | ||
413 | }; | ||
414 | |||
415 | struct cpl_rx_data_ack { | ||
416 | union opcode_tid ot; | ||
417 | u32 credit; | ||
418 | }; | ||
419 | |||
420 | struct cpl_rx_data_ddp { | ||
421 | union opcode_tid ot; | ||
422 | u32 len; | ||
423 | u32 seq; | ||
424 | u32 nxt_seq; | ||
425 | u32 ulp_crc; | ||
426 | u16 ddp_status; | ||
427 | u8 rsvd; | ||
67 | u8 status; | 428 | u8 status; |
68 | }; | 429 | }; |
69 | 430 | ||
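The opcode/tid macros introduced above pack the 8-bit CPL opcode into the top byte and the 24-bit tid into the low bits of a single big-endian word; GET_TID() applies ntohl() before masking. A worked example with arbitrary values (the function is illustrative only):

static void example_opcode_tid(struct cpl_close_con_req *req)
{
        unsigned int tid = 0x1234;

        /* opcode in bits 31:24, tid in bits 23:0, stored big-endian */
        req->ot.opcode_tid = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

        /* G_OPCODE(ntohl(req->ot.opcode_tid)) == 0x7 (CPL_CLOSE_CON_REQ)
         * GET_TID(req)                        == 0x1234 */
}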
@@ -99,9 +460,9 @@ struct cpl_tx_pkt_lso { | |||
99 | u8 ip_csum_dis:1; | 460 | u8 ip_csum_dis:1; |
100 | u8 l4_csum_dis:1; | 461 | u8 l4_csum_dis:1; |
101 | u8 vlan_valid:1; | 462 | u8 vlan_valid:1; |
102 | u8 rsvd:1; | 463 | u8 :1; |
103 | #else | 464 | #else |
104 | u8 rsvd:1; | 465 | u8 :1; |
105 | u8 vlan_valid:1; | 466 | u8 vlan_valid:1; |
106 | u8 l4_csum_dis:1; | 467 | u8 l4_csum_dis:1; |
107 | u8 ip_csum_dis:1; | 468 | u8 ip_csum_dis:1; |
@@ -110,8 +471,7 @@ struct cpl_tx_pkt_lso { | |||
110 | u16 vlan; | 471 | u16 vlan; |
111 | __be32 len; | 472 | __be32 len; |
112 | 473 | ||
113 | u32 rsvd2; | 474 | u8 rsvd[5]; |
114 | u8 rsvd3; | ||
115 | #if defined(__LITTLE_ENDIAN_BITFIELD) | 475 | #if defined(__LITTLE_ENDIAN_BITFIELD) |
116 | u8 tcp_hdr_words:4; | 476 | u8 tcp_hdr_words:4; |
117 | u8 ip_hdr_words:4; | 477 | u8 ip_hdr_words:4; |
@@ -138,8 +498,142 @@ struct cpl_rx_pkt { | |||
138 | u8 iff:4; | 498 | u8 iff:4; |
139 | #endif | 499 | #endif |
140 | u16 csum; | 500 | u16 csum; |
141 | __be16 vlan; | 501 | u16 vlan; |
142 | u16 len; | 502 | u16 len; |
143 | }; | 503 | }; |
144 | 504 | ||
505 | struct cpl_l2t_write_req { | ||
506 | union opcode_tid ot; | ||
507 | u32 params; | ||
508 | u8 rsvd1[2]; | ||
509 | u8 dst_mac[6]; | ||
510 | }; | ||
511 | |||
512 | struct cpl_l2t_write_rpl { | ||
513 | union opcode_tid ot; | ||
514 | u8 status; | ||
515 | u8 rsvd[3]; | ||
516 | }; | ||
517 | |||
518 | struct cpl_l2t_read_req { | ||
519 | union opcode_tid ot; | ||
520 | u8 rsvd[3]; | ||
521 | u8 l2t_idx; | ||
522 | }; | ||
523 | |||
524 | struct cpl_l2t_read_rpl { | ||
525 | union opcode_tid ot; | ||
526 | u32 params; | ||
527 | u8 rsvd1[2]; | ||
528 | u8 dst_mac[6]; | ||
529 | }; | ||
530 | |||
531 | struct cpl_smt_write_req { | ||
532 | union opcode_tid ot; | ||
533 | u8 rsvd0; | ||
534 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
535 | u8 rsvd1:1; | ||
536 | u8 mtu_idx:3; | ||
537 | u8 iff:4; | ||
538 | #else | ||
539 | u8 iff:4; | ||
540 | u8 mtu_idx:3; | ||
541 | u8 rsvd1:1; | ||
542 | #endif | ||
543 | u16 rsvd2; | ||
544 | u16 rsvd3; | ||
545 | u8 src_mac1[6]; | ||
546 | u16 rsvd4; | ||
547 | u8 src_mac0[6]; | ||
548 | }; | ||
549 | |||
550 | struct cpl_smt_write_rpl { | ||
551 | union opcode_tid ot; | ||
552 | u8 status; | ||
553 | u8 rsvd[3]; | ||
554 | }; | ||
555 | |||
556 | struct cpl_smt_read_req { | ||
557 | union opcode_tid ot; | ||
558 | u8 rsvd0; | ||
559 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
560 | u8 rsvd1:4; | ||
561 | u8 iff:4; | ||
562 | #else | ||
563 | u8 iff:4; | ||
564 | u8 rsvd1:4; | ||
565 | #endif | ||
566 | u16 rsvd2; | ||
567 | }; | ||
568 | |||
569 | struct cpl_smt_read_rpl { | ||
570 | union opcode_tid ot; | ||
571 | u8 status; | ||
572 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
573 | u8 rsvd1:1; | ||
574 | u8 mtu_idx:3; | ||
575 | u8 rsvd0:4; | ||
576 | #else | ||
577 | u8 rsvd0:4; | ||
578 | u8 mtu_idx:3; | ||
579 | u8 rsvd1:1; | ||
580 | #endif | ||
581 | u16 rsvd2; | ||
582 | u16 rsvd3; | ||
583 | u8 src_mac1[6]; | ||
584 | u16 rsvd4; | ||
585 | u8 src_mac0[6]; | ||
586 | }; | ||
587 | |||
588 | struct cpl_rte_delete_req { | ||
589 | union opcode_tid ot; | ||
590 | u32 params; | ||
591 | }; | ||
592 | |||
593 | struct cpl_rte_delete_rpl { | ||
594 | union opcode_tid ot; | ||
595 | u8 status; | ||
596 | u8 rsvd[3]; | ||
597 | }; | ||
598 | |||
599 | struct cpl_rte_write_req { | ||
600 | union opcode_tid ot; | ||
601 | u32 params; | ||
602 | u32 netmask; | ||
603 | u32 faddr; | ||
604 | }; | ||
605 | |||
606 | struct cpl_rte_write_rpl { | ||
607 | union opcode_tid ot; | ||
608 | u8 status; | ||
609 | u8 rsvd[3]; | ||
610 | }; | ||
611 | |||
612 | struct cpl_rte_read_req { | ||
613 | union opcode_tid ot; | ||
614 | u32 params; | ||
615 | }; | ||
616 | |||
617 | struct cpl_rte_read_rpl { | ||
618 | union opcode_tid ot; | ||
619 | u8 status; | ||
620 | u8 rsvd0[2]; | ||
621 | u8 l2t_idx; | ||
622 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
623 | u8 rsvd1:7; | ||
624 | u8 select:1; | ||
625 | #else | ||
626 | u8 select:1; | ||
627 | u8 rsvd1:7; | ||
628 | #endif | ||
629 | u8 rsvd2[3]; | ||
630 | u32 addr; | ||
631 | }; | ||
632 | |||
633 | struct cpl_mss_change { | ||
634 | union opcode_tid ot; | ||
635 | u32 mss; | ||
636 | }; | ||
637 | |||
145 | #endif /* _CXGB_CPL5_CMD_H_ */ | 638 | #endif /* _CXGB_CPL5_CMD_H_ */ |
639 | |||
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index ad7ff9641a7e..53bec6739812 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <linux/if_vlan.h> | 45 | #include <linux/if_vlan.h> |
46 | #include <linux/mii.h> | 46 | #include <linux/mii.h> |
47 | #include <linux/sockios.h> | 47 | #include <linux/sockios.h> |
48 | #include <linux/proc_fs.h> | ||
49 | #include <linux/dma-mapping.h> | 48 | #include <linux/dma-mapping.h> |
50 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
51 | 50 | ||
@@ -54,36 +53,10 @@ | |||
54 | #include "gmac.h" | 53 | #include "gmac.h" |
55 | #include "cphy.h" | 54 | #include "cphy.h" |
56 | #include "sge.h" | 55 | #include "sge.h" |
56 | #include "tp.h" | ||
57 | #include "espi.h" | 57 | #include "espi.h" |
58 | #include "elmer0.h" | ||
58 | 59 | ||
59 | #ifdef work_struct | ||
60 | #include <linux/tqueue.h> | ||
61 | #define INIT_WORK INIT_TQUEUE | ||
62 | #define schedule_work schedule_task | ||
63 | #define flush_scheduled_work flush_scheduled_tasks | ||
64 | |||
65 | static inline void schedule_mac_stats_update(struct adapter *ap, int secs) | ||
66 | { | ||
67 | mod_timer(&ap->stats_update_timer, jiffies + secs * HZ); | ||
68 | } | ||
69 | |||
70 | static inline void cancel_mac_stats_update(struct adapter *ap) | ||
71 | { | ||
72 | del_timer_sync(&ap->stats_update_timer); | ||
73 | flush_scheduled_tasks(); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Stats update timer for 2.4. It schedules a task to do the actual update as | ||
78 | * we need to access MAC statistics in process context. | ||
79 | */ | ||
80 | static void mac_stats_timer(unsigned long data) | ||
81 | { | ||
82 | struct adapter *ap = (struct adapter *)data; | ||
83 | |||
84 | schedule_task(&ap->stats_update_task); | ||
85 | } | ||
86 | #else | ||
87 | #include <linux/workqueue.h> | 60 | #include <linux/workqueue.h> |
88 | 61 | ||
89 | static inline void schedule_mac_stats_update(struct adapter *ap, int secs) | 62 | static inline void schedule_mac_stats_update(struct adapter *ap, int secs) |
@@ -95,7 +68,6 @@ static inline void cancel_mac_stats_update(struct adapter *ap) | |||
95 | { | 68 | { |
96 | cancel_delayed_work(&ap->stats_update_task); | 69 | cancel_delayed_work(&ap->stats_update_task); |
97 | } | 70 | } |
98 | #endif | ||
99 | 71 | ||
100 | #define MAX_CMDQ_ENTRIES 16384 | 72 | #define MAX_CMDQ_ENTRIES 16384 |
101 | #define MAX_CMDQ1_ENTRIES 1024 | 73 | #define MAX_CMDQ1_ENTRIES 1024 |
@@ -103,10 +75,9 @@ static inline void cancel_mac_stats_update(struct adapter *ap) | |||
103 | #define MAX_RX_JUMBO_BUFFERS 16384 | 75 | #define MAX_RX_JUMBO_BUFFERS 16384 |
104 | #define MAX_TX_BUFFERS_HIGH 16384U | 76 | #define MAX_TX_BUFFERS_HIGH 16384U |
105 | #define MAX_TX_BUFFERS_LOW 1536U | 77 | #define MAX_TX_BUFFERS_LOW 1536U |
78 | #define MAX_TX_BUFFERS 1460U | ||
106 | #define MIN_FL_ENTRIES 32 | 79 | #define MIN_FL_ENTRIES 32 |
107 | 80 | ||
108 | #define PORT_MASK ((1 << MAX_NPORTS) - 1) | ||
109 | |||
110 | #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ | 81 | #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ |
111 | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ | 82 | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ |
112 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) | 83 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) |
@@ -124,8 +95,21 @@ MODULE_LICENSE("GPL"); | |||
124 | static int dflt_msg_enable = DFLT_MSG_ENABLE; | 95 | static int dflt_msg_enable = DFLT_MSG_ENABLE; |
125 | 96 | ||
126 | module_param(dflt_msg_enable, int, 0); | 97 | module_param(dflt_msg_enable, int, 0); |
127 | MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap"); | 98 | MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap"); |
99 | |||
100 | #define HCLOCK 0x0 | ||
101 | #define LCLOCK 0x1 | ||
102 | |||
103 | /* T1 cards powersave mode */ | ||
104 | static int t1_clock(struct adapter *adapter, int mode); | ||
105 | static int t1powersave = 1; /* HW default is powersave mode. */ | ||
128 | 106 | ||
107 | module_param(t1powersave, int, 0); | ||
108 | MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode"); | ||
109 | |||
110 | static int disable_msi = 0; | ||
111 | module_param(disable_msi, int, 0); | ||
112 | MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); | ||
129 | 113 | ||
130 | static const char pci_speed[][4] = { | 114 | static const char pci_speed[][4] = { |
131 | "33", "66", "100", "133" | 115 | "33", "66", "100", "133" |
@@ -149,7 +133,7 @@ static void t1_set_rxmode(struct net_device *dev) | |||
149 | static void link_report(struct port_info *p) | 133 | static void link_report(struct port_info *p) |
150 | { | 134 | { |
151 | if (!netif_carrier_ok(p->dev)) | 135 | if (!netif_carrier_ok(p->dev)) |
152 | printk(KERN_INFO "%s: link down\n", p->dev->name); | 136 | printk(KERN_INFO "%s: link down\n", p->dev->name); |
153 | else { | 137 | else { |
154 | const char *s = "10Mbps"; | 138 | const char *s = "10Mbps"; |
155 | 139 | ||
@@ -159,13 +143,13 @@ static void link_report(struct port_info *p) | |||
159 | case SPEED_100: s = "100Mbps"; break; | 143 | case SPEED_100: s = "100Mbps"; break; |
160 | } | 144 | } |
161 | 145 | ||
162 | printk(KERN_INFO "%s: link up, %s, %s-duplex\n", | 146 | printk(KERN_INFO "%s: link up, %s, %s-duplex\n", |
163 | p->dev->name, s, | 147 | p->dev->name, s, |
164 | p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); | 148 | p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); |
165 | } | 149 | } |
166 | } | 150 | } |
167 | 151 | ||
168 | void t1_link_changed(struct adapter *adapter, int port_id, int link_stat, | 152 | void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat, |
169 | int speed, int duplex, int pause) | 153 | int speed, int duplex, int pause) |
170 | { | 154 | { |
171 | struct port_info *p = &adapter->port[port_id]; | 155 | struct port_info *p = &adapter->port[port_id]; |
@@ -177,6 +161,22 @@ void t1_link_changed(struct adapter *adapter, int port_id, int link_stat, | |||
177 | netif_carrier_off(p->dev); | 161 | netif_carrier_off(p->dev); |
178 | link_report(p); | 162 | link_report(p); |
179 | 163 | ||
164 | /* multi-ports: inform toe */ | ||
165 | if ((speed > 0) && (adapter->params.nports > 1)) { | ||
166 | unsigned int sched_speed = 10; | ||
167 | switch (speed) { | ||
168 | case SPEED_1000: | ||
169 | sched_speed = 1000; | ||
170 | break; | ||
171 | case SPEED_100: | ||
172 | sched_speed = 100; | ||
173 | break; | ||
174 | case SPEED_10: | ||
175 | sched_speed = 10; | ||
176 | break; | ||
177 | } | ||
178 | t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed); | ||
179 | } | ||
180 | } | 180 | } |
181 | } | 181 | } |
182 | 182 | ||
@@ -195,8 +195,10 @@ static void link_start(struct port_info *p) | |||
195 | static void enable_hw_csum(struct adapter *adapter) | 195 | static void enable_hw_csum(struct adapter *adapter) |
196 | { | 196 | { |
197 | if (adapter->flags & TSO_CAPABLE) | 197 | if (adapter->flags & TSO_CAPABLE) |
198 | t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */ | 198 | t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */ |
199 | t1_tp_set_tcp_checksum_offload(adapter, 1); | 199 | if (adapter->flags & UDP_CSUM_CAPABLE) |
200 | t1_tp_set_udp_checksum_offload(adapter->tp, 1); | ||
201 | t1_tp_set_tcp_checksum_offload(adapter->tp, 1); | ||
200 | } | 202 | } |
201 | 203 | ||
202 | /* | 204 | /* |
@@ -217,11 +219,19 @@ static int cxgb_up(struct adapter *adapter) | |||
217 | } | 219 | } |
218 | 220 | ||
219 | t1_interrupts_clear(adapter); | 221 | t1_interrupts_clear(adapter); |
220 | if ((err = request_irq(adapter->pdev->irq, | 222 | |
221 | t1_select_intr_handler(adapter), IRQF_SHARED, | 223 | adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0; |
222 | adapter->name, adapter))) { | 224 | err = request_irq(adapter->pdev->irq, |
225 | t1_select_intr_handler(adapter), | ||
226 | adapter->params.has_msi ? 0 : IRQF_SHARED, | ||
227 | adapter->name, adapter); | ||
228 | if (err) { | ||
229 | if (adapter->params.has_msi) | ||
230 | pci_disable_msi(adapter->pdev); | ||
231 | |||
223 | goto out_err; | 232 | goto out_err; |
224 | } | 233 | } |
234 | |||
225 | t1_sge_start(adapter->sge); | 235 | t1_sge_start(adapter->sge); |
226 | t1_interrupts_enable(adapter); | 236 | t1_interrupts_enable(adapter); |
227 | out_err: | 237 | out_err: |
@@ -236,6 +246,8 @@ static void cxgb_down(struct adapter *adapter) | |||
236 | t1_sge_stop(adapter->sge); | 246 | t1_sge_stop(adapter->sge); |
237 | t1_interrupts_disable(adapter); | 247 | t1_interrupts_disable(adapter); |
238 | free_irq(adapter->pdev->irq, adapter); | 248 | free_irq(adapter->pdev->irq, adapter); |
249 | if (adapter->params.has_msi) | ||
250 | pci_disable_msi(adapter->pdev); | ||
239 | } | 251 | } |
240 | 252 | ||
241 | static int cxgb_open(struct net_device *dev) | 253 | static int cxgb_open(struct net_device *dev) |
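The cxgb_up()/cxgb_down() changes above add the MSI setup pattern: try MSI unless the disable_msi module parameter forbids it, request the interrupt without IRQF_SHARED when MSI is active, and undo MSI both on request_irq() failure and on teardown. A condensed sketch of the same flow (the helper and its parameters are illustrative, not driver API):

static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
                             void *cookie, const char *name, int *has_msi)
{
        int err;

        *has_msi = !disable_msi && pci_enable_msi(pdev) == 0;
        err = request_irq(pdev->irq, handler,
                          *has_msi ? 0 : IRQF_SHARED, name, cookie);
        if (err && *has_msi)
                pci_disable_msi(pdev);   /* fall back cleanly on failure */
        return err;
}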
@@ -290,7 +302,7 @@ static struct net_device_stats *t1_get_stats(struct net_device *dev) | |||
290 | 302 | ||
291 | /* Do a full update of the MAC stats */ | 303 | /* Do a full update of the MAC stats */ |
292 | pstats = p->mac->ops->statistics_update(p->mac, | 304 | pstats = p->mac->ops->statistics_update(p->mac, |
293 | MAC_STATS_UPDATE_FULL); | 305 | MAC_STATS_UPDATE_FULL); |
294 | 306 | ||
295 | ns->tx_packets = pstats->TxUnicastFramesOK + | 307 | ns->tx_packets = pstats->TxUnicastFramesOK + |
296 | pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK; | 308 | pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK; |
@@ -344,47 +356,53 @@ static void set_msglevel(struct net_device *dev, u32 val) | |||
344 | } | 356 | } |
345 | 357 | ||
346 | static char stats_strings[][ETH_GSTRING_LEN] = { | 358 | static char stats_strings[][ETH_GSTRING_LEN] = { |
347 | "TxOctetsOK", | 359 | "TxOctetsOK", |
348 | "TxOctetsBad", | 360 | "TxOctetsBad", |
349 | "TxUnicastFramesOK", | 361 | "TxUnicastFramesOK", |
350 | "TxMulticastFramesOK", | 362 | "TxMulticastFramesOK", |
351 | "TxBroadcastFramesOK", | 363 | "TxBroadcastFramesOK", |
352 | "TxPauseFrames", | 364 | "TxPauseFrames", |
353 | "TxFramesWithDeferredXmissions", | 365 | "TxFramesWithDeferredXmissions", |
354 | "TxLateCollisions", | 366 | "TxLateCollisions", |
355 | "TxTotalCollisions", | 367 | "TxTotalCollisions", |
356 | "TxFramesAbortedDueToXSCollisions", | 368 | "TxFramesAbortedDueToXSCollisions", |
357 | "TxUnderrun", | 369 | "TxUnderrun", |
358 | "TxLengthErrors", | 370 | "TxLengthErrors", |
359 | "TxInternalMACXmitError", | 371 | "TxInternalMACXmitError", |
360 | "TxFramesWithExcessiveDeferral", | 372 | "TxFramesWithExcessiveDeferral", |
361 | "TxFCSErrors", | 373 | "TxFCSErrors", |
362 | 374 | ||
363 | "RxOctetsOK", | 375 | "RxOctetsOK", |
364 | "RxOctetsBad", | 376 | "RxOctetsBad", |
365 | "RxUnicastFramesOK", | 377 | "RxUnicastFramesOK", |
366 | "RxMulticastFramesOK", | 378 | "RxMulticastFramesOK", |
367 | "RxBroadcastFramesOK", | 379 | "RxBroadcastFramesOK", |
368 | "RxPauseFrames", | 380 | "RxPauseFrames", |
369 | "RxFCSErrors", | 381 | "RxFCSErrors", |
370 | "RxAlignErrors", | 382 | "RxAlignErrors", |
371 | "RxSymbolErrors", | 383 | "RxSymbolErrors", |
372 | "RxDataErrors", | 384 | "RxDataErrors", |
373 | "RxSequenceErrors", | 385 | "RxSequenceErrors", |
374 | "RxRuntErrors", | 386 | "RxRuntErrors", |
375 | "RxJabberErrors", | 387 | "RxJabberErrors", |
376 | "RxInternalMACRcvError", | 388 | "RxInternalMACRcvError", |
377 | "RxInRangeLengthErrors", | 389 | "RxInRangeLengthErrors", |
378 | "RxOutOfRangeLengthField", | 390 | "RxOutOfRangeLengthField", |
379 | "RxFrameTooLongErrors", | 391 | "RxFrameTooLongErrors", |
380 | 392 | ||
381 | "TSO", | 393 | /* Port stats */ |
382 | "VLANextractions", | 394 | "RxPackets", |
383 | "VLANinsertions", | ||
384 | "RxCsumGood", | 395 | "RxCsumGood", |
396 | "TxPackets", | ||
385 | "TxCsumOffload", | 397 | "TxCsumOffload", |
386 | "RxDrops" | 398 | "TxTso", |
387 | 399 | "RxVlan", | |
400 | "TxVlan", | ||
401 | |||
402 | /* Interrupt stats */ | ||
403 | "rx drops", | ||
404 | "pure_rsps", | ||
405 | "unhandled irqs", | ||
388 | "respQ_empty", | 406 | "respQ_empty", |
389 | "respQ_overflow", | 407 | "respQ_overflow", |
390 | "freelistQ_empty", | 408 | "freelistQ_empty", |
@@ -392,11 +410,7 @@ static char stats_strings[][ETH_GSTRING_LEN] = { | |||
392 | "pkt_mismatch", | 410 | "pkt_mismatch", |
393 | "cmdQ_full0", | 411 | "cmdQ_full0", |
394 | "cmdQ_full1", | 412 | "cmdQ_full1", |
395 | "tx_ipfrags", | 413 | |
396 | "tx_reg_pkts", | ||
397 | "tx_lso_pkts", | ||
398 | "tx_do_cksum", | ||
399 | |||
400 | "espi_DIP2ParityErr", | 414 | "espi_DIP2ParityErr", |
401 | "espi_DIP4Err", | 415 | "espi_DIP4Err", |
402 | "espi_RxDrops", | 416 | "espi_RxDrops", |
@@ -404,7 +418,7 @@ static char stats_strings[][ETH_GSTRING_LEN] = { | |||
404 | "espi_RxOvfl", | 418 | "espi_RxOvfl", |
405 | "espi_ParityErr" | 419 | "espi_ParityErr" |
406 | }; | 420 | }; |
407 | 421 | ||
408 | #define T2_REGMAP_SIZE (3 * 1024) | 422 | #define T2_REGMAP_SIZE (3 * 1024) |
409 | 423 | ||
410 | static int get_regs_len(struct net_device *dev) | 424 | static int get_regs_len(struct net_device *dev) |
@@ -439,65 +453,77 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | |||
439 | struct adapter *adapter = dev->priv; | 453 | struct adapter *adapter = dev->priv; |
440 | struct cmac *mac = adapter->port[dev->if_port].mac; | 454 | struct cmac *mac = adapter->port[dev->if_port].mac; |
441 | const struct cmac_statistics *s; | 455 | const struct cmac_statistics *s; |
442 | const struct sge_port_stats *ss; | ||
443 | const struct sge_intr_counts *t; | 456 | const struct sge_intr_counts *t; |
457 | struct sge_port_stats ss; | ||
444 | 458 | ||
445 | s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); | 459 | s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); |
446 | ss = t1_sge_get_port_stats(adapter->sge, dev->if_port); | ||
447 | t = t1_sge_get_intr_counts(adapter->sge); | ||
448 | 460 | ||
449 | *data++ = s->TxOctetsOK; | 461 | *data++ = s->TxOctetsOK; |
450 | *data++ = s->TxOctetsBad; | 462 | *data++ = s->TxOctetsBad; |
451 | *data++ = s->TxUnicastFramesOK; | 463 | *data++ = s->TxUnicastFramesOK; |
452 | *data++ = s->TxMulticastFramesOK; | 464 | *data++ = s->TxMulticastFramesOK; |
453 | *data++ = s->TxBroadcastFramesOK; | 465 | *data++ = s->TxBroadcastFramesOK; |
454 | *data++ = s->TxPauseFrames; | 466 | *data++ = s->TxPauseFrames; |
455 | *data++ = s->TxFramesWithDeferredXmissions; | 467 | *data++ = s->TxFramesWithDeferredXmissions; |
456 | *data++ = s->TxLateCollisions; | 468 | *data++ = s->TxLateCollisions; |
457 | *data++ = s->TxTotalCollisions; | 469 | *data++ = s->TxTotalCollisions; |
458 | *data++ = s->TxFramesAbortedDueToXSCollisions; | 470 | *data++ = s->TxFramesAbortedDueToXSCollisions; |
459 | *data++ = s->TxUnderrun; | 471 | *data++ = s->TxUnderrun; |
460 | *data++ = s->TxLengthErrors; | 472 | *data++ = s->TxLengthErrors; |
461 | *data++ = s->TxInternalMACXmitError; | 473 | *data++ = s->TxInternalMACXmitError; |
462 | *data++ = s->TxFramesWithExcessiveDeferral; | 474 | *data++ = s->TxFramesWithExcessiveDeferral; |
463 | *data++ = s->TxFCSErrors; | 475 | *data++ = s->TxFCSErrors; |
464 | 476 | ||
465 | *data++ = s->RxOctetsOK; | 477 | *data++ = s->RxOctetsOK; |
466 | *data++ = s->RxOctetsBad; | 478 | *data++ = s->RxOctetsBad; |
467 | *data++ = s->RxUnicastFramesOK; | 479 | *data++ = s->RxUnicastFramesOK; |
468 | *data++ = s->RxMulticastFramesOK; | 480 | *data++ = s->RxMulticastFramesOK; |
469 | *data++ = s->RxBroadcastFramesOK; | 481 | *data++ = s->RxBroadcastFramesOK; |
470 | *data++ = s->RxPauseFrames; | 482 | *data++ = s->RxPauseFrames; |
471 | *data++ = s->RxFCSErrors; | 483 | *data++ = s->RxFCSErrors; |
472 | *data++ = s->RxAlignErrors; | 484 | *data++ = s->RxAlignErrors; |
473 | *data++ = s->RxSymbolErrors; | 485 | *data++ = s->RxSymbolErrors; |
474 | *data++ = s->RxDataErrors; | 486 | *data++ = s->RxDataErrors; |
475 | *data++ = s->RxSequenceErrors; | 487 | *data++ = s->RxSequenceErrors; |
476 | *data++ = s->RxRuntErrors; | 488 | *data++ = s->RxRuntErrors; |
477 | *data++ = s->RxJabberErrors; | 489 | *data++ = s->RxJabberErrors; |
478 | *data++ = s->RxInternalMACRcvError; | 490 | *data++ = s->RxInternalMACRcvError; |
479 | *data++ = s->RxInRangeLengthErrors; | 491 | *data++ = s->RxInRangeLengthErrors; |
480 | *data++ = s->RxOutOfRangeLengthField; | 492 | *data++ = s->RxOutOfRangeLengthField; |
481 | *data++ = s->RxFrameTooLongErrors; | 493 | *data++ = s->RxFrameTooLongErrors; |
482 | 494 | ||
483 | *data++ = ss->tso; | 495 | t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); |
484 | *data++ = ss->vlan_xtract; | 496 | *data++ = ss.rx_packets; |
485 | *data++ = ss->vlan_insert; | 497 | *data++ = ss.rx_cso_good; |
486 | *data++ = ss->rx_cso_good; | 498 | *data++ = ss.tx_packets; |
487 | *data++ = ss->tx_cso; | 499 | *data++ = ss.tx_cso; |
488 | *data++ = ss->rx_drops; | 500 | *data++ = ss.tx_tso; |
489 | 501 | *data++ = ss.vlan_xtract; | |
490 | *data++ = (u64)t->respQ_empty; | 502 | *data++ = ss.vlan_insert; |
491 | *data++ = (u64)t->respQ_overflow; | 503 | |
492 | *data++ = (u64)t->freelistQ_empty; | 504 | t = t1_sge_get_intr_counts(adapter->sge); |
493 | *data++ = (u64)t->pkt_too_big; | 505 | *data++ = t->rx_drops; |
494 | *data++ = (u64)t->pkt_mismatch; | 506 | *data++ = t->pure_rsps; |
495 | *data++ = (u64)t->cmdQ_full[0]; | 507 | *data++ = t->unhandled_irqs; |
496 | *data++ = (u64)t->cmdQ_full[1]; | 508 | *data++ = t->respQ_empty; |
497 | *data++ = (u64)t->tx_ipfrags; | 509 | *data++ = t->respQ_overflow; |
498 | *data++ = (u64)t->tx_reg_pkts; | 510 | *data++ = t->freelistQ_empty; |
499 | *data++ = (u64)t->tx_lso_pkts; | 511 | *data++ = t->pkt_too_big; |
500 | *data++ = (u64)t->tx_do_cksum; | 512 | *data++ = t->pkt_mismatch; |
513 | *data++ = t->cmdQ_full[0]; | ||
514 | *data++ = t->cmdQ_full[1]; | ||
515 | |||
516 | if (adapter->espi) { | ||
517 | const struct espi_intr_counts *e; | ||
518 | |||
519 | e = t1_espi_get_intr_counts(adapter->espi); | ||
520 | *data++ = e->DIP2_parity_err; | ||
521 | *data++ = e->DIP4_err; | ||
522 | *data++ = e->rx_drops; | ||
523 | *data++ = e->tx_drops; | ||
524 | *data++ = e->rx_ovflw; | ||
525 | *data++ = e->parity_err; | ||
526 | } | ||
501 | } | 527 | } |
502 | 528 | ||
503 | static inline void reg_block_dump(struct adapter *ap, void *buf, | 529 | static inline void reg_block_dump(struct adapter *ap, void *buf, |
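The get_stats() rework above also shows the new t1_sge_get_port_stats() convention: instead of returning a pointer into SGE-owned state, it fills a caller-provided struct sge_port_stats. A short sketch of a caller under that convention (the helper is illustrative; only fields copied in get_stats() are assumed to exist):

static u64 example_port_tx_packets(struct adapter *adapter, int port)
{
        struct sge_port_stats ss;

        t1_sge_get_port_stats(adapter->sge, port, &ss);
        return ss.tx_packets;
}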
@@ -521,6 +547,15 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
521 | 547 | ||
522 | memset(buf, 0, T2_REGMAP_SIZE); | 548 | memset(buf, 0, T2_REGMAP_SIZE); |
523 | reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); | 549 | reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); |
550 | reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE); | ||
551 | reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR); | ||
552 | reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT); | ||
553 | reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE); | ||
554 | reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE); | ||
555 | reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT); | ||
556 | reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL); | ||
557 | reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE); | ||
558 | reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); | ||
524 | } | 559 | } |
525 | 560 | ||
526 | static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 561 | static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
@@ -539,12 +574,12 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
539 | cmd->duplex = -1; | 574 | cmd->duplex = -1; |
540 | } | 575 | } |
541 | 576 | ||
542 | cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; | 577 | cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; |
543 | cmd->phy_address = p->phy->addr; | 578 | cmd->phy_address = p->phy->addr; |
544 | cmd->transceiver = XCVR_EXTERNAL; | 579 | cmd->transceiver = XCVR_EXTERNAL; |
545 | cmd->autoneg = p->link_config.autoneg; | 580 | cmd->autoneg = p->link_config.autoneg; |
546 | cmd->maxtxpkt = 0; | 581 | cmd->maxtxpkt = 0; |
547 | cmd->maxrxpkt = 0; | 582 | cmd->maxrxpkt = 0; |
548 | return 0; | 583 | return 0; |
549 | } | 584 | } |
550 | 585 | ||
@@ -715,7 +750,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) | |||
715 | return -EINVAL; | 750 | return -EINVAL; |
716 | 751 | ||
717 | if (adapter->flags & FULL_INIT_DONE) | 752 | if (adapter->flags & FULL_INIT_DONE) |
718 | return -EBUSY; | 753 | return -EBUSY; |
719 | 754 | ||
720 | adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; | 755 | adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; |
721 | adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; | 756 | adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; |
@@ -759,7 +794,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | |||
759 | 794 | ||
760 | static int get_eeprom_len(struct net_device *dev) | 795 | static int get_eeprom_len(struct net_device *dev) |
761 | { | 796 | { |
762 | return EEPROM_SIZE; | 797 | struct adapter *adapter = dev->priv; |
798 | |||
799 | return t1_is_asic(adapter) ? EEPROM_SIZE : 0; | ||
763 | } | 800 | } |
764 | 801 | ||
765 | #define EEPROM_MAGIC(ap) \ | 802 | #define EEPROM_MAGIC(ap) \ |
@@ -809,47 +846,36 @@ static const struct ethtool_ops t1_ethtool_ops = { | |||
809 | .set_tso = set_tso, | 846 | .set_tso = set_tso, |
810 | }; | 847 | }; |
811 | 848 | ||
812 | static void cxgb_proc_cleanup(struct adapter *adapter, | ||
813 | struct proc_dir_entry *dir) | ||
814 | { | ||
815 | const char *name; | ||
816 | name = adapter->name; | ||
817 | remove_proc_entry(name, dir); | ||
818 | } | ||
819 | //#define chtoe_setup_toedev(adapter) NULL | ||
820 | #define update_mtu_tab(adapter) | ||
821 | #define write_smt_entry(adapter, idx) | ||
822 | |||
823 | static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) | 849 | static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) |
824 | { | 850 | { |
825 | struct adapter *adapter = dev->priv; | 851 | struct adapter *adapter = dev->priv; |
826 | struct mii_ioctl_data *data = if_mii(req); | 852 | struct mii_ioctl_data *data = if_mii(req); |
827 | 853 | ||
828 | switch (cmd) { | 854 | switch (cmd) { |
829 | case SIOCGMIIPHY: | 855 | case SIOCGMIIPHY: |
830 | data->phy_id = adapter->port[dev->if_port].phy->addr; | 856 | data->phy_id = adapter->port[dev->if_port].phy->addr; |
831 | /* FALLTHRU */ | 857 | /* FALLTHRU */ |
832 | case SIOCGMIIREG: { | 858 | case SIOCGMIIREG: { |
833 | struct cphy *phy = adapter->port[dev->if_port].phy; | 859 | struct cphy *phy = adapter->port[dev->if_port].phy; |
834 | u32 val; | 860 | u32 val; |
835 | 861 | ||
836 | if (!phy->mdio_read) | 862 | if (!phy->mdio_read) |
837 | return -EOPNOTSUPP; | 863 | return -EOPNOTSUPP; |
838 | phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f, | 864 | phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f, |
839 | &val); | 865 | &val); |
840 | data->val_out = val; | 866 | data->val_out = val; |
841 | break; | 867 | break; |
842 | } | 868 | } |
843 | case SIOCSMIIREG: { | 869 | case SIOCSMIIREG: { |
844 | struct cphy *phy = adapter->port[dev->if_port].phy; | 870 | struct cphy *phy = adapter->port[dev->if_port].phy; |
845 | 871 | ||
846 | if (!capable(CAP_NET_ADMIN)) | 872 | if (!capable(CAP_NET_ADMIN)) |
847 | return -EPERM; | 873 | return -EPERM; |
848 | if (!phy->mdio_write) | 874 | if (!phy->mdio_write) |
849 | return -EOPNOTSUPP; | 875 | return -EOPNOTSUPP; |
850 | phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f, | 876 | phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f, |
851 | data->val_in); | 877 | data->val_in); |
852 | break; | 878 | break; |
853 | } | 879 | } |
854 | 880 | ||
855 | default: | 881 | default: |
@@ -865,9 +891,9 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu) | |||
865 | struct cmac *mac = adapter->port[dev->if_port].mac; | 891 | struct cmac *mac = adapter->port[dev->if_port].mac; |
866 | 892 | ||
867 | if (!mac->ops->set_mtu) | 893 | if (!mac->ops->set_mtu) |
868 | return -EOPNOTSUPP; | 894 | return -EOPNOTSUPP; |
869 | if (new_mtu < 68) | 895 | if (new_mtu < 68) |
870 | return -EINVAL; | 896 | return -EINVAL; |
871 | if ((ret = mac->ops->set_mtu(mac, new_mtu))) | 897 | if ((ret = mac->ops->set_mtu(mac, new_mtu))) |
872 | return ret; | 898 | return ret; |
873 | dev->mtu = new_mtu; | 899 | dev->mtu = new_mtu; |
@@ -918,7 +944,7 @@ static void t1_netpoll(struct net_device *dev) | |||
918 | struct adapter *adapter = dev->priv; | 944 | struct adapter *adapter = dev->priv; |
919 | 945 | ||
920 | local_irq_save(flags); | 946 | local_irq_save(flags); |
921 | t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter); | 947 | t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter); |
922 | local_irq_restore(flags); | 948 | local_irq_restore(flags); |
923 | } | 949 | } |
924 | #endif | 950 | #endif |
@@ -955,14 +981,14 @@ static void ext_intr_task(void *data) | |||
955 | { | 981 | { |
956 | struct adapter *adapter = data; | 982 | struct adapter *adapter = data; |
957 | 983 | ||
958 | elmer0_ext_intr_handler(adapter); | 984 | t1_elmer0_ext_intr_handler(adapter); |
959 | 985 | ||
960 | /* Now reenable external interrupts */ | 986 | /* Now reenable external interrupts */ |
961 | spin_lock_irq(&adapter->async_lock); | 987 | spin_lock_irq(&adapter->async_lock); |
962 | adapter->slow_intr_mask |= F_PL_INTR_EXT; | 988 | adapter->slow_intr_mask |= F_PL_INTR_EXT; |
963 | writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE); | 989 | writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE); |
964 | writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, | 990 | writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, |
965 | adapter->regs + A_PL_ENABLE); | 991 | adapter->regs + A_PL_ENABLE); |
966 | spin_unlock_irq(&adapter->async_lock); | 992 | spin_unlock_irq(&adapter->async_lock); |
967 | } | 993 | } |
968 | 994 | ||
@@ -978,7 +1004,7 @@ void t1_elmer0_ext_intr(struct adapter *adapter) | |||
978 | */ | 1004 | */ |
979 | adapter->slow_intr_mask &= ~F_PL_INTR_EXT; | 1005 | adapter->slow_intr_mask &= ~F_PL_INTR_EXT; |
980 | writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, | 1006 | writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, |
981 | adapter->regs + A_PL_ENABLE); | 1007 | adapter->regs + A_PL_ENABLE); |
982 | schedule_work(&adapter->ext_intr_handler_task); | 1008 | schedule_work(&adapter->ext_intr_handler_task); |
983 | } | 1009 | } |
984 | 1010 | ||
@@ -1011,7 +1037,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1011 | 1037 | ||
1012 | err = pci_enable_device(pdev); | 1038 | err = pci_enable_device(pdev); |
1013 | if (err) | 1039 | if (err) |
1014 | return err; | 1040 | return err; |
1015 | 1041 | ||
1016 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | 1042 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
1017 | CH_ERR("%s: cannot find PCI device memory base address\n", | 1043 | CH_ERR("%s: cannot find PCI device memory base address\n", |
@@ -1043,7 +1069,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1043 | 1069 | ||
1044 | pci_set_master(pdev); | 1070 | pci_set_master(pdev); |
1045 | 1071 | ||
1046 | mmio_start = pci_resource_start(pdev, 0); | 1072 | mmio_start = pci_resource_start(pdev, 0); |
1047 | mmio_len = pci_resource_len(pdev, 0); | 1073 | mmio_len = pci_resource_len(pdev, 0); |
1048 | bi = t1_get_board_info(ent->driver_data); | 1074 | bi = t1_get_board_info(ent->driver_data); |
1049 | 1075 | ||
@@ -1081,21 +1107,15 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1081 | adapter->msg_enable = dflt_msg_enable; | 1107 | adapter->msg_enable = dflt_msg_enable; |
1082 | adapter->mmio_len = mmio_len; | 1108 | adapter->mmio_len = mmio_len; |
1083 | 1109 | ||
1084 | init_MUTEX(&adapter->mib_mutex); | ||
1085 | spin_lock_init(&adapter->tpi_lock); | 1110 | spin_lock_init(&adapter->tpi_lock); |
1086 | spin_lock_init(&adapter->work_lock); | 1111 | spin_lock_init(&adapter->work_lock); |
1087 | spin_lock_init(&adapter->async_lock); | 1112 | spin_lock_init(&adapter->async_lock); |
1113 | spin_lock_init(&adapter->mac_lock); | ||
1088 | 1114 | ||
1089 | INIT_WORK(&adapter->ext_intr_handler_task, | 1115 | INIT_WORK(&adapter->ext_intr_handler_task, |
1090 | ext_intr_task, adapter); | 1116 | ext_intr_task, adapter); |
1091 | INIT_WORK(&adapter->stats_update_task, mac_stats_task, | 1117 | INIT_WORK(&adapter->stats_update_task, mac_stats_task, |
1092 | adapter); | 1118 | adapter); |
1093 | #ifdef work_struct | ||
1094 | init_timer(&adapter->stats_update_timer); | ||
1095 | adapter->stats_update_timer.function = mac_stats_timer; | ||
1096 | adapter->stats_update_timer.data = | ||
1097 | (unsigned long)adapter; | ||
1098 | #endif | ||
1099 | 1119 | ||
1100 | pci_set_drvdata(pdev, netdev); | 1120 | pci_set_drvdata(pdev, netdev); |
1101 | } | 1121 | } |
@@ -1122,16 +1142,19 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1122 | netdev->vlan_rx_register = vlan_rx_register; | 1142 | netdev->vlan_rx_register = vlan_rx_register; |
1123 | netdev->vlan_rx_kill_vid = vlan_rx_kill_vid; | 1143 | netdev->vlan_rx_kill_vid = vlan_rx_kill_vid; |
1124 | #endif | 1144 | #endif |
1125 | adapter->flags |= TSO_CAPABLE; | 1145 | |
1126 | netdev->features |= NETIF_F_TSO; | 1146 | /* T204: disable TSO */ |
1147 | if (!(is_T2(adapter)) || bi->port_number != 4) { | ||
1148 | adapter->flags |= TSO_CAPABLE; | ||
1149 | netdev->features |= NETIF_F_TSO; | ||
1150 | } | ||
1127 | } | 1151 | } |
1128 | 1152 | ||
1129 | netdev->open = cxgb_open; | 1153 | netdev->open = cxgb_open; |
1130 | netdev->stop = cxgb_close; | 1154 | netdev->stop = cxgb_close; |
1131 | netdev->hard_start_xmit = t1_start_xmit; | 1155 | netdev->hard_start_xmit = t1_start_xmit; |
1132 | netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ? | 1156 | netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ? |
1133 | sizeof(struct cpl_tx_pkt_lso) : | 1157 | sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); |
1134 | sizeof(struct cpl_tx_pkt); | ||
1135 | netdev->get_stats = t1_get_stats; | 1158 | netdev->get_stats = t1_get_stats; |
1136 | netdev->set_multicast_list = t1_set_rxmode; | 1159 | netdev->set_multicast_list = t1_set_rxmode; |
1137 | netdev->do_ioctl = t1_ioctl; | 1160 | netdev->do_ioctl = t1_ioctl; |
@@ -1142,7 +1165,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1142 | #endif | 1165 | #endif |
1143 | netdev->weight = 64; | 1166 | netdev->weight = 64; |
1144 | 1167 | ||
1145 | SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); | 1168 | SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); |
1146 | } | 1169 | } |
1147 | 1170 | ||
1148 | if (t1_init_sw_modules(adapter, bi) < 0) { | 1171 | if (t1_init_sw_modules(adapter, bi) < 0) { |
@@ -1169,7 +1192,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1169 | if (!adapter->registered_device_map) | 1192 | if (!adapter->registered_device_map) |
1170 | adapter->name = adapter->port[i].dev->name; | 1193 | adapter->name = adapter->port[i].dev->name; |
1171 | 1194 | ||
1172 | __set_bit(i, &adapter->registered_device_map); | 1195 | __set_bit(i, &adapter->registered_device_map); |
1173 | } | 1196 | } |
1174 | } | 1197 | } |
1175 | if (!adapter->registered_device_map) { | 1198 | if (!adapter->registered_device_map) { |
@@ -1182,18 +1205,28 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1182 | bi->desc, adapter->params.chip_revision, | 1205 | bi->desc, adapter->params.chip_revision, |
1183 | adapter->params.pci.is_pcix ? "PCIX" : "PCI", | 1206 | adapter->params.pci.is_pcix ? "PCIX" : "PCI", |
1184 | adapter->params.pci.speed, adapter->params.pci.width); | 1207 | adapter->params.pci.speed, adapter->params.pci.width); |
1208 | |||
1209 | /* | ||
1210 | * Set the T1B ASIC and memory clocks. | ||
1211 | */ | ||
1212 | if (t1powersave) | ||
1213 | adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */ | ||
1214 | else | ||
1215 | adapter->t1powersave = HCLOCK; | ||
1216 | if (t1_is_T1B(adapter)) | ||
1217 | t1_clock(adapter, t1powersave); | ||
1218 | |||
1185 | return 0; | 1219 | return 0; |
1186 | 1220 | ||
1187 | out_release_adapter_res: | 1221 | out_release_adapter_res: |
1188 | t1_free_sw_modules(adapter); | 1222 | t1_free_sw_modules(adapter); |
1189 | out_free_dev: | 1223 | out_free_dev: |
1190 | if (adapter) { | 1224 | if (adapter) { |
1191 | if (adapter->regs) iounmap(adapter->regs); | 1225 | if (adapter->regs) |
1226 | iounmap(adapter->regs); | ||
1192 | for (i = bi->port_number - 1; i >= 0; --i) | 1227 | for (i = bi->port_number - 1; i >= 0; --i) |
1193 | if (adapter->port[i].dev) { | 1228 | if (adapter->port[i].dev) |
1194 | cxgb_proc_cleanup(adapter, proc_root_driver); | 1229 | free_netdev(adapter->port[i].dev); |
1195 | kfree(adapter->port[i].dev); | ||
1196 | } | ||
1197 | } | 1230 | } |
1198 | pci_release_regions(pdev); | 1231 | pci_release_regions(pdev); |
1199 | out_disable_pdev: | 1232 | out_disable_pdev: |
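The probe path above keys the T1B clock mode off a t1powersave value that is defined outside this hunk. As an assumption for illustration only, it would typically be a module parameter declared near the top of the file along these lines (name of the description string and the default are guesses, not taken from the patch):

    /* Hypothetical declaration -- the real patch defines t1powersave elsewhere. */
    static int t1powersave = 1;	/* 1 = low clock (power save), 0 = high clock */
    module_param(t1powersave, int, 0);
    MODULE_PARM_DESC(t1powersave, "Use low-clock (power-save) mode on T1B ASICs");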
@@ -1202,6 +1235,155 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1202 | return err; | 1235 | return err; |
1203 | } | 1236 | } |
1204 | 1237 | ||
1238 | static void bit_bang(struct adapter *adapter, int bitdata, int nbits) | ||
1239 | { | ||
1240 | int data; | ||
1241 | int i; | ||
1242 | u32 val; | ||
1243 | |||
1244 | enum { | ||
1245 | S_CLOCK = 1 << 3, | ||
1246 | S_DATA = 1 << 4 | ||
1247 | }; | ||
1248 | |||
1249 | for (i = (nbits - 1); i > -1; i--) { | ||
1250 | |||
1251 | udelay(50); | ||
1252 | |||
1253 | data = ((bitdata >> i) & 0x1); | ||
1254 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1255 | |||
1256 | if (data) | ||
1257 | val |= S_DATA; | ||
1258 | else | ||
1259 | val &= ~S_DATA; | ||
1260 | |||
1261 | udelay(50); | ||
1262 | |||
1263 | /* Set SCLOCK low */ | ||
1264 | val &= ~S_CLOCK; | ||
1265 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1266 | |||
1267 | udelay(50); | ||
1268 | |||
1269 | /* Write SCLOCK high */ | ||
1270 | val |= S_CLOCK; | ||
1271 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1272 | |||
1273 | } | ||
1274 | } | ||
1275 | |||
1276 | static int t1_clock(struct adapter *adapter, int mode) | ||
1277 | { | ||
1278 | u32 val; | ||
1279 | int M_CORE_VAL; | ||
1280 | int M_MEM_VAL; | ||
1281 | |||
1282 | enum { | ||
1283 | M_CORE_BITS = 9, | ||
1284 | T_CORE_VAL = 0, | ||
1285 | T_CORE_BITS = 2, | ||
1286 | N_CORE_VAL = 0, | ||
1287 | N_CORE_BITS = 2, | ||
1288 | M_MEM_BITS = 9, | ||
1289 | T_MEM_VAL = 0, | ||
1290 | T_MEM_BITS = 2, | ||
1291 | N_MEM_VAL = 0, | ||
1292 | N_MEM_BITS = 2, | ||
1293 | NP_LOAD = 1 << 17, | ||
1294 | S_LOAD_MEM = 1 << 5, | ||
1295 | S_LOAD_CORE = 1 << 6, | ||
1296 | S_CLOCK = 1 << 3 | ||
1297 | }; | ||
1298 | |||
1299 | if (!t1_is_T1B(adapter)) | ||
1300 | return -ENODEV; /* Can't re-clock this chip. */ | ||
1301 | |||
1302 | if (mode & 2) { | ||
1303 | return 0; /* show current mode. */ | ||
1304 | } | ||
1305 | |||
1306 | if ((adapter->t1powersave & 1) == (mode & 1)) | ||
1307 | return -EALREADY; /* ASIC already running in mode. */ | ||
1308 | |||
1309 | if ((mode & 1) == HCLOCK) { | ||
1310 | M_CORE_VAL = 0x14; | ||
1311 | M_MEM_VAL = 0x18; | ||
1312 | adapter->t1powersave = HCLOCK; /* overclock */ | ||
1313 | } else { | ||
1314 | M_CORE_VAL = 0xe; | ||
1315 | M_MEM_VAL = 0x10; | ||
1316 | adapter->t1powersave = LCLOCK; /* underclock */ | ||
1317 | } | ||
1318 | |||
1319 | /* Don't interrupt this serial stream! */ | ||
1320 | spin_lock(&adapter->tpi_lock); | ||
1321 | |||
1322 | /* Initialize for ASIC core */ | ||
1323 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1324 | val |= NP_LOAD; | ||
1325 | udelay(50); | ||
1326 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1327 | udelay(50); | ||
1328 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1329 | val &= ~S_LOAD_CORE; | ||
1330 | val &= ~S_CLOCK; | ||
1331 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1332 | udelay(50); | ||
1333 | |||
1334 | /* Serial program the ASIC clock synthesizer */ | ||
1335 | bit_bang(adapter, T_CORE_VAL, T_CORE_BITS); | ||
1336 | bit_bang(adapter, N_CORE_VAL, N_CORE_BITS); | ||
1337 | bit_bang(adapter, M_CORE_VAL, M_CORE_BITS); | ||
1338 | udelay(50); | ||
1339 | |||
1340 | /* Finish ASIC core */ | ||
1341 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1342 | val |= S_LOAD_CORE; | ||
1343 | udelay(50); | ||
1344 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1345 | udelay(50); | ||
1346 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1347 | val &= ~S_LOAD_CORE; | ||
1348 | udelay(50); | ||
1349 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1350 | udelay(50); | ||
1351 | |||
1352 | /* Initialize for memory */ | ||
1353 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1354 | val |= NP_LOAD; | ||
1355 | udelay(50); | ||
1356 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1357 | udelay(50); | ||
1358 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1359 | val &= ~S_LOAD_MEM; | ||
1360 | val &= ~S_CLOCK; | ||
1361 | udelay(50); | ||
1362 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1363 | udelay(50); | ||
1364 | |||
1365 | /* Serial program the memory clock synthesizer */ | ||
1366 | bit_bang(adapter, T_MEM_VAL, T_MEM_BITS); | ||
1367 | bit_bang(adapter, N_MEM_VAL, N_MEM_BITS); | ||
1368 | bit_bang(adapter, M_MEM_VAL, M_MEM_BITS); | ||
1369 | udelay(50); | ||
1370 | |||
1371 | /* Finish memory */ | ||
1372 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1373 | val |= S_LOAD_MEM; | ||
1374 | udelay(50); | ||
1375 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1376 | udelay(50); | ||
1377 | __t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
1378 | val &= ~S_LOAD_MEM; | ||
1379 | udelay(50); | ||
1380 | __t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
1381 | |||
1382 | spin_unlock(&adapter->tpi_lock); | ||
1383 | |||
1384 | return 0; | ||
1385 | } | ||
1386 | |||
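bit_bang() above serializes a value MSB-first onto the ELMER0 GPO data line, toggling the clock bit around each data bit; t1_clock() then latches the shifted T/N/M words into the core and memory synthesizers with the S_LOAD_* strobes. A small self-contained sketch of the same MSB-first shift order (plain user-space C against a hypothetical software shift register, no driver API):

    #include <stdio.h>
    #include <stdint.h>

    /* Shift 'nbits' of 'bitdata' MSB-first into a software shift register,
     * mirroring the order bit_bang() clocks bits out on the GPO data pin. */
    static uint32_t shift_in_msb_first(uint32_t reg, int bitdata, int nbits)
    {
    	int i;

    	for (i = nbits - 1; i >= 0; i--)
    		reg = (reg << 1) | ((bitdata >> i) & 1);
    	return reg;
    }

    int main(void)
    {
    	uint32_t reg = 0;

    	/* Same field order t1_clock() uses for the core synthesizer:
    	 * T (2 bits), N (2 bits), then M (9 bits). Values are examples. */
    	reg = shift_in_msb_first(reg, 0, 2);	/* T_CORE_VAL */
    	reg = shift_in_msb_first(reg, 0, 2);	/* N_CORE_VAL */
    	reg = shift_in_msb_first(reg, 0x14, 9);	/* M_CORE_VAL for high clock */

    	printf("shift register = 0x%08x\n", (unsigned)reg);	/* 0x00000014 */
    	return 0;
    }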
1205 | static inline void t1_sw_reset(struct pci_dev *pdev) | 1387 | static inline void t1_sw_reset(struct pci_dev *pdev) |
1206 | { | 1388 | { |
1207 | pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3); | 1389 | pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3); |
@@ -1223,10 +1405,9 @@ static void __devexit remove_one(struct pci_dev *pdev) | |||
1223 | t1_free_sw_modules(adapter); | 1405 | t1_free_sw_modules(adapter); |
1224 | iounmap(adapter->regs); | 1406 | iounmap(adapter->regs); |
1225 | while (--i >= 0) | 1407 | while (--i >= 0) |
1226 | if (adapter->port[i].dev) { | 1408 | if (adapter->port[i].dev) |
1227 | cxgb_proc_cleanup(adapter, proc_root_driver); | 1409 | free_netdev(adapter->port[i].dev); |
1228 | kfree(adapter->port[i].dev); | 1410 | |
1229 | } | ||
1230 | pci_release_regions(pdev); | 1411 | pci_release_regions(pdev); |
1231 | pci_disable_device(pdev); | 1412 | pci_disable_device(pdev); |
1232 | pci_set_drvdata(pdev, NULL); | 1413 | pci_set_drvdata(pdev, NULL); |
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h index 5590cb2dac19..9ebecaa97d31 100644 --- a/drivers/net/chelsio/elmer0.h +++ b/drivers/net/chelsio/elmer0.h | |||
@@ -39,6 +39,12 @@ | |||
39 | #ifndef _CXGB_ELMER0_H_ | 39 | #ifndef _CXGB_ELMER0_H_ |
40 | #define _CXGB_ELMER0_H_ | 40 | #define _CXGB_ELMER0_H_ |
41 | 41 | ||
42 | /* ELMER0 flavors */ | ||
43 | enum { | ||
44 | ELMER0_XC2S300E_6FT256_C, | ||
45 | ELMER0_XC2S100E_6TQ144_C | ||
46 | }; | ||
47 | |||
42 | /* ELMER0 registers */ | 48 | /* ELMER0 registers */ |
43 | #define A_ELMER0_VERSION 0x100000 | 49 | #define A_ELMER0_VERSION 0x100000 |
44 | #define A_ELMER0_PHY_CFG 0x100004 | 50 | #define A_ELMER0_PHY_CFG 0x100004 |
@@ -149,3 +155,4 @@ | |||
149 | #define MI1_OP_INDIRECT_READ 3 | 155 | #define MI1_OP_INDIRECT_READ 3 |
150 | 156 | ||
151 | #endif /* _CXGB_ELMER0_H_ */ | 157 | #endif /* _CXGB_ELMER0_H_ */ |
158 | |||
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c index 542e5e065c6f..4192f0f5b3ee 100644 --- a/drivers/net/chelsio/espi.c +++ b/drivers/net/chelsio/espi.c | |||
@@ -81,46 +81,36 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr, | |||
81 | return busy; | 81 | return busy; |
82 | } | 82 | } |
83 | 83 | ||
84 | /* 1. Deassert rx_reset_core. */ | ||
85 | /* 2. Program TRICN_CNFG registers. */ | ||
86 | /* 3. Deassert rx_reset_link */ | ||
87 | static int tricn_init(adapter_t *adapter) | 84 | static int tricn_init(adapter_t *adapter) |
88 | { | 85 | { |
89 | int i = 0; | 86 | int i, sme = 1; |
90 | int stat = 0; | ||
91 | int timeout = 0; | ||
92 | int is_ready = 0; | ||
93 | 87 | ||
94 | /* 1 */ | 88 | if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) { |
95 | timeout=1000; | 89 | CH_ERR("%s: ESPI clock not ready\n", adapter->name); |
96 | do { | 90 | return -1; |
97 | stat = readl(adapter->regs + A_ESPI_RX_RESET); | ||
98 | is_ready = (stat & 0x4); | ||
99 | timeout--; | ||
100 | udelay(5); | ||
101 | } while (!is_ready || (timeout==0)); | ||
102 | writel(0x2, adapter->regs + A_ESPI_RX_RESET); | ||
103 | if (timeout==0) | ||
104 | { | ||
105 | CH_ERR("ESPI : ERROR : Timeout tricn_init() \n"); | ||
106 | t1_fatal_err(adapter); | ||
107 | } | 91 | } |
108 | 92 | ||
109 | /* 2 */ | 93 | writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET); |
110 | tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); | 94 | |
111 | tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); | 95 | if (sme) { |
112 | tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); | 96 | tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); |
113 | for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); | 97 | tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); |
114 | for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); | 98 | tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); |
115 | for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); | 99 | } |
116 | for (i=4; i<= 4; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1); | 100 | for (i = 1; i <= 8; i++) |
117 | for (i=5; i<= 5; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); | 101 | tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); |
118 | for (i=6; i<= 6; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1); | 102 | for (i = 1; i <= 2; i++) |
119 | for (i=7; i<= 7; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80); | 103 | tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); |
120 | for (i=8; i<= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1); | 104 | for (i = 1; i <= 3; i++) |
121 | 105 | tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); | |
122 | /* 3 */ | 106 | tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1); |
123 | writel(0x3, adapter->regs + A_ESPI_RX_RESET); | 107 | tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1); |
108 | tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1); | ||
109 | tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80); | ||
110 | tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1); | ||
111 | |||
112 | writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST, | ||
113 | adapter->regs + A_ESPI_RX_RESET); | ||
124 | 114 | ||
125 | return 0; | 115 | return 0; |
126 | } | 116 | } |
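The rewritten tricn_init() above now checks F_RX_CLK_STATUS once and bails out instead of spinning. If a bounded wait were ever wanted again, the usual idiom looks like the sketch below (the helper name and attempt count are hypothetical; the register and flag names are the ones used above):

    /* Hypothetical helper: poll the ESPI RX clock status with a bounded retry. */
    static int wait_for_rx_clk(adapter_t *adapter, int attempts)
    {
    	while (attempts--) {
    		if (readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)
    			return 0;	/* clock is up */
    		udelay(5);
    	}
    	return -1;			/* timed out */
    }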
@@ -143,6 +133,7 @@ void t1_espi_intr_enable(struct peespi *espi) | |||
143 | 133 | ||
144 | void t1_espi_intr_clear(struct peespi *espi) | 134 | void t1_espi_intr_clear(struct peespi *espi) |
145 | { | 135 | { |
136 | readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); | ||
146 | writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS); | 137 | writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS); |
147 | writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE); | 138 | writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE); |
148 | } | 139 | } |
@@ -157,7 +148,6 @@ void t1_espi_intr_disable(struct peespi *espi) | |||
157 | 148 | ||
158 | int t1_espi_intr_handler(struct peespi *espi) | 149 | int t1_espi_intr_handler(struct peespi *espi) |
159 | { | 150 | { |
160 | u32 cnt; | ||
161 | u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); | 151 | u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); |
162 | 152 | ||
163 | if (status & F_DIP4ERR) | 153 | if (status & F_DIP4ERR) |
@@ -177,7 +167,7 @@ int t1_espi_intr_handler(struct peespi *espi) | |||
177 | * Must read the error count to clear the interrupt | 167 | * Must read the error count to clear the interrupt |
178 | * that it causes. | 168 | * that it causes. |
179 | */ | 169 | */ |
180 | cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); | 170 | readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); |
181 | } | 171 | } |
182 | 172 | ||
183 | /* | 173 | /* |
@@ -192,7 +182,7 @@ int t1_espi_intr_handler(struct peespi *espi) | |||
192 | 182 | ||
193 | const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi) | 183 | const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi) |
194 | { | 184 | { |
195 | return &espi->intr_cnt; | 185 | return &espi->intr_cnt; |
196 | } | 186 | } |
197 | 187 | ||
198 | static void espi_setup_for_pm3393(adapter_t *adapter) | 188 | static void espi_setup_for_pm3393(adapter_t *adapter) |
@@ -210,17 +200,45 @@ static void espi_setup_for_pm3393(adapter_t *adapter) | |||
210 | writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG); | 200 | writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG); |
211 | } | 201 | } |
212 | 202 | ||
213 | /* T2 Init part -- */ | 203 | static void espi_setup_for_vsc7321(adapter_t *adapter) |
214 | /* 1. Set T_ESPI_MISCCTRL_ADDR */ | 204 | { |
215 | /* 2. Init ESPI registers. */ | 205 | writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); |
216 | /* 3. Init TriCN Hard Macro */ | 206 | writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1); |
217 | int t1_espi_init(struct peespi *espi, int mac_type, int nports) | 207 | writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); |
208 | writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); | ||
209 | writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); | ||
210 | writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); | ||
211 | writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG); | ||
212 | |||
213 | writel(0x08000008, adapter->regs + A_ESPI_TRAIN); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug. | ||
218 | */ | ||
219 | static void espi_setup_for_ixf1010(adapter_t *adapter, int nports) | ||
218 | { | 220 | { |
219 | u32 cnt; | 221 | writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); |
222 | if (nports == 4) { | ||
223 | if (is_T2(adapter)) { | ||
224 | writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); | ||
225 | writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); | ||
226 | } else { | ||
227 | writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); | ||
228 | writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); | ||
229 | } | ||
230 | } else { | ||
231 | writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); | ||
232 | writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); | ||
233 | } | ||
234 | writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG); | ||
220 | 235 | ||
236 | } | ||
237 | |||
238 | int t1_espi_init(struct peespi *espi, int mac_type, int nports) | ||
239 | { | ||
221 | u32 status_enable_extra = 0; | 240 | u32 status_enable_extra = 0; |
222 | adapter_t *adapter = espi->adapter; | 241 | adapter_t *adapter = espi->adapter; |
223 | u32 status, burstval = 0x800100; | ||
224 | 242 | ||
225 | /* Disable ESPI training. MACs that can handle it enable it below. */ | 243 | /* Disable ESPI training. MACs that can handle it enable it below. */ |
226 | writel(0, adapter->regs + A_ESPI_TRAIN); | 244 | writel(0, adapter->regs + A_ESPI_TRAIN); |
@@ -229,38 +247,20 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports) | |||
229 | writel(V_OUT_OF_SYNC_COUNT(4) | | 247 | writel(V_OUT_OF_SYNC_COUNT(4) | |
230 | V_DIP2_PARITY_ERR_THRES(3) | | 248 | V_DIP2_PARITY_ERR_THRES(3) | |
231 | V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); | 249 | V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); |
232 | if (nports == 4) { | 250 | writel(nports == 4 ? 0x200040 : 0x1000080, |
233 | /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */ | 251 | adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); |
234 | burstval = 0x200040; | 252 | } else |
235 | } | 253 | writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); |
236 | } | ||
237 | writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); | ||
238 | 254 | ||
239 | switch (mac_type) { | 255 | if (mac_type == CHBT_MAC_PM3393) |
240 | case CHBT_MAC_PM3393: | ||
241 | espi_setup_for_pm3393(adapter); | 256 | espi_setup_for_pm3393(adapter); |
242 | break; | 257 | else if (mac_type == CHBT_MAC_VSC7321) |
243 | default: | 258 | espi_setup_for_vsc7321(adapter); |
259 | else if (mac_type == CHBT_MAC_IXF1010) { | ||
260 | status_enable_extra = F_INTEL1010MODE; | ||
261 | espi_setup_for_ixf1010(adapter, nports); | ||
262 | } else | ||
244 | return -1; | 263 | return -1; |
245 | } | ||
246 | |||
247 | /* | ||
248 | * Make sure any pending interrupts from the SPI are | ||
249 | * Cleared before enabling the interrupt. | ||
250 | */ | ||
251 | writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE); | ||
252 | status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); | ||
253 | if (status & F_DIP2PARITYERR) { | ||
254 | cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we | ||
259 | * write the status as is. | ||
260 | */ | ||
261 | if (status && t1_is_T1B(espi->adapter)) | ||
262 | status = 1; | ||
263 | writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS); | ||
264 | 264 | ||
265 | writel(status_enable_extra | F_RXSTATUSENABLE, | 265 | writel(status_enable_extra | F_RXSTATUSENABLE, |
266 | adapter->regs + A_ESPI_FIFO_STATUS_ENABLE); | 266 | adapter->regs + A_ESPI_FIFO_STATUS_ENABLE); |
@@ -271,9 +271,11 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports) | |||
271 | * Always position the control at the 1st port egress IN | 271 | * Always position the control at the 1st port egress IN |
272 | * (sop,eop) counter to reduce PIOs for T/N210 workaround. | 272 | * (sop,eop) counter to reduce PIOs for T/N210 workaround. |
273 | */ | 273 | */ |
274 | espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL) | 274 | espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL); |
275 | & ~MON_MASK) | (F_MONITORED_DIRECTION | 275 | espi->misc_ctrl &= ~MON_MASK; |
276 | | F_MONITORED_INTERFACE); | 276 | espi->misc_ctrl |= F_MONITORED_DIRECTION; |
277 | if (adapter->params.nports == 1) | ||
278 | espi->misc_ctrl |= F_MONITORED_INTERFACE; | ||
277 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); | 279 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); |
278 | spin_lock_init(&espi->lock); | 280 | spin_lock_init(&espi->lock); |
279 | } | 281 | } |
@@ -299,8 +301,7 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val) | |||
299 | { | 301 | { |
300 | struct peespi *espi = adapter->espi; | 302 | struct peespi *espi = adapter->espi; |
301 | 303 | ||
302 | if (!is_T2(adapter)) | 304 | if (!is_T2(adapter)) return; |
303 | return; | ||
304 | spin_lock(&espi->lock); | 305 | spin_lock(&espi->lock); |
305 | espi->misc_ctrl = (val & ~MON_MASK) | | 306 | espi->misc_ctrl = (val & ~MON_MASK) | |
306 | (espi->misc_ctrl & MON_MASK); | 307 | (espi->misc_ctrl & MON_MASK); |
@@ -310,27 +311,61 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val) | |||
310 | 311 | ||
311 | u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait) | 312 | u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait) |
312 | { | 313 | { |
313 | u32 sel; | ||
314 | |||
315 | struct peespi *espi = adapter->espi; | 314 | struct peespi *espi = adapter->espi; |
315 | u32 sel; | ||
316 | 316 | ||
317 | if (!is_T2(adapter)) | 317 | if (!is_T2(adapter)) |
318 | return 0; | 318 | return 0; |
319 | |||
319 | sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2); | 320 | sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2); |
320 | if (!wait) { | 321 | if (!wait) { |
321 | if (!spin_trylock(&espi->lock)) | 322 | if (!spin_trylock(&espi->lock)) |
322 | return 0; | 323 | return 0; |
323 | } | 324 | } else |
324 | else | ||
325 | spin_lock(&espi->lock); | 325 | spin_lock(&espi->lock); |
326 | |||
326 | if ((sel != (espi->misc_ctrl & MON_MASK))) { | 327 | if ((sel != (espi->misc_ctrl & MON_MASK))) { |
327 | writel(((espi->misc_ctrl & ~MON_MASK) | sel), | 328 | writel(((espi->misc_ctrl & ~MON_MASK) | sel), |
328 | adapter->regs + A_ESPI_MISC_CONTROL); | 329 | adapter->regs + A_ESPI_MISC_CONTROL); |
329 | sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); | 330 | sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); |
330 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); | 331 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); |
331 | } | 332 | } else |
332 | else | ||
333 | sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); | 333 | sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); |
334 | spin_unlock(&espi->lock); | 334 | spin_unlock(&espi->lock); |
335 | return sel; | 335 | return sel; |
336 | } | 336 | } |
337 | |||
338 | /* | ||
339 | * This function is for T204 only. | ||
340 | * Compared with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in | ||
341 | * one shot, since there is no per-port counter on the out side. | ||
342 | */ | ||
343 | int | ||
344 | t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait) | ||
345 | { | ||
346 | struct peespi *espi = adapter->espi; | ||
347 | u8 i, nport = (u8)adapter->params.nports; | ||
348 | |||
349 | if (!wait) { | ||
350 | if (!spin_trylock(&espi->lock)) | ||
351 | return -1; | ||
352 | } else | ||
353 | spin_lock(&espi->lock); | ||
354 | |||
355 | if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) { | ||
356 | espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) | | ||
357 | F_MONITORED_DIRECTION; | ||
358 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); | ||
359 | } | ||
360 | for (i = 0 ; i < nport; i++, valp++) { | ||
361 | if (i) { | ||
362 | writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i), | ||
363 | adapter->regs + A_ESPI_MISC_CONTROL); | ||
364 | } | ||
365 | *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3); | ||
366 | } | ||
367 | |||
368 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); | ||
369 | spin_unlock(&espi->lock); | ||
370 | return 0; | ||
371 | } | ||
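For context, a caller of the new t1_espi_get_mon_t204() hands it one slot per port and gets the espiInTxSop counters back in a single pass. The fragment below is illustrative only and does not appear in the patch:

    /* Illustrative fragment (not from the patch): gather the per-port
     * espiInTxSop counters on a T204, which has four ports. */
    u32 sop[4];
    int i;

    if (t1_espi_get_mon_t204(adapter, sop, 1) == 0)
    	for (i = 0; i < 4; i++)
    		printk(KERN_DEBUG "port %d espiInTxSop = %u\n", i, sop[i]);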
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h index c90e37f8457c..84f2c98bc4cc 100644 --- a/drivers/net/chelsio/espi.h +++ b/drivers/net/chelsio/espi.h | |||
@@ -64,5 +64,6 @@ const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi); | |||
64 | 64 | ||
65 | void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val); | 65 | void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val); |
66 | u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait); | 66 | u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait); |
67 | int t1_espi_get_mon_t204(adapter_t *, u32 *, u8); | ||
67 | 68 | ||
68 | #endif /* _CXGB_ESPI_H_ */ | 69 | #endif /* _CXGB_ESPI_H_ */ |
diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/chelsio/fpga_defs.h new file mode 100644 index 000000000000..17a3c2ba36a3 --- /dev/null +++ b/drivers/net/chelsio/fpga_defs.h | |||
@@ -0,0 +1,232 @@ | |||
1 | /* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */ | ||
2 | |||
3 | /* | ||
4 | * FPGA specific definitions | ||
5 | */ | ||
6 | |||
7 | #ifndef __CHELSIO_FPGA_DEFS_H__ | ||
8 | #define __CHELSIO_FPGA_DEFS_H__ | ||
9 | |||
10 | #define FPGA_PCIX_ADDR_VERSION 0xA08 | ||
11 | #define FPGA_PCIX_ADDR_STAT 0xA0C | ||
12 | |||
13 | /* FPGA master interrupt Cause/Enable bits */ | ||
14 | #define FPGA_PCIX_INTERRUPT_SGE_ERROR 0x1 | ||
15 | #define FPGA_PCIX_INTERRUPT_SGE_DATA 0x2 | ||
16 | #define FPGA_PCIX_INTERRUPT_TP 0x4 | ||
17 | #define FPGA_PCIX_INTERRUPT_MC3 0x8 | ||
18 | #define FPGA_PCIX_INTERRUPT_GMAC 0x10 | ||
19 | #define FPGA_PCIX_INTERRUPT_PCIX 0x20 | ||
20 | |||
21 | /* TP interrupt register addresses */ | ||
22 | #define FPGA_TP_ADDR_INTERRUPT_ENABLE 0xA10 | ||
23 | #define FPGA_TP_ADDR_INTERRUPT_CAUSE 0xA14 | ||
24 | #define FPGA_TP_ADDR_VERSION 0xA18 | ||
25 | |||
26 | /* TP interrupt Cause/Enable bits */ | ||
27 | #define FPGA_TP_INTERRUPT_MC4 0x1 | ||
28 | #define FPGA_TP_INTERRUPT_MC5 0x2 | ||
29 | |||
30 | /* | ||
31 | * PM interrupt register addresses | ||
32 | */ | ||
33 | #define FPGA_MC3_REG_INTRENABLE 0xA20 | ||
34 | #define FPGA_MC3_REG_INTRCAUSE 0xA24 | ||
35 | #define FPGA_MC3_REG_VERSION 0xA28 | ||
36 | |||
37 | /* | ||
38 | * GMAC interrupt register addresses | ||
39 | */ | ||
40 | #define FPGA_GMAC_ADDR_INTERRUPT_ENABLE 0xA30 | ||
41 | #define FPGA_GMAC_ADDR_INTERRUPT_CAUSE 0xA34 | ||
42 | #define FPGA_GMAC_ADDR_VERSION 0xA38 | ||
43 | |||
44 | /* GMAC Cause/Enable bits */ | ||
45 | #define FPGA_GMAC_INTERRUPT_PORT0 0x1 | ||
46 | #define FPGA_GMAC_INTERRUPT_PORT1 0x2 | ||
47 | #define FPGA_GMAC_INTERRUPT_PORT2 0x4 | ||
48 | #define FPGA_GMAC_INTERRUPT_PORT3 0x8 | ||
49 | |||
50 | /* MI0 registers */ | ||
51 | #define A_MI0_CLK 0xb00 | ||
52 | |||
53 | #define S_MI0_CLK_DIV 0 | ||
54 | #define M_MI0_CLK_DIV 0xff | ||
55 | #define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV) | ||
56 | #define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV) | ||
57 | |||
58 | #define S_MI0_CLK_CNT 8 | ||
59 | #define M_MI0_CLK_CNT 0xff | ||
60 | #define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT) | ||
61 | #define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT) | ||
62 | |||
63 | #define A_MI0_CSR 0xb04 | ||
64 | |||
65 | #define S_MI0_CSR_POLL 0 | ||
66 | #define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL) | ||
67 | #define F_MI0_CSR_POLL V_MI0_CSR_POLL(1U) | ||
68 | |||
69 | #define S_MI0_PREAMBLE 1 | ||
70 | #define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE) | ||
71 | #define F_MI0_PREAMBLE V_MI0_PREAMBLE(1U) | ||
72 | |||
73 | #define S_MI0_INTR_ENABLE 2 | ||
74 | #define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE) | ||
75 | #define F_MI0_INTR_ENABLE V_MI0_INTR_ENABLE(1U) | ||
76 | |||
77 | #define S_MI0_BUSY 3 | ||
78 | #define V_MI0_BUSY(x) ((x) << S_MI0_BUSY) | ||
79 | #define F_MI0_BUSY V_MI0_BUSY(1U) | ||
80 | |||
81 | #define S_MI0_MDIO 4 | ||
82 | #define V_MI0_MDIO(x) ((x) << S_MI0_MDIO) | ||
83 | #define F_MI0_MDIO V_MI0_MDIO(1U) | ||
84 | |||
85 | #define A_MI0_ADDR 0xb08 | ||
86 | |||
87 | #define S_MI0_PHY_REG_ADDR 0 | ||
88 | #define M_MI0_PHY_REG_ADDR 0x1f | ||
89 | #define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR) | ||
90 | #define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR) | ||
91 | |||
92 | #define S_MI0_PHY_ADDR 5 | ||
93 | #define M_MI0_PHY_ADDR 0x1f | ||
94 | #define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR) | ||
95 | #define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR) | ||
96 | |||
97 | #define A_MI0_DATA_EXT 0xb0c | ||
98 | #define A_MI0_DATA_INT 0xb10 | ||
99 | |||
100 | /* GMAC registers */ | ||
101 | #define A_GMAC_MACID_LO 0x28 | ||
102 | #define A_GMAC_MACID_HI 0x2c | ||
103 | #define A_GMAC_CSR 0x30 | ||
104 | |||
105 | #define S_INTERFACE 0 | ||
106 | #define M_INTERFACE 0x3 | ||
107 | #define V_INTERFACE(x) ((x) << S_INTERFACE) | ||
108 | #define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE) | ||
109 | |||
110 | #define S_MAC_TX_ENABLE 2 | ||
111 | #define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE) | ||
112 | #define F_MAC_TX_ENABLE V_MAC_TX_ENABLE(1U) | ||
113 | |||
114 | #define S_MAC_RX_ENABLE 3 | ||
115 | #define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE) | ||
116 | #define F_MAC_RX_ENABLE V_MAC_RX_ENABLE(1U) | ||
117 | |||
118 | #define S_MAC_LB_ENABLE 4 | ||
119 | #define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE) | ||
120 | #define F_MAC_LB_ENABLE V_MAC_LB_ENABLE(1U) | ||
121 | |||
122 | #define S_MAC_SPEED 5 | ||
123 | #define M_MAC_SPEED 0x3 | ||
124 | #define V_MAC_SPEED(x) ((x) << S_MAC_SPEED) | ||
125 | #define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED) | ||
126 | |||
127 | #define S_MAC_HD_FC_ENABLE 7 | ||
128 | #define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE) | ||
129 | #define F_MAC_HD_FC_ENABLE V_MAC_HD_FC_ENABLE(1U) | ||
130 | |||
131 | #define S_MAC_HALF_DUPLEX 8 | ||
132 | #define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX) | ||
133 | #define F_MAC_HALF_DUPLEX V_MAC_HALF_DUPLEX(1U) | ||
134 | |||
135 | #define S_MAC_PROMISC 9 | ||
136 | #define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC) | ||
137 | #define F_MAC_PROMISC V_MAC_PROMISC(1U) | ||
138 | |||
139 | #define S_MAC_MC_ENABLE 10 | ||
140 | #define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE) | ||
141 | #define F_MAC_MC_ENABLE V_MAC_MC_ENABLE(1U) | ||
142 | |||
143 | #define S_MAC_RESET 11 | ||
144 | #define V_MAC_RESET(x) ((x) << S_MAC_RESET) | ||
145 | #define F_MAC_RESET V_MAC_RESET(1U) | ||
146 | |||
147 | #define S_MAC_RX_PAUSE_ENABLE 12 | ||
148 | #define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE) | ||
149 | #define F_MAC_RX_PAUSE_ENABLE V_MAC_RX_PAUSE_ENABLE(1U) | ||
150 | |||
151 | #define S_MAC_TX_PAUSE_ENABLE 13 | ||
152 | #define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE) | ||
153 | #define F_MAC_TX_PAUSE_ENABLE V_MAC_TX_PAUSE_ENABLE(1U) | ||
154 | |||
155 | #define S_MAC_LWM_ENABLE 14 | ||
156 | #define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE) | ||
157 | #define F_MAC_LWM_ENABLE V_MAC_LWM_ENABLE(1U) | ||
158 | |||
159 | #define S_MAC_MAGIC_PKT_ENABLE 15 | ||
160 | #define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE) | ||
161 | #define F_MAC_MAGIC_PKT_ENABLE V_MAC_MAGIC_PKT_ENABLE(1U) | ||
162 | |||
163 | #define S_MAC_ISL_ENABLE 16 | ||
164 | #define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE) | ||
165 | #define F_MAC_ISL_ENABLE V_MAC_ISL_ENABLE(1U) | ||
166 | |||
167 | #define S_MAC_JUMBO_ENABLE 17 | ||
168 | #define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE) | ||
169 | #define F_MAC_JUMBO_ENABLE V_MAC_JUMBO_ENABLE(1U) | ||
170 | |||
171 | #define S_MAC_RX_PAD_ENABLE 18 | ||
172 | #define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE) | ||
173 | #define F_MAC_RX_PAD_ENABLE V_MAC_RX_PAD_ENABLE(1U) | ||
174 | |||
175 | #define S_MAC_RX_CRC_ENABLE 19 | ||
176 | #define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE) | ||
177 | #define F_MAC_RX_CRC_ENABLE V_MAC_RX_CRC_ENABLE(1U) | ||
178 | |||
179 | #define A_GMAC_IFS 0x34 | ||
180 | |||
181 | #define S_MAC_IFS2 0 | ||
182 | #define M_MAC_IFS2 0x3f | ||
183 | #define V_MAC_IFS2(x) ((x) << S_MAC_IFS2) | ||
184 | #define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2) | ||
185 | |||
186 | #define S_MAC_IFS1 8 | ||
187 | #define M_MAC_IFS1 0x7f | ||
188 | #define V_MAC_IFS1(x) ((x) << S_MAC_IFS1) | ||
189 | #define G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1) | ||
190 | |||
191 | #define A_GMAC_JUMBO_FRAME_LEN 0x38 | ||
192 | #define A_GMAC_LNK_DLY 0x3c | ||
193 | #define A_GMAC_PAUSETIME 0x40 | ||
194 | #define A_GMAC_MCAST_LO 0x44 | ||
195 | #define A_GMAC_MCAST_HI 0x48 | ||
196 | #define A_GMAC_MCAST_MASK_LO 0x4c | ||
197 | #define A_GMAC_MCAST_MASK_HI 0x50 | ||
198 | #define A_GMAC_RMT_CNT 0x54 | ||
199 | #define A_GMAC_RMT_DATA 0x58 | ||
200 | #define A_GMAC_BACKOFF_SEED 0x5c | ||
201 | #define A_GMAC_TXF_THRES 0x60 | ||
202 | |||
203 | #define S_TXF_READ_THRESHOLD 0 | ||
204 | #define M_TXF_READ_THRESHOLD 0xff | ||
205 | #define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD) | ||
206 | #define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD) | ||
207 | |||
208 | #define S_TXF_WRITE_THRESHOLD 16 | ||
209 | #define M_TXF_WRITE_THRESHOLD 0xff | ||
210 | #define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD) | ||
211 | #define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD) | ||
212 | |||
213 | #define MAC_REG_BASE 0x600 | ||
214 | #define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg)) | ||
215 | |||
216 | #define MAC_REG_IDLO(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_LO) | ||
217 | #define MAC_REG_IDHI(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_HI) | ||
218 | #define MAC_REG_CSR(idx) MAC_REG_ADDR(idx, A_GMAC_CSR) | ||
219 | #define MAC_REG_IFS(idx) MAC_REG_ADDR(idx, A_GMAC_IFS) | ||
220 | #define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN) | ||
221 | #define MAC_REG_LINKDLY(idx) MAC_REG_ADDR(idx, A_GMAC_LNK_DLY) | ||
222 | #define MAC_REG_PAUSETIME(idx) MAC_REG_ADDR(idx, A_GMAC_PAUSETIME) | ||
223 | #define MAC_REG_CASTLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_LO) | ||
224 | #define MAC_REG_MCASTHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_HI) | ||
225 | #define MAC_REG_CASTMASKLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO) | ||
226 | #define MAC_REG_MCASTMASKHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI) | ||
227 | #define MAC_REG_RMCNT(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_CNT) | ||
228 | #define MAC_REG_RMDATA(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_DATA) | ||
229 | #define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED) | ||
230 | #define MAC_REG_TXFTHRESHOLDS(idx) MAC_REG_ADDR(idx, A_GMAC_TXF_THRES) | ||
231 | |||
232 | #endif | ||
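fpga_defs.h follows the driver's usual S_/M_/V_/G_ convention: S_* is the field's bit offset, M_* its mask, V_*(x) places a value into the field, and G_*(x) extracts it from a register word. A self-contained example (plain C, macros copied verbatim from the header above) composing and decoding the MI0 clock register fields:

    #include <stdio.h>
    #include <stdint.h>

    /* Copied from fpga_defs.h above. */
    #define S_MI0_CLK_DIV    0
    #define M_MI0_CLK_DIV    0xff
    #define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV)
    #define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV)

    #define S_MI0_CLK_CNT    8
    #define M_MI0_CLK_CNT    0xff
    #define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT)
    #define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT)

    int main(void)
    {
    	/* Compose a value for A_MI0_CLK with an example divider and count. */
    	uint32_t val = V_MI0_CLK_DIV(0x20) | V_MI0_CLK_CNT(0x04);

    	printf("A_MI0_CLK value = 0x%08x\n", (unsigned)val);	/* 0x00000420 */
    	printf("divider = 0x%02x, count = 0x%02x\n",
    	       (unsigned)G_MI0_CLK_DIV(val),
    	       (unsigned)G_MI0_CLK_CNT(val));			/* 0x20, 0x04 */
    	return 0;
    }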
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h index 746b0eeea964..a2b8ad9b5535 100644 --- a/drivers/net/chelsio/gmac.h +++ b/drivers/net/chelsio/gmac.h | |||
@@ -62,6 +62,8 @@ struct cmac_statistics { | |||
62 | u64 TxInternalMACXmitError; | 62 | u64 TxInternalMACXmitError; |
63 | u64 TxFramesWithExcessiveDeferral; | 63 | u64 TxFramesWithExcessiveDeferral; |
64 | u64 TxFCSErrors; | 64 | u64 TxFCSErrors; |
65 | u64 TxJumboFramesOK; | ||
66 | u64 TxJumboOctetsOK; | ||
65 | 67 | ||
66 | /* Receive */ | 68 | /* Receive */ |
67 | u64 RxOctetsOK; | 69 | u64 RxOctetsOK; |
@@ -81,6 +83,8 @@ struct cmac_statistics { | |||
81 | u64 RxInRangeLengthErrors; | 83 | u64 RxInRangeLengthErrors; |
82 | u64 RxOutOfRangeLengthField; | 84 | u64 RxOutOfRangeLengthField; |
83 | u64 RxFrameTooLongErrors; | 85 | u64 RxFrameTooLongErrors; |
86 | u64 RxJumboFramesOK; | ||
87 | u64 RxJumboOctetsOK; | ||
84 | }; | 88 | }; |
85 | 89 | ||
86 | struct cmac_ops { | 90 | struct cmac_ops { |
@@ -128,6 +132,7 @@ struct gmac { | |||
128 | extern struct gmac t1_pm3393_ops; | 132 | extern struct gmac t1_pm3393_ops; |
129 | extern struct gmac t1_chelsio_mac_ops; | 133 | extern struct gmac t1_chelsio_mac_ops; |
130 | extern struct gmac t1_vsc7321_ops; | 134 | extern struct gmac t1_vsc7321_ops; |
135 | extern struct gmac t1_vsc7326_ops; | ||
131 | extern struct gmac t1_ixf1010_ops; | 136 | extern struct gmac t1_ixf1010_ops; |
132 | extern struct gmac t1_dummy_mac_ops; | 137 | extern struct gmac t1_dummy_mac_ops; |
133 | 138 | ||
diff --git a/drivers/net/chelsio/ixf1010.c b/drivers/net/chelsio/ixf1010.c new file mode 100644 index 000000000000..5b8f144e83d4 --- /dev/null +++ b/drivers/net/chelsio/ixf1010.c | |||
@@ -0,0 +1,485 @@ | |||
1 | /* $Date: 2005/11/12 02:13:49 $ $RCSfile: ixf1010.c,v $ $Revision: 1.36 $ */ | ||
2 | #include "gmac.h" | ||
3 | #include "elmer0.h" | ||
4 | |||
5 | /* Update fast changing statistics every 15 seconds */ | ||
6 | #define STATS_TICK_SECS 15 | ||
7 | /* 30 minutes for full statistics update */ | ||
8 | #define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS) | ||
9 | |||
10 | /* | ||
11 | * The IXF1010 can handle frames up to 16383 bytes but it's optimized for | ||
12 | * frames up to 9831 (0x2667) bytes, so we limit jumbo frame size to this. | ||
13 | * This length includes ethernet header and FCS. | ||
14 | */ | ||
15 | #define MAX_FRAME_SIZE 0x2667 | ||
16 | |||
17 | /* MAC registers */ | ||
18 | enum { | ||
19 | /* Per-port registers */ | ||
20 | REG_MACADDR_LOW = 0, | ||
21 | REG_MACADDR_HIGH = 0x4, | ||
22 | REG_FDFC_TYPE = 0xC, | ||
23 | REG_FC_TX_TIMER_VALUE = 0x1c, | ||
24 | REG_IPG_RX_TIME1 = 0x28, | ||
25 | REG_IPG_RX_TIME2 = 0x2c, | ||
26 | REG_IPG_TX_TIME = 0x30, | ||
27 | REG_PAUSE_THRES = 0x38, | ||
28 | REG_MAX_FRAME_SIZE = 0x3c, | ||
29 | REG_RGMII_SPEED = 0x40, | ||
30 | REG_FC_ENABLE = 0x48, | ||
31 | REG_DISCARD_CTRL_FRAMES = 0x54, | ||
32 | REG_DIVERSE_CONFIG = 0x60, | ||
33 | REG_RX_FILTER = 0x64, | ||
34 | REG_MC_ADDR_LOW = 0x68, | ||
35 | REG_MC_ADDR_HIGH = 0x6c, | ||
36 | |||
37 | REG_RX_OCTETS_OK = 0x80, | ||
38 | REG_RX_OCTETS_BAD = 0x84, | ||
39 | REG_RX_UC_PKTS = 0x88, | ||
40 | REG_RX_MC_PKTS = 0x8c, | ||
41 | REG_RX_BC_PKTS = 0x90, | ||
42 | REG_RX_FCS_ERR = 0xb0, | ||
43 | REG_RX_TAGGED = 0xb4, | ||
44 | REG_RX_DATA_ERR = 0xb8, | ||
45 | REG_RX_ALIGN_ERR = 0xbc, | ||
46 | REG_RX_LONG_ERR = 0xc0, | ||
47 | REG_RX_JABBER_ERR = 0xc4, | ||
48 | REG_RX_PAUSE_FRAMES = 0xc8, | ||
49 | REG_RX_UNKNOWN_CTRL_FRAMES = 0xcc, | ||
50 | REG_RX_VERY_LONG_ERR = 0xd0, | ||
51 | REG_RX_RUNT_ERR = 0xd4, | ||
52 | REG_RX_SHORT_ERR = 0xd8, | ||
53 | REG_RX_SYMBOL_ERR = 0xe4, | ||
54 | |||
55 | REG_TX_OCTETS_OK = 0x100, | ||
56 | REG_TX_OCTETS_BAD = 0x104, | ||
57 | REG_TX_UC_PKTS = 0x108, | ||
58 | REG_TX_MC_PKTS = 0x10c, | ||
59 | REG_TX_BC_PKTS = 0x110, | ||
60 | REG_TX_EXCESSIVE_LEN_DROP = 0x14c, | ||
61 | REG_TX_UNDERRUN = 0x150, | ||
62 | REG_TX_TAGGED = 0x154, | ||
63 | REG_TX_PAUSE_FRAMES = 0x15C, | ||
64 | |||
65 | /* Global registers */ | ||
66 | REG_PORT_ENABLE = 0x1400, | ||
67 | |||
68 | REG_JTAG_ID = 0x1430, | ||
69 | |||
70 | RX_FIFO_HIGH_WATERMARK_BASE = 0x1600, | ||
71 | RX_FIFO_LOW_WATERMARK_BASE = 0x1628, | ||
72 | RX_FIFO_FRAMES_REMOVED_BASE = 0x1650, | ||
73 | |||
74 | REG_RX_ERR_DROP = 0x167c, | ||
75 | REG_RX_FIFO_OVERFLOW_EVENT = 0x1680, | ||
76 | |||
77 | TX_FIFO_HIGH_WATERMARK_BASE = 0x1800, | ||
78 | TX_FIFO_LOW_WATERMARK_BASE = 0x1828, | ||
79 | TX_FIFO_XFER_THRES_BASE = 0x1850, | ||
80 | |||
81 | REG_TX_FIFO_OVERFLOW_EVENT = 0x1878, | ||
82 | REG_TX_FIFO_OOS_EVENT = 0x1884, | ||
83 | |||
84 | TX_FIFO_FRAMES_REMOVED_BASE = 0x1888, | ||
85 | |||
86 | REG_SPI_RX_BURST = 0x1c00, | ||
87 | REG_SPI_RX_TRAINING = 0x1c04, | ||
88 | REG_SPI_RX_CALENDAR = 0x1c08, | ||
89 | REG_SPI_TX_SYNC = 0x1c0c | ||
90 | }; | ||
91 | |||
92 | enum { /* RMON registers */ | ||
93 | REG_RxOctetsTotalOK = 0x80, | ||
94 | REG_RxOctetsBad = 0x84, | ||
95 | REG_RxUCPkts = 0x88, | ||
96 | REG_RxMCPkts = 0x8c, | ||
97 | REG_RxBCPkts = 0x90, | ||
98 | REG_RxJumboPkts = 0xac, | ||
99 | REG_RxFCSErrors = 0xb0, | ||
100 | REG_RxDataErrors = 0xb8, | ||
101 | REG_RxAlignErrors = 0xbc, | ||
102 | REG_RxLongErrors = 0xc0, | ||
103 | REG_RxJabberErrors = 0xc4, | ||
104 | REG_RxPauseMacControlCounter = 0xc8, | ||
105 | REG_RxVeryLongErrors = 0xd0, | ||
106 | REG_RxRuntErrors = 0xd4, | ||
107 | REG_RxShortErrors = 0xd8, | ||
108 | REG_RxSequenceErrors = 0xe0, | ||
109 | REG_RxSymbolErrors = 0xe4, | ||
110 | |||
111 | REG_TxOctetsTotalOK = 0x100, | ||
112 | REG_TxOctetsBad = 0x104, | ||
113 | REG_TxUCPkts = 0x108, | ||
114 | REG_TxMCPkts = 0x10c, | ||
115 | REG_TxBCPkts = 0x110, | ||
116 | REG_TxJumboPkts = 0x12C, | ||
117 | REG_TxTotalCollisions = 0x134, | ||
118 | REG_TxExcessiveLengthDrop = 0x14c, | ||
119 | REG_TxUnderrun = 0x150, | ||
120 | REG_TxCRCErrors = 0x158, | ||
121 | REG_TxPauseFrames = 0x15c | ||
122 | }; | ||
123 | |||
124 | enum { | ||
125 | DIVERSE_CONFIG_PAD_ENABLE = 0x80, | ||
126 | DIVERSE_CONFIG_CRC_ADD = 0x40 | ||
127 | }; | ||
128 | |||
129 | #define MACREG_BASE 0 | ||
130 | #define MACREG(mac, mac_reg) ((mac)->instance->mac_base + (mac_reg)) | ||
131 | |||
132 | struct _cmac_instance { | ||
133 | u32 mac_base; | ||
134 | u32 index; | ||
135 | u32 version; | ||
136 | u32 ticks; | ||
137 | }; | ||
138 | |||
139 | static void disable_port(struct cmac *mac) | ||
140 | { | ||
141 | u32 val; | ||
142 | |||
143 | t1_tpi_read(mac->adapter, REG_PORT_ENABLE, &val); | ||
144 | val &= ~(1 << mac->instance->index); | ||
145 | t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val); | ||
146 | } | ||
147 | |||
148 | #define RMON_UPDATE(mac, name, stat_name) \ | ||
149 | t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \ | ||
150 | (mac)->stats.stat_name += val; | ||
151 | |||
152 | /* | ||
153 | * Read the current values of the RMON counters and add them to the cumulative | ||
154 | * port statistics. The HW RMON counters are cleared by this operation. | ||
155 | */ | ||
156 | static void port_stats_update(struct cmac *mac) | ||
157 | { | ||
158 | u32 val; | ||
159 | |||
160 | /* Rx stats */ | ||
161 | RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK); | ||
162 | RMON_UPDATE(mac, RxOctetsBad, RxOctetsBad); | ||
163 | RMON_UPDATE(mac, RxUCPkts, RxUnicastFramesOK); | ||
164 | RMON_UPDATE(mac, RxMCPkts, RxMulticastFramesOK); | ||
165 | RMON_UPDATE(mac, RxBCPkts, RxBroadcastFramesOK); | ||
166 | RMON_UPDATE(mac, RxJumboPkts, RxJumboFramesOK); | ||
167 | RMON_UPDATE(mac, RxFCSErrors, RxFCSErrors); | ||
168 | RMON_UPDATE(mac, RxAlignErrors, RxAlignErrors); | ||
169 | RMON_UPDATE(mac, RxLongErrors, RxFrameTooLongErrors); | ||
170 | RMON_UPDATE(mac, RxVeryLongErrors, RxFrameTooLongErrors); | ||
171 | RMON_UPDATE(mac, RxPauseMacControlCounter, RxPauseFrames); | ||
172 | RMON_UPDATE(mac, RxDataErrors, RxDataErrors); | ||
173 | RMON_UPDATE(mac, RxJabberErrors, RxJabberErrors); | ||
174 | RMON_UPDATE(mac, RxRuntErrors, RxRuntErrors); | ||
175 | RMON_UPDATE(mac, RxShortErrors, RxRuntErrors); | ||
176 | RMON_UPDATE(mac, RxSequenceErrors, RxSequenceErrors); | ||
177 | RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); | ||
178 | |||
179 | /* Tx stats (skip collision stats as we are full-duplex only) */ | ||
180 | RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK); | ||
181 | RMON_UPDATE(mac, TxOctetsBad, TxOctetsBad); | ||
182 | RMON_UPDATE(mac, TxUCPkts, TxUnicastFramesOK); | ||
183 | RMON_UPDATE(mac, TxMCPkts, TxMulticastFramesOK); | ||
184 | RMON_UPDATE(mac, TxBCPkts, TxBroadcastFramesOK); | ||
185 | RMON_UPDATE(mac, TxJumboPkts, TxJumboFramesOK); | ||
186 | RMON_UPDATE(mac, TxPauseFrames, TxPauseFrames); | ||
187 | RMON_UPDATE(mac, TxExcessiveLengthDrop, TxLengthErrors); | ||
188 | RMON_UPDATE(mac, TxUnderrun, TxUnderrun); | ||
189 | RMON_UPDATE(mac, TxCRCErrors, TxFCSErrors); | ||
190 | } | ||
191 | |||
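One small caveat on the RMON_UPDATE macro above: as written it expands to two statements, so it only behaves like a single statement because every call site uses it at block scope. A conventional hardening (a suggestion, not something this patch does) would wrap it in do { } while (0):

    /* Suggested variant, not in the patch: safe inside an unbraced if/else. */
    #define RMON_UPDATE(mac, name, stat_name) do { \
    	t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
    	(mac)->stats.stat_name += val; \
    } while (0)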
192 | /* No-op interrupt operation as this MAC does not support interrupts */ | ||
193 | static int mac_intr_op(struct cmac *mac) | ||
194 | { | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | /* Expect MAC address to be in network byte order. */ | ||
199 | static int mac_set_address(struct cmac *mac, u8 addr[6]) | ||
200 | { | ||
201 | u32 addr_lo, addr_hi; | ||
202 | |||
203 | addr_lo = addr[2]; | ||
204 | addr_lo = (addr_lo << 8) | addr[3]; | ||
205 | addr_lo = (addr_lo << 8) | addr[4]; | ||
206 | addr_lo = (addr_lo << 8) | addr[5]; | ||
207 | |||
208 | addr_hi = addr[0]; | ||
209 | addr_hi = (addr_hi << 8) | addr[1]; | ||
210 | |||
211 | t1_tpi_write(mac->adapter, MACREG(mac, REG_MACADDR_LOW), addr_lo); | ||
212 | t1_tpi_write(mac->adapter, MACREG(mac, REG_MACADDR_HIGH), addr_hi); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static int mac_get_address(struct cmac *mac, u8 addr[6]) | ||
217 | { | ||
218 | u32 addr_lo, addr_hi; | ||
219 | |||
220 | t1_tpi_read(mac->adapter, MACREG(mac, REG_MACADDR_LOW), &addr_lo); | ||
221 | t1_tpi_read(mac->adapter, MACREG(mac, REG_MACADDR_HIGH), &addr_hi); | ||
222 | |||
223 | addr[0] = (u8) (addr_hi >> 8); | ||
224 | addr[1] = (u8) addr_hi; | ||
225 | addr[2] = (u8) (addr_lo >> 24); | ||
226 | addr[3] = (u8) (addr_lo >> 16); | ||
227 | addr[4] = (u8) (addr_lo >> 8); | ||
228 | addr[5] = (u8) addr_lo; | ||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | /* This is intended to reset a port, not the whole MAC */ | ||
233 | static int mac_reset(struct cmac *mac) | ||
234 | { | ||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm) | ||
239 | { | ||
240 | u32 val, new_mode; | ||
241 | adapter_t *adapter = mac->adapter; | ||
242 | u32 addr_lo, addr_hi; | ||
243 | u8 *addr; | ||
244 | |||
245 | t1_tpi_read(adapter, MACREG(mac, REG_RX_FILTER), &val); | ||
246 | new_mode = val & ~7; | ||
247 | if (!t1_rx_mode_promisc(rm) && mac->instance->version > 0) | ||
248 | new_mode |= 1; /* only set if version > 0 due to erratum */ | ||
249 | if (!t1_rx_mode_promisc(rm) && !t1_rx_mode_allmulti(rm) | ||
250 | && t1_rx_mode_mc_cnt(rm) <= 1) | ||
251 | new_mode |= 2; | ||
252 | if (new_mode != val) | ||
253 | t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), new_mode); | ||
254 | switch (t1_rx_mode_mc_cnt(rm)) { | ||
255 | case 0: | ||
256 | t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_LOW), 0); | ||
257 | t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_HIGH), 0); | ||
258 | break; | ||
259 | case 1: | ||
260 | addr = t1_get_next_mcaddr(rm); | ||
261 | addr_lo = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | | ||
262 | addr[5]; | ||
263 | addr_hi = (addr[0] << 8) | addr[1]; | ||
264 | t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_LOW), addr_lo); | ||
265 | t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_HIGH), addr_hi); | ||
266 | break; | ||
267 | default: | ||
268 | break; | ||
269 | } | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | static int mac_set_mtu(struct cmac *mac, int mtu) | ||
274 | { | ||
275 | /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */ | ||
276 | if (mtu > (MAX_FRAME_SIZE - 14 - 4)) return -EINVAL; | ||
277 | t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE), | ||
278 | mtu + 14 + 4); | ||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, | ||
283 | int fc) | ||
284 | { | ||
285 | u32 val; | ||
286 | |||
287 | if (speed >= 0 && speed != SPEED_100 && speed != SPEED_1000) | ||
288 | return -1; | ||
289 | if (duplex >= 0 && duplex != DUPLEX_FULL) | ||
290 | return -1; | ||
291 | |||
292 | if (speed >= 0) { | ||
293 | val = speed == SPEED_100 ? 1 : 2; | ||
294 | t1_tpi_write(mac->adapter, MACREG(mac, REG_RGMII_SPEED), val); | ||
295 | } | ||
296 | |||
297 | t1_tpi_read(mac->adapter, MACREG(mac, REG_FC_ENABLE), &val); | ||
298 | val &= ~3; | ||
299 | if (fc & PAUSE_RX) | ||
300 | val |= 1; | ||
301 | if (fc & PAUSE_TX) | ||
302 | val |= 2; | ||
303 | t1_tpi_write(mac->adapter, MACREG(mac, REG_FC_ENABLE), val); | ||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static int mac_get_speed_duplex_fc(struct cmac *mac, int *speed, int *duplex, | ||
308 | int *fc) | ||
309 | { | ||
310 | u32 val; | ||
311 | |||
312 | if (duplex) | ||
313 | *duplex = DUPLEX_FULL; | ||
314 | if (speed) { | ||
315 | t1_tpi_read(mac->adapter, MACREG(mac, REG_RGMII_SPEED), | ||
316 | &val); | ||
317 | *speed = (val & 2) ? SPEED_1000 : SPEED_100; | ||
318 | } | ||
319 | if (fc) { | ||
320 | t1_tpi_read(mac->adapter, MACREG(mac, REG_FC_ENABLE), &val); | ||
321 | *fc = 0; | ||
322 | if (val & 1) | ||
323 | *fc |= PAUSE_RX; | ||
324 | if (val & 2) | ||
325 | *fc |= PAUSE_TX; | ||
326 | } | ||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | static void enable_port(struct cmac *mac) | ||
331 | { | ||
332 | u32 val; | ||
333 | u32 index = mac->instance->index; | ||
334 | adapter_t *adapter = mac->adapter; | ||
335 | |||
336 | t1_tpi_read(adapter, MACREG(mac, REG_DIVERSE_CONFIG), &val); | ||
337 | val |= DIVERSE_CONFIG_CRC_ADD | DIVERSE_CONFIG_PAD_ENABLE; | ||
338 | t1_tpi_write(adapter, MACREG(mac, REG_DIVERSE_CONFIG), val); | ||
339 | if (mac->instance->version > 0) | ||
340 | t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), 3); | ||
341 | else /* Don't enable unicast address filtering due to IXF1010 bug */ | ||
342 | t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), 2); | ||
343 | |||
344 | t1_tpi_read(adapter, REG_RX_ERR_DROP, &val); | ||
345 | val |= (1 << index); | ||
346 | t1_tpi_write(adapter, REG_RX_ERR_DROP, val); | ||
347 | |||
348 | /* | ||
349 | * Clear the port RMON registers by adding their current values to the | ||
350 | * cumulative port stats and then clearing the stats. Really. | ||
351 | */ | ||
352 | port_stats_update(mac); | ||
353 | memset(&mac->stats, 0, sizeof(struct cmac_statistics)); | ||
354 | mac->instance->ticks = 0; | ||
355 | |||
356 | t1_tpi_read(adapter, REG_PORT_ENABLE, &val); | ||
357 | val |= (1 << index); | ||
358 | t1_tpi_write(adapter, REG_PORT_ENABLE, val); | ||
359 | |||
360 | index <<= 2; | ||
361 | if (is_T2(adapter)) { | ||
362 | /* T204: set the Fifo water level & threshold */ | ||
363 | t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740); | ||
364 | t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730); | ||
365 | t1_tpi_write(adapter, TX_FIFO_HIGH_WATERMARK_BASE + index, 0x600); | ||
366 | t1_tpi_write(adapter, TX_FIFO_LOW_WATERMARK_BASE + index, 0x1d0); | ||
367 | t1_tpi_write(adapter, TX_FIFO_XFER_THRES_BASE + index, 0x1100); | ||
368 | } else { | ||
369 | /* | ||
370 | * Set the TX FIFO threshold to 0x400 instead of 0x100 to work around | ||
371 | * an underrun problem. Intel has blessed this solution. | ||
372 | */ | ||
373 | t1_tpi_write(adapter, TX_FIFO_XFER_THRES_BASE + index, 0x400); | ||
374 | } | ||
375 | } | ||
376 | |||
377 | /* IXF1010 ports do not have separate enables for TX and RX */ | ||
378 | static int mac_enable(struct cmac *mac, int which) | ||
379 | { | ||
380 | if (which & (MAC_DIRECTION_RX | MAC_DIRECTION_TX)) | ||
381 | enable_port(mac); | ||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | static int mac_disable(struct cmac *mac, int which) | ||
386 | { | ||
387 | if (which & (MAC_DIRECTION_RX | MAC_DIRECTION_TX)) | ||
388 | disable_port(mac); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | /* | ||
393 | * This function is called periodically to accumulate the current values of the | ||
394 | * RMON counters into the port statistics. Since the counters are only 32 bits | ||
395 | * some of them can overflow in less than a minute at GigE speeds, so this | ||
396 | * function should be called every 30 seconds or so. | ||
397 | * | ||
398 | * To cut down on reading costs we update only the octet counters at each tick | ||
399 | * and do a full update at major ticks, which can be every 30 minutes or more. | ||
400 | */ | ||
401 | static const struct cmac_statistics *mac_update_statistics(struct cmac *mac, | ||
402 | int flag) | ||
403 | { | ||
404 | if (flag == MAC_STATS_UPDATE_FULL || | ||
405 | MAJOR_UPDATE_TICKS <= mac->instance->ticks) { | ||
406 | port_stats_update(mac); | ||
407 | mac->instance->ticks = 0; | ||
408 | } else { | ||
409 | u32 val; | ||
410 | |||
411 | RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK); | ||
412 | RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK); | ||
413 | mac->instance->ticks++; | ||
414 | } | ||
415 | return &mac->stats; | ||
416 | } | ||
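For context on the comment above: at gigabit rate the octet counters advance by roughly 125 MB/s, so a 32-bit counter can wrap in about 2^32 / 125,000,000 ≈ 34 seconds, which is why the update is expected every 30 seconds or so. The sketch below is a hypothetical, standalone illustration of one common way to fold a free-running 32-bit counter into a 64-bit total before it wraps; it is not the driver's RMON_UPDATE macro (which is defined earlier in ixf1010.c and not shown in this hunk).

#include <stdint.h>

/*
 * Hypothetical helper: fold a free-running 32-bit hardware counter into a
 * 64-bit software total.  Unsigned subtraction yields the correct delta
 * even across a single wrap, provided the helper runs often enough.
 */
static void accumulate32(uint64_t *total, uint32_t *last, uint32_t now)
{
	*total += (uint32_t)(now - *last);
	*last = now;
}

int main(void)
{
	uint64_t total = 0;
	uint32_t last = 0xfffffff0u;			/* just before wrapping */

	accumulate32(&total, &last, 0x00000010u);	/* wrapped; delta 0x20  */
	return total == 0x20 ? 0 : 1;
}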
417 | |||
418 | static void mac_destroy(struct cmac *mac) | ||
419 | { | ||
420 | kfree(mac); | ||
421 | } | ||
422 | |||
423 | static struct cmac_ops ixf1010_ops = { | ||
424 | .destroy = mac_destroy, | ||
425 | .reset = mac_reset, | ||
426 | .interrupt_enable = mac_intr_op, | ||
427 | .interrupt_disable = mac_intr_op, | ||
428 | .interrupt_clear = mac_intr_op, | ||
429 | .enable = mac_enable, | ||
430 | .disable = mac_disable, | ||
431 | .set_mtu = mac_set_mtu, | ||
432 | .set_rx_mode = mac_set_rx_mode, | ||
433 | .set_speed_duplex_fc = mac_set_speed_duplex_fc, | ||
434 | .get_speed_duplex_fc = mac_get_speed_duplex_fc, | ||
435 | .statistics_update = mac_update_statistics, | ||
436 | .macaddress_get = mac_get_address, | ||
437 | .macaddress_set = mac_set_address, | ||
438 | }; | ||
439 | |||
440 | static int ixf1010_mac_reset(adapter_t *adapter) | ||
441 | { | ||
442 | u32 val; | ||
443 | |||
444 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
445 | if ((val & 1) != 0) { | ||
446 | val &= ~1; | ||
447 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
448 | udelay(2); | ||
449 | } | ||
450 | val |= 1; | ||
451 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
452 | udelay(2); | ||
453 | |||
454 | t1_tpi_write(adapter, REG_PORT_ENABLE, 0); | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static struct cmac *ixf1010_mac_create(adapter_t *adapter, int index) | ||
459 | { | ||
460 | struct cmac *mac; | ||
461 | u32 val; | ||
462 | |||
463 | if (index > 9) return NULL; | ||
464 | |||
465 | mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); | ||
466 | if (!mac) return NULL; | ||
467 | |||
468 | mac->ops = &ixf1010_ops; | ||
469 | mac->instance = (cmac_instance *)(mac + 1); | ||
470 | |||
471 | mac->instance->mac_base = MACREG_BASE + (index * 0x200); | ||
472 | mac->instance->index = index; | ||
473 | mac->adapter = adapter; | ||
474 | mac->instance->ticks = 0; | ||
475 | |||
476 | t1_tpi_read(adapter, REG_JTAG_ID, &val); | ||
477 | mac->instance->version = val >> 28; | ||
478 | return mac; | ||
479 | } | ||
480 | |||
481 | struct gmac t1_ixf1010_ops = { | ||
482 | STATS_TICK_SECS, | ||
483 | ixf1010_mac_create, | ||
484 | ixf1010_mac_reset | ||
485 | }; | ||
diff --git a/drivers/net/chelsio/mac.c b/drivers/net/chelsio/mac.c new file mode 100644 index 000000000000..6af39dc70459 --- /dev/null +++ b/drivers/net/chelsio/mac.c | |||
@@ -0,0 +1,368 @@ | |||
1 | /* $Date: 2005/10/22 00:42:59 $ $RCSfile: mac.c,v $ $Revision: 1.32 $ */ | ||
2 | #include "gmac.h" | ||
3 | #include "regs.h" | ||
4 | #include "fpga_defs.h" | ||
5 | |||
6 | #define MAC_CSR_INTERFACE_GMII 0x0 | ||
7 | #define MAC_CSR_INTERFACE_TBI 0x1 | ||
8 | #define MAC_CSR_INTERFACE_MII 0x2 | ||
9 | #define MAC_CSR_INTERFACE_RMII 0x3 | ||
10 | |||
11 | /* Chelsio's MAC statistics. */ | ||
12 | struct mac_statistics { | ||
13 | |||
14 | /* Transmit */ | ||
15 | u32 TxFramesTransmittedOK; | ||
16 | u32 TxReserved1; | ||
17 | u32 TxReserved2; | ||
18 | u32 TxOctetsTransmittedOK; | ||
19 | u32 TxFramesWithDeferredXmissions; | ||
20 | u32 TxLateCollisions; | ||
21 | u32 TxFramesAbortedDueToXSCollisions; | ||
22 | u32 TxFramesLostDueToIntMACXmitError; | ||
23 | u32 TxReserved3; | ||
24 | u32 TxMulticastFrameXmittedOK; | ||
25 | u32 TxBroadcastFramesXmittedOK; | ||
26 | u32 TxFramesWithExcessiveDeferral; | ||
27 | u32 TxPAUSEMACCtrlFramesTransmitted; | ||
28 | |||
29 | /* Receive */ | ||
30 | u32 RxFramesReceivedOK; | ||
31 | u32 RxFrameCheckSequenceErrors; | ||
32 | u32 RxAlignmentErrors; | ||
33 | u32 RxOctetsReceivedOK; | ||
34 | u32 RxFramesLostDueToIntMACRcvError; | ||
35 | u32 RxMulticastFramesReceivedOK; | ||
36 | u32 RxBroadcastFramesReceivedOK; | ||
37 | u32 RxInRangeLengthErrors; | ||
38 | u32 RxTxOutOfRangeLengthField; | ||
39 | u32 RxFrameTooLongErrors; | ||
40 | u32 RxPAUSEMACCtrlFramesReceived; | ||
41 | }; | ||
42 | |||
43 | static int static_aPorts[] = { | ||
44 | FPGA_GMAC_INTERRUPT_PORT0, | ||
45 | FPGA_GMAC_INTERRUPT_PORT1, | ||
46 | FPGA_GMAC_INTERRUPT_PORT2, | ||
47 | FPGA_GMAC_INTERRUPT_PORT3 | ||
48 | }; | ||
49 | |||
50 | struct _cmac_instance { | ||
51 | u32 index; | ||
52 | }; | ||
53 | |||
54 | static int mac_intr_enable(struct cmac *mac) | ||
55 | { | ||
56 | u32 mac_intr; | ||
57 | |||
58 | if (t1_is_asic(mac->adapter)) { | ||
59 | /* ASIC */ | ||
60 | |||
61 | /* We don't use the on chip MAC for ASIC products. */ | ||
62 | } else { | ||
63 | /* FPGA */ | ||
64 | |||
65 | /* Set parent gmac interrupt. */ | ||
66 | mac_intr = readl(mac->adapter->regs + A_PL_ENABLE); | ||
67 | mac_intr |= FPGA_PCIX_INTERRUPT_GMAC; | ||
68 | writel(mac_intr, mac->adapter->regs + A_PL_ENABLE); | ||
69 | |||
70 | mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); | ||
71 | mac_intr |= static_aPorts[mac->instance->index]; | ||
72 | writel(mac_intr, | ||
73 | mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); | ||
74 | } | ||
75 | |||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static int mac_intr_disable(struct cmac *mac) | ||
80 | { | ||
81 | u32 mac_intr; | ||
82 | |||
83 | if (t1_is_asic(mac->adapter)) { | ||
84 | /* ASIC */ | ||
85 | |||
86 | /* We don't use the on chip MAC for ASIC products. */ | ||
87 | } else { | ||
88 | /* FPGA */ | ||
89 | |||
90 | /* Set parent gmac interrupt. */ | ||
91 | mac_intr = readl(mac->adapter->regs + A_PL_ENABLE); | ||
92 | mac_intr &= ~FPGA_PCIX_INTERRUPT_GMAC; | ||
93 | writel(mac_intr, mac->adapter->regs + A_PL_ENABLE); | ||
94 | |||
95 | mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); | ||
96 | mac_intr &= ~(static_aPorts[mac->instance->index]); | ||
97 | writel(mac_intr, | ||
98 | mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); | ||
99 | } | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int mac_intr_clear(struct cmac *mac) | ||
105 | { | ||
106 | u32 mac_intr; | ||
107 | |||
108 | if (t1_is_asic(mac->adapter)) { | ||
109 | /* ASIC */ | ||
110 | |||
111 | /* We don't use the on chip MAC for ASIC products. */ | ||
112 | } else { | ||
113 | /* FPGA */ | ||
114 | |||
115 | /* Set parent gmac interrupt. */ | ||
116 | writel(FPGA_PCIX_INTERRUPT_GMAC, | ||
117 | mac->adapter->regs + A_PL_CAUSE); | ||
118 | mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); | ||
119 | mac_intr |= (static_aPorts[mac->instance->index]); | ||
120 | writel(mac_intr, | ||
121 | mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); | ||
122 | } | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static int mac_get_address(struct cmac *mac, u8 addr[6]) | ||
128 | { | ||
129 | u32 data32_lo, data32_hi; | ||
130 | |||
131 | data32_lo = readl(mac->adapter->regs | ||
132 | + MAC_REG_IDLO(mac->instance->index)); | ||
133 | data32_hi = readl(mac->adapter->regs | ||
134 | + MAC_REG_IDHI(mac->instance->index)); | ||
135 | |||
136 | addr[0] = (u8) ((data32_hi >> 8) & 0xFF); | ||
137 | addr[1] = (u8) ((data32_hi) & 0xFF); | ||
138 | addr[2] = (u8) ((data32_lo >> 24) & 0xFF); | ||
139 | addr[3] = (u8) ((data32_lo >> 16) & 0xFF); | ||
140 | addr[4] = (u8) ((data32_lo >> 8) & 0xFF); | ||
141 | addr[5] = (u8) ((data32_lo) & 0xFF); | ||
142 | return 0; | ||
143 | } | ||
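The byte layout read back here (IDHI[15:8] and IDHI[7:0] hold the first two address bytes, IDLO holds the remaining four, most significant first) matches the fallback address written later in mac_create(): IDHI 0x0007 and IDLO 0x43FFFFFF - index read back as 00:07:43:ff:ff:(0xff - index). A standalone round-trip sketch, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x07, 0x43, 0xff, 0xff, 0xff };
	uint32_t hi, lo;
	uint8_t out[6];
	int i;

	/* Pack: IDHI[15:8] = addr[0], IDHI[7:0] = addr[1], IDLO[31:24] = addr[2], ... */
	hi = ((uint32_t)addr[0] << 8) | addr[1];
	lo = ((uint32_t)addr[2] << 24) | ((uint32_t)addr[3] << 16) |
	     ((uint32_t)addr[4] << 8) | addr[5];

	/* Unpack, mirroring mac_get_address() above. */
	out[0] = (hi >> 8) & 0xff;
	out[1] = hi & 0xff;
	out[2] = (lo >> 24) & 0xff;
	out[3] = (lo >> 16) & 0xff;
	out[4] = (lo >> 8) & 0xff;
	out[5] = lo & 0xff;

	for (i = 0; i < 6; i++)
		printf("%02x%c", out[i], i == 5 ? '\n' : ':');
	return 0;
}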
144 | |||
145 | static int mac_reset(struct cmac *mac) | ||
146 | { | ||
147 | u32 data32; | ||
148 | int mac_in_reset, time_out = 100; | ||
149 | int idx = mac->instance->index; | ||
150 | |||
151 | data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx)); | ||
152 | writel(data32 | F_MAC_RESET, | ||
153 | mac->adapter->regs + MAC_REG_CSR(idx)); | ||
154 | |||
155 | do { | ||
156 | data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx)); | ||
157 | |||
158 | mac_in_reset = data32 & F_MAC_RESET; | ||
159 | if (mac_in_reset) | ||
160 | udelay(1); | ||
161 | } while (mac_in_reset && --time_out); | ||
162 | |||
163 | if (mac_in_reset) { | ||
164 | CH_ERR("%s: MAC %d reset timed out\n", | ||
165 | mac->adapter->name, idx); | ||
166 | return 2; | ||
167 | } | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm) | ||
173 | { | ||
174 | u32 val; | ||
175 | |||
176 | val = readl(mac->adapter->regs | ||
177 | + MAC_REG_CSR(mac->instance->index)); | ||
178 | val &= ~(F_MAC_PROMISC | F_MAC_MC_ENABLE); | ||
179 | val |= V_MAC_PROMISC(t1_rx_mode_promisc(rm) != 0); | ||
180 | val |= V_MAC_MC_ENABLE(t1_rx_mode_allmulti(rm) != 0); | ||
181 | writel(val, | ||
182 | mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); | ||
183 | |||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, | ||
188 | int fc) | ||
189 | { | ||
190 | u32 data32; | ||
191 | |||
192 | data32 = readl(mac->adapter->regs | ||
193 | + MAC_REG_CSR(mac->instance->index)); | ||
194 | data32 &= ~(F_MAC_HALF_DUPLEX | V_MAC_SPEED(M_MAC_SPEED) | | ||
195 | V_INTERFACE(M_INTERFACE) | F_MAC_TX_PAUSE_ENABLE | | ||
196 | F_MAC_RX_PAUSE_ENABLE); | ||
197 | |||
198 | switch (speed) { | ||
199 | case SPEED_10: | ||
200 | case SPEED_100: | ||
201 | data32 |= V_INTERFACE(MAC_CSR_INTERFACE_MII); | ||
202 | data32 |= V_MAC_SPEED(speed == SPEED_10 ? 0 : 1); | ||
203 | break; | ||
204 | case SPEED_1000: | ||
205 | data32 |= V_INTERFACE(MAC_CSR_INTERFACE_GMII); | ||
206 | data32 |= V_MAC_SPEED(2); | ||
207 | break; | ||
208 | } | ||
209 | |||
210 | if (duplex >= 0) | ||
211 | data32 |= V_MAC_HALF_DUPLEX(duplex == DUPLEX_HALF); | ||
212 | |||
213 | if (fc >= 0) { | ||
214 | data32 |= V_MAC_RX_PAUSE_ENABLE((fc & PAUSE_RX) != 0); | ||
215 | data32 |= V_MAC_TX_PAUSE_ENABLE((fc & PAUSE_TX) != 0); | ||
216 | } | ||
217 | |||
218 | writel(data32, | ||
219 | mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | static int mac_enable(struct cmac *mac, int which) | ||
224 | { | ||
225 | u32 val; | ||
226 | |||
227 | val = readl(mac->adapter->regs | ||
228 | + MAC_REG_CSR(mac->instance->index)); | ||
229 | if (which & MAC_DIRECTION_RX) | ||
230 | val |= F_MAC_RX_ENABLE; | ||
231 | if (which & MAC_DIRECTION_TX) | ||
232 | val |= F_MAC_TX_ENABLE; | ||
233 | writel(val, | ||
234 | mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); | ||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | static int mac_disable(struct cmac *mac, int which) | ||
239 | { | ||
240 | u32 val; | ||
241 | |||
242 | val = readl(mac->adapter->regs | ||
243 | + MAC_REG_CSR(mac->instance->index)); | ||
244 | if (which & MAC_DIRECTION_RX) | ||
245 | val &= ~F_MAC_RX_ENABLE; | ||
246 | if (which & MAC_DIRECTION_TX) | ||
247 | val &= ~F_MAC_TX_ENABLE; | ||
248 | writel(val, | ||
249 | mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | #if 0 | ||
254 | static int mac_set_ifs(struct cmac *mac, u32 mode) | ||
255 | { | ||
256 | t1_write_reg_4(mac->adapter, | ||
257 | MAC_REG_IFS(mac->instance->index), | ||
258 | mode); | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | static int mac_enable_isl(struct cmac *mac) | ||
263 | { | ||
264 | u32 data32 = readl(mac->adapter->regs | ||
265 | + MAC_REG_CSR(mac->instance->index)); | ||
266 | data32 |= F_MAC_RX_ENABLE | F_MAC_TX_ENABLE; | ||
267 | t1_write_reg_4(mac->adapter, | ||
268 | MAC_REG_CSR(mac->instance->index), | ||
269 | data32); | ||
270 | return 0; | ||
271 | } | ||
272 | #endif | ||
273 | |||
274 | static int mac_set_mtu(struct cmac *mac, int mtu) | ||
275 | { | ||
276 | if (mtu > 9600) | ||
277 | return -EINVAL; | ||
278 | writel(mtu + ETH_HLEN + VLAN_HLEN, | ||
279 | mac->adapter->regs + MAC_REG_LARGEFRAMELENGTH(mac->instance->index)); | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | static const struct cmac_statistics *mac_update_statistics(struct cmac *mac, | ||
285 | int flag) | ||
286 | { | ||
287 | struct mac_statistics st; | ||
288 | u32 *p = (u32 *) & st, i; | ||
289 | |||
290 | writel(0, | ||
291 | mac->adapter->regs + MAC_REG_RMCNT(mac->instance->index)); | ||
292 | |||
293 | for (i = 0; i < sizeof(st) / sizeof(u32); i++) | ||
294 | *p++ = readl(mac->adapter->regs | ||
295 | + MAC_REG_RMDATA(mac->instance->index)); | ||
296 | |||
297 | /* XXX convert stats */ | ||
298 | return &mac->stats; | ||
299 | } | ||
300 | |||
301 | static void mac_destroy(struct cmac *mac) | ||
302 | { | ||
303 | kfree(mac); | ||
304 | } | ||
305 | |||
306 | static struct cmac_ops chelsio_mac_ops = { | ||
307 | .destroy = mac_destroy, | ||
308 | .reset = mac_reset, | ||
309 | .interrupt_enable = mac_intr_enable, | ||
310 | .interrupt_disable = mac_intr_disable, | ||
311 | .interrupt_clear = mac_intr_clear, | ||
312 | .enable = mac_enable, | ||
313 | .disable = mac_disable, | ||
314 | .set_mtu = mac_set_mtu, | ||
315 | .set_rx_mode = mac_set_rx_mode, | ||
316 | .set_speed_duplex_fc = mac_set_speed_duplex_fc, | ||
317 | .macaddress_get = mac_get_address, | ||
318 | .statistics_update = mac_update_statistics, | ||
319 | }; | ||
320 | |||
321 | static struct cmac *mac_create(adapter_t *adapter, int index) | ||
322 | { | ||
323 | struct cmac *mac; | ||
324 | u32 data32; | ||
325 | |||
326 | if (index >= 4) | ||
327 | return NULL; | ||
328 | |||
329 | mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); | ||
330 | if (!mac) | ||
331 | return NULL; | ||
332 | |||
333 | mac->ops = &chelsio_mac_ops; | ||
334 | mac->instance = (cmac_instance *) (mac + 1); | ||
335 | |||
336 | mac->instance->index = index; | ||
337 | mac->adapter = adapter; | ||
338 | |||
339 | data32 = readl(adapter->regs + MAC_REG_CSR(mac->instance->index)); | ||
340 | data32 &= ~(F_MAC_RESET | F_MAC_PROMISC | | ||
341 | F_MAC_LB_ENABLE | F_MAC_RX_ENABLE | F_MAC_TX_ENABLE); | ||
342 | data32 |= F_MAC_JUMBO_ENABLE; | ||
343 | writel(data32, adapter->regs + MAC_REG_CSR(mac->instance->index)); | ||
344 | |||
345 | /* Initialize the random backoff seed. */ | ||
346 | data32 = 0x55aa + (3 * index); | ||
347 | writel(data32, | ||
348 | adapter->regs + MAC_REG_GMRANDBACKOFFSEED(mac->instance->index)); | ||
349 | |||
350 | /* Check to see if the mac address needs to be set manually. */ | ||
351 | data32 = readl(adapter->regs + MAC_REG_IDLO(mac->instance->index)); | ||
352 | if (data32 == 0 || data32 == 0xffffffff) { | ||
353 | /* | ||
354 | * Add a default MAC address if we can't read one. | ||
355 | */ | ||
356 | writel(0x43FFFFFF - index, | ||
357 | adapter->regs + MAC_REG_IDLO(mac->instance->index)); | ||
358 | writel(0x0007, | ||
359 | adapter->regs + MAC_REG_IDHI(mac->instance->index)); | ||
360 | } | ||
361 | |||
362 | (void) mac_set_mtu(mac, 1500); | ||
363 | return mac; | ||
364 | } | ||
365 | |||
366 | struct gmac t1_chelsio_mac_ops = { | ||
367 | .create = mac_create | ||
368 | }; | ||
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c new file mode 100644 index 000000000000..28ac93ff7c4f --- /dev/null +++ b/drivers/net/chelsio/mv88e1xxx.c | |||
@@ -0,0 +1,397 @@ | |||
1 | /* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */ | ||
2 | #include "common.h" | ||
3 | #include "mv88e1xxx.h" | ||
4 | #include "cphy.h" | ||
5 | #include "elmer0.h" | ||
6 | |||
7 | /* MV88E1XXX MDI crossover register values */ | ||
8 | #define CROSSOVER_MDI 0 | ||
9 | #define CROSSOVER_MDIX 1 | ||
10 | #define CROSSOVER_AUTO 3 | ||
11 | |||
12 | #define INTR_ENABLE_MASK 0x6CA0 | ||
13 | |||
14 | /* | ||
15 | * Set the bits given by 'bitval' in PHY register 'reg'. | ||
16 | */ | ||
17 | static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval) | ||
18 | { | ||
19 | u32 val; | ||
20 | |||
21 | (void) simple_mdio_read(cphy, reg, &val); | ||
22 | (void) simple_mdio_write(cphy, reg, val | bitval); | ||
23 | } | ||
24 | |||
25 | /* | ||
26 | * Clear the bits given by 'bitval' in PHY register 'reg'. | ||
27 | */ | ||
28 | static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval) | ||
29 | { | ||
30 | u32 val; | ||
31 | |||
32 | (void) simple_mdio_read(cphy, reg, &val); | ||
33 | (void) simple_mdio_write(cphy, reg, val & ~bitval); | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * NAME: phy_reset | ||
38 | * | ||
39 | * DESC: Reset the given PHY's port. NOTE: This is not a global | ||
40 | * chip reset. | ||
41 | * | ||
42 | * PARAMS: cphy - Pointer to PHY instance data. | ||
43 | * | ||
44 | * RETURN: 0 - Successful reset. | ||
45 | * -1 - Timeout. | ||
46 | */ | ||
47 | static int mv88e1xxx_reset(struct cphy *cphy, int wait) | ||
48 | { | ||
49 | u32 ctl; | ||
50 | int time_out = 1000; | ||
51 | |||
52 | mdio_set_bit(cphy, MII_BMCR, BMCR_RESET); | ||
53 | |||
54 | do { | ||
55 | (void) simple_mdio_read(cphy, MII_BMCR, &ctl); | ||
56 | ctl &= BMCR_RESET; | ||
57 | if (ctl) | ||
58 | udelay(1); | ||
59 | } while (ctl && --time_out); | ||
60 | |||
61 | return ctl ? -1 : 0; | ||
62 | } | ||
63 | |||
64 | static int mv88e1xxx_interrupt_enable(struct cphy *cphy) | ||
65 | { | ||
66 | /* Enable PHY interrupts. */ | ||
67 | (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, | ||
68 | INTR_ENABLE_MASK); | ||
69 | |||
70 | /* Enable Marvell interrupts through Elmer0. */ | ||
71 | if (t1_is_asic(cphy->adapter)) { | ||
72 | u32 elmer; | ||
73 | |||
74 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
75 | elmer |= ELMER0_GP_BIT1; | ||
76 | if (is_T2(cphy->adapter)) { | ||
77 | elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; | ||
78 | } | ||
79 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
80 | } | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static int mv88e1xxx_interrupt_disable(struct cphy *cphy) | ||
85 | { | ||
86 | /* Disable all phy interrupts. */ | ||
87 | (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0); | ||
88 | |||
89 | /* Disable Marvell interrupts through Elmer0. */ | ||
90 | if (t1_is_asic(cphy->adapter)) { | ||
91 | u32 elmer; | ||
92 | |||
93 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
94 | elmer &= ~ELMER0_GP_BIT1; | ||
95 | if (is_T2(cphy->adapter)) { | ||
96 | elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); | ||
97 | } | ||
98 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
99 | } | ||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static int mv88e1xxx_interrupt_clear(struct cphy *cphy) | ||
104 | { | ||
105 | u32 elmer; | ||
106 | |||
107 | /* Clear PHY interrupts by reading the register. */ | ||
108 | (void) simple_mdio_read(cphy, | ||
109 | MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer); | ||
110 | |||
111 | /* Clear Marvell interrupts through Elmer0. */ | ||
112 | if (t1_is_asic(cphy->adapter)) { | ||
113 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); | ||
114 | elmer |= ELMER0_GP_BIT1; | ||
115 | if (is_T2(cphy->adapter)) { | ||
116 | elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; | ||
117 | } | ||
118 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); | ||
119 | } | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * Set the PHY speed and duplex. This also disables auto-negotiation, except | ||
125 | * for 1Gb/s, where auto-negotiation is mandatory. | ||
126 | */ | ||
127 | static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex) | ||
128 | { | ||
129 | u32 ctl; | ||
130 | |||
131 | (void) simple_mdio_read(phy, MII_BMCR, &ctl); | ||
132 | if (speed >= 0) { | ||
133 | ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); | ||
134 | if (speed == SPEED_100) | ||
135 | ctl |= BMCR_SPEED100; | ||
136 | else if (speed == SPEED_1000) | ||
137 | ctl |= BMCR_SPEED1000; | ||
138 | } | ||
139 | if (duplex >= 0) { | ||
140 | ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); | ||
141 | if (duplex == DUPLEX_FULL) | ||
142 | ctl |= BMCR_FULLDPLX; | ||
143 | } | ||
144 | if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */ | ||
145 | ctl |= BMCR_ANENABLE; | ||
146 | (void) simple_mdio_write(phy, MII_BMCR, ctl); | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover) | ||
151 | { | ||
152 | u32 data32; | ||
153 | |||
154 | (void) simple_mdio_read(cphy, | ||
155 | MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32); | ||
156 | data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE); | ||
157 | data32 |= V_PSCR_MDI_XOVER_MODE(crossover); | ||
158 | (void) simple_mdio_write(cphy, | ||
159 | MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | static int mv88e1xxx_autoneg_enable(struct cphy *cphy) | ||
164 | { | ||
165 | u32 ctl; | ||
166 | |||
167 | (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO); | ||
168 | |||
169 | (void) simple_mdio_read(cphy, MII_BMCR, &ctl); | ||
170 | /* restart autoneg for change to take effect */ | ||
171 | ctl |= BMCR_ANENABLE | BMCR_ANRESTART; | ||
172 | (void) simple_mdio_write(cphy, MII_BMCR, ctl); | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int mv88e1xxx_autoneg_disable(struct cphy *cphy) | ||
177 | { | ||
178 | u32 ctl; | ||
179 | |||
180 | /* | ||
181 | * Crossover *must* be set to manual in order to disable auto-neg. | ||
182 | * The Alaska FAQs document highlights this point. | ||
183 | */ | ||
184 | (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI); | ||
185 | |||
186 | /* | ||
187 | * Must include autoneg reset when disabling auto-neg. This | ||
188 | * is described in the Alaska FAQ document. | ||
189 | */ | ||
190 | (void) simple_mdio_read(cphy, MII_BMCR, &ctl); | ||
191 | ctl &= ~BMCR_ANENABLE; | ||
192 | (void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static int mv88e1xxx_autoneg_restart(struct cphy *cphy) | ||
197 | { | ||
198 | mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART); | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map) | ||
203 | { | ||
204 | u32 val = 0; | ||
205 | |||
206 | if (advertise_map & | ||
207 | (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { | ||
208 | (void) simple_mdio_read(phy, MII_GBCR, &val); | ||
209 | val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL); | ||
210 | if (advertise_map & ADVERTISED_1000baseT_Half) | ||
211 | val |= GBCR_ADV_1000HALF; | ||
212 | if (advertise_map & ADVERTISED_1000baseT_Full) | ||
213 | val |= GBCR_ADV_1000FULL; | ||
214 | } | ||
215 | (void) simple_mdio_write(phy, MII_GBCR, val); | ||
216 | |||
217 | val = 1; | ||
218 | if (advertise_map & ADVERTISED_10baseT_Half) | ||
219 | val |= ADVERTISE_10HALF; | ||
220 | if (advertise_map & ADVERTISED_10baseT_Full) | ||
221 | val |= ADVERTISE_10FULL; | ||
222 | if (advertise_map & ADVERTISED_100baseT_Half) | ||
223 | val |= ADVERTISE_100HALF; | ||
224 | if (advertise_map & ADVERTISED_100baseT_Full) | ||
225 | val |= ADVERTISE_100FULL; | ||
226 | if (advertise_map & ADVERTISED_PAUSE) | ||
227 | val |= ADVERTISE_PAUSE; | ||
228 | if (advertise_map & ADVERTISED_ASYM_PAUSE) | ||
229 | val |= ADVERTISE_PAUSE_ASYM; | ||
230 | (void) simple_mdio_write(phy, MII_ADVERTISE, val); | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | static int mv88e1xxx_set_loopback(struct cphy *cphy, int on) | ||
235 | { | ||
236 | if (on) | ||
237 | mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK); | ||
238 | else | ||
239 | mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK); | ||
240 | return 0; | ||
241 | } | ||
242 | |||
243 | static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok, | ||
244 | int *speed, int *duplex, int *fc) | ||
245 | { | ||
246 | u32 status; | ||
247 | int sp = -1, dplx = -1, pause = 0; | ||
248 | |||
249 | (void) simple_mdio_read(cphy, | ||
250 | MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); | ||
251 | if ((status & V_PSSR_STATUS_RESOLVED) != 0) { | ||
252 | if (status & V_PSSR_RX_PAUSE) | ||
253 | pause |= PAUSE_RX; | ||
254 | if (status & V_PSSR_TX_PAUSE) | ||
255 | pause |= PAUSE_TX; | ||
256 | dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; | ||
257 | sp = G_PSSR_SPEED(status); | ||
258 | if (sp == 0) | ||
259 | sp = SPEED_10; | ||
260 | else if (sp == 1) | ||
261 | sp = SPEED_100; | ||
262 | else | ||
263 | sp = SPEED_1000; | ||
264 | } | ||
265 | if (link_ok) | ||
266 | *link_ok = (status & V_PSSR_LINK) != 0; | ||
267 | if (speed) | ||
268 | *speed = sp; | ||
269 | if (duplex) | ||
270 | *duplex = dplx; | ||
271 | if (fc) | ||
272 | *fc = pause; | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable) | ||
277 | { | ||
278 | u32 val; | ||
279 | |||
280 | (void) simple_mdio_read(cphy, | ||
281 | MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val); | ||
282 | |||
283 | /* | ||
284 | * Set the downshift counter to 2 so we try to establish Gb link | ||
285 | * twice before downshifting. | ||
286 | */ | ||
287 | val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT)); | ||
288 | |||
289 | if (downshift_enable) | ||
290 | val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2); | ||
291 | (void) simple_mdio_write(cphy, | ||
292 | MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val); | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static int mv88e1xxx_interrupt_handler(struct cphy *cphy) | ||
297 | { | ||
298 | int cphy_cause = 0; | ||
299 | u32 status; | ||
300 | |||
301 | /* | ||
302 | * Loop until cause reads zero. Need to handle bouncing interrupts. | ||
303 | */ | ||
304 | while (1) { | ||
305 | u32 cause; | ||
306 | |||
307 | (void) simple_mdio_read(cphy, | ||
308 | MV88E1XXX_INTERRUPT_STATUS_REGISTER, | ||
309 | &cause); | ||
310 | cause &= INTR_ENABLE_MASK; | ||
311 | if (!cause) break; | ||
312 | |||
313 | if (cause & MV88E1XXX_INTR_LINK_CHNG) { | ||
314 | (void) simple_mdio_read(cphy, | ||
315 | MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); | ||
316 | |||
317 | if (status & MV88E1XXX_INTR_LINK_CHNG) { | ||
318 | cphy->state |= PHY_LINK_UP; | ||
319 | } else { | ||
320 | cphy->state &= ~PHY_LINK_UP; | ||
321 | if (cphy->state & PHY_AUTONEG_EN) | ||
322 | cphy->state &= ~PHY_AUTONEG_RDY; | ||
323 | cphy_cause |= cphy_cause_link_change; | ||
324 | } | ||
325 | } | ||
326 | |||
327 | if (cause & MV88E1XXX_INTR_AUTONEG_DONE) | ||
328 | cphy->state |= PHY_AUTONEG_RDY; | ||
329 | |||
330 | if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) == | ||
331 | (PHY_LINK_UP | PHY_AUTONEG_RDY)) | ||
332 | cphy_cause |= cphy_cause_link_change; | ||
333 | } | ||
334 | return cphy_cause; | ||
335 | } | ||
336 | |||
337 | static void mv88e1xxx_destroy(struct cphy *cphy) | ||
338 | { | ||
339 | kfree(cphy); | ||
340 | } | ||
341 | |||
342 | static struct cphy_ops mv88e1xxx_ops = { | ||
343 | .destroy = mv88e1xxx_destroy, | ||
344 | .reset = mv88e1xxx_reset, | ||
345 | .interrupt_enable = mv88e1xxx_interrupt_enable, | ||
346 | .interrupt_disable = mv88e1xxx_interrupt_disable, | ||
347 | .interrupt_clear = mv88e1xxx_interrupt_clear, | ||
348 | .interrupt_handler = mv88e1xxx_interrupt_handler, | ||
349 | .autoneg_enable = mv88e1xxx_autoneg_enable, | ||
350 | .autoneg_disable = mv88e1xxx_autoneg_disable, | ||
351 | .autoneg_restart = mv88e1xxx_autoneg_restart, | ||
352 | .advertise = mv88e1xxx_advertise, | ||
353 | .set_loopback = mv88e1xxx_set_loopback, | ||
354 | .set_speed_duplex = mv88e1xxx_set_speed_duplex, | ||
355 | .get_link_status = mv88e1xxx_get_link_status, | ||
356 | }; | ||
357 | |||
358 | static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr, | ||
359 | struct mdio_ops *mdio_ops) | ||
360 | { | ||
361 | struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); | ||
362 | |||
363 | if (!cphy) return NULL; | ||
364 | |||
365 | cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops); | ||
366 | |||
367 | /* Configure particular PHYs to run in a different mode. */ | ||
368 | if ((board_info(adapter)->caps & SUPPORTED_TP) && | ||
369 | board_info(adapter)->chip_phy == CHBT_PHY_88E1111) { | ||
370 | /* | ||
371 | * Configure the PHY transmitter as class A to reduce EMI. | ||
372 | */ | ||
373 | (void) simple_mdio_write(cphy, | ||
374 | MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB); | ||
375 | (void) simple_mdio_write(cphy, | ||
376 | MV88E1XXX_EXTENDED_REGISTER, 0x8004); | ||
377 | } | ||
378 | (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */ | ||
379 | |||
380 | /* LED */ | ||
381 | if (is_T2(adapter)) { | ||
382 | (void) simple_mdio_write(cphy, | ||
383 | MV88E1XXX_LED_CONTROL_REGISTER, 0x1); | ||
384 | } | ||
385 | |||
386 | return cphy; | ||
387 | } | ||
388 | |||
389 | static int mv88e1xxx_phy_reset(adapter_t* adapter) | ||
390 | { | ||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | struct gphy t1_mv88e1xxx_ops = { | ||
395 | mv88e1xxx_phy_create, | ||
396 | mv88e1xxx_phy_reset | ||
397 | }; | ||
diff --git a/drivers/net/chelsio/mv88e1xxx.h b/drivers/net/chelsio/mv88e1xxx.h new file mode 100644 index 000000000000..967cc4286359 --- /dev/null +++ b/drivers/net/chelsio/mv88e1xxx.h | |||
@@ -0,0 +1,127 @@ | |||
1 | /* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */ | ||
2 | #ifndef CHELSIO_MV8E1XXX_H | ||
3 | #define CHELSIO_MV8E1XXX_H | ||
4 | |||
5 | #ifndef BMCR_SPEED1000 | ||
6 | # define BMCR_SPEED1000 0x40 | ||
7 | #endif | ||
8 | |||
9 | #ifndef ADVERTISE_PAUSE | ||
10 | # define ADVERTISE_PAUSE 0x400 | ||
11 | #endif | ||
12 | #ifndef ADVERTISE_PAUSE_ASYM | ||
13 | # define ADVERTISE_PAUSE_ASYM 0x800 | ||
14 | #endif | ||
15 | |||
16 | /* Gigabit MII registers */ | ||
17 | #define MII_GBCR 9 /* 1000Base-T control register */ | ||
18 | #define MII_GBSR 10 /* 1000Base-T status register */ | ||
19 | |||
20 | /* 1000Base-T control register fields */ | ||
21 | #define GBCR_ADV_1000HALF 0x100 | ||
22 | #define GBCR_ADV_1000FULL 0x200 | ||
23 | #define GBCR_PREFER_MASTER 0x400 | ||
24 | #define GBCR_MANUAL_AS_MASTER 0x800 | ||
25 | #define GBCR_MANUAL_CONFIG_ENABLE 0x1000 | ||
26 | |||
27 | /* 1000Base-T status register fields */ | ||
28 | #define GBSR_LP_1000HALF 0x400 | ||
29 | #define GBSR_LP_1000FULL 0x800 | ||
30 | #define GBSR_REMOTE_OK 0x1000 | ||
31 | #define GBSR_LOCAL_OK 0x2000 | ||
32 | #define GBSR_LOCAL_MASTER 0x4000 | ||
33 | #define GBSR_MASTER_FAULT 0x8000 | ||
34 | |||
35 | /* Marvell PHY interrupt status bits. */ | ||
36 | #define MV88E1XXX_INTR_JABBER 0x0001 | ||
37 | #define MV88E1XXX_INTR_POLARITY_CHNG 0x0002 | ||
38 | #define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010 | ||
39 | #define MV88E1XXX_INTR_DOWNSHIFT 0x0020 | ||
40 | #define MV88E1XXX_INTR_MDI_XOVER_CHNG 0x0040 | ||
41 | #define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080 | ||
42 | #define MV88E1XXX_INTR_FALSE_CARRIER 0x0100 | ||
43 | #define MV88E1XXX_INTR_SYMBOL_ERROR 0x0200 | ||
44 | #define MV88E1XXX_INTR_LINK_CHNG 0x0400 | ||
45 | #define MV88E1XXX_INTR_AUTONEG_DONE 0x0800 | ||
46 | #define MV88E1XXX_INTR_PAGE_RECV 0x1000 | ||
47 | #define MV88E1XXX_INTR_DUPLEX_CHNG 0x2000 | ||
48 | #define MV88E1XXX_INTR_SPEED_CHNG 0x4000 | ||
49 | #define MV88E1XXX_INTR_AUTONEG_ERR 0x8000 | ||
50 | |||
51 | /* Marvell PHY specific registers. */ | ||
52 | #define MV88E1XXX_SPECIFIC_CNTRL_REGISTER 16 | ||
53 | #define MV88E1XXX_SPECIFIC_STATUS_REGISTER 17 | ||
54 | #define MV88E1XXX_INTERRUPT_ENABLE_REGISTER 18 | ||
55 | #define MV88E1XXX_INTERRUPT_STATUS_REGISTER 19 | ||
56 | #define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20 | ||
57 | #define MV88E1XXX_RECV_ERR_CNTR_REGISTER 21 | ||
58 | #define MV88E1XXX_RES_REGISTER 22 | ||
59 | #define MV88E1XXX_GLOBAL_STATUS_REGISTER 23 | ||
60 | #define MV88E1XXX_LED_CONTROL_REGISTER 24 | ||
61 | #define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER 25 | ||
62 | #define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26 | ||
63 | #define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER 27 | ||
64 | #define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER 28 | ||
65 | #define MV88E1XXX_EXTENDED_ADDR_REGISTER 29 | ||
66 | #define MV88E1XXX_EXTENDED_REGISTER 30 | ||
67 | |||
68 | /* PHY specific control register fields */ | ||
69 | #define S_PSCR_MDI_XOVER_MODE 5 | ||
70 | #define M_PSCR_MDI_XOVER_MODE 0x3 | ||
71 | #define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE) | ||
72 | #define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE) | ||
73 | |||
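The register-field macros in this header follow the naming scheme used throughout these drivers: S_* is a field's bit offset, M_* its mask before shifting, V_*(x) shifts a value into place, and G_*(x) extracts the field from a register word. A small standalone illustration of the pattern, using a made-up field name (DEMO_FIELD is not a real register field):

#include <stdio.h>

/* Same convention as above, applied to a hypothetical two-bit field. */
#define S_DEMO_FIELD		5
#define M_DEMO_FIELD		0x3
#define V_DEMO_FIELD(x)		((x) << S_DEMO_FIELD)
#define G_DEMO_FIELD(x)		(((x) >> S_DEMO_FIELD) & M_DEMO_FIELD)

int main(void)
{
	unsigned int reg = 0xffffffffu;

	reg &= ~V_DEMO_FIELD(M_DEMO_FIELD);		/* clear the field    */
	reg |= V_DEMO_FIELD(2);				/* write the value 2  */
	printf("field = %u\n", G_DEMO_FIELD(reg));	/* prints "field = 2" */
	return 0;
}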
74 | /* Extended PHY specific control register fields */ | ||
75 | #define S_DOWNSHIFT_ENABLE 8 | ||
76 | #define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE) | ||
77 | |||
78 | #define S_DOWNSHIFT_CNT 9 | ||
79 | #define M_DOWNSHIFT_CNT 0x7 | ||
80 | #define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT) | ||
81 | #define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT) | ||
82 | |||
83 | /* PHY specific status register fields */ | ||
84 | #define S_PSSR_JABBER 0 | ||
85 | #define V_PSSR_JABBER (1 << S_PSSR_JABBER) | ||
86 | |||
87 | #define S_PSSR_POLARITY 1 | ||
88 | #define V_PSSR_POLARITY (1 << S_PSSR_POLARITY) | ||
89 | |||
90 | #define S_PSSR_RX_PAUSE 2 | ||
91 | #define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE) | ||
92 | |||
93 | #define S_PSSR_TX_PAUSE 3 | ||
94 | #define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE) | ||
95 | |||
96 | #define S_PSSR_ENERGY_DETECT 4 | ||
97 | #define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT) | ||
98 | |||
99 | #define S_PSSR_DOWNSHIFT_STATUS 5 | ||
100 | #define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS) | ||
101 | |||
102 | #define S_PSSR_MDI 6 | ||
103 | #define V_PSSR_MDI (1 << S_PSSR_MDI) | ||
104 | |||
105 | #define S_PSSR_CABLE_LEN 7 | ||
106 | #define M_PSSR_CABLE_LEN 0x7 | ||
107 | #define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN) | ||
108 | #define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN) | ||
109 | |||
110 | #define S_PSSR_LINK 10 | ||
111 | #define V_PSSR_LINK (1 << S_PSSR_LINK) | ||
112 | |||
113 | #define S_PSSR_STATUS_RESOLVED 11 | ||
114 | #define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED) | ||
115 | |||
116 | #define S_PSSR_PAGE_RECEIVED 12 | ||
117 | #define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED) | ||
118 | |||
119 | #define S_PSSR_DUPLEX 13 | ||
120 | #define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX) | ||
121 | |||
122 | #define S_PSSR_SPEED 14 | ||
123 | #define M_PSSR_SPEED 0x3 | ||
124 | #define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED) | ||
125 | #define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED) | ||
126 | |||
127 | #endif | ||
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c index db5034282782..c8e89480d906 100644 --- a/drivers/net/chelsio/mv88x201x.c +++ b/drivers/net/chelsio/mv88x201x.c | |||
@@ -85,29 +85,33 @@ static int mv88x201x_reset(struct cphy *cphy, int wait) | |||
85 | 85 | ||
86 | static int mv88x201x_interrupt_enable(struct cphy *cphy) | 86 | static int mv88x201x_interrupt_enable(struct cphy *cphy) |
87 | { | 87 | { |
88 | u32 elmer; | ||
89 | |||
90 | /* Enable PHY LASI interrupts. */ | 88 | /* Enable PHY LASI interrupts. */ |
91 | mdio_write(cphy, 0x1, 0x9002, 0x1); | 89 | mdio_write(cphy, 0x1, 0x9002, 0x1); |
92 | 90 | ||
93 | /* Enable Marvell interrupts through Elmer0. */ | 91 | /* Enable Marvell interrupts through Elmer0. */ |
94 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | 92 | if (t1_is_asic(cphy->adapter)) { |
95 | elmer |= ELMER0_GP_BIT6; | 93 | u32 elmer; |
96 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | 94 | |
95 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
96 | elmer |= ELMER0_GP_BIT6; | ||
97 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
98 | } | ||
97 | return 0; | 99 | return 0; |
98 | } | 100 | } |
99 | 101 | ||
100 | static int mv88x201x_interrupt_disable(struct cphy *cphy) | 102 | static int mv88x201x_interrupt_disable(struct cphy *cphy) |
101 | { | 103 | { |
102 | u32 elmer; | ||
103 | |||
104 | /* Disable PHY LASI interrupts. */ | 104 | /* Disable PHY LASI interrupts. */ |
105 | mdio_write(cphy, 0x1, 0x9002, 0x0); | 105 | mdio_write(cphy, 0x1, 0x9002, 0x0); |
106 | 106 | ||
107 | /* Disable Marvell interrupts through Elmer0. */ | 107 | /* Disable Marvell interrupts through Elmer0. */ |
108 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | 108 | if (t1_is_asic(cphy->adapter)) { |
109 | elmer &= ~ELMER0_GP_BIT6; | 109 | u32 elmer; |
110 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | 110 | |
111 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
112 | elmer &= ~ELMER0_GP_BIT6; | ||
113 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
114 | } | ||
111 | return 0; | 115 | return 0; |
112 | } | 116 | } |
113 | 117 | ||
@@ -140,9 +144,11 @@ static int mv88x201x_interrupt_clear(struct cphy *cphy) | |||
140 | #endif | 144 | #endif |
141 | 145 | ||
142 | /* Clear Marvell interrupts through Elmer0. */ | 146 | /* Clear Marvell interrupts through Elmer0. */ |
143 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); | 147 | if (t1_is_asic(cphy->adapter)) { |
144 | elmer |= ELMER0_GP_BIT6; | 148 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); |
145 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); | 149 | elmer |= ELMER0_GP_BIT6; |
150 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); | ||
151 | } | ||
146 | return 0; | 152 | return 0; |
147 | } | 153 | } |
148 | 154 | ||
@@ -205,11 +211,11 @@ static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr, | |||
205 | struct mdio_ops *mdio_ops) | 211 | struct mdio_ops *mdio_ops) |
206 | { | 212 | { |
207 | u32 val; | 213 | u32 val; |
208 | struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL); | 214 | struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); |
209 | 215 | ||
210 | if (!cphy) | 216 | if (!cphy) |
211 | return NULL; | 217 | return NULL; |
212 | memset(cphy, 0, sizeof(*cphy)); | 218 | |
213 | cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops); | 219 | cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops); |
214 | 220 | ||
215 | /* Commands the PHY to enable XFP's clock. */ | 221 | /* Commands the PHY to enable XFP's clock. */ |
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c new file mode 100644 index 000000000000..0b90014d5b3e --- /dev/null +++ b/drivers/net/chelsio/my3126.c | |||
@@ -0,0 +1,204 @@ | |||
1 | /* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */ | ||
2 | #include "cphy.h" | ||
3 | #include "elmer0.h" | ||
4 | #include "suni1x10gexp_regs.h" | ||
5 | |||
6 | /* Port Reset */ | ||
7 | static int my3126_reset(struct cphy *cphy, int wait) | ||
8 | { | ||
9 | /* | ||
10 | * This can be done through registers. It is not required since | ||
11 | * a full chip reset is used. | ||
12 | */ | ||
13 | return (0); | ||
14 | } | ||
15 | |||
16 | static int my3126_interrupt_enable(struct cphy *cphy) | ||
17 | { | ||
18 | schedule_delayed_work(&cphy->phy_update, HZ/30); | ||
19 | t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo); | ||
20 | return (0); | ||
21 | } | ||
22 | |||
23 | static int my3126_interrupt_disable(struct cphy *cphy) | ||
24 | { | ||
25 | cancel_rearming_delayed_work(&cphy->phy_update); | ||
26 | return (0); | ||
27 | } | ||
28 | |||
29 | static int my3126_interrupt_clear(struct cphy *cphy) | ||
30 | { | ||
31 | return (0); | ||
32 | } | ||
33 | |||
34 | #define OFFSET(REG_ADDR) (REG_ADDR << 2) | ||
35 | |||
36 | static int my3126_interrupt_handler(struct cphy *cphy) | ||
37 | { | ||
38 | u32 val; | ||
39 | u16 val16; | ||
40 | u16 status; | ||
41 | u32 act_count; | ||
42 | adapter_t *adapter; | ||
43 | adapter = cphy->adapter; | ||
44 | |||
45 | if (cphy->count == 50) { | ||
46 | mdio_read(cphy, 0x1, 0x1, &val); | ||
47 | val16 = (u16) val; | ||
48 | status = cphy->bmsr ^ val16; | ||
49 | |||
50 | if (status & BMSR_LSTATUS) | ||
51 | t1_link_changed(adapter, 0); | ||
52 | cphy->bmsr = val16; | ||
53 | |||
54 | /* We have only enabled link change interrupts, so the | ||
55 | * change must be in the link status. | ||
56 | */ | ||
57 | cphy->count = 0; | ||
58 | } | ||
59 | |||
60 | t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL), | ||
61 | SUNI1x10GEXP_BITMSK_MSTAT_SNAP); | ||
62 | t1_tpi_read(adapter, | ||
63 | OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count); | ||
64 | t1_tpi_read(adapter, | ||
65 | OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val); | ||
66 | act_count += val; | ||
67 | |||
68 | /* Populate elmer_gpo with the register value */ | ||
69 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
70 | cphy->elmer_gpo = val; | ||
71 | |||
72 | if ((val & (1 << 8)) || (val & (1 << 19)) || | ||
73 | (cphy->act_count == act_count) || cphy->act_on) { | ||
74 | if (is_T2(adapter)) | ||
75 | val |= (1 << 9); | ||
76 | else if (t1_is_T1B(adapter)) | ||
77 | val |= (1 << 20); | ||
78 | cphy->act_on = 0; | ||
79 | } else { | ||
80 | if (is_T2(adapter)) | ||
81 | val &= ~(1 << 9); | ||
82 | else if (t1_is_T1B(adapter)) | ||
83 | val &= ~(1 << 20); | ||
84 | cphy->act_on = 1; | ||
85 | } | ||
86 | |||
87 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
88 | |||
89 | cphy->elmer_gpo = val; | ||
90 | cphy->act_count = act_count; | ||
91 | cphy->count++; | ||
92 | |||
93 | return cphy_cause_link_change; | ||
94 | } | ||
95 | |||
96 | static void my3216_poll(void *arg) | ||
97 | { | ||
98 | my3126_interrupt_handler(arg); | ||
99 | } | ||
100 | |||
101 | static int my3126_set_loopback(struct cphy *cphy, int on) | ||
102 | { | ||
103 | return (0); | ||
104 | } | ||
105 | |||
106 | /* To check the activity LED */ | ||
107 | static int my3126_get_link_status(struct cphy *cphy, | ||
108 | int *link_ok, int *speed, int *duplex, int *fc) | ||
109 | { | ||
110 | u32 val; | ||
111 | u16 val16; | ||
112 | adapter_t *adapter; | ||
113 | |||
114 | adapter = cphy->adapter; | ||
115 | mdio_read(cphy, 0x1, 0x1, &val); | ||
116 | val16 = (u16) val; | ||
117 | |||
118 | /* Populate elmer_gpo with the register value */ | ||
119 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
120 | cphy->elmer_gpo = val; | ||
121 | |||
122 | *link_ok = (val16 & BMSR_LSTATUS); | ||
123 | |||
124 | if (*link_ok) { | ||
125 | /* Turn on the LED. */ | ||
126 | if (is_T2(adapter)) | ||
127 | val &= ~(1 << 8); | ||
128 | else if (t1_is_T1B(adapter)) | ||
129 | val &= ~(1 << 19); | ||
130 | } else { | ||
131 | /* Turn off the LED. */ | ||
132 | if (is_T2(adapter)) | ||
133 | val |= (1 << 8); | ||
134 | else if (t1_is_T1B(adapter)) | ||
135 | val |= (1 << 19); | ||
136 | } | ||
137 | |||
138 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
139 | cphy->elmer_gpo = val; | ||
140 | *speed = SPEED_10000; | ||
141 | *duplex = DUPLEX_FULL; | ||
142 | |||
143 | /* need to add flow control */ | ||
144 | if (fc) | ||
145 | *fc = PAUSE_RX | PAUSE_TX; | ||
146 | |||
147 | return (0); | ||
148 | } | ||
149 | |||
150 | static void my3126_destroy(struct cphy *cphy) | ||
151 | { | ||
152 | kfree(cphy); | ||
153 | } | ||
154 | |||
155 | static struct cphy_ops my3126_ops = { | ||
156 | .destroy = my3126_destroy, | ||
157 | .reset = my3126_reset, | ||
158 | .interrupt_enable = my3126_interrupt_enable, | ||
159 | .interrupt_disable = my3126_interrupt_disable, | ||
160 | .interrupt_clear = my3126_interrupt_clear, | ||
161 | .interrupt_handler = my3126_interrupt_handler, | ||
162 | .get_link_status = my3126_get_link_status, | ||
163 | .set_loopback = my3126_set_loopback, | ||
164 | }; | ||
165 | |||
166 | static struct cphy *my3126_phy_create(adapter_t *adapter, | ||
167 | int phy_addr, struct mdio_ops *mdio_ops) | ||
168 | { | ||
169 | struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL); | ||
170 | |||
171 | if (!cphy) return NULL; | ||
172 | cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); | ||
173 | |||
174 | INIT_WORK(&cphy->phy_update, my3216_poll, cphy); | ||
175 | cphy->bmsr = 0; | ||
176 | |||
177 | return (cphy); | ||
178 | } | ||
179 | |||
180 | /* Chip Reset */ | ||
181 | static int my3126_phy_reset(adapter_t * adapter) | ||
182 | { | ||
183 | u32 val; | ||
184 | |||
185 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
186 | val &= ~4; | ||
187 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
188 | msleep(100); | ||
189 | |||
190 | t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); | ||
191 | msleep(1000); | ||
192 | |||
193 | /* Now let's enable the laser. Delay 100us. */ | ||
194 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
195 | val |= 0x8000; | ||
196 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
197 | udelay(100); | ||
198 | return (0); | ||
199 | } | ||
200 | |||
201 | struct gphy t1_my3126_ops = { | ||
202 | my3126_phy_create, | ||
203 | my3126_phy_reset | ||
204 | }; | ||
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c index 04a1404fc65e..63cabeb98afe 100644 --- a/drivers/net/chelsio/pm3393.c +++ b/drivers/net/chelsio/pm3393.c | |||
@@ -43,21 +43,7 @@ | |||
43 | #include "elmer0.h" | 43 | #include "elmer0.h" |
44 | #include "suni1x10gexp_regs.h" | 44 | #include "suni1x10gexp_regs.h" |
45 | 45 | ||
46 | /* 802.3ae 10Gb/s MDIO Manageable Device(MMD) | 46 | #include <linux/crc32.h> |
47 | */ | ||
48 | enum { | ||
49 | MMD_RESERVED, | ||
50 | MMD_PMAPMD, | ||
51 | MMD_WIS, | ||
52 | MMD_PCS, | ||
53 | MMD_PHY_XGXS, /* XGMII Extender Sublayer */ | ||
54 | MMD_DTE_XGXS, | ||
55 | }; | ||
56 | |||
57 | enum { | ||
58 | PHY_XGXS_CTRL_1, | ||
59 | PHY_XGXS_STATUS_1 | ||
60 | }; | ||
61 | 47 | ||
62 | #define OFFSET(REG_ADDR) (REG_ADDR << 2) | 48 | #define OFFSET(REG_ADDR) (REG_ADDR << 2) |
63 | 49 | ||
@@ -88,6 +74,8 @@ enum { /* RMON registers */ | |||
88 | RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW, | 74 | RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW, |
89 | RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW, | 75 | RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW, |
90 | RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW, | 76 | RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW, |
77 | RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW, | ||
78 | RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW, | ||
91 | 79 | ||
92 | TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW, | 80 | TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW, |
93 | TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW, | 81 | TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW, |
@@ -95,7 +83,9 @@ enum { /* RMON registers */ | |||
95 | TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW, | 83 | TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW, |
96 | TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW, | 84 | TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW, |
97 | TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW, | 85 | TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW, |
98 | TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW | 86 | TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW, |
87 | TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW, | ||
88 | TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW | ||
99 | }; | 89 | }; |
100 | 90 | ||
101 | struct _cmac_instance { | 91 | struct _cmac_instance { |
@@ -124,12 +114,12 @@ static int pm3393_reset(struct cmac *cmac) | |||
124 | 114 | ||
125 | /* | 115 | /* |
126 | * Enable interrupts for the PM3393 | 116 | * Enable interrupts for the PM3393 |
127 | 117 | * | |
128 | 1. Enable PM3393 BLOCK interrupts. | 118 | * 1. Enable PM3393 BLOCK interrupts. |
129 | 2. Enable PM3393 Master Interrupt bit(INTE) | 119 | * 2. Enable PM3393 Master Interrupt bit(INTE) |
130 | 3. Enable ELMER's PM3393 bit. | 120 | * 3. Enable ELMER's PM3393 bit. |
131 | 4. Enable Terminator external interrupt. | 121 | * 4. Enable Terminator external interrupt. |
132 | */ | 122 | */ |
133 | static int pm3393_interrupt_enable(struct cmac *cmac) | 123 | static int pm3393_interrupt_enable(struct cmac *cmac) |
134 | { | 124 | { |
135 | u32 pl_intr; | 125 | u32 pl_intr; |
@@ -257,14 +247,12 @@ static int pm3393_interrupt_clear(struct cmac *cmac) | |||
257 | static int pm3393_interrupt_handler(struct cmac *cmac) | 247 | static int pm3393_interrupt_handler(struct cmac *cmac) |
258 | { | 248 | { |
259 | u32 master_intr_status; | 249 | u32 master_intr_status; |
260 | /* | 250 | |
261 | 1. Read master interrupt register. | ||
262 | 2. Read BLOCK's interrupt status registers. | ||
263 | 3. Handle BLOCK interrupts. | ||
264 | */ | ||
265 | /* Read the master interrupt status register. */ | 251 | /* Read the master interrupt status register. */ |
266 | pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, | 252 | pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, |
267 | &master_intr_status); | 253 | &master_intr_status); |
254 | CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n", | ||
255 | master_intr_status); | ||
268 | 256 | ||
269 | /* TBD XXX Lets just clear everything for now */ | 257 | /* TBD XXX Lets just clear everything for now */ |
270 | pm3393_interrupt_clear(cmac); | 258 | pm3393_interrupt_clear(cmac); |
@@ -307,11 +295,7 @@ static int pm3393_enable_port(struct cmac *cmac, int which) | |||
307 | * The PHY doesn't give us link status indication on its own so have | 295 | * The PHY doesn't give us link status indication on its own so have |
308 | * the link management code query it instead. | 296 | * the link management code query it instead. |
309 | */ | 297 | */ |
310 | { | 298 | t1_link_changed(cmac->adapter, 0); |
311 | extern void link_changed(adapter_t *adapter, int port_id); | ||
312 | |||
313 | link_changed(cmac->adapter, 0); | ||
314 | } | ||
315 | return 0; | 299 | return 0; |
316 | } | 300 | } |
317 | 301 | ||
@@ -363,33 +347,6 @@ static int pm3393_set_mtu(struct cmac *cmac, int mtu) | |||
363 | return 0; | 347 | return 0; |
364 | } | 348 | } |
365 | 349 | ||
366 | static u32 calc_crc(u8 *b, int len) | ||
367 | { | ||
368 | int i; | ||
369 | u32 crc = (u32)~0; | ||
370 | |||
371 | /* calculate crc one bit at a time */ | ||
372 | while (len--) { | ||
373 | crc ^= *b++; | ||
374 | for (i = 0; i < 8; i++) { | ||
375 | if (crc & 0x1) | ||
376 | crc = (crc >> 1) ^ 0xedb88320; | ||
377 | else | ||
378 | crc = (crc >> 1); | ||
379 | } | ||
380 | } | ||
381 | |||
382 | /* reverse bits */ | ||
383 | crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0); | ||
384 | crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc); | ||
385 | crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa); | ||
386 | /* swap bytes */ | ||
387 | crc = (crc >> 16) | (crc << 16); | ||
388 | crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00); | ||
389 | |||
390 | return crc; | ||
391 | } | ||
392 | |||
393 | static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) | 350 | static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) |
394 | { | 351 | { |
395 | int enabled = cmac->instance->enabled & MAC_DIRECTION_RX; | 352 | int enabled = cmac->instance->enabled & MAC_DIRECTION_RX; |
@@ -423,7 +380,7 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) | |||
423 | u16 mc_filter[4] = { 0, }; | 380 | u16 mc_filter[4] = { 0, }; |
424 | 381 | ||
425 | while ((addr = t1_get_next_mcaddr(rm))) { | 382 | while ((addr = t1_get_next_mcaddr(rm))) { |
426 | bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* bit[23:28] */ | 383 | bit = (ether_crc(ETH_ALEN, addr) >> 23) & 0x3f; /* bit[23:28] */ |
427 | mc_filter[bit >> 4] |= 1 << (bit & 0xf); | 384 | mc_filter[bit >> 4] |= 1 << (bit & 0xf); |
428 | } | 385 | } |
429 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); | 386 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); |
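This patch drops the driver-private calc_crc() (removed above) in favor of ether_crc() from <linux/crc32.h>; bits 23..28 (six bits) of the Ethernet CRC of each multicast address pick one of 64 hash-filter bits, spread across four 16-bit registers. The standalone sketch below reuses the same bit-at-a-time CRC the removed helper computed, purely to illustrate how a hash bit maps onto a filter word:

#include <stdint.h>
#include <stdio.h>

/*
 * Bit-at-a-time CRC-32 with final bit reversal and byte swap, mirroring the
 * removed calc_crc() above (the driver now calls ether_crc() instead).
 */
static uint32_t mcast_crc(const uint8_t *b, int len)
{
	uint32_t crc = ~0u;
	int i;

	while (len--) {
		crc ^= *b++;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320 : crc >> 1;
	}
	/* reverse bits */
	crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
	crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
	crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
	/* swap bytes */
	crc = (crc >> 16) | (crc << 16);
	crc = ((crc >> 8) & 0x00ff00ff) | ((crc << 8) & 0xff00ff00);
	return crc;
}

int main(void)
{
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int bit = (mcast_crc(addr, 6) >> 23) & 0x3f;

	/* Hash bit N lives in bit (N & 0xf) of 16-bit filter word (N >> 4). */
	printf("hash bit %u -> word %u, bit %u\n", bit, bit >> 4, bit & 0xf);
	return 0;
}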
@@ -471,20 +428,29 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, | |||
471 | return 0; | 428 | return 0; |
472 | } | 429 | } |
473 | 430 | ||
431 | static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val, | ||
432 | int over) | ||
433 | { | ||
434 | u32 val0, val1, val2; | ||
435 | |||
436 | t1_tpi_read(adapter, offs, &val0); | ||
437 | t1_tpi_read(adapter, offs + 4, &val1); | ||
438 | t1_tpi_read(adapter, offs + 8, &val2); | ||
439 | |||
440 | *val &= ~0ull << 40; | ||
441 | *val |= val0 & 0xffff; | ||
442 | *val |= (val1 & 0xffff) << 16; | ||
443 | *val |= (u64)(val2 & 0xff) << 32; | ||
444 | |||
445 | if (over) | ||
446 | *val += 1ull << 40; | ||
447 | } | ||
448 | |||
474 | #define RMON_UPDATE(mac, name, stat_name) \ | 449 | #define RMON_UPDATE(mac, name, stat_name) \ |
475 | { \ | 450 | pm3393_rmon_update((mac)->adapter, OFFSET(name), \ |
476 | t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ | 451 | &(mac)->stats.stat_name, \ |
477 | t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \ | 452 | (ro &((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) |
478 | t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \ | 453 | |
479 | (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \ | ||
480 | (((u64)val1 & 0xffff) << 16) | \ | ||
481 | (((u64)val2 & 0xff) << 32) | \ | ||
482 | ((mac)->stats.stat_name & \ | ||
483 | (~(u64)0 << 40)); \ | ||
484 | if (ro & \ | ||
485 | ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)) \ | ||
486 | (mac)->stats.stat_name += ((u64)1 << 40); \ | ||
487 | } | ||
488 | 454 | ||
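The new pm3393_rmon_update() above reassembles a 40-bit hardware counter from three TPI reads (16, 16, and 8 significant bits), preserves whatever the driver has already accumulated above bit 39, and adds 2^40 to that software extension when the caller reports a rollover. A standalone sketch of the same reassembly, with made-up slice values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirror of the 40-bit reassembly in pm3393_rmon_update(). */
static void rmon40_update(uint64_t *val, uint32_t lo16, uint32_t mid16,
			  uint32_t hi8, int rolled_over)
{
	*val &= ~0ull << 40;				/* keep software-extended bits */
	*val |= lo16 & 0xffff;				/* bits  0..15 */
	*val |= (uint64_t)(mid16 & 0xffff) << 16;	/* bits 16..31 */
	*val |= (uint64_t)(hi8 & 0xff) << 32;		/* bits 32..39 */

	if (rolled_over)
		*val += 1ull << 40;
}

int main(void)
{
	uint64_t counter = 3ull << 40;			/* pretend 3 rollovers so far */

	rmon40_update(&counter, 0x1234, 0x5678, 0xab, 1);
	printf("counter = 0x%llx\n", (unsigned long long)counter);
	/* prints counter = 0x4ab56781234 */
	return 0;
}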
489 | static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, | 455 | static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, |
490 | int flag) | 456 | int flag) |
@@ -519,6 +485,8 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, | |||
519 | RMON_UPDATE(mac, RxJabbers, RxJabberErrors); | 485 | RMON_UPDATE(mac, RxJabbers, RxJabberErrors); |
520 | RMON_UPDATE(mac, RxFragments, RxRuntErrors); | 486 | RMON_UPDATE(mac, RxFragments, RxRuntErrors); |
521 | RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); | 487 | RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); |
488 | RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK); | ||
489 | RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK); | ||
522 | 490 | ||
523 | /* Tx stats */ | 491 | /* Tx stats */ |
524 | RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); | 492 | RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); |
@@ -529,6 +497,8 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, | |||
529 | RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); | 497 | RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); |
530 | RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); | 498 | RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); |
531 | RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); | 499 | RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); |
500 | RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK); | ||
501 | RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK); | ||
532 | 502 | ||
533 | return &mac->stats; | 503 | return &mac->stats; |
534 | } | 504 | } |
@@ -631,10 +601,9 @@ static struct cmac *pm3393_mac_create(adapter_t *adapter, int index) | |||
631 | { | 601 | { |
632 | struct cmac *cmac; | 602 | struct cmac *cmac; |
633 | 603 | ||
634 | cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL); | 604 | cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL); |
635 | if (!cmac) | 605 | if (!cmac) |
636 | return NULL; | 606 | return NULL; |
637 | memset(cmac, 0, sizeof(*cmac)); | ||
638 | 607 | ||
639 | cmac->ops = &pm3393_ops; | 608 | cmac->ops = &pm3393_ops; |
640 | cmac->instance = (cmac_instance *) (cmac + 1); | 609 | cmac->instance = (cmac_instance *) (cmac + 1); |
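The allocation change above is the usual kmalloc()-plus-memset() to kzalloc() conversion: kzalloc() hands back already-zeroed memory, so the explicit memset() goes away while the single block still carries both the struct cmac and its trailing cmac_instance. A sketch of the equivalence the conversion relies on (not the kernel's actual implementation):

    /* Assumes <linux/slab.h> and <linux/string.h>; the helper name is made up. */
    static inline void *kzalloc_like(size_t size, gfp_t flags)
    {
        void *p = kmalloc(size, flags);

        if (p)
            memset(p, 0, size);
        return p;
    }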
@@ -815,6 +784,12 @@ static int pm3393_mac_reset(adapter_t * adapter) | |||
815 | 784 | ||
816 | successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock | 785 | successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock |
817 | && is_xaui_mabc_pll_locked); | 786 | && is_xaui_mabc_pll_locked); |
787 | |||
788 | CH_DBG(adapter, HW, | ||
789 | "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, " | ||
790 | "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n", | ||
791 | i, is_pl4_reset_finished, val, is_pl4_outof_lock, | ||
792 | is_xaui_mabc_pll_locked); | ||
818 | } | 793 | } |
819 | return successful_reset ? 0 : 1; | 794 | return successful_reset ? 0 : 1; |
820 | } | 795 | } |
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h index b90e11f40d1f..c80bf4d6d0a6 100644 --- a/drivers/net/chelsio/regs.h +++ b/drivers/net/chelsio/regs.h | |||
@@ -71,6 +71,10 @@ | |||
71 | #define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY) | 71 | #define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY) |
72 | #define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY) | 72 | #define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY) |
73 | 73 | ||
74 | #define S_DISABLE_CMDQ0_GTS 8 | ||
75 | #define V_DISABLE_CMDQ0_GTS(x) ((x) << S_DISABLE_CMDQ0_GTS) | ||
76 | #define F_DISABLE_CMDQ0_GTS V_DISABLE_CMDQ0_GTS(1U) | ||
77 | |||
74 | #define S_DISABLE_CMDQ1_GTS 9 | 78 | #define S_DISABLE_CMDQ1_GTS 9 |
75 | #define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS) | 79 | #define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS) |
76 | #define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U) | 80 | #define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U) |
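All the regs.h additions below follow the file's existing naming scheme: for a field FOO, S_FOO is its bit offset, M_FOO its width mask (multi-bit fields only), V_FOO(x) shifts a value into position, G_FOO(x) extracts it again, and F_FOO is shorthand for V_FOO(1U) on single-bit flags. A minimal usage sketch with macros from this hunk (the helper name and the value 2 are arbitrary; assumes regs.h and the kernel's u32 type are available):

    /* Compose a control word field by field. */
    static u32 build_ctrl_example(void)
    {
        u32 ctrl = 0;

        ctrl |= V_CMDQ_PRIORITY(2);     /* multi-bit field: value shifted up by S_CMDQ_PRIORITY */
        ctrl |= F_DISABLE_CMDQ0_GTS;    /* single-bit flag, i.e. V_DISABLE_CMDQ0_GTS(1U) */
        return ctrl;
    }

    /* Reading back: G_CMDQ_PRIORITY(ctrl) shifts down and masks with M_CMDQ_PRIORITY. */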
@@ -87,12 +91,18 @@ | |||
87 | #define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN) | 91 | #define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN) |
88 | #define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U) | 92 | #define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U) |
89 | 93 | ||
94 | #define S_FL_SELECTION_CRITERIA 13 | ||
95 | #define V_FL_SELECTION_CRITERIA(x) ((x) << S_FL_SELECTION_CRITERIA) | ||
96 | #define F_FL_SELECTION_CRITERIA V_FL_SELECTION_CRITERIA(1U) | ||
97 | |||
90 | #define S_ISCSI_COALESCE 14 | 98 | #define S_ISCSI_COALESCE 14 |
91 | #define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE) | 99 | #define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE) |
92 | #define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U) | 100 | #define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U) |
93 | 101 | ||
94 | #define S_RX_PKT_OFFSET 15 | 102 | #define S_RX_PKT_OFFSET 15 |
103 | #define M_RX_PKT_OFFSET 0x7 | ||
95 | #define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET) | 104 | #define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET) |
105 | #define G_RX_PKT_OFFSET(x) (((x) >> S_RX_PKT_OFFSET) & M_RX_PKT_OFFSET) | ||
96 | 106 | ||
97 | #define S_VLAN_XTRACT 18 | 107 | #define S_VLAN_XTRACT 18 |
98 | #define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT) | 108 | #define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT) |
@@ -108,16 +118,114 @@ | |||
108 | #define A_SG_FL1BASELWR 0x20 | 118 | #define A_SG_FL1BASELWR 0x20 |
109 | #define A_SG_FL1BASEUPR 0x24 | 119 | #define A_SG_FL1BASEUPR 0x24 |
110 | #define A_SG_CMD0SIZE 0x28 | 120 | #define A_SG_CMD0SIZE 0x28 |
121 | |||
122 | #define S_CMDQ0_SIZE 0 | ||
123 | #define M_CMDQ0_SIZE 0x1ffff | ||
124 | #define V_CMDQ0_SIZE(x) ((x) << S_CMDQ0_SIZE) | ||
125 | #define G_CMDQ0_SIZE(x) (((x) >> S_CMDQ0_SIZE) & M_CMDQ0_SIZE) | ||
126 | |||
111 | #define A_SG_FL0SIZE 0x2c | 127 | #define A_SG_FL0SIZE 0x2c |
128 | |||
129 | #define S_FL0_SIZE 0 | ||
130 | #define M_FL0_SIZE 0x1ffff | ||
131 | #define V_FL0_SIZE(x) ((x) << S_FL0_SIZE) | ||
132 | #define G_FL0_SIZE(x) (((x) >> S_FL0_SIZE) & M_FL0_SIZE) | ||
133 | |||
112 | #define A_SG_RSPSIZE 0x30 | 134 | #define A_SG_RSPSIZE 0x30 |
135 | |||
136 | #define S_RESPQ_SIZE 0 | ||
137 | #define M_RESPQ_SIZE 0x1ffff | ||
138 | #define V_RESPQ_SIZE(x) ((x) << S_RESPQ_SIZE) | ||
139 | #define G_RESPQ_SIZE(x) (((x) >> S_RESPQ_SIZE) & M_RESPQ_SIZE) | ||
140 | |||
113 | #define A_SG_RSPBASELWR 0x34 | 141 | #define A_SG_RSPBASELWR 0x34 |
114 | #define A_SG_RSPBASEUPR 0x38 | 142 | #define A_SG_RSPBASEUPR 0x38 |
115 | #define A_SG_FLTHRESHOLD 0x3c | 143 | #define A_SG_FLTHRESHOLD 0x3c |
144 | |||
145 | #define S_FL_THRESHOLD 0 | ||
146 | #define M_FL_THRESHOLD 0xffff | ||
147 | #define V_FL_THRESHOLD(x) ((x) << S_FL_THRESHOLD) | ||
148 | #define G_FL_THRESHOLD(x) (((x) >> S_FL_THRESHOLD) & M_FL_THRESHOLD) | ||
149 | |||
116 | #define A_SG_RSPQUEUECREDIT 0x40 | 150 | #define A_SG_RSPQUEUECREDIT 0x40 |
151 | |||
152 | #define S_RESPQ_CREDIT 0 | ||
153 | #define M_RESPQ_CREDIT 0x1ffff | ||
154 | #define V_RESPQ_CREDIT(x) ((x) << S_RESPQ_CREDIT) | ||
155 | #define G_RESPQ_CREDIT(x) (((x) >> S_RESPQ_CREDIT) & M_RESPQ_CREDIT) | ||
156 | |||
117 | #define A_SG_SLEEPING 0x48 | 157 | #define A_SG_SLEEPING 0x48 |
158 | |||
159 | #define S_SLEEPING 0 | ||
160 | #define M_SLEEPING 0xffff | ||
161 | #define V_SLEEPING(x) ((x) << S_SLEEPING) | ||
162 | #define G_SLEEPING(x) (((x) >> S_SLEEPING) & M_SLEEPING) | ||
163 | |||
118 | #define A_SG_INTRTIMER 0x4c | 164 | #define A_SG_INTRTIMER 0x4c |
165 | |||
166 | #define S_INTERRUPT_TIMER_COUNT 0 | ||
167 | #define M_INTERRUPT_TIMER_COUNT 0xffffff | ||
168 | #define V_INTERRUPT_TIMER_COUNT(x) ((x) << S_INTERRUPT_TIMER_COUNT) | ||
169 | #define G_INTERRUPT_TIMER_COUNT(x) (((x) >> S_INTERRUPT_TIMER_COUNT) & M_INTERRUPT_TIMER_COUNT) | ||
170 | |||
171 | #define A_SG_CMD0PTR 0x50 | ||
172 | |||
173 | #define S_CMDQ0_POINTER 0 | ||
174 | #define M_CMDQ0_POINTER 0xffff | ||
175 | #define V_CMDQ0_POINTER(x) ((x) << S_CMDQ0_POINTER) | ||
176 | #define G_CMDQ0_POINTER(x) (((x) >> S_CMDQ0_POINTER) & M_CMDQ0_POINTER) | ||
177 | |||
178 | #define S_CURRENT_GENERATION_BIT 16 | ||
179 | #define V_CURRENT_GENERATION_BIT(x) ((x) << S_CURRENT_GENERATION_BIT) | ||
180 | #define F_CURRENT_GENERATION_BIT V_CURRENT_GENERATION_BIT(1U) | ||
181 | |||
182 | #define A_SG_CMD1PTR 0x54 | ||
183 | |||
184 | #define S_CMDQ1_POINTER 0 | ||
185 | #define M_CMDQ1_POINTER 0xffff | ||
186 | #define V_CMDQ1_POINTER(x) ((x) << S_CMDQ1_POINTER) | ||
187 | #define G_CMDQ1_POINTER(x) (((x) >> S_CMDQ1_POINTER) & M_CMDQ1_POINTER) | ||
188 | |||
189 | #define A_SG_FL0PTR 0x58 | ||
190 | |||
191 | #define S_FL0_POINTER 0 | ||
192 | #define M_FL0_POINTER 0xffff | ||
193 | #define V_FL0_POINTER(x) ((x) << S_FL0_POINTER) | ||
194 | #define G_FL0_POINTER(x) (((x) >> S_FL0_POINTER) & M_FL0_POINTER) | ||
195 | |||
196 | #define A_SG_FL1PTR 0x5c | ||
197 | |||
198 | #define S_FL1_POINTER 0 | ||
199 | #define M_FL1_POINTER 0xffff | ||
200 | #define V_FL1_POINTER(x) ((x) << S_FL1_POINTER) | ||
201 | #define G_FL1_POINTER(x) (((x) >> S_FL1_POINTER) & M_FL1_POINTER) | ||
202 | |||
203 | #define A_SG_VERSION 0x6c | ||
204 | |||
205 | #define S_DAY 0 | ||
206 | #define M_DAY 0x1f | ||
207 | #define V_DAY(x) ((x) << S_DAY) | ||
208 | #define G_DAY(x) (((x) >> S_DAY) & M_DAY) | ||
209 | |||
210 | #define S_MONTH 5 | ||
211 | #define M_MONTH 0xf | ||
212 | #define V_MONTH(x) ((x) << S_MONTH) | ||
213 | #define G_MONTH(x) (((x) >> S_MONTH) & M_MONTH) | ||
214 | |||
119 | #define A_SG_CMD1SIZE 0xb0 | 215 | #define A_SG_CMD1SIZE 0xb0 |
216 | |||
217 | #define S_CMDQ1_SIZE 0 | ||
218 | #define M_CMDQ1_SIZE 0x1ffff | ||
219 | #define V_CMDQ1_SIZE(x) ((x) << S_CMDQ1_SIZE) | ||
220 | #define G_CMDQ1_SIZE(x) (((x) >> S_CMDQ1_SIZE) & M_CMDQ1_SIZE) | ||
221 | |||
120 | #define A_SG_FL1SIZE 0xb4 | 222 | #define A_SG_FL1SIZE 0xb4 |
223 | |||
224 | #define S_FL1_SIZE 0 | ||
225 | #define M_FL1_SIZE 0x1ffff | ||
226 | #define V_FL1_SIZE(x) ((x) << S_FL1_SIZE) | ||
227 | #define G_FL1_SIZE(x) (((x) >> S_FL1_SIZE) & M_FL1_SIZE) | ||
228 | |||
121 | #define A_SG_INT_ENABLE 0xb8 | 229 | #define A_SG_INT_ENABLE 0xb8 |
122 | 230 | ||
123 | #define S_RESPQ_EXHAUSTED 0 | 231 | #define S_RESPQ_EXHAUSTED 0 |
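Among the SGE additions above, A_SG_VERSION with its DAY (bits 0..4) and MONTH (bits 5..8) fields looks like a packed build-date stamp for the SGE block; decoding it is plain use of the G_ accessors. A sketch, assuming ver already holds the raw register value (how it is read is outside this header) and that the date interpretation is correct:

    /* Hypothetical helper: decode the date fields of a raw A_SG_VERSION word. */
    static void print_sge_version(u32 ver)
    {
        unsigned int day   = G_DAY(ver);    /* 5-bit field at bit 0 */
        unsigned int month = G_MONTH(ver);  /* 4-bit field at bit 5 */

        printk(KERN_INFO "SGE version stamp: month %u, day %u\n", month, day);
    }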
@@ -144,21 +252,369 @@ | |||
144 | #define A_SG_RESPACCUTIMER 0xc0 | 252 | #define A_SG_RESPACCUTIMER 0xc0 |
145 | 253 | ||
146 | /* MC3 registers */ | 254 | /* MC3 registers */ |
255 | #define A_MC3_CFG 0x100 | ||
256 | |||
257 | #define S_CLK_ENABLE 0 | ||
258 | #define V_CLK_ENABLE(x) ((x) << S_CLK_ENABLE) | ||
259 | #define F_CLK_ENABLE V_CLK_ENABLE(1U) | ||
147 | 260 | ||
148 | #define S_READY 1 | 261 | #define S_READY 1 |
149 | #define V_READY(x) ((x) << S_READY) | 262 | #define V_READY(x) ((x) << S_READY) |
150 | #define F_READY V_READY(1U) | 263 | #define F_READY V_READY(1U) |
151 | 264 | ||
152 | /* MC4 registers */ | 265 | #define S_READ_TO_WRITE_DELAY 2 |
266 | #define M_READ_TO_WRITE_DELAY 0x7 | ||
267 | #define V_READ_TO_WRITE_DELAY(x) ((x) << S_READ_TO_WRITE_DELAY) | ||
268 | #define G_READ_TO_WRITE_DELAY(x) (((x) >> S_READ_TO_WRITE_DELAY) & M_READ_TO_WRITE_DELAY) | ||
269 | |||
270 | #define S_WRITE_TO_READ_DELAY 5 | ||
271 | #define M_WRITE_TO_READ_DELAY 0x7 | ||
272 | #define V_WRITE_TO_READ_DELAY(x) ((x) << S_WRITE_TO_READ_DELAY) | ||
273 | #define G_WRITE_TO_READ_DELAY(x) (((x) >> S_WRITE_TO_READ_DELAY) & M_WRITE_TO_READ_DELAY) | ||
153 | 274 | ||
275 | #define S_MC3_BANK_CYCLE 8 | ||
276 | #define M_MC3_BANK_CYCLE 0xf | ||
277 | #define V_MC3_BANK_CYCLE(x) ((x) << S_MC3_BANK_CYCLE) | ||
278 | #define G_MC3_BANK_CYCLE(x) (((x) >> S_MC3_BANK_CYCLE) & M_MC3_BANK_CYCLE) | ||
279 | |||
280 | #define S_REFRESH_CYCLE 12 | ||
281 | #define M_REFRESH_CYCLE 0xf | ||
282 | #define V_REFRESH_CYCLE(x) ((x) << S_REFRESH_CYCLE) | ||
283 | #define G_REFRESH_CYCLE(x) (((x) >> S_REFRESH_CYCLE) & M_REFRESH_CYCLE) | ||
284 | |||
285 | #define S_PRECHARGE_CYCLE 16 | ||
286 | #define M_PRECHARGE_CYCLE 0x3 | ||
287 | #define V_PRECHARGE_CYCLE(x) ((x) << S_PRECHARGE_CYCLE) | ||
288 | #define G_PRECHARGE_CYCLE(x) (((x) >> S_PRECHARGE_CYCLE) & M_PRECHARGE_CYCLE) | ||
289 | |||
290 | #define S_ACTIVE_TO_READ_WRITE_DELAY 18 | ||
291 | #define V_ACTIVE_TO_READ_WRITE_DELAY(x) ((x) << S_ACTIVE_TO_READ_WRITE_DELAY) | ||
292 | #define F_ACTIVE_TO_READ_WRITE_DELAY V_ACTIVE_TO_READ_WRITE_DELAY(1U) | ||
293 | |||
294 | #define S_ACTIVE_TO_PRECHARGE_DELAY 19 | ||
295 | #define M_ACTIVE_TO_PRECHARGE_DELAY 0x7 | ||
296 | #define V_ACTIVE_TO_PRECHARGE_DELAY(x) ((x) << S_ACTIVE_TO_PRECHARGE_DELAY) | ||
297 | #define G_ACTIVE_TO_PRECHARGE_DELAY(x) (((x) >> S_ACTIVE_TO_PRECHARGE_DELAY) & M_ACTIVE_TO_PRECHARGE_DELAY) | ||
298 | |||
299 | #define S_WRITE_RECOVERY_DELAY 22 | ||
300 | #define M_WRITE_RECOVERY_DELAY 0x3 | ||
301 | #define V_WRITE_RECOVERY_DELAY(x) ((x) << S_WRITE_RECOVERY_DELAY) | ||
302 | #define G_WRITE_RECOVERY_DELAY(x) (((x) >> S_WRITE_RECOVERY_DELAY) & M_WRITE_RECOVERY_DELAY) | ||
303 | |||
304 | #define S_DENSITY 24 | ||
305 | #define M_DENSITY 0x3 | ||
306 | #define V_DENSITY(x) ((x) << S_DENSITY) | ||
307 | #define G_DENSITY(x) (((x) >> S_DENSITY) & M_DENSITY) | ||
308 | |||
309 | #define S_ORGANIZATION 26 | ||
310 | #define V_ORGANIZATION(x) ((x) << S_ORGANIZATION) | ||
311 | #define F_ORGANIZATION V_ORGANIZATION(1U) | ||
312 | |||
313 | #define S_BANKS 27 | ||
314 | #define V_BANKS(x) ((x) << S_BANKS) | ||
315 | #define F_BANKS V_BANKS(1U) | ||
316 | |||
317 | #define S_UNREGISTERED 28 | ||
318 | #define V_UNREGISTERED(x) ((x) << S_UNREGISTERED) | ||
319 | #define F_UNREGISTERED V_UNREGISTERED(1U) | ||
320 | |||
321 | #define S_MC3_WIDTH 29 | ||
322 | #define M_MC3_WIDTH 0x3 | ||
323 | #define V_MC3_WIDTH(x) ((x) << S_MC3_WIDTH) | ||
324 | #define G_MC3_WIDTH(x) (((x) >> S_MC3_WIDTH) & M_MC3_WIDTH) | ||
325 | |||
326 | #define S_MC3_SLOW 31 | ||
327 | #define V_MC3_SLOW(x) ((x) << S_MC3_SLOW) | ||
328 | #define F_MC3_SLOW V_MC3_SLOW(1U) | ||
329 | |||
330 | #define A_MC3_MODE 0x104 | ||
331 | |||
332 | #define S_MC3_MODE 0 | ||
333 | #define M_MC3_MODE 0x3fff | ||
334 | #define V_MC3_MODE(x) ((x) << S_MC3_MODE) | ||
335 | #define G_MC3_MODE(x) (((x) >> S_MC3_MODE) & M_MC3_MODE) | ||
336 | |||
337 | #define S_BUSY 31 | ||
338 | #define V_BUSY(x) ((x) << S_BUSY) | ||
339 | #define F_BUSY V_BUSY(1U) | ||
340 | |||
341 | #define A_MC3_EXT_MODE 0x108 | ||
342 | |||
343 | #define S_MC3_EXTENDED_MODE 0 | ||
344 | #define M_MC3_EXTENDED_MODE 0x3fff | ||
345 | #define V_MC3_EXTENDED_MODE(x) ((x) << S_MC3_EXTENDED_MODE) | ||
346 | #define G_MC3_EXTENDED_MODE(x) (((x) >> S_MC3_EXTENDED_MODE) & M_MC3_EXTENDED_MODE) | ||
347 | |||
348 | #define A_MC3_PRECHARG 0x10c | ||
349 | #define A_MC3_REFRESH 0x110 | ||
350 | |||
351 | #define S_REFRESH_ENABLE 0 | ||
352 | #define V_REFRESH_ENABLE(x) ((x) << S_REFRESH_ENABLE) | ||
353 | #define F_REFRESH_ENABLE V_REFRESH_ENABLE(1U) | ||
354 | |||
355 | #define S_REFRESH_DIVISOR 1 | ||
356 | #define M_REFRESH_DIVISOR 0x3fff | ||
357 | #define V_REFRESH_DIVISOR(x) ((x) << S_REFRESH_DIVISOR) | ||
358 | #define G_REFRESH_DIVISOR(x) (((x) >> S_REFRESH_DIVISOR) & M_REFRESH_DIVISOR) | ||
359 | |||
360 | #define A_MC3_STROBE 0x114 | ||
361 | |||
362 | #define S_MASTER_DLL_RESET 0 | ||
363 | #define V_MASTER_DLL_RESET(x) ((x) << S_MASTER_DLL_RESET) | ||
364 | #define F_MASTER_DLL_RESET V_MASTER_DLL_RESET(1U) | ||
365 | |||
366 | #define S_MASTER_DLL_TAP_COUNT 1 | ||
367 | #define M_MASTER_DLL_TAP_COUNT 0xff | ||
368 | #define V_MASTER_DLL_TAP_COUNT(x) ((x) << S_MASTER_DLL_TAP_COUNT) | ||
369 | #define G_MASTER_DLL_TAP_COUNT(x) (((x) >> S_MASTER_DLL_TAP_COUNT) & M_MASTER_DLL_TAP_COUNT) | ||
370 | |||
371 | #define S_MASTER_DLL_LOCKED 9 | ||
372 | #define V_MASTER_DLL_LOCKED(x) ((x) << S_MASTER_DLL_LOCKED) | ||
373 | #define F_MASTER_DLL_LOCKED V_MASTER_DLL_LOCKED(1U) | ||
374 | |||
375 | #define S_MASTER_DLL_MAX_TAP_COUNT 10 | ||
376 | #define V_MASTER_DLL_MAX_TAP_COUNT(x) ((x) << S_MASTER_DLL_MAX_TAP_COUNT) | ||
377 | #define F_MASTER_DLL_MAX_TAP_COUNT V_MASTER_DLL_MAX_TAP_COUNT(1U) | ||
378 | |||
379 | #define S_MASTER_DLL_TAP_COUNT_OFFSET 11 | ||
380 | #define M_MASTER_DLL_TAP_COUNT_OFFSET 0x3f | ||
381 | #define V_MASTER_DLL_TAP_COUNT_OFFSET(x) ((x) << S_MASTER_DLL_TAP_COUNT_OFFSET) | ||
382 | #define G_MASTER_DLL_TAP_COUNT_OFFSET(x) (((x) >> S_MASTER_DLL_TAP_COUNT_OFFSET) & M_MASTER_DLL_TAP_COUNT_OFFSET) | ||
383 | |||
384 | #define S_SLAVE_DLL_RESET 11 | ||
385 | #define V_SLAVE_DLL_RESET(x) ((x) << S_SLAVE_DLL_RESET) | ||
386 | #define F_SLAVE_DLL_RESET V_SLAVE_DLL_RESET(1U) | ||
387 | |||
388 | #define S_SLAVE_DLL_DELTA 12 | ||
389 | #define M_SLAVE_DLL_DELTA 0xf | ||
390 | #define V_SLAVE_DLL_DELTA(x) ((x) << S_SLAVE_DLL_DELTA) | ||
391 | #define G_SLAVE_DLL_DELTA(x) (((x) >> S_SLAVE_DLL_DELTA) & M_SLAVE_DLL_DELTA) | ||
392 | |||
393 | #define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 17 | ||
394 | #define M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 0x3f | ||
395 | #define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) | ||
396 | #define G_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) & M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) | ||
397 | |||
398 | #define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE 23 | ||
399 | #define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE) | ||
400 | #define F_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(1U) | ||
401 | |||
402 | #define S_SLAVE_DELAY_LINE_TAP_COUNT 24 | ||
403 | #define M_SLAVE_DELAY_LINE_TAP_COUNT 0x3f | ||
404 | #define V_SLAVE_DELAY_LINE_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_TAP_COUNT) | ||
405 | #define G_SLAVE_DELAY_LINE_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_TAP_COUNT) & M_SLAVE_DELAY_LINE_TAP_COUNT) | ||
406 | |||
407 | #define A_MC3_ECC_CNTL 0x118 | ||
408 | |||
409 | #define S_ECC_GENERATION_ENABLE 0 | ||
410 | #define V_ECC_GENERATION_ENABLE(x) ((x) << S_ECC_GENERATION_ENABLE) | ||
411 | #define F_ECC_GENERATION_ENABLE V_ECC_GENERATION_ENABLE(1U) | ||
412 | |||
413 | #define S_ECC_CHECK_ENABLE 1 | ||
414 | #define V_ECC_CHECK_ENABLE(x) ((x) << S_ECC_CHECK_ENABLE) | ||
415 | #define F_ECC_CHECK_ENABLE V_ECC_CHECK_ENABLE(1U) | ||
416 | |||
417 | #define S_CORRECTABLE_ERROR_COUNT 2 | ||
418 | #define M_CORRECTABLE_ERROR_COUNT 0xff | ||
419 | #define V_CORRECTABLE_ERROR_COUNT(x) ((x) << S_CORRECTABLE_ERROR_COUNT) | ||
420 | #define G_CORRECTABLE_ERROR_COUNT(x) (((x) >> S_CORRECTABLE_ERROR_COUNT) & M_CORRECTABLE_ERROR_COUNT) | ||
421 | |||
422 | #define S_UNCORRECTABLE_ERROR_COUNT 10 | ||
423 | #define M_UNCORRECTABLE_ERROR_COUNT 0xff | ||
424 | #define V_UNCORRECTABLE_ERROR_COUNT(x) ((x) << S_UNCORRECTABLE_ERROR_COUNT) | ||
425 | #define G_UNCORRECTABLE_ERROR_COUNT(x) (((x) >> S_UNCORRECTABLE_ERROR_COUNT) & M_UNCORRECTABLE_ERROR_COUNT) | ||
426 | |||
427 | #define A_MC3_CE_ADDR 0x11c | ||
428 | |||
429 | #define S_MC3_CE_ADDR 4 | ||
430 | #define M_MC3_CE_ADDR 0xfffffff | ||
431 | #define V_MC3_CE_ADDR(x) ((x) << S_MC3_CE_ADDR) | ||
432 | #define G_MC3_CE_ADDR(x) (((x) >> S_MC3_CE_ADDR) & M_MC3_CE_ADDR) | ||
433 | |||
434 | #define A_MC3_CE_DATA0 0x120 | ||
435 | #define A_MC3_CE_DATA1 0x124 | ||
436 | #define A_MC3_CE_DATA2 0x128 | ||
437 | #define A_MC3_CE_DATA3 0x12c | ||
438 | #define A_MC3_CE_DATA4 0x130 | ||
439 | #define A_MC3_UE_ADDR 0x134 | ||
440 | |||
441 | #define S_MC3_UE_ADDR 4 | ||
442 | #define M_MC3_UE_ADDR 0xfffffff | ||
443 | #define V_MC3_UE_ADDR(x) ((x) << S_MC3_UE_ADDR) | ||
444 | #define G_MC3_UE_ADDR(x) (((x) >> S_MC3_UE_ADDR) & M_MC3_UE_ADDR) | ||
445 | |||
446 | #define A_MC3_UE_DATA0 0x138 | ||
447 | #define A_MC3_UE_DATA1 0x13c | ||
448 | #define A_MC3_UE_DATA2 0x140 | ||
449 | #define A_MC3_UE_DATA3 0x144 | ||
450 | #define A_MC3_UE_DATA4 0x148 | ||
451 | #define A_MC3_BD_ADDR 0x14c | ||
452 | #define A_MC3_BD_DATA0 0x150 | ||
453 | #define A_MC3_BD_DATA1 0x154 | ||
454 | #define A_MC3_BD_DATA2 0x158 | ||
455 | #define A_MC3_BD_DATA3 0x15c | ||
456 | #define A_MC3_BD_DATA4 0x160 | ||
457 | #define A_MC3_BD_OP 0x164 | ||
458 | |||
459 | #define S_BACK_DOOR_OPERATION 0 | ||
460 | #define V_BACK_DOOR_OPERATION(x) ((x) << S_BACK_DOOR_OPERATION) | ||
461 | #define F_BACK_DOOR_OPERATION V_BACK_DOOR_OPERATION(1U) | ||
462 | |||
463 | #define A_MC3_BIST_ADDR_BEG 0x168 | ||
464 | #define A_MC3_BIST_ADDR_END 0x16c | ||
465 | #define A_MC3_BIST_DATA 0x170 | ||
466 | #define A_MC3_BIST_OP 0x174 | ||
467 | |||
468 | #define S_OP 0 | ||
469 | #define V_OP(x) ((x) << S_OP) | ||
470 | #define F_OP V_OP(1U) | ||
471 | |||
472 | #define S_DATA_PATTERN 1 | ||
473 | #define M_DATA_PATTERN 0x3 | ||
474 | #define V_DATA_PATTERN(x) ((x) << S_DATA_PATTERN) | ||
475 | #define G_DATA_PATTERN(x) (((x) >> S_DATA_PATTERN) & M_DATA_PATTERN) | ||
476 | |||
477 | #define S_CONTINUOUS 3 | ||
478 | #define V_CONTINUOUS(x) ((x) << S_CONTINUOUS) | ||
479 | #define F_CONTINUOUS V_CONTINUOUS(1U) | ||
480 | |||
481 | #define A_MC3_INT_ENABLE 0x178 | ||
482 | |||
483 | #define S_MC3_CORR_ERR 0 | ||
484 | #define V_MC3_CORR_ERR(x) ((x) << S_MC3_CORR_ERR) | ||
485 | #define F_MC3_CORR_ERR V_MC3_CORR_ERR(1U) | ||
486 | |||
487 | #define S_MC3_UNCORR_ERR 1 | ||
488 | #define V_MC3_UNCORR_ERR(x) ((x) << S_MC3_UNCORR_ERR) | ||
489 | #define F_MC3_UNCORR_ERR V_MC3_UNCORR_ERR(1U) | ||
490 | |||
491 | #define S_MC3_PARITY_ERR 2 | ||
492 | #define M_MC3_PARITY_ERR 0xff | ||
493 | #define V_MC3_PARITY_ERR(x) ((x) << S_MC3_PARITY_ERR) | ||
494 | #define G_MC3_PARITY_ERR(x) (((x) >> S_MC3_PARITY_ERR) & M_MC3_PARITY_ERR) | ||
495 | |||
496 | #define S_MC3_ADDR_ERR 10 | ||
497 | #define V_MC3_ADDR_ERR(x) ((x) << S_MC3_ADDR_ERR) | ||
498 | #define F_MC3_ADDR_ERR V_MC3_ADDR_ERR(1U) | ||
499 | |||
500 | #define A_MC3_INT_CAUSE 0x17c | ||
501 | |||
502 | /* MC4 registers */ | ||
154 | #define A_MC4_CFG 0x180 | 503 | #define A_MC4_CFG 0x180 |
504 | |||
505 | #define S_POWER_UP 0 | ||
506 | #define V_POWER_UP(x) ((x) << S_POWER_UP) | ||
507 | #define F_POWER_UP V_POWER_UP(1U) | ||
508 | |||
509 | #define S_MC4_BANK_CYCLE 8 | ||
510 | #define M_MC4_BANK_CYCLE 0x7 | ||
511 | #define V_MC4_BANK_CYCLE(x) ((x) << S_MC4_BANK_CYCLE) | ||
512 | #define G_MC4_BANK_CYCLE(x) (((x) >> S_MC4_BANK_CYCLE) & M_MC4_BANK_CYCLE) | ||
513 | |||
514 | #define S_MC4_NARROW 24 | ||
515 | #define V_MC4_NARROW(x) ((x) << S_MC4_NARROW) | ||
516 | #define F_MC4_NARROW V_MC4_NARROW(1U) | ||
517 | |||
155 | #define S_MC4_SLOW 25 | 518 | #define S_MC4_SLOW 25 |
156 | #define V_MC4_SLOW(x) ((x) << S_MC4_SLOW) | 519 | #define V_MC4_SLOW(x) ((x) << S_MC4_SLOW) |
157 | #define F_MC4_SLOW V_MC4_SLOW(1U) | 520 | #define F_MC4_SLOW V_MC4_SLOW(1U) |
158 | 521 | ||
159 | /* TPI registers */ | 522 | #define S_MC4A_WIDTH 24 |
523 | #define M_MC4A_WIDTH 0x3 | ||
524 | #define V_MC4A_WIDTH(x) ((x) << S_MC4A_WIDTH) | ||
525 | #define G_MC4A_WIDTH(x) (((x) >> S_MC4A_WIDTH) & M_MC4A_WIDTH) | ||
526 | |||
527 | #define S_MC4A_SLOW 26 | ||
528 | #define V_MC4A_SLOW(x) ((x) << S_MC4A_SLOW) | ||
529 | #define F_MC4A_SLOW V_MC4A_SLOW(1U) | ||
530 | |||
531 | #define A_MC4_MODE 0x184 | ||
532 | |||
533 | #define S_MC4_MODE 0 | ||
534 | #define M_MC4_MODE 0x7fff | ||
535 | #define V_MC4_MODE(x) ((x) << S_MC4_MODE) | ||
536 | #define G_MC4_MODE(x) (((x) >> S_MC4_MODE) & M_MC4_MODE) | ||
537 | |||
538 | #define A_MC4_EXT_MODE 0x188 | ||
539 | |||
540 | #define S_MC4_EXTENDED_MODE 0 | ||
541 | #define M_MC4_EXTENDED_MODE 0x7fff | ||
542 | #define V_MC4_EXTENDED_MODE(x) ((x) << S_MC4_EXTENDED_MODE) | ||
543 | #define G_MC4_EXTENDED_MODE(x) (((x) >> S_MC4_EXTENDED_MODE) & M_MC4_EXTENDED_MODE) | ||
544 | |||
545 | #define A_MC4_REFRESH 0x190 | ||
546 | #define A_MC4_STROBE 0x194 | ||
547 | #define A_MC4_ECC_CNTL 0x198 | ||
548 | #define A_MC4_CE_ADDR 0x19c | ||
549 | |||
550 | #define S_MC4_CE_ADDR 4 | ||
551 | #define M_MC4_CE_ADDR 0xffffff | ||
552 | #define V_MC4_CE_ADDR(x) ((x) << S_MC4_CE_ADDR) | ||
553 | #define G_MC4_CE_ADDR(x) (((x) >> S_MC4_CE_ADDR) & M_MC4_CE_ADDR) | ||
554 | |||
555 | #define A_MC4_CE_DATA0 0x1a0 | ||
556 | #define A_MC4_CE_DATA1 0x1a4 | ||
557 | #define A_MC4_CE_DATA2 0x1a8 | ||
558 | #define A_MC4_CE_DATA3 0x1ac | ||
559 | #define A_MC4_CE_DATA4 0x1b0 | ||
560 | #define A_MC4_UE_ADDR 0x1b4 | ||
561 | |||
562 | #define S_MC4_UE_ADDR 4 | ||
563 | #define M_MC4_UE_ADDR 0xffffff | ||
564 | #define V_MC4_UE_ADDR(x) ((x) << S_MC4_UE_ADDR) | ||
565 | #define G_MC4_UE_ADDR(x) (((x) >> S_MC4_UE_ADDR) & M_MC4_UE_ADDR) | ||
566 | |||
567 | #define A_MC4_UE_DATA0 0x1b8 | ||
568 | #define A_MC4_UE_DATA1 0x1bc | ||
569 | #define A_MC4_UE_DATA2 0x1c0 | ||
570 | #define A_MC4_UE_DATA3 0x1c4 | ||
571 | #define A_MC4_UE_DATA4 0x1c8 | ||
572 | #define A_MC4_BD_ADDR 0x1cc | ||
573 | |||
574 | #define S_MC4_BACK_DOOR_ADDR 0 | ||
575 | #define M_MC4_BACK_DOOR_ADDR 0xfffffff | ||
576 | #define V_MC4_BACK_DOOR_ADDR(x) ((x) << S_MC4_BACK_DOOR_ADDR) | ||
577 | #define G_MC4_BACK_DOOR_ADDR(x) (((x) >> S_MC4_BACK_DOOR_ADDR) & M_MC4_BACK_DOOR_ADDR) | ||
578 | |||
579 | #define A_MC4_BD_DATA0 0x1d0 | ||
580 | #define A_MC4_BD_DATA1 0x1d4 | ||
581 | #define A_MC4_BD_DATA2 0x1d8 | ||
582 | #define A_MC4_BD_DATA3 0x1dc | ||
583 | #define A_MC4_BD_DATA4 0x1e0 | ||
584 | #define A_MC4_BD_OP 0x1e4 | ||
585 | |||
586 | #define S_OPERATION 0 | ||
587 | #define V_OPERATION(x) ((x) << S_OPERATION) | ||
588 | #define F_OPERATION V_OPERATION(1U) | ||
589 | |||
590 | #define A_MC4_BIST_ADDR_BEG 0x1e8 | ||
591 | #define A_MC4_BIST_ADDR_END 0x1ec | ||
592 | #define A_MC4_BIST_DATA 0x1f0 | ||
593 | #define A_MC4_BIST_OP 0x1f4 | ||
594 | #define A_MC4_INT_ENABLE 0x1f8 | ||
595 | |||
596 | #define S_MC4_CORR_ERR 0 | ||
597 | #define V_MC4_CORR_ERR(x) ((x) << S_MC4_CORR_ERR) | ||
598 | #define F_MC4_CORR_ERR V_MC4_CORR_ERR(1U) | ||
599 | |||
600 | #define S_MC4_UNCORR_ERR 1 | ||
601 | #define V_MC4_UNCORR_ERR(x) ((x) << S_MC4_UNCORR_ERR) | ||
602 | #define F_MC4_UNCORR_ERR V_MC4_UNCORR_ERR(1U) | ||
603 | |||
604 | #define S_MC4_ADDR_ERR 2 | ||
605 | #define V_MC4_ADDR_ERR(x) ((x) << S_MC4_ADDR_ERR) | ||
606 | #define F_MC4_ADDR_ERR V_MC4_ADDR_ERR(1U) | ||
607 | |||
608 | #define A_MC4_INT_CAUSE 0x1fc | ||
160 | 609 | ||
610 | /* TPI registers */ | ||
161 | #define A_TPI_ADDR 0x280 | 611 | #define A_TPI_ADDR 0x280 |
612 | |||
613 | #define S_TPI_ADDRESS 0 | ||
614 | #define M_TPI_ADDRESS 0xffffff | ||
615 | #define V_TPI_ADDRESS(x) ((x) << S_TPI_ADDRESS) | ||
616 | #define G_TPI_ADDRESS(x) (((x) >> S_TPI_ADDRESS) & M_TPI_ADDRESS) | ||
617 | |||
162 | #define A_TPI_WR_DATA 0x284 | 618 | #define A_TPI_WR_DATA 0x284 |
163 | #define A_TPI_RD_DATA 0x288 | 619 | #define A_TPI_RD_DATA 0x288 |
164 | #define A_TPI_CSR 0x28c | 620 | #define A_TPI_CSR 0x28c |
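The TPI quartet above (A_TPI_ADDR, A_TPI_WR_DATA, A_TPI_RD_DATA and A_TPI_CSR with its TPIRDY bit) is presumably the indirect window behind the t1_tpi_read() calls seen in pm3393.c earlier in this diff: external parts such as the PM3393 are reached through address/data/status registers rather than by direct MMIO. The handshake itself lives in the driver's helpers and is not spelled out here, so the sketch below is only one plausible reading; the CSR trigger value and the polling details are assumptions, not taken from the source:

    /* Hypothetical illustration of an indirect TPI read; the real driver
     * implements this in t1_tpi_read().  Trigger/poll details are assumed. */
    static int tpi_read_sketch(void __iomem *regs, u32 addr, u32 *valp)
    {
        int attempts = 1000;

        writel(addr, regs + A_TPI_ADDR);   /* select the indirect address */
        writel(0, regs + A_TPI_CSR);       /* start a read (assumed trigger value) */

        while (!(readl(regs + A_TPI_CSR) & F_TPIRDY)) {  /* assumed: TPIRDY set on completion */
            if (!--attempts)
                return -1;
            udelay(3);
        }
        *valp = readl(regs + A_TPI_RD_DATA);
        return 0;
    }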
@@ -171,6 +627,10 @@ | |||
171 | #define V_TPIRDY(x) ((x) << S_TPIRDY) | 627 | #define V_TPIRDY(x) ((x) << S_TPIRDY) |
172 | #define F_TPIRDY V_TPIRDY(1U) | 628 | #define F_TPIRDY V_TPIRDY(1U) |
173 | 629 | ||
630 | #define S_INT_DIR 31 | ||
631 | #define V_INT_DIR(x) ((x) << S_INT_DIR) | ||
632 | #define F_INT_DIR V_INT_DIR(1U) | ||
633 | |||
174 | #define A_TPI_PAR 0x29c | 634 | #define A_TPI_PAR 0x29c |
175 | 635 | ||
176 | #define S_TPIPAR 0 | 636 | #define S_TPIPAR 0 |
@@ -178,14 +638,26 @@ | |||
178 | #define V_TPIPAR(x) ((x) << S_TPIPAR) | 638 | #define V_TPIPAR(x) ((x) << S_TPIPAR) |
179 | #define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR) | 639 | #define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR) |
180 | 640 | ||
181 | /* TP registers */ | ||
182 | 641 | ||
642 | /* TP registers */ | ||
183 | #define A_TP_IN_CONFIG 0x300 | 643 | #define A_TP_IN_CONFIG 0x300 |
184 | 644 | ||
645 | #define S_TP_IN_CSPI_TUNNEL 0 | ||
646 | #define V_TP_IN_CSPI_TUNNEL(x) ((x) << S_TP_IN_CSPI_TUNNEL) | ||
647 | #define F_TP_IN_CSPI_TUNNEL V_TP_IN_CSPI_TUNNEL(1U) | ||
648 | |||
649 | #define S_TP_IN_CSPI_ETHERNET 1 | ||
650 | #define V_TP_IN_CSPI_ETHERNET(x) ((x) << S_TP_IN_CSPI_ETHERNET) | ||
651 | #define F_TP_IN_CSPI_ETHERNET V_TP_IN_CSPI_ETHERNET(1U) | ||
652 | |||
185 | #define S_TP_IN_CSPI_CPL 3 | 653 | #define S_TP_IN_CSPI_CPL 3 |
186 | #define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL) | 654 | #define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL) |
187 | #define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U) | 655 | #define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U) |
188 | 656 | ||
657 | #define S_TP_IN_CSPI_POS 4 | ||
658 | #define V_TP_IN_CSPI_POS(x) ((x) << S_TP_IN_CSPI_POS) | ||
659 | #define F_TP_IN_CSPI_POS V_TP_IN_CSPI_POS(1U) | ||
660 | |||
189 | #define S_TP_IN_CSPI_CHECK_IP_CSUM 5 | 661 | #define S_TP_IN_CSPI_CHECK_IP_CSUM 5 |
190 | #define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM) | 662 | #define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM) |
191 | #define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U) | 663 | #define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U) |
@@ -194,10 +666,22 @@ | |||
194 | #define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM) | 666 | #define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM) |
195 | #define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U) | 667 | #define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U) |
196 | 668 | ||
669 | #define S_TP_IN_ESPI_TUNNEL 7 | ||
670 | #define V_TP_IN_ESPI_TUNNEL(x) ((x) << S_TP_IN_ESPI_TUNNEL) | ||
671 | #define F_TP_IN_ESPI_TUNNEL V_TP_IN_ESPI_TUNNEL(1U) | ||
672 | |||
197 | #define S_TP_IN_ESPI_ETHERNET 8 | 673 | #define S_TP_IN_ESPI_ETHERNET 8 |
198 | #define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET) | 674 | #define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET) |
199 | #define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U) | 675 | #define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U) |
200 | 676 | ||
677 | #define S_TP_IN_ESPI_CPL 10 | ||
678 | #define V_TP_IN_ESPI_CPL(x) ((x) << S_TP_IN_ESPI_CPL) | ||
679 | #define F_TP_IN_ESPI_CPL V_TP_IN_ESPI_CPL(1U) | ||
680 | |||
681 | #define S_TP_IN_ESPI_POS 11 | ||
682 | #define V_TP_IN_ESPI_POS(x) ((x) << S_TP_IN_ESPI_POS) | ||
683 | #define F_TP_IN_ESPI_POS V_TP_IN_ESPI_POS(1U) | ||
684 | |||
201 | #define S_TP_IN_ESPI_CHECK_IP_CSUM 12 | 685 | #define S_TP_IN_ESPI_CHECK_IP_CSUM 12 |
202 | #define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM) | 686 | #define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM) |
203 | #define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U) | 687 | #define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U) |
@@ -212,14 +696,42 @@ | |||
212 | 696 | ||
213 | #define A_TP_OUT_CONFIG 0x304 | 697 | #define A_TP_OUT_CONFIG 0x304 |
214 | 698 | ||
699 | #define S_TP_OUT_C_ETH 0 | ||
700 | #define V_TP_OUT_C_ETH(x) ((x) << S_TP_OUT_C_ETH) | ||
701 | #define F_TP_OUT_C_ETH V_TP_OUT_C_ETH(1U) | ||
702 | |||
215 | #define S_TP_OUT_CSPI_CPL 2 | 703 | #define S_TP_OUT_CSPI_CPL 2 |
216 | #define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL) | 704 | #define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL) |
217 | #define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U) | 705 | #define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U) |
218 | 706 | ||
707 | #define S_TP_OUT_CSPI_POS 3 | ||
708 | #define V_TP_OUT_CSPI_POS(x) ((x) << S_TP_OUT_CSPI_POS) | ||
709 | #define F_TP_OUT_CSPI_POS V_TP_OUT_CSPI_POS(1U) | ||
710 | |||
711 | #define S_TP_OUT_CSPI_GENERATE_IP_CSUM 4 | ||
712 | #define V_TP_OUT_CSPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_IP_CSUM) | ||
713 | #define F_TP_OUT_CSPI_GENERATE_IP_CSUM V_TP_OUT_CSPI_GENERATE_IP_CSUM(1U) | ||
714 | |||
715 | #define S_TP_OUT_CSPI_GENERATE_TCP_CSUM 5 | ||
716 | #define V_TP_OUT_CSPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_TCP_CSUM) | ||
717 | #define F_TP_OUT_CSPI_GENERATE_TCP_CSUM V_TP_OUT_CSPI_GENERATE_TCP_CSUM(1U) | ||
718 | |||
219 | #define S_TP_OUT_ESPI_ETHERNET 6 | 719 | #define S_TP_OUT_ESPI_ETHERNET 6 |
220 | #define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET) | 720 | #define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET) |
221 | #define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U) | 721 | #define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U) |
222 | 722 | ||
723 | #define S_TP_OUT_ESPI_TAG_ETHERNET 7 | ||
724 | #define V_TP_OUT_ESPI_TAG_ETHERNET(x) ((x) << S_TP_OUT_ESPI_TAG_ETHERNET) | ||
725 | #define F_TP_OUT_ESPI_TAG_ETHERNET V_TP_OUT_ESPI_TAG_ETHERNET(1U) | ||
726 | |||
727 | #define S_TP_OUT_ESPI_CPL 8 | ||
728 | #define V_TP_OUT_ESPI_CPL(x) ((x) << S_TP_OUT_ESPI_CPL) | ||
729 | #define F_TP_OUT_ESPI_CPL V_TP_OUT_ESPI_CPL(1U) | ||
730 | |||
731 | #define S_TP_OUT_ESPI_POS 9 | ||
732 | #define V_TP_OUT_ESPI_POS(x) ((x) << S_TP_OUT_ESPI_POS) | ||
733 | #define F_TP_OUT_ESPI_POS V_TP_OUT_ESPI_POS(1U) | ||
734 | |||
223 | #define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10 | 735 | #define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10 |
224 | #define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM) | 736 | #define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM) |
225 | #define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U) | 737 | #define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U) |
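The TP_IN_CONFIG and TP_OUT_CONFIG bits above appear to be per-direction steering and offload masks: the *_CSPI_* and *_ESPI_* flags mark how traffic to or from each interface is classified (tunnel, Ethernet, CPL, POS) and whether IP/TCP checksums are checked on ingress or generated on egress. A hedged sketch of composing such a configuration with flags visible in this diff (illustrative values only, not the driver's actual tp.c setup; assumes regs.h and writel()):

    static void tp_config_example(void __iomem *regs)
    {
        u32 in  = F_TP_IN_ESPI_ETHERNET  | F_TP_IN_ESPI_CHECK_IP_CSUM;
        u32 out = F_TP_OUT_ESPI_ETHERNET | F_TP_OUT_ESPI_GENERATE_IP_CSUM;

        writel(in,  regs + A_TP_IN_CONFIG);   /* ingress classification + checksum checking */
        writel(out, regs + A_TP_OUT_CONFIG);  /* egress classification + checksum generation */
    }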
@@ -233,6 +745,16 @@ | |||
233 | #define S_IP_TTL 0 | 745 | #define S_IP_TTL 0 |
234 | #define M_IP_TTL 0xff | 746 | #define M_IP_TTL 0xff |
235 | #define V_IP_TTL(x) ((x) << S_IP_TTL) | 747 | #define V_IP_TTL(x) ((x) << S_IP_TTL) |
748 | #define G_IP_TTL(x) (((x) >> S_IP_TTL) & M_IP_TTL) | ||
749 | |||
750 | #define S_TCAM_SERVER_REGION_USAGE 8 | ||
751 | #define M_TCAM_SERVER_REGION_USAGE 0x3 | ||
752 | #define V_TCAM_SERVER_REGION_USAGE(x) ((x) << S_TCAM_SERVER_REGION_USAGE) | ||
753 | #define G_TCAM_SERVER_REGION_USAGE(x) (((x) >> S_TCAM_SERVER_REGION_USAGE) & M_TCAM_SERVER_REGION_USAGE) | ||
754 | |||
755 | #define S_QOS_MAPPING 10 | ||
756 | #define V_QOS_MAPPING(x) ((x) << S_QOS_MAPPING) | ||
757 | #define F_QOS_MAPPING V_QOS_MAPPING(1U) | ||
236 | 758 | ||
237 | #define S_TCP_CSUM 11 | 759 | #define S_TCP_CSUM 11 |
238 | #define V_TCP_CSUM(x) ((x) << S_TCP_CSUM) | 760 | #define V_TCP_CSUM(x) ((x) << S_TCP_CSUM) |
@@ -246,31 +768,476 @@ | |||
246 | #define V_IP_CSUM(x) ((x) << S_IP_CSUM) | 768 | #define V_IP_CSUM(x) ((x) << S_IP_CSUM) |
247 | #define F_IP_CSUM V_IP_CSUM(1U) | 769 | #define F_IP_CSUM V_IP_CSUM(1U) |
248 | 770 | ||
771 | #define S_IP_ID_SPLIT 14 | ||
772 | #define V_IP_ID_SPLIT(x) ((x) << S_IP_ID_SPLIT) | ||
773 | #define F_IP_ID_SPLIT V_IP_ID_SPLIT(1U) | ||
774 | |||
249 | #define S_PATH_MTU 15 | 775 | #define S_PATH_MTU 15 |
250 | #define V_PATH_MTU(x) ((x) << S_PATH_MTU) | 776 | #define V_PATH_MTU(x) ((x) << S_PATH_MTU) |
251 | #define F_PATH_MTU V_PATH_MTU(1U) | 777 | #define F_PATH_MTU V_PATH_MTU(1U) |
252 | 778 | ||
253 | #define S_5TUPLE_LOOKUP 17 | 779 | #define S_5TUPLE_LOOKUP 17 |
780 | #define M_5TUPLE_LOOKUP 0x3 | ||
254 | #define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP) | 781 | #define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP) |
782 | #define G_5TUPLE_LOOKUP(x) (((x) >> S_5TUPLE_LOOKUP) & M_5TUPLE_LOOKUP) | ||
783 | |||
784 | #define S_IP_FRAGMENT_DROP 19 | ||
785 | #define V_IP_FRAGMENT_DROP(x) ((x) << S_IP_FRAGMENT_DROP) | ||
786 | #define F_IP_FRAGMENT_DROP V_IP_FRAGMENT_DROP(1U) | ||
787 | |||
788 | #define S_PING_DROP 20 | ||
789 | #define V_PING_DROP(x) ((x) << S_PING_DROP) | ||
790 | #define F_PING_DROP V_PING_DROP(1U) | ||
791 | |||
792 | #define S_PROTECT_MODE 21 | ||
793 | #define V_PROTECT_MODE(x) ((x) << S_PROTECT_MODE) | ||
794 | #define F_PROTECT_MODE V_PROTECT_MODE(1U) | ||
795 | |||
796 | #define S_SYN_COOKIE_ALGORITHM 22 | ||
797 | #define V_SYN_COOKIE_ALGORITHM(x) ((x) << S_SYN_COOKIE_ALGORITHM) | ||
798 | #define F_SYN_COOKIE_ALGORITHM V_SYN_COOKIE_ALGORITHM(1U) | ||
799 | |||
800 | #define S_ATTACK_FILTER 23 | ||
801 | #define V_ATTACK_FILTER(x) ((x) << S_ATTACK_FILTER) | ||
802 | #define F_ATTACK_FILTER V_ATTACK_FILTER(1U) | ||
803 | |||
804 | #define S_INTERFACE_TYPE 24 | ||
805 | #define V_INTERFACE_TYPE(x) ((x) << S_INTERFACE_TYPE) | ||
806 | #define F_INTERFACE_TYPE V_INTERFACE_TYPE(1U) | ||
807 | |||
808 | #define S_DISABLE_RX_FLOW_CONTROL 25 | ||
809 | #define V_DISABLE_RX_FLOW_CONTROL(x) ((x) << S_DISABLE_RX_FLOW_CONTROL) | ||
810 | #define F_DISABLE_RX_FLOW_CONTROL V_DISABLE_RX_FLOW_CONTROL(1U) | ||
255 | 811 | ||
256 | #define S_SYN_COOKIE_PARAMETER 26 | 812 | #define S_SYN_COOKIE_PARAMETER 26 |
813 | #define M_SYN_COOKIE_PARAMETER 0x3f | ||
257 | #define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER) | 814 | #define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER) |
815 | #define G_SYN_COOKIE_PARAMETER(x) (((x) >> S_SYN_COOKIE_PARAMETER) & M_SYN_COOKIE_PARAMETER) | ||
816 | |||
817 | #define A_TP_GLOBAL_RX_CREDITS 0x30c | ||
818 | #define A_TP_CM_SIZE 0x310 | ||
819 | #define A_TP_CM_MM_BASE 0x314 | ||
820 | |||
821 | #define S_CM_MEMMGR_BASE 0 | ||
822 | #define M_CM_MEMMGR_BASE 0xfffffff | ||
823 | #define V_CM_MEMMGR_BASE(x) ((x) << S_CM_MEMMGR_BASE) | ||
824 | #define G_CM_MEMMGR_BASE(x) (((x) >> S_CM_MEMMGR_BASE) & M_CM_MEMMGR_BASE) | ||
825 | |||
826 | #define A_TP_CM_TIMER_BASE 0x318 | ||
827 | |||
828 | #define S_CM_TIMER_BASE 0 | ||
829 | #define M_CM_TIMER_BASE 0xfffffff | ||
830 | #define V_CM_TIMER_BASE(x) ((x) << S_CM_TIMER_BASE) | ||
831 | #define G_CM_TIMER_BASE(x) (((x) >> S_CM_TIMER_BASE) & M_CM_TIMER_BASE) | ||
832 | |||
833 | #define A_TP_PM_SIZE 0x31c | ||
834 | #define A_TP_PM_TX_BASE 0x320 | ||
835 | #define A_TP_PM_DEFRAG_BASE 0x324 | ||
836 | #define A_TP_PM_RX_BASE 0x328 | ||
837 | #define A_TP_PM_RX_PG_SIZE 0x32c | ||
838 | #define A_TP_PM_RX_MAX_PGS 0x330 | ||
839 | #define A_TP_PM_TX_PG_SIZE 0x334 | ||
840 | #define A_TP_PM_TX_MAX_PGS 0x338 | ||
841 | #define A_TP_TCP_OPTIONS 0x340 | ||
842 | |||
843 | #define S_TIMESTAMP 0 | ||
844 | #define M_TIMESTAMP 0x3 | ||
845 | #define V_TIMESTAMP(x) ((x) << S_TIMESTAMP) | ||
846 | #define G_TIMESTAMP(x) (((x) >> S_TIMESTAMP) & M_TIMESTAMP) | ||
847 | |||
848 | #define S_WINDOW_SCALE 2 | ||
849 | #define M_WINDOW_SCALE 0x3 | ||
850 | #define V_WINDOW_SCALE(x) ((x) << S_WINDOW_SCALE) | ||
851 | #define G_WINDOW_SCALE(x) (((x) >> S_WINDOW_SCALE) & M_WINDOW_SCALE) | ||
852 | |||
853 | #define S_SACK 4 | ||
854 | #define M_SACK 0x3 | ||
855 | #define V_SACK(x) ((x) << S_SACK) | ||
856 | #define G_SACK(x) (((x) >> S_SACK) & M_SACK) | ||
857 | |||
858 | #define S_ECN 6 | ||
859 | #define M_ECN 0x3 | ||
860 | #define V_ECN(x) ((x) << S_ECN) | ||
861 | #define G_ECN(x) (((x) >> S_ECN) & M_ECN) | ||
862 | |||
863 | #define S_SACK_ALGORITHM 8 | ||
864 | #define M_SACK_ALGORITHM 0x3 | ||
865 | #define V_SACK_ALGORITHM(x) ((x) << S_SACK_ALGORITHM) | ||
866 | #define G_SACK_ALGORITHM(x) (((x) >> S_SACK_ALGORITHM) & M_SACK_ALGORITHM) | ||
867 | |||
868 | #define S_MSS 10 | ||
869 | #define V_MSS(x) ((x) << S_MSS) | ||
870 | #define F_MSS V_MSS(1U) | ||
871 | |||
872 | #define S_DEFAULT_PEER_MSS 16 | ||
873 | #define M_DEFAULT_PEER_MSS 0xffff | ||
874 | #define V_DEFAULT_PEER_MSS(x) ((x) << S_DEFAULT_PEER_MSS) | ||
875 | #define G_DEFAULT_PEER_MSS(x) (((x) >> S_DEFAULT_PEER_MSS) & M_DEFAULT_PEER_MSS) | ||
876 | |||
877 | #define A_TP_DACK_CONFIG 0x344 | ||
878 | |||
879 | #define S_DACK_MODE 0 | ||
880 | #define V_DACK_MODE(x) ((x) << S_DACK_MODE) | ||
881 | #define F_DACK_MODE V_DACK_MODE(1U) | ||
882 | |||
883 | #define S_DACK_AUTO_MGMT 1 | ||
884 | #define V_DACK_AUTO_MGMT(x) ((x) << S_DACK_AUTO_MGMT) | ||
885 | #define F_DACK_AUTO_MGMT V_DACK_AUTO_MGMT(1U) | ||
886 | |||
887 | #define S_DACK_AUTO_CAREFUL 2 | ||
888 | #define V_DACK_AUTO_CAREFUL(x) ((x) << S_DACK_AUTO_CAREFUL) | ||
889 | #define F_DACK_AUTO_CAREFUL V_DACK_AUTO_CAREFUL(1U) | ||
890 | |||
891 | #define S_DACK_MSS_SELECTOR 3 | ||
892 | #define M_DACK_MSS_SELECTOR 0x3 | ||
893 | #define V_DACK_MSS_SELECTOR(x) ((x) << S_DACK_MSS_SELECTOR) | ||
894 | #define G_DACK_MSS_SELECTOR(x) (((x) >> S_DACK_MSS_SELECTOR) & M_DACK_MSS_SELECTOR) | ||
895 | |||
896 | #define S_DACK_BYTE_THRESHOLD 5 | ||
897 | #define M_DACK_BYTE_THRESHOLD 0xfffff | ||
898 | #define V_DACK_BYTE_THRESHOLD(x) ((x) << S_DACK_BYTE_THRESHOLD) | ||
899 | #define G_DACK_BYTE_THRESHOLD(x) (((x) >> S_DACK_BYTE_THRESHOLD) & M_DACK_BYTE_THRESHOLD) | ||
258 | 900 | ||
259 | #define A_TP_PC_CONFIG 0x348 | 901 | #define A_TP_PC_CONFIG 0x348 |
902 | |||
903 | #define S_TP_ACCESS_LATENCY 0 | ||
904 | #define M_TP_ACCESS_LATENCY 0xf | ||
905 | #define V_TP_ACCESS_LATENCY(x) ((x) << S_TP_ACCESS_LATENCY) | ||
906 | #define G_TP_ACCESS_LATENCY(x) (((x) >> S_TP_ACCESS_LATENCY) & M_TP_ACCESS_LATENCY) | ||
907 | |||
908 | #define S_HELD_FIN_DISABLE 4 | ||
909 | #define V_HELD_FIN_DISABLE(x) ((x) << S_HELD_FIN_DISABLE) | ||
910 | #define F_HELD_FIN_DISABLE V_HELD_FIN_DISABLE(1U) | ||
911 | |||
912 | #define S_DDP_FC_ENABLE 5 | ||
913 | #define V_DDP_FC_ENABLE(x) ((x) << S_DDP_FC_ENABLE) | ||
914 | #define F_DDP_FC_ENABLE V_DDP_FC_ENABLE(1U) | ||
915 | |||
916 | #define S_RDMA_ERR_ENABLE 6 | ||
917 | #define V_RDMA_ERR_ENABLE(x) ((x) << S_RDMA_ERR_ENABLE) | ||
918 | #define F_RDMA_ERR_ENABLE V_RDMA_ERR_ENABLE(1U) | ||
919 | |||
920 | #define S_FAST_PDU_DELIVERY 7 | ||
921 | #define V_FAST_PDU_DELIVERY(x) ((x) << S_FAST_PDU_DELIVERY) | ||
922 | #define F_FAST_PDU_DELIVERY V_FAST_PDU_DELIVERY(1U) | ||
923 | |||
924 | #define S_CLEAR_FIN 8 | ||
925 | #define V_CLEAR_FIN(x) ((x) << S_CLEAR_FIN) | ||
926 | #define F_CLEAR_FIN V_CLEAR_FIN(1U) | ||
927 | |||
260 | #define S_DIS_TX_FILL_WIN_PUSH 12 | 928 | #define S_DIS_TX_FILL_WIN_PUSH 12 |
261 | #define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH) | 929 | #define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH) |
262 | #define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U) | 930 | #define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U) |
263 | 931 | ||
264 | #define S_TP_PC_REV 30 | 932 | #define S_TP_PC_REV 30 |
265 | #define M_TP_PC_REV 0x3 | 933 | #define M_TP_PC_REV 0x3 |
934 | #define V_TP_PC_REV(x) ((x) << S_TP_PC_REV) | ||
266 | #define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV) | 935 | #define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV) |
936 | |||
937 | #define A_TP_BACKOFF0 0x350 | ||
938 | |||
939 | #define S_ELEMENT0 0 | ||
940 | #define M_ELEMENT0 0xff | ||
941 | #define V_ELEMENT0(x) ((x) << S_ELEMENT0) | ||
942 | #define G_ELEMENT0(x) (((x) >> S_ELEMENT0) & M_ELEMENT0) | ||
943 | |||
944 | #define S_ELEMENT1 8 | ||
945 | #define M_ELEMENT1 0xff | ||
946 | #define V_ELEMENT1(x) ((x) << S_ELEMENT1) | ||
947 | #define G_ELEMENT1(x) (((x) >> S_ELEMENT1) & M_ELEMENT1) | ||
948 | |||
949 | #define S_ELEMENT2 16 | ||
950 | #define M_ELEMENT2 0xff | ||
951 | #define V_ELEMENT2(x) ((x) << S_ELEMENT2) | ||
952 | #define G_ELEMENT2(x) (((x) >> S_ELEMENT2) & M_ELEMENT2) | ||
953 | |||
954 | #define S_ELEMENT3 24 | ||
955 | #define M_ELEMENT3 0xff | ||
956 | #define V_ELEMENT3(x) ((x) << S_ELEMENT3) | ||
957 | #define G_ELEMENT3(x) (((x) >> S_ELEMENT3) & M_ELEMENT3) | ||
958 | |||
959 | #define A_TP_BACKOFF1 0x354 | ||
960 | #define A_TP_BACKOFF2 0x358 | ||
961 | #define A_TP_BACKOFF3 0x35c | ||
962 | #define A_TP_PARA_REG0 0x360 | ||
963 | |||
964 | #define S_VAR_MULT 0 | ||
965 | #define M_VAR_MULT 0xf | ||
966 | #define V_VAR_MULT(x) ((x) << S_VAR_MULT) | ||
967 | #define G_VAR_MULT(x) (((x) >> S_VAR_MULT) & M_VAR_MULT) | ||
968 | |||
969 | #define S_VAR_GAIN 4 | ||
970 | #define M_VAR_GAIN 0xf | ||
971 | #define V_VAR_GAIN(x) ((x) << S_VAR_GAIN) | ||
972 | #define G_VAR_GAIN(x) (((x) >> S_VAR_GAIN) & M_VAR_GAIN) | ||
973 | |||
974 | #define S_SRTT_GAIN 8 | ||
975 | #define M_SRTT_GAIN 0xf | ||
976 | #define V_SRTT_GAIN(x) ((x) << S_SRTT_GAIN) | ||
977 | #define G_SRTT_GAIN(x) (((x) >> S_SRTT_GAIN) & M_SRTT_GAIN) | ||
978 | |||
979 | #define S_RTTVAR_INIT 12 | ||
980 | #define M_RTTVAR_INIT 0xf | ||
981 | #define V_RTTVAR_INIT(x) ((x) << S_RTTVAR_INIT) | ||
982 | #define G_RTTVAR_INIT(x) (((x) >> S_RTTVAR_INIT) & M_RTTVAR_INIT) | ||
983 | |||
984 | #define S_DUP_THRESH 20 | ||
985 | #define M_DUP_THRESH 0xf | ||
986 | #define V_DUP_THRESH(x) ((x) << S_DUP_THRESH) | ||
987 | #define G_DUP_THRESH(x) (((x) >> S_DUP_THRESH) & M_DUP_THRESH) | ||
988 | |||
989 | #define S_INIT_CONG_WIN 24 | ||
990 | #define M_INIT_CONG_WIN 0x7 | ||
991 | #define V_INIT_CONG_WIN(x) ((x) << S_INIT_CONG_WIN) | ||
992 | #define G_INIT_CONG_WIN(x) (((x) >> S_INIT_CONG_WIN) & M_INIT_CONG_WIN) | ||
993 | |||
994 | #define A_TP_PARA_REG1 0x364 | ||
995 | |||
996 | #define S_INITIAL_SLOW_START_THRESHOLD 0 | ||
997 | #define M_INITIAL_SLOW_START_THRESHOLD 0xffff | ||
998 | #define V_INITIAL_SLOW_START_THRESHOLD(x) ((x) << S_INITIAL_SLOW_START_THRESHOLD) | ||
999 | #define G_INITIAL_SLOW_START_THRESHOLD(x) (((x) >> S_INITIAL_SLOW_START_THRESHOLD) & M_INITIAL_SLOW_START_THRESHOLD) | ||
1000 | |||
1001 | #define S_RECEIVE_BUFFER_SIZE 16 | ||
1002 | #define M_RECEIVE_BUFFER_SIZE 0xffff | ||
1003 | #define V_RECEIVE_BUFFER_SIZE(x) ((x) << S_RECEIVE_BUFFER_SIZE) | ||
1004 | #define G_RECEIVE_BUFFER_SIZE(x) (((x) >> S_RECEIVE_BUFFER_SIZE) & M_RECEIVE_BUFFER_SIZE) | ||
1005 | |||
1006 | #define A_TP_PARA_REG2 0x368 | ||
1007 | |||
1008 | #define S_RX_COALESCE_SIZE 0 | ||
1009 | #define M_RX_COALESCE_SIZE 0xffff | ||
1010 | #define V_RX_COALESCE_SIZE(x) ((x) << S_RX_COALESCE_SIZE) | ||
1011 | #define G_RX_COALESCE_SIZE(x) (((x) >> S_RX_COALESCE_SIZE) & M_RX_COALESCE_SIZE) | ||
1012 | |||
1013 | #define S_MAX_RX_SIZE 16 | ||
1014 | #define M_MAX_RX_SIZE 0xffff | ||
1015 | #define V_MAX_RX_SIZE(x) ((x) << S_MAX_RX_SIZE) | ||
1016 | #define G_MAX_RX_SIZE(x) (((x) >> S_MAX_RX_SIZE) & M_MAX_RX_SIZE) | ||
1017 | |||
1018 | #define A_TP_PARA_REG3 0x36c | ||
1019 | |||
1020 | #define S_RX_COALESCING_PSH_DELIVER 0 | ||
1021 | #define V_RX_COALESCING_PSH_DELIVER(x) ((x) << S_RX_COALESCING_PSH_DELIVER) | ||
1022 | #define F_RX_COALESCING_PSH_DELIVER V_RX_COALESCING_PSH_DELIVER(1U) | ||
1023 | |||
1024 | #define S_RX_COALESCING_ENABLE 1 | ||
1025 | #define V_RX_COALESCING_ENABLE(x) ((x) << S_RX_COALESCING_ENABLE) | ||
1026 | #define F_RX_COALESCING_ENABLE V_RX_COALESCING_ENABLE(1U) | ||
1027 | |||
1028 | #define S_TAHOE_ENABLE 2 | ||
1029 | #define V_TAHOE_ENABLE(x) ((x) << S_TAHOE_ENABLE) | ||
1030 | #define F_TAHOE_ENABLE V_TAHOE_ENABLE(1U) | ||
1031 | |||
1032 | #define S_MAX_REORDER_FRAGMENTS 12 | ||
1033 | #define M_MAX_REORDER_FRAGMENTS 0x7 | ||
1034 | #define V_MAX_REORDER_FRAGMENTS(x) ((x) << S_MAX_REORDER_FRAGMENTS) | ||
1035 | #define G_MAX_REORDER_FRAGMENTS(x) (((x) >> S_MAX_REORDER_FRAGMENTS) & M_MAX_REORDER_FRAGMENTS) | ||
1036 | |||
1037 | #define A_TP_TIMER_RESOLUTION 0x390 | ||
1038 | |||
1039 | #define S_DELAYED_ACK_TIMER_RESOLUTION 0 | ||
1040 | #define M_DELAYED_ACK_TIMER_RESOLUTION 0x3f | ||
1041 | #define V_DELAYED_ACK_TIMER_RESOLUTION(x) ((x) << S_DELAYED_ACK_TIMER_RESOLUTION) | ||
1042 | #define G_DELAYED_ACK_TIMER_RESOLUTION(x) (((x) >> S_DELAYED_ACK_TIMER_RESOLUTION) & M_DELAYED_ACK_TIMER_RESOLUTION) | ||
1043 | |||
1044 | #define S_GENERIC_TIMER_RESOLUTION 16 | ||
1045 | #define M_GENERIC_TIMER_RESOLUTION 0x3f | ||
1046 | #define V_GENERIC_TIMER_RESOLUTION(x) ((x) << S_GENERIC_TIMER_RESOLUTION) | ||
1047 | #define G_GENERIC_TIMER_RESOLUTION(x) (((x) >> S_GENERIC_TIMER_RESOLUTION) & M_GENERIC_TIMER_RESOLUTION) | ||
1048 | |||
1049 | #define A_TP_2MSL 0x394 | ||
1050 | |||
1051 | #define S_2MSL 0 | ||
1052 | #define M_2MSL 0x3fffffff | ||
1053 | #define V_2MSL(x) ((x) << S_2MSL) | ||
1054 | #define G_2MSL(x) (((x) >> S_2MSL) & M_2MSL) | ||
1055 | |||
1056 | #define A_TP_RXT_MIN 0x398 | ||
1057 | |||
1058 | #define S_RETRANSMIT_TIMER_MIN 0 | ||
1059 | #define M_RETRANSMIT_TIMER_MIN 0xffff | ||
1060 | #define V_RETRANSMIT_TIMER_MIN(x) ((x) << S_RETRANSMIT_TIMER_MIN) | ||
1061 | #define G_RETRANSMIT_TIMER_MIN(x) (((x) >> S_RETRANSMIT_TIMER_MIN) & M_RETRANSMIT_TIMER_MIN) | ||
1062 | |||
1063 | #define A_TP_RXT_MAX 0x39c | ||
1064 | |||
1065 | #define S_RETRANSMIT_TIMER_MAX 0 | ||
1066 | #define M_RETRANSMIT_TIMER_MAX 0x3fffffff | ||
1067 | #define V_RETRANSMIT_TIMER_MAX(x) ((x) << S_RETRANSMIT_TIMER_MAX) | ||
1068 | #define G_RETRANSMIT_TIMER_MAX(x) (((x) >> S_RETRANSMIT_TIMER_MAX) & M_RETRANSMIT_TIMER_MAX) | ||
1069 | |||
1070 | #define A_TP_PERS_MIN 0x3a0 | ||
1071 | |||
1072 | #define S_PERSIST_TIMER_MIN 0 | ||
1073 | #define M_PERSIST_TIMER_MIN 0xffff | ||
1074 | #define V_PERSIST_TIMER_MIN(x) ((x) << S_PERSIST_TIMER_MIN) | ||
1075 | #define G_PERSIST_TIMER_MIN(x) (((x) >> S_PERSIST_TIMER_MIN) & M_PERSIST_TIMER_MIN) | ||
1076 | |||
1077 | #define A_TP_PERS_MAX 0x3a4 | ||
1078 | |||
1079 | #define S_PERSIST_TIMER_MAX 0 | ||
1080 | #define M_PERSIST_TIMER_MAX 0x3fffffff | ||
1081 | #define V_PERSIST_TIMER_MAX(x) ((x) << S_PERSIST_TIMER_MAX) | ||
1082 | #define G_PERSIST_TIMER_MAX(x) (((x) >> S_PERSIST_TIMER_MAX) & M_PERSIST_TIMER_MAX) | ||
1083 | |||
1084 | #define A_TP_KEEP_IDLE 0x3ac | ||
1085 | |||
1086 | #define S_KEEP_ALIVE_IDLE_TIME 0 | ||
1087 | #define M_KEEP_ALIVE_IDLE_TIME 0x3fffffff | ||
1088 | #define V_KEEP_ALIVE_IDLE_TIME(x) ((x) << S_KEEP_ALIVE_IDLE_TIME) | ||
1089 | #define G_KEEP_ALIVE_IDLE_TIME(x) (((x) >> S_KEEP_ALIVE_IDLE_TIME) & M_KEEP_ALIVE_IDLE_TIME) | ||
1090 | |||
1091 | #define A_TP_KEEP_INTVL 0x3b0 | ||
1092 | |||
1093 | #define S_KEEP_ALIVE_INTERVAL_TIME 0 | ||
1094 | #define M_KEEP_ALIVE_INTERVAL_TIME 0x3fffffff | ||
1095 | #define V_KEEP_ALIVE_INTERVAL_TIME(x) ((x) << S_KEEP_ALIVE_INTERVAL_TIME) | ||
1096 | #define G_KEEP_ALIVE_INTERVAL_TIME(x) (((x) >> S_KEEP_ALIVE_INTERVAL_TIME) & M_KEEP_ALIVE_INTERVAL_TIME) | ||
1097 | |||
1098 | #define A_TP_INIT_SRTT 0x3b4 | ||
1099 | |||
1100 | #define S_INITIAL_SRTT 0 | ||
1101 | #define M_INITIAL_SRTT 0xffff | ||
1102 | #define V_INITIAL_SRTT(x) ((x) << S_INITIAL_SRTT) | ||
1103 | #define G_INITIAL_SRTT(x) (((x) >> S_INITIAL_SRTT) & M_INITIAL_SRTT) | ||
1104 | |||
1105 | #define A_TP_DACK_TIME 0x3b8 | ||
1106 | |||
1107 | #define S_DELAYED_ACK_TIME 0 | ||
1108 | #define M_DELAYED_ACK_TIME 0x7ff | ||
1109 | #define V_DELAYED_ACK_TIME(x) ((x) << S_DELAYED_ACK_TIME) | ||
1110 | #define G_DELAYED_ACK_TIME(x) (((x) >> S_DELAYED_ACK_TIME) & M_DELAYED_ACK_TIME) | ||
1111 | |||
1112 | #define A_TP_FINWAIT2_TIME 0x3bc | ||
1113 | |||
1114 | #define S_FINWAIT2_TIME 0 | ||
1115 | #define M_FINWAIT2_TIME 0x3fffffff | ||
1116 | #define V_FINWAIT2_TIME(x) ((x) << S_FINWAIT2_TIME) | ||
1117 | #define G_FINWAIT2_TIME(x) (((x) >> S_FINWAIT2_TIME) & M_FINWAIT2_TIME) | ||
1118 | |||
1119 | #define A_TP_FAST_FINWAIT2_TIME 0x3c0 | ||
1120 | |||
1121 | #define S_FAST_FINWAIT2_TIME 0 | ||
1122 | #define M_FAST_FINWAIT2_TIME 0x3fffffff | ||
1123 | #define V_FAST_FINWAIT2_TIME(x) ((x) << S_FAST_FINWAIT2_TIME) | ||
1124 | #define G_FAST_FINWAIT2_TIME(x) (((x) >> S_FAST_FINWAIT2_TIME) & M_FAST_FINWAIT2_TIME) | ||
1125 | |||
1126 | #define A_TP_SHIFT_CNT 0x3c4 | ||
1127 | |||
1128 | #define S_KEEPALIVE_MAX 0 | ||
1129 | #define M_KEEPALIVE_MAX 0xff | ||
1130 | #define V_KEEPALIVE_MAX(x) ((x) << S_KEEPALIVE_MAX) | ||
1131 | #define G_KEEPALIVE_MAX(x) (((x) >> S_KEEPALIVE_MAX) & M_KEEPALIVE_MAX) | ||
1132 | |||
1133 | #define S_WINDOWPROBE_MAX 8 | ||
1134 | #define M_WINDOWPROBE_MAX 0xff | ||
1135 | #define V_WINDOWPROBE_MAX(x) ((x) << S_WINDOWPROBE_MAX) | ||
1136 | #define G_WINDOWPROBE_MAX(x) (((x) >> S_WINDOWPROBE_MAX) & M_WINDOWPROBE_MAX) | ||
1137 | |||
1138 | #define S_RETRANSMISSION_MAX 16 | ||
1139 | #define M_RETRANSMISSION_MAX 0xff | ||
1140 | #define V_RETRANSMISSION_MAX(x) ((x) << S_RETRANSMISSION_MAX) | ||
1141 | #define G_RETRANSMISSION_MAX(x) (((x) >> S_RETRANSMISSION_MAX) & M_RETRANSMISSION_MAX) | ||
1142 | |||
1143 | #define S_SYN_MAX 24 | ||
1144 | #define M_SYN_MAX 0xff | ||
1145 | #define V_SYN_MAX(x) ((x) << S_SYN_MAX) | ||
1146 | #define G_SYN_MAX(x) (((x) >> S_SYN_MAX) & M_SYN_MAX) | ||
1147 | |||
1148 | #define A_TP_QOS_REG0 0x3e0 | ||
1149 | |||
1150 | #define S_L3_VALUE 0 | ||
1151 | #define M_L3_VALUE 0x3f | ||
1152 | #define V_L3_VALUE(x) ((x) << S_L3_VALUE) | ||
1153 | #define G_L3_VALUE(x) (((x) >> S_L3_VALUE) & M_L3_VALUE) | ||
1154 | |||
1155 | #define A_TP_QOS_REG1 0x3e4 | ||
1156 | #define A_TP_QOS_REG2 0x3e8 | ||
1157 | #define A_TP_QOS_REG3 0x3ec | ||
1158 | #define A_TP_QOS_REG4 0x3f0 | ||
1159 | #define A_TP_QOS_REG5 0x3f4 | ||
1160 | #define A_TP_QOS_REG6 0x3f8 | ||
1161 | #define A_TP_QOS_REG7 0x3fc | ||
1162 | #define A_TP_MTU_REG0 0x404 | ||
1163 | #define A_TP_MTU_REG1 0x408 | ||
1164 | #define A_TP_MTU_REG2 0x40c | ||
1165 | #define A_TP_MTU_REG3 0x410 | ||
1166 | #define A_TP_MTU_REG4 0x414 | ||
1167 | #define A_TP_MTU_REG5 0x418 | ||
1168 | #define A_TP_MTU_REG6 0x41c | ||
1169 | #define A_TP_MTU_REG7 0x420 | ||
267 | #define A_TP_RESET 0x44c | 1170 | #define A_TP_RESET 0x44c |
1171 | |||
268 | #define S_TP_RESET 0 | 1172 | #define S_TP_RESET 0 |
269 | #define V_TP_RESET(x) ((x) << S_TP_RESET) | 1173 | #define V_TP_RESET(x) ((x) << S_TP_RESET) |
270 | #define F_TP_RESET V_TP_RESET(1U) | 1174 | #define F_TP_RESET V_TP_RESET(1U) |
271 | 1175 | ||
1176 | #define S_CM_MEMMGR_INIT 1 | ||
1177 | #define V_CM_MEMMGR_INIT(x) ((x) << S_CM_MEMMGR_INIT) | ||
1178 | #define F_CM_MEMMGR_INIT V_CM_MEMMGR_INIT(1U) | ||
1179 | |||
1180 | #define A_TP_MIB_INDEX 0x450 | ||
1181 | #define A_TP_MIB_DATA 0x454 | ||
1182 | #define A_TP_SYNC_TIME_HI 0x458 | ||
1183 | #define A_TP_SYNC_TIME_LO 0x45c | ||
1184 | #define A_TP_CM_MM_RX_FLST_BASE 0x460 | ||
1185 | |||
1186 | #define S_CM_MEMMGR_RX_FREE_LIST_BASE 0 | ||
1187 | #define M_CM_MEMMGR_RX_FREE_LIST_BASE 0xfffffff | ||
1188 | #define V_CM_MEMMGR_RX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_RX_FREE_LIST_BASE) | ||
1189 | #define G_CM_MEMMGR_RX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_RX_FREE_LIST_BASE) & M_CM_MEMMGR_RX_FREE_LIST_BASE) | ||
1190 | |||
1191 | #define A_TP_CM_MM_TX_FLST_BASE 0x464 | ||
1192 | |||
1193 | #define S_CM_MEMMGR_TX_FREE_LIST_BASE 0 | ||
1194 | #define M_CM_MEMMGR_TX_FREE_LIST_BASE 0xfffffff | ||
1195 | #define V_CM_MEMMGR_TX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_TX_FREE_LIST_BASE) | ||
1196 | #define G_CM_MEMMGR_TX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_TX_FREE_LIST_BASE) & M_CM_MEMMGR_TX_FREE_LIST_BASE) | ||
1197 | |||
1198 | #define A_TP_CM_MM_P_FLST_BASE 0x468 | ||
1199 | |||
1200 | #define S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0 | ||
1201 | #define M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0xfffffff | ||
1202 | #define V_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) | ||
1203 | #define G_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) & M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) | ||
1204 | |||
1205 | #define A_TP_CM_MM_MAX_P 0x46c | ||
1206 | |||
1207 | #define S_CM_MEMMGR_MAX_PSTRUCT 0 | ||
1208 | #define M_CM_MEMMGR_MAX_PSTRUCT 0xfffffff | ||
1209 | #define V_CM_MEMMGR_MAX_PSTRUCT(x) ((x) << S_CM_MEMMGR_MAX_PSTRUCT) | ||
1210 | #define G_CM_MEMMGR_MAX_PSTRUCT(x) (((x) >> S_CM_MEMMGR_MAX_PSTRUCT) & M_CM_MEMMGR_MAX_PSTRUCT) | ||
1211 | |||
272 | #define A_TP_INT_ENABLE 0x470 | 1212 | #define A_TP_INT_ENABLE 0x470 |
1213 | |||
1214 | #define S_TX_FREE_LIST_EMPTY 0 | ||
1215 | #define V_TX_FREE_LIST_EMPTY(x) ((x) << S_TX_FREE_LIST_EMPTY) | ||
1216 | #define F_TX_FREE_LIST_EMPTY V_TX_FREE_LIST_EMPTY(1U) | ||
1217 | |||
1218 | #define S_RX_FREE_LIST_EMPTY 1 | ||
1219 | #define V_RX_FREE_LIST_EMPTY(x) ((x) << S_RX_FREE_LIST_EMPTY) | ||
1220 | #define F_RX_FREE_LIST_EMPTY V_RX_FREE_LIST_EMPTY(1U) | ||
1221 | |||
273 | #define A_TP_INT_CAUSE 0x474 | 1222 | #define A_TP_INT_CAUSE 0x474 |
1223 | #define A_TP_TIMER_SEPARATOR 0x4a4 | ||
1224 | |||
1225 | #define S_DISABLE_PAST_TIMER_INSERTION 0 | ||
1226 | #define V_DISABLE_PAST_TIMER_INSERTION(x) ((x) << S_DISABLE_PAST_TIMER_INSERTION) | ||
1227 | #define F_DISABLE_PAST_TIMER_INSERTION V_DISABLE_PAST_TIMER_INSERTION(1U) | ||
1228 | |||
1229 | #define S_MODULATION_TIMER_SEPARATOR 1 | ||
1230 | #define M_MODULATION_TIMER_SEPARATOR 0x7fff | ||
1231 | #define V_MODULATION_TIMER_SEPARATOR(x) ((x) << S_MODULATION_TIMER_SEPARATOR) | ||
1232 | #define G_MODULATION_TIMER_SEPARATOR(x) (((x) >> S_MODULATION_TIMER_SEPARATOR) & M_MODULATION_TIMER_SEPARATOR) | ||
1233 | |||
1234 | #define S_GLOBAL_TIMER_SEPARATOR 16 | ||
1235 | #define M_GLOBAL_TIMER_SEPARATOR 0xffff | ||
1236 | #define V_GLOBAL_TIMER_SEPARATOR(x) ((x) << S_GLOBAL_TIMER_SEPARATOR) | ||
1237 | #define G_GLOBAL_TIMER_SEPARATOR(x) (((x) >> S_GLOBAL_TIMER_SEPARATOR) & M_GLOBAL_TIMER_SEPARATOR) | ||
1238 | |||
1239 | #define A_TP_CM_FC_MODE 0x4b0 | ||
1240 | #define A_TP_PC_CONGESTION_CNTL 0x4b4 | ||
274 | #define A_TP_TX_DROP_CONFIG 0x4b8 | 1241 | #define A_TP_TX_DROP_CONFIG 0x4b8 |
275 | 1242 | ||
276 | #define S_ENABLE_TX_DROP 31 | 1243 | #define S_ENABLE_TX_DROP 31 |
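A_TP_BACKOFF0 through A_TP_BACKOFF3 with their ELEMENT0..ELEMENT3 fields read like a 16-entry retransmission backoff table, four 8-bit entries packed per 32-bit register. Assuming that layout (the header does not say how the entries are interpreted), filling one register is just field packing:

    /* Sketch, assuming four 8-bit backoff entries per backoff register. */
    static u32 pack_backoff_word(u8 e0, u8 e1, u8 e2, u8 e3)
    {
        return V_ELEMENT0(e0) | V_ELEMENT1(e1) |
               V_ELEMENT2(e2) | V_ELEMENT3(e3);
    }

    /* e.g. pack_backoff_word(1, 2, 3, 4) would fill the first four table slots. */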
@@ -282,12 +1249,108 @@ | |||
282 | #define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U) | 1249 | #define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U) |
283 | 1250 | ||
284 | #define S_DROP_TICKS_CNT 4 | 1251 | #define S_DROP_TICKS_CNT 4 |
1252 | #define M_DROP_TICKS_CNT 0x3ffffff | ||
285 | #define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT) | 1253 | #define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT) |
1254 | #define G_DROP_TICKS_CNT(x) (((x) >> S_DROP_TICKS_CNT) & M_DROP_TICKS_CNT) | ||
286 | 1255 | ||
287 | #define S_NUM_PKTS_DROPPED 0 | 1256 | #define S_NUM_PKTS_DROPPED 0 |
1257 | #define M_NUM_PKTS_DROPPED 0xf | ||
288 | #define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED) | 1258 | #define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED) |
1259 | #define G_NUM_PKTS_DROPPED(x) (((x) >> S_NUM_PKTS_DROPPED) & M_NUM_PKTS_DROPPED) | ||
1260 | |||
1261 | #define A_TP_TX_DROP_COUNT 0x4bc | ||
1262 | |||
1263 | /* RAT registers */ | ||
1264 | #define A_RAT_ROUTE_CONTROL 0x580 | ||
1265 | |||
1266 | #define S_USE_ROUTE_TABLE 0 | ||
1267 | #define V_USE_ROUTE_TABLE(x) ((x) << S_USE_ROUTE_TABLE) | ||
1268 | #define F_USE_ROUTE_TABLE V_USE_ROUTE_TABLE(1U) | ||
1269 | |||
1270 | #define S_ENABLE_CSPI 1 | ||
1271 | #define V_ENABLE_CSPI(x) ((x) << S_ENABLE_CSPI) | ||
1272 | #define F_ENABLE_CSPI V_ENABLE_CSPI(1U) | ||
1273 | |||
1274 | #define S_ENABLE_PCIX 2 | ||
1275 | #define V_ENABLE_PCIX(x) ((x) << S_ENABLE_PCIX) | ||
1276 | #define F_ENABLE_PCIX V_ENABLE_PCIX(1U) | ||
1277 | |||
1278 | #define A_RAT_ROUTE_TABLE_INDEX 0x584 | ||
1279 | |||
1280 | #define S_ROUTE_TABLE_INDEX 0 | ||
1281 | #define M_ROUTE_TABLE_INDEX 0xf | ||
1282 | #define V_ROUTE_TABLE_INDEX(x) ((x) << S_ROUTE_TABLE_INDEX) | ||
1283 | #define G_ROUTE_TABLE_INDEX(x) (((x) >> S_ROUTE_TABLE_INDEX) & M_ROUTE_TABLE_INDEX) | ||
1284 | |||
1285 | #define A_RAT_ROUTE_TABLE_DATA 0x588 | ||
1286 | #define A_RAT_NO_ROUTE 0x58c | ||
1287 | |||
1288 | #define S_CPL_OPCODE 0 | ||
1289 | #define M_CPL_OPCODE 0xff | ||
1290 | #define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE) | ||
1291 | #define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & M_CPL_OPCODE) | ||
1292 | |||
1293 | #define A_RAT_INTR_ENABLE 0x590 | ||
1294 | |||
1295 | #define S_ZEROROUTEERROR 0 | ||
1296 | #define V_ZEROROUTEERROR(x) ((x) << S_ZEROROUTEERROR) | ||
1297 | #define F_ZEROROUTEERROR V_ZEROROUTEERROR(1U) | ||
1298 | |||
1299 | #define S_CSPIFRAMINGERROR 1 | ||
1300 | #define V_CSPIFRAMINGERROR(x) ((x) << S_CSPIFRAMINGERROR) | ||
1301 | #define F_CSPIFRAMINGERROR V_CSPIFRAMINGERROR(1U) | ||
1302 | |||
1303 | #define S_SGEFRAMINGERROR 2 | ||
1304 | #define V_SGEFRAMINGERROR(x) ((x) << S_SGEFRAMINGERROR) | ||
1305 | #define F_SGEFRAMINGERROR V_SGEFRAMINGERROR(1U) | ||
1306 | |||
1307 | #define S_TPFRAMINGERROR 3 | ||
1308 | #define V_TPFRAMINGERROR(x) ((x) << S_TPFRAMINGERROR) | ||
1309 | #define F_TPFRAMINGERROR V_TPFRAMINGERROR(1U) | ||
1310 | |||
1311 | #define A_RAT_INTR_CAUSE 0x594 | ||
289 | 1312 | ||
290 | /* CSPI registers */ | 1313 | /* CSPI registers */ |
1314 | #define A_CSPI_RX_AE_WM 0x810 | ||
1315 | #define A_CSPI_RX_AF_WM 0x814 | ||
1316 | #define A_CSPI_CALENDAR_LEN 0x818 | ||
1317 | |||
1318 | #define S_CALENDARLENGTH 0 | ||
1319 | #define M_CALENDARLENGTH 0xffff | ||
1320 | #define V_CALENDARLENGTH(x) ((x) << S_CALENDARLENGTH) | ||
1321 | #define G_CALENDARLENGTH(x) (((x) >> S_CALENDARLENGTH) & M_CALENDARLENGTH) | ||
1322 | |||
1323 | #define A_CSPI_FIFO_STATUS_ENABLE 0x820 | ||
1324 | |||
1325 | #define S_FIFOSTATUSENABLE 0 | ||
1326 | #define V_FIFOSTATUSENABLE(x) ((x) << S_FIFOSTATUSENABLE) | ||
1327 | #define F_FIFOSTATUSENABLE V_FIFOSTATUSENABLE(1U) | ||
1328 | |||
1329 | #define A_CSPI_MAXBURST1_MAXBURST2 0x828 | ||
1330 | |||
1331 | #define S_MAXBURST1 0 | ||
1332 | #define M_MAXBURST1 0xffff | ||
1333 | #define V_MAXBURST1(x) ((x) << S_MAXBURST1) | ||
1334 | #define G_MAXBURST1(x) (((x) >> S_MAXBURST1) & M_MAXBURST1) | ||
1335 | |||
1336 | #define S_MAXBURST2 16 | ||
1337 | #define M_MAXBURST2 0xffff | ||
1338 | #define V_MAXBURST2(x) ((x) << S_MAXBURST2) | ||
1339 | #define G_MAXBURST2(x) (((x) >> S_MAXBURST2) & M_MAXBURST2) | ||
1340 | |||
1341 | #define A_CSPI_TRAIN 0x82c | ||
1342 | |||
1343 | #define S_CSPI_TRAIN_ALPHA 0 | ||
1344 | #define M_CSPI_TRAIN_ALPHA 0xffff | ||
1345 | #define V_CSPI_TRAIN_ALPHA(x) ((x) << S_CSPI_TRAIN_ALPHA) | ||
1346 | #define G_CSPI_TRAIN_ALPHA(x) (((x) >> S_CSPI_TRAIN_ALPHA) & M_CSPI_TRAIN_ALPHA) | ||
1347 | |||
1348 | #define S_CSPI_TRAIN_DATA_MAXT 16 | ||
1349 | #define M_CSPI_TRAIN_DATA_MAXT 0xffff | ||
1350 | #define V_CSPI_TRAIN_DATA_MAXT(x) ((x) << S_CSPI_TRAIN_DATA_MAXT) | ||
1351 | #define G_CSPI_TRAIN_DATA_MAXT(x) (((x) >> S_CSPI_TRAIN_DATA_MAXT) & M_CSPI_TRAIN_DATA_MAXT) | ||
1352 | |||
1353 | #define A_CSPI_INTR_STATUS 0x848 | ||
291 | 1354 | ||
292 | #define S_DIP4ERR 0 | 1355 | #define S_DIP4ERR 0 |
293 | #define V_DIP4ERR(x) ((x) << S_DIP4ERR) | 1356 | #define V_DIP4ERR(x) ((x) << S_DIP4ERR) |
@@ -309,22 +1372,63 @@ | |||
309 | #define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR) | 1372 | #define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR) |
310 | #define F_RAMPARITYERR V_RAMPARITYERR(1U) | 1373 | #define F_RAMPARITYERR V_RAMPARITYERR(1U) |
311 | 1374 | ||
312 | /* ESPI registers */ | 1375 | #define A_CSPI_INTR_ENABLE 0x84c |
313 | 1376 | ||
1377 | /* ESPI registers */ | ||
314 | #define A_ESPI_SCH_TOKEN0 0x880 | 1378 | #define A_ESPI_SCH_TOKEN0 0x880 |
1379 | |||
1380 | #define S_SCHTOKEN0 0 | ||
1381 | #define M_SCHTOKEN0 0xffff | ||
1382 | #define V_SCHTOKEN0(x) ((x) << S_SCHTOKEN0) | ||
1383 | #define G_SCHTOKEN0(x) (((x) >> S_SCHTOKEN0) & M_SCHTOKEN0) | ||
1384 | |||
315 | #define A_ESPI_SCH_TOKEN1 0x884 | 1385 | #define A_ESPI_SCH_TOKEN1 0x884 |
1386 | |||
1387 | #define S_SCHTOKEN1 0 | ||
1388 | #define M_SCHTOKEN1 0xffff | ||
1389 | #define V_SCHTOKEN1(x) ((x) << S_SCHTOKEN1) | ||
1390 | #define G_SCHTOKEN1(x) (((x) >> S_SCHTOKEN1) & M_SCHTOKEN1) | ||
1391 | |||
316 | #define A_ESPI_SCH_TOKEN2 0x888 | 1392 | #define A_ESPI_SCH_TOKEN2 0x888 |
1393 | |||
1394 | #define S_SCHTOKEN2 0 | ||
1395 | #define M_SCHTOKEN2 0xffff | ||
1396 | #define V_SCHTOKEN2(x) ((x) << S_SCHTOKEN2) | ||
1397 | #define G_SCHTOKEN2(x) (((x) >> S_SCHTOKEN2) & M_SCHTOKEN2) | ||
1398 | |||
317 | #define A_ESPI_SCH_TOKEN3 0x88c | 1399 | #define A_ESPI_SCH_TOKEN3 0x88c |
1400 | |||
1401 | #define S_SCHTOKEN3 0 | ||
1402 | #define M_SCHTOKEN3 0xffff | ||
1403 | #define V_SCHTOKEN3(x) ((x) << S_SCHTOKEN3) | ||
1404 | #define G_SCHTOKEN3(x) (((x) >> S_SCHTOKEN3) & M_SCHTOKEN3) | ||
1405 | |||
318 | #define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890 | 1406 | #define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890 |
1407 | |||
1408 | #define S_ALMOSTEMPTY 0 | ||
1409 | #define M_ALMOSTEMPTY 0xffff | ||
1410 | #define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY) | ||
1411 | #define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY) | ||
1412 | |||
319 | #define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894 | 1413 | #define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894 |
1414 | |||
1415 | #define S_ALMOSTFULL 0 | ||
1416 | #define M_ALMOSTFULL 0xffff | ||
1417 | #define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL) | ||
1418 | #define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL) | ||
1419 | |||
320 | #define A_ESPI_CALENDAR_LENGTH 0x898 | 1420 | #define A_ESPI_CALENDAR_LENGTH 0x898 |
321 | #define A_PORT_CONFIG 0x89c | 1421 | #define A_PORT_CONFIG 0x89c |
322 | 1422 | ||
323 | #define S_RX_NPORTS 0 | 1423 | #define S_RX_NPORTS 0 |
1424 | #define M_RX_NPORTS 0xff | ||
324 | #define V_RX_NPORTS(x) ((x) << S_RX_NPORTS) | 1425 | #define V_RX_NPORTS(x) ((x) << S_RX_NPORTS) |
1426 | #define G_RX_NPORTS(x) (((x) >> S_RX_NPORTS) & M_RX_NPORTS) | ||
325 | 1427 | ||
326 | #define S_TX_NPORTS 8 | 1428 | #define S_TX_NPORTS 8 |
1429 | #define M_TX_NPORTS 0xff | ||
327 | #define V_TX_NPORTS(x) ((x) << S_TX_NPORTS) | 1430 | #define V_TX_NPORTS(x) ((x) << S_TX_NPORTS) |
1431 | #define G_TX_NPORTS(x) (((x) >> S_TX_NPORTS) & M_TX_NPORTS) | ||
328 | 1432 | ||
329 | #define A_ESPI_FIFO_STATUS_ENABLE 0x8a0 | 1433 | #define A_ESPI_FIFO_STATUS_ENABLE 0x8a0 |
330 | 1434 | ||
@@ -332,12 +1436,124 @@ | |||
332 | #define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE) | 1436 | #define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE) |
333 | #define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U) | 1437 | #define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U) |
334 | 1438 | ||
1439 | #define S_TXDROPENABLE 1 | ||
1440 | #define V_TXDROPENABLE(x) ((x) << S_TXDROPENABLE) | ||
1441 | #define F_TXDROPENABLE V_TXDROPENABLE(1U) | ||
1442 | |||
1443 | #define S_RXENDIANMODE 2 | ||
1444 | #define V_RXENDIANMODE(x) ((x) << S_RXENDIANMODE) | ||
1445 | #define F_RXENDIANMODE V_RXENDIANMODE(1U) | ||
1446 | |||
1447 | #define S_TXENDIANMODE 3 | ||
1448 | #define V_TXENDIANMODE(x) ((x) << S_TXENDIANMODE) | ||
1449 | #define F_TXENDIANMODE V_TXENDIANMODE(1U) | ||
1450 | |||
335 | #define S_INTEL1010MODE 4 | 1451 | #define S_INTEL1010MODE 4 |
336 | #define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE) | 1452 | #define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE) |
337 | #define F_INTEL1010MODE V_INTEL1010MODE(1U) | 1453 | #define F_INTEL1010MODE V_INTEL1010MODE(1U) |
338 | 1454 | ||
339 | #define A_ESPI_MAXBURST1_MAXBURST2 0x8a8 | 1455 | #define A_ESPI_MAXBURST1_MAXBURST2 0x8a8 |
340 | #define A_ESPI_TRAIN 0x8ac | 1456 | #define A_ESPI_TRAIN 0x8ac |
1457 | |||
1458 | #define S_MAXTRAINALPHA 0 | ||
1459 | #define M_MAXTRAINALPHA 0xffff | ||
1460 | #define V_MAXTRAINALPHA(x) ((x) << S_MAXTRAINALPHA) | ||
1461 | #define G_MAXTRAINALPHA(x) (((x) >> S_MAXTRAINALPHA) & M_MAXTRAINALPHA) | ||
1462 | |||
1463 | #define S_MAXTRAINDATA 16 | ||
1464 | #define M_MAXTRAINDATA 0xffff | ||
1465 | #define V_MAXTRAINDATA(x) ((x) << S_MAXTRAINDATA) | ||
1466 | #define G_MAXTRAINDATA(x) (((x) >> S_MAXTRAINDATA) & M_MAXTRAINDATA) | ||
1467 | |||
1468 | #define A_RAM_STATUS 0x8b0 | ||
1469 | |||
1470 | #define S_RXFIFOPARITYERROR 0 | ||
1471 | #define M_RXFIFOPARITYERROR 0x3ff | ||
1472 | #define V_RXFIFOPARITYERROR(x) ((x) << S_RXFIFOPARITYERROR) | ||
1473 | #define G_RXFIFOPARITYERROR(x) (((x) >> S_RXFIFOPARITYERROR) & M_RXFIFOPARITYERROR) | ||
1474 | |||
1475 | #define S_TXFIFOPARITYERROR 10 | ||
1476 | #define M_TXFIFOPARITYERROR 0x3ff | ||
1477 | #define V_TXFIFOPARITYERROR(x) ((x) << S_TXFIFOPARITYERROR) | ||
1478 | #define G_TXFIFOPARITYERROR(x) (((x) >> S_TXFIFOPARITYERROR) & M_TXFIFOPARITYERROR) | ||
1479 | |||
1480 | #define S_RXFIFOOVERFLOW 20 | ||
1481 | #define M_RXFIFOOVERFLOW 0x3ff | ||
1482 | #define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW) | ||
1483 | #define G_RXFIFOOVERFLOW(x) (((x) >> S_RXFIFOOVERFLOW) & M_RXFIFOOVERFLOW) | ||
1484 | |||
1485 | #define A_TX_DROP_COUNT0 0x8b4 | ||
1486 | |||
1487 | #define S_TXPORT0DROPCNT 0 | ||
1488 | #define M_TXPORT0DROPCNT 0xffff | ||
1489 | #define V_TXPORT0DROPCNT(x) ((x) << S_TXPORT0DROPCNT) | ||
1490 | #define G_TXPORT0DROPCNT(x) (((x) >> S_TXPORT0DROPCNT) & M_TXPORT0DROPCNT) | ||
1491 | |||
1492 | #define S_TXPORT1DROPCNT 16 | ||
1493 | #define M_TXPORT1DROPCNT 0xffff | ||
1494 | #define V_TXPORT1DROPCNT(x) ((x) << S_TXPORT1DROPCNT) | ||
1495 | #define G_TXPORT1DROPCNT(x) (((x) >> S_TXPORT1DROPCNT) & M_TXPORT1DROPCNT) | ||
1496 | |||
1497 | #define A_TX_DROP_COUNT1 0x8b8 | ||
1498 | |||
1499 | #define S_TXPORT2DROPCNT 0 | ||
1500 | #define M_TXPORT2DROPCNT 0xffff | ||
1501 | #define V_TXPORT2DROPCNT(x) ((x) << S_TXPORT2DROPCNT) | ||
1502 | #define G_TXPORT2DROPCNT(x) (((x) >> S_TXPORT2DROPCNT) & M_TXPORT2DROPCNT) | ||
1503 | |||
1504 | #define S_TXPORT3DROPCNT 16 | ||
1505 | #define M_TXPORT3DROPCNT 0xffff | ||
1506 | #define V_TXPORT3DROPCNT(x) ((x) << S_TXPORT3DROPCNT) | ||
1507 | #define G_TXPORT3DROPCNT(x) (((x) >> S_TXPORT3DROPCNT) & M_TXPORT3DROPCNT) | ||
1508 | |||
1509 | #define A_RX_DROP_COUNT0 0x8bc | ||
1510 | |||
1511 | #define S_RXPORT0DROPCNT 0 | ||
1512 | #define M_RXPORT0DROPCNT 0xffff | ||
1513 | #define V_RXPORT0DROPCNT(x) ((x) << S_RXPORT0DROPCNT) | ||
1514 | #define G_RXPORT0DROPCNT(x) (((x) >> S_RXPORT0DROPCNT) & M_RXPORT0DROPCNT) | ||
1515 | |||
1516 | #define S_RXPORT1DROPCNT 16 | ||
1517 | #define M_RXPORT1DROPCNT 0xffff | ||
1518 | #define V_RXPORT1DROPCNT(x) ((x) << S_RXPORT1DROPCNT) | ||
1519 | #define G_RXPORT1DROPCNT(x) (((x) >> S_RXPORT1DROPCNT) & M_RXPORT1DROPCNT) | ||
1520 | |||
1521 | #define A_RX_DROP_COUNT1 0x8c0 | ||
1522 | |||
1523 | #define S_RXPORT2DROPCNT 0 | ||
1524 | #define M_RXPORT2DROPCNT 0xffff | ||
1525 | #define V_RXPORT2DROPCNT(x) ((x) << S_RXPORT2DROPCNT) | ||
1526 | #define G_RXPORT2DROPCNT(x) (((x) >> S_RXPORT2DROPCNT) & M_RXPORT2DROPCNT) | ||
1527 | |||
1528 | #define S_RXPORT3DROPCNT 16 | ||
1529 | #define M_RXPORT3DROPCNT 0xffff | ||
1530 | #define V_RXPORT3DROPCNT(x) ((x) << S_RXPORT3DROPCNT) | ||
1531 | #define G_RXPORT3DROPCNT(x) (((x) >> S_RXPORT3DROPCNT) & M_RXPORT3DROPCNT) | ||
1532 | |||
1533 | #define A_DIP4_ERROR_COUNT 0x8c4 | ||
1534 | |||
1535 | #define S_DIP4ERRORCNT 0 | ||
1536 | #define M_DIP4ERRORCNT 0xfff | ||
1537 | #define V_DIP4ERRORCNT(x) ((x) << S_DIP4ERRORCNT) | ||
1538 | #define G_DIP4ERRORCNT(x) (((x) >> S_DIP4ERRORCNT) & M_DIP4ERRORCNT) | ||
1539 | |||
1540 | #define S_DIP4ERRORCNTSHADOW 12 | ||
1541 | #define M_DIP4ERRORCNTSHADOW 0xfff | ||
1542 | #define V_DIP4ERRORCNTSHADOW(x) ((x) << S_DIP4ERRORCNTSHADOW) | ||
1543 | #define G_DIP4ERRORCNTSHADOW(x) (((x) >> S_DIP4ERRORCNTSHADOW) & M_DIP4ERRORCNTSHADOW) | ||
1544 | |||
1545 | #define S_TRICN_RX_TRAIN_ERR 24 | ||
1546 | #define V_TRICN_RX_TRAIN_ERR(x) ((x) << S_TRICN_RX_TRAIN_ERR) | ||
1547 | #define F_TRICN_RX_TRAIN_ERR V_TRICN_RX_TRAIN_ERR(1U) | ||
1548 | |||
1549 | #define S_TRICN_RX_TRAINING 25 | ||
1550 | #define V_TRICN_RX_TRAINING(x) ((x) << S_TRICN_RX_TRAINING) | ||
1551 | #define F_TRICN_RX_TRAINING V_TRICN_RX_TRAINING(1U) | ||
1552 | |||
1553 | #define S_TRICN_RX_TRAIN_OK 26 | ||
1554 | #define V_TRICN_RX_TRAIN_OK(x) ((x) << S_TRICN_RX_TRAIN_OK) | ||
1555 | #define F_TRICN_RX_TRAIN_OK V_TRICN_RX_TRAIN_OK(1U) | ||
1556 | |||
341 | #define A_ESPI_INTR_STATUS 0x8c8 | 1557 | #define A_ESPI_INTR_STATUS 0x8c8 |
342 | 1558 | ||
343 | #define S_DIP2PARITYERR 5 | 1559 | #define S_DIP2PARITYERR 5 |
@@ -347,19 +1563,56 @@ | |||
347 | #define A_ESPI_INTR_ENABLE 0x8cc | 1563 | #define A_ESPI_INTR_ENABLE 0x8cc |
348 | #define A_RX_DROP_THRESHOLD 0x8d0 | 1564 | #define A_RX_DROP_THRESHOLD 0x8d0 |
349 | #define A_ESPI_RX_RESET 0x8ec | 1565 | #define A_ESPI_RX_RESET 0x8ec |
1566 | |||
1567 | #define S_ESPI_RX_LNK_RST 0 | ||
1568 | #define V_ESPI_RX_LNK_RST(x) ((x) << S_ESPI_RX_LNK_RST) | ||
1569 | #define F_ESPI_RX_LNK_RST V_ESPI_RX_LNK_RST(1U) | ||
1570 | |||
1571 | #define S_ESPI_RX_CORE_RST 1 | ||
1572 | #define V_ESPI_RX_CORE_RST(x) ((x) << S_ESPI_RX_CORE_RST) | ||
1573 | #define F_ESPI_RX_CORE_RST V_ESPI_RX_CORE_RST(1U) | ||
1574 | |||
1575 | #define S_RX_CLK_STATUS 2 | ||
1576 | #define V_RX_CLK_STATUS(x) ((x) << S_RX_CLK_STATUS) | ||
1577 | #define F_RX_CLK_STATUS V_RX_CLK_STATUS(1U) | ||
1578 | |||
350 | #define A_ESPI_MISC_CONTROL 0x8f0 | 1579 | #define A_ESPI_MISC_CONTROL 0x8f0 |
351 | 1580 | ||
352 | #define S_OUT_OF_SYNC_COUNT 0 | 1581 | #define S_OUT_OF_SYNC_COUNT 0 |
1582 | #define M_OUT_OF_SYNC_COUNT 0xf | ||
353 | #define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT) | 1583 | #define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT) |
1584 | #define G_OUT_OF_SYNC_COUNT(x) (((x) >> S_OUT_OF_SYNC_COUNT) & M_OUT_OF_SYNC_COUNT) | ||
1585 | |||
1586 | #define S_DIP2_COUNT_MODE_ENABLE 4 | ||
1587 | #define V_DIP2_COUNT_MODE_ENABLE(x) ((x) << S_DIP2_COUNT_MODE_ENABLE) | ||
1588 | #define F_DIP2_COUNT_MODE_ENABLE V_DIP2_COUNT_MODE_ENABLE(1U) | ||
354 | 1589 | ||
355 | #define S_DIP2_PARITY_ERR_THRES 5 | 1590 | #define S_DIP2_PARITY_ERR_THRES 5 |
1591 | #define M_DIP2_PARITY_ERR_THRES 0xf | ||
356 | #define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES) | 1592 | #define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES) |
1593 | #define G_DIP2_PARITY_ERR_THRES(x) (((x) >> S_DIP2_PARITY_ERR_THRES) & M_DIP2_PARITY_ERR_THRES) | ||
357 | 1594 | ||
358 | #define S_DIP4_THRES 9 | 1595 | #define S_DIP4_THRES 9 |
1596 | #define M_DIP4_THRES 0xfff | ||
359 | #define V_DIP4_THRES(x) ((x) << S_DIP4_THRES) | 1597 | #define V_DIP4_THRES(x) ((x) << S_DIP4_THRES) |
1598 | #define G_DIP4_THRES(x) (((x) >> S_DIP4_THRES) & M_DIP4_THRES) | ||
1599 | |||
1600 | #define S_DIP4_THRES_ENABLE 21 | ||
1601 | #define V_DIP4_THRES_ENABLE(x) ((x) << S_DIP4_THRES_ENABLE) | ||
1602 | #define F_DIP4_THRES_ENABLE V_DIP4_THRES_ENABLE(1U) | ||
1603 | |||
1604 | #define S_FORCE_DISABLE_STATUS 22 | ||
1605 | #define V_FORCE_DISABLE_STATUS(x) ((x) << S_FORCE_DISABLE_STATUS) | ||
1606 | #define F_FORCE_DISABLE_STATUS V_FORCE_DISABLE_STATUS(1U) | ||
1607 | |||
1608 | #define S_DYNAMIC_DESKEW 23 | ||
1609 | #define V_DYNAMIC_DESKEW(x) ((x) << S_DYNAMIC_DESKEW) | ||
1610 | #define F_DYNAMIC_DESKEW V_DYNAMIC_DESKEW(1U) | ||
360 | 1611 | ||
361 | #define S_MONITORED_PORT_NUM 25 | 1612 | #define S_MONITORED_PORT_NUM 25 |
1613 | #define M_MONITORED_PORT_NUM 0x3 | ||
362 | #define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM) | 1614 | #define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM) |
1615 | #define G_MONITORED_PORT_NUM(x) (((x) >> S_MONITORED_PORT_NUM) & M_MONITORED_PORT_NUM) | ||
363 | 1616 | ||
364 | #define S_MONITORED_DIRECTION 27 | 1617 | #define S_MONITORED_DIRECTION 27 |
365 | #define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION) | 1618 | #define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION) |
@@ -370,33 +1623,125 @@ | |||
370 | #define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U) | 1623 | #define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U) |
371 | 1624 | ||
372 | #define A_ESPI_DIP2_ERR_COUNT 0x8f4 | 1625 | #define A_ESPI_DIP2_ERR_COUNT 0x8f4 |
1626 | |||
1627 | #define S_DIP2_ERR_CNT 0 | ||
1628 | #define M_DIP2_ERR_CNT 0xf | ||
1629 | #define V_DIP2_ERR_CNT(x) ((x) << S_DIP2_ERR_CNT) | ||
1630 | #define G_DIP2_ERR_CNT(x) (((x) >> S_DIP2_ERR_CNT) & M_DIP2_ERR_CNT) | ||
1631 | |||
373 | #define A_ESPI_CMD_ADDR 0x8f8 | 1632 | #define A_ESPI_CMD_ADDR 0x8f8 |
374 | 1633 | ||
375 | #define S_WRITE_DATA 0 | 1634 | #define S_WRITE_DATA 0 |
1635 | #define M_WRITE_DATA 0xff | ||
376 | #define V_WRITE_DATA(x) ((x) << S_WRITE_DATA) | 1636 | #define V_WRITE_DATA(x) ((x) << S_WRITE_DATA) |
1637 | #define G_WRITE_DATA(x) (((x) >> S_WRITE_DATA) & M_WRITE_DATA) | ||
377 | 1638 | ||
378 | #define S_REGISTER_OFFSET 8 | 1639 | #define S_REGISTER_OFFSET 8 |
1640 | #define M_REGISTER_OFFSET 0xf | ||
379 | #define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET) | 1641 | #define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET) |
1642 | #define G_REGISTER_OFFSET(x) (((x) >> S_REGISTER_OFFSET) & M_REGISTER_OFFSET) | ||
380 | 1643 | ||
381 | #define S_CHANNEL_ADDR 12 | 1644 | #define S_CHANNEL_ADDR 12 |
1645 | #define M_CHANNEL_ADDR 0xf | ||
382 | #define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR) | 1646 | #define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR) |
1647 | #define G_CHANNEL_ADDR(x) (((x) >> S_CHANNEL_ADDR) & M_CHANNEL_ADDR) | ||
383 | 1648 | ||
384 | #define S_MODULE_ADDR 16 | 1649 | #define S_MODULE_ADDR 16 |
1650 | #define M_MODULE_ADDR 0x3 | ||
385 | #define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR) | 1651 | #define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR) |
1652 | #define G_MODULE_ADDR(x) (((x) >> S_MODULE_ADDR) & M_MODULE_ADDR) | ||
386 | 1653 | ||
387 | #define S_BUNDLE_ADDR 20 | 1654 | #define S_BUNDLE_ADDR 20 |
1655 | #define M_BUNDLE_ADDR 0x3 | ||
388 | #define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR) | 1656 | #define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR) |
1657 | #define G_BUNDLE_ADDR(x) (((x) >> S_BUNDLE_ADDR) & M_BUNDLE_ADDR) | ||
389 | 1658 | ||
390 | #define S_SPI4_COMMAND 24 | 1659 | #define S_SPI4_COMMAND 24 |
1660 | #define M_SPI4_COMMAND 0xff | ||
391 | #define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND) | 1661 | #define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND) |
1662 | #define G_SPI4_COMMAND(x) (((x) >> S_SPI4_COMMAND) & M_SPI4_COMMAND) | ||
392 | 1663 | ||
393 | #define A_ESPI_GOSTAT 0x8fc | 1664 | #define A_ESPI_GOSTAT 0x8fc |
1665 | |||
1666 | #define S_READ_DATA 0 | ||
1667 | #define M_READ_DATA 0xff | ||
1668 | #define V_READ_DATA(x) ((x) << S_READ_DATA) | ||
1669 | #define G_READ_DATA(x) (((x) >> S_READ_DATA) & M_READ_DATA) | ||
1670 | |||
394 | #define S_ESPI_CMD_BUSY 8 | 1671 | #define S_ESPI_CMD_BUSY 8 |
395 | #define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY) | 1672 | #define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY) |
396 | #define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U) | 1673 | #define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U) |
397 | 1674 | ||
398 | /* PL registers */ | 1675 | #define S_ERROR_ACK 9 |
1676 | #define V_ERROR_ACK(x) ((x) << S_ERROR_ACK) | ||
1677 | #define F_ERROR_ACK V_ERROR_ACK(1U) | ||
1678 | |||
1679 | #define S_UNMAPPED_ERR 10 | ||
1680 | #define V_UNMAPPED_ERR(x) ((x) << S_UNMAPPED_ERR) | ||
1681 | #define F_UNMAPPED_ERR V_UNMAPPED_ERR(1U) | ||
1682 | |||
1683 | #define S_TRANSACTION_TIMER 16 | ||
1684 | #define M_TRANSACTION_TIMER 0xff | ||
1685 | #define V_TRANSACTION_TIMER(x) ((x) << S_TRANSACTION_TIMER) | ||
1686 | #define G_TRANSACTION_TIMER(x) (((x) >> S_TRANSACTION_TIMER) & M_TRANSACTION_TIMER) | ||
1687 | |||
1688 | |||
1689 | /* ULP registers */ | ||
1690 | #define A_ULP_ULIMIT 0x980 | ||
1691 | #define A_ULP_TAGMASK 0x984 | ||
1692 | #define A_ULP_HREG_INDEX 0x988 | ||
1693 | #define A_ULP_HREG_DATA 0x98c | ||
1694 | #define A_ULP_INT_ENABLE 0x990 | ||
1695 | #define A_ULP_INT_CAUSE 0x994 | ||
399 | 1696 | ||
1697 | #define S_HREG_PAR_ERR 0 | ||
1698 | #define V_HREG_PAR_ERR(x) ((x) << S_HREG_PAR_ERR) | ||
1699 | #define F_HREG_PAR_ERR V_HREG_PAR_ERR(1U) | ||
1700 | |||
1701 | #define S_EGRS_DATA_PAR_ERR 1 | ||
1702 | #define V_EGRS_DATA_PAR_ERR(x) ((x) << S_EGRS_DATA_PAR_ERR) | ||
1703 | #define F_EGRS_DATA_PAR_ERR V_EGRS_DATA_PAR_ERR(1U) | ||
1704 | |||
1705 | #define S_INGRS_DATA_PAR_ERR 2 | ||
1706 | #define V_INGRS_DATA_PAR_ERR(x) ((x) << S_INGRS_DATA_PAR_ERR) | ||
1707 | #define F_INGRS_DATA_PAR_ERR V_INGRS_DATA_PAR_ERR(1U) | ||
1708 | |||
1709 | #define S_PM_INTR 3 | ||
1710 | #define V_PM_INTR(x) ((x) << S_PM_INTR) | ||
1711 | #define F_PM_INTR V_PM_INTR(1U) | ||
1712 | |||
1713 | #define S_PM_E2C_SYNC_ERR 4 | ||
1714 | #define V_PM_E2C_SYNC_ERR(x) ((x) << S_PM_E2C_SYNC_ERR) | ||
1715 | #define F_PM_E2C_SYNC_ERR V_PM_E2C_SYNC_ERR(1U) | ||
1716 | |||
1717 | #define S_PM_C2E_SYNC_ERR 5 | ||
1718 | #define V_PM_C2E_SYNC_ERR(x) ((x) << S_PM_C2E_SYNC_ERR) | ||
1719 | #define F_PM_C2E_SYNC_ERR V_PM_C2E_SYNC_ERR(1U) | ||
1720 | |||
1721 | #define S_PM_E2C_EMPTY_ERR 6 | ||
1722 | #define V_PM_E2C_EMPTY_ERR(x) ((x) << S_PM_E2C_EMPTY_ERR) | ||
1723 | #define F_PM_E2C_EMPTY_ERR V_PM_E2C_EMPTY_ERR(1U) | ||
1724 | |||
1725 | #define S_PM_C2E_EMPTY_ERR 7 | ||
1726 | #define V_PM_C2E_EMPTY_ERR(x) ((x) << S_PM_C2E_EMPTY_ERR) | ||
1727 | #define F_PM_C2E_EMPTY_ERR V_PM_C2E_EMPTY_ERR(1U) | ||
1728 | |||
1729 | #define S_PM_PAR_ERR 8 | ||
1730 | #define M_PM_PAR_ERR 0xffff | ||
1731 | #define V_PM_PAR_ERR(x) ((x) << S_PM_PAR_ERR) | ||
1732 | #define G_PM_PAR_ERR(x) (((x) >> S_PM_PAR_ERR) & M_PM_PAR_ERR) | ||
1733 | |||
1734 | #define S_PM_E2C_WRT_FULL 24 | ||
1735 | #define V_PM_E2C_WRT_FULL(x) ((x) << S_PM_E2C_WRT_FULL) | ||
1736 | #define F_PM_E2C_WRT_FULL V_PM_E2C_WRT_FULL(1U) | ||
1737 | |||
1738 | #define S_PM_C2E_WRT_FULL 25 | ||
1739 | #define V_PM_C2E_WRT_FULL(x) ((x) << S_PM_C2E_WRT_FULL) | ||
1740 | #define F_PM_C2E_WRT_FULL V_PM_C2E_WRT_FULL(1U) | ||
1741 | |||
1742 | #define A_ULP_PIO_CTRL 0x998 | ||
1743 | |||
1744 | /* PL registers */ | ||
400 | #define A_PL_ENABLE 0xa00 | 1745 | #define A_PL_ENABLE 0xa00 |
401 | 1746 | ||
402 | #define S_PL_INTR_SGE_ERR 0 | 1747 | #define S_PL_INTR_SGE_ERR 0 |
@@ -407,14 +1752,38 @@ | |||
407 | #define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA) | 1752 | #define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA) |
408 | #define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U) | 1753 | #define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U) |
409 | 1754 | ||
1755 | #define S_PL_INTR_MC3 2 | ||
1756 | #define V_PL_INTR_MC3(x) ((x) << S_PL_INTR_MC3) | ||
1757 | #define F_PL_INTR_MC3 V_PL_INTR_MC3(1U) | ||
1758 | |||
1759 | #define S_PL_INTR_MC4 3 | ||
1760 | #define V_PL_INTR_MC4(x) ((x) << S_PL_INTR_MC4) | ||
1761 | #define F_PL_INTR_MC4 V_PL_INTR_MC4(1U) | ||
1762 | |||
1763 | #define S_PL_INTR_MC5 4 | ||
1764 | #define V_PL_INTR_MC5(x) ((x) << S_PL_INTR_MC5) | ||
1765 | #define F_PL_INTR_MC5 V_PL_INTR_MC5(1U) | ||
1766 | |||
1767 | #define S_PL_INTR_RAT 5 | ||
1768 | #define V_PL_INTR_RAT(x) ((x) << S_PL_INTR_RAT) | ||
1769 | #define F_PL_INTR_RAT V_PL_INTR_RAT(1U) | ||
1770 | |||
410 | #define S_PL_INTR_TP 6 | 1771 | #define S_PL_INTR_TP 6 |
411 | #define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP) | 1772 | #define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP) |
412 | #define F_PL_INTR_TP V_PL_INTR_TP(1U) | 1773 | #define F_PL_INTR_TP V_PL_INTR_TP(1U) |
413 | 1774 | ||
1775 | #define S_PL_INTR_ULP 7 | ||
1776 | #define V_PL_INTR_ULP(x) ((x) << S_PL_INTR_ULP) | ||
1777 | #define F_PL_INTR_ULP V_PL_INTR_ULP(1U) | ||
1778 | |||
414 | #define S_PL_INTR_ESPI 8 | 1779 | #define S_PL_INTR_ESPI 8 |
415 | #define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI) | 1780 | #define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI) |
416 | #define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U) | 1781 | #define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U) |
417 | 1782 | ||
1783 | #define S_PL_INTR_CSPI 9 | ||
1784 | #define V_PL_INTR_CSPI(x) ((x) << S_PL_INTR_CSPI) | ||
1785 | #define F_PL_INTR_CSPI V_PL_INTR_CSPI(1U) | ||
1786 | |||
418 | #define S_PL_INTR_PCIX 10 | 1787 | #define S_PL_INTR_PCIX 10 |
419 | #define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX) | 1788 | #define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX) |
420 | #define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U) | 1789 | #define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U) |
@@ -426,43 +1795,374 @@ | |||
426 | #define A_PL_CAUSE 0xa04 | 1795 | #define A_PL_CAUSE 0xa04 |
427 | 1796 | ||
428 | /* MC5 registers */ | 1797 | /* MC5 registers */ |
429 | |||
430 | #define A_MC5_CONFIG 0xc04 | 1798 | #define A_MC5_CONFIG 0xc04 |
431 | 1799 | ||
1800 | #define S_MODE 0 | ||
1801 | #define V_MODE(x) ((x) << S_MODE) | ||
1802 | #define F_MODE V_MODE(1U) | ||
1803 | |||
432 | #define S_TCAM_RESET 1 | 1804 | #define S_TCAM_RESET 1 |
433 | #define V_TCAM_RESET(x) ((x) << S_TCAM_RESET) | 1805 | #define V_TCAM_RESET(x) ((x) << S_TCAM_RESET) |
434 | #define F_TCAM_RESET V_TCAM_RESET(1U) | 1806 | #define F_TCAM_RESET V_TCAM_RESET(1U) |
435 | 1807 | ||
1808 | #define S_TCAM_READY 2 | ||
1809 | #define V_TCAM_READY(x) ((x) << S_TCAM_READY) | ||
1810 | #define F_TCAM_READY V_TCAM_READY(1U) | ||
1811 | |||
1812 | #define S_DBGI_ENABLE 4 | ||
1813 | #define V_DBGI_ENABLE(x) ((x) << S_DBGI_ENABLE) | ||
1814 | #define F_DBGI_ENABLE V_DBGI_ENABLE(1U) | ||
1815 | |||
436 | #define S_M_BUS_ENABLE 5 | 1816 | #define S_M_BUS_ENABLE 5 |
437 | #define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE) | 1817 | #define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE) |
438 | #define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U) | 1818 | #define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U) |
439 | 1819 | ||
440 | /* PCICFG registers */ | 1820 | #define S_PARITY_ENABLE 6 |
1821 | #define V_PARITY_ENABLE(x) ((x) << S_PARITY_ENABLE) | ||
1822 | #define F_PARITY_ENABLE V_PARITY_ENABLE(1U) | ||
1823 | |||
1824 | #define S_SYN_ISSUE_MODE 7 | ||
1825 | #define M_SYN_ISSUE_MODE 0x3 | ||
1826 | #define V_SYN_ISSUE_MODE(x) ((x) << S_SYN_ISSUE_MODE) | ||
1827 | #define G_SYN_ISSUE_MODE(x) (((x) >> S_SYN_ISSUE_MODE) & M_SYN_ISSUE_MODE) | ||
1828 | |||
1829 | #define S_BUILD 16 | ||
1830 | #define V_BUILD(x) ((x) << S_BUILD) | ||
1831 | #define F_BUILD V_BUILD(1U) | ||
1832 | |||
1833 | #define S_COMPRESSION_ENABLE 17 | ||
1834 | #define V_COMPRESSION_ENABLE(x) ((x) << S_COMPRESSION_ENABLE) | ||
1835 | #define F_COMPRESSION_ENABLE V_COMPRESSION_ENABLE(1U) | ||
1836 | |||
1837 | #define S_NUM_LIP 18 | ||
1838 | #define M_NUM_LIP 0x3f | ||
1839 | #define V_NUM_LIP(x) ((x) << S_NUM_LIP) | ||
1840 | #define G_NUM_LIP(x) (((x) >> S_NUM_LIP) & M_NUM_LIP) | ||
1841 | |||
1842 | #define S_TCAM_PART_CNT 24 | ||
1843 | #define M_TCAM_PART_CNT 0x3 | ||
1844 | #define V_TCAM_PART_CNT(x) ((x) << S_TCAM_PART_CNT) | ||
1845 | #define G_TCAM_PART_CNT(x) (((x) >> S_TCAM_PART_CNT) & M_TCAM_PART_CNT) | ||
1846 | |||
1847 | #define S_TCAM_PART_TYPE 26 | ||
1848 | #define M_TCAM_PART_TYPE 0x3 | ||
1849 | #define V_TCAM_PART_TYPE(x) ((x) << S_TCAM_PART_TYPE) | ||
1850 | #define G_TCAM_PART_TYPE(x) (((x) >> S_TCAM_PART_TYPE) & M_TCAM_PART_TYPE) | ||
1851 | |||
1852 | #define S_TCAM_PART_SIZE 28 | ||
1853 | #define M_TCAM_PART_SIZE 0x3 | ||
1854 | #define V_TCAM_PART_SIZE(x) ((x) << S_TCAM_PART_SIZE) | ||
1855 | #define G_TCAM_PART_SIZE(x) (((x) >> S_TCAM_PART_SIZE) & M_TCAM_PART_SIZE) | ||
1856 | |||
1857 | #define S_TCAM_PART_TYPE_HI 30 | ||
1858 | #define V_TCAM_PART_TYPE_HI(x) ((x) << S_TCAM_PART_TYPE_HI) | ||
1859 | #define F_TCAM_PART_TYPE_HI V_TCAM_PART_TYPE_HI(1U) | ||
1860 | |||
1861 | #define A_MC5_SIZE 0xc08 | ||
1862 | |||
1863 | #define S_SIZE 0 | ||
1864 | #define M_SIZE 0x3fffff | ||
1865 | #define V_SIZE(x) ((x) << S_SIZE) | ||
1866 | #define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE) | ||
1867 | |||
1868 | #define A_MC5_ROUTING_TABLE_INDEX 0xc0c | ||
441 | 1869 | ||
1870 | #define S_START_OF_ROUTING_TABLE 0 | ||
1871 | #define M_START_OF_ROUTING_TABLE 0x3fffff | ||
1872 | #define V_START_OF_ROUTING_TABLE(x) ((x) << S_START_OF_ROUTING_TABLE) | ||
1873 | #define G_START_OF_ROUTING_TABLE(x) (((x) >> S_START_OF_ROUTING_TABLE) & M_START_OF_ROUTING_TABLE) | ||
1874 | |||
1875 | #define A_MC5_SERVER_INDEX 0xc14 | ||
1876 | |||
1877 | #define S_START_OF_SERVER_INDEX 0 | ||
1878 | #define M_START_OF_SERVER_INDEX 0x3fffff | ||
1879 | #define V_START_OF_SERVER_INDEX(x) ((x) << S_START_OF_SERVER_INDEX) | ||
1880 | #define G_START_OF_SERVER_INDEX(x) (((x) >> S_START_OF_SERVER_INDEX) & M_START_OF_SERVER_INDEX) | ||
1881 | |||
1882 | #define A_MC5_LIP_RAM_ADDR 0xc18 | ||
1883 | |||
1884 | #define S_LOCAL_IP_RAM_ADDR 0 | ||
1885 | #define M_LOCAL_IP_RAM_ADDR 0x3f | ||
1886 | #define V_LOCAL_IP_RAM_ADDR(x) ((x) << S_LOCAL_IP_RAM_ADDR) | ||
1887 | #define G_LOCAL_IP_RAM_ADDR(x) (((x) >> S_LOCAL_IP_RAM_ADDR) & M_LOCAL_IP_RAM_ADDR) | ||
1888 | |||
1889 | #define S_RAM_WRITE_ENABLE 8 | ||
1890 | #define V_RAM_WRITE_ENABLE(x) ((x) << S_RAM_WRITE_ENABLE) | ||
1891 | #define F_RAM_WRITE_ENABLE V_RAM_WRITE_ENABLE(1U) | ||
1892 | |||
1893 | #define A_MC5_LIP_RAM_DATA 0xc1c | ||
1894 | #define A_MC5_RSP_LATENCY 0xc20 | ||
1895 | |||
1896 | #define S_SEARCH_RESPONSE_LATENCY 0 | ||
1897 | #define M_SEARCH_RESPONSE_LATENCY 0x1f | ||
1898 | #define V_SEARCH_RESPONSE_LATENCY(x) ((x) << S_SEARCH_RESPONSE_LATENCY) | ||
1899 | #define G_SEARCH_RESPONSE_LATENCY(x) (((x) >> S_SEARCH_RESPONSE_LATENCY) & M_SEARCH_RESPONSE_LATENCY) | ||
1900 | |||
1901 | #define S_LEARN_RESPONSE_LATENCY 8 | ||
1902 | #define M_LEARN_RESPONSE_LATENCY 0x1f | ||
1903 | #define V_LEARN_RESPONSE_LATENCY(x) ((x) << S_LEARN_RESPONSE_LATENCY) | ||
1904 | #define G_LEARN_RESPONSE_LATENCY(x) (((x) >> S_LEARN_RESPONSE_LATENCY) & M_LEARN_RESPONSE_LATENCY) | ||
1905 | |||
1906 | #define A_MC5_PARITY_LATENCY 0xc24 | ||
1907 | |||
1908 | #define S_SRCHLAT 0 | ||
1909 | #define M_SRCHLAT 0x1f | ||
1910 | #define V_SRCHLAT(x) ((x) << S_SRCHLAT) | ||
1911 | #define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT) | ||
1912 | |||
1913 | #define S_PARLAT 8 | ||
1914 | #define M_PARLAT 0x1f | ||
1915 | #define V_PARLAT(x) ((x) << S_PARLAT) | ||
1916 | #define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT) | ||
1917 | |||
1918 | #define A_MC5_WR_LRN_VERIFY 0xc28 | ||
1919 | |||
1920 | #define S_POVEREN 0 | ||
1921 | #define V_POVEREN(x) ((x) << S_POVEREN) | ||
1922 | #define F_POVEREN V_POVEREN(1U) | ||
1923 | |||
1924 | #define S_LRNVEREN 1 | ||
1925 | #define V_LRNVEREN(x) ((x) << S_LRNVEREN) | ||
1926 | #define F_LRNVEREN V_LRNVEREN(1U) | ||
1927 | |||
1928 | #define S_VWVEREN 2 | ||
1929 | #define V_VWVEREN(x) ((x) << S_VWVEREN) | ||
1930 | #define F_VWVEREN V_VWVEREN(1U) | ||
1931 | |||
1932 | #define A_MC5_PART_ID_INDEX 0xc2c | ||
1933 | |||
1934 | #define S_IDINDEX 0 | ||
1935 | #define M_IDINDEX 0xf | ||
1936 | #define V_IDINDEX(x) ((x) << S_IDINDEX) | ||
1937 | #define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX) | ||
1938 | |||
1939 | #define A_MC5_RESET_MAX 0xc30 | ||
1940 | |||
1941 | #define S_RSTMAX 0 | ||
1942 | #define M_RSTMAX 0x1ff | ||
1943 | #define V_RSTMAX(x) ((x) << S_RSTMAX) | ||
1944 | #define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX) | ||
1945 | |||
1946 | #define A_MC5_INT_ENABLE 0xc40 | ||
1947 | |||
1948 | #define S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR 0 | ||
1949 | #define V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR) | ||
1950 | #define F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(1U) | ||
1951 | |||
1952 | #define S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR 1 | ||
1953 | #define V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR) | ||
1954 | #define F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(1U) | ||
1955 | |||
1956 | #define S_MC5_INT_HIT_IN_RT_REGION_ERR 2 | ||
1957 | #define V_MC5_INT_HIT_IN_RT_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_RT_REGION_ERR) | ||
1958 | #define F_MC5_INT_HIT_IN_RT_REGION_ERR V_MC5_INT_HIT_IN_RT_REGION_ERR(1U) | ||
1959 | |||
1960 | #define S_MC5_INT_MISS_ERR 3 | ||
1961 | #define V_MC5_INT_MISS_ERR(x) ((x) << S_MC5_INT_MISS_ERR) | ||
1962 | #define F_MC5_INT_MISS_ERR V_MC5_INT_MISS_ERR(1U) | ||
1963 | |||
1964 | #define S_MC5_INT_LIP0_ERR 4 | ||
1965 | #define V_MC5_INT_LIP0_ERR(x) ((x) << S_MC5_INT_LIP0_ERR) | ||
1966 | #define F_MC5_INT_LIP0_ERR V_MC5_INT_LIP0_ERR(1U) | ||
1967 | |||
1968 | #define S_MC5_INT_LIP_MISS_ERR 5 | ||
1969 | #define V_MC5_INT_LIP_MISS_ERR(x) ((x) << S_MC5_INT_LIP_MISS_ERR) | ||
1970 | #define F_MC5_INT_LIP_MISS_ERR V_MC5_INT_LIP_MISS_ERR(1U) | ||
1971 | |||
1972 | #define S_MC5_INT_PARITY_ERR 6 | ||
1973 | #define V_MC5_INT_PARITY_ERR(x) ((x) << S_MC5_INT_PARITY_ERR) | ||
1974 | #define F_MC5_INT_PARITY_ERR V_MC5_INT_PARITY_ERR(1U) | ||
1975 | |||
1976 | #define S_MC5_INT_ACTIVE_REGION_FULL 7 | ||
1977 | #define V_MC5_INT_ACTIVE_REGION_FULL(x) ((x) << S_MC5_INT_ACTIVE_REGION_FULL) | ||
1978 | #define F_MC5_INT_ACTIVE_REGION_FULL V_MC5_INT_ACTIVE_REGION_FULL(1U) | ||
1979 | |||
1980 | #define S_MC5_INT_NFA_SRCH_ERR 8 | ||
1981 | #define V_MC5_INT_NFA_SRCH_ERR(x) ((x) << S_MC5_INT_NFA_SRCH_ERR) | ||
1982 | #define F_MC5_INT_NFA_SRCH_ERR V_MC5_INT_NFA_SRCH_ERR(1U) | ||
1983 | |||
1984 | #define S_MC5_INT_SYN_COOKIE 9 | ||
1985 | #define V_MC5_INT_SYN_COOKIE(x) ((x) << S_MC5_INT_SYN_COOKIE) | ||
1986 | #define F_MC5_INT_SYN_COOKIE V_MC5_INT_SYN_COOKIE(1U) | ||
1987 | |||
1988 | #define S_MC5_INT_SYN_COOKIE_BAD 10 | ||
1989 | #define V_MC5_INT_SYN_COOKIE_BAD(x) ((x) << S_MC5_INT_SYN_COOKIE_BAD) | ||
1990 | #define F_MC5_INT_SYN_COOKIE_BAD V_MC5_INT_SYN_COOKIE_BAD(1U) | ||
1991 | |||
1992 | #define S_MC5_INT_SYN_COOKIE_OFF 11 | ||
1993 | #define V_MC5_INT_SYN_COOKIE_OFF(x) ((x) << S_MC5_INT_SYN_COOKIE_OFF) | ||
1994 | #define F_MC5_INT_SYN_COOKIE_OFF V_MC5_INT_SYN_COOKIE_OFF(1U) | ||
1995 | |||
1996 | #define S_MC5_INT_UNKNOWN_CMD 15 | ||
1997 | #define V_MC5_INT_UNKNOWN_CMD(x) ((x) << S_MC5_INT_UNKNOWN_CMD) | ||
1998 | #define F_MC5_INT_UNKNOWN_CMD V_MC5_INT_UNKNOWN_CMD(1U) | ||
1999 | |||
2000 | #define S_MC5_INT_REQUESTQ_PARITY_ERR 16 | ||
2001 | #define V_MC5_INT_REQUESTQ_PARITY_ERR(x) ((x) << S_MC5_INT_REQUESTQ_PARITY_ERR) | ||
2002 | #define F_MC5_INT_REQUESTQ_PARITY_ERR V_MC5_INT_REQUESTQ_PARITY_ERR(1U) | ||
2003 | |||
2004 | #define S_MC5_INT_DISPATCHQ_PARITY_ERR 17 | ||
2005 | #define V_MC5_INT_DISPATCHQ_PARITY_ERR(x) ((x) << S_MC5_INT_DISPATCHQ_PARITY_ERR) | ||
2006 | #define F_MC5_INT_DISPATCHQ_PARITY_ERR V_MC5_INT_DISPATCHQ_PARITY_ERR(1U) | ||
2007 | |||
2008 | #define S_MC5_INT_DEL_ACT_EMPTY 18 | ||
2009 | #define V_MC5_INT_DEL_ACT_EMPTY(x) ((x) << S_MC5_INT_DEL_ACT_EMPTY) | ||
2010 | #define F_MC5_INT_DEL_ACT_EMPTY V_MC5_INT_DEL_ACT_EMPTY(1U) | ||
2011 | |||
2012 | #define A_MC5_INT_CAUSE 0xc44 | ||
2013 | #define A_MC5_INT_TID 0xc48 | ||
2014 | #define A_MC5_INT_PTID 0xc4c | ||
2015 | #define A_MC5_DBGI_CONFIG 0xc74 | ||
2016 | #define A_MC5_DBGI_REQ_CMD 0xc78 | ||
2017 | |||
2018 | #define S_CMDMODE 0 | ||
2019 | #define M_CMDMODE 0x7 | ||
2020 | #define V_CMDMODE(x) ((x) << S_CMDMODE) | ||
2021 | #define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE) | ||
2022 | |||
2023 | #define S_SADRSEL 4 | ||
2024 | #define V_SADRSEL(x) ((x) << S_SADRSEL) | ||
2025 | #define F_SADRSEL V_SADRSEL(1U) | ||
2026 | |||
2027 | #define S_WRITE_BURST_SIZE 22 | ||
2028 | #define M_WRITE_BURST_SIZE 0x3ff | ||
2029 | #define V_WRITE_BURST_SIZE(x) ((x) << S_WRITE_BURST_SIZE) | ||
2030 | #define G_WRITE_BURST_SIZE(x) (((x) >> S_WRITE_BURST_SIZE) & M_WRITE_BURST_SIZE) | ||
2031 | |||
2032 | #define A_MC5_DBGI_REQ_ADDR0 0xc7c | ||
2033 | #define A_MC5_DBGI_REQ_ADDR1 0xc80 | ||
2034 | #define A_MC5_DBGI_REQ_ADDR2 0xc84 | ||
2035 | #define A_MC5_DBGI_REQ_DATA0 0xc88 | ||
2036 | #define A_MC5_DBGI_REQ_DATA1 0xc8c | ||
2037 | #define A_MC5_DBGI_REQ_DATA2 0xc90 | ||
2038 | #define A_MC5_DBGI_REQ_DATA3 0xc94 | ||
2039 | #define A_MC5_DBGI_REQ_DATA4 0xc98 | ||
2040 | #define A_MC5_DBGI_REQ_MASK0 0xc9c | ||
2041 | #define A_MC5_DBGI_REQ_MASK1 0xca0 | ||
2042 | #define A_MC5_DBGI_REQ_MASK2 0xca4 | ||
2043 | #define A_MC5_DBGI_REQ_MASK3 0xca8 | ||
2044 | #define A_MC5_DBGI_REQ_MASK4 0xcac | ||
2045 | #define A_MC5_DBGI_RSP_STATUS 0xcb0 | ||
2046 | |||
2047 | #define S_DBGI_RSP_VALID 0 | ||
2048 | #define V_DBGI_RSP_VALID(x) ((x) << S_DBGI_RSP_VALID) | ||
2049 | #define F_DBGI_RSP_VALID V_DBGI_RSP_VALID(1U) | ||
2050 | |||
2051 | #define S_DBGI_RSP_HIT 1 | ||
2052 | #define V_DBGI_RSP_HIT(x) ((x) << S_DBGI_RSP_HIT) | ||
2053 | #define F_DBGI_RSP_HIT V_DBGI_RSP_HIT(1U) | ||
2054 | |||
2055 | #define S_DBGI_RSP_ERR 2 | ||
2056 | #define V_DBGI_RSP_ERR(x) ((x) << S_DBGI_RSP_ERR) | ||
2057 | #define F_DBGI_RSP_ERR V_DBGI_RSP_ERR(1U) | ||
2058 | |||
2059 | #define S_DBGI_RSP_ERR_REASON 8 | ||
2060 | #define M_DBGI_RSP_ERR_REASON 0x7 | ||
2061 | #define V_DBGI_RSP_ERR_REASON(x) ((x) << S_DBGI_RSP_ERR_REASON) | ||
2062 | #define G_DBGI_RSP_ERR_REASON(x) (((x) >> S_DBGI_RSP_ERR_REASON) & M_DBGI_RSP_ERR_REASON) | ||
2063 | |||
2064 | #define A_MC5_DBGI_RSP_DATA0 0xcb4 | ||
2065 | #define A_MC5_DBGI_RSP_DATA1 0xcb8 | ||
2066 | #define A_MC5_DBGI_RSP_DATA2 0xcbc | ||
2067 | #define A_MC5_DBGI_RSP_DATA3 0xcc0 | ||
2068 | #define A_MC5_DBGI_RSP_DATA4 0xcc4 | ||
2069 | #define A_MC5_DBGI_RSP_LAST_CMD 0xcc8 | ||
2070 | #define A_MC5_POPEN_DATA_WR_CMD 0xccc | ||
2071 | #define A_MC5_POPEN_MASK_WR_CMD 0xcd0 | ||
2072 | #define A_MC5_AOPEN_SRCH_CMD 0xcd4 | ||
2073 | #define A_MC5_AOPEN_LRN_CMD 0xcd8 | ||
2074 | #define A_MC5_SYN_SRCH_CMD 0xcdc | ||
2075 | #define A_MC5_SYN_LRN_CMD 0xce0 | ||
2076 | #define A_MC5_ACK_SRCH_CMD 0xce4 | ||
2077 | #define A_MC5_ACK_LRN_CMD 0xce8 | ||
2078 | #define A_MC5_ILOOKUP_CMD 0xcec | ||
2079 | #define A_MC5_ELOOKUP_CMD 0xcf0 | ||
2080 | #define A_MC5_DATA_WRITE_CMD 0xcf4 | ||
2081 | #define A_MC5_DATA_READ_CMD 0xcf8 | ||
2082 | #define A_MC5_MASK_WRITE_CMD 0xcfc | ||
2083 | |||
2084 | /* PCICFG registers */ | ||
442 | #define A_PCICFG_PM_CSR 0x44 | 2085 | #define A_PCICFG_PM_CSR 0x44 |
443 | #define A_PCICFG_VPD_ADDR 0x4a | 2086 | #define A_PCICFG_VPD_ADDR 0x4a |
444 | 2087 | ||
2088 | #define S_VPD_ADDR 0 | ||
2089 | #define M_VPD_ADDR 0x7fff | ||
2090 | #define V_VPD_ADDR(x) ((x) << S_VPD_ADDR) | ||
2091 | #define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR) | ||
2092 | |||
445 | #define S_VPD_OP_FLAG 15 | 2093 | #define S_VPD_OP_FLAG 15 |
446 | #define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG) | 2094 | #define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG) |
447 | #define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U) | 2095 | #define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U) |
448 | 2096 | ||
449 | #define A_PCICFG_VPD_DATA 0x4c | 2097 | #define A_PCICFG_VPD_DATA 0x4c |
450 | 2098 | #define A_PCICFG_PCIX_CMD 0x60 | |
451 | #define A_PCICFG_INTR_ENABLE 0xf4 | 2099 | #define A_PCICFG_INTR_ENABLE 0xf4 |
452 | #define A_PCICFG_INTR_CAUSE 0xf8 | ||
453 | 2100 | ||
2101 | #define S_MASTER_PARITY_ERR 0 | ||
2102 | #define V_MASTER_PARITY_ERR(x) ((x) << S_MASTER_PARITY_ERR) | ||
2103 | #define F_MASTER_PARITY_ERR V_MASTER_PARITY_ERR(1U) | ||
2104 | |||
2105 | #define S_SIG_TARGET_ABORT 1 | ||
2106 | #define V_SIG_TARGET_ABORT(x) ((x) << S_SIG_TARGET_ABORT) | ||
2107 | #define F_SIG_TARGET_ABORT V_SIG_TARGET_ABORT(1U) | ||
2108 | |||
2109 | #define S_RCV_TARGET_ABORT 2 | ||
2110 | #define V_RCV_TARGET_ABORT(x) ((x) << S_RCV_TARGET_ABORT) | ||
2111 | #define F_RCV_TARGET_ABORT V_RCV_TARGET_ABORT(1U) | ||
2112 | |||
2113 | #define S_RCV_MASTER_ABORT 3 | ||
2114 | #define V_RCV_MASTER_ABORT(x) ((x) << S_RCV_MASTER_ABORT) | ||
2115 | #define F_RCV_MASTER_ABORT V_RCV_MASTER_ABORT(1U) | ||
2116 | |||
2117 | #define S_SIG_SYS_ERR 4 | ||
2118 | #define V_SIG_SYS_ERR(x) ((x) << S_SIG_SYS_ERR) | ||
2119 | #define F_SIG_SYS_ERR V_SIG_SYS_ERR(1U) | ||
2120 | |||
2121 | #define S_DET_PARITY_ERR 5 | ||
2122 | #define V_DET_PARITY_ERR(x) ((x) << S_DET_PARITY_ERR) | ||
2123 | #define F_DET_PARITY_ERR V_DET_PARITY_ERR(1U) | ||
2124 | |||
2125 | #define S_PIO_PARITY_ERR 6 | ||
2126 | #define V_PIO_PARITY_ERR(x) ((x) << S_PIO_PARITY_ERR) | ||
2127 | #define F_PIO_PARITY_ERR V_PIO_PARITY_ERR(1U) | ||
2128 | |||
2129 | #define S_WF_PARITY_ERR 7 | ||
2130 | #define V_WF_PARITY_ERR(x) ((x) << S_WF_PARITY_ERR) | ||
2131 | #define F_WF_PARITY_ERR V_WF_PARITY_ERR(1U) | ||
2132 | |||
2133 | #define S_RF_PARITY_ERR 8 | ||
2134 | #define M_RF_PARITY_ERR 0x3 | ||
2135 | #define V_RF_PARITY_ERR(x) ((x) << S_RF_PARITY_ERR) | ||
2136 | #define G_RF_PARITY_ERR(x) (((x) >> S_RF_PARITY_ERR) & M_RF_PARITY_ERR) | ||
2137 | |||
2138 | #define S_CF_PARITY_ERR 10 | ||
2139 | #define M_CF_PARITY_ERR 0x3 | ||
2140 | #define V_CF_PARITY_ERR(x) ((x) << S_CF_PARITY_ERR) | ||
2141 | #define G_CF_PARITY_ERR(x) (((x) >> S_CF_PARITY_ERR) & M_CF_PARITY_ERR) | ||
2142 | |||
2143 | #define A_PCICFG_INTR_CAUSE 0xf8 | ||
454 | #define A_PCICFG_MODE 0xfc | 2144 | #define A_PCICFG_MODE 0xfc |
455 | 2145 | ||
456 | #define S_PCI_MODE_64BIT 0 | 2146 | #define S_PCI_MODE_64BIT 0 |
457 | #define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT) | 2147 | #define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT) |
458 | #define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U) | 2148 | #define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U) |
459 | 2149 | ||
2150 | #define S_PCI_MODE_66MHZ 1 | ||
2151 | #define V_PCI_MODE_66MHZ(x) ((x) << S_PCI_MODE_66MHZ) | ||
2152 | #define F_PCI_MODE_66MHZ V_PCI_MODE_66MHZ(1U) | ||
2153 | |||
2154 | #define S_PCI_MODE_PCIX_INITPAT 2 | ||
2155 | #define M_PCI_MODE_PCIX_INITPAT 0x7 | ||
2156 | #define V_PCI_MODE_PCIX_INITPAT(x) ((x) << S_PCI_MODE_PCIX_INITPAT) | ||
2157 | #define G_PCI_MODE_PCIX_INITPAT(x) (((x) >> S_PCI_MODE_PCIX_INITPAT) & M_PCI_MODE_PCIX_INITPAT) | ||
2158 | |||
460 | #define S_PCI_MODE_PCIX 5 | 2159 | #define S_PCI_MODE_PCIX 5 |
461 | #define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX) | 2160 | #define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX) |
462 | #define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U) | 2161 | #define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U) |
463 | 2162 | ||
464 | #define S_PCI_MODE_CLK 6 | 2163 | #define S_PCI_MODE_CLK 6 |
465 | #define M_PCI_MODE_CLK 0x3 | 2164 | #define M_PCI_MODE_CLK 0x3 |
2165 | #define V_PCI_MODE_CLK(x) ((x) << S_PCI_MODE_CLK) | ||
466 | #define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK) | 2166 | #define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK) |
467 | 2167 | ||
468 | #endif /* _CXGB_REGS_H_ */ | 2168 | #endif /* _CXGB_REGS_H_ */ |
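The register definitions added above follow the driver's existing naming convention: for a field FOO, S_FOO is its bit offset, M_FOO its mask (field width), V_FOO(x) shifts a value into position, G_FOO(x) extracts the field from a register word, and F_FOO is the pre-shifted form of a single-bit field. A minimal sketch of how these compose is shown below; the helper name and the choice of A_ESPI_MISC_CONTROL are illustrative only and not part of this patch, while readl()/writel() on adapter->regs is the access pattern the driver already uses elsewhere.

/* Sketch only: program the DIP4 error threshold and enable it. */
static void espi_set_dip4_thres(struct adapter *adapter, u32 thres)
{
	u32 v = readl(adapter->regs + A_ESPI_MISC_CONTROL);

	v &= ~V_DIP4_THRES(M_DIP4_THRES);        /* clear the 12-bit field */
	v |= V_DIP4_THRES(thres & M_DIP4_THRES)  /* insert the new value   */
	   | F_DIP4_THRES_ENABLE;                /* single-bit enable      */
	writel(v, adapter->regs + A_ESPI_MISC_CONTROL);

	/* G_ macros recover a field from a register word. */
	pr_debug("DIP4 threshold now %u\n",
		 G_DIP4_THRES(readl(adapter->regs + A_ESPI_MISC_CONTROL)));
}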
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 9799c12380fc..0ca8d876e16f 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c | |||
@@ -42,12 +42,14 @@ | |||
42 | #include <linux/types.h> | 42 | #include <linux/types.h> |
43 | #include <linux/errno.h> | 43 | #include <linux/errno.h> |
44 | #include <linux/pci.h> | 44 | #include <linux/pci.h> |
45 | #include <linux/ktime.h> | ||
45 | #include <linux/netdevice.h> | 46 | #include <linux/netdevice.h> |
46 | #include <linux/etherdevice.h> | 47 | #include <linux/etherdevice.h> |
47 | #include <linux/if_vlan.h> | 48 | #include <linux/if_vlan.h> |
48 | #include <linux/skbuff.h> | 49 | #include <linux/skbuff.h> |
49 | #include <linux/init.h> | 50 | #include <linux/init.h> |
50 | #include <linux/mm.h> | 51 | #include <linux/mm.h> |
52 | #include <linux/tcp.h> | ||
51 | #include <linux/ip.h> | 53 | #include <linux/ip.h> |
52 | #include <linux/in.h> | 54 | #include <linux/in.h> |
53 | #include <linux/if_arp.h> | 55 | #include <linux/if_arp.h> |
@@ -57,10 +59,8 @@ | |||
57 | #include "regs.h" | 59 | #include "regs.h" |
58 | #include "espi.h" | 60 | #include "espi.h" |
59 | 61 | ||
60 | 62 | /* This belongs in if_ether.h */ | |
61 | #ifdef NETIF_F_TSO | 63 | #define ETH_P_CPL5 0xf |
62 | #include <linux/tcp.h> | ||
63 | #endif | ||
64 | 64 | ||
65 | #define SGE_CMDQ_N 2 | 65 | #define SGE_CMDQ_N 2 |
66 | #define SGE_FREELQ_N 2 | 66 | #define SGE_FREELQ_N 2 |
@@ -73,6 +73,7 @@ | |||
73 | #define SGE_INTRTIMER_NRES 1000 | 73 | #define SGE_INTRTIMER_NRES 1000 |
74 | #define SGE_RX_COPY_THRES 256 | 74 | #define SGE_RX_COPY_THRES 256 |
75 | #define SGE_RX_SM_BUF_SIZE 1536 | 75 | #define SGE_RX_SM_BUF_SIZE 1536 |
76 | #define SGE_TX_DESC_MAX_PLEN 16384 | ||
76 | 77 | ||
77 | # define SGE_RX_DROP_THRES 2 | 78 | # define SGE_RX_DROP_THRES 2 |
78 | 79 | ||
@@ -184,17 +185,17 @@ struct cmdQ { | |||
184 | unsigned long status; /* HW DMA fetch status */ | 185 | unsigned long status; /* HW DMA fetch status */ |
185 | unsigned int in_use; /* # of in-use command descriptors */ | 186 | unsigned int in_use; /* # of in-use command descriptors */ |
186 | unsigned int size; /* # of descriptors */ | 187 | unsigned int size; /* # of descriptors */ |
187 | unsigned int processed; /* total # of descs HW has processed */ | 188 | unsigned int processed; /* total # of descs HW has processed */ |
188 | unsigned int cleaned; /* total # of descs SW has reclaimed */ | 189 | unsigned int cleaned; /* total # of descs SW has reclaimed */ |
189 | unsigned int stop_thres; /* SW TX queue suspend threshold */ | 190 | unsigned int stop_thres; /* SW TX queue suspend threshold */ |
190 | u16 pidx; /* producer index (SW) */ | 191 | u16 pidx; /* producer index (SW) */ |
191 | u16 cidx; /* consumer index (HW) */ | 192 | u16 cidx; /* consumer index (HW) */ |
192 | u8 genbit; /* current generation (=valid) bit */ | 193 | u8 genbit; /* current generation (=valid) bit */ |
193 | u8 sop; /* is next entry start of packet? */ | 194 | u8 sop; /* is next entry start of packet? */ |
194 | struct cmdQ_e *entries; /* HW command descriptor Q */ | 195 | struct cmdQ_e *entries; /* HW command descriptor Q */ |
195 | struct cmdQ_ce *centries; /* SW command context descriptor Q */ | 196 | struct cmdQ_ce *centries; /* SW command context descriptor Q */ |
196 | spinlock_t lock; /* Lock to protect cmdQ enqueuing */ | ||
197 | dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ | 197 | dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ |
198 | spinlock_t lock; /* Lock to protect cmdQ enqueuing */ | ||
198 | }; | 199 | }; |
199 | 200 | ||
200 | struct freelQ { | 201 | struct freelQ { |
@@ -203,8 +204,8 @@ struct freelQ { | |||
203 | u16 pidx; /* producer index (SW) */ | 204 | u16 pidx; /* producer index (SW) */ |
204 | u16 cidx; /* consumer index (HW) */ | 205 | u16 cidx; /* consumer index (HW) */ |
205 | u16 rx_buffer_size; /* Buffer size on this free list */ | 206 | u16 rx_buffer_size; /* Buffer size on this free list */ |
206 | u16 dma_offset; /* DMA offset to align IP headers */ | 207 | u16 dma_offset; /* DMA offset to align IP headers */ |
207 | u16 recycleq_idx; /* skb recycle q to use */ | 208 | u16 recycleq_idx; /* skb recycle q to use */ |
208 | u8 genbit; /* current generation (=valid) bit */ | 209 | u8 genbit; /* current generation (=valid) bit */ |
209 | struct freelQ_e *entries; /* HW freelist descriptor Q */ | 210 | struct freelQ_e *entries; /* HW freelist descriptor Q */ |
210 | struct freelQ_ce *centries; /* SW freelist context descriptor Q */ | 211 | struct freelQ_ce *centries; /* SW freelist context descriptor Q */ |
@@ -226,6 +227,29 @@ enum { | |||
226 | CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ | 227 | CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ |
227 | }; | 228 | }; |
228 | 229 | ||
230 | /* T204 TX SW scheduler */ | ||
231 | |||
232 | /* Per T204 TX port */ | ||
233 | struct sched_port { | ||
234 | unsigned int avail; /* available bits - quota */ | ||
235 | unsigned int drain_bits_per_1024ns; /* drain rate */ | ||
236 | unsigned int speed; /* drain rate, mbps */ | ||
237 | unsigned int mtu; /* mtu size */ | ||
238 | struct sk_buff_head skbq; /* pending skbs */ | ||
239 | }; | ||
240 | |||
241 | /* Per T204 device */ | ||
242 | struct sched { | ||
243 | ktime_t last_updated; /* last time quotas were computed */ | ||
244 | unsigned int max_avail; /* max bits to be sent to any port */ | ||
245 | unsigned int port; /* port index (round robin ports) */ | ||
246 | unsigned int num; /* num skbs in per port queues */ | ||
247 | struct sched_port p[MAX_NPORTS]; | ||
248 | struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ | ||
249 | }; | ||
250 | static void restart_sched(unsigned long); | ||
251 | |||
252 | |||
229 | /* | 253 | /* |
230 | * Main SGE data structure | 254 | * Main SGE data structure |
231 | * | 255 | * |
@@ -243,18 +267,240 @@ struct sge { | |||
243 | unsigned int rx_pkt_pad; /* RX padding for L2 packets */ | 267 | unsigned int rx_pkt_pad; /* RX padding for L2 packets */ |
244 | unsigned int jumbo_fl; /* jumbo freelist Q index */ | 268 | unsigned int jumbo_fl; /* jumbo freelist Q index */ |
245 | unsigned int intrtimer_nres; /* no-resource interrupt timer */ | 269 | unsigned int intrtimer_nres; /* no-resource interrupt timer */ |
246 | unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ | 270 | unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ |
247 | struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ | 271 | struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ |
248 | struct timer_list espibug_timer; | 272 | struct timer_list espibug_timer; |
249 | unsigned int espibug_timeout; | 273 | unsigned long espibug_timeout; |
250 | struct sk_buff *espibug_skb; | 274 | struct sk_buff *espibug_skb[MAX_NPORTS]; |
251 | u32 sge_control; /* shadow value of sge control reg */ | 275 | u32 sge_control; /* shadow value of sge control reg */ |
252 | struct sge_intr_counts stats; | 276 | struct sge_intr_counts stats; |
253 | struct sge_port_stats port_stats[MAX_NPORTS]; | 277 | struct sge_port_stats *port_stats[MAX_NPORTS]; |
278 | struct sched *tx_sched; | ||
254 | struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; | 279 | struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; |
255 | }; | 280 | }; |
256 | 281 | ||
257 | /* | 282 | /* |
283 | * stop tasklet and free all pending skb's | ||
284 | */ | ||
285 | static void tx_sched_stop(struct sge *sge) | ||
286 | { | ||
287 | struct sched *s = sge->tx_sched; | ||
288 | int i; | ||
289 | |||
290 | tasklet_kill(&s->sched_tsk); | ||
291 | |||
292 | for (i = 0; i < MAX_NPORTS; i++) | ||
293 | __skb_queue_purge(&s->p[i].skbq); | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * t1_sched_update_parms() is called when the MTU or link speed changes. It | ||
298 | * re-computes scheduler parameters to cope with the change. | ||
299 | */ | ||
300 | unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, | ||
301 | unsigned int mtu, unsigned int speed) | ||
302 | { | ||
303 | struct sched *s = sge->tx_sched; | ||
304 | struct sched_port *p = &s->p[port]; | ||
305 | unsigned int max_avail_segs; | ||
306 | |||
307 | pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed); | ||
308 | if (speed) | ||
309 | p->speed = speed; | ||
310 | if (mtu) | ||
311 | p->mtu = mtu; | ||
312 | |||
313 | if (speed || mtu) { | ||
314 | unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40); | ||
315 | do_div(drain, (p->mtu + 50) * 1000); | ||
316 | p->drain_bits_per_1024ns = (unsigned int) drain; | ||
317 | |||
318 | if (p->speed < 1000) | ||
319 | p->drain_bits_per_1024ns = | ||
320 | 90 * p->drain_bits_per_1024ns / 100; | ||
321 | } | ||
322 | |||
323 | if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { | ||
324 | p->drain_bits_per_1024ns -= 16; | ||
325 | s->max_avail = max(4096U, p->mtu + 16 + 14 + 4); | ||
326 | max_avail_segs = max(1U, 4096 / (p->mtu - 40)); | ||
327 | } else { | ||
328 | s->max_avail = 16384; | ||
329 | max_avail_segs = max(1U, 9000 / (p->mtu - 40)); | ||
330 | } | ||
331 | |||
332 | pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u " | ||
333 | "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu, | ||
334 | p->speed, s->max_avail, max_avail_segs, | ||
335 | p->drain_bits_per_1024ns); | ||
336 | |||
337 | return max_avail_segs * (p->mtu - 40); | ||
338 | } | ||
339 | |||
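A worked reading of the drain computation above (illustrative, not part of the patch; the interpretation of the 40- and 50-byte constants is an assumption): with the defaults set later in tx_sched_init(), mtu = 1500 and speed = 1000 Mb/s,

/*
 * drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000)
 *       = 1,495,040,000 / 1,550,000
 *       ~ 964 bits per 1024 ns,
 *
 * and the 90% derating is skipped because speed >= 1000.  (mtu - 40)
 * looks like the TCP/IP payload per frame and (mtu + 50) the frame plus
 * per-packet overhead, so the per-port quota drains at roughly the
 * port's usable payload rate.
 */
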
340 | /* | ||
341 | * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of | ||
342 | * data that can be pushed per port. | ||
343 | */ | ||
344 | void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val) | ||
345 | { | ||
346 | struct sched *s = sge->tx_sched; | ||
347 | unsigned int i; | ||
348 | |||
349 | s->max_avail = val; | ||
350 | for (i = 0; i < MAX_NPORTS; i++) | ||
351 | t1_sched_update_parms(sge, i, 0, 0); | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port | ||
356 | * is draining. | ||
357 | */ | ||
358 | void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port, | ||
359 | unsigned int val) | ||
360 | { | ||
361 | struct sched *s = sge->tx_sched; | ||
362 | struct sched_port *p = &s->p[port]; | ||
363 | p->drain_bits_per_1024ns = val * 1024 / 1000; | ||
364 | t1_sched_update_parms(sge, port, 0, 0); | ||
365 | } | ||
366 | |||
367 | |||
368 | /* | ||
369 | * get_clock() implements a ns clock (see ktime_get) | ||
370 | */ | ||
371 | static inline ktime_t get_clock(void) | ||
372 | { | ||
373 | struct timespec ts; | ||
374 | |||
375 | ktime_get_ts(&ts); | ||
376 | return timespec_to_ktime(ts); | ||
377 | } | ||
378 | |||
379 | /* | ||
380 | * tx_sched_init() allocates resources and does basic initialization. | ||
381 | */ | ||
382 | static int tx_sched_init(struct sge *sge) | ||
383 | { | ||
384 | struct sched *s; | ||
385 | int i; | ||
386 | |||
387 | s = kzalloc(sizeof (struct sched), GFP_KERNEL); | ||
388 | if (!s) | ||
389 | return -ENOMEM; | ||
390 | |||
391 | pr_debug("tx_sched_init\n"); | ||
392 | tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge); | ||
393 | sge->tx_sched = s; | ||
394 | |||
395 | for (i = 0; i < MAX_NPORTS; i++) { | ||
396 | skb_queue_head_init(&s->p[i].skbq); | ||
397 | t1_sched_update_parms(sge, i, 1500, 1000); | ||
398 | } | ||
399 | |||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | /* | ||
404 | * sched_update_avail() computes the delta since the last time it was called | ||
405 | * and updates the per port quota (number of bits that can be sent to the any | ||
406 | * port). | ||
407 | */ | ||
408 | static inline int sched_update_avail(struct sge *sge) | ||
409 | { | ||
410 | struct sched *s = sge->tx_sched; | ||
411 | ktime_t now = get_clock(); | ||
412 | unsigned int i; | ||
413 | long long delta_time_ns; | ||
414 | |||
415 | delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated)); | ||
416 | |||
417 | pr_debug("sched_update_avail delta=%lld\n", delta_time_ns); | ||
418 | if (delta_time_ns < 15000) | ||
419 | return 0; | ||
420 | |||
421 | for (i = 0; i < MAX_NPORTS; i++) { | ||
422 | struct sched_port *p = &s->p[i]; | ||
423 | unsigned int delta_avail; | ||
424 | |||
425 | delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13; | ||
426 | p->avail = min(p->avail + delta_avail, s->max_avail); | ||
427 | } | ||
428 | |||
429 | s->last_updated = now; | ||
430 | |||
431 | return 1; | ||
432 | } | ||
433 | |||
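One way to read the replenishment step above (an interpretation, not stated in the patch): drain_bits_per_1024ns is bits per 2^10 ns, so (drain * delta_time_ns) >> 10 yields bits and the extra >> 3 converts to bytes, which matches avail being clamped to max_avail and compared against skb->len in sched_skb(). A worked example under that assumption:

/*
 * With drain_bits_per_1024ns ~ 964 (mtu 1500, 1 Gb/s) and a 1 ms gap:
 *
 *     delta_avail = (964 * 1,000,000) >> 13
 *                 = 964,000,000 / 8192
 *                 ~ 117,675 bytes,
 *
 * which is then clamped to s->max_avail (16384 for non-T204 boards),
 * so a long idle period cannot build up an unbounded burst quota.
 */
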
434 | /* | ||
435 | * sched_skb() is called from two different places. In the tx path, any | ||
436 | * packet generating load on an output port will call sched_skb() | ||
437 | * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq | ||
438 | * context (skb == NULL). | ||
439 | * The scheduler only returns a skb (which will then be sent) if the | ||
440 | * length of the skb is <= the current quota of the output port. | ||
441 | */ | ||
442 | static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, | ||
443 | unsigned int credits) | ||
444 | { | ||
445 | struct sched *s = sge->tx_sched; | ||
446 | struct sk_buff_head *skbq; | ||
447 | unsigned int i, len, update = 1; | ||
448 | |||
449 | pr_debug("sched_skb %p\n", skb); | ||
450 | if (!skb) { | ||
451 | if (!s->num) | ||
452 | return NULL; | ||
453 | } else { | ||
454 | skbq = &s->p[skb->dev->if_port].skbq; | ||
455 | __skb_queue_tail(skbq, skb); | ||
456 | s->num++; | ||
457 | skb = NULL; | ||
458 | } | ||
459 | |||
460 | if (credits < MAX_SKB_FRAGS + 1) | ||
461 | goto out; | ||
462 | |||
463 | again: | ||
464 | for (i = 0; i < MAX_NPORTS; i++) { | ||
465 | s->port = ++s->port & (MAX_NPORTS - 1); | ||
466 | skbq = &s->p[s->port].skbq; | ||
467 | |||
468 | skb = skb_peek(skbq); | ||
469 | |||
470 | if (!skb) | ||
471 | continue; | ||
472 | |||
473 | len = skb->len; | ||
474 | if (len <= s->p[s->port].avail) { | ||
475 | s->p[s->port].avail -= len; | ||
476 | s->num--; | ||
477 | __skb_unlink(skb, skbq); | ||
478 | goto out; | ||
479 | } | ||
480 | skb = NULL; | ||
481 | } | ||
482 | |||
483 | if (update-- && sched_update_avail(sge)) | ||
484 | goto again; | ||
485 | |||
486 | out: | ||
487 | /* If there are more pending skbs, we use the hardware to schedule us | ||
488 | * again. | ||
489 | */ | ||
490 | if (s->num && !skb) { | ||
491 | struct cmdQ *q = &sge->cmdQ[0]; | ||
492 | clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | ||
493 | if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { | ||
494 | set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | ||
495 | writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); | ||
496 | } | ||
497 | } | ||
498 | pr_debug("sched_skb ret %p\n", skb); | ||
499 | |||
500 | return skb; | ||
501 | } | ||
502 | |||
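For context, a sketch of the two call patterns the comment above describes (the exact callers are outside this hunk, so treat the snippet as an assumption about usage rather than the patch's own code):

/* TX path: hand the outgoing skb to the scheduler and transmit only what
 * it returns, which may be a different, previously queued skb or NULL. */
skb = sched_skb(sge, skb, credits);

/* Tasklet/restart path: poll with NULL until the per-port quotas are
 * exhausted, queueing each returned skb onto cmdQ[0]. */
while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
	/* ...build descriptors for skb on sge->cmdQ[0]... */
}
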
503 | /* | ||
258 | * PIO to indicate that memory mapped Q contains valid descriptor(s). | 504 | * PIO to indicate that memory mapped Q contains valid descriptor(s). |
259 | */ | 505 | */ |
260 | static inline void doorbell_pio(struct adapter *adapter, u32 val) | 506 | static inline void doorbell_pio(struct adapter *adapter, u32 val) |
@@ -335,10 +581,9 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p) | |||
335 | goto err_no_mem; | 581 | goto err_no_mem; |
336 | memset(q->entries, 0, size); | 582 | memset(q->entries, 0, size); |
337 | size = sizeof(struct freelQ_ce) * q->size; | 583 | size = sizeof(struct freelQ_ce) * q->size; |
338 | q->centries = kmalloc(size, GFP_KERNEL); | 584 | q->centries = kzalloc(size, GFP_KERNEL); |
339 | if (!q->centries) | 585 | if (!q->centries) |
340 | goto err_no_mem; | 586 | goto err_no_mem; |
341 | memset(q->centries, 0, size); | ||
342 | } | 587 | } |
343 | 588 | ||
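Editor's note: the hunk above simply folds kmalloc()+memset() into kzalloc(). As a plain C illustration outside the kernel, the same pattern is malloc()+memset() versus calloc(); a zeroing allocator removes the risk of forgetting the memset on one of the allocation paths. The struct and function names are made up for the sketch.

#include <stdlib.h>
#include <string.h>

struct entry { int in_use; void *buf; };

static struct entry *alloc_entries_two_step(size_t n)
{
	struct entry *e = malloc(n * sizeof(*e));

	if (e)
		memset(e, 0, n * sizeof(*e));	/* easy to forget */
	return e;
}

static struct entry *alloc_entries_zeroed(size_t n)
{
	return calloc(n, sizeof(struct entry));	/* zeroing allocator */
}

int main(void)
{
	struct entry *a = alloc_entries_two_step(8);
	struct entry *b = alloc_entries_zeroed(8);

	free(a);
	free(b);
	return 0;
}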
344 | /* | 589 | /* |
@@ -351,8 +596,11 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p) | |||
351 | sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + | 596 | sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + |
352 | sizeof(struct cpl_rx_data) + | 597 | sizeof(struct cpl_rx_data) + |
353 | sge->freelQ[!sge->jumbo_fl].dma_offset; | 598 | sge->freelQ[!sge->jumbo_fl].dma_offset; |
354 | sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) - | 599 | |
355 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 600 | size = (16 * 1024) - |
601 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
602 | |||
603 | sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; | ||
356 | 604 | ||
357 | /* | 605 | /* |
358 | * Setup which skb recycle Q should be used when recycling buffers from | 606 | * Setup which skb recycle Q should be used when recycling buffers from |
@@ -389,17 +637,23 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) | |||
389 | q->in_use -= n; | 637 | q->in_use -= n; |
390 | ce = &q->centries[cidx]; | 638 | ce = &q->centries[cidx]; |
391 | while (n--) { | 639 | while (n--) { |
392 | if (q->sop) | 640 | if (q->sop) { |
393 | pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), | 641 | if (likely(pci_unmap_len(ce, dma_len))) { |
394 | pci_unmap_len(ce, dma_len), | 642 | pci_unmap_single(pdev, |
395 | PCI_DMA_TODEVICE); | 643 | pci_unmap_addr(ce, dma_addr), |
396 | else | 644 | pci_unmap_len(ce, dma_len), |
397 | pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr), | 645 | PCI_DMA_TODEVICE); |
398 | pci_unmap_len(ce, dma_len), | 646 | q->sop = 0; |
399 | PCI_DMA_TODEVICE); | 647 | } |
400 | q->sop = 0; | 648 | } else { |
649 | if (likely(pci_unmap_len(ce, dma_len))) { | ||
650 | pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr), | ||
651 | pci_unmap_len(ce, dma_len), | ||
652 | PCI_DMA_TODEVICE); | ||
653 | } | ||
654 | } | ||
401 | if (ce->skb) { | 655 | if (ce->skb) { |
402 | dev_kfree_skb(ce->skb); | 656 | dev_kfree_skb_any(ce->skb); |
403 | q->sop = 1; | 657 | q->sop = 1; |
404 | } | 658 | } |
405 | ce++; | 659 | ce++; |
@@ -463,10 +717,9 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p) | |||
463 | goto err_no_mem; | 717 | goto err_no_mem; |
464 | memset(q->entries, 0, size); | 718 | memset(q->entries, 0, size); |
465 | size = sizeof(struct cmdQ_ce) * q->size; | 719 | size = sizeof(struct cmdQ_ce) * q->size; |
466 | q->centries = kmalloc(size, GFP_KERNEL); | 720 | q->centries = kzalloc(size, GFP_KERNEL); |
467 | if (!q->centries) | 721 | if (!q->centries) |
468 | goto err_no_mem; | 722 | goto err_no_mem; |
469 | memset(q->centries, 0, size); | ||
470 | } | 723 | } |
471 | 724 | ||
472 | /* | 725 | /* |
@@ -506,7 +759,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off) | |||
506 | sge->sge_control |= F_VLAN_XTRACT; | 759 | sge->sge_control |= F_VLAN_XTRACT; |
507 | if (adapter->open_device_map) { | 760 | if (adapter->open_device_map) { |
508 | writel(sge->sge_control, adapter->regs + A_SG_CONTROL); | 761 | writel(sge->sge_control, adapter->regs + A_SG_CONTROL); |
509 | readl(adapter->regs + A_SG_CONTROL); /* flush */ | 762 | readl(adapter->regs + A_SG_CONTROL); /* flush */ |
510 | } | 763 | } |
511 | } | 764 | } |
512 | 765 | ||
@@ -540,7 +793,6 @@ static void configure_sge(struct sge *sge, struct sge_params *p) | |||
540 | sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | | 793 | sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | |
541 | F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | | 794 | F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | |
542 | V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | | 795 | V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | |
543 | F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS | | ||
544 | V_RX_PKT_OFFSET(sge->rx_pkt_pad); | 796 | V_RX_PKT_OFFSET(sge->rx_pkt_pad); |
545 | 797 | ||
546 | #if defined(__BIG_ENDIAN_BITFIELD) | 798 | #if defined(__BIG_ENDIAN_BITFIELD) |
@@ -568,9 +820,12 @@ static inline unsigned int jumbo_payload_capacity(const struct sge *sge) | |||
568 | */ | 820 | */ |
569 | void t1_sge_destroy(struct sge *sge) | 821 | void t1_sge_destroy(struct sge *sge) |
570 | { | 822 | { |
571 | if (sge->espibug_skb) | 823 | int i; |
572 | kfree_skb(sge->espibug_skb); | ||
573 | 824 | ||
825 | for_each_port(sge->adapter, i) | ||
826 | free_percpu(sge->port_stats[i]); | ||
827 | |||
828 | kfree(sge->tx_sched); | ||
574 | free_tx_resources(sge); | 829 | free_tx_resources(sge); |
575 | free_rx_resources(sge); | 830 | free_rx_resources(sge); |
576 | kfree(sge); | 831 | kfree(sge); |
@@ -735,14 +990,28 @@ int t1_sge_intr_error_handler(struct sge *sge) | |||
735 | return 0; | 990 | return 0; |
736 | } | 991 | } |
737 | 992 | ||
738 | const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge) | 993 | const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) |
739 | { | 994 | { |
740 | return &sge->stats; | 995 | return &sge->stats; |
741 | } | 996 | } |
742 | 997 | ||
743 | const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port) | 998 | void t1_sge_get_port_stats(const struct sge *sge, int port, |
999 | struct sge_port_stats *ss) | ||
744 | { | 1000 | { |
745 | return &sge->port_stats[port]; | 1001 | int cpu; |
1002 | |||
1003 | memset(ss, 0, sizeof(*ss)); | ||
1004 | for_each_possible_cpu(cpu) { | ||
1005 | struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); | ||
1006 | |||
1007 | ss->rx_packets += st->rx_packets; | ||
1008 | ss->rx_cso_good += st->rx_cso_good; | ||
1009 | ss->tx_packets += st->tx_packets; | ||
1010 | ss->tx_cso += st->tx_cso; | ||
1011 | ss->tx_tso += st->tx_tso; | ||
1012 | ss->vlan_xtract += st->vlan_xtract; | ||
1013 | ss->vlan_insert += st->vlan_insert; | ||
1014 | } | ||
746 | } | 1015 | } |
747 | 1016 | ||
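Editor's note: the new t1_sge_get_port_stats() above sums lock-free per-CPU counters into a caller-provided snapshot. The sketch below models that aggregation in standalone C, with a fixed-size array standing in for alloc_percpu()/per_cpu_ptr(); types and field names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MODEL_NR_CPUS 4

struct model_port_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t tx_cso;
};

static void model_get_port_stats(const struct model_port_stats percpu[MODEL_NR_CPUS],
				 struct model_port_stats *snap)
{
	int cpu;

	snap->rx_packets = snap->tx_packets = snap->tx_cso = 0;
	for (cpu = 0; cpu < MODEL_NR_CPUS; cpu++) {
		/* each CPU updates its own block without locking; readers sum */
		snap->rx_packets += percpu[cpu].rx_packets;
		snap->tx_packets += percpu[cpu].tx_packets;
		snap->tx_cso     += percpu[cpu].tx_cso;
	}
}

int main(void)
{
	struct model_port_stats percpu[MODEL_NR_CPUS] = {
		{ 10, 5, 1 }, { 20, 7, 2 }, { 0, 0, 0 }, { 3, 1, 0 }
	};
	struct model_port_stats snap;

	model_get_port_stats(percpu, &snap);
	printf("rx=%llu tx=%llu cso=%llu\n",
	       (unsigned long long)snap.rx_packets,
	       (unsigned long long)snap.tx_packets,
	       (unsigned long long)snap.tx_cso);
	return 0;
}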
748 | /** | 1017 | /** |
@@ -856,6 +1125,99 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) | |||
856 | } | 1125 | } |
857 | 1126 | ||
858 | /* | 1127 | /* |
1128 | * T1/T2 SGE limits the maximum DMA size per TX descriptor to | ||
1129 | * SGE_TX_DESC_MAX_PLEN (16KB). If PAGE_SIZE is larger than 16KB, the | ||
1130 | * stack may hand us a contiguous buffer larger than SGE_TX_DESC_MAX_PLEN. | ||
1131 | * Note that the *_large_page_tx_descs code is optimized out when | ||
1132 | * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN. | ||
1133 | * | ||
1134 | * compute_large_page_tx_descs() computes how many additional descriptors are | ||
1135 | * required to break down the stack's request. | ||
1136 | */ | ||
1137 | static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) | ||
1138 | { | ||
1139 | unsigned int count = 0; | ||
1140 | if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { | ||
1141 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; | ||
1142 | unsigned int i, len = skb->len - skb->data_len; | ||
1143 | while (len > SGE_TX_DESC_MAX_PLEN) { | ||
1144 | count++; | ||
1145 | len -= SGE_TX_DESC_MAX_PLEN; | ||
1146 | } | ||
1147 | for (i = 0; nfrags--; i++) { | ||
1148 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
1149 | len = frag->size; | ||
1150 | while (len > SGE_TX_DESC_MAX_PLEN) { | ||
1151 | count++; | ||
1152 | len -= SGE_TX_DESC_MAX_PLEN; | ||
1153 | } | ||
1154 | } | ||
1155 | } | ||
1156 | return count; | ||
1157 | } | ||
1158 | |||
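Editor's note: since each descriptor can cover at most SGE_TX_DESC_MAX_PLEN bytes (16KB per the comment above), a contiguous piece of length len needs (len - 1) / SGE_TX_DESC_MAX_PLEN descriptors beyond the first. The standalone sketch below reproduces the counting loop and checks it against that closed form; the constant and names are taken as assumptions from the comment.

#include <assert.h>
#include <stdio.h>

#define MODEL_TX_DESC_MAX_PLEN (16 * 1024)

static unsigned int model_extra_descs(unsigned int len)
{
	unsigned int count = 0;

	while (len > MODEL_TX_DESC_MAX_PLEN) {	/* same shape as the driver loop */
		count++;
		len -= MODEL_TX_DESC_MAX_PLEN;
	}
	return count;
}

int main(void)
{
	/* a 64KB fragment needs 3 extra descriptors on top of the first one */
	assert(model_extra_descs(64 * 1024) == 3);
	assert(model_extra_descs(64 * 1024) ==
	       (64 * 1024 - 1) / MODEL_TX_DESC_MAX_PLEN);
	assert(model_extra_descs(1500) == 0);	/* ordinary MTU-sized piece */

	printf("extra descriptors for 64KB: %u\n", model_extra_descs(64 * 1024));
	return 0;
}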
1159 | /* | ||
1160 | * Write a cmdQ entry. | ||
1161 | * | ||
1162 | * Since this function writes the 'flags' field, it must not be used to | ||
1163 | * write the first cmdQ entry. | ||
1164 | */ | ||
1165 | static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, | ||
1166 | unsigned int len, unsigned int gen, | ||
1167 | unsigned int eop) | ||
1168 | { | ||
1169 | if (unlikely(len > SGE_TX_DESC_MAX_PLEN)) | ||
1170 | BUG(); | ||
1171 | e->addr_lo = (u32)mapping; | ||
1172 | e->addr_hi = (u64)mapping >> 32; | ||
1173 | e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen); | ||
1174 | e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen); | ||
1175 | } | ||
1176 | |||
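Editor's note: write_tx_desc() above stores a 64-bit DMA address as two 32-bit descriptor words. The field layout of len_gen and flags is hardware specific, so the sketch below models only the address split; the struct is a hypothetical stand-in for cmdQ_e.

#include <assert.h>
#include <stdint.h>

struct model_desc {
	uint32_t addr_lo;
	uint32_t addr_hi;
};

static void model_set_addr(struct model_desc *d, uint64_t mapping)
{
	d->addr_lo = (uint32_t)mapping;          /* low 32 bits */
	d->addr_hi = (uint32_t)(mapping >> 32);  /* high 32 bits */
}

int main(void)
{
	struct model_desc d;

	model_set_addr(&d, 0x0000001234abcdefULL);
	assert(d.addr_lo == 0x34abcdef && d.addr_hi == 0x12);
	return 0;
}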
1177 | /* | ||
1178 | * See comment for previous function. | ||
1179 | * | ||
1180 | * write_large_page_tx_descs() writes additional SGE tx descriptors if | ||
1181 | * *desc_len exceeds HW's capability. | ||
1182 | */ | ||
1183 | static inline unsigned int write_large_page_tx_descs(unsigned int pidx, | ||
1184 | struct cmdQ_e **e, | ||
1185 | struct cmdQ_ce **ce, | ||
1186 | unsigned int *gen, | ||
1187 | dma_addr_t *desc_mapping, | ||
1188 | unsigned int *desc_len, | ||
1189 | unsigned int nfrags, | ||
1190 | struct cmdQ *q) | ||
1191 | { | ||
1192 | if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { | ||
1193 | struct cmdQ_e *e1 = *e; | ||
1194 | struct cmdQ_ce *ce1 = *ce; | ||
1195 | |||
1196 | while (*desc_len > SGE_TX_DESC_MAX_PLEN) { | ||
1197 | *desc_len -= SGE_TX_DESC_MAX_PLEN; | ||
1198 | write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, | ||
1199 | *gen, nfrags == 0 && *desc_len == 0); | ||
1200 | ce1->skb = NULL; | ||
1201 | pci_unmap_len_set(ce1, dma_len, 0); | ||
1202 | *desc_mapping += SGE_TX_DESC_MAX_PLEN; | ||
1203 | if (*desc_len) { | ||
1204 | ce1++; | ||
1205 | e1++; | ||
1206 | if (++pidx == q->size) { | ||
1207 | pidx = 0; | ||
1208 | *gen ^= 1; | ||
1209 | ce1 = q->centries; | ||
1210 | e1 = q->entries; | ||
1211 | } | ||
1212 | } | ||
1213 | } | ||
1214 | *e = e1; | ||
1215 | *ce = ce1; | ||
1216 | } | ||
1217 | return pidx; | ||
1218 | } | ||
1219 | |||
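Editor's note: the ring walk in write_large_page_tx_descs() above emits a long mapping as a series of chunks no larger than the per-descriptor limit, advancing the producer index with wrap-around and flipping the generation bit on each wrap. The sketch below models that walk with made-up types; the emission order is simplified, so treat it as an illustration of the chunking only.

#include <stdio.h>

#define MODEL_MAX_PLEN (16 * 1024)

struct model_ring {
	unsigned int size;   /* number of descriptors in the ring */
	unsigned int pidx;   /* producer index */
	unsigned int gen;    /* generation bit, flipped on wrap */
};

static void model_emit(struct model_ring *q, unsigned long long addr,
		       unsigned int len, unsigned int gen)
{
	printf("desc pidx=%u addr=0x%llx len=%u gen=%u\n", q->pidx, addr, len, gen);
}

static void model_write_chunks(struct model_ring *q, unsigned long long addr,
			       unsigned int len)
{
	while (len > MODEL_MAX_PLEN) {
		model_emit(q, addr, MODEL_MAX_PLEN, q->gen);
		addr += MODEL_MAX_PLEN;
		len -= MODEL_MAX_PLEN;
		if (++q->pidx == q->size) {	/* wrap: restart at 0, flip gen */
			q->pidx = 0;
			q->gen ^= 1;
		}
	}
	if (len)
		model_emit(q, addr, len, q->gen);	/* final short chunk */
}

int main(void)
{
	struct model_ring q = { .size = 4, .pidx = 2, .gen = 0 };

	model_write_chunks(&q, 0x100000, 40 * 1024);	/* 16K + 16K + 8K */
	return 0;
}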
1220 | /* | ||
859 | * Write the command descriptors to transmit the given skb starting at | 1221 | * Write the command descriptors to transmit the given skb starting at |
860 | * descriptor pidx with the given generation. | 1222 | * descriptor pidx with the given generation. |
861 | */ | 1223 | */ |
@@ -863,50 +1225,84 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, | |||
863 | unsigned int pidx, unsigned int gen, | 1225 | unsigned int pidx, unsigned int gen, |
864 | struct cmdQ *q) | 1226 | struct cmdQ *q) |
865 | { | 1227 | { |
866 | dma_addr_t mapping; | 1228 | dma_addr_t mapping, desc_mapping; |
867 | struct cmdQ_e *e, *e1; | 1229 | struct cmdQ_e *e, *e1; |
868 | struct cmdQ_ce *ce; | 1230 | struct cmdQ_ce *ce; |
869 | unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags; | 1231 | unsigned int i, flags, first_desc_len, desc_len, |
1232 | nfrags = skb_shinfo(skb)->nr_frags; | ||
870 | 1233 | ||
871 | mapping = pci_map_single(adapter->pdev, skb->data, | 1234 | e = e1 = &q->entries[pidx]; |
872 | skb->len - skb->data_len, PCI_DMA_TODEVICE); | ||
873 | ce = &q->centries[pidx]; | 1235 | ce = &q->centries[pidx]; |
1236 | |||
1237 | mapping = pci_map_single(adapter->pdev, skb->data, | ||
1238 | skb->len - skb->data_len, PCI_DMA_TODEVICE); | ||
1239 | |||
1240 | desc_mapping = mapping; | ||
1241 | desc_len = skb->len - skb->data_len; | ||
1242 | |||
1243 | flags = F_CMD_DATAVALID | F_CMD_SOP | | ||
1244 | V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | | ||
1245 | V_CMD_GEN2(gen); | ||
1246 | first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ? | ||
1247 | desc_len : SGE_TX_DESC_MAX_PLEN; | ||
1248 | e->addr_lo = (u32)desc_mapping; | ||
1249 | e->addr_hi = (u64)desc_mapping >> 32; | ||
1250 | e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); | ||
1251 | ce->skb = NULL; | ||
1252 | pci_unmap_len_set(ce, dma_len, 0); | ||
1253 | |||
1254 | if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && | ||
1255 | desc_len > SGE_TX_DESC_MAX_PLEN) { | ||
1256 | desc_mapping += first_desc_len; | ||
1257 | desc_len -= first_desc_len; | ||
1258 | e1++; | ||
1259 | ce++; | ||
1260 | if (++pidx == q->size) { | ||
1261 | pidx = 0; | ||
1262 | gen ^= 1; | ||
1263 | e1 = q->entries; | ||
1264 | ce = q->centries; | ||
1265 | } | ||
1266 | pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, | ||
1267 | &desc_mapping, &desc_len, | ||
1268 | nfrags, q); | ||
1269 | |||
1270 | if (likely(desc_len)) | ||
1271 | write_tx_desc(e1, desc_mapping, desc_len, gen, | ||
1272 | nfrags == 0); | ||
1273 | } | ||
1274 | |||
874 | ce->skb = NULL; | 1275 | ce->skb = NULL; |
875 | pci_unmap_addr_set(ce, dma_addr, mapping); | 1276 | pci_unmap_addr_set(ce, dma_addr, mapping); |
876 | pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); | 1277 | pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); |
877 | 1278 | ||
878 | flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) | | 1279 | for (i = 0; nfrags--; i++) { |
879 | V_CMD_GEN2(gen); | ||
880 | e = &q->entries[pidx]; | ||
881 | e->addr_lo = (u32)mapping; | ||
882 | e->addr_hi = (u64)mapping >> 32; | ||
883 | e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen); | ||
884 | for (e1 = e, i = 0; nfrags--; i++) { | ||
885 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1280 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
886 | |||
887 | ce++; | ||
888 | e1++; | 1281 | e1++; |
1282 | ce++; | ||
889 | if (++pidx == q->size) { | 1283 | if (++pidx == q->size) { |
890 | pidx = 0; | 1284 | pidx = 0; |
891 | gen ^= 1; | 1285 | gen ^= 1; |
892 | ce = q->centries; | ||
893 | e1 = q->entries; | 1286 | e1 = q->entries; |
1287 | ce = q->centries; | ||
894 | } | 1288 | } |
895 | 1289 | ||
896 | mapping = pci_map_page(adapter->pdev, frag->page, | 1290 | mapping = pci_map_page(adapter->pdev, frag->page, |
897 | frag->page_offset, frag->size, | 1291 | frag->page_offset, frag->size, |
898 | PCI_DMA_TODEVICE); | 1292 | PCI_DMA_TODEVICE); |
1293 | desc_mapping = mapping; | ||
1294 | desc_len = frag->size; | ||
1295 | |||
1296 | pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, | ||
1297 | &desc_mapping, &desc_len, | ||
1298 | nfrags, q); | ||
1299 | if (likely(desc_len)) | ||
1300 | write_tx_desc(e1, desc_mapping, desc_len, gen, | ||
1301 | nfrags == 0); | ||
899 | ce->skb = NULL; | 1302 | ce->skb = NULL; |
900 | pci_unmap_addr_set(ce, dma_addr, mapping); | 1303 | pci_unmap_addr_set(ce, dma_addr, mapping); |
901 | pci_unmap_len_set(ce, dma_len, frag->size); | 1304 | pci_unmap_len_set(ce, dma_len, frag->size); |
902 | |||
903 | e1->addr_lo = (u32)mapping; | ||
904 | e1->addr_hi = (u64)mapping >> 32; | ||
905 | e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen); | ||
906 | e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) | | ||
907 | V_CMD_GEN2(gen); | ||
908 | } | 1305 | } |
909 | |||
910 | ce->skb = skb; | 1306 | ce->skb = skb; |
911 | wmb(); | 1307 | wmb(); |
912 | e->flags = flags; | 1308 | e->flags = flags; |
@@ -920,26 +1316,56 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) | |||
920 | unsigned int reclaim = q->processed - q->cleaned; | 1316 | unsigned int reclaim = q->processed - q->cleaned; |
921 | 1317 | ||
922 | if (reclaim) { | 1318 | if (reclaim) { |
1319 | pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n", | ||
1320 | q->processed, q->cleaned); | ||
923 | free_cmdQ_buffers(sge, q, reclaim); | 1321 | free_cmdQ_buffers(sge, q, reclaim); |
924 | q->cleaned += reclaim; | 1322 | q->cleaned += reclaim; |
925 | } | 1323 | } |
926 | } | 1324 | } |
927 | 1325 | ||
928 | #ifndef SET_ETHTOOL_OPS | ||
929 | # define __netif_rx_complete(dev) netif_rx_complete(dev) | ||
930 | #endif | ||
931 | |||
932 | /* | 1326 | /* |
933 | * We cannot use the standard netif_rx_schedule_prep() because we have multiple | 1327 | * Called from tasklet. Checks the scheduler for any |
934 | * ports plus the TOE all multiplexing onto a single response queue, therefore | 1328 | * pending skbs that can be sent. |
935 | * accepting new responses cannot depend on the state of any particular port. | ||
936 | * So define our own equivalent that omits the netif_running() test. | ||
937 | */ | 1329 | */ |
938 | static inline int napi_schedule_prep(struct net_device *dev) | 1330 | static void restart_sched(unsigned long arg) |
939 | { | 1331 | { |
940 | return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state); | 1332 | struct sge *sge = (struct sge *) arg; |
941 | } | 1333 | struct adapter *adapter = sge->adapter; |
1334 | struct cmdQ *q = &sge->cmdQ[0]; | ||
1335 | struct sk_buff *skb; | ||
1336 | unsigned int credits, queued_skb = 0; | ||
942 | 1337 | ||
1338 | spin_lock(&q->lock); | ||
1339 | reclaim_completed_tx(sge, q); | ||
1340 | |||
1341 | credits = q->size - q->in_use; | ||
1342 | pr_debug("restart_sched credits=%d\n", credits); | ||
1343 | while ((skb = sched_skb(sge, NULL, credits)) != NULL) { | ||
1344 | unsigned int genbit, pidx, count; | ||
1345 | count = 1 + skb_shinfo(skb)->nr_frags; | ||
1346 | count += compute_large_page_tx_descs(skb); | ||
1347 | q->in_use += count; | ||
1348 | genbit = q->genbit; | ||
1349 | pidx = q->pidx; | ||
1350 | q->pidx += count; | ||
1351 | if (q->pidx >= q->size) { | ||
1352 | q->pidx -= q->size; | ||
1353 | q->genbit ^= 1; | ||
1354 | } | ||
1355 | write_tx_descs(adapter, skb, pidx, genbit, q); | ||
1356 | credits = q->size - q->in_use; | ||
1357 | queued_skb = 1; | ||
1358 | } | ||
1359 | |||
1360 | if (queued_skb) { | ||
1361 | clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | ||
1362 | if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { | ||
1363 | set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | ||
1364 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | ||
1365 | } | ||
1366 | } | ||
1367 | spin_unlock(&q->lock); | ||
1368 | } | ||
943 | 1369 | ||
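Editor's note: restart_sched() above (like sched_skb() and t1_sge_tx()) uses a small bit protocol so that only the caller that flips the RUNNING flag from 0 to 1 rings the hardware doorbell, avoiding duplicate PIO writes from concurrent senders. The hedged sketch below models that protocol with C11 atomics standing in for test_and_set_bit()/clear_bit(); the register write is just a printf.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cmdq_running;      /* CMDQ_STAT_RUNNING stand-in */
static atomic_int cmdq_last_pkt_db;  /* CMDQ_STAT_LAST_PKT_DB stand-in */

static void model_ring_doorbell(void)
{
	printf("writel(F_CMDQ0_ENABLE, ...A_SG_DOORBELL)\n");
}

static void model_kick_cmdq0(void)
{
	atomic_store(&cmdq_last_pkt_db, 0);
	if (atomic_exchange(&cmdq_running, 1) == 0) {
		/* we won the race: mark the last doorbell and ring it */
		atomic_store(&cmdq_last_pkt_db, 1);
		model_ring_doorbell();
	}
}

int main(void)
{
	model_kick_cmdq0();	/* rings the doorbell */
	model_kick_cmdq0();	/* RUNNING already set: no second PIO write */
	return 0;
}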
944 | /** | 1370 | /** |
945 | * sge_rx - process an ingress ethernet packet | 1371 | * sge_rx - process an ingress ethernet packet |
@@ -954,31 +1380,39 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) | |||
954 | struct sk_buff *skb; | 1380 | struct sk_buff *skb; |
955 | struct cpl_rx_pkt *p; | 1381 | struct cpl_rx_pkt *p; |
956 | struct adapter *adapter = sge->adapter; | 1382 | struct adapter *adapter = sge->adapter; |
1383 | struct sge_port_stats *st; | ||
957 | 1384 | ||
958 | sge->stats.ethernet_pkts++; | ||
959 | skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad, | 1385 | skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad, |
960 | sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES, | 1386 | sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES, |
961 | SGE_RX_DROP_THRES); | 1387 | SGE_RX_DROP_THRES); |
962 | if (!skb) { | 1388 | if (unlikely(!skb)) { |
963 | sge->port_stats[0].rx_drops++; /* charge only port 0 for now */ | 1389 | sge->stats.rx_drops++; |
964 | return 0; | 1390 | return 0; |
965 | } | 1391 | } |
966 | 1392 | ||
967 | p = (struct cpl_rx_pkt *)skb->data; | 1393 | p = (struct cpl_rx_pkt *)skb->data; |
968 | skb_pull(skb, sizeof(*p)); | 1394 | skb_pull(skb, sizeof(*p)); |
1395 | if (p->iff >= adapter->params.nports) { | ||
1396 | kfree_skb(skb); | ||
1397 | return 0; | ||
1398 | } | ||
1399 | |||
969 | skb->dev = adapter->port[p->iff].dev; | 1400 | skb->dev = adapter->port[p->iff].dev; |
970 | skb->dev->last_rx = jiffies; | 1401 | skb->dev->last_rx = jiffies; |
1402 | st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); | ||
1403 | st->rx_packets++; | ||
1404 | |||
971 | skb->protocol = eth_type_trans(skb, skb->dev); | 1405 | skb->protocol = eth_type_trans(skb, skb->dev); |
972 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && | 1406 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && |
973 | skb->protocol == htons(ETH_P_IP) && | 1407 | skb->protocol == htons(ETH_P_IP) && |
974 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { | 1408 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { |
975 | sge->port_stats[p->iff].rx_cso_good++; | 1409 | ++st->rx_cso_good; |
976 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1410 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
977 | } else | 1411 | } else |
978 | skb->ip_summed = CHECKSUM_NONE; | 1412 | skb->ip_summed = CHECKSUM_NONE; |
979 | 1413 | ||
980 | if (unlikely(adapter->vlan_grp && p->vlan_valid)) { | 1414 | if (unlikely(adapter->vlan_grp && p->vlan_valid)) { |
981 | sge->port_stats[p->iff].vlan_xtract++; | 1415 | st->vlan_xtract++; |
982 | if (adapter->params.sge.polling) | 1416 | if (adapter->params.sge.polling) |
983 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, | 1417 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, |
984 | ntohs(p->vlan)); | 1418 | ntohs(p->vlan)); |
@@ -1039,18 +1473,24 @@ static unsigned int update_tx_info(struct adapter *adapter, | |||
1039 | struct cmdQ *cmdq = &sge->cmdQ[0]; | 1473 | struct cmdQ *cmdq = &sge->cmdQ[0]; |
1040 | 1474 | ||
1041 | cmdq->processed += pr0; | 1475 | cmdq->processed += pr0; |
1042 | 1476 | if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) { | |
1477 | freelQs_empty(sge); | ||
1478 | flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE); | ||
1479 | } | ||
1043 | if (flags & F_CMDQ0_ENABLE) { | 1480 | if (flags & F_CMDQ0_ENABLE) { |
1044 | clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); | 1481 | clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); |
1045 | 1482 | ||
1046 | if (cmdq->cleaned + cmdq->in_use != cmdq->processed && | 1483 | if (cmdq->cleaned + cmdq->in_use != cmdq->processed && |
1047 | !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { | 1484 | !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { |
1048 | set_bit(CMDQ_STAT_RUNNING, &cmdq->status); | 1485 | set_bit(CMDQ_STAT_RUNNING, &cmdq->status); |
1049 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | 1486 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); |
1050 | } | 1487 | } |
1051 | flags &= ~F_CMDQ0_ENABLE; | 1488 | if (sge->tx_sched) |
1489 | tasklet_hi_schedule(&sge->tx_sched->sched_tsk); | ||
1490 | |||
1491 | flags &= ~F_CMDQ0_ENABLE; | ||
1052 | } | 1492 | } |
1053 | 1493 | ||
1054 | if (unlikely(sge->stopped_tx_queues != 0)) | 1494 | if (unlikely(sge->stopped_tx_queues != 0)) |
1055 | restart_tx_queues(sge); | 1495 | restart_tx_queues(sge); |
1056 | 1496 | ||
@@ -1241,20 +1681,21 @@ static irqreturn_t t1_interrupt_napi(int irq, void *data) | |||
1241 | if (e->GenerationBit == q->genbit) { | 1681 | if (e->GenerationBit == q->genbit) { |
1242 | if (e->DataValid || | 1682 | if (e->DataValid || |
1243 | process_pure_responses(adapter, e)) { | 1683 | process_pure_responses(adapter, e)) { |
1244 | if (likely(napi_schedule_prep(sge->netdev))) | 1684 | if (likely(__netif_rx_schedule_prep(sge->netdev))) |
1245 | __netif_rx_schedule(sge->netdev); | 1685 | __netif_rx_schedule(sge->netdev); |
1246 | else | 1686 | else if (net_ratelimit()) |
1247 | printk(KERN_CRIT | 1687 | printk(KERN_INFO |
1248 | "NAPI schedule failure!\n"); | 1688 | "NAPI schedule failure!\n"); |
1249 | } else | 1689 | } else |
1250 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); | 1690 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); |
1691 | |||
1251 | handled = 1; | 1692 | handled = 1; |
1252 | goto unlock; | 1693 | goto unlock; |
1253 | } else | 1694 | } else |
1254 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); | 1695 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); |
1255 | } else | 1696 | } else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) { |
1256 | if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) | 1697 | printk(KERN_ERR "data interrupt while NAPI running\n"); |
1257 | printk(KERN_ERR "data interrupt while NAPI running\n"); | 1698 | } |
1258 | 1699 | ||
1259 | handled = t1_slow_intr_handler(adapter); | 1700 | handled = t1_slow_intr_handler(adapter); |
1260 | if (!handled) | 1701 | if (!handled) |
@@ -1335,34 +1776,59 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, | |||
1335 | { | 1776 | { |
1336 | struct sge *sge = adapter->sge; | 1777 | struct sge *sge = adapter->sge; |
1337 | struct cmdQ *q = &sge->cmdQ[qid]; | 1778 | struct cmdQ *q = &sge->cmdQ[qid]; |
1338 | unsigned int credits, pidx, genbit, count; | 1779 | unsigned int credits, pidx, genbit, count, use_sched_skb = 0; |
1780 | |||
1781 | if (!spin_trylock(&q->lock)) | ||
1782 | return NETDEV_TX_LOCKED; | ||
1339 | 1783 | ||
1340 | spin_lock(&q->lock); | ||
1341 | reclaim_completed_tx(sge, q); | 1784 | reclaim_completed_tx(sge, q); |
1342 | 1785 | ||
1343 | pidx = q->pidx; | 1786 | pidx = q->pidx; |
1344 | credits = q->size - q->in_use; | 1787 | credits = q->size - q->in_use; |
1345 | count = 1 + skb_shinfo(skb)->nr_frags; | 1788 | count = 1 + skb_shinfo(skb)->nr_frags; |
1789 | count += compute_large_page_tx_descs(skb); | ||
1346 | 1790 | ||
1347 | { /* Ethernet packet */ | 1791 | /* Ethernet packet */ |
1348 | if (unlikely(credits < count)) { | 1792 | if (unlikely(credits < count)) { |
1793 | if (!netif_queue_stopped(dev)) { | ||
1349 | netif_stop_queue(dev); | 1794 | netif_stop_queue(dev); |
1350 | set_bit(dev->if_port, &sge->stopped_tx_queues); | 1795 | set_bit(dev->if_port, &sge->stopped_tx_queues); |
1351 | sge->stats.cmdQ_full[2]++; | 1796 | sge->stats.cmdQ_full[2]++; |
1352 | spin_unlock(&q->lock); | 1797 | CH_ERR("%s: Tx ring full while queue awake!\n", |
1353 | if (!netif_queue_stopped(dev)) | 1798 | adapter->name); |
1354 | CH_ERR("%s: Tx ring full while queue awake!\n", | ||
1355 | adapter->name); | ||
1356 | return NETDEV_TX_BUSY; | ||
1357 | } | 1799 | } |
1358 | if (unlikely(credits - count < q->stop_thres)) { | 1800 | spin_unlock(&q->lock); |
1359 | sge->stats.cmdQ_full[2]++; | 1801 | return NETDEV_TX_BUSY; |
1360 | netif_stop_queue(dev); | 1802 | } |
1361 | set_bit(dev->if_port, &sge->stopped_tx_queues); | 1803 | |
1804 | if (unlikely(credits - count < q->stop_thres)) { | ||
1805 | netif_stop_queue(dev); | ||
1806 | set_bit(dev->if_port, &sge->stopped_tx_queues); | ||
1807 | sge->stats.cmdQ_full[2]++; | ||
1808 | } | ||
1809 | |||
1810 | /* T204 cmdQ0 skbs that are destined for a certain port have to go | ||
1811 | * through the scheduler. | ||
1812 | */ | ||
1813 | if (sge->tx_sched && !qid && skb->dev) { | ||
1814 | use_sched: | ||
1815 | use_sched_skb = 1; | ||
1816 | /* Note that the scheduler might return a different skb than | ||
1817 | * the one passed in. | ||
1818 | */ | ||
1819 | skb = sched_skb(sge, skb, credits); | ||
1820 | if (!skb) { | ||
1821 | spin_unlock(&q->lock); | ||
1822 | return NETDEV_TX_OK; | ||
1362 | } | 1823 | } |
1824 | pidx = q->pidx; | ||
1825 | count = 1 + skb_shinfo(skb)->nr_frags; | ||
1826 | count += compute_large_page_tx_descs(skb); | ||
1363 | } | 1827 | } |
1828 | |||
1364 | q->in_use += count; | 1829 | q->in_use += count; |
1365 | genbit = q->genbit; | 1830 | genbit = q->genbit; |
1831 | pidx = q->pidx; | ||
1366 | q->pidx += count; | 1832 | q->pidx += count; |
1367 | if (q->pidx >= q->size) { | 1833 | if (q->pidx >= q->size) { |
1368 | q->pidx -= q->size; | 1834 | q->pidx -= q->size; |
@@ -1388,6 +1854,14 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, | |||
1388 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | 1854 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); |
1389 | } | 1855 | } |
1390 | } | 1856 | } |
1857 | |||
1858 | if (use_sched_skb) { | ||
1859 | if (spin_trylock(&q->lock)) { | ||
1860 | credits = q->size - q->in_use; | ||
1861 | skb = NULL; | ||
1862 | goto use_sched; | ||
1863 | } | ||
1864 | } | ||
1391 | return NETDEV_TX_OK; | 1865 | return NETDEV_TX_OK; |
1392 | } | 1866 | } |
1393 | 1867 | ||
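Editor's note: the credit handling in t1_sge_tx() above can be summarized as: a send needs one descriptor per fragment plus the extra large-page descriptors; if not enough credits remain, the queue is stopped and the send reported busy, and if the send would leave fewer than stop_thres credits the queue is stopped preemptively while the packet still goes out. The sketch below models only that decision; constants, names, and return codes are illustrative.

#include <stdio.h>

enum model_tx_result { MODEL_TX_OK, MODEL_TX_BUSY };

struct model_cmdq {
	unsigned int size;
	unsigned int in_use;
	unsigned int stop_thres;
	int stopped;
};

static enum model_tx_result model_tx(struct model_cmdq *q, unsigned int count)
{
	unsigned int credits = q->size - q->in_use;

	if (credits < count) {
		q->stopped = 1;		/* netif_stop_queue() */
		return MODEL_TX_BUSY;
	}
	if (credits - count < q->stop_thres)
		q->stopped = 1;		/* stop early, this packet still goes out */

	q->in_use += count;
	return MODEL_TX_OK;
}

int main(void)
{
	struct model_cmdq q = { .size = 64, .in_use = 60, .stop_thres = 2 };

	printf("first send:  %s\n", model_tx(&q, 3) == MODEL_TX_OK ? "ok" : "busy");
	printf("second send: %s\n", model_tx(&q, 3) == MODEL_TX_OK ? "ok" : "busy");
	return 0;
}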
@@ -1412,16 +1886,20 @@ static inline int eth_hdr_len(const void *data) | |||
1412 | int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1886 | int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1413 | { | 1887 | { |
1414 | struct adapter *adapter = dev->priv; | 1888 | struct adapter *adapter = dev->priv; |
1415 | struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port]; | ||
1416 | struct sge *sge = adapter->sge; | 1889 | struct sge *sge = adapter->sge; |
1890 | struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id()); | ||
1417 | struct cpl_tx_pkt *cpl; | 1891 | struct cpl_tx_pkt *cpl; |
1892 | struct sk_buff *orig_skb = skb; | ||
1893 | int ret; | ||
1894 | |||
1895 | if (skb->protocol == htons(ETH_P_CPL5)) | ||
1896 | goto send; | ||
1418 | 1897 | ||
1419 | #ifdef NETIF_F_TSO | 1898 | if (skb_shinfo(skb)->gso_size) { |
1420 | if (skb_is_gso(skb)) { | ||
1421 | int eth_type; | 1899 | int eth_type; |
1422 | struct cpl_tx_pkt_lso *hdr; | 1900 | struct cpl_tx_pkt_lso *hdr; |
1423 | 1901 | ||
1424 | st->tso++; | 1902 | ++st->tx_tso; |
1425 | 1903 | ||
1426 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? | 1904 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? |
1427 | CPL_ETH_II : CPL_ETH_II_VLAN; | 1905 | CPL_ETH_II : CPL_ETH_II_VLAN; |
@@ -1432,13 +1910,10 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1432 | hdr->ip_hdr_words = skb->nh.iph->ihl; | 1910 | hdr->ip_hdr_words = skb->nh.iph->ihl; |
1433 | hdr->tcp_hdr_words = skb->h.th->doff; | 1911 | hdr->tcp_hdr_words = skb->h.th->doff; |
1434 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, | 1912 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, |
1435 | skb_shinfo(skb)->gso_size)); | 1913 | skb_shinfo(skb)->gso_size)); |
1436 | hdr->len = htonl(skb->len - sizeof(*hdr)); | 1914 | hdr->len = htonl(skb->len - sizeof(*hdr)); |
1437 | cpl = (struct cpl_tx_pkt *)hdr; | 1915 | cpl = (struct cpl_tx_pkt *)hdr; |
1438 | sge->stats.tx_lso_pkts++; | 1916 | } else { |
1439 | } else | ||
1440 | #endif | ||
1441 | { | ||
1442 | /* | 1917 | /* |
1443 | * Packets shorter than ETH_HLEN can break the MAC, drop them | 1918 | * Packets shorter than ETH_HLEN can break the MAC, drop them |
1444 | * early. Also, we may get oversized packets because some | 1919 | * early. Also, we may get oversized packets because some |
@@ -1447,6 +1922,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1447 | */ | 1922 | */ |
1448 | if (unlikely(skb->len < ETH_HLEN || | 1923 | if (unlikely(skb->len < ETH_HLEN || |
1449 | skb->len > dev->mtu + eth_hdr_len(skb->data))) { | 1924 | skb->len > dev->mtu + eth_hdr_len(skb->data))) { |
1925 | pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name, | ||
1926 | skb->len, eth_hdr_len(skb->data), dev->mtu); | ||
1450 | dev_kfree_skb_any(skb); | 1927 | dev_kfree_skb_any(skb); |
1451 | return NETDEV_TX_OK; | 1928 | return NETDEV_TX_OK; |
1452 | } | 1929 | } |
@@ -1456,9 +1933,9 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1456 | * components, such as pktgen, do not handle it right. | 1933 | * components, such as pktgen, do not handle it right. |
1457 | * Complain when this happens but try to fix things up. | 1934 | * Complain when this happens but try to fix things up. |
1458 | */ | 1935 | */ |
1459 | if (unlikely(skb_headroom(skb) < | 1936 | if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { |
1460 | dev->hard_header_len - ETH_HLEN)) { | 1937 | pr_debug("%s: headroom %d header_len %d\n", dev->name, |
1461 | struct sk_buff *orig_skb = skb; | 1938 | skb_headroom(skb), dev->hard_header_len); |
1462 | 1939 | ||
1463 | if (net_ratelimit()) | 1940 | if (net_ratelimit()) |
1464 | printk(KERN_ERR "%s: inadequate headroom in " | 1941 | printk(KERN_ERR "%s: inadequate headroom in " |
@@ -1471,19 +1948,21 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1471 | 1948 | ||
1472 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && | 1949 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && |
1473 | skb->ip_summed == CHECKSUM_PARTIAL && | 1950 | skb->ip_summed == CHECKSUM_PARTIAL && |
1474 | skb->nh.iph->protocol == IPPROTO_UDP) | 1951 | skb->nh.iph->protocol == IPPROTO_UDP) { |
1475 | if (unlikely(skb_checksum_help(skb))) { | 1952 | if (unlikely(skb_checksum_help(skb))) { |
1953 | pr_debug("%s: unable to do udp checksum\n", dev->name); | ||
1476 | dev_kfree_skb_any(skb); | 1954 | dev_kfree_skb_any(skb); |
1477 | return NETDEV_TX_OK; | 1955 | return NETDEV_TX_OK; |
1478 | } | 1956 | } |
1957 | } | ||
1479 | 1958 | ||
1480 | /* Hmm, we assume this catches the gratuitous ARP... and we'll use | 1959 | /* Hmm, we assume this catches the gratuitous ARP... and we'll use |
1481 | * it to flush out stuck ESPI packets... | 1960 | * it to flush out stuck ESPI packets... |
1482 | */ | 1961 | */ |
1483 | if (unlikely(!adapter->sge->espibug_skb)) { | 1962 | if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { |
1484 | if (skb->protocol == htons(ETH_P_ARP) && | 1963 | if (skb->protocol == htons(ETH_P_ARP) && |
1485 | skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) { | 1964 | skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) { |
1486 | adapter->sge->espibug_skb = skb; | 1965 | adapter->sge->espibug_skb[dev->if_port] = skb; |
1487 | /* We want to re-use this skb later. We | 1966 | /* We want to re-use this skb later. We |
1488 | * simply bump the reference count and it | 1967 | * simply bump the reference count and it |
1489 | * will not be freed... | 1968 | * will not be freed... |
@@ -1499,8 +1978,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1499 | /* the length field isn't used so don't bother setting it */ | 1978 | /* the length field isn't used so don't bother setting it */ |
1500 | 1979 | ||
1501 | st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL); | 1980 | st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL); |
1502 | sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_PARTIAL); | ||
1503 | sge->stats.tx_reg_pkts++; | ||
1504 | } | 1981 | } |
1505 | cpl->iff = dev->if_port; | 1982 | cpl->iff = dev->if_port; |
1506 | 1983 | ||
@@ -1513,8 +1990,19 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1513 | #endif | 1990 | #endif |
1514 | cpl->vlan_valid = 0; | 1991 | cpl->vlan_valid = 0; |
1515 | 1992 | ||
1993 | send: | ||
1994 | st->tx_packets++; | ||
1516 | dev->trans_start = jiffies; | 1995 | dev->trans_start = jiffies; |
1517 | return t1_sge_tx(skb, adapter, 0, dev); | 1996 | ret = t1_sge_tx(skb, adapter, 0, dev); |
1997 | |||
1998 | /* If the transmit path was busy and we had reallocated the skb due to | ||
1999 | * the headroom limit, silently discard the copy to avoid a leak. | ||
2000 | */ | ||
2001 | if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { | ||
2002 | dev_kfree_skb_any(skb); | ||
2003 | ret = NETDEV_TX_OK; | ||
2004 | } | ||
2005 | return ret; | ||
1518 | } | 2006 | } |
1519 | 2007 | ||
1520 | /* | 2008 | /* |
@@ -1532,10 +2020,9 @@ static void sge_tx_reclaim_cb(unsigned long data) | |||
1532 | continue; | 2020 | continue; |
1533 | 2021 | ||
1534 | reclaim_completed_tx(sge, q); | 2022 | reclaim_completed_tx(sge, q); |
1535 | if (i == 0 && q->in_use) /* flush pending credits */ | 2023 | if (i == 0 && q->in_use) { /* flush pending credits */ |
1536 | writel(F_CMDQ0_ENABLE, | 2024 | writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); |
1537 | sge->adapter->regs + A_SG_DOORBELL); | 2025 | } |
1538 | |||
1539 | spin_unlock(&q->lock); | 2026 | spin_unlock(&q->lock); |
1540 | } | 2027 | } |
1541 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | 2028 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); |
@@ -1582,11 +2069,20 @@ int t1_sge_configure(struct sge *sge, struct sge_params *p) | |||
1582 | */ | 2069 | */ |
1583 | void t1_sge_stop(struct sge *sge) | 2070 | void t1_sge_stop(struct sge *sge) |
1584 | { | 2071 | { |
2072 | int i; | ||
1585 | writel(0, sge->adapter->regs + A_SG_CONTROL); | 2073 | writel(0, sge->adapter->regs + A_SG_CONTROL); |
1586 | (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ | 2074 | readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ |
2075 | |||
1587 | if (is_T2(sge->adapter)) | 2076 | if (is_T2(sge->adapter)) |
1588 | del_timer_sync(&sge->espibug_timer); | 2077 | del_timer_sync(&sge->espibug_timer); |
2078 | |||
1589 | del_timer_sync(&sge->tx_reclaim_timer); | 2079 | del_timer_sync(&sge->tx_reclaim_timer); |
2080 | if (sge->tx_sched) | ||
2081 | tx_sched_stop(sge); | ||
2082 | |||
2083 | for (i = 0; i < MAX_NPORTS; i++) | ||
2084 | if (sge->espibug_skb[i]) | ||
2085 | kfree_skb(sge->espibug_skb[i]); | ||
1590 | } | 2086 | } |
1591 | 2087 | ||
1592 | /* | 2088 | /* |
@@ -1599,74 +2095,128 @@ void t1_sge_start(struct sge *sge) | |||
1599 | 2095 | ||
1600 | writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); | 2096 | writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); |
1601 | doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); | 2097 | doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); |
1602 | (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ | 2098 | readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ |
1603 | 2099 | ||
1604 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | 2100 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); |
1605 | 2101 | ||
1606 | if (is_T2(sge->adapter)) | 2102 | if (is_T2(sge->adapter)) |
1607 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | 2103 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); |
1608 | } | 2104 | } |
1609 | 2105 | ||
1610 | /* | 2106 | /* |
1611 | * Callback for the T2 ESPI 'stuck packet feature' workaround | 2107 | * Callback for the T2 ESPI 'stuck packet feature' workaround |
1612 | */ | 2108 | */ |
1613 | static void espibug_workaround(void *data) | 2109 | static void espibug_workaround_t204(unsigned long data) |
1614 | { | 2110 | { |
1615 | struct adapter *adapter = (struct adapter *)data; | 2111 | struct adapter *adapter = (struct adapter *)data; |
1616 | struct sge *sge = adapter->sge; | 2112 | struct sge *sge = adapter->sge; |
2113 | unsigned int nports = adapter->params.nports; | ||
2114 | u32 seop[MAX_NPORTS]; | ||
1617 | 2115 | ||
1618 | if (netif_running(adapter->port[0].dev)) { | 2116 | if (adapter->open_device_map & PORT_MASK) { |
1619 | struct sk_buff *skb = sge->espibug_skb; | 2117 | int i; |
1620 | 2118 | if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) { | |
1621 | u32 seop = t1_espi_get_mon(adapter, 0x930, 0); | 2119 | return; |
1622 | 2120 | } | |
1623 | if ((seop & 0xfff0fff) == 0xfff && skb) { | 2121 | for (i = 0; i < nports; i++) { |
1624 | if (!skb->cb[0]) { | 2122 | struct sk_buff *skb = sge->espibug_skb[i]; |
1625 | u8 ch_mac_addr[ETH_ALEN] = | 2123 | if ( (netif_running(adapter->port[i].dev)) && |
1626 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; | 2124 | !(netif_queue_stopped(adapter->port[i].dev)) && |
1627 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | 2125 | (seop[i] && ((seop[i] & 0xfff) == 0)) && |
1628 | ch_mac_addr, ETH_ALEN); | 2126 | skb ) { |
1629 | memcpy(skb->data + skb->len - 10, ch_mac_addr, | 2127 | if (!skb->cb[0]) { |
1630 | ETH_ALEN); | 2128 | u8 ch_mac_addr[ETH_ALEN] = |
1631 | skb->cb[0] = 0xff; | 2129 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; |
2130 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | ||
2131 | ch_mac_addr, ETH_ALEN); | ||
2132 | memcpy(skb->data + skb->len - 10, | ||
2133 | ch_mac_addr, ETH_ALEN); | ||
2134 | skb->cb[0] = 0xff; | ||
2135 | } | ||
2136 | |||
2137 | /* bump the reference count to avoid freeing of | ||
2138 | * the skb once the DMA has completed. | ||
2139 | */ | ||
2140 | skb = skb_get(skb); | ||
2141 | t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); | ||
1632 | } | 2142 | } |
1633 | |||
1634 | /* bump the reference count to avoid freeing of the | ||
1635 | * skb once the DMA has completed. | ||
1636 | */ | ||
1637 | skb = skb_get(skb); | ||
1638 | t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); | ||
1639 | } | 2143 | } |
1640 | } | 2144 | } |
1641 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | 2145 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); |
1642 | } | 2146 | } |
1643 | 2147 | ||
2148 | static void espibug_workaround(unsigned long data) | ||
2149 | { | ||
2150 | struct adapter *adapter = (struct adapter *)data; | ||
2151 | struct sge *sge = adapter->sge; | ||
2152 | |||
2153 | if (netif_running(adapter->port[0].dev)) { | ||
2154 | struct sk_buff *skb = sge->espibug_skb[0]; | ||
2155 | u32 seop = t1_espi_get_mon(adapter, 0x930, 0); | ||
2156 | |||
2157 | if ((seop & 0xfff0fff) == 0xfff && skb) { | ||
2158 | if (!skb->cb[0]) { | ||
2159 | u8 ch_mac_addr[ETH_ALEN] = | ||
2160 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; | ||
2161 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | ||
2162 | ch_mac_addr, ETH_ALEN); | ||
2163 | memcpy(skb->data + skb->len - 10, ch_mac_addr, | ||
2164 | ETH_ALEN); | ||
2165 | skb->cb[0] = 0xff; | ||
2166 | } | ||
2167 | |||
2168 | /* bump the reference count to avoid freeing of the | ||
2169 | * skb once the DMA has completed. | ||
2170 | */ | ||
2171 | skb = skb_get(skb); | ||
2172 | t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); | ||
2173 | } | ||
2174 | } | ||
2175 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | ||
2176 | } | ||
2177 | |||
1644 | /* | 2178 | /* |
1645 | * Creates a t1_sge structure and returns suggested resource parameters. | 2179 | * Creates a t1_sge structure and returns suggested resource parameters. |
1646 | */ | 2180 | */ |
1647 | struct sge * __devinit t1_sge_create(struct adapter *adapter, | 2181 | struct sge * __devinit t1_sge_create(struct adapter *adapter, |
1648 | struct sge_params *p) | 2182 | struct sge_params *p) |
1649 | { | 2183 | { |
1650 | struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL); | 2184 | struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); |
2185 | int i; | ||
1651 | 2186 | ||
1652 | if (!sge) | 2187 | if (!sge) |
1653 | return NULL; | 2188 | return NULL; |
1654 | memset(sge, 0, sizeof(*sge)); | ||
1655 | 2189 | ||
1656 | sge->adapter = adapter; | 2190 | sge->adapter = adapter; |
1657 | sge->netdev = adapter->port[0].dev; | 2191 | sge->netdev = adapter->port[0].dev; |
1658 | sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; | 2192 | sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; |
1659 | sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; | 2193 | sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; |
1660 | 2194 | ||
2195 | for_each_port(adapter, i) { | ||
2196 | sge->port_stats[i] = alloc_percpu(struct sge_port_stats); | ||
2197 | if (!sge->port_stats[i]) | ||
2198 | goto nomem_port; | ||
2199 | } | ||
2200 | |||
1661 | init_timer(&sge->tx_reclaim_timer); | 2201 | init_timer(&sge->tx_reclaim_timer); |
1662 | sge->tx_reclaim_timer.data = (unsigned long)sge; | 2202 | sge->tx_reclaim_timer.data = (unsigned long)sge; |
1663 | sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; | 2203 | sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; |
1664 | 2204 | ||
1665 | if (is_T2(sge->adapter)) { | 2205 | if (is_T2(sge->adapter)) { |
1666 | init_timer(&sge->espibug_timer); | 2206 | init_timer(&sge->espibug_timer); |
1667 | sge->espibug_timer.function = (void *)&espibug_workaround; | 2207 | |
2208 | if (adapter->params.nports > 1) { | ||
2209 | tx_sched_init(sge); | ||
2210 | sge->espibug_timer.function = espibug_workaround_t204; | ||
2211 | } else { | ||
2212 | sge->espibug_timer.function = espibug_workaround; | ||
2213 | } | ||
1668 | sge->espibug_timer.data = (unsigned long)sge->adapter; | 2214 | sge->espibug_timer.data = (unsigned long)sge->adapter; |
2215 | |||
1669 | sge->espibug_timeout = 1; | 2216 | sge->espibug_timeout = 1; |
2217 | /* for T204, every 10ms */ | ||
2218 | if (adapter->params.nports > 1) | ||
2219 | sge->espibug_timeout = HZ/100; | ||
1670 | } | 2220 | } |
1671 | 2221 | ||
1672 | 2222 | ||
@@ -1674,10 +2224,25 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter, | |||
1674 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; | 2224 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; |
1675 | p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; | 2225 | p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; |
1676 | p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; | 2226 | p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; |
1677 | p->rx_coalesce_usecs = 50; | 2227 | if (sge->tx_sched) { |
2228 | if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) | ||
2229 | p->rx_coalesce_usecs = 15; | ||
2230 | else | ||
2231 | p->rx_coalesce_usecs = 50; | ||
2232 | } else | ||
2233 | p->rx_coalesce_usecs = 50; | ||
2234 | |||
1678 | p->coalesce_enable = 0; | 2235 | p->coalesce_enable = 0; |
1679 | p->sample_interval_usecs = 0; | 2236 | p->sample_interval_usecs = 0; |
1680 | p->polling = 0; | 2237 | p->polling = 0; |
1681 | 2238 | ||
1682 | return sge; | 2239 | return sge; |
2240 | nomem_port: | ||
2241 | while (i >= 0) { | ||
2242 | free_percpu(sge->port_stats[i]); | ||
2243 | --i; | ||
2244 | } | ||
2245 | kfree(sge); | ||
2246 | return NULL; | ||
2247 | |||
1683 | } | 2248 | } |
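Editor's note: the new t1_sge_create() above allocates one per-CPU statistics block per port and, on failure, unwinds by freeing the blocks already allocated before returning NULL. The sketch below shows that allocate-then-unwind pattern in standalone C, with calloc()/free() standing in for alloc_percpu()/free_percpu(); names and types are made up for the illustration.

#include <stdlib.h>

#define MODEL_MAX_NPORTS 4

struct model_port_stats { unsigned long rx_packets, tx_packets; };

struct model_sge {
	struct model_port_stats *port_stats[MODEL_MAX_NPORTS];
};

static struct model_sge *model_sge_create(int nports)
{
	struct model_sge *sge = calloc(1, sizeof(*sge));
	int i;

	if (!sge)
		return NULL;

	for (i = 0; i < nports; i++) {
		sge->port_stats[i] = calloc(1, sizeof(struct model_port_stats));
		if (!sge->port_stats[i])
			goto nomem_port;
	}
	return sge;

nomem_port:
	while (--i >= 0)	/* free only what was actually allocated */
		free(sge->port_stats[i]);
	free(sge);
	return NULL;
}

int main(void)
{
	struct model_sge *sge = model_sge_create(2);

	if (sge) {
		free(sge->port_stats[0]);
		free(sge->port_stats[1]);
		free(sge);
	}
	return 0;
}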
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h index 91af47bab7be..7ceb0117d039 100644 --- a/drivers/net/chelsio/sge.h +++ b/drivers/net/chelsio/sge.h | |||
@@ -44,6 +44,9 @@ | |||
44 | #include <asm/byteorder.h> | 44 | #include <asm/byteorder.h> |
45 | 45 | ||
46 | struct sge_intr_counts { | 46 | struct sge_intr_counts { |
47 | unsigned int rx_drops; /* # of packets dropped due to no mem */ | ||
48 | unsigned int pure_rsps; /* # of non-payload responses */ | ||
49 | unsigned int unhandled_irqs; /* # of unhandled interrupts */ | ||
47 | unsigned int respQ_empty; /* # times respQ empty */ | 50 | unsigned int respQ_empty; /* # times respQ empty */ |
48 | unsigned int respQ_overflow; /* # respQ overflow (fatal) */ | 51 | unsigned int respQ_overflow; /* # respQ overflow (fatal) */ |
49 | unsigned int freelistQ_empty; /* # times freelist empty */ | 52 | unsigned int freelistQ_empty; /* # times freelist empty */ |
@@ -51,24 +54,16 @@ struct sge_intr_counts { | |||
51 | unsigned int pkt_mismatch; | 54 | unsigned int pkt_mismatch; |
52 | unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */ | 55 | unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */ |
53 | unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */ | 56 | unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */ |
54 | unsigned int ethernet_pkts; /* # of Ethernet packets received */ | ||
55 | unsigned int offload_pkts; /* # of offload packets received */ | ||
56 | unsigned int offload_bundles; /* # of offload pkt bundles delivered */ | ||
57 | unsigned int pure_rsps; /* # of non-payload responses */ | ||
58 | unsigned int unhandled_irqs; /* # of unhandled interrupts */ | ||
59 | unsigned int tx_ipfrags; | ||
60 | unsigned int tx_reg_pkts; | ||
61 | unsigned int tx_lso_pkts; | ||
62 | unsigned int tx_do_cksum; | ||
63 | }; | 57 | }; |
64 | 58 | ||
65 | struct sge_port_stats { | 59 | struct sge_port_stats { |
66 | unsigned long rx_cso_good; /* # of successful RX csum offloads */ | 60 | u64 rx_packets; /* # of Ethernet packets received */ |
67 | unsigned long tx_cso; /* # of TX checksum offloads */ | 61 | u64 rx_cso_good; /* # of successful RX csum offloads */ |
68 | unsigned long vlan_xtract; /* # of VLAN tag extractions */ | 62 | u64 tx_packets; /* # of TX packets */ |
69 | unsigned long vlan_insert; /* # of VLAN tag extractions */ | 63 | u64 tx_cso; /* # of TX checksum offloads */ |
70 | unsigned long tso; /* # of TSO requests */ | 64 | u64 tx_tso; /* # of TSO requests */ |
71 | unsigned long rx_drops; /* # of packets dropped due to no mem */ | 65 | u64 vlan_xtract; /* # of VLAN tag extractions */ |
66 | u64 vlan_insert; /* # of VLAN tag insertions */ | ||
72 | }; | 67 | }; |
73 | 68 | ||
74 | struct sk_buff; | 69 | struct sk_buff; |
@@ -90,7 +85,11 @@ int t1_sge_intr_error_handler(struct sge *); | |||
90 | void t1_sge_intr_enable(struct sge *); | 85 | void t1_sge_intr_enable(struct sge *); |
91 | void t1_sge_intr_disable(struct sge *); | 86 | void t1_sge_intr_disable(struct sge *); |
92 | void t1_sge_intr_clear(struct sge *); | 87 | void t1_sge_intr_clear(struct sge *); |
93 | const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge); | 88 | const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge); |
94 | const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port); | 89 | void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *); |
90 | void t1_sched_set_max_avail_bytes(struct sge *, unsigned int); | ||
91 | void t1_sched_set_drain_bits_per_us(struct sge *, unsigned int, unsigned int); | ||
92 | unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int, | ||
93 | unsigned int); | ||
95 | 94 | ||
96 | #endif /* _CXGB_SGE_H_ */ | 95 | #endif /* _CXGB_SGE_H_ */ |
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 12e4e96dba2d..22ed9a383c08 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include "gmac.h" | 43 | #include "gmac.h" |
44 | #include "cphy.h" | 44 | #include "cphy.h" |
45 | #include "sge.h" | 45 | #include "sge.h" |
46 | #include "tp.h" | ||
46 | #include "espi.h" | 47 | #include "espi.h" |
47 | 48 | ||
48 | /** | 49 | /** |
@@ -59,7 +60,7 @@ | |||
59 | * otherwise. | 60 | * otherwise. |
60 | */ | 61 | */ |
61 | static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, | 62 | static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, |
62 | int attempts, int delay) | 63 | int attempts, int delay) |
63 | { | 64 | { |
64 | while (1) { | 65 | while (1) { |
65 | u32 val = readl(adapter->regs + reg) & mask; | 66 | u32 val = readl(adapter->regs + reg) & mask; |
@@ -78,7 +79,7 @@ static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, | |||
78 | /* | 79 | /* |
79 | * Write a register over the TPI interface (unlocked and locked versions). | 80 | * Write a register over the TPI interface (unlocked and locked versions). |
80 | */ | 81 | */ |
81 | static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) | 82 | int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) |
82 | { | 83 | { |
83 | int tpi_busy; | 84 | int tpi_busy; |
84 | 85 | ||
@@ -98,16 +99,16 @@ int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) | |||
98 | { | 99 | { |
99 | int ret; | 100 | int ret; |
100 | 101 | ||
101 | spin_lock(&(adapter)->tpi_lock); | 102 | spin_lock(&adapter->tpi_lock); |
102 | ret = __t1_tpi_write(adapter, addr, value); | 103 | ret = __t1_tpi_write(adapter, addr, value); |
103 | spin_unlock(&(adapter)->tpi_lock); | 104 | spin_unlock(&adapter->tpi_lock); |
104 | return ret; | 105 | return ret; |
105 | } | 106 | } |
106 | 107 | ||
107 | /* | 108 | /* |
108 | * Read a register over the TPI interface (unlocked and locked versions). | 109 | * Read a register over the TPI interface (unlocked and locked versions). |
109 | */ | 110 | */ |
110 | static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) | 111 | int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) |
111 | { | 112 | { |
112 | int tpi_busy; | 113 | int tpi_busy; |
113 | 114 | ||
@@ -128,18 +129,26 @@ int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) | |||
128 | { | 129 | { |
129 | int ret; | 130 | int ret; |
130 | 131 | ||
131 | spin_lock(&(adapter)->tpi_lock); | 132 | spin_lock(&adapter->tpi_lock); |
132 | ret = __t1_tpi_read(adapter, addr, valp); | 133 | ret = __t1_tpi_read(adapter, addr, valp); |
133 | spin_unlock(&(adapter)->tpi_lock); | 134 | spin_unlock(&adapter->tpi_lock); |
134 | return ret; | 135 | return ret; |
135 | } | 136 | } |
136 | 137 | ||
137 | /* | 138 | /* |
139 | * Set a TPI parameter. | ||
140 | */ | ||
141 | static void t1_tpi_par(adapter_t *adapter, u32 value) | ||
142 | { | ||
143 | writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR); | ||
144 | } | ||
145 | |||
146 | /* | ||
138 | * Called when a port's link settings change to propagate the new values to the | 147 | * Called when a port's link settings change to propagate the new values to the |
139 | * associated PHY and MAC. After performing the common tasks it invokes an | 148 | * associated PHY and MAC. After performing the common tasks it invokes an |
140 | * OS-specific handler. | 149 | * OS-specific handler. |
141 | */ | 150 | */ |
142 | /* static */ void link_changed(adapter_t *adapter, int port_id) | 151 | void t1_link_changed(adapter_t *adapter, int port_id) |
143 | { | 152 | { |
144 | int link_ok, speed, duplex, fc; | 153 | int link_ok, speed, duplex, fc; |
145 | struct cphy *phy = adapter->port[port_id].phy; | 154 | struct cphy *phy = adapter->port[port_id].phy; |
@@ -159,23 +168,83 @@ int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) | |||
159 | mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc); | 168 | mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc); |
160 | lc->fc = (unsigned char)fc; | 169 | lc->fc = (unsigned char)fc; |
161 | } | 170 | } |
162 | t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc); | 171 | t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc); |
163 | } | 172 | } |
164 | 173 | ||
165 | static int t1_pci_intr_handler(adapter_t *adapter) | 174 | static int t1_pci_intr_handler(adapter_t *adapter) |
166 | { | 175 | { |
167 | u32 pcix_cause; | 176 | u32 pcix_cause; |
168 | 177 | ||
169 | pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause); | 178 | pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause); |
170 | 179 | ||
171 | if (pcix_cause) { | 180 | if (pcix_cause) { |
172 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, | 181 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, |
173 | pcix_cause); | 182 | pcix_cause); |
174 | t1_fatal_err(adapter); /* PCI errors are fatal */ | 183 | t1_fatal_err(adapter); /* PCI errors are fatal */ |
175 | } | 184 | } |
176 | return 0; | 185 | return 0; |
177 | } | 186 | } |
178 | 187 | ||
188 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
189 | #include "cspi.h" | ||
190 | #endif | ||
191 | #ifdef CONFIG_CHELSIO_T1_1G | ||
192 | #include "fpga_defs.h" | ||
193 | |||
194 | /* | ||
195 | * PHY interrupt handler for FPGA boards. | ||
196 | */ | ||
197 | static int fpga_phy_intr_handler(adapter_t *adapter) | ||
198 | { | ||
199 | int p; | ||
200 | u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); | ||
201 | |||
202 | for_each_port(adapter, p) | ||
203 | if (cause & (1 << p)) { | ||
204 | struct cphy *phy = adapter->port[p].phy; | ||
205 | int phy_cause = phy->ops->interrupt_handler(phy); | ||
206 | |||
207 | if (phy_cause & cphy_cause_link_change) | ||
208 | t1_link_changed(adapter, p); | ||
209 | } | ||
210 | writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * Slow path interrupt handler for FPGAs. | ||
216 | */ | ||
217 | static int fpga_slow_intr(adapter_t *adapter) | ||
218 | { | ||
219 | u32 cause = readl(adapter->regs + A_PL_CAUSE); | ||
220 | |||
221 | cause &= ~F_PL_INTR_SGE_DATA; | ||
222 | if (cause & F_PL_INTR_SGE_ERR) | ||
223 | t1_sge_intr_error_handler(adapter->sge); | ||
224 | |||
225 | if (cause & FPGA_PCIX_INTERRUPT_GMAC) | ||
226 | fpga_phy_intr_handler(adapter); | ||
227 | |||
228 | if (cause & FPGA_PCIX_INTERRUPT_TP) { | ||
229 | /* | ||
230 | * FPGA doesn't support MC4 interrupts and it requires | ||
231 | * this odd layer of indirection for MC5. | ||
232 | */ | ||
233 | u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); | ||
234 | |||
235 | /* Clear TP interrupt */ | ||
236 | writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); | ||
237 | } | ||
238 | if (cause & FPGA_PCIX_INTERRUPT_PCIX) | ||
239 | t1_pci_intr_handler(adapter); | ||
240 | |||
241 | /* Clear the interrupts just processed. */ | ||
242 | if (cause) | ||
243 | writel(cause, adapter->regs + A_PL_CAUSE); | ||
244 | |||
245 | return cause != 0; | ||
246 | } | ||
247 | #endif | ||
179 | 248 | ||
180 | /* | 249 | /* |
181 | * Wait until Elmer's MI1 interface is ready for new operations. | 250 | * Wait until Elmer's MI1 interface is ready for new operations. |
@@ -212,12 +281,62 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) | |||
212 | t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); | 281 | t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); |
213 | } | 282 | } |
214 | 283 | ||
284 | #if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) | ||
285 | /* | ||
286 | * Elmer MI1 MDIO read/write operations. | ||
287 | */ | ||
288 | static int mi1_mdio_read(adapter_t *adapter, int phy_addr, int mmd_addr, | ||
289 | int reg_addr, unsigned int *valp) | ||
290 | { | ||
291 | u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); | ||
292 | |||
293 | if (mmd_addr) | ||
294 | return -EINVAL; | ||
295 | |||
296 | spin_lock(&adapter->tpi_lock); | ||
297 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); | ||
298 | __t1_tpi_write(adapter, | ||
299 | A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ); | ||
300 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | ||
301 | __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp); | ||
302 | spin_unlock(&adapter->tpi_lock); | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | static int mi1_mdio_write(adapter_t *adapter, int phy_addr, int mmd_addr, | ||
307 | int reg_addr, unsigned int val) | ||
308 | { | ||
309 | u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); | ||
310 | |||
311 | if (mmd_addr) | ||
312 | return -EINVAL; | ||
313 | |||
314 | spin_lock(&adapter->tpi_lock); | ||
315 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); | ||
316 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); | ||
317 | __t1_tpi_write(adapter, | ||
318 | A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE); | ||
319 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | ||
320 | spin_unlock(&adapter->tpi_lock); | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | #if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) | ||
325 | static struct mdio_ops mi1_mdio_ops = { | ||
326 | mi1_mdio_init, | ||
327 | mi1_mdio_read, | ||
328 | mi1_mdio_write | ||
329 | }; | ||
330 | #endif | ||
331 | |||
332 | #endif | ||
333 | |||
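The two helpers added above perform single-cycle "direct" MDIO operations under the TPI lock and reject any MMD address outright, in the spirit of clause-22 access, while the pre-existing mi1_mdio_ext_* routines that follow keep the two-phase "indirect" sequence (latch the MMD register address, then issue a separate read or write), in the spirit of clause 45. A small self-contained model of the two access sequences against a fake register file; every name here is illustrative, not a driver symbol.

#include <stdint.h>
#include <stdio.h>

/* Toy PHY with 32 clause-22 registers and one MMD bank of 32 registers.
 * Purely a model of the access sequences; not driver code. */
static uint16_t c22_regs[32];
static uint16_t mmd_regs[32];
static uint8_t  latched_mmd_reg;        /* address latched by the address phase */

/* Direct (clause-22 style): one operation, no MMD address allowed. */
static int mdio_direct_read(int mmd, int reg, uint16_t *val)
{
    if (mmd)
        return -1;                      /* mirrors the -EINVAL check above */
    *val = c22_regs[reg & 31];
    return 0;
}

/* Indirect (clause-45 style): address phase first, then the data phase. */
static void     mdio_indirect_address(int reg) { latched_mmd_reg = reg & 31; }
static uint16_t mdio_indirect_read(void)       { return mmd_regs[latched_mmd_reg]; }

int main(void)
{
    uint16_t v;

    c22_regs[2] = 0x0141;               /* pretend PHY ID word */
    mmd_regs[1] = 0x2004;               /* pretend MMD status word */

    if (mdio_direct_read(0, 2, &v) == 0)
        printf("direct read reg 2  -> 0x%04x\n", v);

    mdio_indirect_address(1);           /* latch register 1 of the MMD */
    printf("indirect read reg 1 -> 0x%04x\n", mdio_indirect_read());
    return 0;
}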
215 | static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr, | 334 | static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr, |
216 | int reg_addr, unsigned int *valp) | 335 | int reg_addr, unsigned int *valp) |
217 | { | 336 | { |
218 | u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); | 337 | u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); |
219 | 338 | ||
220 | spin_lock(&(adapter)->tpi_lock); | 339 | spin_lock(&adapter->tpi_lock); |
221 | 340 | ||
222 | /* Write the address we want. */ | 341 | /* Write the address we want. */ |
223 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); | 342 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); |
@@ -227,12 +346,13 @@ static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr, | |||
227 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | 346 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); |
228 | 347 | ||
229 | /* Write the operation we want. */ | 348 | /* Write the operation we want. */ |
230 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ); | 349 | __t1_tpi_write(adapter, |
350 | A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ); | ||
231 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | 351 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); |
232 | 352 | ||
233 | /* Read the data. */ | 353 | /* Read the data. */ |
234 | __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp); | 354 | __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp); |
235 | spin_unlock(&(adapter)->tpi_lock); | 355 | spin_unlock(&adapter->tpi_lock); |
236 | return 0; | 356 | return 0; |
237 | } | 357 | } |
238 | 358 | ||
@@ -241,7 +361,7 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr, | |||
241 | { | 361 | { |
242 | u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); | 362 | u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); |
243 | 363 | ||
244 | spin_lock(&(adapter)->tpi_lock); | 364 | spin_lock(&adapter->tpi_lock); |
245 | 365 | ||
246 | /* Write the address we want. */ | 366 | /* Write the address we want. */ |
247 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); | 367 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); |
@@ -254,7 +374,7 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr, | |||
254 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); | 374 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); |
255 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); | 375 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); |
256 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | 376 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); |
257 | spin_unlock(&(adapter)->tpi_lock); | 377 | spin_unlock(&adapter->tpi_lock); |
258 | return 0; | 378 | return 0; |
259 | } | 379 | } |
260 | 380 | ||
@@ -265,12 +385,25 @@ static struct mdio_ops mi1_mdio_ext_ops = { | |||
265 | }; | 385 | }; |
266 | 386 | ||
267 | enum { | 387 | enum { |
388 | CH_BRD_T110_1CU, | ||
268 | CH_BRD_N110_1F, | 389 | CH_BRD_N110_1F, |
269 | CH_BRD_N210_1F, | 390 | CH_BRD_N210_1F, |
391 | CH_BRD_T210_1F, | ||
392 | CH_BRD_T210_1CU, | ||
393 | CH_BRD_N204_4CU, | ||
270 | }; | 394 | }; |
271 | 395 | ||
272 | static struct board_info t1_board[] = { | 396 | static struct board_info t1_board[] = { |
273 | 397 | ||
398 | { CHBT_BOARD_CHT110, 1/*ports#*/, | ||
399 | SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T1, | ||
400 | CHBT_MAC_PM3393, CHBT_PHY_MY3126, | ||
401 | 125000000/*clk-core*/, 150000000/*clk-mc3*/, 125000000/*clk-mc4*/, | ||
402 | 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 1/*mdien*/, | ||
403 | 1/*mdiinv*/, 1/*mdc*/, 1/*phybaseaddr*/, &t1_pm3393_ops, | ||
404 | &t1_my3126_ops, &mi1_mdio_ext_ops, | ||
405 | "Chelsio T110 1x10GBase-CX4 TOE" }, | ||
406 | |||
274 | { CHBT_BOARD_N110, 1/*ports#*/, | 407 | { CHBT_BOARD_N110, 1/*ports#*/, |
275 | SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1, | 408 | SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1, |
276 | CHBT_MAC_PM3393, CHBT_PHY_88X2010, | 409 | CHBT_MAC_PM3393, CHBT_PHY_88X2010, |
@@ -289,12 +422,47 @@ static struct board_info t1_board[] = { | |||
289 | &t1_mv88x201x_ops, &mi1_mdio_ext_ops, | 422 | &t1_mv88x201x_ops, &mi1_mdio_ext_ops, |
290 | "Chelsio N210 1x10GBaseX NIC" }, | 423 | "Chelsio N210 1x10GBaseX NIC" }, |
291 | 424 | ||
425 | { CHBT_BOARD_CHT210, 1/*ports#*/, | ||
426 | SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T2, | ||
427 | CHBT_MAC_PM3393, CHBT_PHY_88X2010, | ||
428 | 125000000/*clk-core*/, 133000000/*clk-mc3*/, 125000000/*clk-mc4*/, | ||
429 | 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/, | ||
430 | 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops, | ||
431 | &t1_mv88x201x_ops, &mi1_mdio_ext_ops, | ||
432 | "Chelsio T210 1x10GBaseX TOE" }, | ||
433 | |||
434 | { CHBT_BOARD_CHT210, 1/*ports#*/, | ||
435 | SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T2, | ||
436 | CHBT_MAC_PM3393, CHBT_PHY_MY3126, | ||
437 | 125000000/*clk-core*/, 133000000/*clk-mc3*/, 125000000/*clk-mc4*/, | ||
438 | 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 1/*mdien*/, | ||
439 | 1/*mdiinv*/, 1/*mdc*/, 1/*phybaseaddr*/, &t1_pm3393_ops, | ||
440 | &t1_my3126_ops, &mi1_mdio_ext_ops, | ||
441 | "Chelsio T210 1x10GBase-CX4 TOE" }, | ||
442 | |||
443 | #ifdef CONFIG_CHELSIO_T1_1G | ||
444 | { CHBT_BOARD_CHN204, 4/*ports#*/, | ||
445 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | | ||
446 | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | | ||
447 | SUPPORTED_PAUSE | SUPPORTED_TP /*caps*/, CHBT_TERM_T2, CHBT_MAC_VSC7321, CHBT_PHY_88E1111, | ||
448 | 100000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/, | ||
449 | 4/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/, | ||
450 | 0/*mdiinv*/, 1/*mdc*/, 4/*phybaseaddr*/, &t1_vsc7326_ops, | ||
451 | &t1_mv88e1xxx_ops, &mi1_mdio_ops, | ||
452 | "Chelsio N204 4x100/1000BaseT NIC" }, | ||
453 | #endif | ||
454 | |||
292 | }; | 455 | }; |
293 | 456 | ||
294 | struct pci_device_id t1_pci_tbl[] = { | 457 | struct pci_device_id t1_pci_tbl[] = { |
458 | CH_DEVICE(8, 0, CH_BRD_T110_1CU), | ||
459 | CH_DEVICE(8, 1, CH_BRD_T110_1CU), | ||
295 | CH_DEVICE(7, 0, CH_BRD_N110_1F), | 460 | CH_DEVICE(7, 0, CH_BRD_N110_1F), |
296 | CH_DEVICE(10, 1, CH_BRD_N210_1F), | 461 | CH_DEVICE(10, 1, CH_BRD_N210_1F), |
297 | { 0, } | 462 | CH_DEVICE(11, 1, CH_BRD_T210_1F), |
463 | CH_DEVICE(14, 1, CH_BRD_T210_1CU), | ||
464 | CH_DEVICE(16, 1, CH_BRD_N204_4CU), | ||
465 | { 0 } | ||
298 | }; | 466 | }; |
299 | 467 | ||
300 | MODULE_DEVICE_TABLE(pci, t1_pci_tbl); | 468 | MODULE_DEVICE_TABLE(pci, t1_pci_tbl); |
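Each CH_DEVICE() entry above couples a PCI device/subdevice pair with an index into t1_board[], so the probe path can recover the matching board description from the id it hit. The sketch below models that lookup with simplified stand-in types; the real CH_DEVICE macro and probe code live elsewhere in the driver and are not reproduced here.

#include <stdio.h>

/* Simplified stand-ins for struct pci_device_id and struct board_info. */
struct toy_pci_id { unsigned device, subdevice, driver_data; };
struct toy_board  { const char *desc; };

static const struct toy_board boards[] = {
    { "Chelsio T110 1x10GBase-CX4 TOE" },   /* index 0: CH_BRD_T110_1CU */
    { "Chelsio N110 1x10GBaseX NIC"    },   /* index 1: CH_BRD_N110_1F  */
    { "Chelsio N210 1x10GBaseX NIC"    },   /* index 2: CH_BRD_N210_1F  */
};

/* Mirrors the idea of CH_DEVICE(devid, ssid, idx): driver_data holds the index. */
static const struct toy_pci_id id_table[] = {
    { 8,  0, 0 },
    { 7,  0, 1 },
    { 10, 1, 2 },
    { 0 }
};

int main(void)
{
    /* "Probe" device 10/1: find its id entry and map to a board description. */
    for (const struct toy_pci_id *id = id_table; id->device; id++)
        if (id->device == 10 && id->subdevice == 1)
            printf("matched: %s\n", boards[id->driver_data].desc);
    return 0;
}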
@@ -390,9 +558,14 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) | |||
390 | if (lc->supported & SUPPORTED_Autoneg) { | 558 | if (lc->supported & SUPPORTED_Autoneg) { |
391 | lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE); | 559 | lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE); |
392 | if (fc) { | 560 | if (fc) { |
393 | lc->advertising |= ADVERTISED_ASYM_PAUSE; | 561 | if (fc == ((PAUSE_RX | PAUSE_TX) & |
394 | if (fc == (PAUSE_RX | PAUSE_TX)) | 562 | (mac->adapter->params.nports < 2))) |
395 | lc->advertising |= ADVERTISED_PAUSE; | 563 | lc->advertising |= ADVERTISED_PAUSE; |
564 | else { | ||
565 | lc->advertising |= ADVERTISED_ASYM_PAUSE; | ||
566 | if (fc == PAUSE_RX) | ||
567 | lc->advertising |= ADVERTISED_PAUSE; | ||
568 | } | ||
396 | } | 569 | } |
397 | phy->ops->advertise(phy, lc->advertising); | 570 | phy->ops->advertise(phy, lc->advertising); |
398 | 571 | ||
@@ -403,11 +576,15 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) | |||
403 | mac->ops->set_speed_duplex_fc(mac, lc->speed, | 576 | mac->ops->set_speed_duplex_fc(mac, lc->speed, |
404 | lc->duplex, fc); | 577 | lc->duplex, fc); |
405 | /* Also disables autoneg */ | 578 | /* Also disables autoneg */ |
579 | phy->state = PHY_AUTONEG_RDY; | ||
406 | phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); | 580 | phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); |
407 | phy->ops->reset(phy, 0); | 581 | phy->ops->reset(phy, 0); |
408 | } else | 582 | } else { |
583 | phy->state = PHY_AUTONEG_EN; | ||
409 | phy->ops->autoneg_enable(phy); /* also resets PHY */ | 584 | phy->ops->autoneg_enable(phy); /* also resets PHY */ |
585 | } | ||
410 | } else { | 586 | } else { |
587 | phy->state = PHY_AUTONEG_RDY; | ||
411 | mac->ops->set_speed_duplex_fc(mac, -1, -1, fc); | 588 | mac->ops->set_speed_duplex_fc(mac, -1, -1, fc); |
412 | lc->fc = (unsigned char)fc; | 589 | lc->fc = (unsigned char)fc; |
413 | phy->ops->reset(phy, 0); | 590 | phy->ops->reset(phy, 0); |
@@ -418,24 +595,109 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) | |||
418 | /* | 595 | /* |
419 | * External interrupt handler for boards using elmer0. | 596 | * External interrupt handler for boards using elmer0. |
420 | */ | 597 | */ |
421 | int elmer0_ext_intr_handler(adapter_t *adapter) | 598 | int t1_elmer0_ext_intr_handler(adapter_t *adapter) |
422 | { | 599 | { |
423 | struct cphy *phy; | 600 | struct cphy *phy; |
424 | int phy_cause; | 601 | int phy_cause; |
425 | u32 cause; | 602 | u32 cause; |
426 | 603 | ||
427 | t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); | 604 | t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); |
428 | 605 | ||
429 | switch (board_info(adapter)->board) { | 606 | switch (board_info(adapter)->board) { |
607 | #ifdef CONFIG_CHELSIO_T1_1G | ||
608 | case CHBT_BOARD_CHT204: | ||
609 | case CHBT_BOARD_CHT204E: | ||
610 | case CHBT_BOARD_CHN204: | ||
611 | case CHBT_BOARD_CHT204V: { | ||
612 | int i, port_bit; | ||
613 | for_each_port(adapter, i) { | ||
614 | port_bit = i + 1; | ||
615 | if (!(cause & (1 << port_bit))) continue; | ||
616 | |||
617 | phy = adapter->port[i].phy; | ||
618 | phy_cause = phy->ops->interrupt_handler(phy); | ||
619 | if (phy_cause & cphy_cause_link_change) | ||
620 | t1_link_changed(adapter, i); | ||
621 | } | ||
622 | break; | ||
623 | } | ||
624 | case CHBT_BOARD_CHT101: | ||
625 | if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ | ||
626 | phy = adapter->port[0].phy; | ||
627 | phy_cause = phy->ops->interrupt_handler(phy); | ||
628 | if (phy_cause & cphy_cause_link_change) | ||
629 | t1_link_changed(adapter, 0); | ||
630 | } | ||
631 | break; | ||
632 | case CHBT_BOARD_7500: { | ||
633 | int p; | ||
634 | /* | ||
635 | * Elmer0's interrupt cause isn't useful here because there is | ||
636 | * only one bit that can be set for all 4 ports. This means | ||
637 | * we are forced to check every PHY's interrupt status | ||
638 | * register to see who initiated the interrupt. | ||
639 | */ | ||
640 | for_each_port(adapter, p) { | ||
641 | phy = adapter->port[p].phy; | ||
642 | phy_cause = phy->ops->interrupt_handler(phy); | ||
643 | if (phy_cause & cphy_cause_link_change) | ||
644 | t1_link_changed(adapter, p); | ||
645 | } | ||
646 | break; | ||
647 | } | ||
648 | #endif | ||
649 | case CHBT_BOARD_CHT210: | ||
430 | case CHBT_BOARD_N210: | 650 | case CHBT_BOARD_N210: |
431 | case CHBT_BOARD_N110: | 651 | case CHBT_BOARD_N110: |
432 | if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ | 652 | if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ |
433 | phy = adapter->port[0].phy; | 653 | phy = adapter->port[0].phy; |
434 | phy_cause = phy->ops->interrupt_handler(phy); | 654 | phy_cause = phy->ops->interrupt_handler(phy); |
435 | if (phy_cause & cphy_cause_link_change) | 655 | if (phy_cause & cphy_cause_link_change) |
436 | link_changed(adapter, 0); | 656 | t1_link_changed(adapter, 0); |
657 | } | ||
658 | break; | ||
659 | case CHBT_BOARD_8000: | ||
660 | case CHBT_BOARD_CHT110: | ||
661 | CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n", | ||
662 | cause); | ||
663 | if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ | ||
664 | struct cmac *mac = adapter->port[0].mac; | ||
665 | |||
666 | mac->ops->interrupt_handler(mac); | ||
437 | } | 667 | } |
668 | if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ | ||
669 | u32 mod_detect; | ||
670 | |||
671 | t1_tpi_read(adapter, | ||
672 | A_ELMER0_GPI_STAT, &mod_detect); | ||
673 | CH_MSG(adapter, INFO, LINK, "XPAK %s\n", | ||
674 | mod_detect ? "removed" : "inserted"); | ||
675 | } | ||
438 | break; | 676 | break; |
677 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
678 | case CHBT_BOARD_COUGAR: | ||
679 | if (adapter->params.nports == 1) { | ||
680 | if (cause & ELMER0_GP_BIT1) { /* Vitesse MAC */ | ||
681 | struct cmac *mac = adapter->port[0].mac; | ||
682 | mac->ops->interrupt_handler(mac); | ||
683 | } | ||
684 | if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ | ||
685 | } | ||
686 | } else { | ||
687 | int i, port_bit; | ||
688 | |||
689 | for_each_port(adapter, i) { | ||
690 | port_bit = i ? i + 1 : 0; | ||
691 | if (!(cause & (1 << port_bit))) continue; | ||
692 | |||
693 | phy = adapter->port[i].phy; | ||
694 | phy_cause = phy->ops->interrupt_handler(phy); | ||
695 | if (phy_cause & cphy_cause_link_change) | ||
696 | t1_link_changed(adapter, i); | ||
697 | } | ||
698 | } | ||
699 | break; | ||
700 | #endif | ||
439 | } | 701 | } |
440 | t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); | 702 | t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); |
441 | return 0; | 703 | return 0; |
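The board-specific cases above differ mainly in how a port index maps to a bit in the Elmer0 cause word: the CHT204/CHN204 family uses bit i+1 for port i, the multi-port Cougar case keeps bit 0 for port 0 and uses i+1 otherwise, and the 7500 exposes only one shared bit, so every PHY must be polled. A tiny model of the two indexing schemes; the names are illustrative.

#include <stdio.h>

/* Port-index -> cause-bit mapping used by the CHT204/CHN204 cases above. */
static int cht204_port_bit(int port) { return port + 1; }

/* Mapping used by the multi-port Cougar case: port 0 keeps bit 0. */
static int cougar_port_bit(int port) { return port ? port + 1 : 0; }

int main(void)
{
    unsigned int cause = (1u << 1) | (1u << 3);   /* pretend two bits were raised */

    for (int p = 0; p < 4; p++) {
        if (cause & (1u << cht204_port_bit(p)))
            printf("CHT204 scheme: port %d interrupted\n", p);
        if (cause & (1u << cougar_port_bit(p)))
            printf("Cougar scheme: port %d interrupted\n", p);
    }
    return 0;
}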
@@ -445,11 +707,11 @@ int elmer0_ext_intr_handler(adapter_t *adapter) | |||
445 | void t1_interrupts_enable(adapter_t *adapter) | 707 | void t1_interrupts_enable(adapter_t *adapter) |
446 | { | 708 | { |
447 | unsigned int i; | 709 | unsigned int i; |
448 | u32 pl_intr; | ||
449 | 710 | ||
450 | adapter->slow_intr_mask = F_PL_INTR_SGE_ERR; | 711 | adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP; |
451 | 712 | ||
452 | t1_sge_intr_enable(adapter->sge); | 713 | t1_sge_intr_enable(adapter->sge); |
714 | t1_tp_intr_enable(adapter->tp); | ||
453 | if (adapter->espi) { | 715 | if (adapter->espi) { |
454 | adapter->slow_intr_mask |= F_PL_INTR_ESPI; | 716 | adapter->slow_intr_mask |= F_PL_INTR_ESPI; |
455 | t1_espi_intr_enable(adapter->espi); | 717 | t1_espi_intr_enable(adapter->espi); |
@@ -462,15 +724,17 @@ void t1_interrupts_enable(adapter_t *adapter) | |||
462 | } | 724 | } |
463 | 725 | ||
464 | /* Enable PCIX & external chip interrupts on ASIC boards. */ | 726 | /* Enable PCIX & external chip interrupts on ASIC boards. */ |
465 | pl_intr = readl(adapter->regs + A_PL_ENABLE); | 727 | if (t1_is_asic(adapter)) { |
728 | u32 pl_intr = readl(adapter->regs + A_PL_ENABLE); | ||
466 | 729 | ||
467 | /* PCI-X interrupts */ | 730 | /* PCI-X interrupts */ |
468 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, | 731 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, |
469 | 0xffffffff); | 732 | 0xffffffff); |
470 | 733 | ||
471 | adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; | 734 | adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; |
472 | pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; | 735 | pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; |
473 | writel(pl_intr, adapter->regs + A_PL_ENABLE); | 736 | writel(pl_intr, adapter->regs + A_PL_ENABLE); |
737 | } | ||
474 | } | 738 | } |
475 | 739 | ||
476 | /* Disables all interrupts. */ | 740 | /* Disables all interrupts. */ |
@@ -479,6 +743,7 @@ void t1_interrupts_disable(adapter_t* adapter) | |||
479 | unsigned int i; | 743 | unsigned int i; |
480 | 744 | ||
481 | t1_sge_intr_disable(adapter->sge); | 745 | t1_sge_intr_disable(adapter->sge); |
746 | t1_tp_intr_disable(adapter->tp); | ||
482 | if (adapter->espi) | 747 | if (adapter->espi) |
483 | t1_espi_intr_disable(adapter->espi); | 748 | t1_espi_intr_disable(adapter->espi); |
484 | 749 | ||
@@ -489,7 +754,8 @@ void t1_interrupts_disable(adapter_t* adapter) | |||
489 | } | 754 | } |
490 | 755 | ||
491 | /* Disable PCIX & external chip interrupts. */ | 756 | /* Disable PCIX & external chip interrupts. */ |
492 | writel(0, adapter->regs + A_PL_ENABLE); | 757 | if (t1_is_asic(adapter)) |
758 | writel(0, adapter->regs + A_PL_ENABLE); | ||
493 | 759 | ||
494 | /* PCI-X interrupts */ | 760 | /* PCI-X interrupts */ |
495 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); | 761 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); |
@@ -501,10 +767,9 @@ void t1_interrupts_disable(adapter_t* adapter) | |||
501 | void t1_interrupts_clear(adapter_t* adapter) | 767 | void t1_interrupts_clear(adapter_t* adapter) |
502 | { | 768 | { |
503 | unsigned int i; | 769 | unsigned int i; |
504 | u32 pl_intr; | ||
505 | |||
506 | 770 | ||
507 | t1_sge_intr_clear(adapter->sge); | 771 | t1_sge_intr_clear(adapter->sge); |
772 | t1_tp_intr_clear(adapter->tp); | ||
508 | if (adapter->espi) | 773 | if (adapter->espi) |
509 | t1_espi_intr_clear(adapter->espi); | 774 | t1_espi_intr_clear(adapter->espi); |
510 | 775 | ||
@@ -515,10 +780,12 @@ void t1_interrupts_clear(adapter_t* adapter) | |||
515 | } | 780 | } |
516 | 781 | ||
517 | /* Enable interrupts for external devices. */ | 782 | /* Enable interrupts for external devices. */ |
518 | pl_intr = readl(adapter->regs + A_PL_CAUSE); | 783 | if (t1_is_asic(adapter)) { |
784 | u32 pl_intr = readl(adapter->regs + A_PL_CAUSE); | ||
519 | 785 | ||
520 | writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX, | 786 | writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX, |
521 | adapter->regs + A_PL_CAUSE); | 787 | adapter->regs + A_PL_CAUSE); |
788 | } | ||
522 | 789 | ||
523 | /* PCI-X interrupts */ | 790 | /* PCI-X interrupts */ |
524 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); | 791 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); |
@@ -527,7 +794,7 @@ void t1_interrupts_clear(adapter_t* adapter) | |||
527 | /* | 794 | /* |
528 | * Slow path interrupt handler for ASICs. | 795 | * Slow path interrupt handler for ASICs. |
529 | */ | 796 | */ |
530 | int t1_slow_intr_handler(adapter_t *adapter) | 797 | static int asic_slow_intr(adapter_t *adapter) |
531 | { | 798 | { |
532 | u32 cause = readl(adapter->regs + A_PL_CAUSE); | 799 | u32 cause = readl(adapter->regs + A_PL_CAUSE); |
533 | 800 | ||
@@ -536,89 +803,54 @@ int t1_slow_intr_handler(adapter_t *adapter) | |||
536 | return 0; | 803 | return 0; |
537 | if (cause & F_PL_INTR_SGE_ERR) | 804 | if (cause & F_PL_INTR_SGE_ERR) |
538 | t1_sge_intr_error_handler(adapter->sge); | 805 | t1_sge_intr_error_handler(adapter->sge); |
806 | if (cause & F_PL_INTR_TP) | ||
807 | t1_tp_intr_handler(adapter->tp); | ||
539 | if (cause & F_PL_INTR_ESPI) | 808 | if (cause & F_PL_INTR_ESPI) |
540 | t1_espi_intr_handler(adapter->espi); | 809 | t1_espi_intr_handler(adapter->espi); |
541 | if (cause & F_PL_INTR_PCIX) | 810 | if (cause & F_PL_INTR_PCIX) |
542 | t1_pci_intr_handler(adapter); | 811 | t1_pci_intr_handler(adapter); |
543 | if (cause & F_PL_INTR_EXT) | 812 | if (cause & F_PL_INTR_EXT) |
544 | t1_elmer0_ext_intr(adapter); | 813 | t1_elmer0_ext_intr_handler(adapter); |
545 | 814 | ||
546 | /* Clear the interrupts just processed. */ | 815 | /* Clear the interrupts just processed. */ |
547 | writel(cause, adapter->regs + A_PL_CAUSE); | 816 | writel(cause, adapter->regs + A_PL_CAUSE); |
548 | (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */ | 817 | readl(adapter->regs + A_PL_CAUSE); /* flush writes */ |
549 | return 1; | 818 | return 1; |
550 | } | 819 | } |
551 | 820 | ||
552 | /* Pause deadlock avoidance parameters */ | 821 | int t1_slow_intr_handler(adapter_t *adapter) |
553 | #define DROP_MSEC 16 | ||
554 | #define DROP_PKTS_CNT 1 | ||
555 | |||
556 | static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable) | ||
557 | { | ||
558 | u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG); | ||
559 | |||
560 | if (enable) | ||
561 | val |= csum_bit; | ||
562 | else | ||
563 | val &= ~csum_bit; | ||
564 | writel(val, adapter->regs + A_TP_GLOBAL_CONFIG); | ||
565 | } | ||
566 | |||
567 | void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable) | ||
568 | { | ||
569 | set_csum_offload(adapter, F_IP_CSUM, enable); | ||
570 | } | ||
571 | |||
572 | void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable) | ||
573 | { | ||
574 | set_csum_offload(adapter, F_UDP_CSUM, enable); | ||
575 | } | ||
576 | |||
577 | void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable) | ||
578 | { | 822 | { |
579 | set_csum_offload(adapter, F_TCP_CSUM, enable); | 823 | #ifdef CONFIG_CHELSIO_T1_1G |
824 | if (!t1_is_asic(adapter)) | ||
825 | return fpga_slow_intr(adapter); | ||
826 | #endif | ||
827 | return asic_slow_intr(adapter); | ||
580 | } | 828 | } |
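t1_slow_intr_handler() now dispatches at run time on t1_is_asic(), but the FPGA branch is only compiled in when CONFIG_CHELSIO_T1_1G is set, so ASIC-only builds carry no FPGA code. The same two-level gate in a stand-alone form; the configuration macro and helper names below are stand-ins.

#include <stdio.h>

/* Build-time switch standing in for CONFIG_CHELSIO_T1_1G. */
#define TOY_SUPPORT_FPGA 1

static int is_asic = 0;                       /* pretend run-time board property */

static int fpga_slow_path(void) { puts("fpga slow path"); return 1; }
static int asic_slow_path(void) { puts("asic slow path"); return 1; }

static int slow_intr_handler(void)
{
#if TOY_SUPPORT_FPGA
    if (!is_asic)
        return fpga_slow_path();              /* only present in 1G-capable builds */
#endif
    return asic_slow_path();
}

int main(void)
{
    slow_intr_handler();                      /* takes the fpga path */
    is_asic = 1;
    slow_intr_handler();                      /* takes the asic path */
    return 0;
}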
581 | 829 | ||
582 | static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk) | 830 | /* Power sequencing is a work-around for Intel's XPAKs. */ |
831 | static void power_sequence_xpak(adapter_t* adapter) | ||
583 | { | 832 | { |
584 | u32 val; | 833 | u32 mod_detect; |
585 | 834 | u32 gpo; | |
586 | val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | | 835 | |
587 | F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; | 836 | /* Check for XPAK */ |
588 | val |= F_TP_IN_ESPI_CHECK_IP_CSUM | | 837 | t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); |
589 | F_TP_IN_ESPI_CHECK_TCP_CSUM; | 838 | if (!(ELMER0_GP_BIT5 & mod_detect)) { |
590 | writel(val, adapter->regs + A_TP_IN_CONFIG); | 839 | /* XPAK is present */ |
591 | writel(F_TP_OUT_CSPI_CPL | | 840 | t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); |
592 | F_TP_OUT_ESPI_ETHERNET | | 841 | gpo |= ELMER0_GP_BIT18; |
593 | F_TP_OUT_ESPI_GENERATE_IP_CSUM | | 842 | t1_tpi_write(adapter, A_ELMER0_GPO, gpo); |
594 | F_TP_OUT_ESPI_GENERATE_TCP_CSUM, | ||
595 | adapter->regs + A_TP_OUT_CONFIG); | ||
596 | |||
597 | val = readl(adapter->regs + A_TP_GLOBAL_CONFIG); | ||
598 | val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM); | ||
599 | writel(val, adapter->regs + A_TP_GLOBAL_CONFIG); | ||
600 | |||
601 | /* | ||
602 | * Enable pause frame deadlock prevention. | ||
603 | */ | ||
604 | if (is_T2(adapter)) { | ||
605 | u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); | ||
606 | |||
607 | writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | | ||
608 | V_DROP_TICKS_CNT(drop_ticks) | | ||
609 | V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), | ||
610 | adapter->regs + A_TP_TX_DROP_CONFIG); | ||
611 | } | 843 | } |
612 | |||
613 | writel(F_TP_RESET, adapter->regs + A_TP_RESET); | ||
614 | } | 844 | } |
615 | 845 | ||
616 | int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, | 846 | int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, |
617 | struct adapter_params *p) | 847 | struct adapter_params *p) |
618 | { | 848 | { |
619 | p->chip_version = bi->chip_term; | 849 | p->chip_version = bi->chip_term; |
850 | p->is_asic = (p->chip_version != CHBT_TERM_FPGA); | ||
620 | if (p->chip_version == CHBT_TERM_T1 || | 851 | if (p->chip_version == CHBT_TERM_T1 || |
621 | p->chip_version == CHBT_TERM_T2) { | 852 | p->chip_version == CHBT_TERM_T2 || |
853 | p->chip_version == CHBT_TERM_FPGA) { | ||
622 | u32 val = readl(adapter->regs + A_TP_PC_CONFIG); | 854 | u32 val = readl(adapter->regs + A_TP_PC_CONFIG); |
623 | 855 | ||
624 | val = G_TP_PC_REV(val); | 856 | val = G_TP_PC_REV(val); |
@@ -640,11 +872,38 @@ int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, | |||
640 | static int board_init(adapter_t *adapter, const struct board_info *bi) | 872 | static int board_init(adapter_t *adapter, const struct board_info *bi) |
641 | { | 873 | { |
642 | switch (bi->board) { | 874 | switch (bi->board) { |
875 | case CHBT_BOARD_8000: | ||
643 | case CHBT_BOARD_N110: | 876 | case CHBT_BOARD_N110: |
644 | case CHBT_BOARD_N210: | 877 | case CHBT_BOARD_N210: |
645 | writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR); | 878 | case CHBT_BOARD_CHT210: |
879 | case CHBT_BOARD_COUGAR: | ||
880 | t1_tpi_par(adapter, 0xf); | ||
646 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); | 881 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); |
647 | break; | 882 | break; |
883 | case CHBT_BOARD_CHT110: | ||
884 | t1_tpi_par(adapter, 0xf); | ||
885 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); | ||
886 | |||
887 | /* TBD XXX Might not need. This fixes a problem | ||
888 | * described in the Intel SR XPAK errata. | ||
889 | */ | ||
890 | power_sequence_xpak(adapter); | ||
891 | break; | ||
892 | #ifdef CONFIG_CHELSIO_T1_1G | ||
893 | case CHBT_BOARD_CHT204E: | ||
894 | /* add config space write here */ | ||
895 | case CHBT_BOARD_CHT204: | ||
896 | case CHBT_BOARD_CHT204V: | ||
897 | case CHBT_BOARD_CHN204: | ||
898 | t1_tpi_par(adapter, 0xf); | ||
899 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); | ||
900 | break; | ||
901 | case CHBT_BOARD_CHT101: | ||
902 | case CHBT_BOARD_7500: | ||
903 | t1_tpi_par(adapter, 0xf); | ||
904 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); | ||
905 | break; | ||
906 | #endif | ||
648 | } | 907 | } |
649 | return 0; | 908 | return 0; |
650 | } | 909 | } |
@@ -666,11 +925,16 @@ int t1_init_hw_modules(adapter_t *adapter) | |||
666 | adapter->regs + A_MC5_CONFIG); | 925 | adapter->regs + A_MC5_CONFIG); |
667 | } | 926 | } |
668 | 927 | ||
928 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
929 | if (adapter->cspi && t1_cspi_init(adapter->cspi)) | ||
930 | goto out_err; | ||
931 | #endif | ||
669 | if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, | 932 | if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, |
670 | bi->espi_nports)) | 933 | bi->espi_nports)) |
671 | goto out_err; | 934 | goto out_err; |
672 | 935 | ||
673 | t1_tp_reset(adapter, bi->clock_core); | 936 | if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core)) |
937 | goto out_err; | ||
674 | 938 | ||
675 | err = t1_sge_configure(adapter->sge, &adapter->params.sge); | 939 | err = t1_sge_configure(adapter->sge, &adapter->params.sge); |
676 | if (err) | 940 | if (err) |
@@ -714,8 +978,14 @@ void t1_free_sw_modules(adapter_t *adapter) | |||
714 | 978 | ||
715 | if (adapter->sge) | 979 | if (adapter->sge) |
716 | t1_sge_destroy(adapter->sge); | 980 | t1_sge_destroy(adapter->sge); |
981 | if (adapter->tp) | ||
982 | t1_tp_destroy(adapter->tp); | ||
717 | if (adapter->espi) | 983 | if (adapter->espi) |
718 | t1_espi_destroy(adapter->espi); | 984 | t1_espi_destroy(adapter->espi); |
985 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
986 | if (adapter->cspi) | ||
987 | t1_cspi_destroy(adapter->cspi); | ||
988 | #endif | ||
719 | } | 989 | } |
720 | 990 | ||
721 | static void __devinit init_link_config(struct link_config *lc, | 991 | static void __devinit init_link_config(struct link_config *lc, |
@@ -735,6 +1005,13 @@ static void __devinit init_link_config(struct link_config *lc, | |||
735 | } | 1005 | } |
736 | } | 1006 | } |
737 | 1007 | ||
1008 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
1009 | if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) { | ||
1010 | CH_ERR("%s: CSPI initialization failed\n", | ||
1011 | adapter->name); | ||
1012 | goto error; | ||
1013 | } | ||
1014 | #endif | ||
738 | 1015 | ||
739 | /* | 1016 | /* |
740 | * Allocate and initialize the data structures that hold the SW state of | 1017 | * Allocate and initialize the data structures that hold the SW state of |
@@ -762,6 +1039,13 @@ int __devinit t1_init_sw_modules(adapter_t *adapter, | |||
762 | goto error; | 1039 | goto error; |
763 | } | 1040 | } |
764 | 1041 | ||
1042 | adapter->tp = t1_tp_create(adapter, &adapter->params.tp); | ||
1043 | if (!adapter->tp) { | ||
1044 | CH_ERR("%s: TP initialization failed\n", | ||
1045 | adapter->name); | ||
1046 | goto error; | ||
1047 | } | ||
1048 | |||
765 | board_init(adapter, bi); | 1049 | board_init(adapter, bi); |
766 | bi->mdio_ops->init(adapter, bi); | 1050 | bi->mdio_ops->init(adapter, bi); |
767 | if (bi->gphy->reset) | 1051 | if (bi->gphy->reset) |
@@ -793,7 +1077,9 @@ int __devinit t1_init_sw_modules(adapter_t *adapter, | |||
793 | * Get the port's MAC addresses either from the EEPROM if one | 1077 | * Get the port's MAC addresses either from the EEPROM if one |
794 | * exists or the one hardcoded in the MAC. | 1078 | * exists or the one hardcoded in the MAC. |
795 | */ | 1079 | */ |
796 | if (vpd_macaddress_get(adapter, i, hw_addr)) { | 1080 | if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY) |
1081 | mac->ops->macaddress_get(mac, hw_addr); | ||
1082 | else if (vpd_macaddress_get(adapter, i, hw_addr)) { | ||
797 | CH_ERR("%s: could not read MAC address from VPD ROM\n", | 1083 | CH_ERR("%s: could not read MAC address from VPD ROM\n", |
798 | adapter->port[i].dev->name); | 1084 | adapter->port[i].dev->name); |
799 | goto error; | 1085 | goto error; |
@@ -806,7 +1092,7 @@ int __devinit t1_init_sw_modules(adapter_t *adapter, | |||
806 | t1_interrupts_clear(adapter); | 1092 | t1_interrupts_clear(adapter); |
807 | return 0; | 1093 | return 0; |
808 | 1094 | ||
809 | error: | 1095 | error: |
810 | t1_free_sw_modules(adapter); | 1096 | t1_free_sw_modules(adapter); |
811 | return -1; | 1097 | return -1; |
812 | } | 1098 | } |
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h index 81816c2b708a..269d097dd927 100644 --- a/drivers/net/chelsio/suni1x10gexp_regs.h +++ b/drivers/net/chelsio/suni1x10gexp_regs.h | |||
@@ -32,6 +32,30 @@ | |||
32 | #ifndef _CXGB_SUNI1x10GEXP_REGS_H_ | 32 | #ifndef _CXGB_SUNI1x10GEXP_REGS_H_ |
33 | #define _CXGB_SUNI1x10GEXP_REGS_H_ | 33 | #define _CXGB_SUNI1x10GEXP_REGS_H_ |
34 | 34 | ||
35 | /* | ||
36 | ** Space allocated for each Exact Match Filter | ||
37 | ** There are 8 filter configurations | ||
38 | */ | ||
39 | #define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER 0x0003 | ||
40 | |||
41 | #define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER ) | ||
42 | |||
43 | /* | ||
44 | ** Space allocated for VLAN-Id Filter | ||
45 | ** There are 8 filter configurations | ||
46 | */ | ||
47 | #define SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER 0x0001 | ||
48 | |||
49 | #define mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER ) | ||
50 | |||
51 | /* | ||
52 | ** Space allocated for each MSTAT Counter | ||
53 | */ | ||
54 | #define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT 0x0004 | ||
55 | |||
56 | #define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId) ( (countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT ) | ||
57 | |||
58 | |||
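The three stride macros above parameterize the per-instance register blocks that the explicit defines further down spell out one by one: each exact-match MAC filter occupies 3 registers, each VLAN-ID filter 1, and each MSTAT counter 4. For example, filter 3's low address word is 0x204A + 3*3 = 0x2053 and MSTAT counter 5's low word is 0x2110 + 5*4 = 0x2124, matching SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW and SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW below. A self-contained check of that arithmetic, with the macro names shortened for brevity:

#include <assert.h>
#include <stdio.h>

#define SIZEOF_MAC_FILTER      0x0003
#define SIZEOF_MAC_VID_FILTER  0x0001
#define SIZEOF_MSTAT_COUNT     0x0004

#define MAC_FILTER_OFFSET(f)   ((f) * SIZEOF_MAC_FILTER)
#define MSTAT_COUNT_OFFSET(c)  ((c) * SIZEOF_MSTAT_COUNT)

#define EXACT_MATCH_ADDR_LOW(f)  (0x204A + MAC_FILTER_OFFSET(f))
#define MSTAT_COUNTER_LOW(c)     (0x2110 + MSTAT_COUNT_OFFSET(c))

int main(void)
{
    /* Cross-check against the explicit per-instance defines in the header. */
    assert(EXACT_MATCH_ADDR_LOW(3) == 0x2053);  /* ..._EXACT_MATCH_ADDR_3_LOW */
    assert(MSTAT_COUNTER_LOW(5)    == 0x2124);  /* ..._MSTAT_COUNTER_5_LOW    */
    printf("filter 3 low = 0x%04X, counter 5 low = 0x%04X\n",
           EXACT_MATCH_ADDR_LOW(3), MSTAT_COUNTER_LOW(5));
    return 0;
}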
35 | /******************************************************************************/ | 59 | /******************************************************************************/ |
36 | /** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/ | 60 | /** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/ |
37 | /******************************************************************************/ | 61 | /******************************************************************************/ |
@@ -39,33 +63,125 @@ | |||
39 | /* to the S/UNI-1x10GE-XP Data Sheet for the signification of each bit */ | 63 | /* to the S/UNI-1x10GE-XP Data Sheet for the signification of each bit */ |
40 | /******************************************************************************/ | 64 | /******************************************************************************/ |
41 | 65 | ||
66 | |||
67 | #define SUNI1x10GEXP_REG_IDENTIFICATION 0x0000 | ||
68 | #define SUNI1x10GEXP_REG_PRODUCT_REVISION 0x0001 | ||
69 | #define SUNI1x10GEXP_REG_CONFIG_AND_RESET_CONTROL 0x0002 | ||
70 | #define SUNI1x10GEXP_REG_LOOPBACK_MISC_CTRL 0x0003 | ||
42 | #define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004 | 71 | #define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004 |
72 | #define SUNI1x10GEXP_REG_GLOBAL_PERFORMANCE_MONITOR_UPDATE 0x0005 | ||
73 | |||
74 | #define SUNI1x10GEXP_REG_MDIO_COMMAND 0x0006 | ||
75 | #define SUNI1x10GEXP_REG_MDIO_INTERRUPT_ENABLE 0x0007 | ||
76 | #define SUNI1x10GEXP_REG_MDIO_INTERRUPT_STATUS 0x0008 | ||
77 | #define SUNI1x10GEXP_REG_MMD_PHY_ADDRESS 0x0009 | ||
78 | #define SUNI1x10GEXP_REG_MMD_CONTROL_ADDRESS_DATA 0x000A | ||
79 | #define SUNI1x10GEXP_REG_MDIO_READ_STATUS_DATA 0x000B | ||
80 | |||
81 | #define SUNI1x10GEXP_REG_OAM_INTF_CTRL 0x000C | ||
43 | #define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D | 82 | #define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D |
44 | #define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E | 83 | #define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E |
84 | #define SUNI1x10GEXP_REG_FREE 0x000F | ||
85 | |||
86 | #define SUNI1x10GEXP_REG_XTEF_MISC_CTRL 0x0010 | ||
87 | #define SUNI1x10GEXP_REG_XRF_MISC_CTRL 0x0011 | ||
88 | |||
89 | #define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_1 0x0100 | ||
90 | #define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_2 0x0101 | ||
45 | #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102 | 91 | #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102 |
92 | #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_VISIBLE 0x0103 | ||
46 | #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104 | 93 | #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104 |
94 | #define SUNI1x10GEXP_REG_SERDES_3125_TEST_CONFIG 0x0107 | ||
95 | |||
47 | #define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040 | 96 | #define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040 |
97 | #define SUNI1x10GEXP_REG_RXXG_CONFIG_2 0x2041 | ||
48 | #define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042 | 98 | #define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042 |
49 | #define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043 | 99 | #define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043 |
50 | #define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045 | 100 | #define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045 |
51 | #define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046 | 101 | #define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046 |
52 | #define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047 | 102 | #define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047 |
53 | #define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048 | 103 | #define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048 |
104 | #define SUNI1x10GEXP_REG_RXXG_RECEIVE_FIFO_THRESHOLD 0x2049 | ||
105 | #define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) | ||
106 | #define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) | ||
107 | #define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) | ||
108 | #define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId) (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId)) | ||
109 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW 0x204A | ||
110 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID 0x204B | ||
111 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH 0x204C | ||
54 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D | 112 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D |
55 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E | 113 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E |
56 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F | 114 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F |
115 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_LOW 0x2050 | ||
116 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_MID 0x2051 | ||
117 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_HIGH 0x2052 | ||
118 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW 0x2053 | ||
119 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_MID 0x2054 | ||
120 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_HIGH 0x2055 | ||
121 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_LOW 0x2056 | ||
122 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_MID 0x2057 | ||
123 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_HIGH 0x2058 | ||
124 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_LOW 0x2059 | ||
125 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_MID 0x205A | ||
126 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_HIGH 0x205B | ||
127 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_LOW 0x205C | ||
128 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_MID 0x205D | ||
129 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_HIGH 0x205E | ||
130 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_LOW 0x205F | ||
131 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_MID 0x2060 | ||
132 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_HIGH 0x2061 | ||
133 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_0 0x2062 | ||
134 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_1 0x2063 | ||
135 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_2 0x2064 | ||
136 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_3 0x2065 | ||
137 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_4 0x2066 | ||
138 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_5 0x2067 | ||
139 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_6 0x2068 | ||
140 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_7 0x2069 | ||
57 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A | 141 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A |
58 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B | 142 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B |
59 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C | 143 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C |
60 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D | 144 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D |
61 | #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E | 145 | #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E |
146 | #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_1 0x206F | ||
62 | #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070 | 147 | #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070 |
148 | |||
149 | #define SUNI1x10GEXP_REG_XRF_PATTERN_GEN_CTRL 0x2081 | ||
150 | #define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_0 0x2084 | ||
151 | #define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_1 0x2085 | ||
152 | #define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_2 0x2086 | ||
153 | #define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_3 0x2087 | ||
63 | #define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088 | 154 | #define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088 |
64 | #define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089 | 155 | #define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089 |
156 | #define SUNI1x10GEXP_REG_XRF_ERR_STATUS 0x208A | ||
65 | #define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B | 157 | #define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B |
66 | #define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C | 158 | #define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C |
159 | #define SUNI1x10GEXP_REG_XRF_CODE_ERR_THRES 0x2092 | ||
160 | |||
161 | #define SUNI1x10GEXP_REG_RXOAM_CONFIG 0x20C0 | ||
162 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_1_CONFIG 0x20C1 | ||
163 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_2_CONFIG 0x20C2 | ||
164 | #define SUNI1x10GEXP_REG_RXOAM_CONFIG_2 0x20C3 | ||
165 | #define SUNI1x10GEXP_REG_RXOAM_HEC_CONFIG 0x20C4 | ||
166 | #define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_THRES 0x20C5 | ||
67 | #define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7 | 167 | #define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7 |
68 | #define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8 | 168 | #define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8 |
169 | #define SUNI1x10GEXP_REG_RXOAM_STATUS 0x20C9 | ||
170 | #define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_COUNT 0x20CA | ||
171 | #define SUNI1x10GEXP_REG_RXOAM_FIFO_OVERFLOW_COUNT 0x20CB | ||
172 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_LSB 0x20CC | ||
173 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_MSB 0x20CD | ||
174 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_LSB 0x20CE | ||
175 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_MSB 0x20CF | ||
176 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_LSB 0x20D0 | ||
177 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_MSB 0x20D1 | ||
178 | #define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_LSB 0x20D2 | ||
179 | #define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_MSB 0x20D3 | ||
180 | #define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_LSB 0x20D4 | ||
181 | #define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_MSB 0x20D5 | ||
182 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_LSB 0x20D6 | ||
183 | #define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_MSB 0x20D7 | ||
184 | |||
69 | #define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100 | 185 | #define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100 |
70 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101 | 186 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101 |
71 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102 | 187 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102 |
@@ -75,50 +191,321 @@ | |||
75 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106 | 191 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106 |
76 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107 | 192 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107 |
77 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108 | 193 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108 |
194 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_ADDRESS 0x2109 | ||
195 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_LOW 0x210A | ||
196 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_MIDDLE 0x210B | ||
197 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_HIGH 0x210C | ||
198 | #define mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId) (0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) | ||
199 | #define mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId) (0x2111 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) | ||
200 | #define mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId) (0x2112 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) | ||
78 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110 | 201 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110 |
202 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_MID 0x2111 | ||
203 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_HIGH 0x2112 | ||
204 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_RESVD 0x2113 | ||
79 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114 | 205 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114 |
206 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_MID 0x2115 | ||
207 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_HIGH 0x2116 | ||
208 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_RESVD 0x2117 | ||
209 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW 0x2118 | ||
210 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_MID 0x2119 | ||
211 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_HIGH 0x211A | ||
212 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_RESVD 0x211B | ||
213 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_LOW 0x211C | ||
214 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_MID 0x211D | ||
215 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_HIGH 0x211E | ||
216 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_RESVD 0x211F | ||
80 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120 | 217 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120 |
218 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_MID 0x2121 | ||
219 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_HIGH 0x2122 | ||
220 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_RESVD 0x2123 | ||
81 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124 | 221 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124 |
222 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_MID 0x2125 | ||
223 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_HIGH 0x2126 | ||
224 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_RESVD 0x2127 | ||
82 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128 | 225 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128 |
226 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_MID 0x2129 | ||
227 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_HIGH 0x212A | ||
228 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_RESVD 0x212B | ||
229 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_LOW 0x212C | ||
230 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_MID 0x212D | ||
231 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_HIGH 0x212E | ||
232 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_RESVD 0x212F | ||
83 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130 | 233 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130 |
234 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_MID 0x2131 | ||
235 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_HIGH 0x2132 | ||
236 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_RESVD 0x2133 | ||
237 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_LOW 0x2134 | ||
238 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_MID 0x2135 | ||
239 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_HIGH 0x2136 | ||
240 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_RESVD 0x2137 | ||
84 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138 | 241 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138 |
242 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_MID 0x2139 | ||
243 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_HIGH 0x213A | ||
244 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_RESVD 0x213B | ||
85 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C | 245 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C |
246 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_MID 0x213D | ||
247 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_HIGH 0x213E | ||
248 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_RESVD 0x213F | ||
86 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140 | 249 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140 |
250 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_MID 0x2141 | ||
251 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_HIGH 0x2142 | ||
252 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_RESVD 0x2143 | ||
87 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144 | 253 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144 |
254 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_MID 0x2145 | ||
255 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_HIGH 0x2146 | ||
256 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_RESVD 0x2147 | ||
257 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_LOW 0x2148 | ||
258 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_MID 0x2149 | ||
259 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_HIGH 0x214A | ||
260 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_RESVD 0x214B | ||
88 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C | 261 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C |
262 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_MID 0x214D | ||
263 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_HIGH 0x214E | ||
264 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_RESVD 0x214F | ||
89 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150 | 265 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150 |
266 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_MID 0x2151 | ||
267 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_HIGH 0x2152 | ||
268 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_RESVD 0x2153 | ||
90 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154 | 269 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154 |
270 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_MID 0x2155 | ||
271 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_HIGH 0x2156 | ||
272 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_RESVD 0x2157 | ||
91 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158 | 273 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158 |
274 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_MID 0x2159 | ||
275 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_HIGH 0x215A | ||
276 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_RESVD 0x215B | ||
277 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_LOW 0x215C | ||
278 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_MID 0x215D | ||
279 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_HIGH 0x215E | ||
280 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_RESVD 0x215F | ||
281 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_LOW 0x2160 | ||
282 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_MID 0x2161 | ||
283 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_HIGH 0x2162 | ||
284 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_RESVD 0x2163 | ||
285 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_LOW 0x2164 | ||
286 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_MID 0x2165 | ||
287 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_HIGH 0x2166 | ||
288 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_RESVD 0x2167 | ||
289 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_LOW 0x2168 | ||
290 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_MID 0x2169 | ||
291 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_HIGH 0x216A | ||
292 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_RESVD 0x216B | ||
293 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_LOW 0x216C | ||
294 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_MID 0x216D | ||
295 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_HIGH 0x216E | ||
296 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_RESVD 0x216F | ||
297 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_LOW 0x2170 | ||
298 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_MID 0x2171 | ||
299 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_HIGH 0x2172 | ||
300 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_RESVD 0x2173 | ||
301 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW 0x2174 | ||
302 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_MID 0x2175 | ||
303 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_HIGH 0x2176 | ||
304 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_RESVD 0x2177 | ||
305 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW 0x2178 | ||
306 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_MID 0x2179 | ||
307 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_HIGH 0x217a | ||
308 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_RESVD 0x217b | ||
309 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_LOW 0x217c | ||
310 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_MID 0x217d | ||
311 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_HIGH 0x217e | ||
312 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_RESVD 0x217f | ||
313 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_LOW 0x2180 | ||
314 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_MID 0x2181 | ||
315 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_HIGH 0x2182 | ||
316 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_RESVD 0x2183 | ||
317 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_LOW 0x2184 | ||
318 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_MID 0x2185 | ||
319 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_HIGH 0x2186 | ||
320 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_RESVD 0x2187 | ||
321 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_LOW 0x2188 | ||
322 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_MID 0x2189 | ||
323 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_HIGH 0x218A | ||
324 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_RESVD 0x218B | ||
325 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_LOW 0x218C | ||
326 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_MID 0x218D | ||
327 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_HIGH 0x218E | ||
328 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_RESVD 0x218F | ||
329 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_LOW 0x2190 | ||
330 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_MID 0x2191 | ||
331 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_HIGH 0x2192 | ||
332 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_RESVD 0x2193 | ||
92 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194 | 333 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194 |
334 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_MID 0x2195 | ||
335 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_HIGH 0x2196 | ||
336 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_RESVD 0x2197 | ||
337 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_LOW 0x2198 | ||
338 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_MID 0x2199 | ||
339 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_HIGH 0x219A | ||
340 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_RESVD 0x219B | ||
93 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C | 341 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C |
342 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_MID 0x219D | ||
343 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_HIGH 0x219E | ||
344 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_RESVD 0x219F | ||
94 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0 | 345 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0 |
346 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_MID 0x21A1 | ||
347 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_HIGH 0x21A2 | ||
348 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_RESVD 0x21A3 | ||
349 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_LOW 0x21A4 | ||
350 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_MID 0x21A5 | ||
351 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_HIGH 0x21A6 | ||
352 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_RESVD 0x21A7 | ||
95 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8 | 353 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8 |
354 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_MID 0x21A9 | ||
355 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_HIGH 0x21AA | ||
356 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_RESVD 0x21AB | ||
357 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_LOW 0x21AC | ||
358 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_MID 0x21AD | ||
359 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_HIGH 0x21AE | ||
360 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_RESVD 0x21AF | ||
96 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0 | 361 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0 |
362 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_MID 0x21B1 | ||
363 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_HIGH 0x21B2 | ||
364 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_RESVD 0x21B3 | ||
365 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_LOW 0x21B4 | ||
366 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_MID 0x21B5 | ||
367 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_HIGH 0x21B6 | ||
368 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_RESVD 0x21B7 | ||
97 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8 | 369 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8 |
370 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_MID 0x21B9 | ||
371 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_HIGH 0x21BA | ||
372 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_RESVD 0x21BB | ||
98 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC | 373 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC |
374 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_MID 0x21BD | ||
375 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_HIGH 0x21BE | ||
376 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_RESVD 0x21BF | ||
377 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_LOW 0x21C0 | ||
378 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_MID 0x21C1 | ||
379 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_HIGH 0x21C2 | ||
380 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_RESVD 0x21C3 | ||
381 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_LOW 0x21C4 | ||
382 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_MID 0x21C5 | ||
383 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_HIGH 0x21C6 | ||
384 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_RESVD 0x21C7 | ||
385 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_LOW 0x21C8 | ||
386 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_MID 0x21C9 | ||
387 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_HIGH 0x21CA | ||
388 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_RESVD 0x21CB | ||
389 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_LOW 0x21CC | ||
390 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_MID 0x21CD | ||
391 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_HIGH 0x21CE | ||
392 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_RESVD 0x21CF | ||
393 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_LOW 0x21D0 | ||
394 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_MID 0x21D1 | ||
395 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_HIGH 0x21D2 | ||
396 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_RESVD 0x21D3 | ||
397 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_LOW 0x21D4 | ||
398 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_MID 0x21D5 | ||
399 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_HIGH 0x21D6 | ||
400 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_RESVD 0x21D7 | ||
401 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_LOW 0x21D8 | ||
402 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_MID 0x21D9 | ||
403 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_HIGH 0x21DA | ||
404 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_RESVD 0x21DB | ||
405 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW 0x21DC | ||
406 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_MID 0x21DD | ||
407 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_HIGH 0x21DE | ||
408 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_RESVD 0x21DF | ||
409 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW 0x21E0 | ||
410 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_MID 0x21E1 | ||
411 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_HIGH 0x21E2 | ||
412 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_RESVD 0x21E3 | ||
413 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_LOW 0x21E4 | ||
414 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_MID 0x21E5 | ||
415 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_HIGH 0x21E6 | ||
416 | #define SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM 51 | ||
417 | |||
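The MSTAT counters above are laid out as LOW/MID/HIGH/RESVD groups of 16-bit registers with a stride of four register addresses per counter (for the counters listed here, counter n starts at COUNTER_36_LOW + 4*(n - 36)). A minimal sketch of assembling one counter value follows; pm3393_read() is a hypothetical 16-bit accessor standing in for whatever read routine the driver actually uses, and the 48-bit split across LOW/MID/HIGH is inferred from the register names, not stated by the header.

    #include <stdint.h>

    #define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0   /* from the table above */

    extern uint16_t pm3393_read(uint16_t reg);             /* assumed accessor */

    /* Valid for the counters listed above (n >= 36): each counter occupies
     * four consecutive registers (LOW, MID, HIGH, RESVD). */
    static uint64_t mstat_read_counter(unsigned int n)
    {
            uint16_t base = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW + 4 * (n - 36);
            uint64_t lo  = pm3393_read(base);
            uint64_t mid = pm3393_read(base + 1);
            uint64_t hi  = pm3393_read(base + 2);

            return lo | (mid << 16) | (hi << 32);
    }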
418 | #define SUNI1x10GEXP_REG_IFLX_GLOBAL_CONFIG 0x2200 | ||
419 | #define SUNI1x10GEXP_REG_IFLX_CHANNEL_PROVISION 0x2201 | ||
99 | #define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209 | 420 | #define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209 |
100 | #define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A | 421 | #define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A |
422 | #define SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS 0x220D | ||
423 | #define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION 0x220E | ||
424 | #define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_HIGH_LIMIT 0x220F | ||
425 | #define SUNI1x10GEXP_REG_IFLX_INDIR_FULL_ALMOST_FULL_STATUS_LIMIT 0x2210 | ||
426 | #define SUNI1x10GEXP_REG_IFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_LIMIT 0x2211 | ||
427 | |||
428 | #define SUNI1x10GEXP_REG_PL4MOS_CONFIG 0x2240 | ||
429 | #define SUNI1x10GEXP_REG_PL4MOS_MASK 0x2241 | ||
430 | #define SUNI1x10GEXP_REG_PL4MOS_FAIRNESS_MASKING 0x2242 | ||
431 | #define SUNI1x10GEXP_REG_PL4MOS_MAXBURST1 0x2243 | ||
432 | #define SUNI1x10GEXP_REG_PL4MOS_MAXBURST2 0x2244 | ||
433 | #define SUNI1x10GEXP_REG_PL4MOS_TRANSFER_SIZE 0x2245 | ||
434 | |||
435 | #define SUNI1x10GEXP_REG_PL4ODP_CONFIG 0x2280 | ||
101 | #define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282 | 436 | #define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282 |
102 | #define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283 | 437 | #define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283 |
438 | #define SUNI1x10GEXP_REG_PL4ODP_CONFIG_MAX_T 0x2284 | ||
439 | |||
103 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300 | 440 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300 |
104 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301 | 441 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301 |
105 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302 | 442 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302 |
443 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_LIMITS 0x2303 | ||
444 | #define SUNI1x10GEXP_REG_PL4IO_CALENDAR_REPETITIONS 0x2304 | ||
445 | #define SUNI1x10GEXP_REG_PL4IO_CONFIG 0x2305 | ||
446 | |||
106 | #define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040 | 447 | #define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040 |
448 | #define SUNI1x10GEXP_REG_TXXG_CONFIG_2 0x3041 | ||
107 | #define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042 | 449 | #define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042 |
108 | #define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043 | 450 | #define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043 |
451 | #define SUNI1x10GEXP_REG_TXXG_STATUS 0x3044 | ||
109 | #define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045 | 452 | #define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045 |
453 | #define SUNI1x10GEXP_REG_TXXG_MIN_FRAME_SIZE 0x3046 | ||
110 | #define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047 | 454 | #define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047 |
111 | #define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048 | 455 | #define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048 |
112 | #define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049 | 456 | #define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049 |
457 | #define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER 0x304D | ||
458 | #define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER_INTERVAL 0x304E | ||
459 | #define SUNI1x10GEXP_REG_TXXG_FILTER_ERROR_COUNTER 0x3051 | ||
460 | #define SUNI1x10GEXP_REG_TXXG_PAUSE_QUANTUM_CONFIG 0x3052 | ||
461 | |||
462 | #define SUNI1x10GEXP_REG_XTEF_CTRL 0x3080 | ||
113 | #define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084 | 463 | #define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084 |
114 | #define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085 | 464 | #define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085 |
465 | #define SUNI1x10GEXP_REG_XTEF_VISIBILITY 0x3086 | ||
466 | |||
467 | #define SUNI1x10GEXP_REG_TXOAM_OAM_CONFIG 0x30C0 | ||
468 | #define SUNI1x10GEXP_REG_TXOAM_MINI_RATE_CONFIG 0x30C1 | ||
469 | #define SUNI1x10GEXP_REG_TXOAM_MINI_GAP_FIFO_CONFIG 0x30C2 | ||
470 | #define SUNI1x10GEXP_REG_TXOAM_P1P2_STATIC_VALUES 0x30C3 | ||
471 | #define SUNI1x10GEXP_REG_TXOAM_P3P4_STATIC_VALUES 0x30C4 | ||
472 | #define SUNI1x10GEXP_REG_TXOAM_P5P6_STATIC_VALUES 0x30C5 | ||
115 | #define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6 | 473 | #define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6 |
116 | #define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7 | 474 | #define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7 |
475 | #define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_LSB 0x30C8 | ||
476 | #define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_MSB 0x30C9 | ||
477 | #define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_LSB 0x30CA | ||
478 | #define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_MSB 0x30CB | ||
479 | #define SUNI1x10GEXP_REG_TXOAM_P1P2_MINI_MASK 0x30CC | ||
480 | #define SUNI1x10GEXP_REG_TXOAM_P3P4_MINI_MASK 0x30CD | ||
481 | #define SUNI1x10GEXP_REG_TXOAM_P5P6_MINI_MASK 0x30CE | ||
482 | #define SUNI1x10GEXP_REG_TXOAM_COSET 0x30CF | ||
483 | #define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_LSB 0x30D0 | ||
484 | #define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_MSB 0x30D1 | ||
485 | #define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_LSB 0x30D2 | ||
486 | #define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_MSB 0x30D3 | ||
487 | |||
488 | |||
489 | #define SUNI1x10GEXP_REG_EFLX_GLOBAL_CONFIG 0x3200 | ||
490 | #define SUNI1x10GEXP_REG_EFLX_ERCU_GLOBAL_STATUS 0x3201 | ||
491 | #define SUNI1x10GEXP_REG_EFLX_INDIR_CHANNEL_ADDRESS 0x3202 | ||
492 | #define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_LOW_LIMIT 0x3203 | ||
493 | #define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_HIGH_LIMIT 0x3204 | ||
494 | #define SUNI1x10GEXP_REG_EFLX_INDIR_FULL_ALMOST_FULL_STATUS_AND_LIMIT 0x3205 | ||
495 | #define SUNI1x10GEXP_REG_EFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_AND_LIMIT 0x3206 | ||
496 | #define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_CUT_THROUGH_THRESHOLD 0x3207 | ||
117 | #define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C | 497 | #define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C |
118 | #define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D | 498 | #define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D |
499 | #define SUNI1x10GEXP_REG_EFLX_CHANNEL_PROVISION 0x3210 | ||
500 | |||
501 | #define SUNI1x10GEXP_REG_PL4IDU_CONFIG 0x3280 | ||
119 | #define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282 | 502 | #define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282 |
120 | #define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283 | 503 | #define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283 |
121 | 504 | ||
505 | |||
506 | /*----------------------------------------*/ | ||
507 | #define SUNI1x10GEXP_REG_MAX_OFFSET 0x3480 | ||
508 | |||
122 | /******************************************************************************/ | 509 | /******************************************************************************/ |
123 | /* -- End register offset definitions -- */ | 510 | /* -- End register offset definitions -- */ |
124 | /******************************************************************************/ | 511 | /******************************************************************************/ |
@@ -127,6 +514,81 @@ | |||
127 | /** SUNI-1x10GE-XP REGISTER BIT MASKS **/ | 514 | /** SUNI-1x10GE-XP REGISTER BIT MASKS **/ |
128 | /******************************************************************************/ | 515 | /******************************************************************************/ |
129 | 516 | ||
517 | #define SUNI1x10GEXP_BITMSK_BITS_1 0x00001 | ||
518 | #define SUNI1x10GEXP_BITMSK_BITS_2 0x00003 | ||
519 | #define SUNI1x10GEXP_BITMSK_BITS_3 0x00007 | ||
520 | #define SUNI1x10GEXP_BITMSK_BITS_4 0x0000f | ||
521 | #define SUNI1x10GEXP_BITMSK_BITS_5 0x0001f | ||
522 | #define SUNI1x10GEXP_BITMSK_BITS_6 0x0003f | ||
523 | #define SUNI1x10GEXP_BITMSK_BITS_7 0x0007f | ||
524 | #define SUNI1x10GEXP_BITMSK_BITS_8 0x000ff | ||
525 | #define SUNI1x10GEXP_BITMSK_BITS_9 0x001ff | ||
526 | #define SUNI1x10GEXP_BITMSK_BITS_10 0x003ff | ||
527 | #define SUNI1x10GEXP_BITMSK_BITS_11 0x007ff | ||
528 | #define SUNI1x10GEXP_BITMSK_BITS_12 0x00fff | ||
529 | #define SUNI1x10GEXP_BITMSK_BITS_13 0x01fff | ||
530 | #define SUNI1x10GEXP_BITMSK_BITS_14 0x03fff | ||
531 | #define SUNI1x10GEXP_BITMSK_BITS_15 0x07fff | ||
532 | #define SUNI1x10GEXP_BITMSK_BITS_16 0x0ffff | ||
533 | |||
534 | #define mSUNI1x10GEXP_CLR_MSBITS_1(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_15) | ||
535 | #define mSUNI1x10GEXP_CLR_MSBITS_2(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_14) | ||
536 | #define mSUNI1x10GEXP_CLR_MSBITS_3(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_13) | ||
537 | #define mSUNI1x10GEXP_CLR_MSBITS_4(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_12) | ||
538 | #define mSUNI1x10GEXP_CLR_MSBITS_5(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_11) | ||
539 | #define mSUNI1x10GEXP_CLR_MSBITS_6(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_10) | ||
540 | #define mSUNI1x10GEXP_CLR_MSBITS_7(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_9) | ||
541 | #define mSUNI1x10GEXP_CLR_MSBITS_8(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_8) | ||
542 | #define mSUNI1x10GEXP_CLR_MSBITS_9(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_7) | ||
543 | #define mSUNI1x10GEXP_CLR_MSBITS_10(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_6) | ||
544 | #define mSUNI1x10GEXP_CLR_MSBITS_11(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_5) | ||
545 | #define mSUNI1x10GEXP_CLR_MSBITS_12(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_4) | ||
546 | #define mSUNI1x10GEXP_CLR_MSBITS_13(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_3) | ||
547 | #define mSUNI1x10GEXP_CLR_MSBITS_14(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_2) | ||
548 | #define mSUNI1x10GEXP_CLR_MSBITS_15(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_1) | ||
549 | |||
550 | #define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val)&(bitMsk)) ? 1:0) | ||
551 | |||
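A minimal, self-contained sketch of how the generic helpers above are typically used: mSUNI1x10GEXP_GET_BIT() tests a single flag and the mSUNI1x10GEXP_CLR_MSBITS_n() macros keep only the low-order bits of a field. The register value here is a made-up example, not something read from hardware.

    #include <stdio.h>

    #define SUNI1x10GEXP_BITMSK_BITS_12             0x00fff
    #define mSUNI1x10GEXP_CLR_MSBITS_4(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_12)
    #define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val) & (bitMsk)) ? 1 : 0)

    #define SUNI1x10GEXP_BITMSK_TOP_EXPIRED         0x0040   /* from register 0x0004 */

    int main(void)
    {
            unsigned int status = 0x1049;   /* example raw 16-bit register value */

            printf("TOP_EXPIRED = %d\n",
                   mSUNI1x10GEXP_GET_BIT(status, SUNI1x10GEXP_BITMSK_TOP_EXPIRED));
            printf("low 12 bits = 0x%03x\n", mSUNI1x10GEXP_CLR_MSBITS_4(status));
            return 0;
    }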
552 | |||
553 | |||
554 | /*---------------------------------------------------------------------------- | ||
555 | * Register 0x0001: S/UNI-1x10GE-XP Product Revision | ||
556 | * Bit 3-0 REVISION | ||
557 | *----------------------------------------------------------------------------*/ | ||
558 | #define SUNI1x10GEXP_BITMSK_REVISION 0x000F | ||
559 | |||
560 | /*---------------------------------------------------------------------------- | ||
561 | * Register 0x0002: S/UNI-1x10GE-XP Configuration and Reset Control | ||
562 | * Bit 2 XAUI_ARESETB | ||
563 | * Bit 1 PL4_ARESETB | ||
564 | * Bit 0 DRESETB | ||
565 | *----------------------------------------------------------------------------*/ | ||
566 | #define SUNI1x10GEXP_BITMSK_XAUI_ARESET 0x0004 | ||
567 | #define SUNI1x10GEXP_BITMSK_PL4_ARESET 0x0002 | ||
568 | #define SUNI1x10GEXP_BITMSK_DRESETB 0x0001 | ||
569 | |||
570 | /*---------------------------------------------------------------------------- | ||
571 | * Register 0x0003: S/UNI-1x10GE-XP Loop Back and Miscellaneous Control | ||
572 | * Bit 11 PL4IO_OUTCLKSEL | ||
573 | * Bit 9 SYSPCSLB | ||
574 | * Bit 8 LINEPCSLB | ||
575 | * Bit 7 MSTAT_BYPASS | ||
576 | * Bit 6 RXXG_BYPASS | ||
577 | * Bit 5 TXXG_BYPASS | ||
578 | * Bit 4 SOP_PAD_EN | ||
579 | * Bit 1 LOS_INV | ||
580 | * Bit 0 OVERRIDE_LOS | ||
581 | *----------------------------------------------------------------------------*/ | ||
582 | #define SUNI1x10GEXP_BITMSK_PL4IO_OUTCLKSEL 0x0800 | ||
583 | #define SUNI1x10GEXP_BITMSK_SYSPCSLB 0x0200 | ||
584 | #define SUNI1x10GEXP_BITMSK_LINEPCSLB 0x0100 | ||
585 | #define SUNI1x10GEXP_BITMSK_MSTAT_BYPASS 0x0080 | ||
586 | #define SUNI1x10GEXP_BITMSK_RXXG_BYPASS 0x0040 | ||
587 | #define SUNI1x10GEXP_BITMSK_TXXG_BYPASS 0x0020 | ||
588 | #define SUNI1x10GEXP_BITMSK_SOP_PAD_EN 0x0010 | ||
589 | #define SUNI1x10GEXP_BITMSK_LOS_INV 0x0002 | ||
590 | #define SUNI1x10GEXP_BITMSK_OVERRIDE_LOS 0x0001 | ||
591 | |||
130 | /*---------------------------------------------------------------------------- | 592 | /*---------------------------------------------------------------------------- |
131 | * Register 0x0004: S/UNI-1x10GE-XP Device Status | 593 | * Register 0x0004: S/UNI-1x10GE-XP Device Status |
132 | * Bit 9 TOP_SXRA_EXPIRED | 594 | * Bit 9 TOP_SXRA_EXPIRED |
@@ -141,7 +603,10 @@ | |||
141 | * Bit 0 TOP_PL4_OUT_ROOL | 603 | * Bit 0 TOP_PL4_OUT_ROOL |
142 | *----------------------------------------------------------------------------*/ | 604 | *----------------------------------------------------------------------------*/ |
143 | #define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200 | 605 | #define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200 |
606 | #define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY 0x0100 | ||
607 | #define SUNI1x10GEXP_BITMSK_TOP_DTRB 0x0080 | ||
144 | #define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040 | 608 | #define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040 |
609 | #define SUNI1x10GEXP_BITMSK_TOP_PAUSED 0x0020 | ||
145 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010 | 610 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010 |
146 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008 | 611 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008 |
147 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004 | 612 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004 |
@@ -149,12 +614,219 @@ | |||
149 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001 | 614 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001 |
150 | 615 | ||
151 | /*---------------------------------------------------------------------------- | 616 | /*---------------------------------------------------------------------------- |
617 | * Register 0x0005: Global Performance Update and Clock Monitors | ||
618 | * Bit 15 TIP | ||
619 | * Bit 8 XAUI_REF_CLKA | ||
620 | * Bit 7 RXLANE3CLKA | ||
621 | * Bit 6 RXLANE2CLKA | ||
622 | * Bit 5 RXLANE1CLKA | ||
623 | * Bit 4 RXLANE0CLKA | ||
624 | * Bit 3 CSUCLKA | ||
625 | * Bit 2 TDCLKA | ||
626 | * Bit 1 RSCLKA | ||
627 | * Bit 0 RDCLKA | ||
628 | *----------------------------------------------------------------------------*/ | ||
629 | #define SUNI1x10GEXP_BITMSK_TIP 0x8000 | ||
630 | #define SUNI1x10GEXP_BITMSK_XAUI_REF_CLKA 0x0100 | ||
631 | #define SUNI1x10GEXP_BITMSK_RXLANE3CLKA 0x0080 | ||
632 | #define SUNI1x10GEXP_BITMSK_RXLANE2CLKA 0x0040 | ||
633 | #define SUNI1x10GEXP_BITMSK_RXLANE1CLKA 0x0020 | ||
634 | #define SUNI1x10GEXP_BITMSK_RXLANE0CLKA 0x0010 | ||
635 | #define SUNI1x10GEXP_BITMSK_CSUCLKA 0x0008 | ||
636 | #define SUNI1x10GEXP_BITMSK_TDCLKA 0x0004 | ||
637 | #define SUNI1x10GEXP_BITMSK_RSCLKA 0x0002 | ||
638 | #define SUNI1x10GEXP_BITMSK_RDCLKA 0x0001 | ||
639 | |||
640 | /*---------------------------------------------------------------------------- | ||
641 | * Register 0x0006: MDIO Command | ||
642 | * Bit 4 MDIO_RDINC | ||
643 | * Bit 3 MDIO_RSTAT | ||
644 | * Bit 2 MDIO_LCTLD | ||
645 | * Bit 1 MDIO_LCTLA | ||
646 | * Bit 0 MDIO_SPRE | ||
647 | *----------------------------------------------------------------------------*/ | ||
648 | #define SUNI1x10GEXP_BITMSK_MDIO_RDINC 0x0010 | ||
649 | #define SUNI1x10GEXP_BITMSK_MDIO_RSTAT 0x0008 | ||
650 | #define SUNI1x10GEXP_BITMSK_MDIO_LCTLD 0x0004 | ||
651 | #define SUNI1x10GEXP_BITMSK_MDIO_LCTLA 0x0002 | ||
652 | #define SUNI1x10GEXP_BITMSK_MDIO_SPRE 0x0001 | ||
653 | |||
654 | /*---------------------------------------------------------------------------- | ||
655 | * Register 0x0007: MDIO Interrupt Enable | ||
656 | * Bit 0 MDIO_BUSY_EN | ||
657 | *----------------------------------------------------------------------------*/ | ||
658 | #define SUNI1x10GEXP_BITMSK_MDIO_BUSY_EN 0x0001 | ||
659 | |||
660 | /*---------------------------------------------------------------------------- | ||
661 | * Register 0x0008: MDIO Interrupt Status | ||
662 | * Bit 0 MDIO_BUSYI | ||
663 | *----------------------------------------------------------------------------*/ | ||
664 | #define SUNI1x10GEXP_BITMSK_MDIO_BUSYI 0x0001 | ||
665 | |||
666 | /*---------------------------------------------------------------------------- | ||
667 | * Register 0x0009: MMD PHY Address | ||
668 | * Bit 12-8 MDIO_DEVADR | ||
669 | * Bit 4-0 MDIO_PRTADR | ||
670 | *----------------------------------------------------------------------------*/ | ||
671 | #define SUNI1x10GEXP_BITMSK_MDIO_DEVADR 0x1F00 | ||
672 | #define SUNI1x10GEXP_BITOFF_MDIO_DEVADR 8 | ||
673 | #define SUNI1x10GEXP_BITMSK_MDIO_PRTADR 0x001F | ||
674 | #define SUNI1x10GEXP_BITOFF_MDIO_PRTADR 0 | ||
675 | |||
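The paired BITMSK/BITOFF defines follow a single convention: mask first, then shift by the offset. A minimal sketch using register 0x0009 (MMD PHY Address) as the example packs a device and port address into one 16-bit value and unpacks the device address again; it is pure bit manipulation with no hardware access.

    #include <stdint.h>

    #define SUNI1x10GEXP_BITMSK_MDIO_DEVADR 0x1F00
    #define SUNI1x10GEXP_BITOFF_MDIO_DEVADR 8
    #define SUNI1x10GEXP_BITMSK_MDIO_PRTADR 0x001F
    #define SUNI1x10GEXP_BITOFF_MDIO_PRTADR 0

    static uint16_t mmd_phy_addr_pack(uint16_t devadr, uint16_t prtadr)
    {
            return ((devadr << SUNI1x10GEXP_BITOFF_MDIO_DEVADR) &
                    SUNI1x10GEXP_BITMSK_MDIO_DEVADR) |
                   ((prtadr << SUNI1x10GEXP_BITOFF_MDIO_PRTADR) &
                    SUNI1x10GEXP_BITMSK_MDIO_PRTADR);
    }

    static uint16_t mmd_phy_addr_devadr(uint16_t reg)
    {
            return (reg & SUNI1x10GEXP_BITMSK_MDIO_DEVADR) >>
                   SUNI1x10GEXP_BITOFF_MDIO_DEVADR;
    }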
676 | /*---------------------------------------------------------------------------- | ||
677 | * Register 0x000C: OAM Interface Control | ||
678 | * Bit 6 MDO_OD_ENB | ||
679 | * Bit 5 MDI_INV | ||
680 | * Bit 4 MDI_SEL | ||
681 | * Bit 3 RXOAMEN | ||
682 | * Bit 2 RXOAMCLKEN | ||
683 | * Bit 1 TXOAMEN | ||
684 | * Bit 0 TXOAMCLKEN | ||
685 | *----------------------------------------------------------------------------*/ | ||
686 | #define SUNI1x10GEXP_BITMSK_MDO_OD_ENB 0x0040 | ||
687 | #define SUNI1x10GEXP_BITMSK_MDI_INV 0x0020 | ||
688 | #define SUNI1x10GEXP_BITMSK_MDI_SEL 0x0010 | ||
689 | #define SUNI1x10GEXP_BITMSK_RXOAMEN 0x0008 | ||
690 | #define SUNI1x10GEXP_BITMSK_RXOAMCLKEN 0x0004 | ||
691 | #define SUNI1x10GEXP_BITMSK_TXOAMEN 0x0002 | ||
692 | #define SUNI1x10GEXP_BITMSK_TXOAMCLKEN 0x0001 | ||
693 | |||
694 | /*---------------------------------------------------------------------------- | ||
695 | * Register 0x000D: S/UNI-1x10GE-XP Master Interrupt Status | ||
696 | * Bit 15 TOP_PL4IO_INT | ||
697 | * Bit 14 TOP_IRAM_INT | ||
698 | * Bit 13 TOP_ERAM_INT | ||
699 | * Bit 12 TOP_XAUI_INT | ||
700 | * Bit 11 TOP_MSTAT_INT | ||
701 | * Bit 10 TOP_RXXG_INT | ||
702 | * Bit 9 TOP_TXXG_INT | ||
703 | * Bit 8 TOP_XRF_INT | ||
704 | * Bit 7 TOP_XTEF_INT | ||
705 | * Bit 6 TOP_MDIO_BUSY_INT | ||
706 | * Bit 5 TOP_RXOAM_INT | ||
707 | * Bit 4 TOP_TXOAM_INT | ||
708 | * Bit 3 TOP_IFLX_INT | ||
709 | * Bit 2 TOP_EFLX_INT | ||
710 | * Bit 1 TOP_PL4ODP_INT | ||
711 | * Bit 0 TOP_PL4IDU_INT | ||
712 | *----------------------------------------------------------------------------*/ | ||
713 | #define SUNI1x10GEXP_BITMSK_TOP_PL4IO_INT 0x8000 | ||
714 | #define SUNI1x10GEXP_BITMSK_TOP_IRAM_INT 0x4000 | ||
715 | #define SUNI1x10GEXP_BITMSK_TOP_ERAM_INT 0x2000 | ||
716 | #define SUNI1x10GEXP_BITMSK_TOP_XAUI_INT 0x1000 | ||
717 | #define SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT 0x0800 | ||
718 | #define SUNI1x10GEXP_BITMSK_TOP_RXXG_INT 0x0400 | ||
719 | #define SUNI1x10GEXP_BITMSK_TOP_TXXG_INT 0x0200 | ||
720 | #define SUNI1x10GEXP_BITMSK_TOP_XRF_INT 0x0100 | ||
721 | #define SUNI1x10GEXP_BITMSK_TOP_XTEF_INT 0x0080 | ||
722 | #define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY_INT 0x0040 | ||
723 | #define SUNI1x10GEXP_BITMSK_TOP_RXOAM_INT 0x0020 | ||
724 | #define SUNI1x10GEXP_BITMSK_TOP_TXOAM_INT 0x0010 | ||
725 | #define SUNI1x10GEXP_BITMSK_TOP_IFLX_INT 0x0008 | ||
726 | #define SUNI1x10GEXP_BITMSK_TOP_EFLX_INT 0x0004 | ||
727 | #define SUNI1x10GEXP_BITMSK_TOP_PL4ODP_INT 0x0002 | ||
728 | #define SUNI1x10GEXP_BITMSK_TOP_PL4IDU_INT 0x0001 | ||
729 | |||
730 | /*---------------------------------------------------------------------------- | ||
152 | * Register 0x000E:PM3393 Global interrupt enable | 731 | * Register 0x000E:PM3393 Global interrupt enable |
153 | * Bit 15 TOP_INTE | 732 | * Bit 15 TOP_INTE |
154 | *----------------------------------------------------------------------------*/ | 733 | *----------------------------------------------------------------------------*/ |
155 | #define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000 | 734 | #define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000 |
156 | 735 | ||
157 | /*---------------------------------------------------------------------------- | 736 | /*---------------------------------------------------------------------------- |
737 | * Register 0x0010: XTEF Miscellaneous Control | ||
738 | * Bit 7 RF_VAL | ||
739 | * Bit 6 RF_OVERRIDE | ||
740 | * Bit 5 LF_VAL | ||
741 | * Bit 4 LF_OVERRIDE | ||
742 | *----------------------------------------------------------------------------*/ | ||
743 | #define SUNI1x10GEXP_BITMSK_RF_VAL 0x0080 | ||
744 | #define SUNI1x10GEXP_BITMSK_RF_OVERRIDE 0x0040 | ||
745 | #define SUNI1x10GEXP_BITMSK_LF_VAL 0x0020 | ||
746 | #define SUNI1x10GEXP_BITMSK_LF_OVERRIDE 0x0010 | ||
747 | #define SUNI1x10GEXP_BITMSK_LFRF_OVERRIDE_VAL 0x00F0 | ||
748 | |||
749 | /*---------------------------------------------------------------------------- | ||
750 | * Register 0x0011: XRF Miscellaneous Control | ||
751 | * Bit 6-4 EN_IDLE_REP | ||
752 | *----------------------------------------------------------------------------*/ | ||
753 | #define SUNI1x10GEXP_BITMSK_EN_IDLE_REP 0x0070 | ||
754 | |||
755 | /*---------------------------------------------------------------------------- | ||
756 | * Register 0x0100: SERDES 3125 Configuration Register 1 | ||
757 | * Bit 10 RXEQB_3 | ||
758 | * Bit 8 RXEQB_2 | ||
759 | * Bit 6 RXEQB_1 | ||
760 | * Bit 4 RXEQB_0 | ||
761 | *----------------------------------------------------------------------------*/ | ||
762 | #define SUNI1x10GEXP_BITMSK_RXEQB 0x0FF0 | ||
763 | #define SUNI1x10GEXP_BITOFF_RXEQB_3 10 | ||
764 | #define SUNI1x10GEXP_BITOFF_RXEQB_2 8 | ||
765 | #define SUNI1x10GEXP_BITOFF_RXEQB_1 6 | ||
766 | #define SUNI1x10GEXP_BITOFF_RXEQB_0 4 | ||
767 | |||
768 | /*---------------------------------------------------------------------------- | ||
769 | * Register 0x0101: SERDES 3125 Configuration Register 2 | ||
770 | * Bit 12 YSEL | ||
771 | * Bit 7 PRE_EMPH_3 | ||
772 | * Bit 6 PRE_EMPH_2 | ||
773 | * Bit 5 PRE_EMPH_1 | ||
774 | * Bit 4 PRE_EMPH_0 | ||
775 | *----------------------------------------------------------------------------*/ | ||
776 | #define SUNI1x10GEXP_BITMSK_YSEL 0x1000 | ||
777 | #define SUNI1x10GEXP_BITMSK_PRE_EMPH 0x00F0 | ||
778 | #define SUNI1x10GEXP_BITMSK_PRE_EMPH_3 0x0080 | ||
779 | #define SUNI1x10GEXP_BITMSK_PRE_EMPH_2 0x0040 | ||
780 | #define SUNI1x10GEXP_BITMSK_PRE_EMPH_1 0x0020 | ||
781 | #define SUNI1x10GEXP_BITMSK_PRE_EMPH_0 0x0010 | ||
782 | |||
783 | /*---------------------------------------------------------------------------- | ||
784 | * Register 0x0102: SERDES 3125 Interrupt Enable Register | ||
785 | * Bit 3 LASIE | ||
786 | * Bit 2 SPLL_RAE | ||
787 | * Bit 1 MPLL_RAE | ||
788 | * Bit 0 PLL_LOCKE | ||
789 | *----------------------------------------------------------------------------*/ | ||
790 | #define SUNI1x10GEXP_BITMSK_LASIE 0x0008 | ||
791 | #define SUNI1x10GEXP_BITMSK_SPLL_RAE 0x0004 | ||
792 | #define SUNI1x10GEXP_BITMSK_MPLL_RAE 0x0002 | ||
793 | #define SUNI1x10GEXP_BITMSK_PLL_LOCKE 0x0001 | ||
794 | |||
795 | /*---------------------------------------------------------------------------- | ||
796 | * Register 0x0103: SERDES 3125 Interrupt Visibility Register | ||
797 | * Bit 3 LASIV | ||
798 | * Bit 2 SPLL_RAV | ||
799 | * Bit 1 MPLL_RAV | ||
800 | * Bit 0 PLL_LOCKV | ||
801 | *----------------------------------------------------------------------------*/ | ||
802 | #define SUNI1x10GEXP_BITMSK_LASIV 0x0008 | ||
803 | #define SUNI1x10GEXP_BITMSK_SPLL_RAV 0x0004 | ||
804 | #define SUNI1x10GEXP_BITMSK_MPLL_RAV 0x0002 | ||
805 | #define SUNI1x10GEXP_BITMSK_PLL_LOCKV 0x0001 | ||
806 | |||
807 | /*---------------------------------------------------------------------------- | ||
808 | * Register 0x0104: SERDES 3125 Interrupt Status Register | ||
809 | * Bit 3 LASII | ||
810 | * Bit 2 SPLL_RAI | ||
811 | * Bit 1 MPLL_RAI | ||
812 | * Bit 0 PLL_LOCKI | ||
813 | *----------------------------------------------------------------------------*/ | ||
814 | #define SUNI1x10GEXP_BITMSK_LASII 0x0008 | ||
815 | #define SUNI1x10GEXP_BITMSK_SPLL_RAI 0x0004 | ||
816 | #define SUNI1x10GEXP_BITMSK_MPLL_RAI 0x0002 | ||
817 | #define SUNI1x10GEXP_BITMSK_PLL_LOCKI 0x0001 | ||
818 | |||
819 | /*---------------------------------------------------------------------------- | ||
820 | * Register 0x0107: SERDES 3125 Test Configuration | ||
821 | * Bit 12 DUALTX | ||
822 | * Bit 10 HC_1 | ||
823 | * Bit 9 HC_0 | ||
824 | *----------------------------------------------------------------------------*/ | ||
825 | #define SUNI1x10GEXP_BITMSK_DUALTX 0x1000 | ||
826 | #define SUNI1x10GEXP_BITMSK_HC 0x0600 | ||
827 | #define SUNI1x10GEXP_BITOFF_HC_0 9 | ||
828 | |||
829 | /*---------------------------------------------------------------------------- | ||
158 | * Register 0x2040: RXXG Configuration 1 | 830 | * Register 0x2040: RXXG Configuration 1 |
159 | * Bit 15 RXXG_RXEN | 831 | * Bit 15 RXXG_RXEN |
160 | * Bit 14 RXXG_ROCF | 832 | * Bit 14 RXXG_ROCF |
@@ -168,11 +840,84 @@ | |||
168 | * Bit 2-0 RXXG_MIFG | 840 | * Bit 2-0 RXXG_MIFG |
169 | *----------------------------------------------------------------------------*/ | 841 | *----------------------------------------------------------------------------*/ |
170 | #define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000 | 842 | #define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000 |
843 | #define SUNI1x10GEXP_BITMSK_RXXG_ROCF 0x4000 | ||
844 | #define SUNI1x10GEXP_BITMSK_RXXG_PAD_STRIP 0x2000 | ||
171 | #define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400 | 845 | #define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400 |
846 | #define SUNI1x10GEXP_BITMSK_RXXG_LONGP 0x0200 | ||
847 | #define SUNI1x10GEXP_BITMSK_RXXG_PARF 0x0100 | ||
172 | #define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080 | 848 | #define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080 |
849 | #define SUNI1x10GEXP_BITMSK_RXXG_PASS_CTRL 0x0020 | ||
173 | #define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008 | 850 | #define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008 |
174 | 851 | ||
175 | /*---------------------------------------------------------------------------- | 852 | /*---------------------------------------------------------------------------- |
853 | * Register 0x2041: RXXG Configuration 2 | ||
854 | * Bit 7-0 RXXG_HDRSIZE | ||
855 | *----------------------------------------------------------------------------*/ | ||
856 | #define SUNI1x10GEXP_BITMSK_RXXG_HDRSIZE 0x00FF | ||
857 | |||
858 | /*---------------------------------------------------------------------------- | ||
859 | * Register 0x2042: RXXG Configuration 3 | ||
860 | * Bit 15 RXXG_MIN_LERRE | ||
861 | * Bit 14 RXXG_MAX_LERRE | ||
862 | * Bit 12 RXXG_LINE_ERRE | ||
863 | * Bit 10 RXXG_RX_OVRE | ||
864 | * Bit 9 RXXG_ADR_FILTERE | ||
865 | * Bit 8 RXXG_ERR_FILTERE | ||
866 | * Bit 5 RXXG_PRMB_ERRE | ||
867 | *----------------------------------------------------------------------------*/ | ||
868 | #define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRE 0x8000 | ||
869 | #define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRE 0x4000 | ||
870 | #define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRE 0x1000 | ||
871 | #define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRE 0x0400 | ||
872 | #define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERE 0x0200 | ||
873 | #define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERRE 0x0100 | ||
874 | #define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020 | ||
875 | |||
876 | /*---------------------------------------------------------------------------- | ||
877 | * Register 0x2043: RXXG Interrupt | ||
878 | * Bit 15 RXXG_MIN_LERRI | ||
879 | * Bit 14 RXXG_MAX_LERRI | ||
880 | * Bit 12 RXXG_LINE_ERRI | ||
881 | * Bit 10 RXXG_RX_OVRI | ||
882 | * Bit 9 RXXG_ADR_FILTERI | ||
883 | * Bit 8 RXXG_ERR_FILTERI | ||
884 | * Bit 5 RXXG_PRMB_ERRE | ||
885 | *----------------------------------------------------------------------------*/ | ||
886 | #define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRI 0x8000 | ||
887 | #define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRI 0x4000 | ||
888 | #define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRI 0x1000 | ||
889 | #define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRI 0x0400 | ||
890 | #define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERI 0x0200 | ||
891 | #define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERI 0x0100 | ||
892 | #define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020 | ||
893 | |||
894 | /*---------------------------------------------------------------------------- | ||
895 | * Register 0x2049: RXXG Receive FIFO Threshold | ||
896 | * Bit 2-0 RXXG_CUT_THRU | ||
897 | *----------------------------------------------------------------------------*/ | ||
898 | #define SUNI1x10GEXP_BITMSK_RXXG_CUT_THRU 0x0007 | ||
899 | #define SUNI1x10GEXP_BITOFF_RXXG_CUT_THRU 0 | ||
900 | |||
901 | /*---------------------------------------------------------------------------- | ||
902 | * Register 0x2062 - 0x2069: RXXG Exact Match VID | ||
903 | * Bit 11-0 RXXG_VID_MATCH | ||
904 | *----------------------------------------------------------------------------*/ | ||
905 | #define SUNI1x10GEXP_BITMSK_RXXG_VID_MATCH 0x0FFF | ||
906 | #define SUNI1x10GEXP_BITOFF_RXXG_VID_MATCH 0 | ||
907 | |||
908 | /*---------------------------------------------------------------------------- | ||
909 | * Register 0x206E - 0x206F: RXXG Address Filter Control | ||
910 | * Bit 3 RXXG_FORWARD_ENABLE | ||
911 | * Bit 2 RXXG_VLAN_ENABLE | ||
912 | * Bit 1 RXXG_SRC_ADDR | ||
913 | * Bit 0 RXXG_MATCH_ENABLE | ||
914 | *----------------------------------------------------------------------------*/ | ||
915 | #define SUNI1x10GEXP_BITMSK_RXXG_FORWARD_ENABLE 0x0008 | ||
916 | #define SUNI1x10GEXP_BITMSK_RXXG_VLAN_ENABLE 0x0004 | ||
917 | #define SUNI1x10GEXP_BITMSK_RXXG_SRC_ADDR 0x0002 | ||
918 | #define SUNI1x10GEXP_BITMSK_RXXG_MATCH_ENABLE 0x0001 | ||
919 | |||
920 | /*---------------------------------------------------------------------------- | ||
176 | * Register 0x2070: RXXG Address Filter Control 2 | 921 | * Register 0x2070: RXXG Address Filter Control 2 |
177 | * Bit 1 RXXG_PMODE | 922 | * Bit 1 RXXG_PMODE |
178 | * Bit 0 RXXG_MHASH_EN | 923 | * Bit 0 RXXG_MHASH_EN |
@@ -181,15 +926,446 @@ | |||
181 | #define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001 | 926 | #define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001 |
182 | 927 | ||
183 | /*---------------------------------------------------------------------------- | 928 | /*---------------------------------------------------------------------------- |
929 | * Register 0x2081: XRF Control Register 2 | ||
930 | * Bit 6 EN_PKT_GEN | ||
931 | * Bit 4-2 PATT | ||
932 | *----------------------------------------------------------------------------*/ | ||
933 | #define SUNI1x10GEXP_BITMSK_EN_PKT_GEN 0x0040 | ||
934 | #define SUNI1x10GEXP_BITMSK_PATT 0x001C | ||
935 | #define SUNI1x10GEXP_BITOFF_PATT 2 | ||
936 | |||
937 | /*---------------------------------------------------------------------------- | ||
938 | * Register 0x2088: XRF Interrupt Enable | ||
939 | * Bit 12-9 LANE_HICERE | ||
940 | * Bit 8-5 HS_SD_LANEE | ||
941 | * Bit 4 ALIGN_STATUS_ERRE | ||
942 | * Bit 3-0 LANE_SYNC_STAT_ERRE | ||
943 | *----------------------------------------------------------------------------*/ | ||
944 | #define SUNI1x10GEXP_BITMSK_LANE_HICERE 0x1E00 | ||
945 | #define SUNI1x10GEXP_BITOFF_LANE_HICERE 9 | ||
946 | #define SUNI1x10GEXP_BITMSK_HS_SD_LANEE 0x01E0 | ||
947 | #define SUNI1x10GEXP_BITOFF_HS_SD_LANEE 5 | ||
948 | #define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRE 0x0010 | ||
949 | #define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRE 0x000F | ||
950 | #define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRE 0 | ||
951 | |||
952 | /*---------------------------------------------------------------------------- | ||
953 | * Register 0x2089: XRF Interrupt Status | ||
954 | * Bit 12-9 LANE_HICERI | ||
955 | * Bit 8-5 HS_SD_LANEI | ||
956 | * Bit 4 ALIGN_STATUS_ERRI | ||
957 | * Bit 3-0 LANE_SYNC_STAT_ERRI | ||
958 | *----------------------------------------------------------------------------*/ | ||
959 | #define SUNI1x10GEXP_BITMSK_LANE_HICERI 0x1E00 | ||
960 | #define SUNI1x10GEXP_BITOFF_LANE_HICERI 9 | ||
961 | #define SUNI1x10GEXP_BITMSK_HS_SD_LANEI 0x01E0 | ||
962 | #define SUNI1x10GEXP_BITOFF_HS_SD_LANEI 5 | ||
963 | #define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRI 0x0010 | ||
964 | #define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRI 0x000F | ||
965 | #define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRI 0 | ||
966 | |||
967 | /*---------------------------------------------------------------------------- | ||
968 | * Register 0x208A: XRF Error Status | ||
969 | * Bit 8-5 HS_SD_LANE | ||
970 | * Bit 4 ALIGN_STATUS_ERR | ||
971 | * Bit 3-0 LANE_SYNC_STAT_ERR | ||
972 | *----------------------------------------------------------------------------*/ | ||
973 | #define SUNI1x10GEXP_BITMSK_HS_SD_LANE3 0x0100 | ||
974 | #define SUNI1x10GEXP_BITMSK_HS_SD_LANE2 0x0080 | ||
975 | #define SUNI1x10GEXP_BITMSK_HS_SD_LANE1 0x0040 | ||
976 | #define SUNI1x10GEXP_BITMSK_HS_SD_LANE0 0x0020 | ||
977 | #define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERR 0x0010 | ||
978 | #define SUNI1x10GEXP_BITMSK_LANE3_SYNC_STAT_ERR 0x0008 | ||
979 | #define SUNI1x10GEXP_BITMSK_LANE2_SYNC_STAT_ERR 0x0004 | ||
980 | #define SUNI1x10GEXP_BITMSK_LANE1_SYNC_STAT_ERR 0x0002 | ||
981 | #define SUNI1x10GEXP_BITMSK_LANE0_SYNC_STAT_ERR 0x0001 | ||
982 | |||
983 | /*---------------------------------------------------------------------------- | ||
984 | * Register 0x208B: XRF Diagnostic Interrupt Enable | ||
985 | * Bit 7-4 LANE_OVERRUNE | ||
986 | * Bit 3-0 LANE_UNDERRUNE | ||
987 | *----------------------------------------------------------------------------*/ | ||
988 | #define SUNI1x10GEXP_BITMSK_LANE_OVERRUNE 0x00F0 | ||
989 | #define SUNI1x10GEXP_BITOFF_LANE_OVERRUNE 4 | ||
990 | #define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNE 0x000F | ||
991 | #define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNE 0 | ||
992 | |||
993 | /*---------------------------------------------------------------------------- | ||
994 | * Register 0x208C: XRF Diagnostic Interrupt Status | ||
995 | * Bit 7-4 LANE_OVERRUNI | ||
996 | * Bit 3-0 LANE_UNDERRUNI | ||
997 | *----------------------------------------------------------------------------*/ | ||
998 | #define SUNI1x10GEXP_BITMSK_LANE_OVERRUNI 0x00F0 | ||
999 | #define SUNI1x10GEXP_BITOFF_LANE_OVERRUNI 4 | ||
1000 | #define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNI 0x000F | ||
1001 | #define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNI 0 | ||
1002 | |||
1003 | /*---------------------------------------------------------------------------- | ||
1004 | * Register 0x20C0: RXOAM Configuration | ||
1005 | * Bit 15 RXOAM_BUSY | ||
1006 | * Bit 14-12 RXOAM_F2_SEL | ||
1007 | * Bit 10-8 RXOAM_F1_SEL | ||
1008 | * Bit 7-6 RXOAM_FILTER_CTRL | ||
1009 | * Bit 5-0 RXOAM_PX_EN | ||
1010 | *----------------------------------------------------------------------------*/ | ||
1011 | #define SUNI1x10GEXP_BITMSK_RXOAM_BUSY 0x8000 | ||
1012 | #define SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL 0x7000 | ||
1013 | #define SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL 12 | ||
1014 | #define SUNI1x10GEXP_BITMSK_RXOAM_F1_SEL 0x0700 | ||
1015 | #define SUNI1x10GEXP_BITOFF_RXOAM_F1_SEL 8 | ||
1016 | #define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_CTRL 0x00C0 | ||
1017 | #define SUNI1x10GEXP_BITOFF_RXOAM_FILTER_CTRL 6 | ||
1018 | #define SUNI1x10GEXP_BITMSK_RXOAM_PX_EN 0x003F | ||
1019 | #define SUNI1x10GEXP_BITOFF_RXOAM_PX_EN 0 | ||
1020 | |||
1021 | /*---------------------------------------------------------------------------- | ||
1022 | * Register 0x20C1,0x20C2: RXOAM Filter Configuration | ||
1023 | * Bit 15-8 RXOAM_FX_MASK | ||
1024 | * Bit 7-0 RXOAM_FX_VAL | ||
1025 | *----------------------------------------------------------------------------*/ | ||
1026 | #define SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK 0xFF00 | ||
1027 | #define SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK 8 | ||
1028 | #define SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL 0x00FF | ||
1029 | #define SUNI1x10GEXP_BITOFF_RXOAM_FX_VAl 0 | ||
1030 | |||
1031 | /*---------------------------------------------------------------------------- | ||
1032 | * Register 0x20C3: RXOAM Configuration Register 2 | ||
1033 | * Bit 13 RXOAM_REC_BYTE_VAL | ||
1034 | * Bit 11-10 RXOAM_BYPASS_MODE | ||
1035 | * Bit 5-0 RXOAM_PX_CLEAR | ||
1036 | *----------------------------------------------------------------------------*/ | ||
1037 | #define SUNI1x10GEXP_BITMSK_RXOAM_REC_BYTE_VAL 0x2000 | ||
1038 | #define SUNI1x10GEXP_BITMSK_RXOAM_BYPASS_MODE 0x0C00 | ||
1039 | #define SUNI1x10GEXP_BITOFF_RXOAM_BYPASS_MODE 10 | ||
1040 | #define SUNI1x10GEXP_BITMSK_RXOAM_PX_CLEAR 0x003F | ||
1041 | #define SUNI1x10GEXP_BITOFF_RXOAM_PX_CLEAR 0 | ||
1042 | |||
1043 | /*---------------------------------------------------------------------------- | ||
1044 | * Register 0x20C4: RXOAM HEC Configuration | ||
1045 | * Bit 15-8 RXOAM_COSET | ||
1046 | * Bit 2 RXOAM_HEC_ERR_PKT | ||
1047 | * Bit 0 RXOAM_HEC_EN | ||
1048 | *----------------------------------------------------------------------------*/ | ||
1049 | #define SUNI1x10GEXP_BITMSK_RXOAM_COSET 0xFF00 | ||
1050 | #define SUNI1x10GEXP_BITOFF_RXOAM_COSET 8 | ||
1051 | #define SUNI1x10GEXP_BITMSK_RXOAM_HEC_ERR_PKT 0x0004 | ||
1052 | #define SUNI1x10GEXP_BITMSK_RXOAM_HEC_EN 0x0001 | ||
1053 | |||
1054 | /*---------------------------------------------------------------------------- | ||
1055 | * Register 0x20C7: RXOAM Interrupt Enable | ||
1056 | * Bit 10 RXOAM_FILTER_THRSHE | ||
1057 | * Bit 9 RXOAM_OAM_ERRE | ||
1058 | * Bit 8 RXOAM_HECE_THRSHE | ||
1059 | * Bit 7 RXOAM_SOPE | ||
1060 | * Bit 6 RXOAM_RFE | ||
1061 | * Bit 5 RXOAM_LFE | ||
1062 | * Bit 4 RXOAM_DV_ERRE | ||
1063 | * Bit 3 RXOAM_DATA_INVALIDE | ||
1064 | * Bit 2 RXOAM_FILTER_DROPE | ||
1065 | * Bit 1 RXOAM_HECE | ||
1066 | * Bit 0 RXOAM_OFLE | ||
1067 | *----------------------------------------------------------------------------*/ | ||
1068 | #define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHE 0x0400 | ||
1069 | #define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRE 0x0200 | ||
1070 | #define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHE 0x0100 | ||
1071 | #define SUNI1x10GEXP_BITMSK_RXOAM_SOPE 0x0080 | ||
1072 | #define SUNI1x10GEXP_BITMSK_RXOAM_RFE 0x0040 | ||
1073 | #define SUNI1x10GEXP_BITMSK_RXOAM_LFE 0x0020 | ||
1074 | #define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRE 0x0010 | ||
1075 | #define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDE 0x0008 | ||
1076 | #define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPE 0x0004 | ||
1077 | #define SUNI1x10GEXP_BITMSK_RXOAM_HECE 0x0002 | ||
1078 | #define SUNI1x10GEXP_BITMSK_RXOAM_OFLE 0x0001 | ||
1079 | |||
1080 | /*---------------------------------------------------------------------------- | ||
1081 | * Register 0x20C8: RXOAM Interrupt Status | ||
1082 | * Bit 10 RXOAM_FILTER_THRSHI | ||
1083 | * Bit 9 RXOAM_OAM_ERRI | ||
1084 | * Bit 8 RXOAM_HECE_THRSHI | ||
1085 | * Bit 7 RXOAM_SOPI | ||
1086 | * Bit 6 RXOAM_RFI | ||
1087 | * Bit 5 RXOAM_LFI | ||
1088 | * Bit 4 RXOAM_DV_ERRI | ||
1089 | * Bit 3 RXOAM_DATA_INVALIDI | ||
1090 | * Bit 2 RXOAM_FILTER_DROPI | ||
1091 | * Bit 1 RXOAM_HECI | ||
1092 | * Bit 0 RXOAM_OFLI | ||
1093 | *----------------------------------------------------------------------------*/ | ||
1094 | #define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHI 0x0400 | ||
1095 | #define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRI 0x0200 | ||
1096 | #define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHI 0x0100 | ||
1097 | #define SUNI1x10GEXP_BITMSK_RXOAM_SOPI 0x0080 | ||
1098 | #define SUNI1x10GEXP_BITMSK_RXOAM_RFI 0x0040 | ||
1099 | #define SUNI1x10GEXP_BITMSK_RXOAM_LFI 0x0020 | ||
1100 | #define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRI 0x0010 | ||
1101 | #define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDI 0x0008 | ||
1102 | #define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPI 0x0004 | ||
1103 | #define SUNI1x10GEXP_BITMSK_RXOAM_HECI 0x0002 | ||
1104 | #define SUNI1x10GEXP_BITMSK_RXOAM_OFLI 0x0001 | ||
1105 | |||
1106 | /*---------------------------------------------------------------------------- | ||
1107 | * Register 0x20C9: RXOAM Status | ||
1108 | * Bit 10 RXOAM_FILTER_THRSHV | ||
1109 | * Bit 8 RXOAM_HECE_THRSHV | ||
1110 | * Bit 6 RXOAM_RFV | ||
1111 | * Bit 5 RXOAM_LFV | ||
1112 | *----------------------------------------------------------------------------*/ | ||
1113 | #define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHV 0x0400 | ||
1114 | #define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHV 0x0100 | ||
1115 | #define SUNI1x10GEXP_BITMSK_RXOAM_RFV 0x0040 | ||
1116 | #define SUNI1x10GEXP_BITMSK_RXOAM_LFV 0x0020 | ||
1117 | |||
1118 | /*---------------------------------------------------------------------------- | ||
184 | * Register 0x2100: MSTAT Control | 1119 | * Register 0x2100: MSTAT Control |
185 | * Bit 2 MSTAT_WRITE | 1120 | * Bit 2 MSTAT_WRITE |
186 | * Bit 1 MSTAT_CLEAR | 1121 | * Bit 1 MSTAT_CLEAR |
187 | * Bit 0 MSTAT_SNAP | 1122 | * Bit 0 MSTAT_SNAP |
188 | *----------------------------------------------------------------------------*/ | 1123 | *----------------------------------------------------------------------------*/ |
1124 | #define SUNI1x10GEXP_BITMSK_MSTAT_WRITE 0x0004 | ||
189 | #define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002 | 1125 | #define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002 |
190 | #define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001 | 1126 | #define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001 |
191 | 1127 | ||
192 | /*---------------------------------------------------------------------------- | 1128 | /*---------------------------------------------------------------------------- |
1129 | * Register 0x2109: MSTAT Counter Write Address | ||
1130 | * Bit 5-0 MSTAT_WRITE_ADDRESS | ||
1131 | *----------------------------------------------------------------------------*/ | ||
1132 | #define SUNI1x10GEXP_BITMSK_MSTAT_WRITE_ADDRESS 0x003F | ||
1133 | #define SUNI1x10GEXP_BITOFF_MSTAT_WRITE_ADDRESS 0 | ||
1134 | |||
1135 | /*---------------------------------------------------------------------------- | ||
1136 | * Register 0x2200: IFLX Global Configuration Register | ||
1137 | * Bit 15 IFLX_IRCU_ENABLE | ||
1138 | * Bit 14 IFLX_IDSWT_ENABLE | ||
1139 | * Bit 13-0 IFLX_IFD_CNT | ||
1140 | *----------------------------------------------------------------------------*/ | ||
1141 | #define SUNI1x10GEXP_BITMSK_IFLX_IRCU_ENABLE 0x8000 | ||
1142 | #define SUNI1x10GEXP_BITMSK_IFLX_IDSWT_ENABLE 0x4000 | ||
1143 | #define SUNI1x10GEXP_BITMSK_IFLX_IFD_CNT 0x3FFF | ||
1144 | #define SUNI1x10GEXP_BITOFF_IFLX_IFD_CNT 0 | ||
1145 | |||
1146 | /*---------------------------------------------------------------------------- | ||
1147 | * Register 0x2209: IFLX FIFO Overflow Enable | ||
1148 | * Bit 0 IFLX_OVFE | ||
1149 | *----------------------------------------------------------------------------*/ | ||
1150 | #define SUNI1x10GEXP_BITMSK_IFLX_OVFE 0x0001 | ||
1151 | |||
1152 | /*---------------------------------------------------------------------------- | ||
1153 | * Register 0x220A: IFLX FIFO Overflow Interrupt | ||
1154 | * Bit 0 IFLX_OVFI | ||
1155 | *----------------------------------------------------------------------------*/ | ||
1156 | #define SUNI1x10GEXP_BITMSK_IFLX_OVFI 0x0001 | ||
1157 | |||
1158 | /*---------------------------------------------------------------------------- | ||
1159 | * Register 0x220D: IFLX Indirect Channel Address | ||
1160 | * Bit 15 IFLX_BUSY | ||
1161 | * Bit 14 IFLX_RWB | ||
1162 | *----------------------------------------------------------------------------*/ | ||
1163 | #define SUNI1x10GEXP_BITMSK_IFLX_BUSY 0x8000 | ||
1164 | #define SUNI1x10GEXP_BITMSK_IFLX_RWB 0x4000 | ||
1165 | |||
1166 | /*---------------------------------------------------------------------------- | ||
1167 | * Register 0x220E: IFLX Indirect Logical FIFO Low Limit & Provision | ||
1168 | * Bit 9-0 IFLX_LOLIM | ||
1169 | *----------------------------------------------------------------------------*/ | ||
1170 | #define SUNI1x10GEXP_BITMSK_IFLX_LOLIM 0x03FF | ||
1171 | #define SUNI1x10GEXP_BITOFF_IFLX_LOLIM 0 | ||
1172 | |||
1173 | /*---------------------------------------------------------------------------- | ||
1174 | * Register 0x220F: IFLX Indirect Logical FIFO High Limit | ||
1175 | * Bit 9-0 IFLX_HILIM | ||
1176 | *----------------------------------------------------------------------------*/ | ||
1177 | #define SUNI1x10GEXP_BITMSK_IFLX_HILIM 0x03FF | ||
1178 | #define SUNI1x10GEXP_BITOFF_IFLX_HILIM 0 | ||
1179 | |||
1180 | /*---------------------------------------------------------------------------- | ||
1181 | * Register 0x2210: IFLX Indirect Full/Almost Full Status & Limit | ||
1182 | * Bit 15 IFLX_FULL | ||
1183 | * Bit 14 IFLX_AFULL | ||
1184 | * Bit 13-0 IFLX_AFTH | ||
1185 | *----------------------------------------------------------------------------*/ | ||
1186 | #define SUNI1x10GEXP_BITMSK_IFLX_FULL 0x8000 | ||
1187 | #define SUNI1x10GEXP_BITMSK_IFLX_AFULL 0x4000 | ||
1188 | #define SUNI1x10GEXP_BITMSK_IFLX_AFTH 0x3FFF | ||
1189 | #define SUNI1x10GEXP_BITOFF_IFLX_AFTH 0 | ||
1190 | |||
1191 | /*---------------------------------------------------------------------------- | ||
1192 | * Register 0x2211: IFLX Indirect Empty/Almost Empty Status & Limit | ||
1193 | * Bit 15 IFLX_EMPTY | ||
1194 | * Bit 14 IFLX_AEMPTY | ||
1195 | * Bit 13-0 IFLX_AETH | ||
1196 | *----------------------------------------------------------------------------*/ | ||
1197 | #define SUNI1x10GEXP_BITMSK_IFLX_EMPTY 0x8000 | ||
1198 | #define SUNI1x10GEXP_BITMSK_IFLX_AEMPTY 0x4000 | ||
1199 | #define SUNI1x10GEXP_BITMSK_IFLX_AETH 0x3FFF | ||
1200 | #define SUNI1x10GEXP_BITOFF_IFLX_AETH 0 | ||
1201 | |||
1202 | /*---------------------------------------------------------------------------- | ||
1203 | * Register 0x2240: PL4MOS Configuration Register | ||
1204 | * Bit 3 PL4MOS_RE_INIT | ||
1205 | * Bit 2 PL4MOS_EN | ||
1206 | * Bit 1 PL4MOS_NO_STATUS | ||
1207 | *----------------------------------------------------------------------------*/ | ||
1208 | #define SUNI1x10GEXP_BITMSK_PL4MOS_RE_INIT 0x0008 | ||
1209 | #define SUNI1x10GEXP_BITMSK_PL4MOS_EN 0x0004 | ||
1210 | #define SUNI1x10GEXP_BITMSK_PL4MOS_NO_STATUS 0x0002 | ||
1211 | |||
1212 | /*---------------------------------------------------------------------------- | ||
1213 | * Register 0x2243: PL4MOS MaxBurst1 Register | ||
1214 | * Bit 11-0 PL4MOS_MAX_BURST1 | ||
1215 | *----------------------------------------------------------------------------*/ | ||
1216 | #define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST1 0x0FFF | ||
1217 | #define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST1 0 | ||
1218 | |||
1219 | /*---------------------------------------------------------------------------- | ||
1220 | * Register 0x2244: PL4MOS MaxBurst2 Register | ||
1221 | * Bit 11-0 PL4MOS_MAX_BURST2 | ||
1222 | *----------------------------------------------------------------------------*/ | ||
1223 | #define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST2 0x0FFF | ||
1224 | #define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST2 0 | ||
1225 | |||
1226 | /*---------------------------------------------------------------------------- | ||
1227 | * Register 0x2245: PL4MOS Transfer Size Register | ||
1228 | * Bit 7-0 PL4MOS_MAX_TRANSFER | ||
1229 | *----------------------------------------------------------------------------*/ | ||
1230 | #define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_TRANSFER 0x00FF | ||
1231 | #define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_TRANSFER 0 | ||
1232 | |||
1233 | /*---------------------------------------------------------------------------- | ||
1234 | * Register 0x2280: PL4ODP Configuration | ||
1235 | * Bit 15-12 PL4ODP_REPEAT_T | ||
1236 | * Bit 8 PL4ODP_SOP_RULE | ||
1237 | * Bit 1 PL4ODP_EN_PORTS | ||
1238 | * Bit 0 PL4ODP_EN_DFWD | ||
1239 | *----------------------------------------------------------------------------*/ | ||
1240 | #define SUNI1x10GEXP_BITMSK_PL4ODP_REPEAT_T 0xF000 | ||
1241 | #define SUNI1x10GEXP_BITOFF_PL4ODP_REPEAT_T 12 | ||
1242 | #define SUNI1x10GEXP_BITMSK_PL4ODP_SOP_RULE 0x0100 | ||
1243 | #define SUNI1x10GEXP_BITMSK_PL4ODP_EN_PORTS 0x0002 | ||
1244 | #define SUNI1x10GEXP_BITMSK_PL4ODP_EN_DFWD 0x0001 | ||
1245 | |||
1246 | /*---------------------------------------------------------------------------- | ||
1247 | * Register 0x2282: PL4ODP Interrupt Mask | ||
1248 | * Bit 0 PL4ODP_OUT_DISE | ||
1249 | *----------------------------------------------------------------------------*/ | ||
1250 | #define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISE 0x0001 | ||
1251 | |||
1252 | |||
1253 | |||
1254 | #define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBE 0x0080 | ||
1255 | #define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPE 0x0040 | ||
1256 | #define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPE 0x0008 | ||
1257 | #define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPE 0x0004 | ||
1258 | #define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRE 0x0002 | ||
1259 | |||
1260 | |||
1261 | /*---------------------------------------------------------------------------- | ||
1262 | * Register 0x2283: PL4ODP Interrupt | ||
1263 | * Bit 0 PL4ODP_OUT_DISI | ||
1264 | *----------------------------------------------------------------------------*/ | ||
1265 | #define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISI 0x0001 | ||
1266 | |||
1267 | |||
1268 | |||
1269 | #define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBI 0x0080 | ||
1270 | #define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPI 0x0040 | ||
1271 | #define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPI 0x0008 | ||
1272 | #define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPI 0x0004 | ||
1273 | #define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRI 0x0002 | ||
1274 | |||
1275 | /*---------------------------------------------------------------------------- | ||
1276 | * Register 0x2300: PL4IO Lock Detect Status | ||
1277 | * Bit 15 PL4IO_OUT_ROOLV | ||
1278 | * Bit 12 PL4IO_IS_ROOLV | ||
1279 | * Bit 11 PL4IO_DIP2_ERRV | ||
1280 | * Bit 8 PL4IO_ID_ROOLV | ||
1281 | * Bit 4 PL4IO_IS_DOOLV | ||
1282 | * Bit 0 PL4IO_ID_DOOLV | ||
1283 | *----------------------------------------------------------------------------*/ | ||
1284 | #define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLV 0x8000 | ||
1285 | #define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLV 0x1000 | ||
1286 | #define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRV 0x0800 | ||
1287 | #define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLV 0x0100 | ||
1288 | #define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLV 0x0010 | ||
1289 | #define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLV 0x0001 | ||
1290 | |||
1291 | /*---------------------------------------------------------------------------- | ||
1292 | * Register 0x2301: PL4IO Lock Detect Change | ||
1293 | * Bit 15 PL4IO_OUT_ROOLI | ||
1294 | * Bit 12 PL4IO_IS_ROOLI | ||
1295 | * Bit 11 PL4IO_DIP2_ERRI | ||
1296 | * Bit 8 PL4IO_ID_ROOLI | ||
1297 | * Bit 4 PL4IO_IS_DOOLI | ||
1298 | * Bit 0 PL4IO_ID_DOOLI | ||
1299 | *----------------------------------------------------------------------------*/ | ||
1300 | #define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLI 0x8000 | ||
1301 | #define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLI 0x1000 | ||
1302 | #define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRI 0x0800 | ||
1303 | #define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLI 0x0100 | ||
1304 | #define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLI 0x0010 | ||
1305 | #define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLI 0x0001 | ||
1306 | |||
1307 | /*---------------------------------------------------------------------------- | ||
1308 | * Register 0x2302: PL4IO Lock Detect Mask | ||
1309 | * Bit 15 PL4IO_OUT_ROOLE | ||
1310 | * Bit 12 PL4IO_IS_ROOLE | ||
1311 | * Bit 11 PL4IO_DIP2_ERRE | ||
1312 | * Bit 8 PL4IO_ID_ROOLE | ||
1313 | * Bit 4 PL4IO_IS_DOOLE | ||
1314 | * Bit 0 PL4IO_ID_DOOLE | ||
1315 | *----------------------------------------------------------------------------*/ | ||
1316 | #define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLE 0x8000 | ||
1317 | #define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLE 0x1000 | ||
1318 | #define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRE 0x0800 | ||
1319 | #define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLE 0x0100 | ||
1320 | #define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLE 0x0010 | ||
1321 | #define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLE 0x0001 | ||
1322 | |||
1323 | /*---------------------------------------------------------------------------- | ||
1324 | * Register 0x2303: PL4IO Lock Detect Limits | ||
1325 | * Bit 15-8 PL4IO_REF_LIMIT | ||
1326 | * Bit 7-0 PL4IO_TRAN_LIMIT | ||
1327 | *----------------------------------------------------------------------------*/ | ||
1328 | #define SUNI1x10GEXP_BITMSK_PL4IO_REF_LIMIT 0xFF00 | ||
1329 | #define SUNI1x10GEXP_BITOFF_PL4IO_REF_LIMIT 8 | ||
1330 | #define SUNI1x10GEXP_BITMSK_PL4IO_TRAN_LIMIT 0x00FF | ||
1331 | #define SUNI1x10GEXP_BITOFF_PL4IO_TRAN_LIMIT 0 | ||
1332 | |||
1333 | /*---------------------------------------------------------------------------- | ||
1334 | * Register 0x2304: PL4IO Calendar Repetitions | ||
1335 | * Bit 15-8 PL4IO_IN_MUL | ||
1336 | * Bit 7-0 PL4IO_OUT_MUL | ||
1337 | *----------------------------------------------------------------------------*/ | ||
1338 | #define SUNI1x10GEXP_BITMSK_PL4IO_IN_MUL 0xFF00 | ||
1339 | #define SUNI1x10GEXP_BITOFF_PL4IO_IN_MUL 8 | ||
1340 | #define SUNI1x10GEXP_BITMSK_PL4IO_OUT_MUL 0x00FF | ||
1341 | #define SUNI1x10GEXP_BITOFF_PL4IO_OUT_MUL 0 | ||
1342 | |||
1343 | /*---------------------------------------------------------------------------- | ||
1344 | * Register 0x2305: PL4IO Configuration | ||
1345 | * Bit 15 PL4IO_DIP2_ERR_CHK | ||
1346 | * Bit 11 PL4IO_ODAT_DIS | ||
1347 | * Bit 10 PL4IO_TRAIN_DIS | ||
1348 | * Bit 9 PL4IO_OSTAT_DIS | ||
1349 | * Bit 8 PL4IO_ISTAT_DIS | ||
1350 | * Bit 7 PL4IO_NO_ISTAT | ||
1351 | * Bit 6 PL4IO_STAT_OUTSEL | ||
1352 | * Bit 5 PL4IO_INSEL | ||
1353 | * Bit 4 PL4IO_DLSEL | ||
1354 | * Bit 1-0 PL4IO_OUTSEL | ||
1355 | *----------------------------------------------------------------------------*/ | ||
1356 | #define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERR_CHK 0x8000 | ||
1357 | #define SUNI1x10GEXP_BITMSK_PL4IO_ODAT_DIS 0x0800 | ||
1358 | #define SUNI1x10GEXP_BITMSK_PL4IO_TRAIN_DIS 0x0400 | ||
1359 | #define SUNI1x10GEXP_BITMSK_PL4IO_OSTAT_DIS 0x0200 | ||
1360 | #define SUNI1x10GEXP_BITMSK_PL4IO_ISTAT_DIS 0x0100 | ||
1361 | #define SUNI1x10GEXP_BITMSK_PL4IO_NO_ISTAT 0x0080 | ||
1362 | #define SUNI1x10GEXP_BITMSK_PL4IO_STAT_OUTSEL 0x0040 | ||
1363 | #define SUNI1x10GEXP_BITMSK_PL4IO_INSEL 0x0020 | ||
1364 | #define SUNI1x10GEXP_BITMSK_PL4IO_DLSEL 0x0010 | ||
1365 | #define SUNI1x10GEXP_BITMSK_PL4IO_OUTSEL 0x0003 | ||
1366 | #define SUNI1x10GEXP_BITOFF_PL4IO_OUTSEL 0 | ||
1367 | |||
1368 | /*---------------------------------------------------------------------------- | ||
193 | * Register 0x3040: TXXG Configuration Register 1 | 1369 | * Register 0x3040: TXXG Configuration Register 1 |
194 | * Bit 15 TXXG_TXEN0 | 1370 | * Bit 15 TXXG_TXEN0 |
195 | * Bit 13 TXXG_HOSTPAUSE | 1371 | * Bit 13 TXXG_HOSTPAUSE |
@@ -202,12 +1378,266 @@ | |||
202 | * Bit 0 TXXG_SPRE | 1378 | * Bit 0 TXXG_SPRE |
203 | *----------------------------------------------------------------------------*/ | 1379 | *----------------------------------------------------------------------------*/ |
204 | #define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000 | 1380 | #define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000 |
1381 | #define SUNI1x10GEXP_BITMSK_TXXG_HOSTPAUSE 0x2000 | ||
1382 | #define SUNI1x10GEXP_BITMSK_TXXG_IPGT 0x1F80 | ||
205 | #define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7 | 1383 | #define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7 |
206 | #define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020 | 1384 | #define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020 |
207 | #define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010 | 1385 | #define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010 |
208 | #define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008 | 1386 | #define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008 |
209 | #define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004 | 1387 | #define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004 |
210 | #define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002 | 1388 | #define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002 |
1389 | #define SUNI1x10GEXP_BITMSK_TXXG_SPRE 0x0001 | ||
1390 | |||
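Multi-bit fields such as TXXG_IPGT are normally updated with a read-modify-write so the other configuration bits are preserved. A minimal sketch under that assumption follows; pm3393_read() and pm3393_write() are hypothetical register accessors, not the driver's actual API.

    #include <stdint.h>

    #define SUNI1x10GEXP_REG_TXXG_CONFIG_1  0x3040
    #define SUNI1x10GEXP_BITMSK_TXXG_IPGT   0x1F80
    #define SUNI1x10GEXP_BITOFF_TXXG_IPGT   7

    extern uint16_t pm3393_read(uint16_t reg);               /* assumed accessors */
    extern void pm3393_write(uint16_t reg, uint16_t val);

    static void txxg_set_ipgt(uint16_t ipgt)
    {
            uint16_t v = pm3393_read(SUNI1x10GEXP_REG_TXXG_CONFIG_1);

            v &= ~SUNI1x10GEXP_BITMSK_TXXG_IPGT;              /* clear the field */
            v |= (ipgt << SUNI1x10GEXP_BITOFF_TXXG_IPGT) &
                 SUNI1x10GEXP_BITMSK_TXXG_IPGT;               /* insert new value */
            pm3393_write(SUNI1x10GEXP_REG_TXXG_CONFIG_1, v);
    }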
1391 | /*---------------------------------------------------------------------------- | ||
1392 | * Register 0x3041: TXXG Configuration Register 2 | ||
1393 | * Bit 7-0 TXXG_HDRSIZE | ||
1394 | *----------------------------------------------------------------------------*/ | ||
1395 | #define SUNI1x10GEXP_BITMSK_TXXG_HDRSIZE 0x00FF | ||
1396 | |||
1397 | /*---------------------------------------------------------------------------- | ||
1398 | * Register 0x3042: TXXG Configuration Register 3 | ||
1399 | * Bit 15 TXXG_FIFO_ERRE | ||
1400 | * Bit 14 TXXG_FIFO_UDRE | ||
1401 | * Bit 13 TXXG_MAX_LERRE | ||
1402 | * Bit 12 TXXG_MIN_LERRE | ||
1403 | * Bit 11 TXXG_XFERE | ||
1404 | *----------------------------------------------------------------------------*/ | ||
1405 | #define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRE 0x8000 | ||
1406 | #define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRE 0x4000 | ||
1407 | #define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRE 0x2000 | ||
1408 | #define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRE 0x1000 | ||
1409 | #define SUNI1x10GEXP_BITMSK_TXXG_XFERE 0x0800 | ||
1410 | |||
1411 | /*---------------------------------------------------------------------------- | ||
1412 | * Register 0x3043: TXXG Interrupt | ||
1413 | * Bit 15 TXXG_FIFO_ERRI | ||
1414 | * Bit 14 TXXG_FIFO_UDRI | ||
1415 | * Bit 13 TXXG_MAX_LERRI | ||
1416 | * Bit 12 TXXG_MIN_LERRI | ||
1417 | * Bit 11 TXXG_XFERI | ||
1418 | *----------------------------------------------------------------------------*/ | ||
1419 | #define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRI 0x8000 | ||
1420 | #define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRI 0x4000 | ||
1421 | #define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRI 0x2000 | ||
1422 | #define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRI 0x1000 | ||
1423 | #define SUNI1x10GEXP_BITMSK_TXXG_XFERI 0x0800 | ||
1424 | |||
1425 | /*---------------------------------------------------------------------------- | ||
1426 | * Register 0x3044: TXXG Status Register | ||
1427 | * Bit 1 TXXG_TXACTIVE | ||
1428 | * Bit 0 TXXG_PAUSED | ||
1429 | *----------------------------------------------------------------------------*/ | ||
1430 | #define SUNI1x10GEXP_BITMSK_TXXG_TXACTIVE 0x0002 | ||
1431 | #define SUNI1x10GEXP_BITMSK_TXXG_PAUSED 0x0001 | ||
1432 | |||
1433 | /*---------------------------------------------------------------------------- | ||
1434 | * Register 0x3046: TXXG TX_MINFR - Transmit Min Frame Size Register | ||
1435 | * Bit 7-0 TXXG_TX_MINFR | ||
1436 | *----------------------------------------------------------------------------*/ | ||
1437 | #define SUNI1x10GEXP_BITMSK_TXXG_TX_MINFR 0x00FF | ||
1438 | #define SUNI1x10GEXP_BITOFF_TXXG_TX_MINFR 0 | ||
1439 | |||
1440 | /*---------------------------------------------------------------------------- | ||
1441 | * Register 0x3052: TXXG Pause Quantum Value Configuration Register | ||
1442 | * Bit 7-0 TXXG_FC_PAUSE_QNTM | ||
1443 | *----------------------------------------------------------------------------*/ | ||
1444 | #define SUNI1x10GEXP_BITMSK_TXXG_FC_PAUSE_QNTM 0x00FF | ||
1445 | #define SUNI1x10GEXP_BITOFF_TXXG_FC_PAUSE_QNTM 0 | ||
1446 | |||
1447 | /*---------------------------------------------------------------------------- | ||
1448 | * Register 0x3080: XTEF Control | ||
1449 | * Bit 3-0 XTEF_FORCE_PARITY_ERR | ||
1450 | *----------------------------------------------------------------------------*/ | ||
1451 | #define SUNI1x10GEXP_BITMSK_XTEF_FORCE_PARITY_ERR 0x000F | ||
1452 | #define SUNI1x10GEXP_BITOFF_XTEF_FORCE_PARITY_ERR 0 | ||
1453 | |||
1454 | /*---------------------------------------------------------------------------- | ||
1455 | * Register 0x3084: XTEF Interrupt Event Register | ||
1456 | * Bit 0 XTEF_LOST_SYNCI | ||
1457 | *----------------------------------------------------------------------------*/ | ||
1458 | #define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCI 0x0001 | ||
1459 | |||
1460 | /*---------------------------------------------------------------------------- | ||
1461 | * Register 0x3085: XTEF Interrupt Enable Register | ||
1462 | * Bit 0 XTEF_LOST_SYNCE | ||
1463 | *----------------------------------------------------------------------------*/ | ||
1464 | #define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCE 0x0001 | ||
1465 | |||
1466 | /*---------------------------------------------------------------------------- | ||
1467 | * Register 0x3086: XTEF Visibility Register | ||
1468 | * Bit 0 XTEF_LOST_SYNCV | ||
1469 | *----------------------------------------------------------------------------*/ | ||
1470 | #define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCV 0x0001 | ||
1471 | |||
1472 | /*---------------------------------------------------------------------------- | ||
1473 | * Register 0x30C0: TXOAM OAM Configuration | ||
1474 | * Bit 15 TXOAM_HEC_EN | ||
1475 | * Bit 14 TXOAM_EMPTYCODE_EN | ||
1476 | * Bit 13 TXOAM_FORCE_IDLE | ||
1477 | * Bit 12 TXOAM_IGNORE_IDLE | ||
1478 | * Bit 11-6 TXOAM_PX_OVERWRITE | ||
1479 | * Bit 5-0 TXOAM_PX_SEL | ||
1480 | *----------------------------------------------------------------------------*/ | ||
1481 | #define SUNI1x10GEXP_BITMSK_TXOAM_HEC_EN 0x8000 | ||
1482 | #define SUNI1x10GEXP_BITMSK_TXOAM_EMPTYCODE_EN 0x4000 | ||
1483 | #define SUNI1x10GEXP_BITMSK_TXOAM_FORCE_IDLE 0x2000 | ||
1484 | #define SUNI1x10GEXP_BITMSK_TXOAM_IGNORE_IDLE 0x1000 | ||
1485 | #define SUNI1x10GEXP_BITMSK_TXOAM_PX_OVERWRITE 0x0FC0 | ||
1486 | #define SUNI1x10GEXP_BITOFF_TXOAM_PX_OVERWRITE 6 | ||
1487 | #define SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL 0x003F | ||
1488 | #define SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL 0 | ||
1489 | |||
1490 | /*---------------------------------------------------------------------------- | ||
1491 | * Register 0x30C1: TXOAM Mini-Packet Rate Configuration | ||
1492 | * Bit 15 TXOAM_MINIDIS | ||
1493 | * Bit 14 TXOAM_BUSY | ||
1494 | * Bit 13 TXOAM_TRANS_EN | ||
1495 | * Bit 10-0 TXOAM_MINIRATE | ||
1496 | *----------------------------------------------------------------------------*/ | ||
1497 | #define SUNI1x10GEXP_BITMSK_TXOAM_MINIDIS 0x8000 | ||
1498 | #define SUNI1x10GEXP_BITMSK_TXOAM_BUSY 0x4000 | ||
1499 | #define SUNI1x10GEXP_BITMSK_TXOAM_TRANS_EN 0x2000 | ||
1500 | #define SUNI1x10GEXP_BITMSK_TXOAM_MINIRATE 0x07FF | ||
1501 | |||
1502 | /*---------------------------------------------------------------------------- | ||
1503 | * Register 0x30C2: TXOAM Mini-Packet Gap and FIFO Configuration | ||
1504 | * Bit 13-10 TXOAM_FTHRESH | ||
1505 | * Bit 9-6 TXOAM_MINIPOST | ||
1506 | * Bit 5-0 TXOAM_MINIPRE | ||
1507 | *----------------------------------------------------------------------------*/ | ||
1508 | #define SUNI1x10GEXP_BITMSK_TXOAM_FTHRESH 0x3C00 | ||
1509 | #define SUNI1x10GEXP_BITOFF_TXOAM_FTHRESH 10 | ||
1510 | #define SUNI1x10GEXP_BITMSK_TXOAM_MINIPOST 0x03C0 | ||
1511 | #define SUNI1x10GEXP_BITOFF_TXOAM_MINIPOST 6 | ||
1512 | #define SUNI1x10GEXP_BITMSK_TXOAM_MINIPRE 0x003F | ||
1513 | |||
1514 | /*---------------------------------------------------------------------------- | ||
1515 | * Register 0x30C6: TXOAM Interrupt Enable | ||
1516 | * Bit 2 TXOAM_SOP_ERRE | ||
1517 | * Bit 1 TXOAM_OFLE | ||
1518 | * Bit 0 TXOAM_ERRE | ||
1519 | *----------------------------------------------------------------------------*/ | ||
1520 | #define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRE 0x0004 | ||
1521 | #define SUNI1x10GEXP_BITMSK_TXOAM_OFLE 0x0002 | ||
1522 | #define SUNI1x10GEXP_BITMSK_TXOAM_ERRE 0x0001 | ||
1523 | |||
1524 | /*---------------------------------------------------------------------------- | ||
1525 | * Register 0x30C7: TXOAM Interrupt Status | ||
1526 | * Bit 2 TXOAM_SOP_ERRI | ||
1527 | * Bit 1 TXOAM_OFLI | ||
1528 | * Bit 0 TXOAM_ERRI | ||
1529 | *----------------------------------------------------------------------------*/ | ||
1530 | #define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRI 0x0004 | ||
1531 | #define SUNI1x10GEXP_BITMSK_TXOAM_OFLI 0x0002 | ||
1532 | #define SUNI1x10GEXP_BITMSK_TXOAM_ERRI 0x0001 | ||
1533 | |||
1534 | /*---------------------------------------------------------------------------- | ||
1535 | * Register 0x30CF: TXOAM Coset | ||
1536 | * Bit 7-0 TXOAM_COSET | ||
1537 | *----------------------------------------------------------------------------*/ | ||
1538 | #define SUNI1x10GEXP_BITMSK_TXOAM_COSET 0x00FF | ||
1539 | |||
1540 | /*---------------------------------------------------------------------------- | ||
1541 | * Register 0x3200: EFLX Global Configuration | ||
1542 | * Bit 15 EFLX_ERCU_EN | ||
1543 | * Bit 7 EFLX_EN_EDSWT | ||
1544 | *----------------------------------------------------------------------------*/ | ||
1545 | #define SUNI1x10GEXP_BITMSK_EFLX_ERCU_EN 0x8000 | ||
1546 | #define SUNI1x10GEXP_BITMSK_EFLX_EN_EDSWT 0x0080 | ||
1547 | |||
1548 | /*---------------------------------------------------------------------------- | ||
1549 | * Register 0x3201: EFLX ERCU Global Status | ||
1550 | * Bit 13 EFLX_OVF_ERR | ||
1551 | *----------------------------------------------------------------------------*/ | ||
1552 | #define SUNI1x10GEXP_BITMSK_EFLX_OVF_ERR 0x2000 | ||
1553 | |||
1554 | /*---------------------------------------------------------------------------- | ||
1555 | * Register 0x3202: EFLX Indirect Channel Address | ||
1556 | * Bit 15 EFLX_BUSY | ||
1557 | * Bit 14 EFLX_RDWRB | ||
1558 | *----------------------------------------------------------------------------*/ | ||
1559 | #define SUNI1x10GEXP_BITMSK_EFLX_BUSY 0x8000 | ||
1560 | #define SUNI1x10GEXP_BITMSK_EFLX_RDWRB 0x4000 | ||
1561 | |||
1562 | /*---------------------------------------------------------------------------- | ||
1563 | * Register 0x3203: EFLX Indirect Logical FIFO Low Limit | ||
1564 | *----------------------------------------------------------------------------*/ | ||
1565 | #define SUNI1x10GEXP_BITMSK_EFLX_LOLIM 0x03FF | ||
1566 | #define SUNI1x10GEXP_BITOFF_EFLX_LOLIM 0 | ||
1567 | |||
1568 | /*---------------------------------------------------------------------------- | ||
1569 | * Register 0x3204: EFLX Indirect Logical FIFO High Limit | ||
1570 | *----------------------------------------------------------------------------*/ | ||
1571 | #define SUNI1x10GEXP_BITMSK_EFLX_HILIM 0x03FF | ||
1572 | #define SUNI1x10GEXP_BITOFF_EFLX_HILIM 0 | ||
1573 | |||
1574 | /*---------------------------------------------------------------------------- | ||
1575 | * Register 0x3205: EFLX Indirect Full/Almost-Full Status and Limit | ||
1576 | * Bit 15 EFLX_FULL | ||
1577 | * Bit 14 EFLX_AFULL | ||
1578 | * Bit 13-0 EFLX_AFTH | ||
1579 | *----------------------------------------------------------------------------*/ | ||
1580 | #define SUNI1x10GEXP_BITMSK_EFLX_FULL 0x8000 | ||
1581 | #define SUNI1x10GEXP_BITMSK_EFLX_AFULL 0x4000 | ||
1582 | #define SUNI1x10GEXP_BITMSK_EFLX_AFTH 0x3FFF | ||
1583 | #define SUNI1x10GEXP_BITOFF_EFLX_AFTH 0 | ||
1584 | |||
1585 | /*---------------------------------------------------------------------------- | ||
1586 | * Register 0x3206: EFLX Indirect Empty/Almost-Empty Status and Limit | ||
1587 | * Bit 15 EFLX_EMPTY | ||
1588 | * Bit 14 EFLX_AEMPTY | ||
1589 | * Bit 13-0 EFLX_AETH | ||
1590 | *----------------------------------------------------------------------------*/ | ||
1591 | #define SUNI1x10GEXP_BITMSK_EFLX_EMPTY 0x8000 | ||
1592 | #define SUNI1x10GEXP_BITMSK_EFLX_AEMPTY 0x4000 | ||
1593 | #define SUNI1x10GEXP_BITMSK_EFLX_AETH 0x3FFF | ||
1594 | #define SUNI1x10GEXP_BITOFF_EFLX_AETH 0 | ||
1595 | |||
1596 | /*---------------------------------------------------------------------------- | ||
1597 | * Register 0x3207: EFLX Indirect FIFO Cut-Through Threshold | ||
1598 | *----------------------------------------------------------------------------*/ | ||
1599 | #define SUNI1x10GEXP_BITMSK_EFLX_CUT_THRU 0x3FFF | ||
1600 | #define SUNI1x10GEXP_BITOFF_EFLX_CUT_THRU 0 | ||
1601 | |||
1602 | /*---------------------------------------------------------------------------- | ||
1603 | * Register 0x320C: EFLX FIFO Overflow Error Enable | ||
1604 | * Bit 0 EFLX_OVFE | ||
1605 | *----------------------------------------------------------------------------*/ | ||
1606 | #define SUNI1x10GEXP_BITMSK_EFLX_OVFE 0x0001 | ||
1607 | |||
1608 | /*---------------------------------------------------------------------------- | ||
1609 | * Register 0x320D: EFLX FIFO Overflow Error Indication | ||
1610 | * Bit 0 EFLX_OVFI | ||
1611 | *----------------------------------------------------------------------------*/ | ||
1612 | #define SUNI1x10GEXP_BITMSK_EFLX_OVFI 0x0001 | ||
1613 | |||
1614 | /*---------------------------------------------------------------------------- | ||
1615 | * Register 0x3210: EFLX Channel Provision | ||
1616 | * Bit 0 EFLX_PROV | ||
1617 | *----------------------------------------------------------------------------*/ | ||
1618 | #define SUNI1x10GEXP_BITMSK_EFLX_PROV 0x0001 | ||
1619 | |||
1620 | /*---------------------------------------------------------------------------- | ||
1621 | * Register 0x3280: PL4IDU Configuration | ||
1622 | * Bit 2 PL4IDU_SYNCH_ON_TRAIN | ||
1623 | * Bit 1 PL4IDU_EN_PORTS | ||
1624 | * Bit 0 PL4IDU_EN_DFWD | ||
1625 | *----------------------------------------------------------------------------*/ | ||
1626 | #define SUNI1x10GEXP_BITMSK_PL4IDU_SYNCH_ON_TRAIN 0x0004 | ||
1627 | #define SUNI1x10GEXP_BITMSK_PL4IDU_EN_PORTS 0x0002 | ||
1628 | #define SUNI1x10GEXP_BITMSK_PL4IDU_EN_DFWD 0x0001 | ||
1629 | |||
1630 | /*---------------------------------------------------------------------------- | ||
1631 | * Register 0x3282: PL4IDU Interrupt Mask | ||
1632 | * Bit 1 PL4IDU_DIP4E | ||
1633 | *----------------------------------------------------------------------------*/ | ||
1634 | #define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4E 0x0002 | ||
1635 | |||
1636 | /*---------------------------------------------------------------------------- | ||
1637 | * Register 0x3283: PL4IDU Interrupt | ||
1638 | * Bit 1 PL4IDU_DIP4I | ||
1639 | *----------------------------------------------------------------------------*/ | ||
1640 | #define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002 | ||
211 | 1641 | ||
212 | #endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */ | 1642 | #endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */ |
213 | 1643 | ||
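Each register above is described by a BITMSK/BITOFF pair: the mask selects the field inside the 16-bit register and the offset gives its right shift. A minimal standalone sketch of how such a pair is typically used to read and update a field (the helpers are illustrative and not part of the driver; the two macros are copied from the TXXG definitions above):

#include <stdint.h>

/* Mask/offset pair for the TXXG inter-packet gap field, as defined above */
#define SUNI1x10GEXP_BITMSK_TXXG_IPGT   0x1F80
#define SUNI1x10GEXP_BITOFF_TXXG_IPGT   7

/* Extract the IPGT field from a 16-bit register value */
static uint16_t suni_get_ipgt(uint16_t reg)
{
        return (reg & SUNI1x10GEXP_BITMSK_TXXG_IPGT) >> SUNI1x10GEXP_BITOFF_TXXG_IPGT;
}

/* Replace the IPGT field, leaving the other bits of the register untouched */
static uint16_t suni_set_ipgt(uint16_t reg, uint16_t ipgt)
{
        reg &= ~SUNI1x10GEXP_BITMSK_TXXG_IPGT;
        reg |= (uint16_t)(ipgt << SUNI1x10GEXP_BITOFF_TXXG_IPGT) & SUNI1x10GEXP_BITMSK_TXXG_IPGT;
        return reg;
}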
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c new file mode 100644 index 000000000000..0ca0b6e19e43 --- /dev/null +++ b/drivers/net/chelsio/tp.c | |||
@@ -0,0 +1,178 @@ | |||
1 | /* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */ | ||
2 | #include "common.h" | ||
3 | #include "regs.h" | ||
4 | #include "tp.h" | ||
5 | #ifdef CONFIG_CHELSIO_T1_1G | ||
6 | #include "fpga_defs.h" | ||
7 | #endif | ||
8 | |||
9 | struct petp { | ||
10 | adapter_t *adapter; | ||
11 | }; | ||
12 | |||
13 | /* Pause deadlock avoidance parameters */ | ||
14 | #define DROP_MSEC 16 | ||
15 | #define DROP_PKTS_CNT 1 | ||
16 | |||
17 | static void tp_init(adapter_t * ap, const struct tp_params *p, | ||
18 | unsigned int tp_clk) | ||
19 | { | ||
20 | if (t1_is_asic(ap)) { | ||
21 | u32 val; | ||
22 | |||
23 | val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | | ||
24 | F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; | ||
25 | if (!p->pm_size) | ||
26 | val |= F_OFFLOAD_DISABLE; | ||
27 | else | ||
28 | val |= F_TP_IN_ESPI_CHECK_IP_CSUM | | ||
29 | F_TP_IN_ESPI_CHECK_TCP_CSUM; | ||
30 | writel(val, ap->regs + A_TP_IN_CONFIG); | ||
31 | writel(F_TP_OUT_CSPI_CPL | | ||
32 | F_TP_OUT_ESPI_ETHERNET | | ||
33 | F_TP_OUT_ESPI_GENERATE_IP_CSUM | | ||
34 | F_TP_OUT_ESPI_GENERATE_TCP_CSUM, | ||
35 | ap->regs + A_TP_OUT_CONFIG); | ||
36 | writel(V_IP_TTL(64) | | ||
37 | F_PATH_MTU /* IP DF bit */ | | ||
38 | V_5TUPLE_LOOKUP(p->use_5tuple_mode) | | ||
39 | V_SYN_COOKIE_PARAMETER(29), | ||
40 | ap->regs + A_TP_GLOBAL_CONFIG); | ||
41 | /* | ||
42 | * Enable pause frame deadlock prevention. | ||
43 | */ | ||
44 | if (is_T2(ap) && ap->params.nports > 1) { | ||
45 | u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); | ||
46 | |||
47 | writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | | ||
48 | V_DROP_TICKS_CNT(drop_ticks) | | ||
49 | V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), | ||
50 | ap->regs + A_TP_TX_DROP_CONFIG); | ||
51 | } | ||
52 | |||
53 | } | ||
54 | } | ||
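For scale, the drop interval above is converted from milliseconds to core-clock ticks as drop_ticks = DROP_MSEC * (tp_clk / 1000); with an illustrative tp_clk of 125 MHz (a made-up figure, not taken from the driver) that works out to 16 * 125,000 = 2,000,000 ticks programmed into V_DROP_TICKS_CNT.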
55 | |||
56 | void t1_tp_destroy(struct petp *tp) | ||
57 | { | ||
58 | kfree(tp); | ||
59 | } | ||
60 | |||
61 | struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) | ||
62 | { | ||
63 | struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); | ||
64 | if (!tp) | ||
65 | return NULL; | ||
66 | |||
67 | tp->adapter = adapter; | ||
68 | |||
69 | return tp; | ||
70 | } | ||
71 | |||
72 | void t1_tp_intr_enable(struct petp *tp) | ||
73 | { | ||
74 | u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); | ||
75 | |||
76 | #ifdef CONFIG_CHELSIO_T1_1G | ||
77 | if (!t1_is_asic(tp->adapter)) { | ||
78 | /* FPGA */ | ||
79 | writel(0xffffffff, | ||
80 | tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); | ||
81 | writel(tp_intr | FPGA_PCIX_INTERRUPT_TP, | ||
82 | tp->adapter->regs + A_PL_ENABLE); | ||
83 | } else | ||
84 | #endif | ||
85 | { | ||
86 | /* We don't use any TP interrupts */ | ||
87 | writel(0, tp->adapter->regs + A_TP_INT_ENABLE); | ||
88 | writel(tp_intr | F_PL_INTR_TP, | ||
89 | tp->adapter->regs + A_PL_ENABLE); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | void t1_tp_intr_disable(struct petp *tp) | ||
94 | { | ||
95 | u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); | ||
96 | |||
97 | #ifdef CONFIG_CHELSIO_T1_1G | ||
98 | if (!t1_is_asic(tp->adapter)) { | ||
99 | /* FPGA */ | ||
100 | writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); | ||
101 | writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP, | ||
102 | tp->adapter->regs + A_PL_ENABLE); | ||
103 | } else | ||
104 | #endif | ||
105 | { | ||
106 | writel(0, tp->adapter->regs + A_TP_INT_ENABLE); | ||
107 | writel(tp_intr & ~F_PL_INTR_TP, | ||
108 | tp->adapter->regs + A_PL_ENABLE); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | void t1_tp_intr_clear(struct petp *tp) | ||
113 | { | ||
114 | #ifdef CONFIG_CHELSIO_T1_1G | ||
115 | if (!t1_is_asic(tp->adapter)) { | ||
116 | writel(0xffffffff, | ||
117 | tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); | ||
118 | writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE); | ||
119 | return; | ||
120 | } | ||
121 | #endif | ||
122 | writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE); | ||
123 | writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE); | ||
124 | } | ||
125 | |||
126 | int t1_tp_intr_handler(struct petp *tp) | ||
127 | { | ||
128 | u32 cause; | ||
129 | |||
130 | #ifdef CONFIG_CHELSIO_T1_1G | ||
131 | /* FPGA doesn't support TP interrupts. */ | ||
132 | if (!t1_is_asic(tp->adapter)) | ||
133 | return 1; | ||
134 | #endif | ||
135 | |||
136 | cause = readl(tp->adapter->regs + A_TP_INT_CAUSE); | ||
137 | writel(cause, tp->adapter->regs + A_TP_INT_CAUSE); | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable) | ||
142 | { | ||
143 | u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG); | ||
144 | |||
145 | if (enable) | ||
146 | val |= csum_bit; | ||
147 | else | ||
148 | val &= ~csum_bit; | ||
149 | writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG); | ||
150 | } | ||
151 | |||
152 | void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable) | ||
153 | { | ||
154 | set_csum_offload(tp, F_IP_CSUM, enable); | ||
155 | } | ||
156 | |||
157 | void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable) | ||
158 | { | ||
159 | set_csum_offload(tp, F_UDP_CSUM, enable); | ||
160 | } | ||
161 | |||
162 | void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable) | ||
163 | { | ||
164 | set_csum_offload(tp, F_TCP_CSUM, enable); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Initialize TP state. tp_params contains initial settings for some TP | ||
169 | * parameters, particularly the one-time PM and CM settings. | ||
170 | */ | ||
171 | int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk) | ||
172 | { | ||
173 | adapter_t *adapter = tp->adapter; | ||
174 | |||
175 | tp_init(adapter, p, tp_clk); | ||
176 | writel(F_TP_RESET, adapter->regs + A_TP_RESET); | ||
177 | return 0; | ||
178 | } | ||
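Taken together, the functions in tp.c form a small lifecycle API. A hypothetical bring-up/teardown sketch (assuming an initialized adapter_t, a populated struct tp_params and the core clock tp_clk are already available, as they would be in the probe path; the real call sites live elsewhere in the driver and are not part of this patch):

#include "tp.h"

/* Hypothetical helper, not part of the driver: create and program the TP block */
static struct petp *example_tp_bringup(adapter_t *adapter, struct tp_params *p,
                                       unsigned int tp_clk)
{
        struct petp *tp = t1_tp_create(adapter, p);

        if (!tp)
                return NULL;

        t1_tp_reset(tp, p, tp_clk);              /* program and reset TP */
        t1_tp_set_ip_checksum_offload(tp, 1);    /* enable IP csum offload */
        t1_tp_set_tcp_checksum_offload(tp, 1);   /* enable TCP csum offload */
        t1_tp_intr_enable(tp);                   /* unmask TP at the PL level */
        return tp;
}

/* ...and the matching teardown */
static void example_tp_teardown(struct petp *tp)
{
        t1_tp_intr_disable(tp);
        t1_tp_destroy(tp);
}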
diff --git a/drivers/net/chelsio/tp.h b/drivers/net/chelsio/tp.h new file mode 100644 index 000000000000..32fc71e58913 --- /dev/null +++ b/drivers/net/chelsio/tp.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */ | ||
2 | #ifndef CHELSIO_TP_H | ||
3 | #define CHELSIO_TP_H | ||
4 | |||
5 | #include "common.h" | ||
6 | |||
7 | #define TP_MAX_RX_COALESCING_SIZE 16224U | ||
8 | |||
9 | struct tp_mib_statistics { | ||
10 | |||
11 | /* IP */ | ||
12 | u32 ipInReceive_hi; | ||
13 | u32 ipInReceive_lo; | ||
14 | u32 ipInHdrErrors_hi; | ||
15 | u32 ipInHdrErrors_lo; | ||
16 | u32 ipInAddrErrors_hi; | ||
17 | u32 ipInAddrErrors_lo; | ||
18 | u32 ipInUnknownProtos_hi; | ||
19 | u32 ipInUnknownProtos_lo; | ||
20 | u32 ipInDiscards_hi; | ||
21 | u32 ipInDiscards_lo; | ||
22 | u32 ipInDelivers_hi; | ||
23 | u32 ipInDelivers_lo; | ||
24 | u32 ipOutRequests_hi; | ||
25 | u32 ipOutRequests_lo; | ||
26 | u32 ipOutDiscards_hi; | ||
27 | u32 ipOutDiscards_lo; | ||
28 | u32 ipOutNoRoutes_hi; | ||
29 | u32 ipOutNoRoutes_lo; | ||
30 | u32 ipReasmTimeout; | ||
31 | u32 ipReasmReqds; | ||
32 | u32 ipReasmOKs; | ||
33 | u32 ipReasmFails; | ||
34 | |||
35 | u32 reserved[8]; | ||
36 | |||
37 | /* TCP */ | ||
38 | u32 tcpActiveOpens; | ||
39 | u32 tcpPassiveOpens; | ||
40 | u32 tcpAttemptFails; | ||
41 | u32 tcpEstabResets; | ||
42 | u32 tcpOutRsts; | ||
43 | u32 tcpCurrEstab; | ||
44 | u32 tcpInSegs_hi; | ||
45 | u32 tcpInSegs_lo; | ||
46 | u32 tcpOutSegs_hi; | ||
47 | u32 tcpOutSegs_lo; | ||
48 | u32 tcpRetransSeg_hi; | ||
49 | u32 tcpRetransSeg_lo; | ||
50 | u32 tcpInErrs_hi; | ||
51 | u32 tcpInErrs_lo; | ||
52 | u32 tcpRtoMin; | ||
53 | u32 tcpRtoMax; | ||
54 | }; | ||
55 | |||
56 | struct petp; | ||
57 | struct tp_params; | ||
58 | |||
59 | struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p); | ||
60 | void t1_tp_destroy(struct petp *tp); | ||
61 | |||
62 | void t1_tp_intr_disable(struct petp *tp); | ||
63 | void t1_tp_intr_enable(struct petp *tp); | ||
64 | void t1_tp_intr_clear(struct petp *tp); | ||
65 | int t1_tp_intr_handler(struct petp *tp); | ||
66 | |||
67 | void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps); | ||
68 | void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable); | ||
69 | void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable); | ||
70 | void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable); | ||
71 | int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size); | ||
72 | int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk); | ||
73 | #endif | ||
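The *_hi/*_lo pairs in struct tp_mib_statistics split 64-bit hardware counters into two 32-bit words. Assuming the usual convention that the _hi word holds the upper 32 bits (the actual combining is done by t1_tp_get_mib_statistics, which is not shown in this patch), a reader of the structure would reassemble a counter like this:

#include <stdint.h>

/* Illustrative only: combine a split MIB counter into one 64-bit value,
 * assuming _hi carries the upper 32 bits and _lo the lower 32 bits. */
static inline uint64_t mib_counter(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | lo;
}

/* e.g. mib_counter(tps->ipInReceive_hi, tps->ipInReceive_lo) */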
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c new file mode 100644 index 000000000000..85dc3b1dc309 --- /dev/null +++ b/drivers/net/chelsio/vsc7326.c | |||
@@ -0,0 +1,725 @@ | |||
1 | /* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */ | ||
2 | |||
3 | /* Driver for Vitesse VSC7326 (Schaumburg) MAC */ | ||
4 | |||
5 | #include "gmac.h" | ||
6 | #include "elmer0.h" | ||
7 | #include "vsc7326_reg.h" | ||
8 | |||
9 | /* Update fast changing statistics every 15 seconds */ | ||
10 | #define STATS_TICK_SECS 15 | ||
11 | /* 30 minutes for full statistics update */ | ||
12 | #define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS) | ||
13 | |||
14 | #define MAX_MTU 9600 | ||
15 | |||
16 | /* The egress WM value 0x01a01fff should be used only when the | ||
17 | * interface is down (MAC port disabled). This is a workaround | ||
18 | * for disabling the T2/MAC flow-control. When the interface is | ||
19 | * enabled, the WM value should be set to 0x014a03F0. | ||
20 | */ | ||
21 | #define WM_DISABLE 0x01a01fff | ||
22 | #define WM_ENABLE 0x014a03F0 | ||
23 | |||
24 | struct init_table { | ||
25 | u32 addr; | ||
26 | u32 data; | ||
27 | }; | ||
28 | |||
29 | struct _cmac_instance { | ||
30 | u32 index; | ||
31 | u32 ticks; | ||
32 | }; | ||
33 | |||
34 | #define INITBLOCK_SLEEP 0xffffffff | ||
35 | |||
36 | static void vsc_read(adapter_t *adapter, u32 addr, u32 *val) | ||
37 | { | ||
38 | u32 status, vlo, vhi; | ||
39 | int i; | ||
40 | |||
41 | spin_lock_bh(&adapter->mac_lock); | ||
42 | t1_tpi_read(adapter, (addr << 2) + 4, &vlo); | ||
43 | i = 0; | ||
44 | do { | ||
45 | t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo); | ||
46 | t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi); | ||
47 | status = (vhi << 16) | vlo; | ||
48 | i++; | ||
49 | } while (((status & 1) == 0) && (i < 50)); | ||
50 | if (i == 50) | ||
51 | CH_ERR("Invalid tpi read from MAC, breaking loop.\n"); | ||
52 | |||
53 | t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo); | ||
54 | t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi); | ||
55 | |||
56 | *val = (vhi << 16) | vlo; | ||
57 | |||
58 | /* CH_ERR("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n", | ||
59 | ((addr&0xe000)>>13), ((addr&0x1e00)>>9), | ||
60 | ((addr&0x01fe)>>1), *val); */ | ||
61 | spin_unlock_bh(&adapter->mac_lock); | ||
62 | } | ||
63 | |||
64 | static void vsc_write(adapter_t *adapter, u32 addr, u32 data) | ||
65 | { | ||
66 | spin_lock_bh(&adapter->mac_lock); | ||
67 | t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF); | ||
68 | t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF); | ||
69 | /* CH_ERR("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n", | ||
70 | ((addr&0xe000)>>13), ((addr&0x1e00)>>9), | ||
71 | ((addr&0x01fe)>>1), data); */ | ||
72 | spin_unlock_bh(&adapter->mac_lock); | ||
73 | } | ||
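vsc_write() and vsc_read() above move one 32-bit MAC register value over the 16-bit TPI interface in two halves: the low 16 bits at (addr << 2) + 4 and the high 16 bits at addr << 2, with vsc_read() reassembling them as (vhi << 16) | vlo. A standalone sketch of the same split/reassembly arithmetic (illustrative, not driver code):

#include <stdint.h>

/* Split a 32-bit register value into the two 16-bit TPI halves... */
static void tpi_split(uint32_t data, uint16_t *hi, uint16_t *lo)
{
        *lo = data & 0xFFFF;            /* written to (addr << 2) + 4 */
        *hi = (data >> 16) & 0xFFFF;    /* written to  addr << 2      */
}

/* ...and reassemble them the way vsc_read() does */
static uint32_t tpi_join(uint16_t hi, uint16_t lo)
{
        return ((uint32_t)hi << 16) | lo;
}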
74 | |||
75 | /* Hard reset the MAC. This wipes out *all* configuration. */ | ||
76 | static void vsc7326_full_reset(adapter_t* adapter) | ||
77 | { | ||
78 | u32 val; | ||
79 | u32 result = 0xffff; | ||
80 | |||
81 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
82 | val &= ~1; | ||
83 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
84 | udelay(2); | ||
85 | val |= 0x1; /* Enable the MAC itself */ | ||
86 | val |= 0x800; /* Turn off the red LED */ | ||
87 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
88 | mdelay(1); | ||
89 | vsc_write(adapter, REG_SW_RESET, 0x80000001); | ||
90 | do { | ||
91 | mdelay(1); | ||
92 | vsc_read(adapter, REG_SW_RESET, &result); | ||
93 | } while (result != 0x0); | ||
94 | } | ||
95 | |||
96 | static struct init_table vsc7326_reset[] = { | ||
97 | { REG_IFACE_MODE, 0x00000000 }, | ||
98 | { REG_CRC_CFG, 0x00000020 }, | ||
99 | { REG_PLL_CLK_SPEED, 0x00050c00 }, | ||
100 | { REG_PLL_CLK_SPEED, 0x00050c00 }, | ||
101 | { REG_MSCH, 0x00002f14 }, | ||
102 | { REG_SPI4_MISC, 0x00040409 }, | ||
103 | { REG_SPI4_DESKEW, 0x00080000 }, | ||
104 | { REG_SPI4_ING_SETUP2, 0x08080004 }, | ||
105 | { REG_SPI4_ING_SETUP0, 0x04111004 }, | ||
106 | { REG_SPI4_EGR_SETUP0, 0x80001a04 }, | ||
107 | { REG_SPI4_ING_SETUP1, 0x02010000 }, | ||
108 | { REG_AGE_INC(0), 0x00000000 }, | ||
109 | { REG_AGE_INC(1), 0x00000000 }, | ||
110 | { REG_ING_CONTROL, 0x0a200011 }, | ||
111 | { REG_EGR_CONTROL, 0xa0010091 }, | ||
112 | }; | ||
113 | |||
114 | static struct init_table vsc7326_portinit[4][22] = { | ||
115 | { /* Port 0 */ | ||
116 | /* FIFO setup */ | ||
117 | { REG_DBG(0), 0x000004f0 }, | ||
118 | { REG_HDX(0), 0x00073101 }, | ||
119 | { REG_TEST(0,0), 0x00000022 }, | ||
120 | { REG_TEST(1,0), 0x00000022 }, | ||
121 | { REG_TOP_BOTTOM(0,0), 0x003f0000 }, | ||
122 | { REG_TOP_BOTTOM(1,0), 0x00120000 }, | ||
123 | { REG_HIGH_LOW_WM(0,0), 0x07460757 }, | ||
124 | { REG_HIGH_LOW_WM(1,0), WM_DISABLE }, | ||
125 | { REG_CT_THRHLD(0,0), 0x00000000 }, | ||
126 | { REG_CT_THRHLD(1,0), 0x00000000 }, | ||
127 | { REG_BUCKE(0), 0x0002ffff }, | ||
128 | { REG_BUCKI(0), 0x0002ffff }, | ||
129 | { REG_TEST(0,0), 0x00000020 }, | ||
130 | { REG_TEST(1,0), 0x00000020 }, | ||
131 | /* Port config */ | ||
132 | { REG_MAX_LEN(0), 0x00002710 }, | ||
133 | { REG_PORT_FAIL(0), 0x00000002 }, | ||
134 | { REG_NORMALIZER(0), 0x00000a64 }, | ||
135 | { REG_DENORM(0), 0x00000010 }, | ||
136 | { REG_STICK_BIT(0), 0x03baa370 }, | ||
137 | { REG_DEV_SETUP(0), 0x00000083 }, | ||
138 | { REG_DEV_SETUP(0), 0x00000082 }, | ||
139 | { REG_MODE_CFG(0), 0x0200259f }, | ||
140 | }, | ||
141 | { /* Port 1 */ | ||
142 | /* FIFO setup */ | ||
143 | { REG_DBG(1), 0x000004f0 }, | ||
144 | { REG_HDX(1), 0x00073101 }, | ||
145 | { REG_TEST(0,1), 0x00000022 }, | ||
146 | { REG_TEST(1,1), 0x00000022 }, | ||
147 | { REG_TOP_BOTTOM(0,1), 0x007e003f }, | ||
148 | { REG_TOP_BOTTOM(1,1), 0x00240012 }, | ||
149 | { REG_HIGH_LOW_WM(0,1), 0x07460757 }, | ||
150 | { REG_HIGH_LOW_WM(1,1), WM_DISABLE }, | ||
151 | { REG_CT_THRHLD(0,1), 0x00000000 }, | ||
152 | { REG_CT_THRHLD(1,1), 0x00000000 }, | ||
153 | { REG_BUCKE(1), 0x0002ffff }, | ||
154 | { REG_BUCKI(1), 0x0002ffff }, | ||
155 | { REG_TEST(0,1), 0x00000020 }, | ||
156 | { REG_TEST(1,1), 0x00000020 }, | ||
157 | /* Port config */ | ||
158 | { REG_MAX_LEN(1), 0x00002710 }, | ||
159 | { REG_PORT_FAIL(1), 0x00000002 }, | ||
160 | { REG_NORMALIZER(1), 0x00000a64 }, | ||
161 | { REG_DENORM(1), 0x00000010 }, | ||
162 | { REG_STICK_BIT(1), 0x03baa370 }, | ||
163 | { REG_DEV_SETUP(1), 0x00000083 }, | ||
164 | { REG_DEV_SETUP(1), 0x00000082 }, | ||
165 | { REG_MODE_CFG(1), 0x0200259f }, | ||
166 | }, | ||
167 | { /* Port 2 */ | ||
168 | /* FIFO setup */ | ||
169 | { REG_DBG(2), 0x000004f0 }, | ||
170 | { REG_HDX(2), 0x00073101 }, | ||
171 | { REG_TEST(0,2), 0x00000022 }, | ||
172 | { REG_TEST(1,2), 0x00000022 }, | ||
173 | { REG_TOP_BOTTOM(0,2), 0x00bd007e }, | ||
174 | { REG_TOP_BOTTOM(1,2), 0x00360024 }, | ||
175 | { REG_HIGH_LOW_WM(0,2), 0x07460757 }, | ||
176 | { REG_HIGH_LOW_WM(1,2), WM_DISABLE }, | ||
177 | { REG_CT_THRHLD(0,2), 0x00000000 }, | ||
178 | { REG_CT_THRHLD(1,2), 0x00000000 }, | ||
179 | { REG_BUCKE(2), 0x0002ffff }, | ||
180 | { REG_BUCKI(2), 0x0002ffff }, | ||
181 | { REG_TEST(0,2), 0x00000020 }, | ||
182 | { REG_TEST(1,2), 0x00000020 }, | ||
183 | /* Port config */ | ||
184 | { REG_MAX_LEN(2), 0x00002710 }, | ||
185 | { REG_PORT_FAIL(2), 0x00000002 }, | ||
186 | { REG_NORMALIZER(2), 0x00000a64 }, | ||
187 | { REG_DENORM(2), 0x00000010 }, | ||
188 | { REG_STICK_BIT(2), 0x03baa370 }, | ||
189 | { REG_DEV_SETUP(2), 0x00000083 }, | ||
190 | { REG_DEV_SETUP(2), 0x00000082 }, | ||
191 | { REG_MODE_CFG(2), 0x0200259f }, | ||
192 | }, | ||
193 | { /* Port 3 */ | ||
194 | /* FIFO setup */ | ||
195 | { REG_DBG(3), 0x000004f0 }, | ||
196 | { REG_HDX(3), 0x00073101 }, | ||
197 | { REG_TEST(0,3), 0x00000022 }, | ||
198 | { REG_TEST(1,3), 0x00000022 }, | ||
199 | { REG_TOP_BOTTOM(0,3), 0x00fc00bd }, | ||
200 | { REG_TOP_BOTTOM(1,3), 0x00480036 }, | ||
201 | { REG_HIGH_LOW_WM(0,3), 0x07460757 }, | ||
202 | { REG_HIGH_LOW_WM(1,3), WM_DISABLE }, | ||
203 | { REG_CT_THRHLD(0,3), 0x00000000 }, | ||
204 | { REG_CT_THRHLD(1,3), 0x00000000 }, | ||
205 | { REG_BUCKE(3), 0x0002ffff }, | ||
206 | { REG_BUCKI(3), 0x0002ffff }, | ||
207 | { REG_TEST(0,3), 0x00000020 }, | ||
208 | { REG_TEST(1,3), 0x00000020 }, | ||
209 | /* Port config */ | ||
210 | { REG_MAX_LEN(3), 0x00002710 }, | ||
211 | { REG_PORT_FAIL(3), 0x00000002 }, | ||
212 | { REG_NORMALIZER(3), 0x00000a64 }, | ||
213 | { REG_DENORM(3), 0x00000010 }, | ||
214 | { REG_STICK_BIT(3), 0x03baa370 }, | ||
215 | { REG_DEV_SETUP(3), 0x00000083 }, | ||
216 | { REG_DEV_SETUP(3), 0x00000082 }, | ||
217 | { REG_MODE_CFG(3), 0x0200259f }, | ||
218 | }, | ||
219 | }; | ||
220 | |||
221 | static void run_table(adapter_t *adapter, struct init_table *ib, int len) | ||
222 | { | ||
223 | int i; | ||
224 | |||
225 | for (i = 0; i < len; i++) { | ||
226 | if (ib[i].addr == INITBLOCK_SLEEP) { | ||
227 | udelay( ib[i].data ); | ||
228 | CH_ERR("sleep %d us\n",ib[i].data); | ||
229 | } else { | ||
230 | vsc_write( adapter, ib[i].addr, ib[i].data ); | ||
231 | } | ||
232 | } | ||
233 | } | ||
234 | |||
235 | static int bist_rd(adapter_t *adapter, int moduleid, int address) | ||
236 | { | ||
237 | int data=0; | ||
238 | u32 result=0; | ||
239 | |||
240 | if( (address != 0x0) && | ||
241 | (address != 0x1) && | ||
242 | (address != 0x2) && | ||
243 | (address != 0xd) && | ||
244 | (address != 0xe)) | ||
245 | CH_ERR("No bist address: 0x%x\n", address); | ||
246 | |||
247 | data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | | ||
248 | ((moduleid & 0xff) << 0)); | ||
249 | vsc_write(adapter, REG_RAM_BIST_CMD, data); | ||
250 | |||
251 | udelay(10); | ||
252 | |||
253 | vsc_read(adapter, REG_RAM_BIST_RESULT, &result); | ||
254 | if((result & (1<<9)) != 0x0) | ||
255 | CH_ERR("Still in bist read: 0x%x\n", result); | ||
256 | else if((result & (1<<8)) != 0x0) | ||
257 | CH_ERR("bist read error: 0x%x\n", result); | ||
258 | |||
259 | return(result & 0xff); | ||
260 | } | ||
261 | |||
262 | static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) | ||
263 | { | ||
264 | int data=0; | ||
265 | u32 result=0; | ||
266 | |||
267 | if( (address != 0x0) && | ||
268 | (address != 0x1) && | ||
269 | (address != 0x2) && | ||
270 | (address != 0xd) && | ||
271 | (address != 0xe)) | ||
272 | CH_ERR("No bist address: 0x%x\n", address); | ||
273 | |||
274 | if( value>255 ) | ||
275 | CH_ERR("Suspicious write out of range value: 0x%x\n", value); | ||
276 | |||
277 | data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | | ||
278 | ((moduleid & 0xff) << 0)); | ||
279 | vsc_write(adapter, REG_RAM_BIST_CMD, data); | ||
280 | |||
281 | udelay(5); | ||
282 | |||
283 | vsc_read(adapter, REG_RAM_BIST_CMD, &result); | ||
284 | if((result & (1<<27)) != 0x0) | ||
285 | CH_ERR("Still in bist write: 0x%x\n", result); | ||
286 | else if((result & (1<<26)) != 0x0) | ||
287 | CH_ERR("bist write error: 0x%x\n", result); | ||
288 | |||
289 | return(0); | ||
290 | } | ||
291 | |||
292 | static int run_bist(adapter_t *adapter, int moduleid) | ||
293 | { | ||
294 | /*run bist*/ | ||
295 | (void) bist_wr(adapter,moduleid, 0x00, 0x02); | ||
296 | (void) bist_wr(adapter,moduleid, 0x01, 0x01); | ||
297 | |||
298 | return(0); | ||
299 | } | ||
300 | |||
301 | static int check_bist(adapter_t *adapter, int moduleid) | ||
302 | { | ||
303 | int result=0; | ||
304 | int column=0; | ||
305 | /*check bist*/ | ||
306 | result = bist_rd(adapter,moduleid, 0x02); | ||
307 | column = ((bist_rd(adapter,moduleid, 0x0e)<<8) + | ||
308 | (bist_rd(adapter,moduleid, 0x0d))); | ||
309 | if ((result & 3) != 0x3) | ||
310 | CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", | ||
311 | result, moduleid, column); | ||
312 | return(0); | ||
313 | } | ||
314 | |||
315 | static int enable_mem(adapter_t *adapter, int moduleid) | ||
316 | { | ||
317 | /*enable mem*/ | ||
318 | (void) bist_wr(adapter,moduleid, 0x00, 0x00); | ||
319 | return(0); | ||
320 | } | ||
321 | |||
322 | static int run_bist_all(adapter_t *adapter) | ||
323 | { | ||
324 | int port=0; | ||
325 | u32 val=0; | ||
326 | |||
327 | vsc_write(adapter, REG_MEM_BIST, 0x5); | ||
328 | vsc_read(adapter, REG_MEM_BIST, &val); | ||
329 | |||
330 | for(port=0; port<12; port++){ | ||
331 | vsc_write(adapter, REG_DEV_SETUP(port), 0x0); | ||
332 | } | ||
333 | |||
334 | udelay(300); | ||
335 | vsc_write(adapter, REG_SPI4_MISC, 0x00040409); | ||
336 | udelay(300); | ||
337 | |||
338 | (void) run_bist(adapter,13); | ||
339 | (void) run_bist(adapter,14); | ||
340 | (void) run_bist(adapter,20); | ||
341 | (void) run_bist(adapter,21); | ||
342 | mdelay(200); | ||
343 | (void) check_bist(adapter,13); | ||
344 | (void) check_bist(adapter,14); | ||
345 | (void) check_bist(adapter,20); | ||
346 | (void) check_bist(adapter,21); | ||
347 | udelay(100); | ||
348 | (void) enable_mem(adapter,13); | ||
349 | (void) enable_mem(adapter,14); | ||
350 | (void) enable_mem(adapter,20); | ||
351 | (void) enable_mem(adapter,21); | ||
352 | udelay(300); | ||
353 | vsc_write(adapter, REG_SPI4_MISC, 0x60040400); | ||
354 | udelay(300); | ||
355 | for(port=0; port<12; port++){ | ||
356 | vsc_write(adapter, REG_DEV_SETUP(port), 0x1); | ||
357 | } | ||
358 | udelay(300); | ||
359 | vsc_write(adapter, REG_MEM_BIST, 0x0); | ||
360 | mdelay(10); | ||
361 | return(0); | ||
362 | } | ||
363 | |||
364 | static int mac_intr_handler(struct cmac *mac) | ||
365 | { | ||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | static int mac_intr_enable(struct cmac *mac) | ||
370 | { | ||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static int mac_intr_disable(struct cmac *mac) | ||
375 | { | ||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static int mac_intr_clear(struct cmac *mac) | ||
380 | { | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | /* Expect MAC address to be in network byte order. */ | ||
385 | static int mac_set_address(struct cmac* mac, u8 addr[6]) | ||
386 | { | ||
387 | u32 val; | ||
388 | int port = mac->instance->index; | ||
389 | |||
390 | vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port), | ||
391 | (addr[3] << 16) | (addr[4] << 8) | addr[5]); | ||
392 | vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port), | ||
393 | (addr[0] << 16) | (addr[1] << 8) | addr[2]); | ||
394 | |||
395 | vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val); | ||
396 | val &= ~0xf0000000; | ||
397 | vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28)); | ||
398 | |||
399 | vsc_write(mac->adapter, REG_ING_FFILT_MASK0, | ||
400 | 0xffff0000 | (addr[4] << 8) | addr[5]); | ||
401 | vsc_write(mac->adapter, REG_ING_FFILT_MASK1, | ||
402 | 0xffff0000 | (addr[2] << 8) | addr[3]); | ||
403 | vsc_write(mac->adapter, REG_ING_FFILT_MASK2, | ||
404 | 0xffff0000 | (addr[0] << 8) | addr[1]); | ||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static int mac_get_address(struct cmac *mac, u8 addr[6]) | ||
409 | { | ||
410 | u32 addr_lo, addr_hi; | ||
411 | int port = mac->instance->index; | ||
412 | |||
413 | vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo); | ||
414 | vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi); | ||
415 | |||
416 | addr[0] = (u8) (addr_hi >> 16); | ||
417 | addr[1] = (u8) (addr_hi >> 8); | ||
418 | addr[2] = (u8) addr_hi; | ||
419 | addr[3] = (u8) (addr_lo >> 16); | ||
420 | addr[4] = (u8) (addr_lo >> 8); | ||
421 | addr[5] = (u8) addr_lo; | ||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | /* This is intended to reset a port, not the whole MAC */ | ||
426 | static int mac_reset(struct cmac *mac) | ||
427 | { | ||
428 | int index = mac->instance->index; | ||
429 | |||
430 | run_table(mac->adapter, vsc7326_portinit[index], | ||
431 | ARRAY_SIZE(vsc7326_portinit[index])); | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm) | ||
437 | { | ||
438 | u32 v; | ||
439 | int port = mac->instance->index; | ||
440 | |||
441 | vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v); | ||
442 | v |= 1 << 12; | ||
443 | |||
444 | if (t1_rx_mode_promisc(rm)) | ||
445 | v &= ~(1 << (port + 16)); | ||
446 | else | ||
447 | v |= 1 << (port + 16); | ||
448 | |||
449 | vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v); | ||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | static int mac_set_mtu(struct cmac *mac, int mtu) | ||
454 | { | ||
455 | int port = mac->instance->index; | ||
456 | |||
457 | if (mtu > MAX_MTU) | ||
458 | return -EINVAL; | ||
459 | |||
460 | /* max_len includes header and FCS */ | ||
461 | vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4); | ||
462 | return 0; | ||
463 | } | ||
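The "+ 14 + 4" above accounts for the 14-byte Ethernet header (two 6-byte addresses plus the 2-byte type field) and the 4-byte FCS, so a standard 1500-byte MTU programs REG_MAX_LEN to 1518.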
464 | |||
465 | static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, | ||
466 | int fc) | ||
467 | { | ||
468 | u32 v; | ||
469 | int enable, port = mac->instance->index; | ||
470 | |||
471 | if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 && | ||
472 | speed != SPEED_1000) | ||
473 | return -1; | ||
474 | if (duplex > 0 && duplex != DUPLEX_FULL) | ||
475 | return -1; | ||
476 | |||
477 | if (speed >= 0) { | ||
478 | vsc_read(mac->adapter, REG_MODE_CFG(port), &v); | ||
479 | enable = v & 3; /* save tx/rx enables */ | ||
480 | v &= ~0xf; | ||
481 | v |= 4; /* full duplex */ | ||
482 | if (speed == SPEED_1000) | ||
483 | v |= 8; /* GigE */ | ||
484 | enable |= v; | ||
485 | vsc_write(mac->adapter, REG_MODE_CFG(port), v); | ||
486 | |||
487 | if (speed == SPEED_1000) | ||
488 | v = 0x82; | ||
489 | else if (speed == SPEED_100) | ||
490 | v = 0x84; | ||
491 | else /* SPEED_10 */ | ||
492 | v = 0x86; | ||
493 | vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */ | ||
494 | vsc_write(mac->adapter, REG_DEV_SETUP(port), v); | ||
495 | vsc_read(mac->adapter, REG_DBG(port), &v); | ||
496 | v &= ~0xff00; | ||
497 | if (speed == SPEED_1000) | ||
498 | v |= 0x400; | ||
499 | else if (speed == SPEED_100) | ||
500 | v |= 0x2000; | ||
501 | else /* SPEED_10 */ | ||
502 | v |= 0xff00; | ||
503 | vsc_write(mac->adapter, REG_DBG(port), v); | ||
504 | |||
505 | vsc_write(mac->adapter, REG_TX_IFG(port), | ||
506 | speed == SPEED_1000 ? 5 : 0x11); | ||
507 | if (duplex == DUPLEX_HALF) | ||
508 | enable = 0x0; /* 100 or 10 */ | ||
509 | else if (speed == SPEED_1000) | ||
510 | enable = 0xc; | ||
511 | else /* SPEED_100 or 10 */ | ||
512 | enable = 0x4; | ||
513 | enable |= 0x9 << 10; /* IFG1 */ | ||
514 | enable |= 0x6 << 6; /* IFG2 */ | ||
515 | enable |= 0x1 << 4; /* VLAN */ | ||
516 | enable |= 0x3; /* RX/TX EN */ | ||
517 | vsc_write(mac->adapter, REG_MODE_CFG(port), enable); | ||
518 | |||
519 | } | ||
520 | |||
521 | vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v); | ||
522 | v &= 0xfff0ffff; | ||
523 | v |= 0x20000; /* xon/xoff */ | ||
524 | if (fc & PAUSE_RX) | ||
525 | v |= 0x40000; | ||
526 | if (fc & PAUSE_TX) | ||
527 | v |= 0x80000; | ||
528 | if (fc == (PAUSE_RX | PAUSE_TX)) | ||
529 | v |= 0x10000; | ||
530 | vsc_write(mac->adapter, REG_PAUSE_CFG(port), v); | ||
531 | return 0; | ||
532 | } | ||
533 | |||
534 | static int mac_enable(struct cmac *mac, int which) | ||
535 | { | ||
536 | u32 val; | ||
537 | int port = mac->instance->index; | ||
538 | |||
539 | /* Write the correct WM value when the port is enabled. */ | ||
540 | vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE); | ||
541 | |||
542 | vsc_read(mac->adapter, REG_MODE_CFG(port), &val); | ||
543 | if (which & MAC_DIRECTION_RX) | ||
544 | val |= 0x2; | ||
545 | if (which & MAC_DIRECTION_TX) | ||
546 | val |= 1; | ||
547 | vsc_write(mac->adapter, REG_MODE_CFG(port), val); | ||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static int mac_disable(struct cmac *mac, int which) | ||
552 | { | ||
553 | u32 val; | ||
554 | int i, port = mac->instance->index; | ||
555 | |||
556 | /* Reset the port; this also writes the correct WM value */ | ||
557 | mac_reset(mac); | ||
558 | |||
559 | vsc_read(mac->adapter, REG_MODE_CFG(port), &val); | ||
560 | if (which & MAC_DIRECTION_RX) | ||
561 | val &= ~0x2; | ||
562 | if (which & MAC_DIRECTION_TX) | ||
563 | val &= ~0x1; | ||
564 | vsc_write(mac->adapter, REG_MODE_CFG(port), val); | ||
565 | vsc_read(mac->adapter, REG_MODE_CFG(port), &val); | ||
566 | |||
567 | /* Clear stats */ | ||
568 | for (i = 0; i <= 0x3a; ++i) | ||
569 | vsc_write(mac->adapter, CRA(4, port, i), 0); | ||
570 | |||
571 | /* Clear software counters */ | ||
572 | memset(&mac->stats, 0, sizeof(struct cmac_statistics)); | ||
573 | |||
574 | return 0; | ||
575 | } | ||
576 | |||
577 | static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat) | ||
578 | { | ||
579 | u32 v, lo; | ||
580 | |||
581 | vsc_read(mac->adapter, addr, &v); | ||
582 | lo = *stat; | ||
583 | *stat = *stat - lo + v; | ||
584 | |||
585 | if (v == 0) | ||
586 | return; | ||
587 | |||
588 | if (v < lo) | ||
589 | *stat += (1ULL << 32); | ||
590 | } | ||
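rmon_update() folds a free-running 32-bit hardware counter into a 64-bit software accumulator: it swaps the previously sampled low word (lo) for the new reading (v) and, when v < lo, assumes exactly one wrap and adds 2^32. A standalone worked example of the same rule (illustrative, outside the driver):

#include <stdint.h>

/* Same accumulation rule as rmon_update(), on plain integers */
static void rmon_accumulate(uint64_t *stat, uint32_t v)
{
        uint32_t lo = (uint32_t)*stat;   /* last hardware sample */

        *stat = *stat - lo + v;          /* swap in the new sample */
        if (v == 0)
                return;                  /* the driver skips the wrap check here */
        if (v < lo)
                *stat += 1ULL << 32;     /* counter wrapped since the last read */
}

/*
 * Example: *stat == 0x100000050 (previous sample 0x50, one earlier wrap)
 * and the hardware now reads v == 0x10.  The counter wrapped again, so the
 * result is 0x100000050 - 0x50 + 0x10 + 2^32 == 0x200000010.
 */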
591 | |||
592 | static void port_stats_update(struct cmac *mac) | ||
593 | { | ||
594 | int port = mac->instance->index; | ||
595 | |||
596 | /* Rx stats */ | ||
597 | rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK); | ||
598 | rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad); | ||
599 | rmon_update(mac, REG_RX_UNICAST(port), &mac->stats.RxUnicastFramesOK); | ||
600 | rmon_update(mac, REG_RX_MULTICAST(port), | ||
601 | &mac->stats.RxMulticastFramesOK); | ||
602 | rmon_update(mac, REG_RX_BROADCAST(port), | ||
603 | &mac->stats.RxBroadcastFramesOK); | ||
604 | rmon_update(mac, REG_CRC(port), &mac->stats.RxFCSErrors); | ||
605 | rmon_update(mac, REG_RX_ALIGNMENT(port), &mac->stats.RxAlignErrors); | ||
606 | rmon_update(mac, REG_RX_OVERSIZE(port), | ||
607 | &mac->stats.RxFrameTooLongErrors); | ||
608 | rmon_update(mac, REG_RX_PAUSE(port), &mac->stats.RxPauseFrames); | ||
609 | rmon_update(mac, REG_RX_JABBERS(port), &mac->stats.RxJabberErrors); | ||
610 | rmon_update(mac, REG_RX_FRAGMENTS(port), &mac->stats.RxRuntErrors); | ||
611 | rmon_update(mac, REG_RX_UNDERSIZE(port), &mac->stats.RxRuntErrors); | ||
612 | rmon_update(mac, REG_RX_SYMBOL_CARRIER(port), | ||
613 | &mac->stats.RxSymbolErrors); | ||
614 | rmon_update(mac, REG_RX_SIZE_1519_TO_MAX(port), | ||
615 | &mac->stats.RxJumboFramesOK); | ||
616 | |||
617 | /* Tx stats (skip collision stats as we are full-duplex only) */ | ||
618 | rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK); | ||
619 | rmon_update(mac, REG_TX_UNICAST(port), &mac->stats.TxUnicastFramesOK); | ||
620 | rmon_update(mac, REG_TX_MULTICAST(port), | ||
621 | &mac->stats.TxMulticastFramesOK); | ||
622 | rmon_update(mac, REG_TX_BROADCAST(port), | ||
623 | &mac->stats.TxBroadcastFramesOK); | ||
624 | rmon_update(mac, REG_TX_PAUSE(port), &mac->stats.TxPauseFrames); | ||
625 | rmon_update(mac, REG_TX_UNDERRUN(port), &mac->stats.TxUnderrun); | ||
626 | rmon_update(mac, REG_TX_SIZE_1519_TO_MAX(port), | ||
627 | &mac->stats.TxJumboFramesOK); | ||
628 | } | ||
629 | |||
630 | /* | ||
631 | * This function is called periodically to accumulate the current values of the | ||
632 | * RMON counters into the port statistics. Since the counters are only 32 bits | ||
633 | * some of them can overflow in less than a minute at GigE speeds, so this | ||
634 | * function should be called every 30 seconds or so. | ||
635 | * | ||
636 | * To cut down on reading costs we update only the octet counters at each tick | ||
637 | * and do a full update at major ticks, which can be every 30 minutes or more. | ||
638 | */ | ||
639 | static const struct cmac_statistics *mac_update_statistics(struct cmac *mac, | ||
640 | int flag) | ||
641 | { | ||
642 | if (flag == MAC_STATS_UPDATE_FULL || | ||
643 | mac->instance->ticks >= MAJOR_UPDATE_TICKS) { | ||
644 | port_stats_update(mac); | ||
645 | mac->instance->ticks = 0; | ||
646 | } else { | ||
647 | int port = mac->instance->index; | ||
648 | |||
649 | rmon_update(mac, REG_RX_OK_BYTES(port), | ||
650 | &mac->stats.RxOctetsOK); | ||
651 | rmon_update(mac, REG_RX_BAD_BYTES(port), | ||
652 | &mac->stats.RxOctetsBad); | ||
653 | rmon_update(mac, REG_TX_OK_BYTES(port), | ||
654 | &mac->stats.TxOctetsOK); | ||
655 | mac->instance->ticks++; | ||
656 | } | ||
657 | return &mac->stats; | ||
658 | } | ||
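As the comment above notes, 32-bit byte counters wrap quickly: at GigE line rate (roughly 125,000,000 bytes/s) a 32-bit octet counter covers only 2^32 / 125,000,000 ≈ 34 seconds, which is why the octet counters are re-read every STATS_TICK_SECS tick while the full set is refreshed only every MAJOR_UPDATE_TICKS.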
659 | |||
660 | static void mac_destroy(struct cmac *mac) | ||
661 | { | ||
662 | kfree(mac); | ||
663 | } | ||
664 | |||
665 | static struct cmac_ops vsc7326_ops = { | ||
666 | .destroy = mac_destroy, | ||
667 | .reset = mac_reset, | ||
668 | .interrupt_handler = mac_intr_handler, | ||
669 | .interrupt_enable = mac_intr_enable, | ||
670 | .interrupt_disable = mac_intr_disable, | ||
671 | .interrupt_clear = mac_intr_clear, | ||
672 | .enable = mac_enable, | ||
673 | .disable = mac_disable, | ||
674 | .set_mtu = mac_set_mtu, | ||
675 | .set_rx_mode = mac_set_rx_mode, | ||
676 | .set_speed_duplex_fc = mac_set_speed_duplex_fc, | ||
677 | .statistics_update = mac_update_statistics, | ||
678 | .macaddress_get = mac_get_address, | ||
679 | .macaddress_set = mac_set_address, | ||
680 | }; | ||
681 | |||
682 | static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index) | ||
683 | { | ||
684 | struct cmac *mac; | ||
685 | u32 val; | ||
686 | int i; | ||
687 | |||
688 | mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); | ||
689 | if (!mac) return NULL; | ||
690 | |||
691 | mac->ops = &vsc7326_ops; | ||
692 | mac->instance = (cmac_instance *)(mac + 1); | ||
693 | mac->adapter = adapter; | ||
694 | |||
695 | mac->instance->index = index; | ||
696 | mac->instance->ticks = 0; | ||
697 | |||
698 | i = 0; | ||
699 | do { | ||
700 | u32 vhi, vlo; | ||
701 | |||
702 | vhi = vlo = 0; | ||
703 | t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo); | ||
704 | udelay(1); | ||
705 | t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi); | ||
706 | udelay(5); | ||
707 | val = (vhi << 16) | vlo; | ||
708 | } while ((++i < 10000) && (val == 0xffffffff)); | ||
709 | |||
710 | return mac; | ||
711 | } | ||
712 | |||
713 | static int vsc7326_mac_reset(adapter_t *adapter) | ||
714 | { | ||
715 | vsc7326_full_reset(adapter); | ||
716 | (void) run_bist_all(adapter); | ||
717 | run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset)); | ||
718 | return 0; | ||
719 | } | ||
720 | |||
721 | struct gmac t1_vsc7326_ops = { | ||
722 | .stats_update_period = STATS_TICK_SECS, | ||
723 | .create = vsc7326_mac_create, | ||
724 | .reset = vsc7326_mac_reset, | ||
725 | }; | ||
diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/chelsio/vsc7326_reg.h new file mode 100644 index 000000000000..491bcf75c4fb --- /dev/null +++ b/drivers/net/chelsio/vsc7326_reg.h | |||
@@ -0,0 +1,286 @@ | |||
1 | /* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */ | ||
2 | #ifndef _VSC7321_REG_H_ | ||
3 | #define _VSC7321_REG_H_ | ||
4 | |||
5 | /* Register definitions for Vitesse VSC7321 (Meigs II) MAC | ||
6 | * | ||
7 | * Straight off the data sheet, VMDS-10038 Rev 2.0 and | ||
8 | * PD0011-01-14-Meigs-II 2002-12-12 | ||
9 | */ | ||
10 | |||
11 | /* Just 'cause it's in here doesn't mean it's used. */ | ||
12 | |||
13 | #define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1)) | ||
14 | |||
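CRA() packs a block number, sub-block and register address into the 16-bit addresses used by every definition in this header; vsc_read()/vsc_write() in vsc7326.c then shift that address left by two before driving it over TPI. A worked expansion for REG_LOCAL_STATUS, defined below as CRA(0x7,0xf,0xff):

/*
 * CRA(0x7, 0xf, 0xff)
 *     = ((0x7 & 0x7) << 13) | ((0xf & 0xf) << 9) | ((0xff & 0xff) << 1)
 *     = 0xE000 | 0x1E00 | 0x01FE
 *     = 0xFFFE
 *
 * vsc_read() then issues TPI reads at (0xFFFE << 2) = 0x3FFF8 for the high
 * 16 bits and (0xFFFE << 2) + 4 = 0x3FFFC for the low 16 bits.
 */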
15 | /* System and CPU comm's registers */ | ||
16 | #define REG_CHIP_ID CRA(0x7,0xf,0x00) /* Chip ID */ | ||
17 | #define REG_BLADE_ID CRA(0x7,0xf,0x01) /* Blade ID */ | ||
18 | #define REG_SW_RESET CRA(0x7,0xf,0x02) /* Global Soft Reset */ | ||
19 | #define REG_MEM_BIST CRA(0x7,0xf,0x04) /* mem */ | ||
20 | #define REG_IFACE_MODE CRA(0x7,0xf,0x07) /* Interface mode */ | ||
21 | #define REG_MSCH CRA(0x7,0x2,0x06) /* CRC error count */ | ||
22 | #define REG_CRC_CNT CRA(0x7,0x2,0x0a) /* CRC error count */ | ||
23 | #define REG_CRC_CFG CRA(0x7,0x2,0x0b) /* CRC config */ | ||
24 | #define REG_SI_TRANSFER_SEL CRA(0x7,0xf,0x18) /* SI Transfer Select */ | ||
25 | #define REG_PLL_CLK_SPEED CRA(0x7,0xf,0x19) /* Clock Speed Selection */ | ||
26 | #define REG_SYS_CLK_SELECT CRA(0x7,0xf,0x1c) /* System Clock Select */ | ||
27 | #define REG_GPIO_CTRL CRA(0x7,0xf,0x1d) /* GPIO Control */ | ||
28 | #define REG_GPIO_OUT CRA(0x7,0xf,0x1e) /* GPIO Out */ | ||
29 | #define REG_GPIO_IN CRA(0x7,0xf,0x1f) /* GPIO In */ | ||
30 | #define REG_CPU_TRANSFER_SEL CRA(0x7,0xf,0x20) /* CPU Transfer Select */ | ||
31 | #define REG_LOCAL_DATA CRA(0x7,0xf,0xfe) /* Local CPU Data Register */ | ||
32 | #define REG_LOCAL_STATUS CRA(0x7,0xf,0xff) /* Local CPU Status Register */ | ||
33 | |||
34 | /* Aggregator registers */ | ||
35 | #define REG_AGGR_SETUP CRA(0x7,0x1,0x00) /* Aggregator Setup */ | ||
36 | #define REG_PMAP_TABLE CRA(0x7,0x1,0x01) /* Port map table */ | ||
37 | #define REG_MPLS_BIT0 CRA(0x7,0x1,0x08) /* MPLS bit0 position */ | ||
38 | #define REG_MPLS_BIT1 CRA(0x7,0x1,0x09) /* MPLS bit1 position */ | ||
39 | #define REG_MPLS_BIT2 CRA(0x7,0x1,0x0a) /* MPLS bit2 position */ | ||
40 | #define REG_MPLS_BIT3 CRA(0x7,0x1,0x0b) /* MPLS bit3 position */ | ||
41 | #define REG_MPLS_BITMASK CRA(0x7,0x1,0x0c) /* MPLS bit mask */ | ||
42 | #define REG_PRE_BIT0POS CRA(0x7,0x1,0x10) /* Preamble bit0 position */ | ||
43 | #define REG_PRE_BIT1POS CRA(0x7,0x1,0x11) /* Preamble bit1 position */ | ||
44 | #define REG_PRE_BIT2POS CRA(0x7,0x1,0x12) /* Preamble bit2 position */ | ||
45 | #define REG_PRE_BIT3POS CRA(0x7,0x1,0x13) /* Preamble bit3 position */ | ||
46 | #define REG_PRE_ERR_CNT CRA(0x7,0x1,0x14) /* Preamble parity error count */ | ||
47 | |||
48 | /* BIST registers */ | ||
49 | /*#define REG_RAM_BIST_CMD CRA(0x7,0x2,0x00)*/ /* RAM BIST Command Register */ | ||
50 | /*#define REG_RAM_BIST_RESULT CRA(0x7,0x2,0x01)*/ /* RAM BIST Read Status/Result */ | ||
51 | #define REG_RAM_BIST_CMD CRA(0x7,0x1,0x00) /* RAM BIST Command Register */ | ||
52 | #define REG_RAM_BIST_RESULT CRA(0x7,0x1,0x01) /* RAM BIST Read Status/Result */ | ||
53 | #define BIST_PORT_SELECT 0x00 /* BIST port select */ | ||
54 | #define BIST_COMMAND 0x01 /* BIST enable/disable */ | ||
55 | #define BIST_STATUS 0x02 /* BIST operation status */ | ||
56 | #define BIST_ERR_CNT_LSB 0x03 /* BIST error count lo 8b */ | ||
57 | #define BIST_ERR_CNT_MSB 0x04 /* BIST error count hi 8b */ | ||
58 | #define BIST_ERR_SEL_LSB 0x05 /* BIST error select lo 8b */ | ||
59 | #define BIST_ERR_SEL_MSB 0x06 /* BIST error select hi 8b */ | ||
60 | #define BIST_ERROR_STATE 0x07 /* BIST engine internal state */ | ||
61 | #define BIST_ERR_ADR0 0x08 /* BIST error address lo 8b */ | ||
62 | #define BIST_ERR_ADR1 0x09 /* BIST error address lomid 8b */ | ||
63 | #define BIST_ERR_ADR2 0x0a /* BIST error address himid 8b */ | ||
64 | #define BIST_ERR_ADR3 0x0b /* BIST error address hi 8b */ | ||
65 | |||
66 | /* FIFO registers | ||
67 | * ie = 0 for ingress, 1 for egress | ||
68 | * fn = FIFO number, 0-9 | ||
69 | */ | ||
70 | #define REG_TEST(ie,fn) CRA(0x2,ie&1,0x00+fn) /* Mode & Test Register */ | ||
71 | #define REG_TOP_BOTTOM(ie,fn) CRA(0x2,ie&1,0x10+fn) /* FIFO Buffer Top & Bottom */ | ||
72 | #define REG_TAIL(ie,fn) CRA(0x2,ie&1,0x20+fn) /* FIFO Write Pointer */ | ||
73 | #define REG_HEAD(ie,fn) CRA(0x2,ie&1,0x30+fn) /* FIFO Read Pointer */ | ||
74 | #define REG_HIGH_LOW_WM(ie,fn) CRA(0x2,ie&1,0x40+fn) /* Flow Control Water Marks */ | ||
75 | #define REG_CT_THRHLD(ie,fn) CRA(0x2,ie&1,0x50+fn) /* Cut Through Threshold */ | ||
76 | #define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn) /* Drop & CRC Error Counter */ | ||
77 | #define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn) /* Input Side Debug Counter */ | ||
78 | #define REG_BUCKI(fn) CRA(0x2,2,0x20+fn) /* Input Side Debug Counter */ | ||
79 | #define REG_BUCKE(fn) CRA(0x2,3,0x20+fn) /* Input Side Debug Counter */ | ||
80 | |||
81 | /* Traffic shaper buckets | ||
82 | * ie = 0 for ingress, 1 for egress | ||
83 | * bn = bucket number 0-10 (yes, 11 buckets) | ||
84 | */ | ||
85 | /* OK, this one's kinda ugly. Some hardware designers are perverse. */ | ||
86 | #define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4)) | ||
87 | #define REG_TRAFFIC_SHAPER_CONTROL(ie) CRA(0x2,ie&1,0x3b) | ||
88 | |||
89 | #define REG_SRAM_ADR(ie) CRA(0x2,ie&1,0x0e) /* FIFO SRAM address */ | ||
90 | #define REG_SRAM_WR_STRB(ie) CRA(0x2,ie&1,0x1e) /* FIFO SRAM write strobe */ | ||
91 | #define REG_SRAM_RD_STRB(ie) CRA(0x2,ie&1,0x2e) /* FIFO SRAM read strobe */ | ||
92 | #define REG_SRAM_DATA_0(ie) CRA(0x2,ie&1,0x3e) /* FIFO SRAM data lo 8b */ | ||
93 | #define REG_SRAM_DATA_1(ie) CRA(0x2,ie&1,0x4e) /* FIFO SRAM data lomid 8b */ | ||
94 | #define REG_SRAM_DATA_2(ie) CRA(0x2,ie&1,0x5e) /* FIFO SRAM data himid 8b */ | ||
95 | #define REG_SRAM_DATA_3(ie) CRA(0x2,ie&1,0x6e) /* FIFO SRAM data hi 8b */ | ||
96 | #define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e) /* FIFO SRAM tag */ | ||
97 | /* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */ | ||
98 | #define REG_CONTROL(ie) CRA(0x2,ie&1,0x0f) /* FIFO control */ | ||
99 | #define REG_ING_CONTROL CRA(0x2,0x0,0x0f) /* Ingress control (alias) */ | ||
100 | #define REG_EGR_CONTROL CRA(0x2,0x1,0x0f) /* Egress control (alias) */ | ||
101 | #define REG_AGE_TIMER(ie) CRA(0x2,ie&1,0x1f) /* Aging timer */ | ||
102 | #define REG_AGE_INC(ie) CRA(0x2,ie&1,0x2f) /* Aging increment */ | ||
103 | #define DEBUG_OUT(ie) CRA(0x2,ie&1,0x3f) /* Output debug counter control */ | ||
104 | #define DEBUG_CNT(ie) CRA(0x2,ie&1,0x4f) /* Output debug counter */ | ||
105 | |||
106 | /* SPI4 interface */ | ||
107 | #define REG_SPI4_MISC CRA(0x5,0x0,0x00) /* Misc Register */ | ||
108 | #define REG_SPI4_STATUS CRA(0x5,0x0,0x01) /* CML Status */ | ||
109 | #define REG_SPI4_ING_SETUP0 CRA(0x5,0x0,0x02) /* Ingress Status Channel Setup */ | ||
110 | #define REG_SPI4_ING_SETUP1 CRA(0x5,0x0,0x03) /* Ingress Data Training Setup */ | ||
111 | #define REG_SPI4_ING_SETUP2 CRA(0x5,0x0,0x04) /* Ingress Data Burst Size Setup */ | ||
112 | #define REG_SPI4_EGR_SETUP0 CRA(0x5,0x0,0x05) /* Egress Status Channel Setup */ | ||
113 | #define REG_SPI4_DBG_CNT(n) CRA(0x5,0x0,0x10+n) /* Debug counters 0-9 */ | ||
114 | #define REG_SPI4_DBG_SETUP CRA(0x5,0x0,0x1A) /* Debug counters setup */ | ||
115 | #define REG_SPI4_TEST CRA(0x5,0x0,0x20) /* Test Setup Register */ | ||
116 | #define REG_TPGEN_UP0 CRA(0x5,0x0,0x21) /* Test Pattern generator user pattern 0 */ | ||
117 | #define REG_TPGEN_UP1 CRA(0x5,0x0,0x22) /* Test Pattern generator user pattern 1 */ | ||
118 | #define REG_TPCHK_UP0 CRA(0x5,0x0,0x23) /* Test Pattern checker user pattern 0 */ | ||
119 | #define REG_TPCHK_UP1 CRA(0x5,0x0,0x24) /* Test Pattern checker user pattern 1 */ | ||
120 | #define REG_TPSAM_P0 CRA(0x5,0x0,0x25) /* Sampled pattern 0 */ | ||
121 | #define REG_TPSAM_P1 CRA(0x5,0x0,0x26) /* Sampled pattern 1 */ | ||
122 | #define REG_TPERR_CNT CRA(0x5,0x0,0x27) /* Pattern checker error counter */ | ||
123 | #define REG_SPI4_STICKY CRA(0x5,0x0,0x30) /* Sticky bits register */ | ||
124 | #define REG_SPI4_DBG_INH CRA(0x5,0x0,0x31) /* Core egress & ingress inhibit */ | ||
125 | #define REG_SPI4_DBG_STATUS CRA(0x5,0x0,0x32) /* Sampled ingress status */ | ||
126 | #define REG_SPI4_DBG_GRANT CRA(0x5,0x0,0x33) /* Ingress granted credit value */ | ||
127 | |||
128 | #define REG_SPI4_DESKEW CRA(0x5,0x0,0x43) /* SPI4 deskew */ | ||
129 | |||
130 | /* 10GbE MAC Block Registers */ | ||
131 | /* Note that those registers that are exactly the same for 10GbE as for | ||
132 | * tri-speed are only defined with the version that needs a port number. | ||
133 | * Pass 0xa in those cases. | ||
134 | * | ||
135 | * Also note that despite the presence of a MAC address register, this part | ||
136 | * does no ingress MAC address filtering. That register is used only for | ||
137 | * pause frame detection and generation. | ||
138 | */ | ||
139 | /* 10GbE specific, and different from tri-speed */ | ||
140 | #define REG_MISC_10G CRA(0x1,0xa,0x00) /* Misc 10GbE setup */ | ||
141 | #define REG_PAUSE_10G CRA(0x1,0xa,0x01) /* Pause register */ | ||
142 | #define REG_NORMALIZER_10G CRA(0x1,0xa,0x05) /* 10G normalizer */ | ||
143 | #define REG_STICKY_RX CRA(0x1,0xa,0x06) /* RX debug register */ | ||
144 | #define REG_DENORM_10G CRA(0x1,0xa,0x07) /* Denormalizer */ | ||
145 | #define REG_STICKY_TX CRA(0x1,0xa,0x08) /* TX sticky bits */ | ||
146 | #define REG_MAX_RXHIGH CRA(0x1,0xa,0x0a) /* XGMII lane 0-3 debug */ | ||
147 | #define REG_MAX_RXLOW CRA(0x1,0xa,0x0b) /* XGMII lane 4-7 debug */ | ||
148 | #define REG_MAC_TX_STICKY CRA(0x1,0xa,0x0c) /* MAC Tx state sticky debug */ | ||
149 | #define REG_MAC_TX_RUNNING CRA(0x1,0xa,0x0d) /* MAC Tx state running debug */ | ||
150 | #define REG_TX_ABORT_AGE CRA(0x1,0xa,0x14) /* Aged Tx frames discarded */ | ||
151 | #define REG_TX_ABORT_SHORT CRA(0x1,0xa,0x15) /* Short Tx frames discarded */ | ||
152 | #define REG_TX_ABORT_TAXI CRA(0x1,0xa,0x16) /* Taxi error frames discarded */ | ||
153 | #define REG_TX_ABORT_UNDERRUN CRA(0x1,0xa,0x17) /* Tx Underrun abort counter */ | ||
154 | #define REG_TX_DENORM_DISCARD CRA(0x1,0xa,0x18) /* Tx denormalizer discards */ | ||
155 | #define REG_XAUI_STAT_A CRA(0x1,0xa,0x20) /* XAUI status A */ | ||
156 | #define REG_XAUI_STAT_B CRA(0x1,0xa,0x21) /* XAUI status B */ | ||
157 | #define REG_XAUI_STAT_C CRA(0x1,0xa,0x22) /* XAUI status C */ | ||
158 | #define REG_XAUI_CONF_A CRA(0x1,0xa,0x23) /* XAUI configuration A */ | ||
159 | #define REG_XAUI_CONF_B CRA(0x1,0xa,0x24) /* XAUI configuration B */ | ||
160 | #define REG_XAUI_CODE_GRP_CNT CRA(0x1,0xa,0x25) /* XAUI code group error count */ | ||
161 | #define REG_XAUI_CONF_TEST_A CRA(0x1,0xa,0x26) /* XAUI test register A */ | ||
162 | #define REG_PDERRCNT CRA(0x1,0xa,0x27) /* XAUI test register B */ | ||
163 | |||
164 | /* pn = port number 0-9 for tri-speed, 10 for 10GbE */ | ||
165 | /* Both tri-speed and 10GbE */ | ||
166 | #define REG_MAX_LEN(pn) CRA(0x1,pn,0x02) /* Max length */ | ||
167 | #define REG_MAC_HIGH_ADDR(pn) CRA(0x1,pn,0x03) /* Upper 24 bits of MAC addr */ | ||
168 | #define REG_MAC_LOW_ADDR(pn) CRA(0x1,pn,0x04) /* Lower 24 bits of MAC addr */ | ||
169 | |||
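A minimal usage sketch of the port-number convention above (0-9 selects a tri-speed MAC, 0xa the single 10GbE MAC); vsc_write() is a hypothetical register accessor and the frame lengths are arbitrary, only the adapter_t type is taken from this driver:

static void example_set_max_len(adapter_t *adapter)
{
	/* Same register definition, selected per port by the pn argument */
	vsc_write(adapter, REG_MAX_LEN(0), 1518);	/* tri-speed port 0 */
	vsc_write(adapter, REG_MAX_LEN(0xa), 9600);	/* the 10GbE port (pn = 0xa) */
}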
170 | /* tri-speed only | ||
171 | * pn = port number, 0-9 | ||
172 | */ | ||
173 | #define REG_MODE_CFG(pn) CRA(0x1,pn,0x00) /* Mode configuration */ | ||
174 | #define REG_PAUSE_CFG(pn) CRA(0x1,pn,0x01) /* Pause configuration */ | ||
175 | #define REG_NORMALIZER(pn) CRA(0x1,pn,0x05) /* Normalizer */ | ||
176 | #define REG_TBI_STATUS(pn) CRA(0x1,pn,0x06) /* TBI status */ | ||
177 | #define REG_PCS_STATUS_DBG(pn) CRA(0x1,pn,0x07) /* PCS status debug */ | ||
178 | #define REG_PCS_CTRL(pn) CRA(0x1,pn,0x08) /* PCS control */ | ||
179 | #define REG_TBI_CONFIG(pn) CRA(0x1,pn,0x09) /* TBI configuration */ | ||
180 | #define REG_STICK_BIT(pn) CRA(0x1,pn,0x0a) /* Sticky bits */ | ||
181 | #define REG_DEV_SETUP(pn) CRA(0x1,pn,0x0b) /* MAC clock/reset setup */ | ||
182 | #define REG_DROP_CNT(pn) CRA(0x1,pn,0x0c) /* Drop counter */ | ||
183 | #define REG_PORT_POS(pn) CRA(0x1,pn,0x0d) /* Preamble port position */ | ||
184 | #define REG_PORT_FAIL(pn) CRA(0x1,pn,0x0e) /* Port fail */ | ||
185 | #define REG_SERDES_CONF(pn) CRA(0x1,pn,0x0f) /* SerDes configuration */ | ||
186 | #define REG_SERDES_TEST(pn) CRA(0x1,pn,0x10) /* SerDes test */ | ||
187 | #define REG_SERDES_STAT(pn) CRA(0x1,pn,0x11) /* SerDes status */ | ||
188 | #define REG_SERDES_COM_CNT(pn) CRA(0x1,pn,0x12) /* SerDes comma counter */ | ||
189 | #define REG_DENORM(pn) CRA(0x1,pn,0x15) /* Frame denormalization */ | ||
190 | #define REG_DBG(pn) CRA(0x1,pn,0x16) /* Device 1G debug */ | ||
191 | #define REG_TX_IFG(pn) CRA(0x1,pn,0x18) /* Tx IFG config */ | ||
192 | #define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ | ||
193 | |||
194 | /* Statistics */ | ||
195 | /* pn = port number, 0-a, a = 10GbE */ | ||
196 | #define REG_RX_IN_BYTES(pn) CRA(0x4,pn,0x00) /* # Rx in octets */ | ||
197 | #define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01) /* Frames w/ symbol errors */ | ||
198 | #define REG_RX_PAUSE(pn) CRA(0x4,pn,0x02) /* # pause frames received */ | ||
199 | #define REG_RX_UNSUP_OPCODE(pn) CRA(0x4,pn,0x03) /* # control frames with unsupported opcode */ | ||
200 | #define REG_RX_OK_BYTES(pn) CRA(0x4,pn,0x04) /* # octets in good frames */ | ||
201 | #define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,0x05) /* # octets in bad frames */ | ||
202 | #define REG_RX_UNICAST(pn) CRA(0x4,pn,0x06) /* # good unicast frames */ | ||
203 | #define REG_RX_MULTICAST(pn) CRA(0x4,pn,0x07) /* # good multicast frames */ | ||
204 | #define REG_RX_BROADCAST(pn) CRA(0x4,pn,0x08) /* # good broadcast frames */ | ||
205 | #define REG_CRC(pn) CRA(0x4,pn,0x09) /* # frames w/ bad CRC only */ | ||
206 | #define REG_RX_ALIGNMENT(pn) CRA(0x4,pn,0x0a) /* # frames w/ alignment err */ | ||
207 | #define REG_RX_UNDERSIZE(pn) CRA(0x4,pn,0x0b) /* # frames undersize */ | ||
208 | #define REG_RX_FRAGMENTS(pn) CRA(0x4,pn,0x0c) /* # frames undersize w/ crc err */ | ||
209 | #define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d) /* # frames with length error */ | ||
210 | #define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e) /* # frames with illegal length field */ | ||
211 | #define REG_RX_OVERSIZE(pn) CRA(0x4,pn,0x0f) /* # frames oversize */ | ||
212 | #define REG_RX_JABBERS(pn) CRA(0x4,pn,0x10) /* # frames oversize w/ crc err */ | ||
213 | #define REG_RX_SIZE_64(pn) CRA(0x4,pn,0x11) /* # frames 64 octets long */ | ||
214 | #define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12) /* # frames 65-127 octets */ | ||
215 | #define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13) /* # frames 128-255 */ | ||
216 | #define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14) /* # frames 256-511 */ | ||
217 | #define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15) /* # frames 512-1023 */ | ||
218 | #define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16) /* # frames 1024-1518 */ | ||
219 | #define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17) /* # frames 1519-max */ | ||
220 | |||
221 | #define REG_TX_OUT_BYTES(pn) CRA(0x4,pn,0x18) /* # octets tx */ | ||
222 | #define REG_TX_PAUSE(pn) CRA(0x4,pn,0x19) /* # pause frames sent */ | ||
223 | #define REG_TX_OK_BYTES(pn) CRA(0x4,pn,0x1a) /* # octets tx OK */ | ||
224 | #define REG_TX_UNICAST(pn) CRA(0x4,pn,0x1b) /* # frames unicast */ | ||
225 | #define REG_TX_MULTICAST(pn) CRA(0x4,pn,0x1c) /* # frames multicast */ | ||
226 | #define REG_TX_BROADCAST(pn) CRA(0x4,pn,0x1d) /* # frames broadcast */ | ||
227 | #define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e) /* # frames tx after multiple collisions */ | ||
228 | #define REG_TX_LATE_COLL(pn) CRA(0x4,pn,0x1f) /* # late collisions detected */ | ||
229 | #define REG_TX_XCOLL(pn) CRA(0x4,pn,0x20) /* # frames lost, excessive collisions */ | ||
230 | #define REG_TX_DEFER(pn) CRA(0x4,pn,0x21) /* # frames deferred on first tx attempt */ | ||
231 | #define REG_TX_XDEFER(pn) CRA(0x4,pn,0x22) /* # frames excessively deferred */ | ||
232 | #define REG_TX_CSENSE(pn) CRA(0x4,pn,0x23) /* carrier sense errors at frame end */ | ||
233 | #define REG_TX_SIZE_64(pn) CRA(0x4,pn,0x24) /* # frames 64 octets long */ | ||
234 | #define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25) /* # frames 65-127 octets */ | ||
235 | #define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26) /* # frames 128-255 */ | ||
236 | #define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27) /* # frames 256-511 */ | ||
237 | #define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28) /* # frames 512-1023 */ | ||
238 | #define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29) /* # frames 1024-1518 */ | ||
239 | #define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a) /* # frames 1519-max */ | ||
240 | #define REG_TX_SINGLE_COLL(pn) CRA(0x4,pn,0x2b) /* # frames tx after single collision */ | ||
241 | #define REG_TX_BACKOFF2(pn) CRA(0x4,pn,0x2c) /* # frames tx ok after 2 backoffs/collisions */ | ||
242 | #define REG_TX_BACKOFF3(pn) CRA(0x4,pn,0x2d) /* after 3 backoffs/collisions */ | ||
243 | #define REG_TX_BACKOFF4(pn) CRA(0x4,pn,0x2e) /* after 4 */ | ||
244 | #define REG_TX_BACKOFF5(pn) CRA(0x4,pn,0x2f) /* after 5 */ | ||
245 | #define REG_TX_BACKOFF6(pn) CRA(0x4,pn,0x30) /* after 6 */ | ||
246 | #define REG_TX_BACKOFF7(pn) CRA(0x4,pn,0x31) /* after 7 */ | ||
247 | #define REG_TX_BACKOFF8(pn) CRA(0x4,pn,0x32) /* after 8 */ | ||
248 | #define REG_TX_BACKOFF9(pn) CRA(0x4,pn,0x33) /* after 9 */ | ||
249 | #define REG_TX_BACKOFF10(pn) CRA(0x4,pn,0x34) /* after 10 */ | ||
250 | #define REG_TX_BACKOFF11(pn) CRA(0x4,pn,0x35) /* after 11 */ | ||
251 | #define REG_TX_BACKOFF12(pn) CRA(0x4,pn,0x36) /* after 12 */ | ||
252 | #define REG_TX_BACKOFF13(pn) CRA(0x4,pn,0x37) /* after 13 */ | ||
253 | #define REG_TX_BACKOFF14(pn) CRA(0x4,pn,0x38) /* after 14 */ | ||
254 | #define REG_TX_BACKOFF15(pn) CRA(0x4,pn,0x39) /* after 15 */ | ||
255 | #define REG_TX_UNDERRUN(pn) CRA(0x4,pn,0x3a) /* # frames dropped from underrun */ | ||
256 | #define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */ | ||
257 | #define REG_RX_IPG_SHRINK(pn) CRA(0x4,pn,0x3c) /* # of IPG shrinks detected */ | ||
258 | |||
259 | #define REG_STAT_STICKY1G(pn) CRA(0x4,pn,0x3e) /* tri-speed sticky bits */ | ||
260 | #define REG_STAT_STICKY10G CRA(0x4,0xa,0x3e) /* 10GbE sticky bits */ | ||
261 | #define REG_STAT_INIT(pn) CRA(0x4,pn,0x3f) /* Clear all statistics */ | ||
262 | |||
263 | /* MII-Management Block registers */ | ||
264 | /* These are for MII-M interface 0, the bidirectional LVTTL one. If we were | ||
265 | * hooked up to the one with separate directions, the middle 0x0 would need to | ||
266 | * change to 0x1. The current errata, however, states that MII-M 1 doesn't work. | ||
267 | */ | ||
268 | |||
269 | #define REG_MIIM_STATUS CRA(0x3,0x0,0x00) /* MII-M Status */ | ||
270 | #define REG_MIIM_CMD CRA(0x3,0x0,0x01) /* MII-M Command */ | ||
271 | #define REG_MIIM_DATA CRA(0x3,0x0,0x02) /* MII-M Data */ | ||
272 | #define REG_MIIM_PRESCALE CRA(0x3,0x0,0x03) /* MII-M MDC Prescale */ | ||
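A hypothetical generalization, not part of the original header: the same registers on MII-M interface 1 would differ only in the middle CRA argument, although per the errata note above interface 1 is unusable, so these parameterized names are illustration only:

#define REG_MIIM_STATUS_IF(intf)	CRA(0x3, (intf) & 1, 0x00)
#define REG_MIIM_CMD_IF(intf)		CRA(0x3, (intf) & 1, 0x01)
#define REG_MIIM_DATA_IF(intf)		CRA(0x3, (intf) & 1, 0x02)
#define REG_MIIM_PRESCALE_IF(intf)	CRA(0x3, (intf) & 1, 0x03)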
273 | |||
274 | #define REG_ING_FFILT_UM_EN CRA(0x2, 0, 0xd) | ||
275 | #define REG_ING_FFILT_BE_EN CRA(0x2, 0, 0x1d) | ||
276 | #define REG_ING_FFILT_VAL0 CRA(0x2, 0, 0x2d) | ||
277 | #define REG_ING_FFILT_VAL1 CRA(0x2, 0, 0x3d) | ||
278 | #define REG_ING_FFILT_MASK0 CRA(0x2, 0, 0x4d) | ||
279 | #define REG_ING_FFILT_MASK1 CRA(0x2, 0, 0x5d) | ||
280 | #define REG_ING_FFILT_MASK2 CRA(0x2, 0, 0x6d) | ||
281 | #define REG_ING_FFILT_ETYPE CRA(0x2, 0, 0x7d) | ||
282 | |||
283 | |||
284 | /* Whew. */ | ||
285 | |||
286 | #endif | ||
diff --git a/drivers/net/chelsio/vsc8244.c b/drivers/net/chelsio/vsc8244.c new file mode 100644 index 000000000000..c493e783d459 --- /dev/null +++ b/drivers/net/chelsio/vsc8244.c | |||
@@ -0,0 +1,368 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T2 Ethernet driver. | ||
3 | * | ||
4 | * Copyright (C) 2005 Chelsio Communications. All rights reserved. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
7 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
8 | * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this | ||
9 | * release for licensing terms and conditions. | ||
10 | */ | ||
11 | |||
12 | #include "common.h" | ||
13 | #include "cphy.h" | ||
14 | #include "elmer0.h" | ||
15 | |||
16 | #ifndef ADVERTISE_PAUSE_CAP | ||
17 | # define ADVERTISE_PAUSE_CAP 0x400 | ||
18 | #endif | ||
19 | #ifndef ADVERTISE_PAUSE_ASYM | ||
20 | # define ADVERTISE_PAUSE_ASYM 0x800 | ||
21 | #endif | ||
22 | |||
23 | /* Gigabit MII registers */ | ||
24 | #ifndef MII_CTRL1000 | ||
25 | # define MII_CTRL1000 9 | ||
26 | #endif | ||
27 | |||
28 | #ifndef ADVERTISE_1000FULL | ||
29 | # define ADVERTISE_1000FULL 0x200 | ||
30 | # define ADVERTISE_1000HALF 0x100 | ||
31 | #endif | ||
32 | |||
33 | /* VSC8244 PHY specific registers. */ | ||
34 | enum { | ||
35 | VSC8244_INTR_ENABLE = 25, | ||
36 | VSC8244_INTR_STATUS = 26, | ||
37 | VSC8244_AUX_CTRL_STAT = 28, | ||
38 | }; | ||
39 | |||
40 | enum { | ||
41 | VSC_INTR_RX_ERR = 1 << 0, | ||
42 | VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */ | ||
43 | VSC_INTR_CABLE = 1 << 2, /* cable impairment */ | ||
44 | VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */ | ||
45 | VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */ | ||
46 | VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */ | ||
47 | VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */ | ||
48 | VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */ | ||
49 | VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */ | ||
50 | VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */ | ||
51 | VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */ | ||
52 | VSC_INTR_LINK_CHG = 1 << 13, /* link change */ | ||
53 | VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */ | ||
54 | }; | ||
55 | |||
56 | #define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ | ||
57 | VSC_INTR_NEG_DONE) | ||
58 | #define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ | ||
59 | VSC_INTR_ENABLE) | ||
60 | |||
61 | /* PHY specific auxiliary control & status register fields */ | ||
62 | #define S_ACSR_ACTIPHY_TMR 0 | ||
63 | #define M_ACSR_ACTIPHY_TMR 0x3 | ||
64 | #define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR) | ||
65 | |||
66 | #define S_ACSR_SPEED 3 | ||
67 | #define M_ACSR_SPEED 0x3 | ||
68 | #define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED) | ||
69 | |||
70 | #define S_ACSR_DUPLEX 5 | ||
71 | #define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX) | ||
72 | |||
73 | #define S_ACSR_ACTIPHY 6 | ||
74 | #define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY) | ||
75 | |||
76 | /* | ||
77 | * Reset the PHY. This PHY completes reset immediately so we never wait. | ||
78 | */ | ||
79 | static int vsc8244_reset(struct cphy *cphy, int wait) | ||
80 | { | ||
81 | int err; | ||
82 | unsigned int ctl; | ||
83 | |||
84 | err = simple_mdio_read(cphy, MII_BMCR, &ctl); | ||
85 | if (err) | ||
86 | return err; | ||
87 | |||
88 | ctl &= ~BMCR_PDOWN; | ||
89 | ctl |= BMCR_RESET; | ||
90 | return simple_mdio_write(cphy, MII_BMCR, ctl); | ||
91 | } | ||
92 | |||
93 | static int vsc8244_intr_enable(struct cphy *cphy) | ||
94 | { | ||
95 | simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK); | ||
96 | |||
97 | /* Enable interrupts through Elmer */ | ||
98 | if (t1_is_asic(cphy->adapter)) { | ||
99 | u32 elmer; | ||
100 | |||
101 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
102 | elmer |= ELMER0_GP_BIT1; | ||
103 | if (is_T2(cphy->adapter)) { | ||
104 | elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; | ||
105 | } | ||
106 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
107 | } | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int vsc8244_intr_disable(struct cphy *cphy) | ||
113 | { | ||
114 | simple_mdio_write(cphy, VSC8244_INTR_ENABLE, 0); | ||
115 | |||
116 | if (t1_is_asic(cphy->adapter)) { | ||
117 | u32 elmer; | ||
118 | |||
119 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
120 | elmer &= ~ELMER0_GP_BIT1; | ||
121 | if (is_T2(cphy->adapter)) { | ||
122 | elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); | ||
123 | } | ||
124 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
125 | } | ||
126 | |||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static int vsc8244_intr_clear(struct cphy *cphy) | ||
131 | { | ||
132 | u32 val; | ||
133 | u32 elmer; | ||
134 | |||
135 | /* Clear PHY interrupts by reading the register. */ | ||
136 | simple_mdio_read(cphy, VSC8244_INTR_STATUS, &val); | ||
137 | |||
138 | if (t1_is_asic(cphy->adapter)) { | ||
139 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); | ||
140 | elmer |= ELMER0_GP_BIT1; | ||
141 | if (is_T2(cphy->adapter)) { | ||
142 | elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; | ||
143 | } | ||
144 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); | ||
145 | } | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Force the PHY speed and duplex. This also disables auto-negotiation, except | ||
152 | * for 1Gb/s, where auto-negotiation is mandatory. | ||
153 | */ | ||
154 | static int vsc8244_set_speed_duplex(struct cphy *phy, int speed, int duplex) | ||
155 | { | ||
156 | int err; | ||
157 | unsigned int ctl; | ||
158 | |||
159 | err = simple_mdio_read(phy, MII_BMCR, &ctl); | ||
160 | if (err) | ||
161 | return err; | ||
162 | |||
163 | if (speed >= 0) { | ||
164 | ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); | ||
165 | if (speed == SPEED_100) | ||
166 | ctl |= BMCR_SPEED100; | ||
167 | else if (speed == SPEED_1000) | ||
168 | ctl |= BMCR_SPEED1000; | ||
169 | } | ||
170 | if (duplex >= 0) { | ||
171 | ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); | ||
172 | if (duplex == DUPLEX_FULL) | ||
173 | ctl |= BMCR_FULLDPLX; | ||
174 | } | ||
175 | if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */ | ||
176 | ctl |= BMCR_ANENABLE; | ||
177 | return simple_mdio_write(phy, MII_BMCR, ctl); | ||
178 | } | ||
179 | |||
180 | int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits) | ||
181 | { | ||
182 | int ret; | ||
183 | unsigned int val; | ||
184 | |||
185 | ret = mdio_read(phy, mmd, reg, &val); | ||
186 | if (!ret) | ||
187 | ret = mdio_write(phy, mmd, reg, val | bits); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | static int vsc8244_autoneg_enable(struct cphy *cphy) | ||
192 | { | ||
193 | return t1_mdio_set_bits(cphy, 0, MII_BMCR, | ||
194 | BMCR_ANENABLE | BMCR_ANRESTART); | ||
195 | } | ||
196 | |||
197 | static int vsc8244_autoneg_restart(struct cphy *cphy) | ||
198 | { | ||
199 | return t1_mdio_set_bits(cphy, 0, MII_BMCR, BMCR_ANRESTART); | ||
200 | } | ||
201 | |||
202 | static int vsc8244_advertise(struct cphy *phy, unsigned int advertise_map) | ||
203 | { | ||
204 | int err; | ||
205 | unsigned int val = 0; | ||
206 | |||
207 | err = simple_mdio_read(phy, MII_CTRL1000, &val); | ||
208 | if (err) | ||
209 | return err; | ||
210 | |||
211 | val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); | ||
212 | if (advertise_map & ADVERTISED_1000baseT_Half) | ||
213 | val |= ADVERTISE_1000HALF; | ||
214 | if (advertise_map & ADVERTISED_1000baseT_Full) | ||
215 | val |= ADVERTISE_1000FULL; | ||
216 | |||
217 | err = simple_mdio_write(phy, MII_CTRL1000, val); | ||
218 | if (err) | ||
219 | return err; | ||
220 | |||
221 | val = 1; | ||
222 | if (advertise_map & ADVERTISED_10baseT_Half) | ||
223 | val |= ADVERTISE_10HALF; | ||
224 | if (advertise_map & ADVERTISED_10baseT_Full) | ||
225 | val |= ADVERTISE_10FULL; | ||
226 | if (advertise_map & ADVERTISED_100baseT_Half) | ||
227 | val |= ADVERTISE_100HALF; | ||
228 | if (advertise_map & ADVERTISED_100baseT_Full) | ||
229 | val |= ADVERTISE_100FULL; | ||
230 | if (advertise_map & ADVERTISED_PAUSE) | ||
231 | val |= ADVERTISE_PAUSE_CAP; | ||
232 | if (advertise_map & ADVERTISED_ASYM_PAUSE) | ||
233 | val |= ADVERTISE_PAUSE_ASYM; | ||
234 | return simple_mdio_write(phy, MII_ADVERTISE, val); | ||
235 | } | ||
236 | |||
237 | static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok, | ||
238 | int *speed, int *duplex, int *fc) | ||
239 | { | ||
240 | unsigned int bmcr, status, lpa, adv; | ||
241 | int err, sp = -1, dplx = -1, pause = 0; | ||
242 | |||
243 | err = simple_mdio_read(cphy, MII_BMCR, &bmcr); | ||
244 | if (!err) | ||
245 | err = simple_mdio_read(cphy, MII_BMSR, &status); | ||
246 | if (err) | ||
247 | return err; | ||
248 | |||
249 | if (link_ok) { | ||
250 | /* | ||
251 | * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it | ||
252 | * once more to get the current link state. | ||
253 | */ | ||
254 | if (!(status & BMSR_LSTATUS)) | ||
255 | err = simple_mdio_read(cphy, MII_BMSR, &status); | ||
256 | if (err) | ||
257 | return err; | ||
258 | *link_ok = (status & BMSR_LSTATUS) != 0; | ||
259 | } | ||
260 | if (!(bmcr & BMCR_ANENABLE)) { | ||
261 | dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; | ||
262 | if (bmcr & BMCR_SPEED1000) | ||
263 | sp = SPEED_1000; | ||
264 | else if (bmcr & BMCR_SPEED100) | ||
265 | sp = SPEED_100; | ||
266 | else | ||
267 | sp = SPEED_10; | ||
268 | } else if (status & BMSR_ANEGCOMPLETE) { | ||
269 | err = simple_mdio_read(cphy, VSC8244_AUX_CTRL_STAT, &status); | ||
270 | if (err) | ||
271 | return err; | ||
272 | |||
273 | dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; | ||
274 | sp = G_ACSR_SPEED(status); | ||
275 | if (sp == 0) | ||
276 | sp = SPEED_10; | ||
277 | else if (sp == 1) | ||
278 | sp = SPEED_100; | ||
279 | else | ||
280 | sp = SPEED_1000; | ||
281 | |||
282 | if (fc && dplx == DUPLEX_FULL) { | ||
283 | err = simple_mdio_read(cphy, MII_LPA, &lpa); | ||
284 | if (!err) | ||
285 | err = simple_mdio_read(cphy, MII_ADVERTISE, | ||
286 | &adv); | ||
287 | if (err) | ||
288 | return err; | ||
289 | |||
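/* Resolve flow control as in the standard 802.3 pause resolution:
 * symmetric pause when both ends advertise PAUSE_CAP, otherwise
 * asymmetric pause in whichever direction the ASYM bits permit.
 */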
290 | if (lpa & adv & ADVERTISE_PAUSE_CAP) | ||
291 | pause = PAUSE_RX | PAUSE_TX; | ||
292 | else if ((lpa & ADVERTISE_PAUSE_CAP) && | ||
293 | (lpa & ADVERTISE_PAUSE_ASYM) && | ||
294 | (adv & ADVERTISE_PAUSE_ASYM)) | ||
295 | pause = PAUSE_TX; | ||
296 | else if ((lpa & ADVERTISE_PAUSE_ASYM) && | ||
297 | (adv & ADVERTISE_PAUSE_CAP)) | ||
298 | pause = PAUSE_RX; | ||
299 | } | ||
300 | } | ||
301 | if (speed) | ||
302 | *speed = sp; | ||
303 | if (duplex) | ||
304 | *duplex = dplx; | ||
305 | if (fc) | ||
306 | *fc = pause; | ||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | static int vsc8244_intr_handler(struct cphy *cphy) | ||
311 | { | ||
312 | unsigned int cause; | ||
313 | int err, cphy_cause = 0; | ||
314 | |||
315 | err = simple_mdio_read(cphy, VSC8244_INTR_STATUS, &cause); | ||
316 | if (err) | ||
317 | return err; | ||
318 | |||
319 | cause &= INTR_MASK; | ||
320 | if (cause & CFG_CHG_INTR_MASK) | ||
321 | cphy_cause |= cphy_cause_link_change; | ||
322 | if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO)) | ||
323 | cphy_cause |= cphy_cause_fifo_error; | ||
324 | return cphy_cause; | ||
325 | } | ||
326 | |||
327 | static void vsc8244_destroy(struct cphy *cphy) | ||
328 | { | ||
329 | kfree(cphy); | ||
330 | } | ||
331 | |||
332 | static struct cphy_ops vsc8244_ops = { | ||
333 | .destroy = vsc8244_destroy, | ||
334 | .reset = vsc8244_reset, | ||
335 | .interrupt_enable = vsc8244_intr_enable, | ||
336 | .interrupt_disable = vsc8244_intr_disable, | ||
337 | .interrupt_clear = vsc8244_intr_clear, | ||
338 | .interrupt_handler = vsc8244_intr_handler, | ||
339 | .autoneg_enable = vsc8244_autoneg_enable, | ||
340 | .autoneg_restart = vsc8244_autoneg_restart, | ||
341 | .advertise = vsc8244_advertise, | ||
342 | .set_speed_duplex = vsc8244_set_speed_duplex, | ||
343 | .get_link_status = vsc8244_get_link_status | ||
344 | }; | ||
345 | |||
346 | static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr, struct mdio_ops *mdio_ops) | ||
347 | { | ||
348 | struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); | ||
349 | |||
350 | if (!cphy) return NULL; | ||
351 | |||
352 | cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops); | ||
353 | |||
354 | return cphy; | ||
355 | } | ||
356 | |||
357 | |||
358 | static int vsc8244_phy_reset(adapter_t* adapter) | ||
359 | { | ||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | struct gphy t1_vsc8244_ops = { | ||
364 | vsc8244_phy_create, | ||
365 | vsc8244_phy_reset | ||
366 | }; | ||
367 | |||
368 | |||
diff --git a/drivers/net/chelsio/vsc8244_reg.h b/drivers/net/chelsio/vsc8244_reg.h new file mode 100644 index 000000000000..d3c1829055cb --- /dev/null +++ b/drivers/net/chelsio/vsc8244_reg.h | |||
@@ -0,0 +1,172 @@ | |||
1 | /* $Date: 2005/11/23 16:28:53 $ $RCSfile: vsc8244_reg.h,v $ $Revision: 1.1 $ */ | ||
2 | #ifndef CHELSIO_VSC8244_H | ||
3 | #define CHELSIO_VSC8244_H | ||
4 | |||
5 | #ifndef BMCR_SPEED1000 | ||
6 | # define BMCR_SPEED1000 0x40 | ||
7 | #endif | ||
8 | |||
9 | #ifndef ADVERTISE_PAUSE | ||
10 | # define ADVERTISE_PAUSE 0x400 | ||
11 | #endif | ||
12 | #ifndef ADVERTISE_PAUSE_ASYM | ||
13 | # define ADVERTISE_PAUSE_ASYM 0x800 | ||
14 | #endif | ||
15 | |||
16 | /* Gigabit MII registers */ | ||
17 | #define MII_GBMR 1 /* 1000Base-T mode register */ | ||
18 | #define MII_GBCR 9 /* 1000Base-T control register */ | ||
19 | #define MII_GBSR 10 /* 1000Base-T status register */ | ||
20 | |||
21 | /* 1000Base-T control register fields */ | ||
22 | #define GBCR_ADV_1000HALF 0x100 | ||
23 | #define GBCR_ADV_1000FULL 0x200 | ||
24 | #define GBCR_PREFER_MASTER 0x400 | ||
25 | #define GBCR_MANUAL_AS_MASTER 0x800 | ||
26 | #define GBCR_MANUAL_CONFIG_ENABLE 0x1000 | ||
27 | |||
28 | /* 1000Base-T status register fields */ | ||
29 | #define GBSR_LP_1000HALF 0x400 | ||
30 | #define GBSR_LP_1000FULL 0x800 | ||
31 | #define GBSR_REMOTE_OK 0x1000 | ||
32 | #define GBSR_LOCAL_OK 0x2000 | ||
33 | #define GBSR_LOCAL_MASTER 0x4000 | ||
34 | #define GBSR_MASTER_FAULT 0x8000 | ||
35 | |||
36 | /* Vitesse PHY interrupt status bits. */ | ||
57 | #define VSC8244_INTR_RX_ERR 0x0001 | ||
58 | #define VSC8244_INTR_MASTER_SLAVE 0x0002 | ||
59 | #define VSC8244_INTR_CABLE_IMPAIRED 0x0004 | ||
60 | #define VSC8244_INTR_FALSE_CARRIER 0x0008 | ||
65 | #define VSC8244_INTR_BIT4 0x0010 | ||
66 | #define VSC8244_INTR_FIFO_RX 0x0020 | ||
67 | #define VSC8244_INTR_FIFO_OVER_UNDER 0x0040 | ||
68 | #define VSC8244_INTR_LOCK_LOST 0x0080 | ||
73 | #define VSC8244_INTR_SYMBOL_ERROR 0x0100 | ||
74 | #define VSC8244_INTR_ENG_DETECT_CHNG 0x0200 | ||
75 | #define VSC8244_INTR_AUTONEG_DONE 0x0400 | ||
76 | #define VSC8244_INTR_AUTONEG_ERR 0x0800 | ||
81 | #define VSC8244_INTR_DUPLEX_CHNG 0x1000 | ||
82 | #define VSC8244_INTR_LINK_CHNG 0x2000 | ||
83 | #define VSC8244_INTR_SPEED_CHNG 0x4000 | ||
84 | #define VSC8244_INTR_STATUS 0x8000 | ||
86 | |||
87 | |||
88 | /* Vitesse PHY specific registers. */ | ||
89 | #define VSC8244_SPECIFIC_CNTRL_REGISTER 16 | ||
90 | #define VSC8244_SPECIFIC_STATUS_REGISTER 0x1c | ||
91 | #define VSC8244_INTERRUPT_ENABLE_REGISTER 0x19 | ||
92 | #define VSC8244_INTERRUPT_STATUS_REGISTER 0x1a | ||
93 | #define VSC8244_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20 | ||
94 | #define VSC8244_RECV_ERR_CNTR_REGISTER 21 | ||
95 | #define VSC8244_RES_REGISTER 22 | ||
96 | #define VSC8244_GLOBAL_STATUS_REGISTER 23 | ||
97 | #define VSC8244_LED_CONTROL_REGISTER 24 | ||
98 | #define VSC8244_MANUAL_LED_OVERRIDE_REGISTER 25 | ||
99 | #define VSC8244_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26 | ||
100 | #define VSC8244_EXT_PHY_SPECIFIC_STATUS_REGISTER 27 | ||
101 | #define VSC8244_VIRTUAL_CABLE_TESTER_REGISTER 28 | ||
102 | #define VSC8244_EXTENDED_ADDR_REGISTER 29 | ||
103 | #define VSC8244_EXTENDED_REGISTER 30 | ||
104 | |||
105 | /* PHY specific control register fields */ | ||
106 | #define S_PSCR_MDI_XOVER_MODE 5 | ||
107 | #define M_PSCR_MDI_XOVER_MODE 0x3 | ||
108 | #define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE) | ||
109 | #define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE) | ||
110 | |||
111 | /* Extended PHY specific control register fields */ | ||
112 | #define S_DOWNSHIFT_ENABLE 8 | ||
113 | #define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE) | ||
114 | |||
115 | #define S_DOWNSHIFT_CNT 9 | ||
116 | #define M_DOWNSHIFT_CNT 0x7 | ||
117 | #define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT) | ||
118 | #define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT) | ||
119 | |||
120 | /* PHY specific status register fields */ | ||
121 | #define S_PSSR_JABBER 0 | ||
122 | #define V_PSSR_JABBER (1 << S_PSSR_JABBER) | ||
123 | |||
124 | #define S_PSSR_POLARITY 1 | ||
125 | #define V_PSSR_POLARITY (1 << S_PSSR_POLARITY) | ||
126 | |||
127 | #define S_PSSR_RX_PAUSE 2 | ||
128 | #define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE) | ||
129 | |||
130 | #define S_PSSR_TX_PAUSE 3 | ||
131 | #define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE) | ||
132 | |||
133 | #define S_PSSR_ENERGY_DETECT 4 | ||
134 | #define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT) | ||
135 | |||
136 | #define S_PSSR_DOWNSHIFT_STATUS 5 | ||
137 | #define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS) | ||
138 | |||
139 | #define S_PSSR_MDI 6 | ||
140 | #define V_PSSR_MDI (1 << S_PSSR_MDI) | ||
141 | |||
142 | #define S_PSSR_CABLE_LEN 7 | ||
143 | #define M_PSSR_CABLE_LEN 0x7 | ||
144 | #define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN) | ||
145 | #define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN) | ||
146 | |||
149 | #define S_PSSR_LINK 2 | ||
150 | #define V_PSSR_LINK (1 << S_PSSR_LINK) | ||
151 | |||
154 | #define S_PSSR_STATUS_RESOLVED 15 | ||
155 | #define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED) | ||
156 | |||
157 | #define S_PSSR_PAGE_RECEIVED 12 | ||
158 | #define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED) | ||
159 | |||
162 | #define S_PSSR_DUPLEX 5 | ||
163 | #define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX) | ||
164 | |||
167 | #define S_PSSR_SPEED 3 | ||
168 | #define M_PSSR_SPEED 0x3 | ||
169 | #define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED) | ||
170 | #define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED) | ||
171 | |||
172 | #endif | ||
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index 8f514cc0debd..dc3ab3b5c8cb 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c | |||
@@ -192,6 +192,7 @@ | |||
192 | * 04 Aug 2003 macro Converted to the DMA API. | 192 | * 04 Aug 2003 macro Converted to the DMA API. |
193 | * 14 Aug 2004 macro Fix device names reported. | 193 | * 14 Aug 2004 macro Fix device names reported. |
194 | * 14 Jun 2005 macro Use irqreturn_t. | 194 | * 14 Jun 2005 macro Use irqreturn_t. |
195 | * 23 Oct 2006 macro Big-endian host support. | ||
195 | */ | 196 | */ |
196 | 197 | ||
197 | /* Include files */ | 198 | /* Include files */ |
@@ -218,8 +219,8 @@ | |||
218 | 219 | ||
219 | /* Version information string should be updated prior to each new release! */ | 220 | /* Version information string should be updated prior to each new release! */ |
220 | #define DRV_NAME "defxx" | 221 | #define DRV_NAME "defxx" |
221 | #define DRV_VERSION "v1.08" | 222 | #define DRV_VERSION "v1.09" |
222 | #define DRV_RELDATE "2005/06/14" | 223 | #define DRV_RELDATE "2006/10/23" |
223 | 224 | ||
224 | static char version[] __devinitdata = | 225 | static char version[] __devinitdata = |
225 | DRV_NAME ": " DRV_VERSION " " DRV_RELDATE | 226 | DRV_NAME ": " DRV_VERSION " " DRV_RELDATE |
@@ -859,6 +860,7 @@ static int __devinit dfx_driver_init(struct net_device *dev, | |||
859 | print_name); | 860 | print_name); |
860 | return(DFX_K_FAILURE); | 861 | return(DFX_K_FAILURE); |
861 | } | 862 | } |
863 | data = cpu_to_le32(data); | ||
862 | memcpy(&bp->factory_mac_addr[0], &data, sizeof(u32)); | 864 | memcpy(&bp->factory_mac_addr[0], &data, sizeof(u32)); |
863 | 865 | ||
864 | if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0, | 866 | if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0, |
@@ -867,6 +869,7 @@ static int __devinit dfx_driver_init(struct net_device *dev, | |||
867 | print_name); | 869 | print_name); |
868 | return(DFX_K_FAILURE); | 870 | return(DFX_K_FAILURE); |
869 | } | 871 | } |
872 | data = cpu_to_le32(data); | ||
870 | memcpy(&bp->factory_mac_addr[4], &data, sizeof(u16)); | 873 | memcpy(&bp->factory_mac_addr[4], &data, sizeof(u16)); |
871 | 874 | ||
872 | /* | 875 | /* |
@@ -1085,27 +1088,23 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers) | |||
1085 | } | 1088 | } |
1086 | 1089 | ||
1087 | /* | 1090 | /* |
1088 | * Set base address of Descriptor Block and bring adapter to DMA_AVAILABLE state | 1091 | * Set the base address of Descriptor Block and bring adapter |
1092 | * to DMA_AVAILABLE state. | ||
1089 | * | 1093 | * |
1090 | * Note: We also set the literal and data swapping requirements in this | 1094 | * Note: We also set the literal and data swapping requirements |
1091 | * command. Since this driver presently runs on Intel platforms | 1095 | * in this command. |
1092 | * which are Little Endian, we'll tell the adapter to byte swap | ||
1093 | * data only. This code will need to change when we support | ||
1094 | * Big Endian systems (eg. PowerPC). | ||
1095 | * | 1096 | * |
1096 | * Assumption: 32-bit physical address of descriptor block is 8Kbyte | 1097 | * Assumption: 32-bit physical address of descriptor block |
1097 | * aligned. That is, bits 0-12 of the address must be zero. | 1098 | * is 8Kbyte aligned. |
1098 | */ | 1099 | */ |
1099 | 1100 | if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT, | |
1100 | if (dfx_hw_port_ctrl_req(bp, | 1101 | (u32)(bp->descr_block_phys | |
1101 | PI_PCTRL_M_INIT, | 1102 | PI_PDATA_A_INIT_M_BSWAP_INIT), |
1102 | (u32) (bp->descr_block_phys | PI_PDATA_A_INIT_M_BSWAP_DATA), | 1103 | 0, NULL) != DFX_K_SUCCESS) { |
1103 | 0, | 1104 | printk("%s: Could not set descriptor block address!\n", |
1104 | NULL) != DFX_K_SUCCESS) | 1105 | bp->dev->name); |
1105 | { | 1106 | return DFX_K_FAILURE; |
1106 | printk("%s: Could not set descriptor block address!\n", bp->dev->name); | 1107 | } |
1107 | return(DFX_K_FAILURE); | ||
1108 | } | ||
1109 | 1108 | ||
1110 | /* Set transmit flush timeout value */ | 1109 | /* Set transmit flush timeout value */ |
1111 | 1110 | ||
diff --git a/drivers/net/defxx.h b/drivers/net/defxx.h index 8b1e9a11ca21..2ce8f97253eb 100644 --- a/drivers/net/defxx.h +++ b/drivers/net/defxx.h | |||
@@ -25,6 +25,7 @@ | |||
25 | * macros to DEFXX.C. | 25 | * macros to DEFXX.C. |
26 | * 12-Sep-96 LVS Removed packet request header pointers. | 26 | * 12-Sep-96 LVS Removed packet request header pointers. |
27 | * 04 Aug 2003 macro Converted to the DMA API. | 27 | * 04 Aug 2003 macro Converted to the DMA API. |
28 | * 23 Oct 2006 macro Big-endian host support. | ||
28 | */ | 29 | */ |
29 | 30 | ||
30 | #ifndef _DEFXX_H_ | 31 | #ifndef _DEFXX_H_ |
@@ -1344,7 +1345,7 @@ typedef struct | |||
1344 | 1345 | ||
1345 | /* Register definition structures are defined for both big and little endian systems */ | 1346 | /* Register definition structures are defined for both big and little endian systems */ |
1346 | 1347 | ||
1347 | #ifndef BIG_ENDIAN | 1348 | #ifndef __BIG_ENDIAN |
1348 | 1349 | ||
1349 | /* Little endian format of Type 1 Producer register */ | 1350 | /* Little endian format of Type 1 Producer register */ |
1350 | 1351 | ||
@@ -1402,7 +1403,11 @@ typedef union | |||
1402 | } index; | 1403 | } index; |
1403 | } PI_TYPE_2_CONSUMER; | 1404 | } PI_TYPE_2_CONSUMER; |
1404 | 1405 | ||
1405 | #else | 1406 | /* Define swapping required by DMA transfers. */ |
1407 | #define PI_PDATA_A_INIT_M_BSWAP_INIT \ | ||
1408 | (PI_PDATA_A_INIT_M_BSWAP_DATA) | ||
1409 | |||
1410 | #else /* __BIG_ENDIAN */ | ||
1406 | 1411 | ||
1407 | /* Big endian format of Type 1 Producer register */ | 1412 | /* Big endian format of Type 1 Producer register */ |
1408 | 1413 | ||
@@ -1460,7 +1465,11 @@ typedef union | |||
1460 | } index; | 1465 | } index; |
1461 | } PI_TYPE_2_CONSUMER; | 1466 | } PI_TYPE_2_CONSUMER; |
1462 | 1467 | ||
1463 | #endif /* #ifndef BIG_ENDIAN */ | 1468 | /* Define swapping required by DMA transfers. */ |
1469 | #define PI_PDATA_A_INIT_M_BSWAP_INIT \ | ||
1470 | (PI_PDATA_A_INIT_M_BSWAP_DATA | PI_PDATA_A_INIT_M_BSWAP_LITERAL) | ||
1471 | |||
1472 | #endif /* __BIG_ENDIAN */ | ||
1464 | 1473 | ||
1465 | /* Define EISA controller register offsets */ | 1474 | /* Define EISA controller register offsets */ |
1466 | 1475 | ||
diff --git a/drivers/net/depca.c b/drivers/net/depca.c index f87f6e3dc721..5113eef755b9 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c | |||
@@ -1252,24 +1252,22 @@ static void set_multicast_list(struct net_device *dev) | |||
1252 | struct depca_private *lp = (struct depca_private *) dev->priv; | 1252 | struct depca_private *lp = (struct depca_private *) dev->priv; |
1253 | u_long ioaddr = dev->base_addr; | 1253 | u_long ioaddr = dev->base_addr; |
1254 | 1254 | ||
1255 | if (dev) { | 1255 | netif_stop_queue(dev); |
1256 | netif_stop_queue(dev); | 1256 | while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */ |
1257 | while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */ | ||
1258 | |||
1259 | STOP_DEPCA; /* Temporarily stop the depca. */ | ||
1260 | depca_init_ring(dev); /* Initialize the descriptor rings */ | ||
1261 | 1257 | ||
1262 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */ | 1258 | STOP_DEPCA; /* Temporarily stop the depca. */ |
1263 | lp->init_block.mode |= PROM; | 1259 | depca_init_ring(dev); /* Initialize the descriptor rings */ |
1264 | } else { | ||
1265 | SetMulticastFilter(dev); | ||
1266 | lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */ | ||
1267 | } | ||
1268 | 1260 | ||
1269 | LoadCSRs(dev); /* Reload CSR3 */ | 1261 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */ |
1270 | InitRestartDepca(dev); /* Resume normal operation. */ | 1262 | lp->init_block.mode |= PROM; |
1271 | netif_start_queue(dev); /* Unlock the TX ring */ | 1263 | } else { |
1264 | SetMulticastFilter(dev); | ||
1265 | lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */ | ||
1272 | } | 1266 | } |
1267 | |||
1268 | LoadCSRs(dev); /* Reload CSR3 */ | ||
1269 | InitRestartDepca(dev); /* Resume normal operation. */ | ||
1270 | netif_start_queue(dev); /* Unlock the TX ring */ | ||
1273 | } | 1271 | } |
1274 | 1272 | ||
1275 | /* | 1273 | /* |
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 7ecce438d258..f091042b146e 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -59,6 +59,9 @@ | |||
59 | #include <linux/capability.h> | 59 | #include <linux/capability.h> |
60 | #include <linux/in.h> | 60 | #include <linux/in.h> |
61 | #include <linux/ip.h> | 61 | #include <linux/ip.h> |
62 | #ifdef NETIF_F_TSO6 | ||
63 | #include <linux/ipv6.h> | ||
64 | #endif | ||
62 | #include <linux/tcp.h> | 65 | #include <linux/tcp.h> |
63 | #include <linux/udp.h> | 66 | #include <linux/udp.h> |
64 | #include <net/pkt_sched.h> | 67 | #include <net/pkt_sched.h> |
@@ -254,6 +257,17 @@ struct e1000_adapter { | |||
254 | spinlock_t tx_queue_lock; | 257 | spinlock_t tx_queue_lock; |
255 | #endif | 258 | #endif |
256 | atomic_t irq_sem; | 259 | atomic_t irq_sem; |
260 | unsigned int detect_link; | ||
261 | unsigned int total_tx_bytes; | ||
262 | unsigned int total_tx_packets; | ||
263 | unsigned int total_rx_bytes; | ||
264 | unsigned int total_rx_packets; | ||
265 | /* Interrupt Throttle Rate */ | ||
266 | uint32_t itr; | ||
267 | uint32_t itr_setting; | ||
268 | uint16_t tx_itr; | ||
269 | uint16_t rx_itr; | ||
270 | |||
257 | struct work_struct reset_task; | 271 | struct work_struct reset_task; |
258 | uint8_t fc_autoneg; | 272 | uint8_t fc_autoneg; |
259 | 273 | ||
@@ -262,6 +276,7 @@ struct e1000_adapter { | |||
262 | 276 | ||
263 | /* TX */ | 277 | /* TX */ |
264 | struct e1000_tx_ring *tx_ring; /* One per active queue */ | 278 | struct e1000_tx_ring *tx_ring; /* One per active queue */ |
279 | unsigned int restart_queue; | ||
265 | unsigned long tx_queue_len; | 280 | unsigned long tx_queue_len; |
266 | uint32_t txd_cmd; | 281 | uint32_t txd_cmd; |
267 | uint32_t tx_int_delay; | 282 | uint32_t tx_int_delay; |
@@ -310,8 +325,6 @@ struct e1000_adapter { | |||
310 | uint64_t gorcl_old; | 325 | uint64_t gorcl_old; |
311 | uint16_t rx_ps_bsize0; | 326 | uint16_t rx_ps_bsize0; |
312 | 327 | ||
313 | /* Interrupt Throttle Rate */ | ||
314 | uint32_t itr; | ||
315 | 328 | ||
316 | /* OS defined structs */ | 329 | /* OS defined structs */ |
317 | struct net_device *netdev; | 330 | struct net_device *netdev; |
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index c564adbd669b..da459f7177c6 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
@@ -85,6 +85,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
85 | { "tx_single_coll_ok", E1000_STAT(stats.scc) }, | 85 | { "tx_single_coll_ok", E1000_STAT(stats.scc) }, |
86 | { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, | 86 | { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, |
87 | { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, | 87 | { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, |
88 | { "tx_restart_queue", E1000_STAT(restart_queue) }, | ||
88 | { "rx_long_length_errors", E1000_STAT(stats.roc) }, | 89 | { "rx_long_length_errors", E1000_STAT(stats.roc) }, |
89 | { "rx_short_length_errors", E1000_STAT(stats.ruc) }, | 90 | { "rx_short_length_errors", E1000_STAT(stats.ruc) }, |
90 | { "rx_align_errors", E1000_STAT(stats.algnerrc) }, | 91 | { "rx_align_errors", E1000_STAT(stats.algnerrc) }, |
@@ -133,9 +134,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
133 | 134 | ||
134 | if (hw->autoneg == 1) { | 135 | if (hw->autoneg == 1) { |
135 | ecmd->advertising |= ADVERTISED_Autoneg; | 136 | ecmd->advertising |= ADVERTISED_Autoneg; |
136 | |||
137 | /* the e1000 autoneg seems to match ethtool nicely */ | 137 | /* the e1000 autoneg seems to match ethtool nicely */ |
138 | |||
139 | ecmd->advertising |= hw->autoneg_advertised; | 138 | ecmd->advertising |= hw->autoneg_advertised; |
140 | } | 139 | } |
141 | 140 | ||
@@ -285,7 +284,7 @@ e1000_set_pauseparam(struct net_device *netdev, | |||
285 | e1000_reset(adapter); | 284 | e1000_reset(adapter); |
286 | } else | 285 | } else |
287 | retval = ((hw->media_type == e1000_media_type_fiber) ? | 286 | retval = ((hw->media_type == e1000_media_type_fiber) ? |
288 | e1000_setup_link(hw) : e1000_force_mac_fc(hw)); | 287 | e1000_setup_link(hw) : e1000_force_mac_fc(hw)); |
289 | 288 | ||
290 | clear_bit(__E1000_RESETTING, &adapter->flags); | 289 | clear_bit(__E1000_RESETTING, &adapter->flags); |
291 | return retval; | 290 | return retval; |
@@ -350,6 +349,13 @@ e1000_set_tso(struct net_device *netdev, uint32_t data) | |||
350 | else | 349 | else |
351 | netdev->features &= ~NETIF_F_TSO; | 350 | netdev->features &= ~NETIF_F_TSO; |
352 | 351 | ||
352 | #ifdef NETIF_F_TSO6 | ||
353 | if (data) | ||
354 | netdev->features |= NETIF_F_TSO6; | ||
355 | else | ||
356 | netdev->features &= ~NETIF_F_TSO6; | ||
357 | #endif | ||
358 | |||
353 | DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); | 359 | DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); |
354 | adapter->tso_force = TRUE; | 360 | adapter->tso_force = TRUE; |
355 | return 0; | 361 | return 0; |
@@ -774,7 +780,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
774 | /* The status register is Read Only, so a write should fail. | 780 | /* The status register is Read Only, so a write should fail. |
775 | * Some bits that get toggled are ignored. | 781 | * Some bits that get toggled are ignored. |
776 | */ | 782 | */ |
777 | switch (adapter->hw.mac_type) { | 783 | switch (adapter->hw.mac_type) { |
778 | /* there are several bits on newer hardware that are r/w */ | 784 | /* there are several bits on newer hardware that are r/w */ |
779 | case e1000_82571: | 785 | case e1000_82571: |
780 | case e1000_82572: | 786 | case e1000_82572: |
@@ -802,12 +808,14 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
802 | } | 808 | } |
803 | /* restore previous status */ | 809 | /* restore previous status */ |
804 | E1000_WRITE_REG(&adapter->hw, STATUS, before); | 810 | E1000_WRITE_REG(&adapter->hw, STATUS, before); |
811 | |||
805 | if (adapter->hw.mac_type != e1000_ich8lan) { | 812 | if (adapter->hw.mac_type != e1000_ich8lan) { |
806 | REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); | 813 | REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); |
807 | REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); | 814 | REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); |
808 | REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); | 815 | REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); |
809 | REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); | 816 | REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); |
810 | } | 817 | } |
818 | |||
811 | REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); | 819 | REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); |
812 | REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); | 820 | REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); |
813 | REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); | 821 | REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); |
@@ -820,8 +828,9 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
820 | REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); | 828 | REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); |
821 | 829 | ||
822 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); | 830 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); |
831 | |||
823 | before = (adapter->hw.mac_type == e1000_ich8lan ? | 832 | before = (adapter->hw.mac_type == e1000_ich8lan ? |
824 | 0x06C3B33E : 0x06DFB3FE); | 833 | 0x06C3B33E : 0x06DFB3FE); |
825 | REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); | 834 | REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); |
826 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); | 835 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); |
827 | 836 | ||
@@ -834,10 +843,10 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
834 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 843 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
835 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); | 844 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); |
836 | value = (adapter->hw.mac_type == e1000_ich8lan ? | 845 | value = (adapter->hw.mac_type == e1000_ich8lan ? |
837 | E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES); | 846 | E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES); |
838 | for (i = 0; i < value; i++) { | 847 | for (i = 0; i < value; i++) { |
839 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, | 848 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, |
840 | 0xFFFFFFFF); | 849 | 0xFFFFFFFF); |
841 | } | 850 | } |
842 | 851 | ||
843 | } else { | 852 | } else { |
@@ -883,8 +892,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data) | |||
883 | } | 892 | } |
884 | 893 | ||
885 | static irqreturn_t | 894 | static irqreturn_t |
886 | e1000_test_intr(int irq, | 895 | e1000_test_intr(int irq, void *data) |
887 | void *data) | ||
888 | { | 896 | { |
889 | struct net_device *netdev = (struct net_device *) data; | 897 | struct net_device *netdev = (struct net_device *) data; |
890 | struct e1000_adapter *adapter = netdev_priv(netdev); | 898 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -905,11 +913,11 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) | |||
905 | 913 | ||
906 | /* NOTE: we don't test MSI interrupts here, yet */ | 914 | /* NOTE: we don't test MSI interrupts here, yet */ |
907 | /* Hook up test interrupt handler just for this test */ | 915 | /* Hook up test interrupt handler just for this test */ |
908 | if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, | 916 | if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, |
909 | netdev->name, netdev)) | 917 | netdev)) |
910 | shared_int = FALSE; | 918 | shared_int = FALSE; |
911 | else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, | 919 | else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, |
912 | netdev->name, netdev)) { | 920 | netdev->name, netdev)) { |
913 | *data = 1; | 921 | *data = 1; |
914 | return -1; | 922 | return -1; |
915 | } | 923 | } |
@@ -925,6 +933,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) | |||
925 | 933 | ||
926 | if (adapter->hw.mac_type == e1000_ich8lan && i == 8) | 934 | if (adapter->hw.mac_type == e1000_ich8lan && i == 8) |
927 | continue; | 935 | continue; |
936 | |||
928 | /* Interrupt to test */ | 937 | /* Interrupt to test */ |
929 | mask = 1 << i; | 938 | mask = 1 << i; |
930 | 939 | ||
@@ -1674,7 +1683,7 @@ e1000_diag_test(struct net_device *netdev, | |||
1674 | if (e1000_link_test(adapter, &data[4])) | 1683 | if (e1000_link_test(adapter, &data[4])) |
1675 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1684 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1676 | 1685 | ||
1677 | /* Offline tests aren't run; pass by default */ | 1686 | /* Online tests aren't run; pass by default */ |
1678 | data[0] = 0; | 1687 | data[0] = 0; |
1679 | data[1] = 0; | 1688 | data[1] = 0; |
1680 | data[2] = 0; | 1689 | data[2] = 0; |
@@ -1717,6 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol | |||
1717 | retval = 0; | 1726 | retval = 0; |
1718 | break; | 1727 | break; |
1719 | case E1000_DEV_ID_82571EB_QUAD_COPPER: | 1728 | case E1000_DEV_ID_82571EB_QUAD_COPPER: |
1729 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: | ||
1720 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | 1730 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: |
1721 | /* quad port adapters only support WoL on port A */ | 1731 | /* quad port adapters only support WoL on port A */ |
1722 | if (!adapter->quad_port_a) { | 1732 | if (!adapter->quad_port_a) { |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 796c4f7d4260..3655d902b0bd 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -385,6 +385,7 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
385 | case E1000_DEV_ID_82571EB_FIBER: | 385 | case E1000_DEV_ID_82571EB_FIBER: |
386 | case E1000_DEV_ID_82571EB_SERDES: | 386 | case E1000_DEV_ID_82571EB_SERDES: |
387 | case E1000_DEV_ID_82571EB_QUAD_COPPER: | 387 | case E1000_DEV_ID_82571EB_QUAD_COPPER: |
388 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: | ||
388 | hw->mac_type = e1000_82571; | 389 | hw->mac_type = e1000_82571; |
389 | break; | 390 | break; |
390 | case E1000_DEV_ID_82572EI_COPPER: | 391 | case E1000_DEV_ID_82572EI_COPPER: |
@@ -408,6 +409,8 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
408 | case E1000_DEV_ID_ICH8_IGP_AMT: | 409 | case E1000_DEV_ID_ICH8_IGP_AMT: |
409 | case E1000_DEV_ID_ICH8_IGP_C: | 410 | case E1000_DEV_ID_ICH8_IGP_C: |
410 | case E1000_DEV_ID_ICH8_IFE: | 411 | case E1000_DEV_ID_ICH8_IFE: |
412 | case E1000_DEV_ID_ICH8_IFE_GT: | ||
413 | case E1000_DEV_ID_ICH8_IFE_G: | ||
411 | case E1000_DEV_ID_ICH8_IGP_M: | 414 | case E1000_DEV_ID_ICH8_IGP_M: |
412 | hw->mac_type = e1000_ich8lan; | 415 | hw->mac_type = e1000_ich8lan; |
413 | break; | 416 | break; |
@@ -2367,6 +2370,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2367 | 2370 | ||
2368 | /* Need to reset the PHY or these changes will be ignored */ | 2371 | /* Need to reset the PHY or these changes will be ignored */ |
2369 | mii_ctrl_reg |= MII_CR_RESET; | 2372 | mii_ctrl_reg |= MII_CR_RESET; |
2373 | |||
2370 | /* Disable MDI-X support for 10/100 */ | 2374 | /* Disable MDI-X support for 10/100 */ |
2371 | } else if (hw->phy_type == e1000_phy_ife) { | 2375 | } else if (hw->phy_type == e1000_phy_ife) { |
2372 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data); | 2376 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data); |
@@ -2379,6 +2383,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2379 | ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data); | 2383 | ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data); |
2380 | if (ret_val) | 2384 | if (ret_val) |
2381 | return ret_val; | 2385 | return ret_val; |
2386 | |||
2382 | } else { | 2387 | } else { |
2383 | /* Clear Auto-Crossover to force MDI manually. IGP requires MDI | 2388 | /* Clear Auto-Crossover to force MDI manually. IGP requires MDI |
2384 | * forced whenever speed or duplex are forced. | 2389 | * forced whenever speed or duplex are forced. |
@@ -3940,14 +3945,15 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw) | |||
3940 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | 3945 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | |
3941 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 3946 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
3942 | 3947 | ||
3943 | /* Write VR power-down enable */ | 3948 | /* Write VR power-down enable - bits 9:8 should be 10b */ |
3944 | e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); | 3949 | e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); |
3945 | e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data | | 3950 | phy_data |= (1 << 9); |
3946 | IGP3_VR_CTRL_MODE_SHUT); | 3951 | phy_data &= ~(1 << 8); |
3952 | e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data); | ||
3947 | 3953 | ||
3948 | /* Read it back and test */ | 3954 | /* Read it back and test */ |
3949 | e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); | 3955 | e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); |
3950 | if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry) | 3956 | if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry) |
3951 | break; | 3957 | break; |
3952 | 3958 | ||
3953 | /* Issue PHY reset and repeat at most one more time */ | 3959 | /* Issue PHY reset and repeat at most one more time */ |
@@ -4549,7 +4555,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4549 | case e1000_ich8lan: | 4555 | case e1000_ich8lan: |
4550 | { | 4556 | { |
4551 | int32_t i = 0; | 4557 | int32_t i = 0; |
4552 | uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG); | 4558 | uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG); |
4553 | 4559 | ||
4554 | eeprom->type = e1000_eeprom_ich8; | 4560 | eeprom->type = e1000_eeprom_ich8; |
4555 | eeprom->use_eerd = FALSE; | 4561 | eeprom->use_eerd = FALSE; |
@@ -4565,12 +4571,14 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4565 | } | 4571 | } |
4566 | } | 4572 | } |
4567 | 4573 | ||
4568 | hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) * | 4574 | hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) * |
4569 | ICH8_FLASH_SECTOR_SIZE; | 4575 | ICH_FLASH_SECTOR_SIZE; |
4576 | |||
4577 | hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1; | ||
4578 | hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK); | ||
4579 | |||
4580 | hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE; | ||
4570 | 4581 | ||
4571 | hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1; | ||
4572 | hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK); | ||
4573 | hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE; | ||
4574 | hw->flash_bank_size /= 2 * sizeof(uint16_t); | 4582 | hw->flash_bank_size /= 2 * sizeof(uint16_t); |
4575 | 4583 | ||
4576 | break; | 4584 | break; |
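For reference, the GFPREG arithmetic above can be read as: the low 13 bits give the first usable flash sector, bits 28:16 give the last, and the result is converted first to bytes and then to 16-bit words per bank (the region holds two NVM banks). A small userspace sketch with a made-up register value (assumptions as stated; not driver code):

#include <stdint.h>
#include <stdio.h>

#define ICH_GFPREG_BASE_MASK	0x1FFF
#define ICH_FLASH_SECTOR_SIZE	4096

int main(void)
{
	uint32_t gfpreg = 0x001F0001;	/* example: sectors 1..31 usable */

	/* byte offset of the NVM region inside the flash part */
	uint32_t flash_base_addr = (gfpreg & ICH_GFPREG_BASE_MASK) *
				   ICH_FLASH_SECTOR_SIZE;

	/* usable sector count -> bytes -> 16-bit words per bank */
	uint32_t flash_bank_size = ((gfpreg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
	flash_bank_size -= (gfpreg & ICH_GFPREG_BASE_MASK);
	flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
	flash_bank_size /= 2 * sizeof(uint16_t);

	printf("base = 0x%x bytes, bank = %u words\n",
	       (unsigned)flash_base_addr, (unsigned)flash_bank_size);
	return 0;
}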
@@ -5620,8 +5628,8 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5620 | * signature is valid. We want to do this after the write | 5628 | * signature is valid. We want to do this after the write |
5621 | * has completed so that we don't mark the segment valid | 5629 | * has completed so that we don't mark the segment valid |
5622 | * while the write is still in progress */ | 5630 | * while the write is still in progress */ |
5623 | if (i == E1000_ICH8_NVM_SIG_WORD) | 5631 | if (i == E1000_ICH_NVM_SIG_WORD) |
5624 | high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte; | 5632 | high_byte = E1000_ICH_NVM_SIG_MASK | high_byte; |
5625 | 5633 | ||
5626 | error = e1000_verify_write_ich8_byte(hw, | 5634 | error = e1000_verify_write_ich8_byte(hw, |
5627 | (i << 1) + new_bank_offset + 1, high_byte); | 5635 | (i << 1) + new_bank_offset + 1, high_byte); |
@@ -5643,18 +5651,18 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5643 | * erase as well since these bits are 11 to start with | 5651 | * erase as well since these bits are 11 to start with |
5644 | * and we need to change bit 14 to 0b */ | 5652 | * and we need to change bit 14 to 0b */ |
5645 | e1000_read_ich8_byte(hw, | 5653 | e1000_read_ich8_byte(hw, |
5646 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, | 5654 | E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, |
5647 | &high_byte); | 5655 | &high_byte); |
5648 | high_byte &= 0xBF; | 5656 | high_byte &= 0xBF; |
5649 | error = e1000_verify_write_ich8_byte(hw, | 5657 | error = e1000_verify_write_ich8_byte(hw, |
5650 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte); | 5658 | E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte); |
5651 | /* And invalidate the previously valid segment by setting | 5659 | /* And invalidate the previously valid segment by setting |
5652 | * its signature word (0x13) high_byte to 0b. This can be | 5660 | * its signature word (0x13) high_byte to 0b. This can be |
5653 | * done without an erase because flash erase sets all bits | 5661 | * done without an erase because flash erase sets all bits |
5654 | * to 1's. We can write 1's to 0's without an erase */ | 5662 | * to 1's. We can write 1's to 0's without an erase */ |
5655 | if (error == E1000_SUCCESS) { | 5663 | if (error == E1000_SUCCESS) { |
5656 | error = e1000_verify_write_ich8_byte(hw, | 5664 | error = e1000_verify_write_ich8_byte(hw, |
5657 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0); | 5665 | E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0); |
5658 | } | 5666 | } |
5659 | 5667 | ||
5660 | /* Clear the now not used entry in the cache */ | 5668 | /* Clear the now not used entry in the cache */ |
@@ -5841,6 +5849,7 @@ e1000_mta_set(struct e1000_hw *hw, | |||
5841 | hash_reg = (hash_value >> 5) & 0x7F; | 5849 | hash_reg = (hash_value >> 5) & 0x7F; |
5842 | if (hw->mac_type == e1000_ich8lan) | 5850 | if (hw->mac_type == e1000_ich8lan) |
5843 | hash_reg &= 0x1F; | 5851 | hash_reg &= 0x1F; |
5852 | |||
5844 | hash_bit = hash_value & 0x1F; | 5853 | hash_bit = hash_value & 0x1F; |
5845 | 5854 | ||
5846 | mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg); | 5855 | mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg); |
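The hunk above only adds whitespace in e1000_mta_set(), but the indexing it sits in is compact enough to sketch: the upper hash bits pick one of the 32-bit MTA registers (128 of them normally, only 32 on ICH8LAN, hence the extra mask), and the low five bits pick the bit within that register. Illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hash_value = 0x0ABC;	/* example hash of a multicast address */
	int is_ich8lan = 1;

	uint32_t hash_reg = (hash_value >> 5) & 0x7F;	/* 128 MTA registers */
	if (is_ich8lan)
		hash_reg &= 0x1F;			/* ICH8LAN has only 32 */

	uint32_t hash_bit = hash_value & 0x1F;		/* bit inside the register */

	printf("set MTA[%u] |= (1 << %u)\n", (unsigned)hash_reg, (unsigned)hash_bit);
	return 0;
}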
@@ -6026,6 +6035,7 @@ e1000_id_led_init(struct e1000_hw * hw) | |||
6026 | else | 6035 | else |
6027 | eeprom_data = ID_LED_DEFAULT; | 6036 | eeprom_data = ID_LED_DEFAULT; |
6028 | } | 6037 | } |
6038 | |||
6029 | for (i = 0; i < 4; i++) { | 6039 | for (i = 0; i < 4; i++) { |
6030 | temp = (eeprom_data >> (i << 2)) & led_mask; | 6040 | temp = (eeprom_data >> (i << 2)) & led_mask; |
6031 | switch (temp) { | 6041 | switch (temp) { |
@@ -8486,7 +8496,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw) | |||
8486 | 8496 | ||
8487 | DEBUGFUNC("e1000_ich8_cycle_init"); | 8497 | DEBUGFUNC("e1000_ich8_cycle_init"); |
8488 | 8498 | ||
8489 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | 8499 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); |
8490 | 8500 | ||
8491 | /* Maybe check the Flash Descriptor Valid bit in Hw status */ | 8501 |
8492 | if (hsfsts.hsf_status.fldesvalid == 0) { | 8502 | if (hsfsts.hsf_status.fldesvalid == 0) { |
@@ -8499,7 +8509,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw) | |||
8499 | hsfsts.hsf_status.flcerr = 1; | 8509 | hsfsts.hsf_status.flcerr = 1; |
8500 | hsfsts.hsf_status.dael = 1; | 8510 | hsfsts.hsf_status.dael = 1; |
8501 | 8511 | ||
8502 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | 8512 | E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); |
8503 | 8513 | ||
8504 | /* Either we should have a hardware SPI cycle in progress bit to check | 8514 | /* Either we should have a hardware SPI cycle in progress bit to check |
8505 | * against, in order to start a new cycle or FDONE bit should be changed | 8515 | * against, in order to start a new cycle or FDONE bit should be changed |
@@ -8514,13 +8524,13 @@ e1000_ich8_cycle_init(struct e1000_hw *hw) | |||
8514 | /* There is no cycle running at present, so we can start a cycle */ | 8524 | /* There is no cycle running at present, so we can start a cycle */ |
8515 | /* Begin by setting Flash Cycle Done. */ | 8525 | /* Begin by setting Flash Cycle Done. */ |
8516 | hsfsts.hsf_status.flcdone = 1; | 8526 | hsfsts.hsf_status.flcdone = 1; |
8517 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | 8527 | E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); |
8518 | error = E1000_SUCCESS; | 8528 | error = E1000_SUCCESS; |
8519 | } else { | 8529 | } else { |
8520 | /* otherwise poll for some time so the current cycle has a chance | 8530 |
8521 | * to end before giving up. */ | 8531 | * to end before giving up. */ |
8522 | for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) { | 8532 | for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) { |
8523 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | 8533 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); |
8524 | if (hsfsts.hsf_status.flcinprog == 0) { | 8534 | if (hsfsts.hsf_status.flcinprog == 0) { |
8525 | error = E1000_SUCCESS; | 8535 | error = E1000_SUCCESS; |
8526 | break; | 8536 | break; |
@@ -8531,7 +8541,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw) | |||
8531 | /* Successful in waiting for previous cycle to timeout, | 8541 | /* Successful in waiting for previous cycle to timeout, |
8532 | * now set the Flash Cycle Done. */ | 8542 | * now set the Flash Cycle Done. */ |
8533 | hsfsts.hsf_status.flcdone = 1; | 8543 | hsfsts.hsf_status.flcdone = 1; |
8534 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | 8544 | E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); |
8535 | } else { | 8545 | } else { |
8536 | DEBUGOUT("Flash controller busy, cannot get access"); | 8546 | DEBUGOUT("Flash controller busy, cannot get access"); |
8537 | } | 8547 | } |
@@ -8553,13 +8563,13 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) | |||
8553 | uint32_t i = 0; | 8563 | uint32_t i = 0; |
8554 | 8564 | ||
8555 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ | 8565 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ |
8556 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | 8566 | hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL); |
8557 | hsflctl.hsf_ctrl.flcgo = 1; | 8567 | hsflctl.hsf_ctrl.flcgo = 1; |
8558 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | 8568 | E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); |
8559 | 8569 | ||
8560 | /* wait till FDONE bit is set to 1 */ | 8570 | /* wait till FDONE bit is set to 1 */ |
8561 | do { | 8571 | do { |
8562 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | 8572 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); |
8563 | if (hsfsts.hsf_status.flcdone == 1) | 8573 | if (hsfsts.hsf_status.flcdone == 1) |
8564 | break; | 8574 | break; |
8565 | udelay(1); | 8575 | udelay(1); |
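The flash-cycle helper above is a plain kick-and-poll loop: set the go bit, then spin on the done bit with a 1 microsecond delay per pass until ICH_FLASH_COMMAND_TIMEOUT expires. A hypothetical userspace analogue (register access and udelay are stubbed; the FDONE bit position is chosen arbitrarily for the sketch):

#include <stdint.h>
#include <stdio.h>

#define FLCDONE_BIT			0x0001	/* arbitrary bit for this sketch */
#define ICH_FLASH_COMMAND_TIMEOUT	5000	/* microseconds, per the new define */

static uint16_t fake_hsfsts;
static uint16_t read_hsfsts(void) { return fake_hsfsts; }
static void udelay_stub(int us) { (void)us; fake_hsfsts |= FLCDONE_BIT; }

int main(void)
{
	uint32_t i;
	int done = 0;

	/* after writing FLCGO = 1, wait for FDONE or give up */
	for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
		if (read_hsfsts() & FLCDONE_BIT) {
			done = 1;
			break;
		}
		udelay_stub(1);		/* pretend FDONE latches after 1 us */
	}
	printf("flash cycle %s after %u us\n", done ? "completed" : "timed out",
	       (unsigned)i);
	return 0;
}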
@@ -8593,10 +8603,10 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | |||
8593 | DEBUGFUNC("e1000_read_ich8_data"); | 8603 | DEBUGFUNC("e1000_read_ich8_data"); |
8594 | 8604 | ||
8595 | if (size < 1 || size > 2 || data == 0x0 || | 8605 | if (size < 1 || size > 2 || data == 0x0 || |
8596 | index > ICH8_FLASH_LINEAR_ADDR_MASK) | 8606 | index > ICH_FLASH_LINEAR_ADDR_MASK) |
8597 | return error; | 8607 | return error; |
8598 | 8608 | ||
8599 | flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) + | 8609 | flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) + |
8600 | hw->flash_base_addr; | 8610 | hw->flash_base_addr; |
8601 | 8611 | ||
8602 | do { | 8612 | do { |
@@ -8606,25 +8616,25 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | |||
8606 | if (error != E1000_SUCCESS) | 8616 | if (error != E1000_SUCCESS) |
8607 | break; | 8617 | break; |
8608 | 8618 | ||
8609 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | 8619 | hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL); |
8610 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | 8620 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ |
8611 | hsflctl.hsf_ctrl.fldbcount = size - 1; | 8621 | hsflctl.hsf_ctrl.fldbcount = size - 1; |
8612 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ; | 8622 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; |
8613 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | 8623 | E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); |
8614 | 8624 | ||
8615 | /* Write the last 24 bits of index into Flash Linear address field in | 8625 | /* Write the last 24 bits of index into Flash Linear address field in |
8616 | * Flash Address */ | 8626 | * Flash Address */ |
8617 | /* TODO: TBD maybe check the index against the size of flash */ | 8627 | /* TODO: TBD maybe check the index against the size of flash */ |
8618 | 8628 | ||
8619 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | 8629 | E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address); |
8620 | 8630 | ||
8621 | error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT); | 8631 | error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT); |
8622 | 8632 | ||
8623 | /* Check if FCERR is set to 1, if set to 1, clear it and try the whole | 8633 | /* Check if FCERR is set to 1, if set to 1, clear it and try the whole |
8624 | * sequence a few more times, else read in (shift in) the Flash Data0, | 8634 | * sequence a few more times, else read in (shift in) the Flash Data0, |
8625 | * the order is least significant byte first msb to lsb */ | 8635 | * the order is least significant byte first msb to lsb */ |
8626 | if (error == E1000_SUCCESS) { | 8636 | if (error == E1000_SUCCESS) { |
8627 | flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0); | 8637 | flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0); |
8628 | if (size == 1) { | 8638 | if (size == 1) { |
8629 | *data = (uint8_t)(flash_data & 0x000000FF); | 8639 | *data = (uint8_t)(flash_data & 0x000000FF); |
8630 | } else if (size == 2) { | 8640 | } else if (size == 2) { |
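A tiny sketch of the byte extraction at the end of the read path above, where the full FDATA0 dword is narrowed to one or two bytes depending on the requested size (values are examples only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flash_data = 0xDEADBEEF;	/* example FDATA0 contents */

	uint8_t  byte_val = (uint8_t)(flash_data & 0x000000FF);	/* size == 1 */
	uint16_t word_val = (uint16_t)(flash_data & 0x0000FFFF);	/* size == 2 */

	printf("size 1 -> 0x%02X, size 2 -> 0x%04X\n", byte_val, word_val);
	return 0;
}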
@@ -8634,9 +8644,9 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | |||
8634 | } else { | 8644 | } else { |
8635 | /* If we've gotten here, then things are probably completely hosed, | 8645 | /* If we've gotten here, then things are probably completely hosed, |
8636 | * but if the error condition is detected, it won't hurt to give | 8646 | * but if the error condition is detected, it won't hurt to give |
8637 | * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times. | 8647 | * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times. |
8638 | */ | 8648 | */ |
8639 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | 8649 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); |
8640 | if (hsfsts.hsf_status.flcerr == 1) { | 8650 | if (hsfsts.hsf_status.flcerr == 1) { |
8641 | /* Repeat for some time before giving up. */ | 8651 | /* Repeat for some time before giving up. */ |
8642 | continue; | 8652 | continue; |
@@ -8645,7 +8655,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | |||
8645 | break; | 8655 | break; |
8646 | } | 8656 | } |
8647 | } | 8657 | } |
8648 | } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT); | 8658 | } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); |
8649 | 8659 | ||
8650 | return error; | 8660 | return error; |
8651 | } | 8661 | } |
@@ -8672,10 +8682,10 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, | |||
8672 | DEBUGFUNC("e1000_write_ich8_data"); | 8682 | DEBUGFUNC("e1000_write_ich8_data"); |
8673 | 8683 | ||
8674 | if (size < 1 || size > 2 || data > size * 0xff || | 8684 | if (size < 1 || size > 2 || data > size * 0xff || |
8675 | index > ICH8_FLASH_LINEAR_ADDR_MASK) | 8685 | index > ICH_FLASH_LINEAR_ADDR_MASK) |
8676 | return error; | 8686 | return error; |
8677 | 8687 | ||
8678 | flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) + | 8688 | flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) + |
8679 | hw->flash_base_addr; | 8689 | hw->flash_base_addr; |
8680 | 8690 | ||
8681 | do { | 8691 | do { |
@@ -8685,34 +8695,34 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, | |||
8685 | if (error != E1000_SUCCESS) | 8695 | if (error != E1000_SUCCESS) |
8686 | break; | 8696 | break; |
8687 | 8697 | ||
8688 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | 8698 | hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL); |
8689 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | 8699 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ |
8690 | hsflctl.hsf_ctrl.fldbcount = size -1; | 8700 | hsflctl.hsf_ctrl.fldbcount = size -1; |
8691 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE; | 8701 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; |
8692 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | 8702 | E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); |
8693 | 8703 | ||
8694 | /* Write the last 24 bits of index into Flash Linear address field in | 8704 | /* Write the last 24 bits of index into Flash Linear address field in |
8695 | * Flash Address */ | 8705 | * Flash Address */ |
8696 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | 8706 | E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address); |
8697 | 8707 | ||
8698 | if (size == 1) | 8708 | if (size == 1) |
8699 | flash_data = (uint32_t)data & 0x00FF; | 8709 | flash_data = (uint32_t)data & 0x00FF; |
8700 | else | 8710 | else |
8701 | flash_data = (uint32_t)data; | 8711 | flash_data = (uint32_t)data; |
8702 | 8712 | ||
8703 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data); | 8713 | E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); |
8704 | 8714 | ||
8705 | /* check if FCERR is set to 1 , if set to 1, clear it and try the whole | 8715 | /* check if FCERR is set to 1 , if set to 1, clear it and try the whole |
8706 | * sequence a few more times else done */ | 8716 | * sequence a few more times else done */ |
8707 | error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT); | 8717 | error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT); |
8708 | if (error == E1000_SUCCESS) { | 8718 | if (error == E1000_SUCCESS) { |
8709 | break; | 8719 | break; |
8710 | } else { | 8720 | } else { |
8711 | /* If we're here, then things are most likely completely hosed, | 8721 | /* If we're here, then things are most likely completely hosed, |
8712 | * but if the error condition is detected, it won't hurt to give | 8722 | * but if the error condition is detected, it won't hurt to give |
8713 | * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times. | 8723 | * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times. |
8714 | */ | 8724 | */ |
8715 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | 8725 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); |
8716 | if (hsfsts.hsf_status.flcerr == 1) { | 8726 | if (hsfsts.hsf_status.flcerr == 1) { |
8717 | /* Repeat for some time before giving up. */ | 8727 | /* Repeat for some time before giving up. */ |
8718 | continue; | 8728 | continue; |
@@ -8721,7 +8731,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, | |||
8721 | break; | 8731 | break; |
8722 | } | 8732 | } |
8723 | } | 8733 | } |
8724 | } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT); | 8734 | } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); |
8725 | 8735 | ||
8726 | return error; | 8736 | return error; |
8727 | } | 8737 | } |
@@ -8840,7 +8850,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) | |||
8840 | int32_t j = 0; | 8850 | int32_t j = 0; |
8841 | int32_t error_flag = 0; | 8851 | int32_t error_flag = 0; |
8842 | 8852 | ||
8843 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | 8853 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); |
8844 | 8854 | ||
8845 | /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */ | 8855 | /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */ |
8846 | /* 00: The Hw sector is 256 bytes, hence we need to erase 16 | 8856 | /* 00: The Hw sector is 256 bytes, hence we need to erase 16 |
@@ -8853,19 +8863,14 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) | |||
8853 | * 11: The Hw sector size is 64K bytes */ | 8863 | * 11: The Hw sector size is 64K bytes */ |
8854 | if (hsfsts.hsf_status.berasesz == 0x0) { | 8864 | if (hsfsts.hsf_status.berasesz == 0x0) { |
8855 | /* Hw sector size 256 */ | 8865 | /* Hw sector size 256 */ |
8856 | sub_sector_size = ICH8_FLASH_SEG_SIZE_256; | 8866 | sub_sector_size = ICH_FLASH_SEG_SIZE_256; |
8857 | bank_size = ICH8_FLASH_SECTOR_SIZE; | 8867 | bank_size = ICH_FLASH_SECTOR_SIZE; |
8858 | iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256; | 8868 | iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256; |
8859 | } else if (hsfsts.hsf_status.berasesz == 0x1) { | 8869 | } else if (hsfsts.hsf_status.berasesz == 0x1) { |
8860 | bank_size = ICH8_FLASH_SEG_SIZE_4K; | 8870 | bank_size = ICH_FLASH_SEG_SIZE_4K; |
8861 | iteration = 1; | ||
8862 | } else if (hw->mac_type != e1000_ich8lan && | ||
8863 | hsfsts.hsf_status.berasesz == 0x2) { | ||
8864 | /* 8K erase size invalid for ICH8 - added in for ICH9 */ | ||
8865 | bank_size = ICH9_FLASH_SEG_SIZE_8K; | ||
8866 | iteration = 1; | 8871 | iteration = 1; |
8867 | } else if (hsfsts.hsf_status.berasesz == 0x3) { | 8872 | } else if (hsfsts.hsf_status.berasesz == 0x3) { |
8868 | bank_size = ICH8_FLASH_SEG_SIZE_64K; | 8873 | bank_size = ICH_FLASH_SEG_SIZE_64K; |
8869 | iteration = 1; | 8874 | iteration = 1; |
8870 | } else { | 8875 | } else { |
8871 | return error; | 8876 | return error; |
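The erase path sizes its loop from the BERASE field of HSFSTS; the hunk above also drops the ICH9-only 8 KB case. A standalone sketch of the mapping as it reads after the change (constants taken from the renamed defines in this patch):

#include <stdint.h>
#include <stdio.h>

#define ICH_FLASH_SEG_SIZE_256	  256
#define ICH_FLASH_SEG_SIZE_4K	 4096
#define ICH_FLASH_SEG_SIZE_64K	65536
#define ICH_FLASH_SECTOR_SIZE	 4096

int main(void)
{
	unsigned berasesz = 0x0;	/* example BERASE field value */
	uint32_t sub_sector_size = 0, bank_size = 0, iteration = 0;

	switch (berasesz) {
	case 0x0:	/* 256-byte hw sectors: 16 erases cover a 4K bank */
		sub_sector_size = ICH_FLASH_SEG_SIZE_256;
		bank_size = ICH_FLASH_SECTOR_SIZE;
		iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
		break;
	case 0x1:	/* 4K hw sectors: one erase per bank */
		bank_size = ICH_FLASH_SEG_SIZE_4K;
		iteration = 1;
		break;
	case 0x3:	/* 64K hw sectors */
		bank_size = ICH_FLASH_SEG_SIZE_64K;
		iteration = 1;
		break;
	default:	/* 0x2 (8K) is not valid for ICH8 and is rejected */
		break;
	}

	printf("bank = %u bytes, %u erase step(s) of %u bytes\n",
	       (unsigned)bank_size, (unsigned)iteration, (unsigned)sub_sector_size);
	return 0;
}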
@@ -8883,9 +8888,9 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) | |||
8883 | 8888 | ||
8884 | /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash | 8889 | /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash |
8885 | * Control */ | 8890 | * Control */ |
8886 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | 8891 | hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL); |
8887 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE; | 8892 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; |
8888 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | 8893 | E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); |
8889 | 8894 | ||
8890 | /* Write the last 24 bits of an index within the block into Flash | 8895 | /* Write the last 24 bits of an index within the block into Flash |
8891 | * Linear address field in Flash Address. This probably needs to | 8896 | * Linear address field in Flash Address. This probably needs to |
@@ -8893,17 +8898,17 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) | |||
8893 | * the software bank size (4, 8 or 64 KBytes) */ | 8898 | * the software bank size (4, 8 or 64 KBytes) */ |
8894 | flash_linear_address = bank * bank_size + j * sub_sector_size; | 8899 | flash_linear_address = bank * bank_size + j * sub_sector_size; |
8895 | flash_linear_address += hw->flash_base_addr; | 8900 | flash_linear_address += hw->flash_base_addr; |
8896 | flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK; | 8901 | flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK; |
8897 | 8902 | ||
8898 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | 8903 | E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address); |
8899 | 8904 | ||
8900 | error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_ERASE_TIMEOUT); | 8905 | error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT); |
8901 | /* Check if FCERR is set to 1. If 1, clear it and try the whole | 8906 | /* Check if FCERR is set to 1. If 1, clear it and try the whole |
8902 | * sequence a few more times else Done */ | 8907 | * sequence a few more times else Done */ |
8903 | if (error == E1000_SUCCESS) { | 8908 | if (error == E1000_SUCCESS) { |
8904 | break; | 8909 | break; |
8905 | } else { | 8910 | } else { |
8906 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | 8911 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); |
8907 | if (hsfsts.hsf_status.flcerr == 1) { | 8912 | if (hsfsts.hsf_status.flcerr == 1) { |
8908 | /* repeat for some time before giving up */ | 8913 | /* repeat for some time before giving up */ |
8909 | continue; | 8914 | continue; |
@@ -8912,7 +8917,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) | |||
8912 | break; | 8917 | break; |
8913 | } | 8918 | } |
8914 | } | 8919 | } |
8915 | } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag); | 8920 | } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag); |
8916 | if (error_flag == 1) | 8921 | if (error_flag == 1) |
8917 | break; | 8922 | break; |
8918 | } | 8923 | } |
@@ -9013,5 +9018,3 @@ e1000_init_lcd_from_nvm(struct e1000_hw *hw) | |||
9013 | return E1000_SUCCESS; | 9018 | return E1000_SUCCESS; |
9014 | } | 9019 | } |
9015 | 9020 | ||
9016 | |||
9017 | |||
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 449a60303e07..3321fb13bfa9 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -128,11 +128,13 @@ typedef enum { | |||
128 | /* PCI bus widths */ | 128 | /* PCI bus widths */ |
129 | typedef enum { | 129 | typedef enum { |
130 | e1000_bus_width_unknown = 0, | 130 | e1000_bus_width_unknown = 0, |
131 | /* These PCIe values should literally match the possible return values | ||
132 | * from config space */ | ||
133 | e1000_bus_width_pciex_1 = 1, | ||
134 | e1000_bus_width_pciex_2 = 2, | ||
135 | e1000_bus_width_pciex_4 = 4, | ||
131 | e1000_bus_width_32, | 136 | e1000_bus_width_32, |
132 | e1000_bus_width_64, | 137 | e1000_bus_width_64, |
133 | e1000_bus_width_pciex_1, | ||
134 | e1000_bus_width_pciex_2, | ||
135 | e1000_bus_width_pciex_4, | ||
136 | e1000_bus_width_reserved | 138 | e1000_bus_width_reserved |
137 | } e1000_bus_width; | 139 | } e1000_bus_width; |
138 | 140 | ||
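The enum reshuffle above pins the PCIe width constants to the literal lane counts reported by configuration space, so the negotiated width can be stored without a translation table. A hedged sketch of why that helps (the bits 9:4 negotiated-link-width field is standard PCIe; everything else here is illustrative):

#include <stdint.h>
#include <stdio.h>

typedef enum {
	e1000_bus_width_unknown = 0,
	e1000_bus_width_pciex_1 = 1,	/* literally the lane count */
	e1000_bus_width_pciex_2 = 2,
	e1000_bus_width_pciex_4 = 4,
	e1000_bus_width_32,
	e1000_bus_width_64,
	e1000_bus_width_reserved
} e1000_bus_width;

int main(void)
{
	uint16_t link_status = 0x0041;			/* example Link Status word */
	unsigned lanes = (link_status >> 4) & 0x3F;	/* negotiated link width */

	/* no lookup needed: the field value is the enum value */
	e1000_bus_width width = (e1000_bus_width)lanes;
	printf("negotiated x%u -> enum value %d\n", lanes, (int)width);
	return 0;
}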
@@ -326,6 +328,7 @@ int32_t e1000_phy_hw_reset(struct e1000_hw *hw); | |||
326 | int32_t e1000_phy_reset(struct e1000_hw *hw); | 328 | int32_t e1000_phy_reset(struct e1000_hw *hw); |
327 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 329 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); |
328 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); | 330 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); |
331 | |||
329 | void e1000_phy_powerdown_workaround(struct e1000_hw *hw); | 332 | void e1000_phy_powerdown_workaround(struct e1000_hw *hw); |
330 | 333 | ||
331 | /* EEPROM Functions */ | 334 | /* EEPROM Functions */ |
@@ -390,7 +393,6 @@ int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, | |||
390 | uint16_t length); | 393 | uint16_t length); |
391 | boolean_t e1000_check_mng_mode(struct e1000_hw *hw); | 394 | boolean_t e1000_check_mng_mode(struct e1000_hw *hw); |
392 | boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); | 395 | boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); |
393 | |||
394 | int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); | 396 | int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); |
395 | int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); | 397 | int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); |
396 | int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); | 398 | int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); |
@@ -473,6 +475,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
473 | #define E1000_DEV_ID_82571EB_FIBER 0x105F | 475 | #define E1000_DEV_ID_82571EB_FIBER 0x105F |
474 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 | 476 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 |
475 | #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 | 477 | #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 |
478 | #define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC | ||
476 | #define E1000_DEV_ID_82572EI_COPPER 0x107D | 479 | #define E1000_DEV_ID_82572EI_COPPER 0x107D |
477 | #define E1000_DEV_ID_82572EI_FIBER 0x107E | 480 | #define E1000_DEV_ID_82572EI_FIBER 0x107E |
478 | #define E1000_DEV_ID_82572EI_SERDES 0x107F | 481 | #define E1000_DEV_ID_82572EI_SERDES 0x107F |
@@ -490,6 +493,8 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
490 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A | 493 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A |
491 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B | 494 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B |
492 | #define E1000_DEV_ID_ICH8_IFE 0x104C | 495 | #define E1000_DEV_ID_ICH8_IFE 0x104C |
496 | #define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 | ||
497 | #define E1000_DEV_ID_ICH8_IFE_G 0x10C5 | ||
493 | #define E1000_DEV_ID_ICH8_IGP_M 0x104D | 498 | #define E1000_DEV_ID_ICH8_IGP_M 0x104D |
494 | 499 | ||
495 | 500 | ||
@@ -576,6 +581,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
576 | * E1000_RAR_ENTRIES - 1 multicast addresses. | 581 | * E1000_RAR_ENTRIES - 1 multicast addresses. |
577 | */ | 582 | */ |
578 | #define E1000_RAR_ENTRIES 15 | 583 | #define E1000_RAR_ENTRIES 15 |
584 | |||
579 | #define E1000_RAR_ENTRIES_ICH8LAN 6 | 585 | #define E1000_RAR_ENTRIES_ICH8LAN 6 |
580 | 586 | ||
581 | #define MIN_NUMBER_OF_DESCRIPTORS 8 | 587 | #define MIN_NUMBER_OF_DESCRIPTORS 8 |
@@ -1335,9 +1341,9 @@ struct e1000_hw_stats { | |||
1335 | uint64_t gotch; | 1341 | uint64_t gotch; |
1336 | uint64_t rnbc; | 1342 | uint64_t rnbc; |
1337 | uint64_t ruc; | 1343 | uint64_t ruc; |
1344 | uint64_t rfc; | ||
1338 | uint64_t roc; | 1345 | uint64_t roc; |
1339 | uint64_t rlerrc; | 1346 | uint64_t rlerrc; |
1340 | uint64_t rfc; | ||
1341 | uint64_t rjc; | 1347 | uint64_t rjc; |
1342 | uint64_t mgprc; | 1348 | uint64_t mgprc; |
1343 | uint64_t mgpdc; | 1349 | uint64_t mgpdc; |
@@ -1577,8 +1583,8 @@ struct e1000_hw { | |||
1577 | #define E1000_HICR_FW_RESET 0xC0 | 1583 | #define E1000_HICR_FW_RESET 0xC0 |
1578 | 1584 | ||
1579 | #define E1000_SHADOW_RAM_WORDS 2048 | 1585 | #define E1000_SHADOW_RAM_WORDS 2048 |
1580 | #define E1000_ICH8_NVM_SIG_WORD 0x13 | 1586 | #define E1000_ICH_NVM_SIG_WORD 0x13 |
1581 | #define E1000_ICH8_NVM_SIG_MASK 0xC0 | 1587 | #define E1000_ICH_NVM_SIG_MASK 0xC0 |
1582 | 1588 | ||
1583 | /* EEPROM Read */ | 1589 | /* EEPROM Read */ |
1584 | #define E1000_EERD_START 0x00000001 /* Start Read */ | 1590 | #define E1000_EERD_START 0x00000001 /* Start Read */ |
@@ -3172,6 +3178,7 @@ struct e1000_host_command_info { | |||
3172 | #define IGP3_VR_CTRL \ | 3178 | #define IGP3_VR_CTRL \ |
3173 | PHY_REG(776, 18) /* Voltage regulator control register */ | 3179 | PHY_REG(776, 18) /* Voltage regulator control register */ |
3174 | #define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ | 3180 | #define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ |
3181 | #define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ | ||
3175 | 3182 | ||
3176 | #define IGP3_CAPABILITY \ | 3183 | #define IGP3_CAPABILITY \ |
3177 | PHY_REG(776, 19) /* IGP3 Capability Register */ | 3184 | PHY_REG(776, 19) /* IGP3 Capability Register */ |
@@ -3256,41 +3263,40 @@ struct e1000_host_command_info { | |||
3256 | #define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ | 3263 | #define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ |
3257 | #define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ | 3264 | #define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ |
3258 | 3265 | ||
3259 | #define ICH8_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ | 3266 | #define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ |
3260 | #define ICH8_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ | 3267 | #define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ |
3261 | #define ICH8_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ | 3268 | #define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ |
3262 | #define ICH8_FLASH_SEG_SIZE_256 256 | 3269 | #define ICH_FLASH_SEG_SIZE_256 256 |
3263 | #define ICH8_FLASH_SEG_SIZE_4K 4096 | 3270 | #define ICH_FLASH_SEG_SIZE_4K 4096 |
3264 | #define ICH9_FLASH_SEG_SIZE_8K 8192 | 3271 | #define ICH_FLASH_SEG_SIZE_64K 65536 |
3265 | #define ICH8_FLASH_SEG_SIZE_64K 65536 | 3272 | |
3266 | 3273 | #define ICH_CYCLE_READ 0x0 | |
3267 | #define ICH8_CYCLE_READ 0x0 | 3274 | #define ICH_CYCLE_RESERVED 0x1 |
3268 | #define ICH8_CYCLE_RESERVED 0x1 | 3275 | #define ICH_CYCLE_WRITE 0x2 |
3269 | #define ICH8_CYCLE_WRITE 0x2 | 3276 | #define ICH_CYCLE_ERASE 0x3 |
3270 | #define ICH8_CYCLE_ERASE 0x3 | 3277 | |
3271 | 3278 | #define ICH_FLASH_GFPREG 0x0000 | |
3272 | #define ICH8_FLASH_GFPREG 0x0000 | 3279 | #define ICH_FLASH_HSFSTS 0x0004 |
3273 | #define ICH8_FLASH_HSFSTS 0x0004 | 3280 | #define ICH_FLASH_HSFCTL 0x0006 |
3274 | #define ICH8_FLASH_HSFCTL 0x0006 | 3281 | #define ICH_FLASH_FADDR 0x0008 |
3275 | #define ICH8_FLASH_FADDR 0x0008 | 3282 | #define ICH_FLASH_FDATA0 0x0010 |
3276 | #define ICH8_FLASH_FDATA0 0x0010 | 3283 | #define ICH_FLASH_FRACC 0x0050 |
3277 | #define ICH8_FLASH_FRACC 0x0050 | 3284 | #define ICH_FLASH_FREG0 0x0054 |
3278 | #define ICH8_FLASH_FREG0 0x0054 | 3285 | #define ICH_FLASH_FREG1 0x0058 |
3279 | #define ICH8_FLASH_FREG1 0x0058 | 3286 | #define ICH_FLASH_FREG2 0x005C |
3280 | #define ICH8_FLASH_FREG2 0x005C | 3287 | #define ICH_FLASH_FREG3 0x0060 |
3281 | #define ICH8_FLASH_FREG3 0x0060 | 3288 | #define ICH_FLASH_FPR0 0x0074 |
3282 | #define ICH8_FLASH_FPR0 0x0074 | 3289 | #define ICH_FLASH_FPR1 0x0078 |
3283 | #define ICH8_FLASH_FPR1 0x0078 | 3290 | #define ICH_FLASH_SSFSTS 0x0090 |
3284 | #define ICH8_FLASH_SSFSTS 0x0090 | 3291 | #define ICH_FLASH_SSFCTL 0x0092 |
3285 | #define ICH8_FLASH_SSFCTL 0x0092 | 3292 | #define ICH_FLASH_PREOP 0x0094 |
3286 | #define ICH8_FLASH_PREOP 0x0094 | 3293 | #define ICH_FLASH_OPTYPE 0x0096 |
3287 | #define ICH8_FLASH_OPTYPE 0x0096 | 3294 | #define ICH_FLASH_OPMENU 0x0098 |
3288 | #define ICH8_FLASH_OPMENU 0x0098 | 3295 | |
3289 | 3296 | #define ICH_FLASH_REG_MAPSIZE 0x00A0 | |
3290 | #define ICH8_FLASH_REG_MAPSIZE 0x00A0 | 3297 | #define ICH_FLASH_SECTOR_SIZE 4096 |
3291 | #define ICH8_FLASH_SECTOR_SIZE 4096 | 3298 | #define ICH_GFPREG_BASE_MASK 0x1FFF |
3292 | #define ICH8_GFPREG_BASE_MASK 0x1FFF | 3299 | #define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF |
3293 | #define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF | ||
3294 | 3300 | ||
3295 | /* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ | 3301 | /* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ |
3296 | /* Offset 04h HSFSTS */ | 3302 | /* Offset 04h HSFSTS */ |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 726ec5e88ab2..7a0828869ecf 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -27,6 +27,7 @@ | |||
27 | *******************************************************************************/ | 27 | *******************************************************************************/ |
28 | 28 | ||
29 | #include "e1000.h" | 29 | #include "e1000.h" |
30 | #include <net/ip6_checksum.h> | ||
30 | 31 | ||
31 | char e1000_driver_name[] = "e1000"; | 32 | char e1000_driver_name[] = "e1000"; |
32 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | 33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; |
@@ -35,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |||
35 | #else | 36 | #else |
36 | #define DRIVERNAPI "-NAPI" | 37 | #define DRIVERNAPI "-NAPI" |
37 | #endif | 38 | #endif |
38 | #define DRV_VERSION "7.2.9-k4"DRIVERNAPI | 39 | #define DRV_VERSION "7.3.15-k2"DRIVERNAPI |
39 | char e1000_driver_version[] = DRV_VERSION; | 40 | char e1000_driver_version[] = DRV_VERSION; |
40 | static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; | 41 | static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
41 | 42 | ||
@@ -103,6 +104,9 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
103 | INTEL_E1000_ETHERNET_DEVICE(0x10B9), | 104 | INTEL_E1000_ETHERNET_DEVICE(0x10B9), |
104 | INTEL_E1000_ETHERNET_DEVICE(0x10BA), | 105 | INTEL_E1000_ETHERNET_DEVICE(0x10BA), |
105 | INTEL_E1000_ETHERNET_DEVICE(0x10BB), | 106 | INTEL_E1000_ETHERNET_DEVICE(0x10BB), |
107 | INTEL_E1000_ETHERNET_DEVICE(0x10BC), | ||
108 | INTEL_E1000_ETHERNET_DEVICE(0x10C4), | ||
109 | INTEL_E1000_ETHERNET_DEVICE(0x10C5), | ||
106 | /* required last entry */ | 110 | /* required last entry */ |
107 | {0,} | 111 | {0,} |
108 | }; | 112 | }; |
@@ -154,6 +158,9 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev); | |||
154 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu); | 158 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu); |
155 | static int e1000_set_mac(struct net_device *netdev, void *p); | 159 | static int e1000_set_mac(struct net_device *netdev, void *p); |
156 | static irqreturn_t e1000_intr(int irq, void *data); | 160 | static irqreturn_t e1000_intr(int irq, void *data); |
161 | #ifdef CONFIG_PCI_MSI | ||
162 | static irqreturn_t e1000_intr_msi(int irq, void *data); | ||
163 | #endif | ||
157 | static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, | 164 | static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, |
158 | struct e1000_tx_ring *tx_ring); | 165 | struct e1000_tx_ring *tx_ring); |
159 | #ifdef CONFIG_E1000_NAPI | 166 | #ifdef CONFIG_E1000_NAPI |
@@ -285,7 +292,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
285 | 292 | ||
286 | flags = IRQF_SHARED; | 293 | flags = IRQF_SHARED; |
287 | #ifdef CONFIG_PCI_MSI | 294 | #ifdef CONFIG_PCI_MSI |
288 | if (adapter->hw.mac_type > e1000_82547_rev_2) { | 295 | if (adapter->hw.mac_type >= e1000_82571) { |
289 | adapter->have_msi = TRUE; | 296 | adapter->have_msi = TRUE; |
290 | if ((err = pci_enable_msi(adapter->pdev))) { | 297 | if ((err = pci_enable_msi(adapter->pdev))) { |
291 | DPRINTK(PROBE, ERR, | 298 | DPRINTK(PROBE, ERR, |
@@ -293,8 +300,14 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
293 | adapter->have_msi = FALSE; | 300 | adapter->have_msi = FALSE; |
294 | } | 301 | } |
295 | } | 302 | } |
296 | if (adapter->have_msi) | 303 | if (adapter->have_msi) { |
297 | flags &= ~IRQF_SHARED; | 304 | flags &= ~IRQF_SHARED; |
305 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags, | ||
306 | netdev->name, netdev); | ||
307 | if (err) | ||
308 | DPRINTK(PROBE, ERR, | ||
309 | "Unable to allocate interrupt Error: %d\n", err); | ||
310 | } else | ||
298 | #endif | 311 | #endif |
299 | if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags, | 312 | if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags, |
300 | netdev->name, netdev))) | 313 | netdev->name, netdev))) |
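The hunk above enables MSI on 82571 and newer and registers a dedicated MSI handler, falling back to the shared legacy handler if either step fails. A simplified sketch of that control flow with the PCI/IRQ calls stubbed out (not the driver code):

#include <stdio.h>

/* stand-ins for pci_enable_msi()/request_irq(); both return 0 on success */
static int pci_enable_msi_stub(void) { return 0; }
static int request_irq_stub(const char *handler, int shared)
{
	printf("request_irq: handler=%s shared=%d\n", handler, shared);
	return 0;
}

int main(void)
{
	int have_msi = 0;
	int shared = 1;			/* legacy INTx lines may be shared */

	if (pci_enable_msi_stub() == 0)
		have_msi = 1;		/* only attempted on newer MACs */

	if (have_msi) {
		shared = 0;		/* an MSI vector is never shared */
		if (request_irq_stub("e1000_intr_msi", shared))
			printf("MSI request failed\n");
	} else if (request_irq_stub("e1000_intr", shared)) {
		printf("legacy IRQ request failed\n");
	}
	return 0;
}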
@@ -375,7 +388,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
375 | * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. | 388 | * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. |
376 | * For ASF and Pass Through versions of f/w this means that the | 389 | * For ASF and Pass Through versions of f/w this means that the |
377 | * driver is no longer loaded. For AMT version (only with 82573) i | 390 | * driver is no longer loaded. For AMT version (only with 82573) i |
378 | * of the f/w this means that the netowrk i/f is closed. | 391 | * of the f/w this means that the network i/f is closed. |
379 | * | 392 | * |
380 | **/ | 393 | **/ |
381 | 394 | ||
@@ -416,7 +429,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
416 | * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. | 429 | * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. |
417 | * For ASF and Pass Through versions of f/w this means that | 430 | * For ASF and Pass Through versions of f/w this means that |
418 | * the driver is loaded. For AMT version (only with 82573) | 431 | * the driver is loaded. For AMT version (only with 82573) |
419 | * of the f/w this means that the netowrk i/f is open. | 432 | * of the f/w this means that the network i/f is open. |
420 | * | 433 | * |
421 | **/ | 434 | **/ |
422 | 435 | ||
@@ -426,6 +439,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter) | |||
426 | uint32_t ctrl_ext; | 439 | uint32_t ctrl_ext; |
427 | uint32_t swsm; | 440 | uint32_t swsm; |
428 | uint32_t extcnf; | 441 | uint32_t extcnf; |
442 | |||
429 | /* Let firmware know the driver has taken over */ | 443 | /* Let firmware know the driver has taken over */ |
430 | switch (adapter->hw.mac_type) { | 444 | switch (adapter->hw.mac_type) { |
431 | case e1000_82571: | 445 | case e1000_82571: |
@@ -601,9 +615,6 @@ void | |||
601 | e1000_reset(struct e1000_adapter *adapter) | 615 | e1000_reset(struct e1000_adapter *adapter) |
602 | { | 616 | { |
603 | uint32_t pba, manc; | 617 | uint32_t pba, manc; |
604 | #ifdef DISABLE_MULR | ||
605 | uint32_t tctl; | ||
606 | #endif | ||
607 | uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; | 618 | uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; |
608 | 619 | ||
609 | /* Repartition Pba for greater than 9k mtu | 620 | /* Repartition Pba for greater than 9k mtu |
@@ -670,12 +681,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
670 | e1000_reset_hw(&adapter->hw); | 681 | e1000_reset_hw(&adapter->hw); |
671 | if (adapter->hw.mac_type >= e1000_82544) | 682 | if (adapter->hw.mac_type >= e1000_82544) |
672 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 683 | E1000_WRITE_REG(&adapter->hw, WUC, 0); |
673 | #ifdef DISABLE_MULR | ||
674 | /* disable Multiple Reads in Transmit Control Register for debugging */ | ||
675 | tctl = E1000_READ_REG(hw, TCTL); | ||
676 | E1000_WRITE_REG(hw, TCTL, tctl & ~E1000_TCTL_MULR); | ||
677 | 684 | ||
678 | #endif | ||
679 | if (e1000_init_hw(&adapter->hw)) | 685 | if (e1000_init_hw(&adapter->hw)) |
680 | DPRINTK(PROBE, ERR, "Hardware Error\n"); | 686 | DPRINTK(PROBE, ERR, "Hardware Error\n"); |
681 | e1000_update_mng_vlan(adapter); | 687 | e1000_update_mng_vlan(adapter); |
@@ -851,9 +857,9 @@ e1000_probe(struct pci_dev *pdev, | |||
851 | (adapter->hw.mac_type != e1000_82547)) | 857 | (adapter->hw.mac_type != e1000_82547)) |
852 | netdev->features |= NETIF_F_TSO; | 858 | netdev->features |= NETIF_F_TSO; |
853 | 859 | ||
854 | #ifdef NETIF_F_TSO_IPV6 | 860 | #ifdef NETIF_F_TSO6 |
855 | if (adapter->hw.mac_type > e1000_82547_rev_2) | 861 | if (adapter->hw.mac_type > e1000_82547_rev_2) |
856 | netdev->features |= NETIF_F_TSO_IPV6; | 862 | netdev->features |= NETIF_F_TSO6; |
857 | #endif | 863 | #endif |
858 | #endif | 864 | #endif |
859 | if (pci_using_dac) | 865 | if (pci_using_dac) |
@@ -968,6 +974,7 @@ e1000_probe(struct pci_dev *pdev, | |||
968 | break; | 974 | break; |
969 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | 975 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: |
970 | case E1000_DEV_ID_82571EB_QUAD_COPPER: | 976 | case E1000_DEV_ID_82571EB_QUAD_COPPER: |
977 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: | ||
971 | /* if quad port adapter, disable WoL on all but port A */ | 978 | /* if quad port adapter, disable WoL on all but port A */ |
972 | if (global_quad_port_a != 0) | 979 | if (global_quad_port_a != 0) |
973 | adapter->eeprom_wol = 0; | 980 | adapter->eeprom_wol = 0; |
@@ -1279,12 +1286,10 @@ e1000_open(struct net_device *netdev) | |||
1279 | return -EBUSY; | 1286 | return -EBUSY; |
1280 | 1287 | ||
1281 | /* allocate transmit descriptors */ | 1288 | /* allocate transmit descriptors */ |
1282 | |||
1283 | if ((err = e1000_setup_all_tx_resources(adapter))) | 1289 | if ((err = e1000_setup_all_tx_resources(adapter))) |
1284 | goto err_setup_tx; | 1290 | goto err_setup_tx; |
1285 | 1291 | ||
1286 | /* allocate receive descriptors */ | 1292 | /* allocate receive descriptors */ |
1287 | |||
1288 | if ((err = e1000_setup_all_rx_resources(adapter))) | 1293 | if ((err = e1000_setup_all_rx_resources(adapter))) |
1289 | goto err_setup_rx; | 1294 | goto err_setup_rx; |
1290 | 1295 | ||
@@ -1569,6 +1574,8 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1569 | 1574 | ||
1570 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { | 1575 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { |
1571 | tarc = E1000_READ_REG(hw, TARC0); | 1576 | tarc = E1000_READ_REG(hw, TARC0); |
1577 | /* set the speed mode bit, we'll clear it if we're not at | ||
1578 | * gigabit link later */ | ||
1572 | tarc |= (1 << 21); | 1579 | tarc |= (1 << 21); |
1573 | E1000_WRITE_REG(hw, TARC0, tarc); | 1580 | E1000_WRITE_REG(hw, TARC0, tarc); |
1574 | } else if (hw->mac_type == e1000_80003es2lan) { | 1581 | } else if (hw->mac_type == e1000_80003es2lan) { |
@@ -1583,8 +1590,11 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1583 | e1000_config_collision_dist(hw); | 1590 | e1000_config_collision_dist(hw); |
1584 | 1591 | ||
1585 | /* Setup Transmit Descriptor Settings for eop descriptor */ | 1592 | /* Setup Transmit Descriptor Settings for eop descriptor */ |
1586 | adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP | | 1593 | adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; |
1587 | E1000_TXD_CMD_IFCS; | 1594 | |
1595 | /* only set IDE if we are delaying interrupts using the timers */ | ||
1596 | if (adapter->tx_int_delay) | ||
1597 | adapter->txd_cmd |= E1000_TXD_CMD_IDE; | ||
1588 | 1598 | ||
1589 | if (hw->mac_type < e1000_82543) | 1599 | if (hw->mac_type < e1000_82543) |
1590 | adapter->txd_cmd |= E1000_TXD_CMD_RPS; | 1600 | adapter->txd_cmd |= E1000_TXD_CMD_RPS; |
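A tiny sketch of the descriptor-command change above: the interrupt-delay-enable bit is only worth setting when a transmit interrupt delay is actually configured (the bit values below are placeholders for the sketch, not asserted to be the hardware encoding):

#include <stdint.h>
#include <stdio.h>

#define TXD_CMD_EOP	0x01000000u	/* placeholder values */
#define TXD_CMD_IFCS	0x02000000u
#define TXD_CMD_IDE	0x80000000u

int main(void)
{
	uint32_t tx_int_delay = 0;	/* module parameter; 0 means no delay */
	uint32_t txd_cmd = TXD_CMD_EOP | TXD_CMD_IFCS;

	/* only ask the hardware to delay the Tx interrupt if a delay is set */
	if (tx_int_delay)
		txd_cmd |= TXD_CMD_IDE;

	printf("txd_cmd = 0x%08X\n", (unsigned)txd_cmd);
	return 0;
}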
@@ -1821,8 +1831,11 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1821 | /* Configure extra packet-split registers */ | 1831 | /* Configure extra packet-split registers */ |
1822 | rfctl = E1000_READ_REG(&adapter->hw, RFCTL); | 1832 | rfctl = E1000_READ_REG(&adapter->hw, RFCTL); |
1823 | rfctl |= E1000_RFCTL_EXTEN; | 1833 | rfctl |= E1000_RFCTL_EXTEN; |
1824 | /* disable IPv6 packet split support */ | 1834 | /* disable packet split support for IPv6 extension headers, |
1825 | rfctl |= E1000_RFCTL_IPV6_DIS; | 1835 | * because some malformed IPv6 headers can hang the RX */ |
1836 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | | ||
1837 | E1000_RFCTL_NEW_IPV6_EXT_DIS); | ||
1838 | |||
1826 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); | 1839 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); |
1827 | 1840 | ||
1828 | rctl |= E1000_RCTL_DTYP_PS; | 1841 | rctl |= E1000_RCTL_DTYP_PS; |
@@ -1885,7 +1898,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1885 | 1898 | ||
1886 | if (hw->mac_type >= e1000_82540) { | 1899 | if (hw->mac_type >= e1000_82540) { |
1887 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); | 1900 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); |
1888 | if (adapter->itr > 1) | 1901 | if (adapter->itr_setting != 0) |
1889 | E1000_WRITE_REG(hw, ITR, | 1902 | E1000_WRITE_REG(hw, ITR, |
1890 | 1000000000 / (adapter->itr * 256)); | 1903 | 1000000000 / (adapter->itr * 256)); |
1891 | } | 1904 | } |
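The ITR write above is a unit conversion: the register holds the minimum gap between interrupts in 256 ns increments, so a target interrupt rate becomes 1000000000 / (itr * 256). A worked example under that reading:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t itr = 8000;				/* target: 8000 ints/sec */
	uint32_t reg = 1000000000u / (itr * 256u);	/* gap in 256 ns units */

	/* 1e9 ns / 8000 = 125000 ns between interrupts; 125000 / 256 ~= 488 */
	printf("ITR register = %u (%u ns between interrupts)\n",
	       (unsigned)reg, (unsigned)(reg * 256u));
	return 0;
}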
@@ -1895,11 +1908,11 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1895 | /* Reset delay timers after every interrupt */ | 1908 | /* Reset delay timers after every interrupt */ |
1896 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | 1909 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; |
1897 | #ifdef CONFIG_E1000_NAPI | 1910 | #ifdef CONFIG_E1000_NAPI |
1898 | /* Auto-Mask interrupts upon ICR read. */ | 1911 | /* Auto-Mask interrupts upon ICR access */ |
1899 | ctrl_ext |= E1000_CTRL_EXT_IAME; | 1912 | ctrl_ext |= E1000_CTRL_EXT_IAME; |
1913 | E1000_WRITE_REG(hw, IAM, 0xffffffff); | ||
1900 | #endif | 1914 | #endif |
1901 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 1915 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); |
1902 | E1000_WRITE_REG(hw, IAM, ~0); | ||
1903 | E1000_WRITE_FLUSH(hw); | 1916 | E1000_WRITE_FLUSH(hw); |
1904 | } | 1917 | } |
1905 | 1918 | ||
@@ -1938,6 +1951,12 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1938 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | 1951 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); |
1939 | } | 1952 | } |
1940 | 1953 | ||
1954 | /* enable early receives on 82573, only takes effect if using > 2048 | ||
1955 | * byte total frame size, i.e. only for jumbo frames */ | ||

1956 | #define E1000_ERT_2048 0x100 | ||
1957 | if (hw->mac_type == e1000_82573) | ||
1958 | E1000_WRITE_REG(hw, ERT, E1000_ERT_2048); | ||
1959 | |||
1941 | /* Enable Receives */ | 1960 | /* Enable Receives */ |
1942 | E1000_WRITE_REG(hw, RCTL, rctl); | 1961 | E1000_WRITE_REG(hw, RCTL, rctl); |
1943 | } | 1962 | } |
@@ -1991,10 +2010,13 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | |||
1991 | buffer_info->dma, | 2010 | buffer_info->dma, |
1992 | buffer_info->length, | 2011 | buffer_info->length, |
1993 | PCI_DMA_TODEVICE); | 2012 | PCI_DMA_TODEVICE); |
2013 | buffer_info->dma = 0; | ||
1994 | } | 2014 | } |
1995 | if (buffer_info->skb) | 2015 | if (buffer_info->skb) { |
1996 | dev_kfree_skb_any(buffer_info->skb); | 2016 | dev_kfree_skb_any(buffer_info->skb); |
1997 | memset(buffer_info, 0, sizeof(struct e1000_buffer)); | 2017 | buffer_info->skb = NULL; |
2018 | } | ||
2019 | /* buffer_info must be completely set up in the transmit path */ | ||
1998 | } | 2020 | } |
1999 | 2021 | ||
2000 | /** | 2022 | /** |
@@ -2418,6 +2440,7 @@ e1000_watchdog(unsigned long data) | |||
2418 | DPRINTK(LINK, INFO, | 2440 | DPRINTK(LINK, INFO, |
2419 | "Gigabit has been disabled, downgrading speed\n"); | 2441 | "Gigabit has been disabled, downgrading speed\n"); |
2420 | } | 2442 | } |
2443 | |||
2421 | if (adapter->hw.mac_type == e1000_82573) { | 2444 | if (adapter->hw.mac_type == e1000_82573) { |
2422 | e1000_enable_tx_pkt_filtering(&adapter->hw); | 2445 | e1000_enable_tx_pkt_filtering(&adapter->hw); |
2423 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) | 2446 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) |
@@ -2462,13 +2485,12 @@ e1000_watchdog(unsigned long data) | |||
2462 | if ((adapter->hw.mac_type == e1000_82571 || | 2485 | if ((adapter->hw.mac_type == e1000_82571 || |
2463 | adapter->hw.mac_type == e1000_82572) && | 2486 | adapter->hw.mac_type == e1000_82572) && |
2464 | txb2b == 0) { | 2487 | txb2b == 0) { |
2465 | #define SPEED_MODE_BIT (1 << 21) | ||
2466 | uint32_t tarc0; | 2488 | uint32_t tarc0; |
2467 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); | 2489 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); |
2468 | tarc0 &= ~SPEED_MODE_BIT; | 2490 | tarc0 &= ~(1 << 21); |
2469 | E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); | 2491 | E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); |
2470 | } | 2492 | } |
2471 | 2493 | ||
2472 | #ifdef NETIF_F_TSO | 2494 | #ifdef NETIF_F_TSO |
2473 | /* disable TSO for pcie and 10/100 speeds, to avoid | 2495 | /* disable TSO for pcie and 10/100 speeds, to avoid |
2474 | * some hardware issues */ | 2496 | * some hardware issues */ |
@@ -2480,9 +2502,15 @@ e1000_watchdog(unsigned long data) | |||
2480 | DPRINTK(PROBE,INFO, | 2502 | DPRINTK(PROBE,INFO, |
2481 | "10/100 speed: disabling TSO\n"); | 2503 | "10/100 speed: disabling TSO\n"); |
2482 | netdev->features &= ~NETIF_F_TSO; | 2504 | netdev->features &= ~NETIF_F_TSO; |
2505 | #ifdef NETIF_F_TSO6 | ||
2506 | netdev->features &= ~NETIF_F_TSO6; | ||
2507 | #endif | ||
2483 | break; | 2508 | break; |
2484 | case SPEED_1000: | 2509 | case SPEED_1000: |
2485 | netdev->features |= NETIF_F_TSO; | 2510 | netdev->features |= NETIF_F_TSO; |
2511 | #ifdef NETIF_F_TSO6 | ||
2512 | netdev->features |= NETIF_F_TSO6; | ||
2513 | #endif | ||
2486 | break; | 2514 | break; |
2487 | default: | 2515 | default: |
2488 | /* oops */ | 2516 | /* oops */ |
@@ -2549,19 +2577,6 @@ e1000_watchdog(unsigned long data) | |||
2549 | } | 2577 | } |
2550 | } | 2578 | } |
2551 | 2579 | ||
2552 | /* Dynamic mode for Interrupt Throttle Rate (ITR) */ | ||
2553 | if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { | ||
2554 | /* Symmetric Tx/Rx gets a reduced ITR=2000; Total | ||
2555 | * asymmetrical Tx or Rx gets ITR=8000; everyone | ||
2556 | * else is between 2000-8000. */ | ||
2557 | uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; | ||
2558 | uint32_t dif = (adapter->gotcl > adapter->gorcl ? | ||
2559 | adapter->gotcl - adapter->gorcl : | ||
2560 | adapter->gorcl - adapter->gotcl) / 10000; | ||
2561 | uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; | ||
2562 | E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256)); | ||
2563 | } | ||
2564 | |||
2565 | /* Cause software interrupt to ensure rx ring is cleaned */ | 2580 | /* Cause software interrupt to ensure rx ring is cleaned */ |
2566 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); | 2581 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); |
2567 | 2582 | ||
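For contrast with the per-interrupt scheme added below, the watchdog heuristic deleted here scaled ITR between roughly 2000 and 8000 ints/sec from the Tx/Rx byte asymmetry once per watchdog tick. A standalone rendering of that removed formula with example counters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gotcl = 900000000;	/* example: Tx bytes this interval */
	uint64_t gorcl = 100000000;	/* example: Rx bytes this interval */

	uint32_t goc = (uint32_t)((gotcl + gorcl) / 10000);
	uint32_t dif = (uint32_t)((gotcl > gorcl ? gotcl - gorcl
						 : gorcl - gotcl) / 10000);

	/* symmetric traffic -> ~2000 ints/sec, fully one-sided -> ~8000 */
	uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

	printf("old dynamic ITR would be %u ints/sec\n", (unsigned)itr);
	return 0;
}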
@@ -2577,6 +2592,135 @@ e1000_watchdog(unsigned long data) | |||
2577 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | 2592 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); |
2578 | } | 2593 | } |
2579 | 2594 | ||
2595 | enum latency_range { | ||
2596 | lowest_latency = 0, | ||
2597 | low_latency = 1, | ||
2598 | bulk_latency = 2, | ||
2599 | latency_invalid = 255 | ||
2600 | }; | ||
2601 | |||
2602 | /** | ||
2603 | * e1000_update_itr - update the dynamic ITR value based on statistics | ||
2604 | * Stores a new ITR value based on packets and byte | ||
2605 | * counts during the last interrupt. The advantage of per interrupt | ||
2606 | * computation is faster updates and more accurate ITR for the current | ||
2607 | * traffic pattern. Constants in this function were computed | ||
2608 | * based on theoretical maximum wire speed and thresholds were set based | ||
2609 | * on testing data as well as attempting to minimize response time | ||
2610 | * while increasing bulk throughput. | ||
2611 | * this functionality is controlled by the InterruptThrottleRate module | ||
2612 | * parameter (see e1000_param.c) | ||
2613 | * @adapter: pointer to adapter | ||
2614 | * @itr_setting: current adapter->itr | ||
2615 | * @packets: the number of packets during this measurement interval | ||
2616 | * @bytes: the number of bytes during this measurement interval | ||
2617 | **/ | ||
2618 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | ||
2619 | uint16_t itr_setting, | ||
2620 | int packets, | ||
2621 | int bytes) | ||
2622 | { | ||
2623 | unsigned int retval = itr_setting; | ||
2624 | struct e1000_hw *hw = &adapter->hw; | ||
2625 | |||
2626 | if (unlikely(hw->mac_type < e1000_82540)) | ||
2627 | goto update_itr_done; | ||
2628 | |||
2629 | if (packets == 0) | ||
2630 | goto update_itr_done; | ||
2631 | |||
2632 | |||
2633 | switch (itr_setting) { | ||
2634 | case lowest_latency: | ||
2635 | if ((packets < 5) && (bytes > 512)) | ||
2636 | retval = low_latency; | ||
2637 | break; | ||
2638 | case low_latency: /* 50 usec aka 20000 ints/s */ | ||
2639 | if (bytes > 10000) { | ||
2640 | if ((packets < 10) || | ||
2641 | ((bytes/packets) > 1200)) | ||
2642 | retval = bulk_latency; | ||
2643 | else if ((packets > 35)) | ||
2644 | retval = lowest_latency; | ||
2645 | } else if (packets <= 2 && bytes < 512) | ||
2646 | retval = lowest_latency; | ||
2647 | break; | ||
2648 | case bulk_latency: /* 250 usec aka 4000 ints/s */ | ||
2649 | if (bytes > 25000) { | ||
2650 | if (packets > 35) | ||
2651 | retval = low_latency; | ||
2652 | } else { | ||
2653 | if (bytes < 6000) | ||
2654 | retval = low_latency; | ||
2655 | } | ||
2656 | break; | ||
2657 | } | ||
2658 | |||
2659 | update_itr_done: | ||
2660 | return retval; | ||
2661 | } | ||
2662 | |||
2663 | static void e1000_set_itr(struct e1000_adapter *adapter) | ||
2664 | { | ||
2665 | struct e1000_hw *hw = &adapter->hw; | ||
2666 | uint16_t current_itr; | ||
2667 | uint32_t new_itr = adapter->itr; | ||
2668 | |||
2669 | if (unlikely(hw->mac_type < e1000_82540)) | ||
2670 | return; | ||
2671 | |||
2672 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ | ||
2673 | if (unlikely(adapter->link_speed != SPEED_1000)) { | ||
2674 | current_itr = 0; | ||
2675 | new_itr = 4000; | ||
2676 | goto set_itr_now; | ||
2677 | } | ||
2678 | |||
2679 | adapter->tx_itr = e1000_update_itr(adapter, | ||
2680 | adapter->tx_itr, | ||
2681 | adapter->total_tx_packets, | ||
2682 | adapter->total_tx_bytes); | ||
2683 | adapter->rx_itr = e1000_update_itr(adapter, | ||
2684 | adapter->rx_itr, | ||
2685 | adapter->total_rx_packets, | ||
2686 | adapter->total_rx_bytes); | ||
2687 | |||
2688 | current_itr = max(adapter->rx_itr, adapter->tx_itr); | ||
2689 | |||
2690 | /* conservative mode eliminates the lowest_latency setting */ | ||
2691 | if (current_itr == lowest_latency && (adapter->itr_setting == 3)) | ||
2692 | current_itr = low_latency; | ||
2693 | |||
2694 | switch (current_itr) { | ||
2695 | /* counts and packets in update_itr are dependent on these numbers */ | ||
2696 | case lowest_latency: | ||
2697 | new_itr = 70000; | ||
2698 | break; | ||
2699 | case low_latency: | ||
2700 | new_itr = 20000; /* aka hwitr = ~200 */ | ||
2701 | break; | ||
2702 | case bulk_latency: | ||
2703 | new_itr = 4000; | ||
2704 | break; | ||
2705 | default: | ||
2706 | break; | ||
2707 | } | ||
2708 | |||
2709 | set_itr_now: | ||
2710 | if (new_itr != adapter->itr) { | ||
2711 | /* this attempts to bias the interrupt rate towards Bulk | ||
2712 | * by adding intermediate steps when interrupt rate is | ||
2713 | * increasing */ | ||
2714 | new_itr = new_itr > adapter->itr ? | ||
2715 | min(adapter->itr + (new_itr >> 2), new_itr) : | ||
2716 | new_itr; | ||
2717 | adapter->itr = new_itr; | ||
2718 | E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256)); | ||
2719 | } | ||
2720 | |||
2721 | return; | ||
2722 | } | ||
2723 | |||
2580 | #define E1000_TX_FLAGS_CSUM 0x00000001 | 2724 | #define E1000_TX_FLAGS_CSUM 0x00000001 |
2581 | #define E1000_TX_FLAGS_VLAN 0x00000002 | 2725 | #define E1000_TX_FLAGS_VLAN 0x00000002 |
2582 | #define E1000_TX_FLAGS_TSO 0x00000004 | 2726 | #define E1000_TX_FLAGS_TSO 0x00000004 |
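The new e1000_set_itr() above ramps the interrupt rate up gradually, a quarter of the target per adjustment, but drops it in one step; a small standalone illustration of that smoothing with example numbers:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t itr = 4000;		/* current rate: bulk_latency */
	uint32_t target = 20000;	/* new target: low_latency */
	int step;

	/* increasing: move at most target/4 per pass; decreasing: jump */
	for (step = 0; itr != target && step < 10; step++) {
		itr = (target > itr) ? min_u32(itr + (target >> 2), target)
				     : target;
		printf("step %d: itr = %u ints/sec\n", step + 1, (unsigned)itr);
	}
	return 0;
}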
@@ -2617,7 +2761,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2617 | 0); | 2761 | 0); |
2618 | cmd_length = E1000_TXD_CMD_IP; | 2762 | cmd_length = E1000_TXD_CMD_IP; |
2619 | ipcse = skb->h.raw - skb->data - 1; | 2763 | ipcse = skb->h.raw - skb->data - 1; |
2620 | #ifdef NETIF_F_TSO_IPV6 | 2764 | #ifdef NETIF_F_TSO6 |
2621 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 2765 | } else if (skb->protocol == htons(ETH_P_IPV6)) { |
2622 | skb->nh.ipv6h->payload_len = 0; | 2766 | skb->nh.ipv6h->payload_len = 0; |
2623 | skb->h.th->check = | 2767 | skb->h.th->check = |
@@ -2653,6 +2797,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2653 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | 2797 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); |
2654 | 2798 | ||
2655 | buffer_info->time_stamp = jiffies; | 2799 | buffer_info->time_stamp = jiffies; |
2800 | buffer_info->next_to_watch = i; | ||
2656 | 2801 | ||
2657 | if (++i == tx_ring->count) i = 0; | 2802 | if (++i == tx_ring->count) i = 0; |
2658 | tx_ring->next_to_use = i; | 2803 | tx_ring->next_to_use = i; |
@@ -2687,6 +2832,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2687 | context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); | 2832 | context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); |
2688 | 2833 | ||
2689 | buffer_info->time_stamp = jiffies; | 2834 | buffer_info->time_stamp = jiffies; |
2835 | buffer_info->next_to_watch = i; | ||
2690 | 2836 | ||
2691 | if (unlikely(++i == tx_ring->count)) i = 0; | 2837 | if (unlikely(++i == tx_ring->count)) i = 0; |
2692 | tx_ring->next_to_use = i; | 2838 | tx_ring->next_to_use = i; |
@@ -2755,6 +2901,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2755 | size, | 2901 | size, |
2756 | PCI_DMA_TODEVICE); | 2902 | PCI_DMA_TODEVICE); |
2757 | buffer_info->time_stamp = jiffies; | 2903 | buffer_info->time_stamp = jiffies; |
2904 | buffer_info->next_to_watch = i; | ||
2758 | 2905 | ||
2759 | len -= size; | 2906 | len -= size; |
2760 | offset += size; | 2907 | offset += size; |
@@ -2794,6 +2941,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2794 | size, | 2941 | size, |
2795 | PCI_DMA_TODEVICE); | 2942 | PCI_DMA_TODEVICE); |
2796 | buffer_info->time_stamp = jiffies; | 2943 | buffer_info->time_stamp = jiffies; |
2944 | buffer_info->next_to_watch = i; | ||
2797 | 2945 | ||
2798 | len -= size; | 2946 | len -= size; |
2799 | offset += size; | 2947 | offset += size; |
@@ -2859,6 +3007,9 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2859 | 3007 | ||
2860 | tx_ring->next_to_use = i; | 3008 | tx_ring->next_to_use = i; |
2861 | writel(i, adapter->hw.hw_addr + tx_ring->tdt); | 3009 | writel(i, adapter->hw.hw_addr + tx_ring->tdt); |
3010 | /* we need this if more than one processor can write to our tail | ||
3011 | * at a time; it synchronizes IO on IA64/Altix systems */ | ||
3012 | mmiowb(); | ||
2862 | } | 3013 | } |
2863 | 3014 | ||
2864 | /** | 3015 | /** |
@@ -2952,6 +3103,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) | |||
2952 | 3103 | ||
2953 | /* A reprieve! */ | 3104 | /* A reprieve! */ |
2954 | netif_start_queue(netdev); | 3105 | netif_start_queue(netdev); |
3106 | ++adapter->restart_queue; | ||
2955 | return 0; | 3107 | return 0; |
2956 | } | 3108 | } |
2957 | 3109 | ||
@@ -3010,9 +3162,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3010 | max_per_txd = min(mss << 2, max_per_txd); | 3162 | max_per_txd = min(mss << 2, max_per_txd); |
3011 | max_txd_pwr = fls(max_per_txd) - 1; | 3163 | max_txd_pwr = fls(max_per_txd) - 1; |
3012 | 3164 | ||
3013 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data | 3165 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data |
3014 | * points to just header, pull a few bytes of payload from | 3166 | * points to just header, pull a few bytes of payload from |
3015 | * frags into skb->data */ | 3167 | * frags into skb->data */ |
3016 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 3168 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); |
3017 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { | 3169 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { |
3018 | switch (adapter->hw.mac_type) { | 3170 | switch (adapter->hw.mac_type) { |
@@ -3316,12 +3468,12 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3316 | adapter->stats.roc += E1000_READ_REG(hw, ROC); | 3468 | adapter->stats.roc += E1000_READ_REG(hw, ROC); |
3317 | 3469 | ||
3318 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3470 | if (adapter->hw.mac_type != e1000_ich8lan) { |
3319 | adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); | 3471 | adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); |
3320 | adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); | 3472 | adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); |
3321 | adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); | 3473 | adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); |
3322 | adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); | 3474 | adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); |
3323 | adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); | 3475 | adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); |
3324 | adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); | 3476 | adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); |
3325 | } | 3477 | } |
3326 | 3478 | ||
3327 | adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); | 3479 | adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); |
@@ -3352,12 +3504,12 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3352 | adapter->stats.tpr += E1000_READ_REG(hw, TPR); | 3504 | adapter->stats.tpr += E1000_READ_REG(hw, TPR); |
3353 | 3505 | ||
3354 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3506 | if (adapter->hw.mac_type != e1000_ich8lan) { |
3355 | adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); | 3507 | adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); |
3356 | adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); | 3508 | adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); |
3357 | adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); | 3509 | adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); |
3358 | adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); | 3510 | adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); |
3359 | adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); | 3511 | adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); |
3360 | adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); | 3512 | adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); |
3361 | } | 3513 | } |
3362 | 3514 | ||
3363 | adapter->stats.mptc += E1000_READ_REG(hw, MPTC); | 3515 | adapter->stats.mptc += E1000_READ_REG(hw, MPTC); |
@@ -3383,18 +3535,17 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3383 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); | 3535 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); |
3384 | 3536 | ||
3385 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3537 | if (adapter->hw.mac_type != e1000_ich8lan) { |
3386 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); | 3538 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); |
3387 | adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); | 3539 | adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); |
3388 | adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); | 3540 | adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); |
3389 | adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC); | 3541 | adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC); |
3390 | adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); | 3542 | adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); |
3391 | adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); | 3543 | adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); |
3392 | adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); | 3544 | adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); |
3393 | } | 3545 | } |
3394 | } | 3546 | } |
3395 | 3547 | ||
3396 | /* Fill out the OS statistics structure */ | 3548 | /* Fill out the OS statistics structure */ |
3397 | |||
3398 | adapter->net_stats.rx_packets = adapter->stats.gprc; | 3549 | adapter->net_stats.rx_packets = adapter->stats.gprc; |
3399 | adapter->net_stats.tx_packets = adapter->stats.gptc; | 3550 | adapter->net_stats.tx_packets = adapter->stats.gptc; |
3400 | adapter->net_stats.rx_bytes = adapter->stats.gorcl; | 3551 | adapter->net_stats.rx_bytes = adapter->stats.gorcl; |
@@ -3426,7 +3577,6 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3426 | /* Tx Dropped needs to be maintained elsewhere */ | 3577 | /* Tx Dropped needs to be maintained elsewhere */ |
3427 | 3578 | ||
3428 | /* Phy Stats */ | 3579 | /* Phy Stats */ |
3429 | |||
3430 | if (hw->media_type == e1000_media_type_copper) { | 3580 | if (hw->media_type == e1000_media_type_copper) { |
3431 | if ((adapter->link_speed == SPEED_1000) && | 3581 | if ((adapter->link_speed == SPEED_1000) && |
3432 | (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { | 3582 | (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { |
@@ -3442,6 +3592,95 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3442 | 3592 | ||
3443 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 3593 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
3444 | } | 3594 | } |
3595 | #ifdef CONFIG_PCI_MSI | ||
3596 | |||
3597 | /** | ||
3598 | * e1000_intr_msi - Interrupt Handler | ||
3599 | * @irq: interrupt number | ||
3600 | * @data: pointer to a network interface device structure | ||
3601 | **/ | ||
3602 | |||
3603 | static | ||
3604 | irqreturn_t e1000_intr_msi(int irq, void *data) | ||
3605 | { | ||
3606 | struct net_device *netdev = data; | ||
3607 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
3608 | struct e1000_hw *hw = &adapter->hw; | ||
3609 | #ifndef CONFIG_E1000_NAPI | ||
3610 | int i; | ||
3611 | #endif | ||
3612 | |||
3613 | /* this code avoids the read of ICR, but may take up to 1000 interrupts | ||
3614 | * after a link change event before it notices the change */ | ||
3615 | if (++adapter->detect_link >= 1000) { | ||
3616 | uint32_t icr = E1000_READ_REG(hw, ICR); | ||
3617 | #ifdef CONFIG_E1000_NAPI | ||
3618 | /* reading ICR disables interrupts using IAM, so keep up with our | ||
3619 | * enable/disable accounting */ | ||
3620 | atomic_inc(&adapter->irq_sem); | ||
3621 | #endif | ||
3622 | adapter->detect_link = 0; | ||
3623 | if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) && | ||
3624 | (icr & E1000_ICR_INT_ASSERTED)) { | ||
3625 | hw->get_link_status = 1; | ||
3626 | /* 80003ES2LAN workaround-- | ||
3627 | * For packet buffer work-around on link down event; | ||
3628 | * disable receives here in the ISR and | ||
3629 | * reset adapter in watchdog | ||
3630 | */ | ||
3631 | if (netif_carrier_ok(netdev) && | ||
3632 | (adapter->hw.mac_type == e1000_80003es2lan)) { | ||
3633 | /* disable receives */ | ||
3634 | uint32_t rctl = E1000_READ_REG(hw, RCTL); | ||
3635 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | ||
3636 | } | ||
3637 | /* guard against interrupt when we're going down */ | ||
3638 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | ||
3639 | mod_timer(&adapter->watchdog_timer, | ||
3640 | jiffies + 1); | ||
3641 | } | ||
3642 | } else { | ||
3643 | E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ | | ||
3644 | E1000_ICR_LSC))); | ||
3645 | /* bummer we have to flush here, but things break otherwise as | ||
3646 | * some event appears to be lost or delayed and throughput | ||
3647 | * drops. In almost all tests this flush is unnecessary */ | ||
3648 | E1000_WRITE_FLUSH(hw); | ||
3649 | #ifdef CONFIG_E1000_NAPI | ||
3650 | /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are | ||
3651 | * masked. No need for the IMC write, but it does mean we | ||
3652 | * should account for it ASAP. */ | ||
3653 | atomic_inc(&adapter->irq_sem); | ||
3654 | #endif | ||
3655 | } | ||
3656 | |||
3657 | #ifdef CONFIG_E1000_NAPI | ||
3658 | if (likely(netif_rx_schedule_prep(netdev))) { | ||
3659 | adapter->total_tx_bytes = 0; | ||
3660 | adapter->total_tx_packets = 0; | ||
3661 | adapter->total_rx_bytes = 0; | ||
3662 | adapter->total_rx_packets = 0; | ||
3663 | __netif_rx_schedule(netdev); | ||
3664 | } else | ||
3665 | e1000_irq_enable(adapter); | ||
3666 | #else | ||
3667 | adapter->total_tx_bytes = 0; | ||
3668 | adapter->total_rx_bytes = 0; | ||
3669 | adapter->total_tx_packets = 0; | ||
3670 | adapter->total_rx_packets = 0; | ||
3671 | |||
3672 | for (i = 0; i < E1000_MAX_INTR; i++) | ||
3673 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | ||
3674 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) | ||
3675 | break; | ||
3676 | |||
3677 | if (likely(adapter->itr_setting & 3)) | ||
3678 | e1000_set_itr(adapter); | ||
3679 | #endif | ||
3680 | |||
3681 | return IRQ_HANDLED; | ||
3682 | } | ||
3683 | #endif | ||
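The MSI handler above deliberately skips the comparatively expensive ICR read on most interrupts and only samples it every 1000th interrupt to look for link events, acknowledging everything else cheaply in between; a link change can therefore go unnoticed for up to 1000 interrupts. A small user-space sketch of that sampling pattern, with invented names and a plain variable standing in for the ICR register:

#include <stdint.h>
#include <stdio.h>

#define LINK_SAMPLE_PERIOD 1000
#define CAUSE_LINK_CHANGE  0x4          /* made-up bit, plays the role of RXSEQ/LSC */

static uint32_t fake_icr;               /* stands in for the hardware cause register */
static unsigned int irqs_since_sample;

static void handle_msi_interrupt(void)
{
        if (++irqs_since_sample >= LINK_SAMPLE_PERIOD) {
                irqs_since_sample = 0;
                /* expensive path: read and decode the full cause register */
                uint32_t causes = fake_icr;
                fake_icr = 0;
                if (causes & CAUSE_LINK_CHANGE)
                        printf("link change noticed (possibly %d interrupts late)\n",
                               LINK_SAMPLE_PERIOD);
        } else {
                /* cheap path: clear everything except the link bits, which stay
                 * pending until the next sampling pass */
                fake_icr &= CAUSE_LINK_CHANGE;
        }
        /* rx/tx cleanup would run here on every interrupt */
}

int main(void)
{
        fake_icr |= CAUSE_LINK_CHANGE;          /* simulate a link event */
        for (int i = 0; i < LINK_SAMPLE_PERIOD; i++)
                handle_msi_interrupt();
        return 0;
}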
3445 | 3684 | ||
3446 | /** | 3685 | /** |
3447 | * e1000_intr - Interrupt Handler | 3686 | * e1000_intr - Interrupt Handler |
@@ -3458,7 +3697,17 @@ e1000_intr(int irq, void *data) | |||
3458 | uint32_t rctl, icr = E1000_READ_REG(hw, ICR); | 3697 | uint32_t rctl, icr = E1000_READ_REG(hw, ICR); |
3459 | #ifndef CONFIG_E1000_NAPI | 3698 | #ifndef CONFIG_E1000_NAPI |
3460 | int i; | 3699 | int i; |
3461 | #else | 3700 | #endif |
3701 | if (unlikely(!icr)) | ||
3702 | return IRQ_NONE; /* Not our interrupt */ | ||
3703 | |||
3704 | #ifdef CONFIG_E1000_NAPI | ||
3705 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | ||
3706 | * not set, then the adapter didn't send an interrupt */ | ||
3707 | if (unlikely(hw->mac_type >= e1000_82571 && | ||
3708 | !(icr & E1000_ICR_INT_ASSERTED))) | ||
3709 | return IRQ_NONE; | ||
3710 | |||
3462 | /* Interrupt Auto-Mask...upon reading ICR, | 3711 | /* Interrupt Auto-Mask...upon reading ICR, |
3463 | * interrupts are masked. No need for the | 3712 | * interrupts are masked. No need for the |
3464 | * IMC write, but it does mean we should | 3713 | * IMC write, but it does mean we should |
@@ -3467,14 +3716,6 @@ e1000_intr(int irq, void *data) | |||
3467 | atomic_inc(&adapter->irq_sem); | 3716 | atomic_inc(&adapter->irq_sem); |
3468 | #endif | 3717 | #endif |
3469 | 3718 | ||
3470 | if (unlikely(!icr)) { | ||
3471 | #ifdef CONFIG_E1000_NAPI | ||
3472 | if (hw->mac_type >= e1000_82571) | ||
3473 | e1000_irq_enable(adapter); | ||
3474 | #endif | ||
3475 | return IRQ_NONE; /* Not our interrupt */ | ||
3476 | } | ||
3477 | |||
3478 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3719 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3479 | hw->get_link_status = 1; | 3720 | hw->get_link_status = 1; |
3480 | /* 80003ES2LAN workaround-- | 3721 | /* 80003ES2LAN workaround-- |
@@ -3495,13 +3736,20 @@ e1000_intr(int irq, void *data) | |||
3495 | 3736 | ||
3496 | #ifdef CONFIG_E1000_NAPI | 3737 | #ifdef CONFIG_E1000_NAPI |
3497 | if (unlikely(hw->mac_type < e1000_82571)) { | 3738 | if (unlikely(hw->mac_type < e1000_82571)) { |
3739 | /* disable interrupts, without the synchronize_irq bit */ | ||
3498 | atomic_inc(&adapter->irq_sem); | 3740 | atomic_inc(&adapter->irq_sem); |
3499 | E1000_WRITE_REG(hw, IMC, ~0); | 3741 | E1000_WRITE_REG(hw, IMC, ~0); |
3500 | E1000_WRITE_FLUSH(hw); | 3742 | E1000_WRITE_FLUSH(hw); |
3501 | } | 3743 | } |
3502 | if (likely(netif_rx_schedule_prep(netdev))) | 3744 | if (likely(netif_rx_schedule_prep(netdev))) { |
3745 | adapter->total_tx_bytes = 0; | ||
3746 | adapter->total_tx_packets = 0; | ||
3747 | adapter->total_rx_bytes = 0; | ||
3748 | adapter->total_rx_packets = 0; | ||
3503 | __netif_rx_schedule(netdev); | 3749 | __netif_rx_schedule(netdev); |
3504 | else | 3750 | } else |
3751 | /* this really should not happen! if it does it is basically a | ||
3752 | * bug, but not a hard error, so enable ints and continue */ | ||
3505 | e1000_irq_enable(adapter); | 3753 | e1000_irq_enable(adapter); |
3506 | #else | 3754 | #else |
3507 | /* Writing IMC and IMS is needed for 82547. | 3755 | /* Writing IMC and IMS is needed for 82547. |
@@ -3519,16 +3767,23 @@ e1000_intr(int irq, void *data) | |||
3519 | E1000_WRITE_REG(hw, IMC, ~0); | 3767 | E1000_WRITE_REG(hw, IMC, ~0); |
3520 | } | 3768 | } |
3521 | 3769 | ||
3770 | adapter->total_tx_bytes = 0; | ||
3771 | adapter->total_rx_bytes = 0; | ||
3772 | adapter->total_tx_packets = 0; | ||
3773 | adapter->total_rx_packets = 0; | ||
3774 | |||
3522 | for (i = 0; i < E1000_MAX_INTR; i++) | 3775 | for (i = 0; i < E1000_MAX_INTR; i++) |
3523 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | 3776 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & |
3524 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) | 3777 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) |
3525 | break; | 3778 | break; |
3526 | 3779 | ||
3780 | if (likely(adapter->itr_setting & 3)) | ||
3781 | e1000_set_itr(adapter); | ||
3782 | |||
3527 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | 3783 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) |
3528 | e1000_irq_enable(adapter); | 3784 | e1000_irq_enable(adapter); |
3529 | 3785 | ||
3530 | #endif | 3786 | #endif |
3531 | |||
3532 | return IRQ_HANDLED; | 3787 | return IRQ_HANDLED; |
3533 | } | 3788 | } |
3534 | 3789 | ||
@@ -3572,6 +3827,8 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3572 | if ((!tx_cleaned && (work_done == 0)) || | 3827 | if ((!tx_cleaned && (work_done == 0)) || |
3573 | !netif_running(poll_dev)) { | 3828 | !netif_running(poll_dev)) { |
3574 | quit_polling: | 3829 | quit_polling: |
3830 | if (likely(adapter->itr_setting & 3)) | ||
3831 | e1000_set_itr(adapter); | ||
3575 | netif_rx_complete(poll_dev); | 3832 | netif_rx_complete(poll_dev); |
3576 | e1000_irq_enable(adapter); | 3833 | e1000_irq_enable(adapter); |
3577 | return 0; | 3834 | return 0; |
@@ -3598,6 +3855,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3598 | unsigned int count = 0; | 3855 | unsigned int count = 0; |
3599 | #endif | 3856 | #endif |
3600 | boolean_t cleaned = FALSE; | 3857 | boolean_t cleaned = FALSE; |
3858 | unsigned int total_tx_bytes=0, total_tx_packets=0; | ||
3601 | 3859 | ||
3602 | i = tx_ring->next_to_clean; | 3860 | i = tx_ring->next_to_clean; |
3603 | eop = tx_ring->buffer_info[i].next_to_watch; | 3861 | eop = tx_ring->buffer_info[i].next_to_watch; |
@@ -3609,13 +3867,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3609 | buffer_info = &tx_ring->buffer_info[i]; | 3867 | buffer_info = &tx_ring->buffer_info[i]; |
3610 | cleaned = (i == eop); | 3868 | cleaned = (i == eop); |
3611 | 3869 | ||
3870 | if (cleaned) { | ||
3871 | /* this packet count is wrong for TSO but has a | ||
3872 | * tendency to make dynamic ITR change more | ||
3873 | * towards bulk */ | ||
3874 | total_tx_packets++; | ||
3875 | total_tx_bytes += buffer_info->skb->len; | ||
3876 | } | ||
3612 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 3877 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
3613 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); | 3878 | tx_desc->upper.data = 0; |
3614 | 3879 | ||
3615 | if (unlikely(++i == tx_ring->count)) i = 0; | 3880 | if (unlikely(++i == tx_ring->count)) i = 0; |
3616 | } | 3881 | } |
3617 | 3882 | ||
3618 | |||
3619 | eop = tx_ring->buffer_info[i].next_to_watch; | 3883 | eop = tx_ring->buffer_info[i].next_to_watch; |
3620 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 3884 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
3621 | #ifdef CONFIG_E1000_NAPI | 3885 | #ifdef CONFIG_E1000_NAPI |
@@ -3634,8 +3898,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3634 | * sees the new next_to_clean. | 3898 | * sees the new next_to_clean. |
3635 | */ | 3899 | */ |
3636 | smp_mb(); | 3900 | smp_mb(); |
3637 | if (netif_queue_stopped(netdev)) | 3901 | if (netif_queue_stopped(netdev)) { |
3638 | netif_wake_queue(netdev); | 3902 | netif_wake_queue(netdev); |
3903 | ++adapter->restart_queue; | ||
3904 | } | ||
3639 | } | 3905 | } |
3640 | 3906 | ||
3641 | if (adapter->detect_tx_hung) { | 3907 | if (adapter->detect_tx_hung) { |
@@ -3673,6 +3939,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3673 | netif_stop_queue(netdev); | 3939 | netif_stop_queue(netdev); |
3674 | } | 3940 | } |
3675 | } | 3941 | } |
3942 | adapter->total_tx_bytes += total_tx_bytes; | ||
3943 | adapter->total_tx_packets += total_tx_packets; | ||
3676 | return cleaned; | 3944 | return cleaned; |
3677 | } | 3945 | } |
3678 | 3946 | ||
@@ -3752,6 +4020,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3752 | unsigned int i; | 4020 | unsigned int i; |
3753 | int cleaned_count = 0; | 4021 | int cleaned_count = 0; |
3754 | boolean_t cleaned = FALSE; | 4022 | boolean_t cleaned = FALSE; |
4023 | unsigned int total_rx_bytes=0, total_rx_packets=0; | ||
3755 | 4024 | ||
3756 | i = rx_ring->next_to_clean; | 4025 | i = rx_ring->next_to_clean; |
3757 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 4026 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
@@ -3760,6 +4029,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3760 | while (rx_desc->status & E1000_RXD_STAT_DD) { | 4029 | while (rx_desc->status & E1000_RXD_STAT_DD) { |
3761 | struct sk_buff *skb; | 4030 | struct sk_buff *skb; |
3762 | u8 status; | 4031 | u8 status; |
4032 | |||
3763 | #ifdef CONFIG_E1000_NAPI | 4033 | #ifdef CONFIG_E1000_NAPI |
3764 | if (*work_done >= work_to_do) | 4034 | if (*work_done >= work_to_do) |
3765 | break; | 4035 | break; |
@@ -3817,6 +4087,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3817 | * done after the TBI_ACCEPT workaround above */ | 4087 | * done after the TBI_ACCEPT workaround above */ |
3818 | length -= 4; | 4088 | length -= 4; |
3819 | 4089 | ||
4090 | /* probably a little skewed due to removing CRC */ | ||
4091 | total_rx_bytes += length; | ||
4092 | total_rx_packets++; | ||
4093 | |||
3820 | /* code added for copybreak, this should improve | 4094 | /* code added for copybreak, this should improve |
3821 | * performance for small packets with large amounts | 4095 | * performance for small packets with large amounts |
3822 | * of reassembly being done in the stack */ | 4096 | * of reassembly being done in the stack */ |
@@ -3832,12 +4106,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3832 | /* save the skb in buffer_info as good */ | 4106 | /* save the skb in buffer_info as good */ |
3833 | buffer_info->skb = skb; | 4107 | buffer_info->skb = skb; |
3834 | skb = new_skb; | 4108 | skb = new_skb; |
3835 | skb_put(skb, length); | ||
3836 | } | 4109 | } |
3837 | } else | 4110 | /* else just continue with the old one */ |
3838 | skb_put(skb, length); | 4111 | } |
3839 | |||
3840 | /* end copybreak code */ | 4112 | /* end copybreak code */ |
4113 | skb_put(skb, length); | ||
3841 | 4114 | ||
3842 | /* Receive Checksum Offload */ | 4115 | /* Receive Checksum Offload */ |
3843 | e1000_rx_checksum(adapter, | 4116 | e1000_rx_checksum(adapter, |
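The copybreak rework above exists so that small received frames are copied into a freshly allocated small skb while the driver's original (large, already-mapped) receive buffer stays in buffer_info for reuse; only large frames give up the original buffer. A rough user-space sketch of the idea, with an invented threshold and plain byte buffers standing in for skbs:

#include <stdlib.h>
#include <string.h>

#define COPYBREAK_THRESHOLD 256         /* illustrative cutoff, not the driver's value */

struct rx_slot {
        unsigned char *buf;             /* large, pre-allocated receive buffer */
        size_t cap;
};

/* Return the buffer to hand up the stack.  Small packets are copied so the
 * slot keeps its big buffer; large packets hand the big buffer up and the
 * slot is refilled with a new one. */
static unsigned char *receive_with_copybreak(struct rx_slot *slot, size_t len)
{
        if (len < COPYBREAK_THRESHOLD) {
                unsigned char *small = malloc(len);
                if (small) {
                        memcpy(small, slot->buf, len);
                        return small;           /* slot->buf stays in place for reuse */
                }
                /* allocation failed: fall back to handing up the big buffer */
        }
        unsigned char *big = slot->buf;
        slot->buf = malloc(slot->cap);          /* refill (error handling omitted) */
        return big;
}

int main(void)
{
        struct rx_slot slot = { malloc(2048), 2048 };
        unsigned char *pkt = receive_with_copybreak(&slot, 60);        /* copied path */
        free(pkt);
        free(slot.buf);
        return 0;
}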
@@ -3886,6 +4159,8 @@ next_desc: | |||
3886 | if (cleaned_count) | 4159 | if (cleaned_count) |
3887 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | 4160 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); |
3888 | 4161 | ||
4162 | adapter->total_rx_packets += total_rx_packets; | ||
4163 | adapter->total_rx_bytes += total_rx_bytes; | ||
3889 | return cleaned; | 4164 | return cleaned; |
3890 | } | 4165 | } |
3891 | 4166 | ||
@@ -3915,6 +4190,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3915 | uint32_t length, staterr; | 4190 | uint32_t length, staterr; |
3916 | int cleaned_count = 0; | 4191 | int cleaned_count = 0; |
3917 | boolean_t cleaned = FALSE; | 4192 | boolean_t cleaned = FALSE; |
4193 | unsigned int total_rx_bytes=0, total_rx_packets=0; | ||
3918 | 4194 | ||
3919 | i = rx_ring->next_to_clean; | 4195 | i = rx_ring->next_to_clean; |
3920 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 4196 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
@@ -3999,7 +4275,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3999 | goto copydone; | 4275 | goto copydone; |
4000 | } /* if */ | 4276 | } /* if */ |
4001 | } | 4277 | } |
4002 | 4278 | ||
4003 | for (j = 0; j < adapter->rx_ps_pages; j++) { | 4279 | for (j = 0; j < adapter->rx_ps_pages; j++) { |
4004 | if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j]))) | 4280 | if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j]))) |
4005 | break; | 4281 | break; |
@@ -4019,6 +4295,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
4019 | pskb_trim(skb, skb->len - 4); | 4295 | pskb_trim(skb, skb->len - 4); |
4020 | 4296 | ||
4021 | copydone: | 4297 | copydone: |
4298 | total_rx_bytes += skb->len; | ||
4299 | total_rx_packets++; | ||
4300 | |||
4022 | e1000_rx_checksum(adapter, staterr, | 4301 | e1000_rx_checksum(adapter, staterr, |
4023 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | 4302 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); |
4024 | skb->protocol = eth_type_trans(skb, netdev); | 4303 | skb->protocol = eth_type_trans(skb, netdev); |
@@ -4067,6 +4346,8 @@ next_desc: | |||
4067 | if (cleaned_count) | 4346 | if (cleaned_count) |
4068 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | 4347 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); |
4069 | 4348 | ||
4349 | adapter->total_rx_packets += total_rx_packets; | ||
4350 | adapter->total_rx_bytes += total_rx_bytes; | ||
4070 | return cleaned; | 4351 | return cleaned; |
4071 | } | 4352 | } |
4072 | 4353 | ||
@@ -4234,7 +4515,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
4234 | } | 4515 | } |
4235 | 4516 | ||
4236 | skb = netdev_alloc_skb(netdev, | 4517 | skb = netdev_alloc_skb(netdev, |
4237 | adapter->rx_ps_bsize0 + NET_IP_ALIGN); | 4518 | adapter->rx_ps_bsize0 + NET_IP_ALIGN); |
4238 | 4519 | ||
4239 | if (unlikely(!skb)) { | 4520 | if (unlikely(!skb)) { |
4240 | adapter->alloc_rx_buff_failed++; | 4521 | adapter->alloc_rx_buff_failed++; |
@@ -4511,7 +4792,6 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) | |||
4511 | return E1000_SUCCESS; | 4792 | return E1000_SUCCESS; |
4512 | } | 4793 | } |
4513 | 4794 | ||
4514 | |||
4515 | void | 4795 | void |
4516 | e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) | 4796 | e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) |
4517 | { | 4797 | { |
@@ -4534,12 +4814,12 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4534 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4814 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); |
4535 | 4815 | ||
4536 | if (adapter->hw.mac_type != e1000_ich8lan) { | 4816 | if (adapter->hw.mac_type != e1000_ich8lan) { |
4537 | /* enable VLAN receive filtering */ | 4817 | /* enable VLAN receive filtering */ |
4538 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4818 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
4539 | rctl |= E1000_RCTL_VFE; | 4819 | rctl |= E1000_RCTL_VFE; |
4540 | rctl &= ~E1000_RCTL_CFIEN; | 4820 | rctl &= ~E1000_RCTL_CFIEN; |
4541 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4821 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
4542 | e1000_update_mng_vlan(adapter); | 4822 | e1000_update_mng_vlan(adapter); |
4543 | } | 4823 | } |
4544 | } else { | 4824 | } else { |
4545 | /* disable VLAN tag insert/strip */ | 4825 | /* disable VLAN tag insert/strip */ |
@@ -4548,14 +4828,16 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4548 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4828 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); |
4549 | 4829 | ||
4550 | if (adapter->hw.mac_type != e1000_ich8lan) { | 4830 | if (adapter->hw.mac_type != e1000_ich8lan) { |
4551 | /* disable VLAN filtering */ | 4831 | /* disable VLAN filtering */ |
4552 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4832 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
4553 | rctl &= ~E1000_RCTL_VFE; | 4833 | rctl &= ~E1000_RCTL_VFE; |
4554 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4834 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
4555 | if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { | 4835 | if (adapter->mng_vlan_id != |
4556 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 4836 | (uint16_t)E1000_MNG_VLAN_NONE) { |
4557 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 4837 | e1000_vlan_rx_kill_vid(netdev, |
4558 | } | 4838 | adapter->mng_vlan_id); |
4839 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | ||
4840 | } | ||
4559 | } | 4841 | } |
4560 | } | 4842 | } |
4561 | 4843 | ||
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index a464cb290621..18afc0c25dac 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -107,17 +107,16 @@ typedef enum { | |||
107 | 107 | ||
108 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) | 108 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) |
109 | 109 | ||
110 | #define E1000_WRITE_ICH8_REG(a, reg, value) ( \ | 110 | #define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ |
111 | writel((value), ((a)->flash_address + reg))) | 111 | writel((value), ((a)->flash_address + reg))) |
112 | 112 | ||
113 | #define E1000_READ_ICH8_REG(a, reg) ( \ | 113 | #define E1000_READ_ICH_FLASH_REG(a, reg) ( \ |
114 | readl((a)->flash_address + reg)) | 114 | readl((a)->flash_address + reg)) |
115 | 115 | ||
116 | #define E1000_WRITE_ICH8_REG16(a, reg, value) ( \ | 116 | #define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ |
117 | writew((value), ((a)->flash_address + reg))) | 117 | writew((value), ((a)->flash_address + reg))) |
118 | 118 | ||
119 | #define E1000_READ_ICH8_REG16(a, reg) ( \ | 119 | #define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ |
120 | readw((a)->flash_address + reg)) | 120 | readw((a)->flash_address + reg)) |
121 | 121 | ||
122 | |||
123 | #endif /* _E1000_OSDEP_H_ */ | 122 | #endif /* _E1000_OSDEP_H_ */ |
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index 9c3c1acefccc..cbfcd7f2889f 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -44,16 +44,6 @@ | |||
44 | */ | 44 | */ |
45 | 45 | ||
46 | #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } | 46 | #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } |
47 | /* Module Parameters are always initialized to -1, so that the driver | ||
48 | * can tell the difference between no user specified value or the | ||
49 | * user asking for the default value. | ||
50 | * The true default values are loaded in when e1000_check_options is called. | ||
51 | * | ||
52 | * This is a GCC extension to ANSI C. | ||
53 | * See the item "Labeled Elements in Initializers" in the section | ||
54 | * "Extensions to the C Language Family" of the GCC documentation. | ||
55 | */ | ||
56 | |||
57 | #define E1000_PARAM(X, desc) \ | 47 | #define E1000_PARAM(X, desc) \ |
58 | static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ | 48 | static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ |
59 | static int num_##X = 0; \ | 49 | static int num_##X = 0; \ |
@@ -67,7 +57,6 @@ | |||
67 | * | 57 | * |
68 | * Default Value: 256 | 58 | * Default Value: 256 |
69 | */ | 59 | */ |
70 | |||
71 | E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); | 60 | E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); |
72 | 61 | ||
73 | /* Receive Descriptor Count | 62 | /* Receive Descriptor Count |
@@ -77,7 +66,6 @@ E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); | |||
77 | * | 66 | * |
78 | * Default Value: 256 | 67 | * Default Value: 256 |
79 | */ | 68 | */ |
80 | |||
81 | E1000_PARAM(RxDescriptors, "Number of receive descriptors"); | 69 | E1000_PARAM(RxDescriptors, "Number of receive descriptors"); |
82 | 70 | ||
83 | /* User Specified Speed Override | 71 | /* User Specified Speed Override |
@@ -90,7 +78,6 @@ E1000_PARAM(RxDescriptors, "Number of receive descriptors"); | |||
90 | * | 78 | * |
91 | * Default Value: 0 | 79 | * Default Value: 0 |
92 | */ | 80 | */ |
93 | |||
94 | E1000_PARAM(Speed, "Speed setting"); | 81 | E1000_PARAM(Speed, "Speed setting"); |
95 | 82 | ||
96 | /* User Specified Duplex Override | 83 | /* User Specified Duplex Override |
@@ -102,7 +89,6 @@ E1000_PARAM(Speed, "Speed setting"); | |||
102 | * | 89 | * |
103 | * Default Value: 0 | 90 | * Default Value: 0 |
104 | */ | 91 | */ |
105 | |||
106 | E1000_PARAM(Duplex, "Duplex setting"); | 92 | E1000_PARAM(Duplex, "Duplex setting"); |
107 | 93 | ||
108 | /* Auto-negotiation Advertisement Override | 94 | /* Auto-negotiation Advertisement Override |
@@ -119,8 +105,9 @@ E1000_PARAM(Duplex, "Duplex setting"); | |||
119 | * | 105 | * |
120 | * Default Value: 0x2F (copper); 0x20 (fiber) | 106 | * Default Value: 0x2F (copper); 0x20 (fiber) |
121 | */ | 107 | */ |
122 | |||
123 | E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); | 108 | E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); |
109 | #define AUTONEG_ADV_DEFAULT 0x2F | ||
110 | #define AUTONEG_ADV_MASK 0x2F | ||
124 | 111 | ||
125 | /* User Specified Flow Control Override | 112 | /* User Specified Flow Control Override |
126 | * | 113 | * |
@@ -132,8 +119,8 @@ E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); | |||
132 | * | 119 | * |
133 | * Default Value: Read flow control settings from the EEPROM | 120 | * Default Value: Read flow control settings from the EEPROM |
134 | */ | 121 | */ |
135 | |||
136 | E1000_PARAM(FlowControl, "Flow Control setting"); | 122 | E1000_PARAM(FlowControl, "Flow Control setting"); |
123 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL | ||
137 | 124 | ||
138 | /* XsumRX - Receive Checksum Offload Enable/Disable | 125 | /* XsumRX - Receive Checksum Offload Enable/Disable |
139 | * | 126 | * |
@@ -144,53 +131,54 @@ E1000_PARAM(FlowControl, "Flow Control setting"); | |||
144 | * | 131 | * |
145 | * Default Value: 1 | 132 | * Default Value: 1 |
146 | */ | 133 | */ |
147 | |||
148 | E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); | 134 | E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); |
149 | 135 | ||
150 | /* Transmit Interrupt Delay in units of 1.024 microseconds | 136 | /* Transmit Interrupt Delay in units of 1.024 microseconds |
137 | * Tx interrupt delay typically needs to be set to something non-zero | ||
151 | * | 138 | * |
152 | * Valid Range: 0-65535 | 139 | * Valid Range: 0-65535 |
153 | * | ||
154 | * Default Value: 64 | ||
155 | */ | 140 | */ |
156 | |||
157 | E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); | 141 | E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); |
142 | #define DEFAULT_TIDV 8 | ||
143 | #define MAX_TXDELAY 0xFFFF | ||
144 | #define MIN_TXDELAY 0 | ||
158 | 145 | ||
159 | /* Transmit Absolute Interrupt Delay in units of 1.024 microseconds | 146 | /* Transmit Absolute Interrupt Delay in units of 1.024 microseconds |
160 | * | 147 | * |
161 | * Valid Range: 0-65535 | 148 | * Valid Range: 0-65535 |
162 | * | ||
163 | * Default Value: 0 | ||
164 | */ | 149 | */ |
165 | |||
166 | E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); | 150 | E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); |
151 | #define DEFAULT_TADV 32 | ||
152 | #define MAX_TXABSDELAY 0xFFFF | ||
153 | #define MIN_TXABSDELAY 0 | ||
167 | 154 | ||
168 | /* Receive Interrupt Delay in units of 1.024 microseconds | 155 | /* Receive Interrupt Delay in units of 1.024 microseconds |
156 | * hardware will likely hang if you set this to anything but zero. | ||
169 | * | 157 | * |
170 | * Valid Range: 0-65535 | 158 | * Valid Range: 0-65535 |
171 | * | ||
172 | * Default Value: 0 | ||
173 | */ | 159 | */ |
174 | |||
175 | E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); | 160 | E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); |
161 | #define DEFAULT_RDTR 0 | ||
162 | #define MAX_RXDELAY 0xFFFF | ||
163 | #define MIN_RXDELAY 0 | ||
176 | 164 | ||
177 | /* Receive Absolute Interrupt Delay in units of 1.024 microseconds | 165 | /* Receive Absolute Interrupt Delay in units of 1.024 microseconds |
178 | * | 166 | * |
179 | * Valid Range: 0-65535 | 167 | * Valid Range: 0-65535 |
180 | * | ||
181 | * Default Value: 128 | ||
182 | */ | 168 | */ |
183 | |||
184 | E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); | 169 | E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); |
170 | #define DEFAULT_RADV 8 | ||
171 | #define MAX_RXABSDELAY 0xFFFF | ||
172 | #define MIN_RXABSDELAY 0 | ||
185 | 173 | ||
186 | /* Interrupt Throttle Rate (interrupts/sec) | 174 | /* Interrupt Throttle Rate (interrupts/sec) |
187 | * | 175 | * |
188 | * Valid Range: 100-100000 (0=off, 1=dynamic) | 176 | * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) |
189 | * | ||
190 | * Default Value: 8000 | ||
191 | */ | 177 | */ |
192 | |||
193 | E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | 178 | E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); |
179 | #define DEFAULT_ITR 3 | ||
180 | #define MAX_ITR 100000 | ||
181 | #define MIN_ITR 100 | ||
194 | 182 | ||
195 | /* Enable Smart Power Down of the PHY | 183 | /* Enable Smart Power Down of the PHY |
196 | * | 184 | * |
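The interrupt-delay parameters in this hunk are in units of 1.024 microseconds, so the new defaults work out to delays in the single-digit to low-tens of microseconds; a quick conversion of the values defined above (plain arithmetic, nothing driver-specific):

/* 1.024 us per unit, per the comments above */
static const double tidv_us = 8 * 1.024;        /* DEFAULT_TIDV 8  -> ~8.2 us  */
static const double tadv_us = 32 * 1.024;       /* DEFAULT_TADV 32 -> ~32.8 us */
static const double rdtr_us = 0 * 1.024;        /* DEFAULT_RDTR 0  ->  0 us    */
static const double radv_us = 8 * 1.024;        /* DEFAULT_RADV 8  -> ~8.2 us  */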
@@ -198,7 +186,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | |||
198 | * | 186 | * |
199 | * Default Value: 0 (disabled) | 187 | * Default Value: 0 (disabled) |
200 | */ | 188 | */ |
201 | |||
202 | E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); | 189 | E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); |
203 | 190 | ||
204 | /* Enable Kumeran Lock Loss workaround | 191 | /* Enable Kumeran Lock Loss workaround |
@@ -207,33 +194,8 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); | |||
207 | * | 194 | * |
208 | * Default Value: 1 (enabled) | 195 | * Default Value: 1 (enabled) |
209 | */ | 196 | */ |
210 | |||
211 | E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); | 197 | E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); |
212 | 198 | ||
213 | #define AUTONEG_ADV_DEFAULT 0x2F | ||
214 | #define AUTONEG_ADV_MASK 0x2F | ||
215 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL | ||
216 | |||
217 | #define DEFAULT_RDTR 0 | ||
218 | #define MAX_RXDELAY 0xFFFF | ||
219 | #define MIN_RXDELAY 0 | ||
220 | |||
221 | #define DEFAULT_RADV 128 | ||
222 | #define MAX_RXABSDELAY 0xFFFF | ||
223 | #define MIN_RXABSDELAY 0 | ||
224 | |||
225 | #define DEFAULT_TIDV 64 | ||
226 | #define MAX_TXDELAY 0xFFFF | ||
227 | #define MIN_TXDELAY 0 | ||
228 | |||
229 | #define DEFAULT_TADV 64 | ||
230 | #define MAX_TXABSDELAY 0xFFFF | ||
231 | #define MIN_TXABSDELAY 0 | ||
232 | |||
233 | #define DEFAULT_ITR 8000 | ||
234 | #define MAX_ITR 100000 | ||
235 | #define MIN_ITR 100 | ||
236 | |||
237 | struct e1000_option { | 199 | struct e1000_option { |
238 | enum { enable_option, range_option, list_option } type; | 200 | enum { enable_option, range_option, list_option } type; |
239 | char *name; | 201 | char *name; |
@@ -510,15 +472,27 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
510 | break; | 472 | break; |
511 | case 1: | 473 | case 1: |
512 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", | 474 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", |
513 | opt.name); | 475 | opt.name); |
476 | adapter->itr_setting = adapter->itr; | ||
477 | adapter->itr = 20000; | ||
478 | break; | ||
479 | case 3: | ||
480 | DPRINTK(PROBE, INFO, | ||
481 | "%s set to dynamic conservative mode\n", | ||
482 | opt.name); | ||
483 | adapter->itr_setting = adapter->itr; | ||
484 | adapter->itr = 20000; | ||
514 | break; | 485 | break; |
515 | default: | 486 | default: |
516 | e1000_validate_option(&adapter->itr, &opt, | 487 | e1000_validate_option(&adapter->itr, &opt, |
517 | adapter); | 488 | adapter); |
489 | /* save the setting, because the dynamic bits change itr */ | ||
490 | adapter->itr_setting = adapter->itr; | ||
518 | break; | 491 | break; |
519 | } | 492 | } |
520 | } else { | 493 | } else { |
521 | adapter->itr = opt.def; | 494 | adapter->itr_setting = opt.def; |
495 | adapter->itr = 20000; | ||
522 | } | 496 | } |
523 | } | 497 | } |
524 | { /* Smart Power Down */ | 498 | { /* Smart Power Down */ |
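Putting the option handling above together with the defines earlier in the file (DEFAULT_ITR 3, MIN_ITR 100, MAX_ITR 100000): InterruptThrottleRate=1 and =3 both start at 20000 ints/s and let the driver adjust the rate dynamically, any other in-range value is used as a fixed rate, and leaving the parameter unset behaves like 3. A compact sketch of that mapping with illustrative names (the real code also prints messages and range-checks through e1000_validate_option; the =0 case is handled just above the visible hunk and is omitted here):

/* itr_setting remembers the user's choice; itr is the rate programmed at start. */
struct itr_config {
        unsigned int itr_setting;
        unsigned int itr;
};

static struct itr_config map_interrupt_throttle_rate(int value, int value_given)
{
        struct itr_config c;

        if (!value_given) {             /* parameter absent: default is 3 */
                c.itr_setting = 3;
                c.itr = 20000;
                return c;
        }
        switch (value) {
        case 1:                         /* dynamic */
        case 3:                         /* dynamic conservative */
                c.itr_setting = value;
                c.itr = 20000;          /* starting rate, adjusted at runtime */
                break;
        default:                        /* fixed rate, validated to 100..100000 */
                c.itr_setting = value;
                c.itr = value;
                break;
        }
        return c;
}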
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index c5ed635bce36..439f41338291 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -110,6 +110,8 @@ | |||
110 | * 0.55: 22 Mar 2006: Add flow control (pause frame). | 110 | * 0.55: 22 Mar 2006: Add flow control (pause frame). |
111 | * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. | 111 | * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. |
112 | * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. | 112 | * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. |
113 | * 0.58: 30 Oct 2006: Added support for sideband management unit. | ||
114 | * 0.59: 30 Oct 2006: Added support for recoverable error. | ||
113 | * | 115 | * |
114 | * Known bugs: | 116 | * Known bugs: |
115 | * We suspect that on some hardware no TX done interrupts are generated. | 117 | * We suspect that on some hardware no TX done interrupts are generated. |
@@ -126,7 +128,7 @@ | |||
126 | #else | 128 | #else |
127 | #define DRIVERNAPI | 129 | #define DRIVERNAPI |
128 | #endif | 130 | #endif |
129 | #define FORCEDETH_VERSION "0.57" | 131 | #define FORCEDETH_VERSION "0.59" |
130 | #define DRV_NAME "forcedeth" | 132 | #define DRV_NAME "forcedeth" |
131 | 133 | ||
132 | #include <linux/module.h> | 134 | #include <linux/module.h> |
@@ -174,11 +176,12 @@ | |||
174 | #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ | 176 | #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ |
175 | #define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */ | 177 | #define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */ |
176 | #define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */ | 178 | #define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */ |
179 | #define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */ | ||
177 | 180 | ||
178 | enum { | 181 | enum { |
179 | NvRegIrqStatus = 0x000, | 182 | NvRegIrqStatus = 0x000, |
180 | #define NVREG_IRQSTAT_MIIEVENT 0x040 | 183 | #define NVREG_IRQSTAT_MIIEVENT 0x040 |
181 | #define NVREG_IRQSTAT_MASK 0x1ff | 184 | #define NVREG_IRQSTAT_MASK 0x81ff |
182 | NvRegIrqMask = 0x004, | 185 | NvRegIrqMask = 0x004, |
183 | #define NVREG_IRQ_RX_ERROR 0x0001 | 186 | #define NVREG_IRQ_RX_ERROR 0x0001 |
184 | #define NVREG_IRQ_RX 0x0002 | 187 | #define NVREG_IRQ_RX 0x0002 |
@@ -189,15 +192,16 @@ enum { | |||
189 | #define NVREG_IRQ_LINK 0x0040 | 192 | #define NVREG_IRQ_LINK 0x0040 |
190 | #define NVREG_IRQ_RX_FORCED 0x0080 | 193 | #define NVREG_IRQ_RX_FORCED 0x0080 |
191 | #define NVREG_IRQ_TX_FORCED 0x0100 | 194 | #define NVREG_IRQ_TX_FORCED 0x0100 |
195 | #define NVREG_IRQ_RECOVER_ERROR 0x8000 | ||
192 | #define NVREG_IRQMASK_THROUGHPUT 0x00df | 196 | #define NVREG_IRQMASK_THROUGHPUT 0x00df |
193 | #define NVREG_IRQMASK_CPU 0x0040 | 197 | #define NVREG_IRQMASK_CPU 0x0040 |
194 | #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) | 198 | #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) |
195 | #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) | 199 | #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) |
196 | #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK) | 200 | #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR) |
197 | 201 | ||
198 | #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ | 202 | #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ |
199 | NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ | 203 | NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ |
200 | NVREG_IRQ_TX_FORCED)) | 204 | NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR)) |
201 | 205 | ||
202 | NvRegUnknownSetupReg6 = 0x008, | 206 | NvRegUnknownSetupReg6 = 0x008, |
203 | #define NVREG_UNKSETUP6_VAL 3 | 207 | #define NVREG_UNKSETUP6_VAL 3 |
@@ -222,6 +226,15 @@ enum { | |||
222 | #define NVREG_MAC_RESET_ASSERT 0x0F3 | 226 | #define NVREG_MAC_RESET_ASSERT 0x0F3 |
223 | NvRegTransmitterControl = 0x084, | 227 | NvRegTransmitterControl = 0x084, |
224 | #define NVREG_XMITCTL_START 0x01 | 228 | #define NVREG_XMITCTL_START 0x01 |
229 | #define NVREG_XMITCTL_MGMT_ST 0x40000000 | ||
230 | #define NVREG_XMITCTL_SYNC_MASK 0x000f0000 | ||
231 | #define NVREG_XMITCTL_SYNC_NOT_READY 0x0 | ||
232 | #define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000 | ||
233 | #define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00 | ||
234 | #define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0 | ||
235 | #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 | ||
236 | #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 | ||
237 | #define NVREG_XMITCTL_HOST_LOADED 0x00004000 | ||
225 | NvRegTransmitterStatus = 0x088, | 238 | NvRegTransmitterStatus = 0x088, |
226 | #define NVREG_XMITSTAT_BUSY 0x01 | 239 | #define NVREG_XMITSTAT_BUSY 0x01 |
227 | 240 | ||
@@ -304,8 +317,8 @@ enum { | |||
304 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | 317 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 |
305 | #define NVREG_MIISTAT_MASK 0x000f | 318 | #define NVREG_MIISTAT_MASK 0x000f |
306 | #define NVREG_MIISTAT_MASK2 0x000f | 319 | #define NVREG_MIISTAT_MASK2 0x000f |
307 | NvRegUnknownSetupReg4 = 0x184, | 320 | NvRegMIIMask = 0x184, |
308 | #define NVREG_UNKSETUP4_VAL 8 | 321 | #define NVREG_MII_LINKCHANGE 0x0008 |
309 | 322 | ||
310 | NvRegAdapterControl = 0x188, | 323 | NvRegAdapterControl = 0x188, |
311 | #define NVREG_ADAPTCTL_START 0x02 | 324 | #define NVREG_ADAPTCTL_START 0x02 |
@@ -707,6 +720,7 @@ struct fe_priv { | |||
707 | unsigned int phy_model; | 720 | unsigned int phy_model; |
708 | u16 gigabit; | 721 | u16 gigabit; |
709 | int intr_test; | 722 | int intr_test; |
723 | int recover_error; | ||
710 | 724 | ||
711 | /* General data: RO fields */ | 725 | /* General data: RO fields */ |
712 | dma_addr_t ring_addr; | 726 | dma_addr_t ring_addr; |
@@ -719,6 +733,7 @@ struct fe_priv { | |||
719 | u32 driver_data; | 733 | u32 driver_data; |
720 | u32 register_size; | 734 | u32 register_size; |
721 | int rx_csum; | 735 | int rx_csum; |
736 | u32 mac_in_use; | ||
722 | 737 | ||
723 | void __iomem *base; | 738 | void __iomem *base; |
724 | 739 | ||
@@ -2443,6 +2458,23 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
2443 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | 2458 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", |
2444 | dev->name, events); | 2459 | dev->name, events); |
2445 | } | 2460 | } |
2461 | if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { | ||
2462 | spin_lock(&np->lock); | ||
2463 | /* disable interrupts on the nic */ | ||
2464 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | ||
2465 | writel(0, base + NvRegIrqMask); | ||
2466 | else | ||
2467 | writel(np->irqmask, base + NvRegIrqMask); | ||
2468 | pci_push(base); | ||
2469 | |||
2470 | if (!np->in_shutdown) { | ||
2471 | np->nic_poll_irq = np->irqmask; | ||
2472 | np->recover_error = 1; | ||
2473 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2474 | } | ||
2475 | spin_unlock(&np->lock); | ||
2476 | break; | ||
2477 | } | ||
2446 | #ifdef CONFIG_FORCEDETH_NAPI | 2478 | #ifdef CONFIG_FORCEDETH_NAPI |
2447 | if (events & NVREG_IRQ_RX_ALL) { | 2479 | if (events & NVREG_IRQ_RX_ALL) { |
2448 | netif_rx_schedule(dev); | 2480 | netif_rx_schedule(dev); |
@@ -2673,6 +2705,20 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data) | |||
2673 | spin_unlock_irqrestore(&np->lock, flags); | 2705 | spin_unlock_irqrestore(&np->lock, flags); |
2674 | np->link_timeout = jiffies + LINK_TIMEOUT; | 2706 | np->link_timeout = jiffies + LINK_TIMEOUT; |
2675 | } | 2707 | } |
2708 | if (events & NVREG_IRQ_RECOVER_ERROR) { | ||
2709 | spin_lock_irq(&np->lock); | ||
2710 | /* disable interrupts on the nic */ | ||
2711 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | ||
2712 | pci_push(base); | ||
2713 | |||
2714 | if (!np->in_shutdown) { | ||
2715 | np->nic_poll_irq |= NVREG_IRQ_OTHER; | ||
2716 | np->recover_error = 1; | ||
2717 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2718 | } | ||
2719 | spin_unlock_irq(&np->lock); | ||
2720 | break; | ||
2721 | } | ||
2676 | if (events & (NVREG_IRQ_UNKNOWN)) { | 2722 | if (events & (NVREG_IRQ_UNKNOWN)) { |
2677 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | 2723 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", |
2678 | dev->name, events); | 2724 | dev->name, events); |
@@ -2902,6 +2948,42 @@ static void nv_do_nic_poll(unsigned long data) | |||
2902 | } | 2948 | } |
2903 | np->nic_poll_irq = 0; | 2949 | np->nic_poll_irq = 0; |
2904 | 2950 | ||
2951 | if (np->recover_error) { | ||
2952 | np->recover_error = 0; | ||
2953 | printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); | ||
2954 | if (netif_running(dev)) { | ||
2955 | netif_tx_lock_bh(dev); | ||
2956 | spin_lock(&np->lock); | ||
2957 | /* stop engines */ | ||
2958 | nv_stop_rx(dev); | ||
2959 | nv_stop_tx(dev); | ||
2960 | nv_txrx_reset(dev); | ||
2961 | /* drain rx queue */ | ||
2962 | nv_drain_rx(dev); | ||
2963 | nv_drain_tx(dev); | ||
2964 | /* reinit driver view of the rx queue */ | ||
2965 | set_bufsize(dev); | ||
2966 | if (nv_init_ring(dev)) { | ||
2967 | if (!np->in_shutdown) | ||
2968 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2969 | } | ||
2970 | /* reinit nic view of the rx queue */ | ||
2971 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | ||
2972 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | ||
2973 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | ||
2974 | base + NvRegRingSizes); | ||
2975 | pci_push(base); | ||
2976 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | ||
2977 | pci_push(base); | ||
2978 | |||
2979 | /* restart rx engine */ | ||
2980 | nv_start_rx(dev); | ||
2981 | nv_start_tx(dev); | ||
2982 | spin_unlock(&np->lock); | ||
2983 | netif_tx_unlock_bh(dev); | ||
2984 | } | ||
2985 | } | ||
2986 | |||
2905 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | 2987 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ |
2906 | 2988 | ||
2907 | writel(mask, base + NvRegIrqMask); | 2989 | writel(mask, base + NvRegIrqMask); |
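The nv_do_nic_poll() addition above recovers from a MAC error with a fixed quiesce-and-restart order: stop the rx/tx engines and reset, drain both rings, rebuild the driver's view of the rings, re-program the NIC's ring registers, then kick and restart, all under the tx lock and the driver's spinlock. A schematic restatement of that ordering with stand-in stubs (each comment names the driver calls the stub represents):

static void stop_engines(void)       { /* nv_stop_rx, nv_stop_tx, nv_txrx_reset */ }
static void drain_rings(void)        { /* nv_drain_rx, nv_drain_tx */ }
static int  rebuild_sw_rings(void)   { /* set_bufsize, nv_init_ring */ return 0; }
static void reprogram_hw_rings(void) { /* NvRegOffloadConfig, setup_hw_rings, NvRegRingSizes */ }
static void kick_and_restart(void)   { /* NvRegTxRxControl kick, nv_start_rx, nv_start_tx */ }

/* Order matters: the hardware must be idle before the rings are torn down,
 * and its ring registers must be rewritten before the engines restart. */
static void recover_from_mac_error(void)
{
        stop_engines();
        drain_rings();
        if (rebuild_sw_rings() != 0) {
                /* the driver re-arms its OOM refill timer here and carries on */
        }
        reprogram_hw_rings();
        kick_and_restart();
}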
@@ -4030,6 +4112,54 @@ static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
4030 | /* nothing to do */ | 4112 | /* nothing to do */ |
4031 | }; | 4113 | }; |
4032 | 4114 | ||
4115 | /* The mgmt unit and driver use a semaphore to access the phy during init */ | ||
4116 | static int nv_mgmt_acquire_sema(struct net_device *dev) | ||
4117 | { | ||
4118 | u8 __iomem *base = get_hwbase(dev); | ||
4119 | int i; | ||
4120 | u32 tx_ctrl, mgmt_sema; | ||
4121 | |||
4122 | for (i = 0; i < 10; i++) { | ||
4123 | mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; | ||
4124 | if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) | ||
4125 | break; | ||
4126 | msleep(500); | ||
4127 | } | ||
4128 | |||
4129 | if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) | ||
4130 | return 0; | ||
4131 | |||
4132 | for (i = 0; i < 2; i++) { | ||
4133 | tx_ctrl = readl(base + NvRegTransmitterControl); | ||
4134 | tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; | ||
4135 | writel(tx_ctrl, base + NvRegTransmitterControl); | ||
4136 | |||
4137 | /* verify that semaphore was acquired */ | ||
4138 | tx_ctrl = readl(base + NvRegTransmitterControl); | ||
4139 | if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && | ||
4140 | ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) | ||
4141 | return 1; | ||
4142 | else | ||
4143 | udelay(50); | ||
4144 | } | ||
4145 | |||
4146 | return 0; | ||
4147 | } | ||
4148 | |||
4149 | /* Indicate to mgmt unit whether driver is loaded or not */ | ||
4150 | static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded) | ||
4151 | { | ||
4152 | u8 __iomem *base = get_hwbase(dev); | ||
4153 | u32 tx_ctrl; | ||
4154 | |||
4155 | tx_ctrl = readl(base + NvRegTransmitterControl); | ||
4156 | if (loaded) | ||
4157 | tx_ctrl |= NVREG_XMITCTL_HOST_LOADED; | ||
4158 | else | ||
4159 | tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED; | ||
4160 | writel(tx_ctrl, base + NvRegTransmitterControl); | ||
4161 | } | ||
4162 | |||
4033 | static int nv_open(struct net_device *dev) | 4163 | static int nv_open(struct net_device *dev) |
4034 | { | 4164 | { |
4035 | struct fe_priv *np = netdev_priv(dev); | 4165 | struct fe_priv *np = netdev_priv(dev); |
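nv_mgmt_acquire_sema(), added earlier in this hunk, is a two-phase handshake over a shared register: poll (up to ten 500 ms sleeps) until the management unit's semaphore field reads as free, then set the host semaphore bits and read the register back to confirm both fields still look right before claiming ownership. A small in-memory simulation of that handshake, reusing the mask values from the hunk; everything else is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define MGMT_SEMA_MASK  0x00000f00      /* NVREG_XMITCTL_MGMT_SEMA_MASK */
#define MGMT_SEMA_FREE  0x00000000      /* NVREG_XMITCTL_MGMT_SEMA_FREE */
#define HOST_SEMA_MASK  0x0000f000      /* NVREG_XMITCTL_HOST_SEMA_MASK */
#define HOST_SEMA_ACQ   0x0000f000      /* NVREG_XMITCTL_HOST_SEMA_ACQ  */

static uint32_t fake_tx_ctrl;           /* stands in for NvRegTransmitterControl */

static int acquire_mgmt_sema(void)
{
        int i;

        /* phase 1: wait for the management unit to release its semaphore */
        for (i = 0; i < 10; i++) {
                if ((fake_tx_ctrl & MGMT_SEMA_MASK) == MGMT_SEMA_FREE)
                        break;
                /* the driver sleeps 500 ms between polls here */
        }
        if ((fake_tx_ctrl & MGMT_SEMA_MASK) != MGMT_SEMA_FREE)
                return 0;

        /* phase 2: set the host semaphore bits, then read back to verify we
         * hold them and the management unit has not grabbed its own */
        for (i = 0; i < 2; i++) {
                fake_tx_ctrl |= HOST_SEMA_ACQ;
                if ((fake_tx_ctrl & HOST_SEMA_MASK) == HOST_SEMA_ACQ &&
                    (fake_tx_ctrl & MGMT_SEMA_MASK) == MGMT_SEMA_FREE)
                        return 1;
                /* the driver waits 50 us before retrying */
        }
        return 0;
}

int main(void)
{
        printf("semaphore acquired: %d\n", acquire_mgmt_sema());
        return 0;
}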
@@ -4085,7 +4215,7 @@ static int nv_open(struct net_device *dev) | |||
4085 | NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, | 4215 | NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, |
4086 | KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); | 4216 | KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); |
4087 | 4217 | ||
4088 | writel(0, base + NvRegUnknownSetupReg4); | 4218 | writel(0, base + NvRegMIIMask); |
4089 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | 4219 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); |
4090 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | 4220 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); |
4091 | 4221 | ||
@@ -4111,7 +4241,7 @@ static int nv_open(struct net_device *dev) | |||
4111 | writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, | 4241 | writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, |
4112 | base + NvRegAdapterControl); | 4242 | base + NvRegAdapterControl); |
4113 | writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); | 4243 | writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); |
4114 | writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); | 4244 | writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); |
4115 | if (np->wolenabled) | 4245 | if (np->wolenabled) |
4116 | writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); | 4246 | writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); |
4117 | 4247 | ||
@@ -4230,6 +4360,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4230 | u8 __iomem *base; | 4360 | u8 __iomem *base; |
4231 | int err, i; | 4361 | int err, i; |
4232 | u32 powerstate, txreg; | 4362 | u32 powerstate, txreg; |
4363 | u32 phystate_orig = 0, phystate; | ||
4364 | int phyinitialized = 0; | ||
4233 | 4365 | ||
4234 | dev = alloc_etherdev(sizeof(struct fe_priv)); | 4366 | dev = alloc_etherdev(sizeof(struct fe_priv)); |
4235 | err = -ENOMEM; | 4367 | err = -ENOMEM; |
@@ -4514,6 +4646,48 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4514 | np->need_linktimer = 0; | 4646 | np->need_linktimer = 0; |
4515 | } | 4647 | } |
4516 | 4648 | ||
4649 | /* clear phy state and temporarily halt phy interrupts */ | ||
4650 | writel(0, base + NvRegMIIMask); | ||
4651 | phystate = readl(base + NvRegAdapterControl); | ||
4652 | if (phystate & NVREG_ADAPTCTL_RUNNING) { | ||
4653 | phystate_orig = 1; | ||
4654 | phystate &= ~NVREG_ADAPTCTL_RUNNING; | ||
4655 | writel(phystate, base + NvRegAdapterControl); | ||
4656 | } | ||
4657 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | ||
4658 | |||
4659 | if (id->driver_data & DEV_HAS_MGMT_UNIT) { | ||
4660 | writel(0x1, base + 0x204); pci_push(base); | ||
4661 | msleep(500); | ||
4662 | /* management unit running on the mac? */ | ||
4663 | np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; | ||
4664 | if (np->mac_in_use) { | ||
4665 | u32 mgmt_sync; | ||
4666 | /* has the management unit already set up the phy? */ | ||
4667 | mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; | ||
4668 | if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) { | ||
4669 | if (!nv_mgmt_acquire_sema(dev)) { | ||
4670 | for (i = 0; i < 5000; i++) { | ||
4671 | msleep(1); | ||
4672 | mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; | ||
4673 | if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) | ||
4674 | continue; | ||
4675 | if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) | ||
4676 | phyinitialized = 1; | ||
4677 | break; | ||
4678 | } | ||
4679 | } else { | ||
4680 | /* we need to init the phy */ | ||
4681 | } | ||
4682 | } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) { | ||
4683 | /* phy was already initialized by the SMU */ | ||
4684 | phyinitialized = 1; | ||
4685 | } else { | ||
4686 | /* we need to init the phy */ | ||
4687 | } | ||
4688 | } | ||
4689 | } | ||
4690 | |||
4517 | /* find a suitable phy */ | 4691 | /* find a suitable phy */ |
4518 | for (i = 1; i <= 32; i++) { | 4692 | for (i = 1; i <= 32; i++) { |
4519 | int id1, id2; | 4693 | int id1, id2; |
@@ -4545,8 +4719,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4545 | goto out_error; | 4719 | goto out_error; |
4546 | } | 4720 | } |
4547 | 4721 | ||
4548 | /* reset it */ | 4722 | if (!phyinitialized) { |
4549 | phy_init(dev); | 4723 | /* reset it */ |
4724 | phy_init(dev); | ||
4725 | } | ||
4726 | |||
4727 | if (id->driver_data & DEV_HAS_MGMT_UNIT) { | ||
4728 | nv_mgmt_driver_loaded(dev, 1); | ||
4729 | } | ||
4550 | 4730 | ||
4551 | /* set default link speed settings */ | 4731 | /* set default link speed settings */ |
4552 | np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | 4732 | np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
@@ -4565,6 +4745,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4565 | return 0; | 4745 | return 0; |
4566 | 4746 | ||
4567 | out_error: | 4747 | out_error: |
4748 | if (phystate_orig) | ||
4749 | writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); | ||
4750 | if (np->mac_in_use) | ||
4751 | nv_mgmt_driver_loaded(dev, 0); | ||
4568 | pci_set_drvdata(pci_dev, NULL); | 4752 | pci_set_drvdata(pci_dev, NULL); |
4569 | out_freering: | 4753 | out_freering: |
4570 | free_rings(dev); | 4754 | free_rings(dev); |
@@ -4594,6 +4778,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev) | |||
4594 | writel(np->orig_mac[0], base + NvRegMacAddrA); | 4778 | writel(np->orig_mac[0], base + NvRegMacAddrA); |
4595 | writel(np->orig_mac[1], base + NvRegMacAddrB); | 4779 | writel(np->orig_mac[1], base + NvRegMacAddrB); |
4596 | 4780 | ||
4781 | if (np->mac_in_use) | ||
4782 | nv_mgmt_driver_loaded(dev, 0); | ||
4783 | |||
4597 | /* free all structures */ | 4784 | /* free all structures */ |
4598 | free_rings(dev); | 4785 | free_rings(dev); |
4599 | iounmap(get_hwbase(dev)); | 4786 | iounmap(get_hwbase(dev)); |
@@ -4603,6 +4790,50 @@ static void __devexit nv_remove(struct pci_dev *pci_dev) | |||
4603 | pci_set_drvdata(pci_dev, NULL); | 4790 | pci_set_drvdata(pci_dev, NULL); |
4604 | } | 4791 | } |
4605 | 4792 | ||
4793 | #ifdef CONFIG_PM | ||
4794 | static int nv_suspend(struct pci_dev *pdev, pm_message_t state) | ||
4795 | { | ||
4796 | struct net_device *dev = pci_get_drvdata(pdev); | ||
4797 | struct fe_priv *np = netdev_priv(dev); | ||
4798 | |||
4799 | if (!netif_running(dev)) | ||
4800 | goto out; | ||
4801 | |||
4802 | netif_device_detach(dev); | ||
4803 | |||
4804 | // Gross. | ||
4805 | nv_close(dev); | ||
4806 | |||
4807 | pci_save_state(pdev); | ||
4808 | pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); | ||
4809 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
4810 | out: | ||
4811 | return 0; | ||
4812 | } | ||
4813 | |||
4814 | static int nv_resume(struct pci_dev *pdev) | ||
4815 | { | ||
4816 | struct net_device *dev = pci_get_drvdata(pdev); | ||
4817 | int rc = 0; | ||
4818 | |||
4819 | if (!netif_running(dev)) | ||
4820 | goto out; | ||
4821 | |||
4822 | netif_device_attach(dev); | ||
4823 | |||
4824 | pci_set_power_state(pdev, PCI_D0); | ||
4825 | pci_restore_state(pdev); | ||
4826 | pci_enable_wake(pdev, PCI_D0, 0); | ||
4827 | |||
4828 | rc = nv_open(dev); | ||
4829 | out: | ||
4830 | return rc; | ||
4831 | } | ||
4832 | #else | ||
4833 | #define nv_suspend NULL | ||
4834 | #define nv_resume NULL | ||
4835 | #endif /* CONFIG_PM */ | ||
4836 | |||
4606 | static struct pci_device_id pci_tbl[] = { | 4837 | static struct pci_device_id pci_tbl[] = { |
4607 | { /* nForce Ethernet Controller */ | 4838 | { /* nForce Ethernet Controller */ |
4608 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), | 4839 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), |
@@ -4658,43 +4889,59 @@ static struct pci_device_id pci_tbl[] = { | |||
4658 | }, | 4889 | }, |
4659 | { /* MCP55 Ethernet Controller */ | 4890 | { /* MCP55 Ethernet Controller */ |
4660 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), | 4891 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), |
4661 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4892 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4662 | }, | 4893 | }, |
4663 | { /* MCP55 Ethernet Controller */ | 4894 | { /* MCP55 Ethernet Controller */ |
4664 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), | 4895 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), |
4665 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4896 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4666 | }, | 4897 | }, |
4667 | { /* MCP61 Ethernet Controller */ | 4898 | { /* MCP61 Ethernet Controller */ |
4668 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), | 4899 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), |
4669 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4900 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4670 | }, | 4901 | }, |
4671 | { /* MCP61 Ethernet Controller */ | 4902 | { /* MCP61 Ethernet Controller */ |
4672 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), | 4903 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), |
4673 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4904 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4674 | }, | 4905 | }, |
4675 | { /* MCP61 Ethernet Controller */ | 4906 | { /* MCP61 Ethernet Controller */ |
4676 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), | 4907 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), |
4677 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4908 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4678 | }, | 4909 | }, |
4679 | { /* MCP61 Ethernet Controller */ | 4910 | { /* MCP61 Ethernet Controller */ |
4680 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), | 4911 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), |
4681 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4912 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4682 | }, | 4913 | }, |
4683 | { /* MCP65 Ethernet Controller */ | 4914 | { /* MCP65 Ethernet Controller */ |
4684 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), | 4915 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), |
4685 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4916 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4686 | }, | 4917 | }, |
4687 | { /* MCP65 Ethernet Controller */ | 4918 | { /* MCP65 Ethernet Controller */ |
4688 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), | 4919 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), |
4689 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4920 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4690 | }, | 4921 | }, |
4691 | { /* MCP65 Ethernet Controller */ | 4922 | { /* MCP65 Ethernet Controller */ |
4692 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), | 4923 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), |
4693 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4924 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4694 | }, | 4925 | }, |
4695 | { /* MCP65 Ethernet Controller */ | 4926 | { /* MCP65 Ethernet Controller */ |
4696 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), | 4927 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), |
4697 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, | 4928 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4929 | }, | ||
4930 | { /* MCP67 Ethernet Controller */ | ||
4931 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), | ||
4932 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | ||
4933 | }, | ||
4934 | { /* MCP67 Ethernet Controller */ | ||
4935 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), | ||
4936 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | ||
4937 | }, | ||
4938 | { /* MCP67 Ethernet Controller */ | ||
4939 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), | ||
4940 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | ||
4941 | }, | ||
4942 | { /* MCP67 Ethernet Controller */ | ||
4943 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), | ||
4944 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | ||
4698 | }, | 4945 | }, |
4699 | {0,}, | 4946 | {0,}, |
4700 | }; | 4947 | }; |
@@ -4704,9 +4951,10 @@ static struct pci_driver driver = { | |||
4704 | .id_table = pci_tbl, | 4951 | .id_table = pci_tbl, |
4705 | .probe = nv_probe, | 4952 | .probe = nv_probe, |
4706 | .remove = __devexit_p(nv_remove), | 4953 | .remove = __devexit_p(nv_remove), |
4954 | .suspend = nv_suspend, | ||
4955 | .resume = nv_resume, | ||
4707 | }; | 4956 | }; |
4708 | 4957 | ||
4709 | |||
4710 | static int __init init_nic(void) | 4958 | static int __init init_nic(void) |
4711 | { | 4959 | { |
4712 | printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); | 4960 | printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index cb3958704a87..889d3a13e95e 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -779,7 +779,8 @@ static int fs_init_phy(struct net_device *dev) | |||
779 | fep->oldspeed = 0; | 779 | fep->oldspeed = 0; |
780 | fep->oldduplex = -1; | 780 | fep->oldduplex = -1; |
781 | if(fep->fpi->bus_id) | 781 | if(fep->fpi->bus_id) |
782 | phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0); | 782 | phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0, |
783 | PHY_INTERFACE_MODE_MII); | ||
783 | else { | 784 | else { |
784 | printk("No phy bus ID specified in BSP code\n"); | 785 | printk("No phy bus ID specified in BSP code\n"); |
785 | return -EINVAL; | 786 | return -EINVAL; |
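Both this hunk and the gianfar init_phy() change below show the extended phy_connect(): callers now pass a phy_interface_t as the final argument (fs_enet hard-codes PHY_INTERFACE_MODE_MII, while gianfar auto-detects the mode). A minimal caller in the same style, with a placeholder bus id string and handler name:

	struct phy_device *phydev;

	/* "0:01" and my_adjust_link are illustrative placeholders */
	phydev = phy_connect(dev, "0:01", &my_adjust_link, 0 /* flags */,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);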
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index a06d8d1aaceb..baa35144134c 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * Author: Andy Fleming | 9 | * Author: Andy Fleming |
10 | * Maintainer: Kumar Gala | 10 | * Maintainer: Kumar Gala |
11 | * | 11 | * |
12 | * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. | 12 | * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify it | 14 | * This program is free software; you can redistribute it and/or modify it |
15 | * under the terms of the GNU General Public License as published by the | 15 | * under the terms of the GNU General Public License as published by the |
@@ -133,6 +133,9 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); | |||
133 | #ifdef CONFIG_GFAR_NAPI | 133 | #ifdef CONFIG_GFAR_NAPI |
134 | static int gfar_poll(struct net_device *dev, int *budget); | 134 | static int gfar_poll(struct net_device *dev, int *budget); |
135 | #endif | 135 | #endif |
136 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
137 | static void gfar_netpoll(struct net_device *dev); | ||
138 | #endif | ||
136 | int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); | 139 | int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); |
137 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); | 140 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); |
138 | static void gfar_vlan_rx_register(struct net_device *netdev, | 141 | static void gfar_vlan_rx_register(struct net_device *netdev, |
@@ -260,6 +263,9 @@ static int gfar_probe(struct platform_device *pdev) | |||
260 | dev->poll = gfar_poll; | 263 | dev->poll = gfar_poll; |
261 | dev->weight = GFAR_DEV_WEIGHT; | 264 | dev->weight = GFAR_DEV_WEIGHT; |
262 | #endif | 265 | #endif |
266 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
267 | dev->poll_controller = gfar_netpoll; | ||
268 | #endif | ||
263 | dev->stop = gfar_close; | 269 | dev->stop = gfar_close; |
264 | dev->get_stats = gfar_get_stats; | 270 | dev->get_stats = gfar_get_stats; |
265 | dev->change_mtu = gfar_change_mtu; | 271 | dev->change_mtu = gfar_change_mtu; |
@@ -392,6 +398,38 @@ static int gfar_remove(struct platform_device *pdev) | |||
392 | } | 398 | } |
393 | 399 | ||
394 | 400 | ||
401 | /* Reads the controller's registers to determine what interface | ||
402 | * connects it to the PHY. | ||
403 | */ | ||
404 | static phy_interface_t gfar_get_interface(struct net_device *dev) | ||
405 | { | ||
406 | struct gfar_private *priv = netdev_priv(dev); | ||
407 | u32 ecntrl = gfar_read(&priv->regs->ecntrl); | ||
408 | |||
409 | if (ecntrl & ECNTRL_SGMII_MODE) | ||
410 | return PHY_INTERFACE_MODE_SGMII; | ||
411 | |||
412 | if (ecntrl & ECNTRL_TBI_MODE) { | ||
413 | if (ecntrl & ECNTRL_REDUCED_MODE) | ||
414 | return PHY_INTERFACE_MODE_RTBI; | ||
415 | else | ||
416 | return PHY_INTERFACE_MODE_TBI; | ||
417 | } | ||
418 | |||
419 | if (ecntrl & ECNTRL_REDUCED_MODE) { | ||
420 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) | ||
421 | return PHY_INTERFACE_MODE_RMII; | ||
422 | else | ||
423 | return PHY_INTERFACE_MODE_RGMII; | ||
424 | } | ||
425 | |||
426 | if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) | ||
427 | return PHY_INTERFACE_MODE_GMII; | ||
428 | |||
429 | return PHY_INTERFACE_MODE_MII; | ||
430 | } | ||
431 | |||
432 | |||
395 | /* Initializes driver's PHY state, and attaches to the PHY. | 433 | /* Initializes driver's PHY state, and attaches to the PHY. |
396 | * Returns 0 on success. | 434 | * Returns 0 on success. |
397 | */ | 435 | */ |
@@ -403,6 +441,7 @@ static int init_phy(struct net_device *dev) | |||
403 | SUPPORTED_1000baseT_Full : 0; | 441 | SUPPORTED_1000baseT_Full : 0; |
404 | struct phy_device *phydev; | 442 | struct phy_device *phydev; |
405 | char phy_id[BUS_ID_SIZE]; | 443 | char phy_id[BUS_ID_SIZE]; |
444 | phy_interface_t interface; | ||
406 | 445 | ||
407 | priv->oldlink = 0; | 446 | priv->oldlink = 0; |
408 | priv->oldspeed = 0; | 447 | priv->oldspeed = 0; |
@@ -410,7 +449,9 @@ static int init_phy(struct net_device *dev) | |||
410 | 449 | ||
411 | snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id); | 450 | snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id); |
412 | 451 | ||
413 | phydev = phy_connect(dev, phy_id, &adjust_link, 0); | 452 | interface = gfar_get_interface(dev); |
453 | |||
454 | phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface); | ||
414 | 455 | ||
415 | if (IS_ERR(phydev)) { | 456 | if (IS_ERR(phydev)) { |
416 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 457 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); |
@@ -1536,6 +1577,33 @@ static int gfar_poll(struct net_device *dev, int *budget) | |||
1536 | } | 1577 | } |
1537 | #endif | 1578 | #endif |
1538 | 1579 | ||
1580 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1581 | /* | ||
1582 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
1583 | * without having to re-enable interrupts. It's not called while | ||
1584 | * the interrupt routine is executing. | ||
1585 | */ | ||
1586 | static void gfar_netpoll(struct net_device *dev) | ||
1587 | { | ||
1588 | struct gfar_private *priv = netdev_priv(dev); | ||
1589 | |||
1590 | /* If the device has multiple interrupts, run tx/rx */ | ||
1591 | if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
1592 | disable_irq(priv->interruptTransmit); | ||
1593 | disable_irq(priv->interruptReceive); | ||
1594 | disable_irq(priv->interruptError); | ||
1595 | gfar_interrupt(priv->interruptTransmit, dev); | ||
1596 | enable_irq(priv->interruptError); | ||
1597 | enable_irq(priv->interruptReceive); | ||
1598 | enable_irq(priv->interruptTransmit); | ||
1599 | } else { | ||
1600 | disable_irq(priv->interruptTransmit); | ||
1601 | gfar_interrupt(priv->interruptTransmit, dev); | ||
1602 | enable_irq(priv->interruptTransmit); | ||
1603 | } | ||
1604 | } | ||
1605 | #endif | ||
1606 | |||
1539 | /* The interrupt handler for devices with one interrupt */ | 1607 | /* The interrupt handler for devices with one interrupt */ |
1540 | static irqreturn_t gfar_interrupt(int irq, void *dev_id) | 1608 | static irqreturn_t gfar_interrupt(int irq, void *dev_id) |
1541 | { | 1609 | { |
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 9e81a50cf2be..39e9e321fcbc 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h | |||
@@ -160,7 +160,10 @@ extern const char gfar_driver_version[]; | |||
160 | 160 | ||
161 | #define ECNTRL_INIT_SETTINGS 0x00001000 | 161 | #define ECNTRL_INIT_SETTINGS 0x00001000 |
162 | #define ECNTRL_TBI_MODE 0x00000020 | 162 | #define ECNTRL_TBI_MODE 0x00000020 |
163 | #define ECNTRL_REDUCED_MODE 0x00000010 | ||
163 | #define ECNTRL_R100 0x00000008 | 164 | #define ECNTRL_R100 0x00000008 |
165 | #define ECNTRL_REDUCED_MII_MODE 0x00000004 | ||
166 | #define ECNTRL_SGMII_MODE 0x00000002 | ||
164 | 167 | ||
165 | #define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE | 168 | #define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE |
166 | 169 | ||
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index 91326ea3e12b..f970bfbb9db2 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c | |||
@@ -31,7 +31,16 @@ | |||
31 | #include <asm/amigahw.h> | 31 | #include <asm/amigahw.h> |
32 | #include <linux/zorro.h> | 32 | #include <linux/zorro.h> |
33 | 33 | ||
34 | #include "8390.h" | 34 | #define EI_SHIFT(x) (ei_local->reg_offset[x]) |
35 | #define ei_inb(port) in_8(port) | ||
36 | #define ei_outb(val,port) out_8(port,val) | ||
37 | #define ei_inb_p(port) in_8(port) | ||
38 | #define ei_outb_p(val,port) out_8(port,val) | ||
39 | |||
40 | static const char version[] = | ||
41 | "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; | ||
42 | |||
43 | #include "lib8390.c" | ||
35 | 44 | ||
36 | #define NE_EN0_DCFG (0x0e*2) | 45 | #define NE_EN0_DCFG (0x0e*2) |
37 | 46 | ||
@@ -100,7 +109,7 @@ static int __devinit hydra_init(struct zorro_dev *z) | |||
100 | 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, | 109 | 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, |
101 | }; | 110 | }; |
102 | 111 | ||
103 | dev = alloc_ei_netdev(); | 112 | dev = ____alloc_ei_netdev(0); |
104 | if (!dev) | 113 | if (!dev) |
105 | return -ENOMEM; | 114 | return -ENOMEM; |
106 | SET_MODULE_OWNER(dev); | 115 | SET_MODULE_OWNER(dev); |
@@ -117,7 +126,7 @@ static int __devinit hydra_init(struct zorro_dev *z) | |||
117 | dev->irq = IRQ_AMIGA_PORTS; | 126 | dev->irq = IRQ_AMIGA_PORTS; |
118 | 127 | ||
119 | /* Install the Interrupt handler */ | 128 | /* Install the Interrupt handler */ |
120 | if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, IRQF_SHARED, "Hydra Ethernet", | 129 | if (request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, "Hydra Ethernet", |
121 | dev)) { | 130 | dev)) { |
122 | free_netdev(dev); | 131 | free_netdev(dev); |
123 | return -EAGAIN; | 132 | return -EAGAIN; |
@@ -139,10 +148,10 @@ static int __devinit hydra_init(struct zorro_dev *z) | |||
139 | dev->open = &hydra_open; | 148 | dev->open = &hydra_open; |
140 | dev->stop = &hydra_close; | 149 | dev->stop = &hydra_close; |
141 | #ifdef CONFIG_NET_POLL_CONTROLLER | 150 | #ifdef CONFIG_NET_POLL_CONTROLLER |
142 | dev->poll_controller = ei_poll; | 151 | dev->poll_controller = __ei_poll; |
143 | #endif | 152 | #endif |
144 | 153 | ||
145 | NS8390_init(dev, 0); | 154 | __NS8390_init(dev, 0); |
146 | 155 | ||
147 | err = register_netdev(dev); | 156 | err = register_netdev(dev); |
148 | if (err) { | 157 | if (err) { |
@@ -164,7 +173,7 @@ static int __devinit hydra_init(struct zorro_dev *z) | |||
164 | 173 | ||
165 | static int hydra_open(struct net_device *dev) | 174 | static int hydra_open(struct net_device *dev) |
166 | { | 175 | { |
167 | ei_open(dev); | 176 | __ei_open(dev); |
168 | return 0; | 177 | return 0; |
169 | } | 178 | } |
170 | 179 | ||
@@ -172,7 +181,7 @@ static int hydra_close(struct net_device *dev) | |||
172 | { | 181 | { |
173 | if (ei_debug > 1) | 182 | if (ei_debug > 1) |
174 | printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); | 183 | printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); |
175 | ei_close(dev); | 184 | __ei_close(dev); |
176 | return 0; | 185 | return 0; |
177 | } | 186 | } |
178 | 187 | ||
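The hydra.c hunks above show the new lib8390.c arrangement: a board driver defines its register accessors first, includes lib8390.c, and thereby gets a private copy of the 8390 core specialised for those accessors, with entry points renamed to __ei_open, __ei_close, __ei_interrupt, __ei_poll, __NS8390_init and ____alloc_ei_netdev. Condensed into one sketch (the accessors are the Amiga in_8/out_8 from the patch; the my_board_* names are hypothetical):

	#define EI_SHIFT(x)		(ei_local->reg_offset[x])
	#define ei_inb(port)		in_8(port)
	#define ei_outb(val, port)	out_8(port, val)
	#define ei_inb_p(port)		in_8(port)
	#define ei_outb_p(val, port)	out_8(port, val)
	#include "lib8390.c"		/* pulls in the accessor-specialised core */

	static int my_board_open(struct net_device *dev)
	{
		return __ei_open(dev);			/* as hydra_open() does */
	}

	static int my_board_attach(struct net_device *dev)
	{
		/* hypothetical probe fragment showing which template symbols a board uses */
		if (request_irq(dev->irq, __ei_interrupt, IRQF_SHARED, "my-8390", dev))
			return -EAGAIN;
		dev->open = &my_board_open;
	#ifdef CONFIG_NET_POLL_CONTROLLER
		dev->poll_controller = __ei_poll;
	#endif
		__NS8390_init(dev, 0);
		return 0;
	}

The net_device itself comes from ____alloc_ei_netdev(), as in the hydra_init() hunk above.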
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c new file mode 100644 index 000000000000..e726c06b8dc6 --- /dev/null +++ b/drivers/net/lib8390.c | |||
@@ -0,0 +1,1097 @@ | |||
1 | /* 8390.c: A general NS8390 ethernet driver core for linux. */ | ||
2 | /* | ||
3 | Written 1992-94 by Donald Becker. | ||
4 | |||
5 | Copyright 1993 United States Government as represented by the | ||
6 | Director, National Security Agency. | ||
7 | |||
8 | This software may be used and distributed according to the terms | ||
9 | of the GNU General Public License, incorporated herein by reference. | ||
10 | |||
11 | The author may be reached as becker@scyld.com, or C/O | ||
12 | Scyld Computing Corporation | ||
13 | 410 Severn Ave., Suite 210 | ||
14 | Annapolis MD 21403 | ||
15 | |||
16 | |||
17 | This is the chip-specific code for many 8390-based ethernet adaptors. | ||
18 | This is not a complete driver, it must be combined with board-specific | ||
19 | code such as ne.c, wd.c, 3c503.c, etc. | ||
20 | |||
21 | Seeing how at least eight drivers use this code, (not counting the | ||
22 | PCMCIA ones either) it is easy to break some card by what seems like | ||
23 | a simple innocent change. Please contact me or Donald if you think | ||
24 | you have found something that needs changing. -- PG | ||
25 | |||
26 | |||
27 | Changelog: | ||
28 | |||
29 | Paul Gortmaker : remove set_bit lock, other cleanups. | ||
30 | Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to | ||
31 | ei_block_input() for eth_io_copy_and_sum(). | ||
32 | Paul Gortmaker : exchange static int ei_pingpong for a #define, | ||
33 | also add better Tx error handling. | ||
34 | Paul Gortmaker : rewrite Rx overrun handling as per NS specs. | ||
35 | Alexey Kuznetsov : use the 8390's six bit hash multicast filter. | ||
36 | Paul Gortmaker : tweak ANK's above multicast changes a bit. | ||
37 | Paul Gortmaker : update packet statistics for v2.1.x | ||
38 | Alan Cox : support arbitrary stupid port mappings on the | ||
39 | 68K Macintosh. Support >16bit I/O spaces | ||
40 | Paul Gortmaker : add kmod support for auto-loading of the 8390 | ||
41 | module by all drivers that require it. | ||
42 | Alan Cox : Spinlocking work, added 'BUG_83C690' | ||
43 | Paul Gortmaker : Separate out Tx timeout code from Tx path. | ||
44 | Paul Gortmaker : Remove old unused single Tx buffer code. | ||
45 | Hayato Fujiwara : Add m32r support. | ||
46 | Paul Gortmaker : use skb_padto() instead of stack scratch area | ||
47 | |||
48 | Sources: | ||
49 | The National Semiconductor LAN Databook, and the 3Com 3c503 databook. | ||
50 | |||
51 | */ | ||
52 | |||
53 | #include <linux/module.h> | ||
54 | #include <linux/kernel.h> | ||
55 | #include <linux/jiffies.h> | ||
56 | #include <linux/fs.h> | ||
57 | #include <linux/types.h> | ||
58 | #include <linux/string.h> | ||
59 | #include <linux/bitops.h> | ||
60 | #include <asm/system.h> | ||
61 | #include <asm/uaccess.h> | ||
62 | #include <asm/io.h> | ||
63 | #include <asm/irq.h> | ||
64 | #include <linux/delay.h> | ||
65 | #include <linux/errno.h> | ||
66 | #include <linux/fcntl.h> | ||
67 | #include <linux/in.h> | ||
68 | #include <linux/interrupt.h> | ||
69 | #include <linux/init.h> | ||
70 | #include <linux/crc32.h> | ||
71 | |||
72 | #include <linux/netdevice.h> | ||
73 | #include <linux/etherdevice.h> | ||
74 | |||
75 | #define NS8390_CORE | ||
76 | #include "8390.h" | ||
77 | |||
78 | #define BUG_83C690 | ||
79 | |||
80 | /* These are the operational function interfaces to board-specific | ||
81 | routines. | ||
82 | void reset_8390(struct net_device *dev) | ||
83 | Resets the board associated with DEV, including a hardware reset of | ||
84 | the 8390. This is only called when there is a transmit timeout, and | ||
85 | it is always followed by 8390_init(). | ||
86 | void block_output(struct net_device *dev, int count, const unsigned char *buf, | ||
87 | int start_page) | ||
88 | Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The | ||
89 | "page" value uses the 8390's 256-byte pages. | ||
90 | void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) | ||
91 | Read the 4 byte, page aligned 8390 header. *If* there is a | ||
92 | subsequent read, it will be of the rest of the packet. | ||
93 | void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) | ||
94 | Read COUNT bytes from the packet buffer into the skb data area. Start | ||
95 | reading from RING_OFFSET, the address as the 8390 sees it. This will always | ||
96 | follow the read of the 8390 header. | ||
97 | */ | ||
98 | #define ei_reset_8390 (ei_local->reset_8390) | ||
99 | #define ei_block_output (ei_local->block_output) | ||
100 | #define ei_block_input (ei_local->block_input) | ||
101 | #define ei_get_8390_hdr (ei_local->get_8390_hdr) | ||
102 | |||
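The four ops described above are supplied per board through the ei_device fields these #defines alias; a stub-level sketch of the board side (the my_* names are hypothetical, and the header type matches the struct e8390_pkt_hdr used by ei_receive() below):

	static void my_reset_8390(struct net_device *dev)
	{
		/* hard-reset the board's 8390; always followed by a re-init */
	}

	static void my_get_8390_hdr(struct net_device *dev,
				    struct e8390_pkt_hdr *hdr, int ring_page)
	{
		/* read the 4-byte, page-aligned header of ring_page into *hdr */
	}

	static void my_block_input(struct net_device *dev, int count,
				   struct sk_buff *skb, int ring_offset)
	{
		/* copy count bytes from the ring at ring_offset into skb->data */
	}

	static void my_block_output(struct net_device *dev, int count,
				    const unsigned char *buf, int start_page)
	{
		/* write count bytes of buf to the 256-byte page start_page */
	}

These get assigned to ei_local->reset_8390, ->get_8390_hdr, ->block_input and ->block_output at probe time, which is exactly what the aliases above dereference.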
103 | /* use 0 for production, 1 for verification, >2 for debug */ | ||
104 | #ifndef ei_debug | ||
105 | int ei_debug = 1; | ||
106 | #endif | ||
107 | |||
108 | /* Index to functions. */ | ||
109 | static void ei_tx_intr(struct net_device *dev); | ||
110 | static void ei_tx_err(struct net_device *dev); | ||
111 | static void ei_tx_timeout(struct net_device *dev); | ||
112 | static void ei_receive(struct net_device *dev); | ||
113 | static void ei_rx_overrun(struct net_device *dev); | ||
114 | |||
115 | /* Routines generic to NS8390-based boards. */ | ||
116 | static void NS8390_trigger_send(struct net_device *dev, unsigned int length, | ||
117 | int start_page); | ||
118 | static void set_multicast_list(struct net_device *dev); | ||
119 | static void do_set_multicast_list(struct net_device *dev); | ||
120 | static void __NS8390_init(struct net_device *dev, int startp); | ||
121 | |||
122 | /* | ||
123 | * SMP and the 8390 setup. | ||
124 | * | ||
125 | * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is | ||
126 | * a page register that controls bank and packet buffer access. We guard | ||
127 | * this with ei_local->page_lock. Nobody should assume or set the page other | ||
128 | * than zero when the lock is not held. Lock holders must restore page 0 | ||
129 | * before unlocking. Even pure readers must take the lock to protect | ||
130 | * access to page 0. | ||
131 | * | ||
132 | * To make life difficult the chip can also be very slow. We therefore can't | ||
133 | * just use spinlocks. For the longer lockups we disable the irq the device | ||
134 | * sits on and hold the lock. We must hold the lock because there is a dual | ||
135 | * processor case other than interrupts (get stats/set multicast list in | ||
136 | * parallel with each other and transmit). | ||
137 | * | ||
138 | * Note: in theory we can just disable the irq on the card _but_ there is | ||
139 | * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs" | ||
140 | * enter lock, take the queued irq. So we waddle instead of flying. | ||
141 | * | ||
142 | * Finally by special arrangement for the purpose of being generally | ||
143 | * annoying the transmit function is called bh atomic. That places | ||
144 | * restrictions on the user context callers as disable_irq won't save | ||
145 | * them. | ||
146 | */ | ||
147 | |||
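Concretely, the discipline described above is the sequence ei_start_xmit() and ei_tx_timeout() follow below, stripped to its shape:

	/* Fast case: a short register access only needs the page lock. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);		/* e.g. mask chip interrupts */
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	/* Slow case: also keep the board's IRQ line quiet for the duration. */
	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
	spin_lock(&ei_local->page_lock);
	/* ... lengthy chip programming; must leave the chip on page 0 ... */
	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);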
148 | |||
149 | |||
150 | /** | ||
151 | * ei_open - Open/initialize the board. | ||
152 | * @dev: network device to initialize | ||
153 | * | ||
154 | * This routine goes all-out, setting everything | ||
155 | * up anew at each open, even though many of these registers should only | ||
156 | * need to be set once at boot. | ||
157 | */ | ||
158 | static int __ei_open(struct net_device *dev) | ||
159 | { | ||
160 | unsigned long flags; | ||
161 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
162 | |||
163 | /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout | ||
164 | wrapper that does e.g. media check & then calls ei_tx_timeout. */ | ||
165 | if (dev->tx_timeout == NULL) | ||
166 | dev->tx_timeout = ei_tx_timeout; | ||
167 | if (dev->watchdog_timeo <= 0) | ||
168 | dev->watchdog_timeo = TX_TIMEOUT; | ||
169 | |||
170 | /* | ||
171 | * Grab the page lock so we own the register set, then call | ||
172 | * the init function. | ||
173 | */ | ||
174 | |||
175 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
176 | __NS8390_init(dev, 1); | ||
177 | /* Set the flag before we drop the lock. That way the IRQ arrives | ||
178 | after it's set and we get no silly warnings */ | ||
179 | netif_start_queue(dev); | ||
180 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
181 | ei_local->irqlock = 0; | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * ei_close - shut down network device | ||
187 | * @dev: network device to close | ||
188 | * | ||
189 | * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done. | ||
190 | */ | ||
191 | static int __ei_close(struct net_device *dev) | ||
192 | { | ||
193 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
194 | unsigned long flags; | ||
195 | |||
196 | /* | ||
197 | * Hold the page lock during close | ||
198 | */ | ||
199 | |||
200 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
201 | __NS8390_init(dev, 0); | ||
202 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
203 | netif_stop_queue(dev); | ||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | /** | ||
208 | * ei_tx_timeout - handle transmit time out condition | ||
209 | * @dev: network device which has apparently fallen asleep | ||
210 | * | ||
211 | * Called by kernel when device never acknowledges a transmit has | ||
212 | * completed (or failed) - i.e. never posted a Tx related interrupt. | ||
213 | */ | ||
214 | |||
215 | static void ei_tx_timeout(struct net_device *dev) | ||
216 | { | ||
217 | unsigned long e8390_base = dev->base_addr; | ||
218 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
219 | int txsr, isr, tickssofar = jiffies - dev->trans_start; | ||
220 | unsigned long flags; | ||
221 | |||
222 | #if defined(CONFIG_M32R) && defined(CONFIG_SMP) | ||
223 | unsigned long icucr; | ||
224 | |||
225 | local_irq_save(flags); | ||
226 | icucr = inl(M32R_ICU_CR1_PORTL); | ||
227 | icucr |= M32R_ICUCR_ISMOD11; | ||
228 | outl(icucr, M32R_ICU_CR1_PORTL); | ||
229 | local_irq_restore(flags); | ||
230 | #endif | ||
231 | ei_local->stat.tx_errors++; | ||
232 | |||
233 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
234 | txsr = ei_inb(e8390_base+EN0_TSR); | ||
235 | isr = ei_inb(e8390_base+EN0_ISR); | ||
236 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
237 | |||
238 | printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n", | ||
239 | dev->name, (txsr & ENTSR_ABT) ? "excess collisions." : | ||
240 | (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar); | ||
241 | |||
242 | if (!isr && !ei_local->stat.tx_packets) | ||
243 | { | ||
244 | /* The 8390 probably hasn't gotten on the cable yet. */ | ||
245 | ei_local->interface_num ^= 1; /* Try a different xcvr. */ | ||
246 | } | ||
247 | |||
248 | /* Ugly but a reset can be slow, yet must be protected */ | ||
249 | |||
250 | disable_irq_nosync_lockdep(dev->irq); | ||
251 | spin_lock(&ei_local->page_lock); | ||
252 | |||
253 | /* Try to restart the card. Perhaps the user has fixed something. */ | ||
254 | ei_reset_8390(dev); | ||
255 | __NS8390_init(dev, 1); | ||
256 | |||
257 | spin_unlock(&ei_local->page_lock); | ||
258 | enable_irq_lockdep(dev->irq); | ||
259 | netif_wake_queue(dev); | ||
260 | } | ||
261 | |||
262 | /** | ||
263 | * ei_start_xmit - begin packet transmission | ||
264 | * @skb: packet to be sent | ||
265 | * @dev: network device to which packet is sent | ||
266 | * | ||
267 | * Sends a packet to an 8390 network device. | ||
268 | */ | ||
269 | |||
270 | static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
271 | { | ||
272 | unsigned long e8390_base = dev->base_addr; | ||
273 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
274 | int send_length = skb->len, output_page; | ||
275 | unsigned long flags; | ||
276 | char buf[ETH_ZLEN]; | ||
277 | char *data = skb->data; | ||
278 | |||
279 | if (skb->len < ETH_ZLEN) { | ||
280 | memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */ | ||
281 | memcpy(buf, data, skb->len); | ||
282 | send_length = ETH_ZLEN; | ||
283 | data = buf; | ||
284 | } | ||
285 | |||
286 | /* Mask interrupts from the ethercard. | ||
287 | SMP: We have to grab the lock here otherwise the IRQ handler | ||
288 | on another CPU can flip window and race the IRQ mask set. We end | ||
289 | up trashing the mcast filter not disabling irqs if we don't lock */ | ||
290 | |||
291 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
292 | ei_outb_p(0x00, e8390_base + EN0_IMR); | ||
293 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
294 | |||
295 | |||
296 | /* | ||
297 | * Slow phase with lock held. | ||
298 | */ | ||
299 | |||
300 | disable_irq_nosync_lockdep_irqsave(dev->irq, &flags); | ||
301 | |||
302 | spin_lock(&ei_local->page_lock); | ||
303 | |||
304 | ei_local->irqlock = 1; | ||
305 | |||
306 | /* | ||
307 | * We have two Tx slots available for use. Find the first free | ||
308 | * slot, and then perform some sanity checks. With two Tx bufs, | ||
309 | * you get very close to transmitting back-to-back packets. With | ||
310 | * only one Tx buf, the transmitter sits idle while you reload the | ||
311 | * card, leaving a substantial gap between each transmitted packet. | ||
312 | */ | ||
313 | |||
314 | if (ei_local->tx1 == 0) | ||
315 | { | ||
316 | output_page = ei_local->tx_start_page; | ||
317 | ei_local->tx1 = send_length; | ||
318 | if (ei_debug && ei_local->tx2 > 0) | ||
319 | printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n", | ||
320 | dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing); | ||
321 | } | ||
322 | else if (ei_local->tx2 == 0) | ||
323 | { | ||
324 | output_page = ei_local->tx_start_page + TX_PAGES/2; | ||
325 | ei_local->tx2 = send_length; | ||
326 | if (ei_debug && ei_local->tx1 > 0) | ||
327 | printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n", | ||
328 | dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing); | ||
329 | } | ||
330 | else | ||
331 | { /* We should never get here. */ | ||
332 | if (ei_debug) | ||
333 | printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n", | ||
334 | dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx); | ||
335 | ei_local->irqlock = 0; | ||
336 | netif_stop_queue(dev); | ||
337 | ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); | ||
338 | spin_unlock(&ei_local->page_lock); | ||
339 | enable_irq_lockdep_irqrestore(dev->irq, &flags); | ||
340 | ei_local->stat.tx_errors++; | ||
341 | return 1; | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * Okay, now upload the packet and trigger a send if the transmitter | ||
346 | * isn't already sending. If it is busy, the interrupt handler will | ||
347 | * trigger the send later, upon receiving a Tx done interrupt. | ||
348 | */ | ||
349 | |||
350 | ei_block_output(dev, send_length, data, output_page); | ||
351 | |||
352 | if (! ei_local->txing) | ||
353 | { | ||
354 | ei_local->txing = 1; | ||
355 | NS8390_trigger_send(dev, send_length, output_page); | ||
356 | dev->trans_start = jiffies; | ||
357 | if (output_page == ei_local->tx_start_page) | ||
358 | { | ||
359 | ei_local->tx1 = -1; | ||
360 | ei_local->lasttx = -1; | ||
361 | } | ||
362 | else | ||
363 | { | ||
364 | ei_local->tx2 = -1; | ||
365 | ei_local->lasttx = -2; | ||
366 | } | ||
367 | } | ||
368 | else ei_local->txqueue++; | ||
369 | |||
370 | if (ei_local->tx1 && ei_local->tx2) | ||
371 | netif_stop_queue(dev); | ||
372 | else | ||
373 | netif_start_queue(dev); | ||
374 | |||
375 | /* Turn 8390 interrupts back on. */ | ||
376 | ei_local->irqlock = 0; | ||
377 | ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); | ||
378 | |||
379 | spin_unlock(&ei_local->page_lock); | ||
380 | enable_irq_lockdep_irqrestore(dev->irq, &flags); | ||
381 | |||
382 | dev_kfree_skb (skb); | ||
383 | ei_local->stat.tx_bytes += send_length; | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | /** | ||
389 | * ei_interrupt - handle the interrupts from an 8390 | ||
390 | * @irq: interrupt number | ||
391 | * @dev_id: a pointer to the net_device | ||
392 | * | ||
393 | * Handle the ether interface interrupts. We pull packets from | ||
394 | * the 8390 via the card specific functions and fire them at the networking | ||
395 | * stack. We also handle transmit completions and wake the transmit path if | ||
396 | * necessary. We also update the counters and do other housekeeping as | ||
397 | * needed. | ||
398 | */ | ||
399 | |||
400 | static irqreturn_t __ei_interrupt(int irq, void *dev_id) | ||
401 | { | ||
402 | struct net_device *dev = dev_id; | ||
403 | unsigned long e8390_base = dev->base_addr; | ||
404 | int interrupts, nr_serviced = 0; | ||
405 | struct ei_device *ei_local = netdev_priv(dev); | ||
406 | |||
407 | /* | ||
408 | * Protect the irq test too. | ||
409 | */ | ||
410 | |||
411 | spin_lock(&ei_local->page_lock); | ||
412 | |||
413 | if (ei_local->irqlock) | ||
414 | { | ||
415 | #if 1 /* This might just be an interrupt for a PCI device sharing this line */ | ||
416 | /* The "irqlock" check is only for testing. */ | ||
417 | printk(ei_local->irqlock | ||
418 | ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n" | ||
419 | : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n", | ||
420 | dev->name, ei_inb_p(e8390_base + EN0_ISR), | ||
421 | ei_inb_p(e8390_base + EN0_IMR)); | ||
422 | #endif | ||
423 | spin_unlock(&ei_local->page_lock); | ||
424 | return IRQ_NONE; | ||
425 | } | ||
426 | |||
427 | /* Change to page 0 and read the intr status reg. */ | ||
428 | ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); | ||
429 | if (ei_debug > 3) | ||
430 | printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name, | ||
431 | ei_inb_p(e8390_base + EN0_ISR)); | ||
432 | |||
433 | /* !!Assumption!! -- we stay in page 0. Don't break this. */ | ||
434 | while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 | ||
435 | && ++nr_serviced < MAX_SERVICE) | ||
436 | { | ||
437 | if (!netif_running(dev)) { | ||
438 | printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name); | ||
439 | /* rmk - acknowledge the interrupts */ | ||
440 | ei_outb_p(interrupts, e8390_base + EN0_ISR); | ||
441 | interrupts = 0; | ||
442 | break; | ||
443 | } | ||
444 | if (interrupts & ENISR_OVER) | ||
445 | ei_rx_overrun(dev); | ||
446 | else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) | ||
447 | { | ||
448 | /* Got a good (?) packet. */ | ||
449 | ei_receive(dev); | ||
450 | } | ||
451 | /* Push the next to-transmit packet through. */ | ||
452 | if (interrupts & ENISR_TX) | ||
453 | ei_tx_intr(dev); | ||
454 | else if (interrupts & ENISR_TX_ERR) | ||
455 | ei_tx_err(dev); | ||
456 | |||
457 | if (interrupts & ENISR_COUNTERS) | ||
458 | { | ||
459 | ei_local->stat.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0); | ||
460 | ei_local->stat.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1); | ||
461 | ei_local->stat.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2); | ||
462 | ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */ | ||
463 | } | ||
464 | |||
465 | /* Ignore any RDC interrupts that make it back to here. */ | ||
466 | if (interrupts & ENISR_RDC) | ||
467 | { | ||
468 | ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR); | ||
469 | } | ||
470 | |||
471 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); | ||
472 | } | ||
473 | |||
474 | if (interrupts && ei_debug) | ||
475 | { | ||
476 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); | ||
477 | if (nr_serviced >= MAX_SERVICE) | ||
478 | { | ||
479 | /* 0xFF is valid for a card removal */ | ||
480 | if(interrupts!=0xFF) | ||
481 | printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n", | ||
482 | dev->name, interrupts); | ||
483 | ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ | ||
484 | } else { | ||
485 | printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts); | ||
486 | ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ | ||
487 | } | ||
488 | } | ||
489 | spin_unlock(&ei_local->page_lock); | ||
490 | return IRQ_RETVAL(nr_serviced > 0); | ||
491 | } | ||
492 | |||
493 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
494 | static void __ei_poll(struct net_device *dev) | ||
495 | { | ||
496 | disable_irq_lockdep(dev->irq); | ||
497 | __ei_interrupt(dev->irq, dev); | ||
498 | enable_irq_lockdep(dev->irq); | ||
499 | } | ||
500 | #endif | ||
501 | |||
502 | /** | ||
503 | * ei_tx_err - handle transmitter error | ||
504 | * @dev: network device which threw the exception | ||
505 | * | ||
506 | * A transmitter error has happened. Most likely excess collisions (which | ||
507 | * is a fairly normal condition). If the error is one where the Tx will | ||
508 | * have been aborted, we try and send another one right away, instead of | ||
509 | * letting the failed packet sit and collect dust in the Tx buffer. This | ||
510 | * is a much better solution as it avoids kernel based Tx timeouts, and | ||
511 | * an unnecessary card reset. | ||
512 | * | ||
513 | * Called with lock held. | ||
514 | */ | ||
515 | |||
516 | static void ei_tx_err(struct net_device *dev) | ||
517 | { | ||
518 | unsigned long e8390_base = dev->base_addr; | ||
519 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
520 | unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR); | ||
521 | unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); | ||
522 | |||
523 | #ifdef VERBOSE_ERROR_DUMP | ||
524 | printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr); | ||
525 | if (txsr & ENTSR_ABT) | ||
526 | printk("excess-collisions "); | ||
527 | if (txsr & ENTSR_ND) | ||
528 | printk("non-deferral "); | ||
529 | if (txsr & ENTSR_CRS) | ||
530 | printk("lost-carrier "); | ||
531 | if (txsr & ENTSR_FU) | ||
532 | printk("FIFO-underrun "); | ||
533 | if (txsr & ENTSR_CDH) | ||
534 | printk("lost-heartbeat "); | ||
535 | printk("\n"); | ||
536 | #endif | ||
537 | |||
538 | ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */ | ||
539 | |||
540 | if (tx_was_aborted) | ||
541 | ei_tx_intr(dev); | ||
542 | else | ||
543 | { | ||
544 | ei_local->stat.tx_errors++; | ||
545 | if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++; | ||
546 | if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++; | ||
547 | if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++; | ||
548 | } | ||
549 | } | ||
550 | |||
551 | /** | ||
552 | * ei_tx_intr - transmit interrupt handler | ||
553 | * @dev: network device for which tx intr is handled | ||
554 | * | ||
555 | * We have finished a transmit: check for errors and then trigger the next | ||
556 | * packet to be sent. Called with lock held. | ||
557 | */ | ||
558 | |||
559 | static void ei_tx_intr(struct net_device *dev) | ||
560 | { | ||
561 | unsigned long e8390_base = dev->base_addr; | ||
562 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
563 | int status = ei_inb(e8390_base + EN0_TSR); | ||
564 | |||
565 | ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ | ||
566 | |||
567 | /* | ||
568 | * There are two Tx buffers, see which one finished, and trigger | ||
569 | * the send of another one if it exists. | ||
570 | */ | ||
571 | ei_local->txqueue--; | ||
572 | |||
573 | if (ei_local->tx1 < 0) | ||
574 | { | ||
575 | if (ei_local->lasttx != 1 && ei_local->lasttx != -1) | ||
576 | printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n", | ||
577 | ei_local->name, ei_local->lasttx, ei_local->tx1); | ||
578 | ei_local->tx1 = 0; | ||
579 | if (ei_local->tx2 > 0) | ||
580 | { | ||
581 | ei_local->txing = 1; | ||
582 | NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); | ||
583 | dev->trans_start = jiffies; | ||
584 | ei_local->tx2 = -1, | ||
585 | ei_local->lasttx = 2; | ||
586 | } | ||
587 | else ei_local->lasttx = 20, ei_local->txing = 0; | ||
588 | } | ||
589 | else if (ei_local->tx2 < 0) | ||
590 | { | ||
591 | if (ei_local->lasttx != 2 && ei_local->lasttx != -2) | ||
592 | printk("%s: bogus last_tx_buffer %d, tx2=%d.\n", | ||
593 | ei_local->name, ei_local->lasttx, ei_local->tx2); | ||
594 | ei_local->tx2 = 0; | ||
595 | if (ei_local->tx1 > 0) | ||
596 | { | ||
597 | ei_local->txing = 1; | ||
598 | NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); | ||
599 | dev->trans_start = jiffies; | ||
600 | ei_local->tx1 = -1; | ||
601 | ei_local->lasttx = 1; | ||
602 | } | ||
603 | else | ||
604 | ei_local->lasttx = 10, ei_local->txing = 0; | ||
605 | } | ||
606 | // else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n", | ||
607 | // dev->name, ei_local->lasttx); | ||
608 | |||
609 | /* Minimize Tx latency: update the statistics after we restart TXing. */ | ||
610 | if (status & ENTSR_COL) | ||
611 | ei_local->stat.collisions++; | ||
612 | if (status & ENTSR_PTX) | ||
613 | ei_local->stat.tx_packets++; | ||
614 | else | ||
615 | { | ||
616 | ei_local->stat.tx_errors++; | ||
617 | if (status & ENTSR_ABT) | ||
618 | { | ||
619 | ei_local->stat.tx_aborted_errors++; | ||
620 | ei_local->stat.collisions += 16; | ||
621 | } | ||
622 | if (status & ENTSR_CRS) | ||
623 | ei_local->stat.tx_carrier_errors++; | ||
624 | if (status & ENTSR_FU) | ||
625 | ei_local->stat.tx_fifo_errors++; | ||
626 | if (status & ENTSR_CDH) | ||
627 | ei_local->stat.tx_heartbeat_errors++; | ||
628 | if (status & ENTSR_OWC) | ||
629 | ei_local->stat.tx_window_errors++; | ||
630 | } | ||
631 | netif_wake_queue(dev); | ||
632 | } | ||
633 | |||
634 | /** | ||
635 | * ei_receive - receive some packets | ||
636 | * @dev: network device with which receive will be run | ||
637 | * | ||
638 | * We have a good packet(s), get it/them out of the buffers. | ||
639 | * Called with lock held. | ||
640 | */ | ||
641 | |||
642 | static void ei_receive(struct net_device *dev) | ||
643 | { | ||
644 | unsigned long e8390_base = dev->base_addr; | ||
645 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
646 | unsigned char rxing_page, this_frame, next_frame; | ||
647 | unsigned short current_offset; | ||
648 | int rx_pkt_count = 0; | ||
649 | struct e8390_pkt_hdr rx_frame; | ||
650 | int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page; | ||
651 | |||
652 | while (++rx_pkt_count < 10) | ||
653 | { | ||
654 | int pkt_len, pkt_stat; | ||
655 | |||
656 | /* Get the rx page (incoming packet pointer). */ | ||
657 | ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD); | ||
658 | rxing_page = ei_inb_p(e8390_base + EN1_CURPAG); | ||
659 | ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); | ||
660 | |||
661 | /* Remove one frame from the ring. Boundary is always a page behind. */ | ||
662 | this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1; | ||
663 | if (this_frame >= ei_local->stop_page) | ||
664 | this_frame = ei_local->rx_start_page; | ||
665 | |||
666 | /* Someday we'll omit the previous, iff we never get this message. | ||
667 | (There is at least one clone claimed to have a problem.) | ||
668 | |||
669 | Keep quiet if it looks like a card removal. One problem here | ||
670 | is that some clones crash in roughly the same way. | ||
671 | */ | ||
672 | if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF)) | ||
673 | printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n", | ||
674 | dev->name, this_frame, ei_local->current_page); | ||
675 | |||
676 | if (this_frame == rxing_page) /* Read all the frames? */ | ||
677 | break; /* Done for now */ | ||
678 | |||
679 | current_offset = this_frame << 8; | ||
680 | ei_get_8390_hdr(dev, &rx_frame, this_frame); | ||
681 | |||
682 | pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); | ||
683 | pkt_stat = rx_frame.status; | ||
684 | |||
685 | next_frame = this_frame + 1 + ((pkt_len+4)>>8); | ||
686 | |||
687 | /* Check for bogosity warned by 3c503 book: the status byte is never | ||
688 | written. This happened a lot during testing! This code should be | ||
689 | cleaned up someday. */ | ||
690 | if (rx_frame.next != next_frame | ||
691 | && rx_frame.next != next_frame + 1 | ||
692 | && rx_frame.next != next_frame - num_rx_pages | ||
693 | && rx_frame.next != next_frame + 1 - num_rx_pages) { | ||
694 | ei_local->current_page = rxing_page; | ||
695 | ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY); | ||
696 | ei_local->stat.rx_errors++; | ||
697 | continue; | ||
698 | } | ||
699 | |||
700 | if (pkt_len < 60 || pkt_len > 1518) | ||
701 | { | ||
702 | if (ei_debug) | ||
703 | printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n", | ||
704 | dev->name, rx_frame.count, rx_frame.status, | ||
705 | rx_frame.next); | ||
706 | ei_local->stat.rx_errors++; | ||
707 | ei_local->stat.rx_length_errors++; | ||
708 | } | ||
709 | else if ((pkt_stat & 0x0F) == ENRSR_RXOK) | ||
710 | { | ||
711 | struct sk_buff *skb; | ||
712 | |||
713 | skb = dev_alloc_skb(pkt_len+2); | ||
714 | if (skb == NULL) | ||
715 | { | ||
716 | if (ei_debug > 1) | ||
717 | printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n", | ||
718 | dev->name, pkt_len); | ||
719 | ei_local->stat.rx_dropped++; | ||
720 | break; | ||
721 | } | ||
722 | else | ||
723 | { | ||
724 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ | ||
725 | skb->dev = dev; | ||
726 | skb_put(skb, pkt_len); /* Make room */ | ||
727 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); | ||
728 | skb->protocol=eth_type_trans(skb,dev); | ||
729 | netif_rx(skb); | ||
730 | dev->last_rx = jiffies; | ||
731 | ei_local->stat.rx_packets++; | ||
732 | ei_local->stat.rx_bytes += pkt_len; | ||
733 | if (pkt_stat & ENRSR_PHY) | ||
734 | ei_local->stat.multicast++; | ||
735 | } | ||
736 | } | ||
737 | else | ||
738 | { | ||
739 | if (ei_debug) | ||
740 | printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n", | ||
741 | dev->name, rx_frame.status, rx_frame.next, | ||
742 | rx_frame.count); | ||
743 | ei_local->stat.rx_errors++; | ||
744 | /* NB: The NIC counts CRC, frame and missed errors. */ | ||
745 | if (pkt_stat & ENRSR_FO) | ||
746 | ei_local->stat.rx_fifo_errors++; | ||
747 | } | ||
748 | next_frame = rx_frame.next; | ||
749 | |||
750 | /* This _should_ never happen: it's here for avoiding bad clones. */ | ||
751 | if (next_frame >= ei_local->stop_page) { | ||
752 | printk("%s: next frame inconsistency, %#2x\n", dev->name, | ||
753 | next_frame); | ||
754 | next_frame = ei_local->rx_start_page; | ||
755 | } | ||
756 | ei_local->current_page = next_frame; | ||
757 | ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); | ||
758 | } | ||
759 | |||
760 | /* We used to also ack ENISR_OVER here, but that would sometimes mask | ||
761 | a real overrun, leaving the 8390 in a stopped state with rec'vr off. */ | ||
762 | ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR); | ||
763 | return; | ||
764 | } | ||
765 | |||
766 | /** | ||
767 | * ei_rx_overrun - handle receiver overrun | ||
768 | * @dev: network device which threw exception | ||
769 | * | ||
770 | * We have a receiver overrun: we have to kick the 8390 to get it started | ||
771 | * again. Problem is that you have to kick it exactly as NS prescribes in | ||
772 | * the updated datasheets, or "the NIC may act in an unpredictable manner." | ||
773 | * This includes causing "the NIC to defer indefinitely when it is stopped | ||
774 | * on a busy network." Ugh. | ||
775 | * Called with lock held. Don't call this with the interrupts off or your | ||
776 | * computer will hate you - it takes 10ms or so. | ||
777 | */ | ||
778 | |||
779 | static void ei_rx_overrun(struct net_device *dev) | ||
780 | { | ||
781 | unsigned long e8390_base = dev->base_addr; | ||
782 | unsigned char was_txing, must_resend = 0; | ||
783 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
784 | |||
785 | /* | ||
786 | * Record whether a Tx was in progress and then issue the | ||
787 | * stop command. | ||
788 | */ | ||
789 | was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS; | ||
790 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); | ||
791 | |||
792 | if (ei_debug > 1) | ||
793 | printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name); | ||
794 | ei_local->stat.rx_over_errors++; | ||
795 | |||
796 | /* | ||
797 | * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. | ||
798 | * Early datasheets said to poll the reset bit, but now they say that | ||
799 | * it "is not a reliable indicator and subsequently should be ignored." | ||
800 | * We wait at least 10ms. | ||
801 | */ | ||
802 | |||
803 | mdelay(10); | ||
804 | |||
805 | /* | ||
806 | * Reset RBCR[01] back to zero as per magic incantation. | ||
807 | */ | ||
808 | ei_outb_p(0x00, e8390_base+EN0_RCNTLO); | ||
809 | ei_outb_p(0x00, e8390_base+EN0_RCNTHI); | ||
810 | |||
811 | /* | ||
812 | * See if any Tx was interrupted or not. According to NS, this | ||
813 | * step is vital, and skipping it will cause no end of havoc. | ||
814 | */ | ||
815 | |||
816 | if (was_txing) | ||
817 | { | ||
818 | unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); | ||
819 | if (!tx_completed) | ||
820 | must_resend = 1; | ||
821 | } | ||
822 | |||
823 | /* | ||
824 | * Have to enter loopback mode and then restart the NIC before | ||
825 | * you are allowed to slurp packets up off the ring. | ||
826 | */ | ||
827 | ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); | ||
828 | ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); | ||
829 | |||
830 | /* | ||
831 | * Clear the Rx ring of all the debris, and ack the interrupt. | ||
832 | */ | ||
833 | ei_receive(dev); | ||
834 | ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR); | ||
835 | |||
836 | /* | ||
837 | * Leave loopback mode, and resend any packet that got stopped. | ||
838 | */ | ||
839 | ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); | ||
840 | if (must_resend) | ||
841 | ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); | ||
842 | } | ||
843 | |||
844 | /* | ||
845 | * Collect the stats. This is called unlocked and from several contexts. | ||
846 | */ | ||
847 | |||
848 | static struct net_device_stats *get_stats(struct net_device *dev) | ||
849 | { | ||
850 | unsigned long ioaddr = dev->base_addr; | ||
851 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
852 | unsigned long flags; | ||
853 | |||
854 | /* If the card is stopped, just return the present stats. */ | ||
855 | if (!netif_running(dev)) | ||
856 | return &ei_local->stat; | ||
857 | |||
858 | spin_lock_irqsave(&ei_local->page_lock,flags); | ||
859 | /* Read the counter registers, assuming we are in page 0. */ | ||
860 | ei_local->stat.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0); | ||
861 | ei_local->stat.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1); | ||
862 | ei_local->stat.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2); | ||
863 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
864 | |||
865 | return &ei_local->stat; | ||
866 | } | ||
867 | |||
868 | /* | ||
869 | * Form the 64 bit 8390 multicast table from the linked list of addresses | ||
870 | * associated with this dev structure. | ||
871 | */ | ||
872 | |||
873 | static inline void make_mc_bits(u8 *bits, struct net_device *dev) | ||
874 | { | ||
875 | struct dev_mc_list *dmi; | ||
876 | |||
877 | for (dmi=dev->mc_list; dmi; dmi=dmi->next) | ||
878 | { | ||
879 | u32 crc; | ||
880 | if (dmi->dmi_addrlen != ETH_ALEN) | ||
881 | { | ||
882 | printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name); | ||
883 | continue; | ||
884 | } | ||
885 | crc = ether_crc(ETH_ALEN, dmi->dmi_addr); | ||
886 | /* | ||
887 | * The 8390 uses the 6 most significant bits of the | ||
888 | * CRC to index the multicast table. | ||
889 | */ | ||
890 | bits[crc>>29] |= (1<<((crc>>26)&7)); | ||
891 | } | ||
892 | } | ||
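The comment above is terse about where a multicast address actually lands in the 64-bit filter: the 8390 hashes the destination address with the MSB-first Ethernet CRC-32, then uses the top three bits of the CRC to select one of the eight filter bytes and the next three bits to select the bit within that byte. A stand-alone user-space sketch of the same mapping (the CRC routine below re-implements the algorithm of the kernel's ether_crc() so the example compiles on its own; the sample address is arbitrary):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* MSB-first Ethernet CRC-32 (same algorithm as the kernel's ether_crc()). */
static uint32_t ether_crc_be(size_t len, const uint8_t *data)
{
	uint32_t crc = 0xffffffffu;

	while (len--) {
		uint8_t byte = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			uint32_t msb = crc >> 31;

			crc <<= 1;
			if (msb ^ (byte & 1u))
				crc ^= 0x04c11db7u;	/* Ethernet polynomial */
		}
	}
	return crc;
}

int main(void)
{
	/* Arbitrary example: the IPv4 all-hosts multicast MAC address. */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t filter[8] = { 0 };
	uint32_t crc = ether_crc_be(sizeof(addr), addr);

	/* Top 3 CRC bits pick the filter byte, the next 3 pick the bit --
	 * the same expression make_mc_bits() uses. */
	filter[crc >> 29] |= 1u << ((crc >> 26) & 7);

	printf("crc %08x -> filter byte %u, bit %u\n",
	       (unsigned)crc, (unsigned)(crc >> 29), (unsigned)((crc >> 26) & 7));
	return 0;
}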
893 | |||
894 | /** | ||
895 | * do_set_multicast_list - set/clear multicast filter | ||
896 | * @dev: net device for which multicast filter is adjusted | ||
897 | * | ||
898 | * Set or clear the multicast filter for this adaptor. May be called | ||
899 | * from a BH in 2.1.x. Must be called with lock held. | ||
900 | */ | ||
901 | |||
902 | static void do_set_multicast_list(struct net_device *dev) | ||
903 | { | ||
904 | unsigned long e8390_base = dev->base_addr; | ||
905 | int i; | ||
906 | struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); | ||
907 | |||
908 | if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) | ||
909 | { | ||
910 | memset(ei_local->mcfilter, 0, 8); | ||
911 | if (dev->mc_list) | ||
912 | make_mc_bits(ei_local->mcfilter, dev); | ||
913 | } | ||
914 | else | ||
915 | memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */ | ||
916 | |||
917 | /* | ||
918 | * DP8390 manuals don't specify any magic sequence for altering | ||
919 | * the multicast regs on an already running card. To be safe, we | ||
920 | * ensure multicast mode is off prior to loading up the new hash | ||
921 | * table. If this proves to be not enough, we can always resort | ||
922 | * to stopping the NIC, loading the table and then restarting. | ||
923 | * | ||
924 | * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC | ||
925 | * Elite16) appear to be write-only. The NS 8390 data sheet lists | ||
926 | * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and | ||
927 | * Ultra32 EISA) appears to have this bug fixed. | ||
928 | */ | ||
929 | |||
930 | if (netif_running(dev)) | ||
931 | ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); | ||
932 | ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); | ||
933 | for(i = 0; i < 8; i++) | ||
934 | { | ||
935 | ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); | ||
936 | #ifndef BUG_83C690 | ||
937 | if(ei_inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i]) | ||
938 | printk(KERN_ERR "Multicast filter read/write mismap %d\n",i); | ||
939 | #endif | ||
940 | } | ||
941 | ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); | ||
942 | |||
943 | if(dev->flags&IFF_PROMISC) | ||
944 | ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); | ||
945 | else if(dev->flags&IFF_ALLMULTI || dev->mc_list) | ||
946 | ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); | ||
947 | else | ||
948 | ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); | ||
949 | } | ||
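The bare 0x18 and 0x08 ORed into E8390_RXCONFIG above are DP8390 receive-configuration bits. Going by the DP8390 datasheet's RCR layout (re-stated here as an assumption, since 8390.h is not part of this hunk), bit 3 is "accept multicast" and bit 4 is "promiscuous", so the three cases are broadcast-only, broadcast+multicast, and broadcast+multicast+promiscuous. A tiny sketch that just names the magic numbers:

#include <stdio.h>

/* DP8390 Receive Configuration Register bits, per the datasheet layout. */
#define RCR_SEP 0x01	/* save errored packets        */
#define RCR_AR  0x02	/* accept runt packets         */
#define RCR_AB  0x04	/* accept broadcast            */
#define RCR_AM  0x08	/* accept multicast            */
#define RCR_PRO 0x10	/* promiscuous physical accept */
#define RCR_MON 0x20	/* monitor mode, no buffering  */

int main(void)
{
	/* E8390_RXCONFIG is normally just "accept broadcast", so the three
	 * values written to EN0_RXCR decode as follows. */
	printf("normal      : 0x%02x (AB)\n", RCR_AB);
	printf("multicast   : 0x%02x (AB|AM)\n", RCR_AB | RCR_AM);
	printf("promiscuous : 0x%02x (AB|AM|PRO)\n", RCR_AB | RCR_AM | RCR_PRO);
	return 0;
}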
950 | |||
951 | /* | ||
952 | * Called without lock held. This is invoked from user context and may | ||
953 | * be parallel to just about everything else. It's also fairly quick and | ||
954 | * not called too often. Must protect against both bh and irq users. | ||
955 | */ | ||
956 | |||
957 | static void set_multicast_list(struct net_device *dev) | ||
958 | { | ||
959 | unsigned long flags; | ||
960 | struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); | ||
961 | |||
962 | spin_lock_irqsave(&ei_local->page_lock, flags); | ||
963 | do_set_multicast_list(dev); | ||
964 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | ||
965 | } | ||
966 | |||
967 | /** | ||
968 | * ethdev_setup - init rest of 8390 device struct | ||
969 | * @dev: network device structure to init | ||
970 | * | ||
971 | * Initialize the rest of the 8390 device structure. Do NOT __init | ||
972 | * this, as it is used by 8390 based modular drivers too. | ||
973 | */ | ||
974 | |||
975 | static void ethdev_setup(struct net_device *dev) | ||
976 | { | ||
977 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
978 | if (ei_debug > 1) | ||
979 | printk(version); | ||
980 | |||
981 | dev->hard_start_xmit = &ei_start_xmit; | ||
982 | dev->get_stats = get_stats; | ||
983 | dev->set_multicast_list = &set_multicast_list; | ||
984 | |||
985 | ether_setup(dev); | ||
986 | |||
987 | spin_lock_init(&ei_local->page_lock); | ||
988 | } | ||
989 | |||
990 | /** | ||
991 | * alloc_ei_netdev - alloc_etherdev counterpart for 8390 | ||
992 | * @size: extra bytes to allocate | ||
993 | * | ||
994 | * Allocate 8390-specific net_device. | ||
995 | */ | ||
996 | static struct net_device *____alloc_ei_netdev(int size) | ||
997 | { | ||
998 | return alloc_netdev(sizeof(struct ei_device) + size, "eth%d", | ||
999 | ethdev_setup); | ||
1000 | } | ||
1001 | |||
1002 | |||
1003 | |||
1004 | |||
1005 | /* This page of functions should be 8390 generic */ | ||
1006 | /* Follow National Semi's recommendations for initializing the "NIC". */ | ||
1007 | |||
1008 | /** | ||
1009 | * NS8390_init - initialize 8390 hardware | ||
1010 | * @dev: network device to initialize | ||
1011 | * @startp: boolean. non-zero value to initiate chip processing | ||
1012 | * | ||
1013 | * Must be called with lock held. | ||
1014 | */ | ||
1015 | |||
1016 | static void __NS8390_init(struct net_device *dev, int startp) | ||
1017 | { | ||
1018 | unsigned long e8390_base = dev->base_addr; | ||
1019 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | ||
1020 | int i; | ||
1021 | int endcfg = ei_local->word16 | ||
1022 | ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) | ||
1023 | : 0x48; | ||
1024 | |||
1025 | if(sizeof(struct e8390_pkt_hdr)!=4) | ||
1026 | panic("8390.c: header struct mispacked\n"); | ||
1027 | /* Follow National Semi's recommendations for initing the DP83902. */ | ||
1028 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ | ||
1029 | ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ | ||
1030 | /* Clear the remote byte count registers. */ | ||
1031 | ei_outb_p(0x00, e8390_base + EN0_RCNTLO); | ||
1032 | ei_outb_p(0x00, e8390_base + EN0_RCNTHI); | ||
1033 | /* Set to monitor and loopback mode -- this is vital! */ | ||
1034 | ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */ | ||
1035 | ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ | ||
1036 | /* Set the transmit page and receive ring. */ | ||
1037 | ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); | ||
1038 | ei_local->tx1 = ei_local->tx2 = 0; | ||
1039 | ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); | ||
1040 | ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS 0x26 */ | ||
1041 | ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ | ||
1042 | ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); | ||
1043 | /* Clear the pending interrupts and mask. */ | ||
1044 | ei_outb_p(0xFF, e8390_base + EN0_ISR); | ||
1045 | ei_outb_p(0x00, e8390_base + EN0_IMR); | ||
1046 | |||
1047 | /* Copy the station address into the DS8390 registers. */ | ||
1048 | |||
1049 | ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ | ||
1050 | for(i = 0; i < 6; i++) | ||
1051 | { | ||
1052 | ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); | ||
1053 | if (ei_debug > 1 && ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i]) | ||
1054 | printk(KERN_ERR "Hw. address read/write mismap %d\n",i); | ||
1055 | } | ||
1056 | |||
1057 | ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); | ||
1058 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); | ||
1059 | |||
1060 | netif_start_queue(dev); | ||
1061 | ei_local->tx1 = ei_local->tx2 = 0; | ||
1062 | ei_local->txing = 0; | ||
1063 | |||
1064 | if (startp) | ||
1065 | { | ||
1066 | ei_outb_p(0xff, e8390_base + EN0_ISR); | ||
1067 | ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); | ||
1068 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); | ||
1069 | ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */ | ||
1070 | /* 3c503 TechMan says rxconfig only after the NIC is started. */ | ||
1071 | ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */ | ||
1072 | do_set_multicast_list(dev); /* (re)load the mcast table */ | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1076 | /* Trigger a transmit start, assuming the length is valid. | ||
1077 | Always called with the page lock held */ | ||
1078 | |||
1079 | static void NS8390_trigger_send(struct net_device *dev, unsigned int length, | ||
1080 | int start_page) | ||
1081 | { | ||
1082 | unsigned long e8390_base = dev->base_addr; | ||
1083 | struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); | ||
1084 | |||
1085 | ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); | ||
1086 | |||
1087 | if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) | ||
1088 | { | ||
1089 | printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n", | ||
1090 | dev->name); | ||
1091 | return; | ||
1092 | } | ||
1093 | ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO); | ||
1094 | ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI); | ||
1095 | ei_outb_p(start_page, e8390_base + EN0_TPSR); | ||
1096 | ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); | ||
1097 | } | ||
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c index ade6ff852e1a..a12bb64e3694 100644 --- a/drivers/net/mac8390.c +++ b/drivers/net/mac8390.c | |||
@@ -39,7 +39,16 @@ | |||
39 | #include <asm/hwtest.h> | 39 | #include <asm/hwtest.h> |
40 | #include <asm/macints.h> | 40 | #include <asm/macints.h> |
41 | 41 | ||
42 | #include "8390.h" | 42 | static char version[] = |
43 | "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n"; | ||
44 | |||
45 | #define EI_SHIFT(x) (ei_local->reg_offset[x]) | ||
46 | #define ei_inb(port) in_8(port) | ||
47 | #define ei_outb(val,port) out_8(port,val) | ||
48 | #define ei_inb_p(port) in_8(port) | ||
49 | #define ei_outb_p(val,port) out_8(port,val) | ||
50 | |||
51 | #include "lib8390.c" | ||
43 | 52 | ||
44 | #define WD_START_PG 0x00 /* First page of TX buffer */ | 53 | #define WD_START_PG 0x00 /* First page of TX buffer */ |
45 | #define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */ | 54 | #define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */ |
@@ -116,9 +125,6 @@ static int useresources[] = { | |||
116 | 1, /* dayna-lc */ | 125 | 1, /* dayna-lc */ |
117 | }; | 126 | }; |
118 | 127 | ||
119 | static char version[] __initdata = | ||
120 | "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n"; | ||
121 | |||
122 | extern enum mac8390_type mac8390_ident(struct nubus_dev * dev); | 128 | extern enum mac8390_type mac8390_ident(struct nubus_dev * dev); |
123 | extern int mac8390_memsize(unsigned long membase); | 129 | extern int mac8390_memsize(unsigned long membase); |
124 | extern int mac8390_memtest(struct net_device * dev); | 130 | extern int mac8390_memtest(struct net_device * dev); |
@@ -237,7 +243,7 @@ struct net_device * __init mac8390_probe(int unit) | |||
237 | if (!MACH_IS_MAC) | 243 | if (!MACH_IS_MAC) |
238 | return ERR_PTR(-ENODEV); | 244 | return ERR_PTR(-ENODEV); |
239 | 245 | ||
240 | dev = alloc_ei_netdev(); | 246 | dev = ____alloc_ei_netdev(0); |
241 | if (!dev) | 247 | if (!dev) |
242 | return ERR_PTR(-ENOMEM); | 248 | return ERR_PTR(-ENOMEM); |
243 | 249 | ||
@@ -438,7 +444,7 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd | |||
438 | dev->open = &mac8390_open; | 444 | dev->open = &mac8390_open; |
439 | dev->stop = &mac8390_close; | 445 | dev->stop = &mac8390_close; |
440 | #ifdef CONFIG_NET_POLL_CONTROLLER | 446 | #ifdef CONFIG_NET_POLL_CONTROLLER |
441 | dev->poll_controller = ei_poll; | 447 | dev->poll_controller = __ei_poll; |
442 | #endif | 448 | #endif |
443 | 449 | ||
444 | /* GAR, ei_status is actually a macro even though it looks global */ | 450 | /* GAR, ei_status is actually a macro even though it looks global */ |
@@ -510,7 +516,7 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd | |||
510 | return -ENODEV; | 516 | return -ENODEV; |
511 | } | 517 | } |
512 | 518 | ||
513 | NS8390_init(dev, 0); | 519 | __NS8390_init(dev, 0); |
514 | 520 | ||
515 | /* Good, done, now spit out some messages */ | 521 | /* Good, done, now spit out some messages */ |
516 | printk(KERN_INFO "%s: %s in slot %X (type %s)\n", | 522 | printk(KERN_INFO "%s: %s in slot %X (type %s)\n", |
@@ -532,8 +538,8 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd | |||
532 | 538 | ||
533 | static int mac8390_open(struct net_device *dev) | 539 | static int mac8390_open(struct net_device *dev) |
534 | { | 540 | { |
535 | ei_open(dev); | 541 | __ei_open(dev); |
536 | if (request_irq(dev->irq, ei_interrupt, 0, "8390 Ethernet", dev)) { | 542 | if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) { |
537 | printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq); | 543 | printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq); |
538 | return -EAGAIN; | 544 | return -EAGAIN; |
539 | } | 545 | } |
@@ -543,7 +549,7 @@ static int mac8390_open(struct net_device *dev) | |||
543 | static int mac8390_close(struct net_device *dev) | 549 | static int mac8390_close(struct net_device *dev) |
544 | { | 550 | { |
545 | free_irq(dev->irq, dev); | 551 | free_irq(dev->irq, dev); |
546 | ei_close(dev); | 552 | __ei_close(dev); |
547 | return 0; | 553 | return 0; |
548 | } | 554 | } |
549 | 555 | ||
diff --git a/drivers/net/macb.c b/drivers/net/macb.c new file mode 100644 index 000000000000..bd0ce98c939c --- /dev/null +++ b/drivers/net/macb.c | |||
@@ -0,0 +1,1210 @@ | |||
1 | /* | ||
2 | * Atmel MACB Ethernet Controller driver | ||
3 | * | ||
4 | * Copyright (C) 2004-2006 Atmel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/moduleparam.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/netdevice.h> | ||
19 | #include <linux/etherdevice.h> | ||
20 | #include <linux/mii.h> | ||
21 | #include <linux/mutex.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/ethtool.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | |||
26 | #include <asm/arch/board.h> | ||
27 | |||
28 | #include "macb.h" | ||
29 | |||
30 | #define to_net_dev(class) container_of(class, struct net_device, class_dev) | ||
31 | |||
32 | #define RX_BUFFER_SIZE 128 | ||
33 | #define RX_RING_SIZE 512 | ||
34 | #define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE) | ||
35 | |||
36 | /* Make the IP header word-aligned (the ethernet header is 14 bytes) */ | ||
37 | #define RX_OFFSET 2 | ||
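The arithmetic behind the 2-byte offset, spelled out: the Ethernet header is 14 bytes, so padding the start of the receive buffer by two pushes the IP header to offset 16, i.e. onto a 16-byte (and hence 4-byte) boundary. A trivial stand-alone check (names here are illustrative, not the driver's):

#include <stdio.h>

int main(void)
{
	const int rx_pad   = 2;		/* bytes reserved at the head of the buffer */
	const int eth_hlen = 14;	/* dest MAC + src MAC + ethertype           */

	/* 2 + 14 = 16: the IP header starts 16-byte aligned. */
	printf("IP header offset in the RX buffer: %d\n", rx_pad + eth_hlen);
	return 0;
}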
38 | |||
39 | #define TX_RING_SIZE 128 | ||
40 | #define DEF_TX_RING_PENDING (TX_RING_SIZE - 1) | ||
41 | #define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE) | ||
42 | |||
43 | #define TX_RING_GAP(bp) \ | ||
44 | (TX_RING_SIZE - (bp)->tx_pending) | ||
45 | #define TX_BUFFS_AVAIL(bp) \ | ||
46 | (((bp)->tx_tail <= (bp)->tx_head) ? \ | ||
47 | (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \ | ||
48 | (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp)) | ||
49 | #define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1)) | ||
50 | |||
51 | #define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1)) | ||
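TX_RING_GAP and TX_BUFFS_AVAIL implement the usual producer/consumer ring accounting: tx_head is where the driver queues the next frame, tx_tail is the oldest un-reaped descriptor, and because tx_pending defaults to TX_RING_SIZE - 1 one slot is always kept free so head can never catch up with tail. A stand-alone sketch with the same arithmetic and two worked cases (the head/tail values are made up for illustration):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 128			/* mirrors TX_RING_SIZE             */
#define PENDING   (RING_SIZE - 1)	/* mirrors DEF_TX_RING_PENDING      */
#define RING_GAP  (RING_SIZE - PENDING)	/* mirrors TX_RING_GAP(), here 1    */

/* Same expression as TX_BUFFS_AVAIL(), with head/tail as plain arguments. */
static unsigned int bufs_avail(unsigned int head, unsigned int tail)
{
	return (tail <= head) ? tail + PENDING - head
			      : tail - head - RING_GAP;
}

int main(void)
{
	/* Head has not wrapped yet: 6 descriptors in flight. */
	assert(bufs_avail(10, 4) == 121);	/* 128 - 6 in use - 1 reserved  */

	/* Head has wrapped past the end: 10 descriptors in flight. */
	assert(bufs_avail(2, 120) == 117);	/* 128 - 10 in use - 1 reserved */

	printf("ring accounting checks pass\n");
	return 0;
}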
52 | |||
53 | /* minimum number of free TX descriptors before waking up TX process */ | ||
54 | #define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4) | ||
55 | |||
56 | #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ | ||
57 | | MACB_BIT(ISR_ROVR)) | ||
58 | |||
59 | static void __macb_set_hwaddr(struct macb *bp) | ||
60 | { | ||
61 | u32 bottom; | ||
62 | u16 top; | ||
63 | |||
64 | bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); | ||
65 | macb_writel(bp, SA1B, bottom); | ||
66 | top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); | ||
67 | macb_writel(bp, SA1T, top); | ||
68 | } | ||
69 | |||
70 | static void __init macb_get_hwaddr(struct macb *bp) | ||
71 | { | ||
72 | u32 bottom; | ||
73 | u16 top; | ||
74 | u8 addr[6]; | ||
75 | |||
76 | bottom = macb_readl(bp, SA1B); | ||
77 | top = macb_readl(bp, SA1T); | ||
78 | |||
79 | addr[0] = bottom & 0xff; | ||
80 | addr[1] = (bottom >> 8) & 0xff; | ||
81 | addr[2] = (bottom >> 16) & 0xff; | ||
82 | addr[3] = (bottom >> 24) & 0xff; | ||
83 | addr[4] = top & 0xff; | ||
84 | addr[5] = (top >> 8) & 0xff; | ||
85 | |||
86 | if (is_valid_ether_addr(addr)) | ||
87 | memcpy(bp->dev->dev_addr, addr, sizeof(addr)); | ||
88 | } | ||
89 | |||
90 | static void macb_enable_mdio(struct macb *bp) | ||
91 | { | ||
92 | unsigned long flags; | ||
93 | u32 reg; | ||
94 | |||
95 | spin_lock_irqsave(&bp->lock, flags); | ||
96 | reg = macb_readl(bp, NCR); | ||
97 | reg |= MACB_BIT(MPE); | ||
98 | macb_writel(bp, NCR, reg); | ||
99 | macb_writel(bp, IER, MACB_BIT(MFD)); | ||
100 | spin_unlock_irqrestore(&bp->lock, flags); | ||
101 | } | ||
102 | |||
103 | static void macb_disable_mdio(struct macb *bp) | ||
104 | { | ||
105 | unsigned long flags; | ||
106 | u32 reg; | ||
107 | |||
108 | spin_lock_irqsave(&bp->lock, flags); | ||
109 | reg = macb_readl(bp, NCR); | ||
110 | reg &= ~MACB_BIT(MPE); | ||
111 | macb_writel(bp, NCR, reg); | ||
112 | macb_writel(bp, IDR, MACB_BIT(MFD)); | ||
113 | spin_unlock_irqrestore(&bp->lock, flags); | ||
114 | } | ||
115 | |||
116 | static int macb_mdio_read(struct net_device *dev, int phy_id, int location) | ||
117 | { | ||
118 | struct macb *bp = netdev_priv(dev); | ||
119 | int value; | ||
120 | |||
121 | mutex_lock(&bp->mdio_mutex); | ||
122 | |||
123 | macb_enable_mdio(bp); | ||
124 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | ||
125 | | MACB_BF(RW, MACB_MAN_READ) | ||
126 | | MACB_BF(PHYA, phy_id) | ||
127 | | MACB_BF(REGA, location) | ||
128 | | MACB_BF(CODE, MACB_MAN_CODE))); | ||
129 | |||
130 | wait_for_completion(&bp->mdio_complete); | ||
131 | |||
132 | value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); | ||
133 | macb_disable_mdio(bp); | ||
134 | mutex_unlock(&bp->mdio_mutex); | ||
135 | |||
136 | return value; | ||
137 | } | ||
138 | |||
139 | static void macb_mdio_write(struct net_device *dev, int phy_id, | ||
140 | int location, int val) | ||
141 | { | ||
142 | struct macb *bp = netdev_priv(dev); | ||
143 | |||
144 | dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n", | ||
145 | phy_id, location, val); | ||
146 | |||
147 | mutex_lock(&bp->mdio_mutex); | ||
148 | macb_enable_mdio(bp); | ||
149 | |||
150 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | ||
151 | | MACB_BF(RW, MACB_MAN_WRITE) | ||
152 | | MACB_BF(PHYA, phy_id) | ||
153 | | MACB_BF(REGA, location) | ||
154 | | MACB_BF(CODE, MACB_MAN_CODE) | ||
155 | | MACB_BF(DATA, val))); | ||
156 | |||
157 | wait_for_completion(&bp->mdio_complete); | ||
158 | |||
159 | macb_disable_mdio(bp); | ||
160 | mutex_unlock(&bp->mdio_mutex); | ||
161 | } | ||
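The MAN writes in macb_mdio_read()/macb_mdio_write() are just an IEEE 802.3 Clause 22 management frame packed into one 32-bit word: start-of-frame, operation, PHY address, register address, turnaround, data. A stand-alone sketch of that packing for a read; the bit offsets and the 01/10 codes follow the standard frame layout and are assumed to match the MACB_BF field definitions in macb.h:

#include <stdint.h>
#include <stdio.h>

/* Pack `val` into a field `size` bits wide starting at bit `offset`. */
#define FIELD(val, offset, size) \
	((((uint32_t)(val)) & ((1u << (size)) - 1u)) << (offset))

static uint32_t mdio_read_frame(unsigned int phy_id, unsigned int reg)
{
	return FIELD(1, 30, 2)		/* start of frame: binary 01        */
	     | FIELD(2, 28, 2)		/* operation:      binary 10 = read */
	     | FIELD(phy_id, 23, 5)	/* PHY address                      */
	     | FIELD(reg, 18, 5)	/* register address                 */
	     | FIELD(2, 16, 2);		/* turnaround:     binary 10        */
	/* Bits 15:0 carry the data field: written on a write, read back
	 * from MAN once the frame completes on a read. */
}

int main(void)
{
	/* Example: read MII_BMSR (register 1) from a PHY at address 0. */
	printf("MAN word: 0x%08x\n", (unsigned)mdio_read_frame(0, 1));
	return 0;
}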
162 | |||
163 | static int macb_phy_probe(struct macb *bp) | ||
164 | { | ||
165 | int phy_address; | ||
166 | u16 phyid1, phyid2; | ||
167 | |||
168 | for (phy_address = 0; phy_address < 32; phy_address++) { | ||
169 | phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1); | ||
170 | phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2); | ||
171 | |||
172 | if (phyid1 != 0xffff && phyid1 != 0x0000 | ||
173 | && phyid2 != 0xffff && phyid2 != 0x0000) | ||
174 | break; | ||
175 | } | ||
176 | |||
177 | if (phy_address == 32) | ||
178 | return -ENODEV; | ||
179 | |||
180 | dev_info(&bp->pdev->dev, | ||
181 | "detected PHY at address %d (ID %04x:%04x)\n", | ||
182 | phy_address, phyid1, phyid2); | ||
183 | |||
184 | bp->mii.phy_id = phy_address; | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static void macb_set_media(struct macb *bp, int media) | ||
189 | { | ||
190 | u32 reg; | ||
191 | |||
192 | spin_lock_irq(&bp->lock); | ||
193 | reg = macb_readl(bp, NCFGR); | ||
194 | reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); | ||
195 | if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL)) | ||
196 | reg |= MACB_BIT(SPD); | ||
197 | if (media & ADVERTISE_FULL) | ||
198 | reg |= MACB_BIT(FD); | ||
199 | macb_writel(bp, NCFGR, reg); | ||
200 | spin_unlock_irq(&bp->lock); | ||
201 | } | ||
202 | |||
203 | static void macb_check_media(struct macb *bp, int ok_to_print, int init_media) | ||
204 | { | ||
205 | struct mii_if_info *mii = &bp->mii; | ||
206 | unsigned int old_carrier, new_carrier; | ||
207 | int advertise, lpa, media, duplex; | ||
208 | |||
209 | /* if forced media, go no further */ | ||
210 | if (mii->force_media) | ||
211 | return; | ||
212 | |||
213 | /* check current and old link status */ | ||
214 | old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; | ||
215 | new_carrier = (unsigned int) mii_link_ok(mii); | ||
216 | |||
217 | /* if carrier state did not change, assume nothing else did */ | ||
218 | if (!init_media && old_carrier == new_carrier) | ||
219 | return; | ||
220 | |||
221 | /* no carrier, nothing much to do */ | ||
222 | if (!new_carrier) { | ||
223 | netif_carrier_off(mii->dev); | ||
224 | printk(KERN_INFO "%s: link down\n", mii->dev->name); | ||
225 | return; | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * we have carrier, see who's on the other end | ||
230 | */ | ||
231 | netif_carrier_on(mii->dev); | ||
232 | |||
233 | /* get MII advertise and LPA values */ | ||
234 | if (!init_media && mii->advertising) { | ||
235 | advertise = mii->advertising; | ||
236 | } else { | ||
237 | advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); | ||
238 | mii->advertising = advertise; | ||
239 | } | ||
240 | lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); | ||
241 | |||
242 | /* figure out media and duplex from advertise and LPA values */ | ||
243 | media = mii_nway_result(lpa & advertise); | ||
244 | duplex = (media & ADVERTISE_FULL) ? 1 : 0; | ||
245 | |||
246 | if (ok_to_print) | ||
247 | printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n", | ||
248 | mii->dev->name, | ||
249 | media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10", | ||
250 | duplex ? "full" : "half", lpa); | ||
251 | |||
252 | mii->full_duplex = duplex; | ||
253 | |||
254 | /* Let the MAC know about the new link state */ | ||
255 | macb_set_media(bp, media); | ||
256 | } | ||
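macb_check_media() leaves the actual speed/duplex resolution to mii_nway_result(lpa & advertise), which picks the best mode both link partners advertise. A simplified user-space sketch of that priority resolution (it ignores 100BASE-T4, which the real helper also considers; the bit values are the standard ANAR/ANLPAR ones used by the ADVERTISE_*/LPA_* constants in <linux/mii.h>):

#include <stdio.h>

/* Standard MII auto-negotiation ability bits (ANAR/ANLPAR layout). */
#define ADV_10HALF  0x0020
#define ADV_10FULL  0x0040
#define ADV_100HALF 0x0080
#define ADV_100FULL 0x0100

/* Pick the best common mode, highest capability first. */
static unsigned int nway_result(unsigned int negotiated)
{
	if (negotiated & ADV_100FULL)
		return ADV_100FULL;
	if (negotiated & ADV_100HALF)
		return ADV_100HALF;
	if (negotiated & ADV_10FULL)
		return ADV_10FULL;
	return ADV_10HALF;
}

int main(void)
{
	/* We advertise everything; the partner only does up to 100 half. */
	unsigned int advertise = ADV_10HALF | ADV_10FULL | ADV_100HALF | ADV_100FULL;
	unsigned int lpa       = ADV_10HALF | ADV_10FULL | ADV_100HALF;
	unsigned int media     = nway_result(lpa & advertise);

	printf("resolved link: %s, %s duplex\n",
	       (media & (ADV_100FULL | ADV_100HALF)) ? "100Mbps" : "10Mbps",
	       (media & (ADV_100FULL | ADV_10FULL)) ? "full" : "half");
	return 0;
}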
257 | |||
258 | static void macb_update_stats(struct macb *bp) | ||
259 | { | ||
260 | u32 __iomem *reg = bp->regs + MACB_PFR; | ||
261 | u32 *p = &bp->hw_stats.rx_pause_frames; | ||
262 | u32 *end = &bp->hw_stats.tx_pause_frames + 1; | ||
263 | |||
264 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); | ||
265 | |||
266 | for(; p < end; p++, reg++) | ||
267 | *p += readl(reg); | ||
268 | } | ||
269 | |||
270 | static void macb_periodic_task(void *arg) | ||
271 | { | ||
272 | struct macb *bp = arg; | ||
273 | |||
274 | macb_update_stats(bp); | ||
275 | macb_check_media(bp, 1, 0); | ||
276 | |||
277 | schedule_delayed_work(&bp->periodic_task, HZ); | ||
278 | } | ||
279 | |||
280 | static void macb_tx(struct macb *bp) | ||
281 | { | ||
282 | unsigned int tail; | ||
283 | unsigned int head; | ||
284 | u32 status; | ||
285 | |||
286 | status = macb_readl(bp, TSR); | ||
287 | macb_writel(bp, TSR, status); | ||
288 | |||
289 | dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n", | ||
290 | (unsigned long)status); | ||
291 | |||
292 | if (status & MACB_BIT(UND)) { | ||
293 | printk(KERN_ERR "%s: TX underrun, resetting buffers\n", | ||
294 | bp->dev->name); | ||
295 | bp->tx_head = bp->tx_tail = 0; | ||
296 | } | ||
297 | |||
298 | if (!(status & MACB_BIT(COMP))) | ||
299 | /* | ||
300 | * This may happen when a buffer becomes complete | ||
301 | * between reading the ISR and scanning the | ||
302 | * descriptors. Nothing to worry about. | ||
303 | */ | ||
304 | return; | ||
305 | |||
306 | head = bp->tx_head; | ||
307 | for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { | ||
308 | struct ring_info *rp = &bp->tx_skb[tail]; | ||
309 | struct sk_buff *skb = rp->skb; | ||
310 | u32 bufstat; | ||
311 | |||
312 | BUG_ON(skb == NULL); | ||
313 | |||
314 | rmb(); | ||
315 | bufstat = bp->tx_ring[tail].ctrl; | ||
316 | |||
317 | if (!(bufstat & MACB_BIT(TX_USED))) | ||
318 | break; | ||
319 | |||
320 | dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n", | ||
321 | tail, skb->data); | ||
322 | dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, | ||
323 | DMA_TO_DEVICE); | ||
324 | bp->stats.tx_packets++; | ||
325 | bp->stats.tx_bytes += skb->len; | ||
326 | rp->skb = NULL; | ||
327 | dev_kfree_skb_irq(skb); | ||
328 | } | ||
329 | |||
330 | bp->tx_tail = tail; | ||
331 | if (netif_queue_stopped(bp->dev) && | ||
332 | TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) | ||
333 | netif_wake_queue(bp->dev); | ||
334 | } | ||
335 | |||
336 | static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | ||
337 | unsigned int last_frag) | ||
338 | { | ||
339 | unsigned int len; | ||
340 | unsigned int frag; | ||
341 | unsigned int offset = 0; | ||
342 | struct sk_buff *skb; | ||
343 | |||
344 | len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); | ||
345 | |||
346 | dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n", | ||
347 | first_frag, last_frag, len); | ||
348 | |||
349 | skb = dev_alloc_skb(len + RX_OFFSET); | ||
350 | if (!skb) { | ||
351 | bp->stats.rx_dropped++; | ||
352 | for (frag = first_frag; ; frag = NEXT_RX(frag)) { | ||
353 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | ||
354 | if (frag == last_frag) | ||
355 | break; | ||
356 | } | ||
357 | wmb(); | ||
358 | return 1; | ||
359 | } | ||
360 | |||
361 | skb_reserve(skb, RX_OFFSET); | ||
362 | skb->dev = bp->dev; | ||
363 | skb->ip_summed = CHECKSUM_NONE; | ||
364 | skb_put(skb, len); | ||
365 | |||
366 | for (frag = first_frag; ; frag = NEXT_RX(frag)) { | ||
367 | unsigned int frag_len = RX_BUFFER_SIZE; | ||
368 | |||
369 | if (offset + frag_len > len) { | ||
370 | BUG_ON(frag != last_frag); | ||
371 | frag_len = len - offset; | ||
372 | } | ||
373 | memcpy(skb->data + offset, | ||
374 | bp->rx_buffers + (RX_BUFFER_SIZE * frag), | ||
375 | frag_len); | ||
376 | offset += RX_BUFFER_SIZE; | ||
377 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | ||
378 | wmb(); | ||
379 | |||
380 | if (frag == last_frag) | ||
381 | break; | ||
382 | } | ||
383 | |||
384 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
385 | |||
386 | bp->stats.rx_packets++; | ||
387 | bp->stats.rx_bytes += len; | ||
388 | bp->dev->last_rx = jiffies; | ||
389 | dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n", | ||
390 | skb->len, skb->csum); | ||
391 | netif_receive_skb(skb); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | /* Mark DMA descriptors from begin up to and not including end as unused */ | ||
397 | static void discard_partial_frame(struct macb *bp, unsigned int begin, | ||
398 | unsigned int end) | ||
399 | { | ||
400 | unsigned int frag; | ||
401 | |||
402 | for (frag = begin; frag != end; frag = NEXT_RX(frag)) | ||
403 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | ||
404 | wmb(); | ||
405 | |||
406 | /* | ||
407 | * When this happens, the hardware stats registers for | ||
408 | * whatever caused this is updated, so we don't have to record | ||
409 | * anything. | ||
410 | */ | ||
411 | } | ||
412 | |||
413 | static int macb_rx(struct macb *bp, int budget) | ||
414 | { | ||
415 | int received = 0; | ||
416 | unsigned int tail = bp->rx_tail; | ||
417 | int first_frag = -1; | ||
418 | |||
419 | for (; budget > 0; tail = NEXT_RX(tail)) { | ||
420 | u32 addr, ctrl; | ||
421 | |||
422 | rmb(); | ||
423 | addr = bp->rx_ring[tail].addr; | ||
424 | ctrl = bp->rx_ring[tail].ctrl; | ||
425 | |||
426 | if (!(addr & MACB_BIT(RX_USED))) | ||
427 | break; | ||
428 | |||
429 | if (ctrl & MACB_BIT(RX_SOF)) { | ||
430 | if (first_frag != -1) | ||
431 | discard_partial_frame(bp, first_frag, tail); | ||
432 | first_frag = tail; | ||
433 | } | ||
434 | |||
435 | if (ctrl & MACB_BIT(RX_EOF)) { | ||
436 | int dropped; | ||
437 | BUG_ON(first_frag == -1); | ||
438 | |||
439 | dropped = macb_rx_frame(bp, first_frag, tail); | ||
440 | first_frag = -1; | ||
441 | if (!dropped) { | ||
442 | received++; | ||
443 | budget--; | ||
444 | } | ||
445 | } | ||
446 | } | ||
447 | |||
448 | if (first_frag != -1) | ||
449 | bp->rx_tail = first_frag; | ||
450 | else | ||
451 | bp->rx_tail = tail; | ||
452 | |||
453 | return received; | ||
454 | } | ||
455 | |||
456 | static int macb_poll(struct net_device *dev, int *budget) | ||
457 | { | ||
458 | struct macb *bp = netdev_priv(dev); | ||
459 | int orig_budget, work_done, retval = 0; | ||
460 | u32 status; | ||
461 | |||
462 | status = macb_readl(bp, RSR); | ||
463 | macb_writel(bp, RSR, status); | ||
464 | |||
465 | if (!status) { | ||
466 | /* | ||
467 | * This may happen if an interrupt was pending before | ||
468 | * this function was called last time, and no packets | ||
469 | * have been received since. | ||
470 | */ | ||
471 | netif_rx_complete(dev); | ||
472 | goto out; | ||
473 | } | ||
474 | |||
475 | dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", | ||
476 | (unsigned long)status, *budget); | ||
477 | |||
478 | if (!(status & MACB_BIT(REC))) { | ||
479 | dev_warn(&bp->pdev->dev, | ||
480 | "No RX buffers complete, status = %02lx\n", | ||
481 | (unsigned long)status); | ||
482 | netif_rx_complete(dev); | ||
483 | goto out; | ||
484 | } | ||
485 | |||
486 | orig_budget = *budget; | ||
487 | if (orig_budget > dev->quota) | ||
488 | orig_budget = dev->quota; | ||
489 | |||
490 | work_done = macb_rx(bp, orig_budget); | ||
491 | if (work_done < orig_budget) { | ||
492 | netif_rx_complete(dev); | ||
493 | retval = 0; | ||
494 | } else { | ||
495 | retval = 1; | ||
496 | } | ||
497 | |||
498 | /* | ||
499 | * We've done what we can to clean the buffers. Make sure we | ||
500 | * get notified when new packets arrive. | ||
501 | */ | ||
502 | out: | ||
503 | macb_writel(bp, IER, MACB_RX_INT_FLAGS); | ||
504 | |||
505 | /* TODO: Handle errors */ | ||
506 | |||
507 | return retval; | ||
508 | } | ||
509 | |||
510 | static irqreturn_t macb_interrupt(int irq, void *dev_id) | ||
511 | { | ||
512 | struct net_device *dev = dev_id; | ||
513 | struct macb *bp = netdev_priv(dev); | ||
514 | u32 status; | ||
515 | |||
516 | status = macb_readl(bp, ISR); | ||
517 | |||
518 | if (unlikely(!status)) | ||
519 | return IRQ_NONE; | ||
520 | |||
521 | spin_lock(&bp->lock); | ||
522 | |||
523 | while (status) { | ||
524 | if (status & MACB_BIT(MFD)) | ||
525 | complete(&bp->mdio_complete); | ||
526 | |||
527 | /* close possible race with dev_close */ | ||
528 | if (unlikely(!netif_running(dev))) { | ||
529 | macb_writel(bp, IDR, ~0UL); | ||
530 | break; | ||
531 | } | ||
532 | |||
533 | if (status & MACB_RX_INT_FLAGS) { | ||
534 | if (netif_rx_schedule_prep(dev)) { | ||
535 | /* | ||
536 | * There's no point taking any more interrupts | ||
537 | * until we have processed the buffers | ||
538 | */ | ||
539 | macb_writel(bp, IDR, MACB_RX_INT_FLAGS); | ||
540 | dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n"); | ||
541 | __netif_rx_schedule(dev); | ||
542 | } | ||
543 | } | ||
544 | |||
545 | if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND))) | ||
546 | macb_tx(bp); | ||
547 | |||
548 | /* | ||
549 | * Link change detection isn't possible with RMII, so we'll | ||
550 | * add that if/when we get our hands on a full-blown MII PHY. | ||
551 | */ | ||
552 | |||
553 | if (status & MACB_BIT(HRESP)) { | ||
554 | /* | ||
555 | * TODO: Reset the hardware, and maybe move the printk | ||
556 | * to a lower-priority context as well (work queue?) | ||
557 | */ | ||
558 | printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n", | ||
559 | dev->name); | ||
560 | } | ||
561 | |||
562 | status = macb_readl(bp, ISR); | ||
563 | } | ||
564 | |||
565 | spin_unlock(&bp->lock); | ||
566 | |||
567 | return IRQ_HANDLED; | ||
568 | } | ||
569 | |||
570 | static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
571 | { | ||
572 | struct macb *bp = netdev_priv(dev); | ||
573 | dma_addr_t mapping; | ||
574 | unsigned int len, entry; | ||
575 | u32 ctrl; | ||
576 | |||
577 | #ifdef DEBUG | ||
578 | int i; | ||
579 | dev_dbg(&bp->pdev->dev, | ||
580 | "start_xmit: len %u head %p data %p tail %p end %p\n", | ||
581 | skb->len, skb->head, skb->data, skb->tail, skb->end); | ||
582 | dev_dbg(&bp->pdev->dev, | ||
583 | "data:"); | ||
584 | for (i = 0; i < 16; i++) | ||
585 | printk(" %02x", (unsigned int)skb->data[i]); | ||
586 | printk("\n"); | ||
587 | #endif | ||
588 | |||
589 | len = skb->len; | ||
590 | spin_lock_irq(&bp->lock); | ||
591 | |||
592 | /* This is a hard error, log it. */ | ||
593 | if (TX_BUFFS_AVAIL(bp) < 1) { | ||
594 | netif_stop_queue(dev); | ||
595 | spin_unlock_irq(&bp->lock); | ||
596 | dev_err(&bp->pdev->dev, | ||
597 | "BUG! Tx Ring full when queue awake!\n"); | ||
598 | dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", | ||
599 | bp->tx_head, bp->tx_tail); | ||
600 | return 1; | ||
601 | } | ||
602 | |||
603 | entry = bp->tx_head; | ||
604 | dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry); | ||
605 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
606 | len, DMA_TO_DEVICE); | ||
607 | bp->tx_skb[entry].skb = skb; | ||
608 | bp->tx_skb[entry].mapping = mapping; | ||
609 | dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n", | ||
610 | skb->data, (unsigned long)mapping); | ||
611 | |||
612 | ctrl = MACB_BF(TX_FRMLEN, len); | ||
613 | ctrl |= MACB_BIT(TX_LAST); | ||
614 | if (entry == (TX_RING_SIZE - 1)) | ||
615 | ctrl |= MACB_BIT(TX_WRAP); | ||
616 | |||
617 | bp->tx_ring[entry].addr = mapping; | ||
618 | bp->tx_ring[entry].ctrl = ctrl; | ||
619 | wmb(); | ||
620 | |||
621 | entry = NEXT_TX(entry); | ||
622 | bp->tx_head = entry; | ||
623 | |||
624 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); | ||
625 | |||
626 | if (TX_BUFFS_AVAIL(bp) < 1) | ||
627 | netif_stop_queue(dev); | ||
628 | |||
629 | spin_unlock_irq(&bp->lock); | ||
630 | |||
631 | dev->trans_start = jiffies; | ||
632 | |||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | static void macb_free_consistent(struct macb *bp) | ||
637 | { | ||
638 | if (bp->tx_skb) { | ||
639 | kfree(bp->tx_skb); | ||
640 | bp->tx_skb = NULL; | ||
641 | } | ||
642 | if (bp->rx_ring) { | ||
643 | dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, | ||
644 | bp->rx_ring, bp->rx_ring_dma); | ||
645 | bp->rx_ring = NULL; | ||
646 | } | ||
647 | if (bp->tx_ring) { | ||
648 | dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, | ||
649 | bp->tx_ring, bp->tx_ring_dma); | ||
650 | bp->tx_ring = NULL; | ||
651 | } | ||
652 | if (bp->rx_buffers) { | ||
653 | dma_free_coherent(&bp->pdev->dev, | ||
654 | RX_RING_SIZE * RX_BUFFER_SIZE, | ||
655 | bp->rx_buffers, bp->rx_buffers_dma); | ||
656 | bp->rx_buffers = NULL; | ||
657 | } | ||
658 | } | ||
659 | |||
660 | static int macb_alloc_consistent(struct macb *bp) | ||
661 | { | ||
662 | int size; | ||
663 | |||
664 | size = TX_RING_SIZE * sizeof(struct ring_info); | ||
665 | bp->tx_skb = kmalloc(size, GFP_KERNEL); | ||
666 | if (!bp->tx_skb) | ||
667 | goto out_err; | ||
668 | |||
669 | size = RX_RING_BYTES; | ||
670 | bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | ||
671 | &bp->rx_ring_dma, GFP_KERNEL); | ||
672 | if (!bp->rx_ring) | ||
673 | goto out_err; | ||
674 | dev_dbg(&bp->pdev->dev, | ||
675 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", | ||
676 | size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); | ||
677 | |||
678 | size = TX_RING_BYTES; | ||
679 | bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | ||
680 | &bp->tx_ring_dma, GFP_KERNEL); | ||
681 | if (!bp->tx_ring) | ||
682 | goto out_err; | ||
683 | dev_dbg(&bp->pdev->dev, | ||
684 | "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", | ||
685 | size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); | ||
686 | |||
687 | size = RX_RING_SIZE * RX_BUFFER_SIZE; | ||
688 | bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, | ||
689 | &bp->rx_buffers_dma, GFP_KERNEL); | ||
690 | if (!bp->rx_buffers) | ||
691 | goto out_err; | ||
692 | dev_dbg(&bp->pdev->dev, | ||
693 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", | ||
694 | size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); | ||
695 | |||
696 | return 0; | ||
697 | |||
698 | out_err: | ||
699 | macb_free_consistent(bp); | ||
700 | return -ENOMEM; | ||
701 | } | ||
702 | |||
703 | static void macb_init_rings(struct macb *bp) | ||
704 | { | ||
705 | int i; | ||
706 | dma_addr_t addr; | ||
707 | |||
708 | addr = bp->rx_buffers_dma; | ||
709 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
710 | bp->rx_ring[i].addr = addr; | ||
711 | bp->rx_ring[i].ctrl = 0; | ||
712 | addr += RX_BUFFER_SIZE; | ||
713 | } | ||
714 | bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); | ||
715 | |||
716 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
717 | bp->tx_ring[i].addr = 0; | ||
718 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); | ||
719 | } | ||
720 | bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); | ||
721 | |||
722 | bp->rx_tail = bp->tx_head = bp->tx_tail = 0; | ||
723 | } | ||
724 | |||
725 | static void macb_reset_hw(struct macb *bp) | ||
726 | { | ||
727 | /* Make sure we have the write buffer for ourselves */ | ||
728 | wmb(); | ||
729 | |||
730 | /* | ||
731 | * Disable RX and TX (XXX: Should we halt the transmission | ||
732 | * more gracefully?) | ||
733 | */ | ||
734 | macb_writel(bp, NCR, 0); | ||
735 | |||
736 | /* Clear the stats registers (XXX: Update stats first?) */ | ||
737 | macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); | ||
738 | |||
739 | /* Clear all status flags */ | ||
740 | macb_writel(bp, TSR, ~0UL); | ||
741 | macb_writel(bp, RSR, ~0UL); | ||
742 | |||
743 | /* Disable all interrupts */ | ||
744 | macb_writel(bp, IDR, ~0UL); | ||
745 | macb_readl(bp, ISR); | ||
746 | } | ||
747 | |||
748 | static void macb_init_hw(struct macb *bp) | ||
749 | { | ||
750 | u32 config; | ||
751 | |||
752 | macb_reset_hw(bp); | ||
753 | __macb_set_hwaddr(bp); | ||
754 | |||
755 | config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L); | ||
756 | config |= MACB_BIT(PAE); /* PAuse Enable */ | ||
757 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ | ||
758 | if (bp->dev->flags & IFF_PROMISC) | ||
759 | config |= MACB_BIT(CAF); /* Copy All Frames */ | ||
760 | if (!(bp->dev->flags & IFF_BROADCAST)) | ||
761 | config |= MACB_BIT(NBC); /* No BroadCast */ | ||
762 | macb_writel(bp, NCFGR, config); | ||
763 | |||
764 | /* Initialize TX and RX buffers */ | ||
765 | macb_writel(bp, RBQP, bp->rx_ring_dma); | ||
766 | macb_writel(bp, TBQP, bp->tx_ring_dma); | ||
767 | |||
768 | /* Enable TX and RX */ | ||
769 | macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE)); | ||
770 | |||
771 | /* Enable interrupts */ | ||
772 | macb_writel(bp, IER, (MACB_BIT(RCOMP) | ||
773 | | MACB_BIT(RXUBR) | ||
774 | | MACB_BIT(ISR_TUND) | ||
775 | | MACB_BIT(ISR_RLE) | ||
776 | | MACB_BIT(TXERR) | ||
777 | | MACB_BIT(TCOMP) | ||
778 | | MACB_BIT(ISR_ROVR) | ||
779 | | MACB_BIT(HRESP))); | ||
780 | } | ||
781 | |||
782 | static void macb_init_phy(struct net_device *dev) | ||
783 | { | ||
784 | struct macb *bp = netdev_priv(dev); | ||
785 | |||
786 | /* Set some reasonable default settings */ | ||
787 | macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE, | ||
788 | ADVERTISE_CSMA | ADVERTISE_ALL); | ||
789 | macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR, | ||
790 | (BMCR_SPEED100 | BMCR_ANENABLE | ||
791 | | BMCR_ANRESTART | BMCR_FULLDPLX)); | ||
792 | } | ||
793 | |||
794 | static int macb_open(struct net_device *dev) | ||
795 | { | ||
796 | struct macb *bp = netdev_priv(dev); | ||
797 | int err; | ||
798 | |||
799 | dev_dbg(&bp->pdev->dev, "open\n"); | ||
800 | |||
801 | if (!is_valid_ether_addr(dev->dev_addr)) | ||
802 | return -EADDRNOTAVAIL; | ||
803 | |||
804 | err = macb_alloc_consistent(bp); | ||
805 | if (err) { | ||
806 | printk(KERN_ERR | ||
807 | "%s: Unable to allocate DMA memory (error %d)\n", | ||
808 | dev->name, err); | ||
809 | return err; | ||
810 | } | ||
811 | |||
812 | macb_init_rings(bp); | ||
813 | macb_init_hw(bp); | ||
814 | macb_init_phy(dev); | ||
815 | |||
816 | macb_check_media(bp, 1, 1); | ||
817 | netif_start_queue(dev); | ||
818 | |||
819 | schedule_delayed_work(&bp->periodic_task, HZ); | ||
820 | |||
821 | return 0; | ||
822 | } | ||
823 | |||
824 | static int macb_close(struct net_device *dev) | ||
825 | { | ||
826 | struct macb *bp = netdev_priv(dev); | ||
827 | unsigned long flags; | ||
828 | |||
829 | cancel_rearming_delayed_work(&bp->periodic_task); | ||
830 | |||
831 | netif_stop_queue(dev); | ||
832 | |||
833 | spin_lock_irqsave(&bp->lock, flags); | ||
834 | macb_reset_hw(bp); | ||
835 | netif_carrier_off(dev); | ||
836 | spin_unlock_irqrestore(&bp->lock, flags); | ||
837 | |||
838 | macb_free_consistent(bp); | ||
839 | |||
840 | return 0; | ||
841 | } | ||
842 | |||
843 | static struct net_device_stats *macb_get_stats(struct net_device *dev) | ||
844 | { | ||
845 | struct macb *bp = netdev_priv(dev); | ||
846 | struct net_device_stats *nstat = &bp->stats; | ||
847 | struct macb_stats *hwstat = &bp->hw_stats; | ||
848 | |||
849 | /* Convert HW stats into netdevice stats */ | ||
850 | nstat->rx_errors = (hwstat->rx_fcs_errors + | ||
851 | hwstat->rx_align_errors + | ||
852 | hwstat->rx_resource_errors + | ||
853 | hwstat->rx_overruns + | ||
854 | hwstat->rx_oversize_pkts + | ||
855 | hwstat->rx_jabbers + | ||
856 | hwstat->rx_undersize_pkts + | ||
857 | hwstat->sqe_test_errors + | ||
858 | hwstat->rx_length_mismatch); | ||
859 | nstat->tx_errors = (hwstat->tx_late_cols + | ||
860 | hwstat->tx_excessive_cols + | ||
861 | hwstat->tx_underruns + | ||
862 | hwstat->tx_carrier_errors); | ||
863 | nstat->collisions = (hwstat->tx_single_cols + | ||
864 | hwstat->tx_multiple_cols + | ||
865 | hwstat->tx_excessive_cols); | ||
866 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + | ||
867 | hwstat->rx_jabbers + | ||
868 | hwstat->rx_undersize_pkts + | ||
869 | hwstat->rx_length_mismatch); | ||
870 | nstat->rx_over_errors = hwstat->rx_resource_errors; | ||
871 | nstat->rx_crc_errors = hwstat->rx_fcs_errors; | ||
872 | nstat->rx_frame_errors = hwstat->rx_align_errors; | ||
873 | nstat->rx_fifo_errors = hwstat->rx_overruns; | ||
874 | /* XXX: What does "missed" mean? */ | ||
875 | nstat->tx_aborted_errors = hwstat->tx_excessive_cols; | ||
876 | nstat->tx_carrier_errors = hwstat->tx_carrier_errors; | ||
877 | nstat->tx_fifo_errors = hwstat->tx_underruns; | ||
878 | /* Don't know about heartbeat or window errors... */ | ||
879 | |||
880 | return nstat; | ||
881 | } | ||
882 | |||
883 | static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
884 | { | ||
885 | struct macb *bp = netdev_priv(dev); | ||
886 | int ret; | ||
887 | unsigned long flags; | ||
888 | |||
889 | spin_lock_irqsave(&bp->lock, flags); | ||
890 | ret = mii_ethtool_gset(&bp->mii, cmd); | ||
891 | spin_unlock_irqrestore(&bp->lock, flags); | ||
892 | |||
893 | return ret; | ||
894 | } | ||
895 | |||
896 | static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
897 | { | ||
898 | struct macb *bp = netdev_priv(dev); | ||
899 | int ret; | ||
900 | unsigned long flags; | ||
901 | |||
902 | spin_lock_irqsave(&bp->lock, flags); | ||
903 | ret = mii_ethtool_sset(&bp->mii, cmd); | ||
904 | spin_unlock_irqrestore(&bp->lock, flags); | ||
905 | |||
906 | return ret; | ||
907 | } | ||
908 | |||
909 | static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
910 | { | ||
911 | struct macb *bp = netdev_priv(dev); | ||
912 | |||
913 | strcpy(info->driver, bp->pdev->dev.driver->name); | ||
914 | strcpy(info->version, "$Revision: 1.14 $"); | ||
915 | strcpy(info->bus_info, bp->pdev->dev.bus_id); | ||
916 | } | ||
917 | |||
918 | static int macb_nway_reset(struct net_device *dev) | ||
919 | { | ||
920 | struct macb *bp = netdev_priv(dev); | ||
921 | return mii_nway_restart(&bp->mii); | ||
922 | } | ||
923 | |||
924 | static struct ethtool_ops macb_ethtool_ops = { | ||
925 | .get_settings = macb_get_settings, | ||
926 | .set_settings = macb_set_settings, | ||
927 | .get_drvinfo = macb_get_drvinfo, | ||
928 | .nway_reset = macb_nway_reset, | ||
929 | .get_link = ethtool_op_get_link, | ||
930 | }; | ||
931 | |||
932 | static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
933 | { | ||
934 | struct macb *bp = netdev_priv(dev); | ||
935 | int ret; | ||
936 | unsigned long flags; | ||
937 | |||
938 | if (!netif_running(dev)) | ||
939 | return -EINVAL; | ||
940 | |||
941 | spin_lock_irqsave(&bp->lock, flags); | ||
942 | ret = generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL); | ||
943 | spin_unlock_irqrestore(&bp->lock, flags); | ||
944 | |||
945 | return ret; | ||
946 | } | ||
947 | |||
948 | static ssize_t macb_mii_show(const struct class_device *cd, char *buf, | ||
949 | unsigned long addr) | ||
950 | { | ||
951 | struct net_device *dev = to_net_dev(cd); | ||
952 | struct macb *bp = netdev_priv(dev); | ||
953 | ssize_t ret = -EINVAL; | ||
954 | |||
955 | if (netif_running(dev)) { | ||
956 | int value; | ||
957 | value = macb_mdio_read(dev, bp->mii.phy_id, addr); | ||
958 | ret = sprintf(buf, "0x%04x\n", (uint16_t)value); | ||
959 | } | ||
960 | |||
961 | return ret; | ||
962 | } | ||
963 | |||
964 | #define MII_ENTRY(name, addr) \ | ||
965 | static ssize_t show_##name(struct class_device *cd, char *buf) \ | ||
966 | { \ | ||
967 | return macb_mii_show(cd, buf, addr); \ | ||
968 | } \ | ||
969 | static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) | ||
970 | |||
971 | MII_ENTRY(bmcr, MII_BMCR); | ||
972 | MII_ENTRY(bmsr, MII_BMSR); | ||
973 | MII_ENTRY(physid1, MII_PHYSID1); | ||
974 | MII_ENTRY(physid2, MII_PHYSID2); | ||
975 | MII_ENTRY(advertise, MII_ADVERTISE); | ||
976 | MII_ENTRY(lpa, MII_LPA); | ||
977 | MII_ENTRY(expansion, MII_EXPANSION); | ||
978 | |||
979 | static struct attribute *macb_mii_attrs[] = { | ||
980 | &class_device_attr_bmcr.attr, | ||
981 | &class_device_attr_bmsr.attr, | ||
982 | &class_device_attr_physid1.attr, | ||
983 | &class_device_attr_physid2.attr, | ||
984 | &class_device_attr_advertise.attr, | ||
985 | &class_device_attr_lpa.attr, | ||
986 | &class_device_attr_expansion.attr, | ||
987 | NULL, | ||
988 | }; | ||
989 | |||
990 | static struct attribute_group macb_mii_group = { | ||
991 | .name = "mii", | ||
992 | .attrs = macb_mii_attrs, | ||
993 | }; | ||
994 | |||
995 | static void macb_unregister_sysfs(struct net_device *net) | ||
996 | { | ||
997 | struct class_device *class_dev = &net->class_dev; | ||
998 | |||
999 | sysfs_remove_group(&class_dev->kobj, &macb_mii_group); | ||
1000 | } | ||
1001 | |||
1002 | static int macb_register_sysfs(struct net_device *net) | ||
1003 | { | ||
1004 | struct class_device *class_dev = &net->class_dev; | ||
1005 | int ret; | ||
1006 | |||
1007 | ret = sysfs_create_group(&class_dev->kobj, &macb_mii_group); | ||
1008 | if (ret) | ||
1009 | printk(KERN_WARNING | ||
1010 | "%s: sysfs mii attribute registration failed: %d\n", | ||
1011 | net->name, ret); | ||
1012 | return ret; | ||
1013 | } | ||
1014 | static int __devinit macb_probe(struct platform_device *pdev) | ||
1015 | { | ||
1016 | struct eth_platform_data *pdata; | ||
1017 | struct resource *regs; | ||
1018 | struct net_device *dev; | ||
1019 | struct macb *bp; | ||
1020 | unsigned long pclk_hz; | ||
1021 | u32 config; | ||
1022 | int err = -ENXIO; | ||
1023 | |||
1024 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1025 | if (!regs) { | ||
1026 | dev_err(&pdev->dev, "no mmio resource defined\n"); | ||
1027 | goto err_out; | ||
1028 | } | ||
1029 | |||
1030 | err = -ENOMEM; | ||
1031 | dev = alloc_etherdev(sizeof(*bp)); | ||
1032 | if (!dev) { | ||
1033 | dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n"); | ||
1034 | goto err_out; | ||
1035 | } | ||
1036 | |||
1037 | SET_MODULE_OWNER(dev); | ||
1038 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1039 | |||
1040 | /* TODO: Actually, we have some interesting features... */ | ||
1041 | dev->features |= 0; | ||
1042 | |||
1043 | bp = netdev_priv(dev); | ||
1044 | bp->pdev = pdev; | ||
1045 | bp->dev = dev; | ||
1046 | |||
1047 | spin_lock_init(&bp->lock); | ||
1048 | |||
1049 | bp->pclk = clk_get(&pdev->dev, "pclk"); | ||
1050 | if (IS_ERR(bp->pclk)) { | ||
1051 | dev_err(&pdev->dev, "failed to get pclk\n"); | ||
1052 | goto err_out_free_dev; | ||
1053 | } | ||
1054 | bp->hclk = clk_get(&pdev->dev, "hclk"); | ||
1055 | if (IS_ERR(bp->hclk)) { | ||
1056 | dev_err(&pdev->dev, "failed to get hclk\n"); | ||
1057 | goto err_out_put_pclk; | ||
1058 | } | ||
1059 | |||
1060 | clk_enable(bp->pclk); | ||
1061 | clk_enable(bp->hclk); | ||
1062 | |||
1063 | bp->regs = ioremap(regs->start, regs->end - regs->start + 1); | ||
1064 | if (!bp->regs) { | ||
1065 | dev_err(&pdev->dev, "failed to map registers, aborting.\n"); | ||
1066 | err = -ENOMEM; | ||
1067 | goto err_out_disable_clocks; | ||
1068 | } | ||
1069 | |||
1070 | dev->irq = platform_get_irq(pdev, 0); | ||
1071 | err = request_irq(dev->irq, macb_interrupt, SA_SAMPLE_RANDOM, | ||
1072 | dev->name, dev); | ||
1073 | if (err) { | ||
1074 | printk(KERN_ERR | ||
1075 | "%s: Unable to request IRQ %d (error %d)\n", | ||
1076 | dev->name, dev->irq, err); | ||
1077 | goto err_out_iounmap; | ||
1078 | } | ||
1079 | |||
1080 | dev->open = macb_open; | ||
1081 | dev->stop = macb_close; | ||
1082 | dev->hard_start_xmit = macb_start_xmit; | ||
1083 | dev->get_stats = macb_get_stats; | ||
1084 | dev->do_ioctl = macb_ioctl; | ||
1085 | dev->poll = macb_poll; | ||
1086 | dev->weight = 64; | ||
1087 | dev->ethtool_ops = &macb_ethtool_ops; | ||
1088 | |||
1089 | dev->base_addr = regs->start; | ||
1090 | |||
1091 | INIT_WORK(&bp->periodic_task, macb_periodic_task, bp); | ||
1092 | mutex_init(&bp->mdio_mutex); | ||
1093 | init_completion(&bp->mdio_complete); | ||
1094 | |||
1095 | /* Set MII management clock divider */ | ||
1096 | pclk_hz = clk_get_rate(bp->pclk); | ||
1097 | if (pclk_hz <= 20000000) | ||
1098 | config = MACB_BF(CLK, MACB_CLK_DIV8); | ||
1099 | else if (pclk_hz <= 40000000) | ||
1100 | config = MACB_BF(CLK, MACB_CLK_DIV16); | ||
1101 | else if (pclk_hz <= 80000000) | ||
1102 | config = MACB_BF(CLK, MACB_CLK_DIV32); | ||
1103 | else | ||
1104 | config = MACB_BF(CLK, MACB_CLK_DIV64); | ||
1105 | macb_writel(bp, NCFGR, config); | ||
1106 | |||
1107 | bp->mii.dev = dev; | ||
1108 | bp->mii.mdio_read = macb_mdio_read; | ||
1109 | bp->mii.mdio_write = macb_mdio_write; | ||
1110 | bp->mii.phy_id_mask = 0x1f; | ||
1111 | bp->mii.reg_num_mask = 0x1f; | ||
1112 | |||
1113 | macb_get_hwaddr(bp); | ||
1114 | err = macb_phy_probe(bp); | ||
1115 | if (err) { | ||
1116 | dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n"); | ||
1117 | goto err_out_free_irq; | ||
1118 | } | ||
1119 | |||
1120 | pdata = pdev->dev.platform_data; | ||
1121 | if (pdata && pdata->is_rmii) | ||
1122 | macb_writel(bp, USRIO, 0); | ||
1123 | else | ||
1124 | macb_writel(bp, USRIO, MACB_BIT(MII)); | ||
1125 | |||
1126 | bp->tx_pending = DEF_TX_RING_PENDING; | ||
1127 | |||
1128 | err = register_netdev(dev); | ||
1129 | if (err) { | ||
1130 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | ||
1131 | goto err_out_free_irq; | ||
1132 | } | ||
1133 | |||
1134 | platform_set_drvdata(pdev, dev); | ||
1135 | |||
1136 | macb_register_sysfs(dev); | ||
1137 | |||
1138 | printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d " | ||
1139 | "(%02x:%02x:%02x:%02x:%02x:%02x)\n", | ||
1140 | dev->name, dev->base_addr, dev->irq, | ||
1141 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], | ||
1142 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | ||
1143 | |||
1144 | return 0; | ||
1145 | |||
1146 | err_out_free_irq: | ||
1147 | free_irq(dev->irq, dev); | ||
1148 | err_out_iounmap: | ||
1149 | iounmap(bp->regs); | ||
1150 | err_out_disable_clocks: | ||
1151 | clk_disable(bp->hclk); | ||
1152 | clk_disable(bp->pclk); | ||
1153 | clk_put(bp->hclk); | ||
1154 | err_out_put_pclk: | ||
1155 | clk_put(bp->pclk); | ||
1156 | err_out_free_dev: | ||
1157 | free_netdev(dev); | ||
1158 | err_out: | ||
1159 | platform_set_drvdata(pdev, NULL); | ||
1160 | return err; | ||
1161 | } | ||
1162 | |||
1163 | static int __devexit macb_remove(struct platform_device *pdev) | ||
1164 | { | ||
1165 | struct net_device *dev; | ||
1166 | struct macb *bp; | ||
1167 | |||
1168 | dev = platform_get_drvdata(pdev); | ||
1169 | |||
1170 | if (dev) { | ||
1171 | bp = netdev_priv(dev); | ||
1172 | macb_unregister_sysfs(dev); | ||
1173 | unregister_netdev(dev); | ||
1174 | free_irq(dev->irq, dev); | ||
1175 | iounmap(bp->regs); | ||
1176 | clk_disable(bp->hclk); | ||
1177 | clk_disable(bp->pclk); | ||
1178 | clk_put(bp->hclk); | ||
1179 | clk_put(bp->pclk); | ||
1180 | free_netdev(dev); | ||
1181 | platform_set_drvdata(pdev, NULL); | ||
1182 | } | ||
1183 | |||
1184 | return 0; | ||
1185 | } | ||
1186 | |||
1187 | static struct platform_driver macb_driver = { | ||
1188 | .probe = macb_probe, | ||
1189 | .remove = __devexit_p(macb_remove), | ||
1190 | .driver = { | ||
1191 | .name = "macb", | ||
1192 | }, | ||
1193 | }; | ||
1194 | |||
1195 | static int __init macb_init(void) | ||
1196 | { | ||
1197 | return platform_driver_register(&macb_driver); | ||
1198 | } | ||
1199 | |||
1200 | static void __exit macb_exit(void) | ||
1201 | { | ||
1202 | platform_driver_unregister(&macb_driver); | ||
1203 | } | ||
1204 | |||
1205 | module_init(macb_init); | ||
1206 | module_exit(macb_exit); | ||
1207 | |||
1208 | MODULE_LICENSE("GPL"); | ||
1209 | MODULE_DESCRIPTION("Atmel MACB Ethernet driver"); | ||
1210 | MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); | ||
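Since macb_probe() binds purely by platform-device name and reads an is_rmii flag from dev.platform_data, board-support code is expected to register a matching "macb" device. The sketch below is an assumption for illustration only: the platform_data structure name and layout are hypothetical, and the resource/clock setup that the probe routine also needs is omitted.

    #include <linux/platform_device.h>

    /* Hypothetical platform data; the probe code above only dereferences is_rmii. */
    struct macb_example_platform_data {
            int is_rmii;
    };

    static struct macb_example_platform_data macb_example_pdata = {
            .is_rmii = 1,                   /* select RMII instead of MII */
    };

    static struct platform_device macb_example_device = {
            .name = "macb",                 /* must match macb_driver.driver.name */
            .id   = 0,
            .dev  = {
                    .platform_data = &macb_example_pdata,
            },
    };

    /* Called from board init code (sketch): */
    static int __init macb_example_board_init(void)
    {
            return platform_device_register(&macb_example_device);
    }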
diff --git a/drivers/net/macb.h b/drivers/net/macb.h new file mode 100644 index 000000000000..8c253db69881 --- /dev/null +++ b/drivers/net/macb.h | |||
@@ -0,0 +1,387 @@ | |||
1 | /* | ||
2 | * Atmel MACB Ethernet Controller driver | ||
3 | * | ||
4 | * Copyright (C) 2004-2006 Atmel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef _MACB_H | ||
11 | #define _MACB_H | ||
12 | |||
13 | /* MACB register offsets */ | ||
14 | #define MACB_NCR 0x0000 | ||
15 | #define MACB_NCFGR 0x0004 | ||
16 | #define MACB_NSR 0x0008 | ||
17 | #define MACB_TSR 0x0014 | ||
18 | #define MACB_RBQP 0x0018 | ||
19 | #define MACB_TBQP 0x001c | ||
20 | #define MACB_RSR 0x0020 | ||
21 | #define MACB_ISR 0x0024 | ||
22 | #define MACB_IER 0x0028 | ||
23 | #define MACB_IDR 0x002c | ||
24 | #define MACB_IMR 0x0030 | ||
25 | #define MACB_MAN 0x0034 | ||
26 | #define MACB_PTR 0x0038 | ||
27 | #define MACB_PFR 0x003c | ||
28 | #define MACB_FTO 0x0040 | ||
29 | #define MACB_SCF 0x0044 | ||
30 | #define MACB_MCF 0x0048 | ||
31 | #define MACB_FRO 0x004c | ||
32 | #define MACB_FCSE 0x0050 | ||
33 | #define MACB_ALE 0x0054 | ||
34 | #define MACB_DTF 0x0058 | ||
35 | #define MACB_LCOL 0x005c | ||
36 | #define MACB_EXCOL 0x0060 | ||
37 | #define MACB_TUND 0x0064 | ||
38 | #define MACB_CSE 0x0068 | ||
39 | #define MACB_RRE 0x006c | ||
40 | #define MACB_ROVR 0x0070 | ||
41 | #define MACB_RSE 0x0074 | ||
42 | #define MACB_ELE 0x0078 | ||
43 | #define MACB_RJA 0x007c | ||
44 | #define MACB_USF 0x0080 | ||
45 | #define MACB_STE 0x0084 | ||
46 | #define MACB_RLE 0x0088 | ||
47 | #define MACB_TPF 0x008c | ||
48 | #define MACB_HRB 0x0090 | ||
49 | #define MACB_HRT 0x0094 | ||
50 | #define MACB_SA1B 0x0098 | ||
51 | #define MACB_SA1T 0x009c | ||
52 | #define MACB_SA2B 0x00a0 | ||
53 | #define MACB_SA2T 0x00a4 | ||
54 | #define MACB_SA3B 0x00a8 | ||
55 | #define MACB_SA3T 0x00ac | ||
56 | #define MACB_SA4B 0x00b0 | ||
57 | #define MACB_SA4T 0x00b4 | ||
58 | #define MACB_TID 0x00b8 | ||
59 | #define MACB_TPQ 0x00bc | ||
60 | #define MACB_USRIO 0x00c0 | ||
61 | #define MACB_WOL 0x00c4 | ||
62 | |||
63 | /* Bitfields in NCR */ | ||
64 | #define MACB_LB_OFFSET 0 | ||
65 | #define MACB_LB_SIZE 1 | ||
66 | #define MACB_LLB_OFFSET 1 | ||
67 | #define MACB_LLB_SIZE 1 | ||
68 | #define MACB_RE_OFFSET 2 | ||
69 | #define MACB_RE_SIZE 1 | ||
70 | #define MACB_TE_OFFSET 3 | ||
71 | #define MACB_TE_SIZE 1 | ||
72 | #define MACB_MPE_OFFSET 4 | ||
73 | #define MACB_MPE_SIZE 1 | ||
74 | #define MACB_CLRSTAT_OFFSET 5 | ||
75 | #define MACB_CLRSTAT_SIZE 1 | ||
76 | #define MACB_INCSTAT_OFFSET 6 | ||
77 | #define MACB_INCSTAT_SIZE 1 | ||
78 | #define MACB_WESTAT_OFFSET 7 | ||
79 | #define MACB_WESTAT_SIZE 1 | ||
80 | #define MACB_BP_OFFSET 8 | ||
81 | #define MACB_BP_SIZE 1 | ||
82 | #define MACB_TSTART_OFFSET 9 | ||
83 | #define MACB_TSTART_SIZE 1 | ||
84 | #define MACB_THALT_OFFSET 10 | ||
85 | #define MACB_THALT_SIZE 1 | ||
86 | #define MACB_NCR_TPF_OFFSET 11 | ||
87 | #define MACB_NCR_TPF_SIZE 1 | ||
88 | #define MACB_TZQ_OFFSET 12 | ||
89 | #define MACB_TZQ_SIZE 1 | ||
90 | |||
91 | /* Bitfields in NCFGR */ | ||
92 | #define MACB_SPD_OFFSET 0 | ||
93 | #define MACB_SPD_SIZE 1 | ||
94 | #define MACB_FD_OFFSET 1 | ||
95 | #define MACB_FD_SIZE 1 | ||
96 | #define MACB_BIT_RATE_OFFSET 2 | ||
97 | #define MACB_BIT_RATE_SIZE 1 | ||
98 | #define MACB_JFRAME_OFFSET 3 | ||
99 | #define MACB_JFRAME_SIZE 1 | ||
100 | #define MACB_CAF_OFFSET 4 | ||
101 | #define MACB_CAF_SIZE 1 | ||
102 | #define MACB_NBC_OFFSET 5 | ||
103 | #define MACB_NBC_SIZE 1 | ||
104 | #define MACB_NCFGR_MTI_OFFSET 6 | ||
105 | #define MACB_NCFGR_MTI_SIZE 1 | ||
106 | #define MACB_UNI_OFFSET 7 | ||
107 | #define MACB_UNI_SIZE 1 | ||
108 | #define MACB_BIG_OFFSET 8 | ||
109 | #define MACB_BIG_SIZE 1 | ||
110 | #define MACB_EAE_OFFSET 9 | ||
111 | #define MACB_EAE_SIZE 1 | ||
112 | #define MACB_CLK_OFFSET 10 | ||
113 | #define MACB_CLK_SIZE 2 | ||
114 | #define MACB_RTY_OFFSET 12 | ||
115 | #define MACB_RTY_SIZE 1 | ||
116 | #define MACB_PAE_OFFSET 13 | ||
117 | #define MACB_PAE_SIZE 1 | ||
118 | #define MACB_RBOF_OFFSET 14 | ||
119 | #define MACB_RBOF_SIZE 2 | ||
120 | #define MACB_RLCE_OFFSET 16 | ||
121 | #define MACB_RLCE_SIZE 1 | ||
122 | #define MACB_DRFCS_OFFSET 17 | ||
123 | #define MACB_DRFCS_SIZE 1 | ||
124 | #define MACB_EFRHD_OFFSET 18 | ||
125 | #define MACB_EFRHD_SIZE 1 | ||
126 | #define MACB_IRXFCS_OFFSET 19 | ||
127 | #define MACB_IRXFCS_SIZE 1 | ||
128 | |||
129 | /* Bitfields in NSR */ | ||
130 | #define MACB_NSR_LINK_OFFSET 0 | ||
131 | #define MACB_NSR_LINK_SIZE 1 | ||
132 | #define MACB_MDIO_OFFSET 1 | ||
133 | #define MACB_MDIO_SIZE 1 | ||
134 | #define MACB_IDLE_OFFSET 2 | ||
135 | #define MACB_IDLE_SIZE 1 | ||
136 | |||
137 | /* Bitfields in TSR */ | ||
138 | #define MACB_UBR_OFFSET 0 | ||
139 | #define MACB_UBR_SIZE 1 | ||
140 | #define MACB_COL_OFFSET 1 | ||
141 | #define MACB_COL_SIZE 1 | ||
142 | #define MACB_TSR_RLE_OFFSET 2 | ||
143 | #define MACB_TSR_RLE_SIZE 1 | ||
144 | #define MACB_TGO_OFFSET 3 | ||
145 | #define MACB_TGO_SIZE 1 | ||
146 | #define MACB_BEX_OFFSET 4 | ||
147 | #define MACB_BEX_SIZE 1 | ||
148 | #define MACB_COMP_OFFSET 5 | ||
149 | #define MACB_COMP_SIZE 1 | ||
150 | #define MACB_UND_OFFSET 6 | ||
151 | #define MACB_UND_SIZE 1 | ||
152 | |||
153 | /* Bitfields in RSR */ | ||
154 | #define MACB_BNA_OFFSET 0 | ||
155 | #define MACB_BNA_SIZE 1 | ||
156 | #define MACB_REC_OFFSET 1 | ||
157 | #define MACB_REC_SIZE 1 | ||
158 | #define MACB_OVR_OFFSET 2 | ||
159 | #define MACB_OVR_SIZE 1 | ||
160 | |||
161 | /* Bitfields in ISR/IER/IDR/IMR */ | ||
162 | #define MACB_MFD_OFFSET 0 | ||
163 | #define MACB_MFD_SIZE 1 | ||
164 | #define MACB_RCOMP_OFFSET 1 | ||
165 | #define MACB_RCOMP_SIZE 1 | ||
166 | #define MACB_RXUBR_OFFSET 2 | ||
167 | #define MACB_RXUBR_SIZE 1 | ||
168 | #define MACB_TXUBR_OFFSET 3 | ||
169 | #define MACB_TXUBR_SIZE 1 | ||
170 | #define MACB_ISR_TUND_OFFSET 4 | ||
171 | #define MACB_ISR_TUND_SIZE 1 | ||
172 | #define MACB_ISR_RLE_OFFSET 5 | ||
173 | #define MACB_ISR_RLE_SIZE 1 | ||
174 | #define MACB_TXERR_OFFSET 6 | ||
175 | #define MACB_TXERR_SIZE 1 | ||
176 | #define MACB_TCOMP_OFFSET 7 | ||
177 | #define MACB_TCOMP_SIZE 1 | ||
178 | #define MACB_ISR_LINK_OFFSET 9 | ||
179 | #define MACB_ISR_LINK_SIZE 1 | ||
180 | #define MACB_ISR_ROVR_OFFSET 10 | ||
181 | #define MACB_ISR_ROVR_SIZE 1 | ||
182 | #define MACB_HRESP_OFFSET 11 | ||
183 | #define MACB_HRESP_SIZE 1 | ||
184 | #define MACB_PFR_OFFSET 12 | ||
185 | #define MACB_PFR_SIZE 1 | ||
186 | #define MACB_PTZ_OFFSET 13 | ||
187 | #define MACB_PTZ_SIZE 1 | ||
188 | |||
189 | /* Bitfields in MAN */ | ||
190 | #define MACB_DATA_OFFSET 0 | ||
191 | #define MACB_DATA_SIZE 16 | ||
192 | #define MACB_CODE_OFFSET 16 | ||
193 | #define MACB_CODE_SIZE 2 | ||
194 | #define MACB_REGA_OFFSET 18 | ||
195 | #define MACB_REGA_SIZE 5 | ||
196 | #define MACB_PHYA_OFFSET 23 | ||
197 | #define MACB_PHYA_SIZE 5 | ||
198 | #define MACB_RW_OFFSET 28 | ||
199 | #define MACB_RW_SIZE 2 | ||
200 | #define MACB_SOF_OFFSET 30 | ||
201 | #define MACB_SOF_SIZE 2 | ||
202 | |||
203 | /* Bitfields in USRIO */ | ||
204 | #define MACB_MII_OFFSET 0 | ||
205 | #define MACB_MII_SIZE 1 | ||
206 | #define MACB_EAM_OFFSET 1 | ||
207 | #define MACB_EAM_SIZE 1 | ||
208 | #define MACB_TX_PAUSE_OFFSET 2 | ||
209 | #define MACB_TX_PAUSE_SIZE 1 | ||
210 | #define MACB_TX_PAUSE_ZERO_OFFSET 3 | ||
211 | #define MACB_TX_PAUSE_ZERO_SIZE 1 | ||
212 | |||
213 | /* Bitfields in WOL */ | ||
214 | #define MACB_IP_OFFSET 0 | ||
215 | #define MACB_IP_SIZE 16 | ||
216 | #define MACB_MAG_OFFSET 16 | ||
217 | #define MACB_MAG_SIZE 1 | ||
218 | #define MACB_ARP_OFFSET 17 | ||
219 | #define MACB_ARP_SIZE 1 | ||
220 | #define MACB_SA1_OFFSET 18 | ||
221 | #define MACB_SA1_SIZE 1 | ||
222 | #define MACB_WOL_MTI_OFFSET 19 | ||
223 | #define MACB_WOL_MTI_SIZE 1 | ||
224 | |||
225 | /* Constants for CLK */ | ||
226 | #define MACB_CLK_DIV8 0 | ||
227 | #define MACB_CLK_DIV16 1 | ||
228 | #define MACB_CLK_DIV32 2 | ||
229 | #define MACB_CLK_DIV64 3 | ||
230 | |||
231 | /* Constants for MAN register */ | ||
232 | #define MACB_MAN_SOF 1 | ||
233 | #define MACB_MAN_WRITE 1 | ||
234 | #define MACB_MAN_READ 2 | ||
235 | #define MACB_MAN_CODE 2 | ||
236 | |||
237 | /* Bit manipulation macros */ | ||
238 | #define MACB_BIT(name) \ | ||
239 | (1 << MACB_##name##_OFFSET) | ||
240 | #define MACB_BF(name,value) \ | ||
241 | (((value) & ((1 << MACB_##name##_SIZE) - 1)) \ | ||
242 | << MACB_##name##_OFFSET) | ||
243 | #define MACB_BFEXT(name,value)\ | ||
244 | (((value) >> MACB_##name##_OFFSET) \ | ||
245 | & ((1 << MACB_##name##_SIZE) - 1)) | ||
246 | #define MACB_BFINS(name,value,old) \ | ||
247 | (((old) & ~(((1 << MACB_##name##_SIZE) - 1) \ | ||
248 | << MACB_##name##_OFFSET)) \ | ||
249 | | MACB_BF(name,value)) | ||
250 | |||
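The field macros above compose as follows; a minimal sketch assuming kernel types and this header, with a hypothetical function name:

    #include "macb.h"

    static u32 macb_example_ncfgr(void)
    {
            /* Build an NCFGR value: CLK field set to divide-by-32, full duplex on */
            u32 ncfgr = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(FD);

            /* Extract the CLK field again: yields MACB_CLK_DIV32 */
            u32 clk_div = MACB_BFEXT(CLK, ncfgr);

            /* Replace only the CLK field, leaving the other bits untouched */
            ncfgr = MACB_BFINS(CLK, MACB_CLK_DIV64, ncfgr);

            (void)clk_div;
            return ncfgr;
    }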
251 | /* Register access macros */ | ||
252 | #define macb_readl(port,reg) \ | ||
253 | readl((port)->regs + MACB_##reg) | ||
254 | #define macb_writel(port,reg,value) \ | ||
255 | writel((value), (port)->regs + MACB_##reg) | ||
256 | |||
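Together with the MAN bitfields and MACB_MAN_* constants defined earlier, these accessors are what an MDIO (Clause 22) management transfer would be built from. A hedged sketch, not code from this patch:

    /* Start a PHY register read by shifting a management frame out of the MAN
     * register; 'bp' is a struct macb from this driver.  Completion would be
     * detected via the IDLE bit in NSR, after which the result sits in the
     * DATA field of MAN.
     */
    static void macb_example_mdio_read_start(struct macb *bp, int phy_id, int regnum)
    {
            macb_writel(bp, MAN,
                        MACB_BF(SOF, MACB_MAN_SOF)      /* start of frame */
                        | MACB_BF(RW, MACB_MAN_READ)    /* read operation */
                        | MACB_BF(PHYA, phy_id)         /* PHY address */
                        | MACB_BF(REGA, regnum)         /* register address */
                        | MACB_BF(CODE, MACB_MAN_CODE));
    }

    /* Later: u16 val = MACB_BFEXT(DATA, macb_readl(bp, MAN)); */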
257 | struct dma_desc { | ||
258 | u32 addr; | ||
259 | u32 ctrl; | ||
260 | }; | ||
261 | |||
262 | /* DMA descriptor bitfields */ | ||
263 | #define MACB_RX_USED_OFFSET 0 | ||
264 | #define MACB_RX_USED_SIZE 1 | ||
265 | #define MACB_RX_WRAP_OFFSET 1 | ||
266 | #define MACB_RX_WRAP_SIZE 1 | ||
267 | #define MACB_RX_WADDR_OFFSET 2 | ||
268 | #define MACB_RX_WADDR_SIZE 30 | ||
269 | |||
270 | #define MACB_RX_FRMLEN_OFFSET 0 | ||
271 | #define MACB_RX_FRMLEN_SIZE 12 | ||
272 | #define MACB_RX_OFFSET_OFFSET 12 | ||
273 | #define MACB_RX_OFFSET_SIZE 2 | ||
274 | #define MACB_RX_SOF_OFFSET 14 | ||
275 | #define MACB_RX_SOF_SIZE 1 | ||
276 | #define MACB_RX_EOF_OFFSET 15 | ||
277 | #define MACB_RX_EOF_SIZE 1 | ||
278 | #define MACB_RX_CFI_OFFSET 16 | ||
279 | #define MACB_RX_CFI_SIZE 1 | ||
280 | #define MACB_RX_VLAN_PRI_OFFSET 17 | ||
281 | #define MACB_RX_VLAN_PRI_SIZE 3 | ||
282 | #define MACB_RX_PRI_TAG_OFFSET 20 | ||
283 | #define MACB_RX_PRI_TAG_SIZE 1 | ||
284 | #define MACB_RX_VLAN_TAG_OFFSET 21 | ||
285 | #define MACB_RX_VLAN_TAG_SIZE 1 | ||
286 | #define MACB_RX_TYPEID_MATCH_OFFSET 22 | ||
287 | #define MACB_RX_TYPEID_MATCH_SIZE 1 | ||
288 | #define MACB_RX_SA4_MATCH_OFFSET 23 | ||
289 | #define MACB_RX_SA4_MATCH_SIZE 1 | ||
290 | #define MACB_RX_SA3_MATCH_OFFSET 24 | ||
291 | #define MACB_RX_SA3_MATCH_SIZE 1 | ||
292 | #define MACB_RX_SA2_MATCH_OFFSET 25 | ||
293 | #define MACB_RX_SA2_MATCH_SIZE 1 | ||
294 | #define MACB_RX_SA1_MATCH_OFFSET 26 | ||
295 | #define MACB_RX_SA1_MATCH_SIZE 1 | ||
296 | #define MACB_RX_EXT_MATCH_OFFSET 28 | ||
297 | #define MACB_RX_EXT_MATCH_SIZE 1 | ||
298 | #define MACB_RX_UHASH_MATCH_OFFSET 29 | ||
299 | #define MACB_RX_UHASH_MATCH_SIZE 1 | ||
300 | #define MACB_RX_MHASH_MATCH_OFFSET 30 | ||
301 | #define MACB_RX_MHASH_MATCH_SIZE 1 | ||
302 | #define MACB_RX_BROADCAST_OFFSET 31 | ||
303 | #define MACB_RX_BROADCAST_SIZE 1 | ||
304 | |||
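As an illustration of how the RX descriptor control word decodes with these definitions (a sketch with a hypothetical helper name, not code from the patch):

    /* Returns non-zero if the descriptor holds a complete frame (SOF and EOF
     * both set) and stores its length; MACB_RX_FRMLEN occupies bits 0-11.
     */
    static int macb_example_rx_frame(const struct dma_desc *desc, unsigned int *len)
    {
            if ((desc->ctrl & MACB_BIT(RX_SOF)) && (desc->ctrl & MACB_BIT(RX_EOF))) {
                    *len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
                    return 1;
            }
            return 0;
    }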
305 | #define MACB_TX_FRMLEN_OFFSET 0 | ||
306 | #define MACB_TX_FRMLEN_SIZE 11 | ||
307 | #define MACB_TX_LAST_OFFSET 15 | ||
308 | #define MACB_TX_LAST_SIZE 1 | ||
309 | #define MACB_TX_NOCRC_OFFSET 16 | ||
310 | #define MACB_TX_NOCRC_SIZE 1 | ||
311 | #define MACB_TX_BUF_EXHAUSTED_OFFSET 27 | ||
312 | #define MACB_TX_BUF_EXHAUSTED_SIZE 1 | ||
313 | #define MACB_TX_UNDERRUN_OFFSET 28 | ||
314 | #define MACB_TX_UNDERRUN_SIZE 1 | ||
315 | #define MACB_TX_ERROR_OFFSET 29 | ||
316 | #define MACB_TX_ERROR_SIZE 1 | ||
317 | #define MACB_TX_WRAP_OFFSET 30 | ||
318 | #define MACB_TX_WRAP_SIZE 1 | ||
319 | #define MACB_TX_USED_OFFSET 31 | ||
320 | #define MACB_TX_USED_SIZE 1 | ||
321 | |||
322 | struct ring_info { | ||
323 | struct sk_buff *skb; | ||
324 | dma_addr_t mapping; | ||
325 | }; | ||
326 | |||
327 | /* | ||
328 | * Hardware-collected statistics. Used when updating the network | ||
329 | * device stats by a periodic timer. | ||
330 | */ | ||
331 | struct macb_stats { | ||
332 | u32 rx_pause_frames; | ||
333 | u32 tx_ok; | ||
334 | u32 tx_single_cols; | ||
335 | u32 tx_multiple_cols; | ||
336 | u32 rx_ok; | ||
337 | u32 rx_fcs_errors; | ||
338 | u32 rx_align_errors; | ||
339 | u32 tx_deferred; | ||
340 | u32 tx_late_cols; | ||
341 | u32 tx_excessive_cols; | ||
342 | u32 tx_underruns; | ||
343 | u32 tx_carrier_errors; | ||
344 | u32 rx_resource_errors; | ||
345 | u32 rx_overruns; | ||
346 | u32 rx_symbol_errors; | ||
347 | u32 rx_oversize_pkts; | ||
348 | u32 rx_jabbers; | ||
349 | u32 rx_undersize_pkts; | ||
350 | u32 sqe_test_errors; | ||
351 | u32 rx_length_mismatch; | ||
352 | u32 tx_pause_frames; | ||
353 | }; | ||
354 | |||
355 | struct macb { | ||
356 | void __iomem *regs; | ||
357 | |||
358 | unsigned int rx_tail; | ||
359 | struct dma_desc *rx_ring; | ||
360 | void *rx_buffers; | ||
361 | |||
362 | unsigned int tx_head, tx_tail; | ||
363 | struct dma_desc *tx_ring; | ||
364 | struct ring_info *tx_skb; | ||
365 | |||
366 | spinlock_t lock; | ||
367 | struct platform_device *pdev; | ||
368 | struct clk *pclk; | ||
369 | struct clk *hclk; | ||
370 | struct net_device *dev; | ||
371 | struct net_device_stats stats; | ||
372 | struct macb_stats hw_stats; | ||
373 | |||
374 | dma_addr_t rx_ring_dma; | ||
375 | dma_addr_t tx_ring_dma; | ||
376 | dma_addr_t rx_buffers_dma; | ||
377 | |||
378 | unsigned int rx_pending, tx_pending; | ||
379 | |||
380 | struct work_struct periodic_task; | ||
381 | |||
382 | struct mutex mdio_mutex; | ||
383 | struct completion mdio_complete; | ||
384 | struct mii_if_info mii; | ||
385 | }; | ||
386 | |||
387 | #endif /* _MACB_H */ | ||
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index eb893d7e8834..38fd525f0f13 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c | |||
@@ -33,6 +33,8 @@ static const char version1[] = | |||
33 | #include <asm/io.h> | 33 | #include <asm/io.h> |
34 | #include <asm/irq.h> | 34 | #include <asm/irq.h> |
35 | 35 | ||
36 | #define EI_SHIFT(x) (ei_local->reg_offset[x]) | ||
37 | |||
36 | #include "8390.h" | 38 | #include "8390.h" |
37 | 39 | ||
38 | #define DRV_NAME "ne-h8300" | 40 | #define DRV_NAME "ne-h8300" |
@@ -52,6 +54,11 @@ static const char version1[] = | |||
52 | 54 | ||
53 | /* ---- No user-serviceable parts below ---- */ | 55 | /* ---- No user-serviceable parts below ---- */ |
54 | 56 | ||
57 | static const char version[] = | ||
58 | "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; | ||
59 | |||
60 | #include "lib8390.c" | ||
61 | |||
55 | #define NE_BASE (dev->base_addr) | 62 | #define NE_BASE (dev->base_addr) |
56 | #define NE_CMD 0x00 | 63 | #define NE_CMD 0x00 |
57 | #define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */ | 64 | #define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */ |
@@ -162,7 +169,7 @@ static void cleanup_card(struct net_device *dev) | |||
162 | #ifndef MODULE | 169 | #ifndef MODULE |
163 | struct net_device * __init ne_probe(int unit) | 170 | struct net_device * __init ne_probe(int unit) |
164 | { | 171 | { |
165 | struct net_device *dev = alloc_ei_netdev(); | 172 | struct net_device *dev = ____alloc_ei_netdev(0); |
166 | int err; | 173 | int err; |
167 | 174 | ||
168 | if (!dev) | 175 | if (!dev) |
@@ -283,7 +290,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) | |||
283 | 290 | ||
284 | /* Snarf the interrupt now. There's no point in waiting since we cannot | 291 | /* Snarf the interrupt now. There's no point in waiting since we cannot |
285 | share and the board will usually be enabled. */ | 292 | share and the board will usually be enabled. */ |
286 | ret = request_irq(dev->irq, ei_interrupt, 0, name, dev); | 293 | ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev); |
287 | if (ret) { | 294 | if (ret) { |
288 | printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); | 295 | printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); |
289 | goto err_out; | 296 | goto err_out; |
@@ -318,9 +325,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) | |||
318 | dev->open = &ne_open; | 325 | dev->open = &ne_open; |
319 | dev->stop = &ne_close; | 326 | dev->stop = &ne_close; |
320 | #ifdef CONFIG_NET_POLL_CONTROLLER | 327 | #ifdef CONFIG_NET_POLL_CONTROLLER |
321 | dev->poll_controller = ei_poll; | 328 | dev->poll_controller = __ei_poll; |
322 | #endif | 329 | #endif |
323 | NS8390_init(dev, 0); | 330 | __NS8390_init(dev, 0); |
324 | 331 | ||
325 | ret = register_netdev(dev); | 332 | ret = register_netdev(dev); |
326 | if (ret) | 333 | if (ret) |
@@ -335,7 +342,7 @@ err_out: | |||
335 | 342 | ||
336 | static int ne_open(struct net_device *dev) | 343 | static int ne_open(struct net_device *dev) |
337 | { | 344 | { |
338 | ei_open(dev); | 345 | __ei_open(dev); |
339 | return 0; | 346 | return 0; |
340 | } | 347 | } |
341 | 348 | ||
@@ -343,7 +350,7 @@ static int ne_close(struct net_device *dev) | |||
343 | { | 350 | { |
344 | if (ei_debug > 1) | 351 | if (ei_debug > 1) |
345 | printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); | 352 | printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); |
346 | ei_close(dev); | 353 | __ei_close(dev); |
347 | return 0; | 354 | return 0; |
348 | } | 355 | } |
349 | 356 | ||
@@ -584,7 +591,7 @@ retry: | |||
584 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ | 591 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
585 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); | 592 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); |
586 | ne_reset_8390(dev); | 593 | ne_reset_8390(dev); |
587 | NS8390_init(dev,1); | 594 | __NS8390_init(dev,1); |
588 | break; | 595 | break; |
589 | } | 596 | } |
590 | 597 | ||
@@ -620,7 +627,7 @@ int init_module(void) | |||
620 | int err; | 627 | int err; |
621 | 628 | ||
622 | for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { | 629 | for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { |
623 | struct net_device *dev = alloc_ei_netdev(); | 630 | struct net_device *dev = ____alloc_ei_netdev(0); |
624 | if (!dev) | 631 | if (!dev) |
625 | break; | 632 | break; |
626 | if (io[this_dev]) { | 633 | if (io[this_dev]) { |
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile new file mode 100644 index 000000000000..a07cdc6f7384 --- /dev/null +++ b/drivers/net/netxen/Makefile | |||
@@ -0,0 +1,35 @@ | |||
1 | # Copyright (C) 2003 - 2006 NetXen, Inc. | ||
2 | # All rights reserved. | ||
3 | # | ||
4 | # This program is free software; you can redistribute it and/or | ||
5 | # modify it under the terms of the GNU General Public License | ||
6 | # as published by the Free Software Foundation; either version 2 | ||
7 | # of the License, or (at your option) any later version. | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | # GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License | ||
15 | # along with this program; if not, write to the Free Software | ||
16 | # Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
17 | # MA 02111-1307, USA. | ||
18 | # | ||
19 | # The full GNU General Public License is included in this distribution | ||
20 | # in the file called LICENSE. | ||
21 | # | ||
22 | # Contact Information: | ||
23 | # info@netxen.com | ||
24 | # NetXen, | ||
25 | # 3965 Freedom Circle, Fourth floor, | ||
26 | # Santa Clara, CA 95054 | ||
27 | # | ||
28 | # Makefile for the NetXen NIC Driver | ||
29 | # | ||
30 | |||
31 | |||
32 | obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o | ||
33 | |||
34 | netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \ | ||
35 | netxen_nic_isr.o netxen_nic_ethtool.o netxen_nic_niu.o | ||
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h new file mode 100644 index 000000000000..d925053fe597 --- /dev/null +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -0,0 +1,1028 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | */ | ||
29 | |||
30 | #ifndef _NETXEN_NIC_H_ | ||
31 | #define _NETXEN_NIC_H_ | ||
32 | |||
33 | #include <linux/module.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/types.h> | ||
36 | #include <linux/compiler.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/ioport.h> | ||
41 | #include <linux/pci.h> | ||
42 | #include <linux/netdevice.h> | ||
43 | #include <linux/etherdevice.h> | ||
44 | #include <linux/ip.h> | ||
45 | #include <linux/in.h> | ||
46 | #include <linux/tcp.h> | ||
47 | #include <linux/skbuff.h> | ||
48 | #include <linux/version.h> | ||
49 | |||
50 | #include <linux/ethtool.h> | ||
51 | #include <linux/mii.h> | ||
52 | #include <linux/interrupt.h> | ||
53 | #include <linux/timer.h> | ||
54 | |||
55 | #include <linux/mm.h> | ||
56 | #include <linux/mman.h> | ||
57 | |||
58 | #include <asm/system.h> | ||
59 | #include <asm/io.h> | ||
60 | #include <asm/byteorder.h> | ||
61 | #include <asm/uaccess.h> | ||
62 | #include <asm/pgtable.h> | ||
63 | |||
64 | #include "netxen_nic_hw.h" | ||
65 | |||
66 | #define NETXEN_NIC_BUILD_NO "5" | ||
67 | #define _NETXEN_NIC_LINUX_MAJOR 2 | ||
68 | #define _NETXEN_NIC_LINUX_MINOR 3 | ||
69 | #define _NETXEN_NIC_LINUX_SUBVERSION 59 | ||
70 | #define NETXEN_NIC_LINUX_VERSIONID "2.3.59" "-" NETXEN_NIC_BUILD_NO | ||
71 | #define NETXEN_NIC_FW_VERSIONID "2.3.59" | ||
72 | |||
73 | #define RCV_DESC_RINGSIZE \ | ||
74 | (sizeof(struct rcv_desc) * adapter->max_rx_desc_count) | ||
75 | #define STATUS_DESC_RINGSIZE \ | ||
76 | (sizeof(struct status_desc)* adapter->max_rx_desc_count) | ||
77 | #define TX_RINGSIZE \ | ||
78 | (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count) | ||
79 | #define RCV_BUFFSIZE \ | ||
80 | (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count) | ||
81 | #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) | ||
82 | |||
83 | #define NETXEN_NETDEV_STATUS 0x1 | ||
84 | |||
85 | #define ADDR_IN_WINDOW1(off) \ | ||
86 | ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 | ||
87 | |||
88 | /* | ||
89 | * normalize a 64MB crb address to 32MB PCI window | ||
90 | * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1 | ||
91 | */ | ||
92 | #define NETXEN_CRB_NORMAL(reg) \ | ||
93 | (reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST | ||
94 | |||
95 | #define NETXEN_CRB_NORMALIZE(adapter, reg) \ | ||
96 | pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg)) | ||
97 | |||
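The normalization above is plain offset arithmetic: an address expressed relative to the 64MB HOST2 aperture is rebased into the 32MB HOST window before being turned into a mapped pointer. A generic sketch of the same arithmetic, with placeholder parameters since NETXEN_CRB_PCIX_HOST/HOST2 are defined in netxen_nic_hw.h:

    /* Same arithmetic as NETXEN_CRB_NORMAL(); the base values are placeholders. */
    static unsigned long netxen_example_normalize(unsigned long reg,
                                                  unsigned long host_base,
                                                  unsigned long host2_base)
    {
            return reg - host2_base + host_base;
    }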
98 | #define FIRST_PAGE_GROUP_START 0 | ||
99 | #define FIRST_PAGE_GROUP_END 0x400000 | ||
100 | |||
101 | #define SECOND_PAGE_GROUP_START 0x4000000 | ||
102 | #define SECOND_PAGE_GROUP_END 0x66BC000 | ||
103 | |||
104 | #define THIRD_PAGE_GROUP_START 0x70E4000 | ||
105 | #define THIRD_PAGE_GROUP_END 0x8000000 | ||
106 | |||
107 | #define FIRST_PAGE_GROUP_SIZE (FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START) | ||
108 | #define SECOND_PAGE_GROUP_SIZE (SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START) | ||
109 | #define THIRD_PAGE_GROUP_SIZE (THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START) | ||
110 | |||
111 | #define MAX_RX_BUFFER_LENGTH 2000 | ||
112 | #define MAX_RX_JUMBO_BUFFER_LENGTH 9046 | ||
113 | #define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - NET_IP_ALIGN) | ||
114 | #define RX_JUMBO_DMA_MAP_LEN \ | ||
115 | (MAX_RX_JUMBO_BUFFER_LENGTH - NET_IP_ALIGN) | ||
116 | #define NETXEN_ROM_ROUNDUP 0x80000000ULL | ||
117 | |||
118 | /* | ||
119 | * Maximum number of ring contexts | ||
120 | */ | ||
121 | #define MAX_RING_CTX 1 | ||
122 | |||
123 | /* Opcodes to be used with the commands */ | ||
124 | enum { | ||
125 | TX_ETHER_PKT = 0x01, | ||
126 | /* The following opcodes are for IP checksum */ | ||
127 | TX_TCP_PKT, | ||
128 | TX_UDP_PKT, | ||
129 | TX_IP_PKT, | ||
130 | TX_TCP_LSO, | ||
131 | TX_IPSEC, | ||
132 | TX_IPSEC_CMD | ||
133 | }; | ||
134 | |||
135 | /* The following opcodes are for internal consumption. */ | ||
136 | #define NETXEN_CONTROL_OP 0x10 | ||
137 | #define PEGNET_REQUEST 0x11 | ||
138 | |||
139 | #define MAX_NUM_CARDS 4 | ||
140 | |||
141 | #define MAX_BUFFERS_PER_CMD 32 | ||
142 | |||
143 | /* | ||
144 | * Following are the states of the Phantom. Phantom will set them and | ||
145 | * the Host will read them to check that initialization completed correctly. | ||
146 | */ | ||
147 | #define PHAN_INITIALIZE_START 0xff00 | ||
148 | #define PHAN_INITIALIZE_FAILED 0xffff | ||
149 | #define PHAN_INITIALIZE_COMPLETE 0xff01 | ||
150 | |||
151 | /* Host writes the following to notify that it has done the init-handshake */ | ||
152 | #define PHAN_INITIALIZE_ACK 0xf00f | ||
153 | |||
154 | #define NUM_RCV_DESC_RINGS 2 /* No of Rcv Descriptor contexts */ | ||
155 | |||
156 | /* descriptor types */ | ||
157 | #define RCV_DESC_NORMAL 0x01 | ||
158 | #define RCV_DESC_JUMBO 0x02 | ||
159 | #define RCV_DESC_NORMAL_CTXID 0 | ||
160 | #define RCV_DESC_JUMBO_CTXID 1 | ||
161 | |||
162 | #define RCV_DESC_TYPE(ID) \ | ||
163 | ((ID == RCV_DESC_JUMBO_CTXID) ? RCV_DESC_JUMBO : RCV_DESC_NORMAL) | ||
164 | |||
165 | #define MAX_CMD_DESCRIPTORS 1024 | ||
166 | #define MAX_RCV_DESCRIPTORS 32768 | ||
167 | #define MAX_JUMBO_RCV_DESCRIPTORS 1024 | ||
168 | #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS | ||
169 | #define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS | ||
170 | #define MAX_RCV_DESC MAX_RCV_DESCRIPTORS | ||
171 | #define MAX_RCVSTATUS_DESC MAX_RCV_DESCRIPTORS | ||
172 | #define NUM_RCV_DESC (MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS) | ||
173 | #define MAX_EPG_DESCRIPTORS (MAX_CMD_DESCRIPTORS * 8) | ||
174 | |||
175 | #define MIN_TX_COUNT 4096 | ||
176 | #define MIN_RX_COUNT 4096 | ||
177 | |||
178 | #define MAX_FRAME_SIZE 0x10000 /* 64K MAX size for LSO */ | ||
179 | |||
180 | #define PHAN_PEG_RCV_INITIALIZED 0xff01 | ||
181 | #define PHAN_PEG_RCV_START_INITIALIZE 0xff00 | ||
182 | |||
183 | #define get_next_index(index, length) \ | ||
184 | (((index) + 1) & ((length) - 1)) | ||
185 | |||
186 | #define get_index_range(index,length,count) \ | ||
187 | (((index) + (count)) & ((length) - 1)) | ||
188 | |||
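Both index helpers rely on the ring length being a power of two, so the mask (length - 1) implements wrap-around. A quick illustration with a hypothetical function name:

    static void netxen_example_ring_index(void)
    {
            unsigned int next = get_next_index(1023, 1024);       /* (1023 + 1) & 1023 == 0 */
            unsigned int far  = get_index_range(1020, 1024, 8);   /* (1020 + 8) & 1023 == 4 */

            (void)next;
            (void)far;
    }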
189 | /* | ||
190 | * Following data structures describe the descriptors that will be used. | ||
191 | * Added fields tcpHdrSize and ipHdrSize; the driver needs to fill them only | ||
192 | * when doing LSO (i.e. for packets larger than the standard 1500-byte size). | ||
193 | */ | ||
194 | |||
195 | /* | ||
196 | * The size of the reference handle has been changed to 16 bits to pass the | ||
197 | * MSS field for LSO packets. | ||
198 | */ | ||
199 | |||
200 | #define FLAGS_CHECKSUM_ENABLED 0x01 | ||
201 | #define FLAGS_LSO_ENABLED 0x02 | ||
202 | #define FLAGS_IPSEC_SA_ADD 0x04 | ||
203 | #define FLAGS_IPSEC_SA_DELETE 0x08 | ||
204 | #define FLAGS_VLAN_TAGGED 0x10 | ||
205 | |||
206 | #define CMD_DESC_TOTAL_LENGTH(cmd_desc) \ | ||
207 | ((cmd_desc)->length_tcp_hdr & 0x00FFFFFF) | ||
208 | #define CMD_DESC_TCP_HDR_OFFSET(cmd_desc) \ | ||
209 | (((cmd_desc)->length_tcp_hdr >> 24) & 0x0FF) | ||
210 | #define CMD_DESC_PORT(cmd_desc) ((cmd_desc)->port_ctxid & 0x0F) | ||
211 | #define CMD_DESC_CTX_ID(cmd_desc) (((cmd_desc)->port_ctxid >> 4) & 0x0F) | ||
212 | |||
213 | #define CMD_DESC_TOTAL_LENGTH_WRT(cmd_desc, var) \ | ||
214 | ((cmd_desc)->length_tcp_hdr |= ((var) & 0x00FFFFFF)) | ||
215 | #define CMD_DESC_TCP_HDR_OFFSET_WRT(cmd_desc, var) \ | ||
216 | ((cmd_desc)->length_tcp_hdr |= (((var) << 24) & 0xFF000000)) | ||
217 | #define CMD_DESC_PORT_WRT(cmd_desc, var) \ | ||
218 | ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) | ||
219 | |||
220 | struct cmd_desc_type0 { | ||
221 | u64 netxen_next; /* for fragments handled by Phantom */ | ||
222 | union { | ||
223 | struct { | ||
224 | u32 addr_low_part2; | ||
225 | u32 addr_high_part2; | ||
226 | }; | ||
227 | u64 addr_buffer2; | ||
228 | }; | ||
229 | |||
230 | /* Bit pattern: bits 0-23 total length, bits 24-31 tcp header offset */ | ||
231 | u32 length_tcp_hdr; | ||
232 | u8 ip_hdr_offset; /* For LSO only */ | ||
233 | u8 num_of_buffers; /* total number of segments */ | ||
234 | u8 flags; /* as defined above */ | ||
235 | u8 opcode; | ||
236 | |||
237 | u16 reference_handle; /* changed to u16 to add mss */ | ||
238 | u16 mss; /* passed by NDIS_PACKET for LSO */ | ||
239 | /* Bit pattern 0-3 port, 0-3 ctx id */ | ||
240 | u8 port_ctxid; | ||
241 | u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ | ||
242 | u16 conn_id; /* IPSec offload only */ | ||
243 | |||
244 | union { | ||
245 | struct { | ||
246 | u32 addr_low_part3; | ||
247 | u32 addr_high_part3; | ||
248 | }; | ||
249 | u64 addr_buffer3; | ||
250 | }; | ||
251 | |||
252 | union { | ||
253 | struct { | ||
254 | u32 addr_low_part1; | ||
255 | u32 addr_high_part1; | ||
256 | }; | ||
257 | u64 addr_buffer1; | ||
258 | }; | ||
259 | |||
260 | u16 buffer1_length; | ||
261 | u16 buffer2_length; | ||
262 | u16 buffer3_length; | ||
263 | u16 buffer4_length; | ||
264 | |||
265 | union { | ||
266 | struct { | ||
267 | u32 addr_low_part4; | ||
268 | u32 addr_high_part4; | ||
269 | }; | ||
270 | u64 addr_buffer4; | ||
271 | }; | ||
272 | |||
273 | } __attribute__ ((aligned(64))); | ||
274 | |||
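The accessor macros above pack the length/offset pair into length_tcp_hdr and the port/context pair into port_ctxid. A sketch of filling a descriptor with them (assumes <linux/string.h> for memset and a zero-initialized descriptor, since the _WRT variants only OR bits in):

    #include <linux/string.h>

    static void netxen_example_fill_cmd_desc(struct cmd_desc_type0 *desc,
                                             u32 total_len, u32 tcp_hdr_off, u32 port)
    {
            memset(desc, 0, sizeof(*desc));

            CMD_DESC_TOTAL_LENGTH_WRT(desc, total_len);     /* bits 0-23 of length_tcp_hdr */
            CMD_DESC_TCP_HDR_OFFSET_WRT(desc, tcp_hdr_off); /* bits 24-31 of length_tcp_hdr */
            CMD_DESC_PORT_WRT(desc, port);                  /* low nibble of port_ctxid */

            /* Read-back accessors:
             *   CMD_DESC_TOTAL_LENGTH(desc)   == total_len & 0x00FFFFFF
             *   CMD_DESC_TCP_HDR_OFFSET(desc) == tcp_hdr_off & 0xFF
             *   CMD_DESC_PORT(desc)           == port & 0x0F
             */
    }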
275 | /* Note: sizeof(rcv_desc) should always be a multiple of 2 */ | ||
276 | struct rcv_desc { | ||
277 | u16 reference_handle; | ||
278 | u16 reserved; | ||
279 | u32 buffer_length; /* allocated buffer length (usually 2K) */ | ||
280 | u64 addr_buffer; | ||
281 | }; | ||
282 | |||
283 | /* opcode field in status_desc */ | ||
284 | #define RCV_NIC_PKT (0xA) | ||
285 | #define STATUS_NIC_PKT ((RCV_NIC_PKT) << 12) | ||
286 | |||
287 | /* for status field in status_desc */ | ||
288 | #define STATUS_NEED_CKSUM (1) | ||
289 | #define STATUS_CKSUM_OK (2) | ||
290 | |||
291 | /* owner bits of status_desc */ | ||
292 | #define STATUS_OWNER_HOST (0x1) | ||
293 | #define STATUS_OWNER_PHANTOM (0x2) | ||
294 | |||
295 | #define NETXEN_PROT_IP (1) | ||
296 | #define NETXEN_PROT_UNKNOWN (0) | ||
297 | |||
298 | /* Note: sizeof(status_desc) should always be a multiple of 2 */ | ||
299 | #define STATUS_DESC_PORT(status_desc) \ | ||
300 | ((status_desc)->port_status_type_op & 0x0F) | ||
301 | #define STATUS_DESC_STATUS(status_desc) \ | ||
302 | (((status_desc)->port_status_type_op >> 4) & 0x0F) | ||
303 | #define STATUS_DESC_TYPE(status_desc) \ | ||
304 | (((status_desc)->port_status_type_op >> 8) & 0x0F) | ||
305 | #define STATUS_DESC_OPCODE(status_desc) \ | ||
306 | (((status_desc)->port_status_type_op >> 12) & 0x0F) | ||
307 | |||
308 | struct status_desc { | ||
309 | /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-15 opcode */ | ||
310 | u16 port_status_type_op; | ||
311 | u16 total_length; /* NIC mode */ | ||
312 | u16 reference_handle; /* handle for the associated packet */ | ||
313 | /* Bit pattern: 0-1 owner, 2-5 protocol */ | ||
314 | u16 owner; /* Owner of the descriptor */ | ||
315 | } __attribute__ ((aligned(8))); | ||
316 | |||
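Decoding a completed status descriptor with the STATUS_DESC_* accessors might look like the following sketch (hypothetical helper, not from the patch):

    /* Non-zero if this is a NIC receive completion whose checksum state is known. */
    static int netxen_example_rx_status(const struct status_desc *sts)
    {
            if (STATUS_DESC_OPCODE(sts) != RCV_NIC_PKT)
                    return 0;       /* not a NIC packet completion */

            /* port_status_type_op packs port (0-3), status (4-7), type (8-11), opcode (12-15) */
            return STATUS_DESC_STATUS(sts) == STATUS_CKSUM_OK ||
                   STATUS_DESC_STATUS(sts) == STATUS_NEED_CKSUM;
    }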
317 | enum { | ||
318 | NETXEN_RCV_PEG_0 = 0, | ||
319 | NETXEN_RCV_PEG_1 | ||
320 | }; | ||
321 | /* The version of the main data structure */ | ||
322 | #define NETXEN_BDINFO_VERSION 1 | ||
323 | |||
324 | /* Magic number to let user know flash is programmed */ | ||
325 | #define NETXEN_BDINFO_MAGIC 0x12345678 | ||
326 | |||
327 | /* Max number of Gig ports on a Phantom board */ | ||
328 | #define NETXEN_MAX_PORTS 4 | ||
329 | |||
330 | typedef enum { | ||
331 | NETXEN_BRDTYPE_P1_BD = 0x0000, | ||
332 | NETXEN_BRDTYPE_P1_SB = 0x0001, | ||
333 | NETXEN_BRDTYPE_P1_SMAX = 0x0002, | ||
334 | NETXEN_BRDTYPE_P1_SOCK = 0x0003, | ||
335 | |||
336 | NETXEN_BRDTYPE_P2_SOCK_31 = 0x0008, | ||
337 | NETXEN_BRDTYPE_P2_SOCK_35 = 0x0009, | ||
338 | NETXEN_BRDTYPE_P2_SB35_4G = 0x000a, | ||
339 | NETXEN_BRDTYPE_P2_SB31_10G = 0x000b, | ||
340 | NETXEN_BRDTYPE_P2_SB31_2G = 0x000c, | ||
341 | |||
342 | NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d, | ||
343 | NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e, | ||
344 | NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f | ||
345 | } netxen_brdtype_t; | ||
346 | |||
347 | typedef enum { | ||
348 | NETXEN_BRDMFG_INVENTEC = 1 | ||
349 | } netxen_brdmfg; | ||
350 | |||
351 | typedef enum { | ||
352 | MEM_ORG_128Mbx4 = 0x0, /* DDR1 only */ | ||
353 | MEM_ORG_128Mbx8 = 0x1, /* DDR1 only */ | ||
354 | MEM_ORG_128Mbx16 = 0x2, /* DDR1 only */ | ||
355 | MEM_ORG_256Mbx4 = 0x3, | ||
356 | MEM_ORG_256Mbx8 = 0x4, | ||
357 | MEM_ORG_256Mbx16 = 0x5, | ||
358 | MEM_ORG_512Mbx4 = 0x6, | ||
359 | MEM_ORG_512Mbx8 = 0x7, | ||
360 | MEM_ORG_512Mbx16 = 0x8, | ||
361 | MEM_ORG_1Gbx4 = 0x9, | ||
362 | MEM_ORG_1Gbx8 = 0xa, | ||
363 | MEM_ORG_1Gbx16 = 0xb, | ||
364 | MEM_ORG_2Gbx4 = 0xc, | ||
365 | MEM_ORG_2Gbx8 = 0xd, | ||
366 | MEM_ORG_2Gbx16 = 0xe, | ||
367 | MEM_ORG_128Mbx32 = 0x10002, /* GDDR only */ | ||
368 | MEM_ORG_256Mbx32 = 0x10005 /* GDDR only */ | ||
369 | } netxen_mn_mem_org_t; | ||
370 | |||
371 | typedef enum { | ||
372 | MEM_ORG_512Kx36 = 0x0, | ||
373 | MEM_ORG_1Mx36 = 0x1, | ||
374 | MEM_ORG_2Mx36 = 0x2 | ||
375 | } netxen_sn_mem_org_t; | ||
376 | |||
377 | typedef enum { | ||
378 | MEM_DEPTH_4MB = 0x1, | ||
379 | MEM_DEPTH_8MB = 0x2, | ||
380 | MEM_DEPTH_16MB = 0x3, | ||
381 | MEM_DEPTH_32MB = 0x4, | ||
382 | MEM_DEPTH_64MB = 0x5, | ||
383 | MEM_DEPTH_128MB = 0x6, | ||
384 | MEM_DEPTH_256MB = 0x7, | ||
385 | MEM_DEPTH_512MB = 0x8, | ||
386 | MEM_DEPTH_1GB = 0x9, | ||
387 | MEM_DEPTH_2GB = 0xa, | ||
388 | MEM_DEPTH_4GB = 0xb, | ||
389 | MEM_DEPTH_8GB = 0xc, | ||
390 | MEM_DEPTH_16GB = 0xd, | ||
391 | MEM_DEPTH_32GB = 0xe | ||
392 | } netxen_mem_depth_t; | ||
393 | |||
394 | struct netxen_board_info { | ||
395 | u32 header_version; | ||
396 | |||
397 | u32 board_mfg; | ||
398 | u32 board_type; | ||
399 | u32 board_num; | ||
400 | u32 chip_id; | ||
401 | u32 chip_minor; | ||
402 | u32 chip_major; | ||
403 | u32 chip_pkg; | ||
404 | u32 chip_lot; | ||
405 | |||
406 | u32 port_mask; /* available niu ports */ | ||
407 | u32 peg_mask; /* available pegs */ | ||
408 | u32 icache_ok; /* can we run with icache? */ | ||
409 | u32 dcache_ok; /* can we run with dcache? */ | ||
410 | u32 casper_ok; | ||
411 | |||
412 | u32 mac_addr_lo_0; | ||
413 | u32 mac_addr_lo_1; | ||
414 | u32 mac_addr_lo_2; | ||
415 | u32 mac_addr_lo_3; | ||
416 | |||
417 | /* MN-related config */ | ||
418 | u32 mn_sync_mode; /* enable/ sync shift cclk/ sync shift mclk */ | ||
419 | u32 mn_sync_shift_cclk; | ||
420 | u32 mn_sync_shift_mclk; | ||
421 | u32 mn_wb_en; | ||
422 | u32 mn_crystal_freq; /* in MHz */ | ||
423 | u32 mn_speed; /* in MHz */ | ||
424 | u32 mn_org; | ||
425 | u32 mn_depth; | ||
426 | u32 mn_ranks_0; /* ranks per slot */ | ||
427 | u32 mn_ranks_1; /* ranks per slot */ | ||
428 | u32 mn_rd_latency_0; | ||
429 | u32 mn_rd_latency_1; | ||
430 | u32 mn_rd_latency_2; | ||
431 | u32 mn_rd_latency_3; | ||
432 | u32 mn_rd_latency_4; | ||
433 | u32 mn_rd_latency_5; | ||
434 | u32 mn_rd_latency_6; | ||
435 | u32 mn_rd_latency_7; | ||
436 | u32 mn_rd_latency_8; | ||
437 | u32 mn_dll_val[18]; | ||
438 | u32 mn_mode_reg; /* MIU DDR Mode Register */ | ||
439 | u32 mn_ext_mode_reg; /* MIU DDR Extended Mode Register */ | ||
440 | u32 mn_timing_0; /* MIU Memory Control Timing Register */ | ||
441 | u32 mn_timing_1; /* MIU Extended Memory Ctrl Timing Register */ | ||
442 | u32 mn_timing_2; /* MIU Extended Memory Ctrl Timing2 Register */ | ||
443 | |||
444 | /* SN-related config */ | ||
445 | u32 sn_sync_mode; /* enable/ sync shift cclk / sync shift mclk */ | ||
446 | u32 sn_pt_mode; /* pass through mode */ | ||
447 | u32 sn_ecc_en; | ||
448 | u32 sn_wb_en; | ||
449 | u32 sn_crystal_freq; | ||
450 | u32 sn_speed; | ||
451 | u32 sn_org; | ||
452 | u32 sn_depth; | ||
453 | u32 sn_dll_tap; | ||
454 | u32 sn_rd_latency; | ||
455 | |||
456 | u32 mac_addr_hi_0; | ||
457 | u32 mac_addr_hi_1; | ||
458 | u32 mac_addr_hi_2; | ||
459 | u32 mac_addr_hi_3; | ||
460 | |||
461 | u32 magic; /* indicates flash has been initialized */ | ||
462 | |||
463 | u32 mn_rdimm; | ||
464 | u32 mn_dll_override; | ||
465 | |||
466 | }; | ||
467 | |||
468 | #define FLASH_NUM_PORTS (4) | ||
469 | |||
470 | struct netxen_flash_mac_addr { | ||
471 | u32 flash_addr[32]; | ||
472 | }; | ||
473 | |||
474 | struct netxen_user_old_info { | ||
475 | u8 flash_md5[16]; | ||
476 | u8 crbinit_md5[16]; | ||
477 | u8 brdcfg_md5[16]; | ||
478 | /* bootloader */ | ||
479 | u32 bootld_version; | ||
480 | u32 bootld_size; | ||
481 | u8 bootld_md5[16]; | ||
482 | /* image */ | ||
483 | u32 image_version; | ||
484 | u32 image_size; | ||
485 | u8 image_md5[16]; | ||
486 | /* primary image status */ | ||
487 | u32 primary_status; | ||
488 | u32 secondary_present; | ||
489 | |||
490 | /* MAC address , 4 ports */ | ||
491 | struct netxen_flash_mac_addr mac_addr[FLASH_NUM_PORTS]; | ||
492 | }; | ||
493 | #define FLASH_NUM_MAC_PER_PORT 32 | ||
494 | struct netxen_user_info { | ||
495 | u8 flash_md5[16 * 64]; | ||
496 | /* bootloader */ | ||
497 | u32 bootld_version; | ||
498 | u32 bootld_size; | ||
499 | /* image */ | ||
500 | u32 image_version; | ||
501 | u32 image_size; | ||
502 | /* primary image status */ | ||
503 | u32 primary_status; | ||
504 | u32 secondary_present; | ||
505 | |||
506 | /* MAC address , 4 ports, 32 address per port */ | ||
507 | u64 mac_addr[FLASH_NUM_PORTS * FLASH_NUM_MAC_PER_PORT]; | ||
508 | u32 sub_sys_id; | ||
509 | u8 serial_num[32]; | ||
510 | |||
511 | /* Any user defined data */ | ||
512 | }; | ||
513 | |||
514 | /* | ||
515 | * Flash Layout - new format. | ||
516 | */ | ||
517 | struct netxen_new_user_info { | ||
518 | u8 flash_md5[16 * 64]; | ||
519 | /* bootloader */ | ||
520 | u32 bootld_version; | ||
521 | u32 bootld_size; | ||
522 | /* image */ | ||
523 | u32 image_version; | ||
524 | u32 image_size; | ||
525 | /* primary image status */ | ||
526 | u32 primary_status; | ||
527 | u32 secondary_present; | ||
528 | |||
529 | /* MAC address , 4 ports, 32 address per port */ | ||
530 | u64 mac_addr[FLASH_NUM_PORTS * FLASH_NUM_MAC_PER_PORT]; | ||
531 | u32 sub_sys_id; | ||
532 | u8 serial_num[32]; | ||
533 | |||
534 | /* Any user defined data */ | ||
535 | }; | ||
536 | |||
537 | #define SECONDARY_IMAGE_PRESENT 0xb3b4b5b6 | ||
538 | #define SECONDARY_IMAGE_ABSENT 0xffffffff | ||
539 | #define PRIMARY_IMAGE_GOOD 0x5a5a5a5a | ||
540 | #define PRIMARY_IMAGE_BAD 0xffffffff | ||
541 | |||
542 | /* Flash memory map */ | ||
543 | typedef enum { | ||
544 | CRBINIT_START = 0, /* Crbinit section */ | ||
545 | BRDCFG_START = 0x4000, /* board config */ | ||
546 | INITCODE_START = 0x6000, /* pegtune code */ | ||
547 | BOOTLD_START = 0x10000, /* bootld */ | ||
548 | IMAGE_START = 0x43000, /* compressed image */ | ||
549 | SECONDARY_START = 0x200000, /* backup images */ | ||
550 | PXE_START = 0x3E0000, /* user defined region */ | ||
551 | USER_START = 0x3E8000, /* User defined region for new boards */ | ||
552 | FIXED_START = 0x3F0000 /* backup of crbinit */ | ||
553 | } netxen_flash_map_t; | ||
554 | |||
555 | #define USER_START_OLD PXE_START /* for backward compatibility */ | ||
556 | |||
557 | #define FLASH_START (CRBINIT_START) | ||
558 | #define INIT_SECTOR (0) | ||
559 | #define PRIMARY_START (BOOTLD_START) | ||
560 | #define FLASH_CRBINIT_SIZE (0x4000) | ||
561 | #define FLASH_BRDCFG_SIZE (sizeof(struct netxen_board_info)) | ||
562 | #define FLASH_USER_SIZE (sizeof(netxen_user_info)/sizeof(u32)) | ||
563 | #define FLASH_SECONDARY_SIZE (USER_START-SECONDARY_START) | ||
564 | #define NUM_PRIMARY_SECTORS (0x20) | ||
565 | #define NUM_CONFIG_SECTORS (1) | ||
566 | #define PFX "netxen: " | ||
567 | |||
568 | /* Note: Make sure to not call this before adapter->port is valid */ | ||
569 | #if !defined(NETXEN_DEBUG) | ||
570 | #define DPRINTK(klevel, fmt, args...) do { \ | ||
571 | } while (0) | ||
572 | #else | ||
573 | #define DPRINTK(klevel, fmt, args...) do { \ | ||
574 | printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\ | ||
575 | (adapter != NULL && adapter->port != NULL && \ | ||
576 | adapter->port[0] != NULL && \ | ||
577 | adapter->port[0]->netdev != NULL) ? \ | ||
578 | adapter->port[0]->netdev->name : NULL, \ | ||
579 | ## args); } while(0) | ||
580 | #endif | ||
581 | |||
582 | /* Number of status descriptors to handle per interrupt */ | ||
583 | #define MAX_STATUS_HANDLE (128) | ||
584 | |||
585 | /* | ||
586 | * netxen_skb_frag{} contains the DMA mapping info for one SG list entry. This | ||
587 | * has to be freed when DMA is complete, and is part of netxen_cmd_buffer{}. | ||
588 | */ | ||
589 | struct netxen_skb_frag { | ||
590 | u64 dma; | ||
591 | u32 length; | ||
592 | }; | ||
593 | |||
594 | /* Following defines are for the state of the buffers */ | ||
595 | #define NETXEN_BUFFER_FREE 0 | ||
596 | #define NETXEN_BUFFER_BUSY 1 | ||
597 | |||
598 | /* | ||
599 | * There will be one netxen_buffer per skb packet. These will be | ||
600 | * used to save the dma info for pci_unmap_page() | ||
601 | */ | ||
602 | struct netxen_cmd_buffer { | ||
603 | struct sk_buff *skb; | ||
604 | struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; | ||
605 | u32 total_length; | ||
606 | u32 mss; | ||
607 | u16 port; | ||
608 | u8 cmd; | ||
609 | u8 frag_count; | ||
610 | unsigned long time_stamp; | ||
611 | u32 state; | ||
612 | u32 no_of_descriptors; | ||
613 | }; | ||
614 | |||
615 | /* In rx_buffer, we do not need multiple fragments since it is a single buffer */ | ||
616 | struct netxen_rx_buffer { | ||
617 | struct sk_buff *skb; | ||
618 | u64 dma; | ||
619 | u16 ref_handle; | ||
620 | u16 state; | ||
621 | }; | ||
622 | |||
623 | /* Board types */ | ||
624 | #define NETXEN_NIC_GBE 0x01 | ||
625 | #define NETXEN_NIC_XGBE 0x02 | ||
626 | |||
627 | /* | ||
628 | * One hardware_context{} per adapter | ||
629 | * contains interrupt info as well as shared hardware info. | ||
630 | */ | ||
631 | struct netxen_hardware_context { | ||
632 | struct pci_dev *pdev; | ||
633 | void __iomem *pci_base0; | ||
634 | void __iomem *pci_base1; | ||
635 | void __iomem *pci_base2; | ||
636 | |||
637 | u8 revision_id; | ||
638 | u16 board_type; | ||
639 | u16 max_ports; | ||
640 | struct netxen_board_info boardcfg; | ||
641 | u32 xg_linkup; | ||
642 | u32 qg_linksup; | ||
643 | /* Address of cmd ring in Phantom */ | ||
644 | struct cmd_desc_type0 *cmd_desc_head; | ||
645 | char *pauseaddr; | ||
646 | struct pci_dev *cmd_desc_pdev; | ||
647 | dma_addr_t cmd_desc_phys_addr; | ||
648 | dma_addr_t pause_physaddr; | ||
649 | struct pci_dev *pause_pdev; | ||
650 | struct netxen_adapter *adapter; | ||
651 | }; | ||
652 | |||
653 | #define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ | ||
654 | #define ETHERNET_FCS_SIZE 4 | ||
655 | |||
656 | struct netxen_adapter_stats { | ||
657 | u64 ints; | ||
658 | u64 hostints; | ||
659 | u64 otherints; | ||
660 | u64 process_rcv; | ||
661 | u64 process_xmit; | ||
662 | u64 noxmitdone; | ||
663 | u64 xmitcsummed; | ||
664 | u64 post_called; | ||
665 | u64 posted; | ||
666 | u64 lastposted; | ||
667 | u64 goodskbposts; | ||
668 | }; | ||
669 | |||
670 | /* | ||
671 | * Rcv Descriptor Context. One such context per Rcv Descriptor ring. There may | ||
672 | * be one ring for normal packets, one for jumbo frames, and possibly others. | ||
673 | */ | ||
674 | struct netxen_rcv_desc_ctx { | ||
675 | u32 flags; | ||
676 | u32 producer; | ||
677 | u32 rcv_pending; /* Num of bufs posted in phantom */ | ||
678 | u32 rcv_free; /* Num of bufs in free list */ | ||
679 | dma_addr_t phys_addr; | ||
680 | struct pci_dev *phys_pdev; | ||
681 | struct rcv_desc *desc_head; /* address of rx ring in Phantom */ | ||
682 | u32 max_rx_desc_count; | ||
683 | u32 dma_size; | ||
684 | u32 skb_size; | ||
685 | struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */ | ||
686 | int begin_alloc; | ||
687 | }; | ||
688 | |||
689 | /* | ||
690 | * Receive context. There is one such structure per instance of the | ||
691 | * receive processing. Any state information that is relevant to | ||
692 | * the receive processing must be kept in this structure. The global data may be | ||
693 | * present elsewhere. | ||
694 | */ | ||
695 | struct netxen_recv_context { | ||
696 | struct netxen_rcv_desc_ctx rcv_desc[NUM_RCV_DESC_RINGS]; | ||
697 | u32 status_rx_producer; | ||
698 | u32 status_rx_consumer; | ||
699 | dma_addr_t rcv_status_desc_phys_addr; | ||
700 | struct pci_dev *rcv_status_desc_pdev; | ||
701 | struct status_desc *rcv_status_desc_head; | ||
702 | }; | ||
703 | |||
704 | #define NETXEN_NIC_MSI_ENABLED 0x02 | ||
705 | |||
706 | struct netxen_drvops; | ||
707 | |||
708 | struct netxen_adapter { | ||
709 | struct netxen_hardware_context ahw; | ||
710 | int port_count; /* Number of configured ports */ | ||
711 | int active_ports; /* Number of open ports */ | ||
712 | struct netxen_port *port[NETXEN_MAX_PORTS]; /* ptr to each port */ | ||
713 | spinlock_t tx_lock; | ||
714 | spinlock_t lock; | ||
715 | struct work_struct watchdog_task; | ||
716 | struct work_struct tx_timeout_task; | ||
717 | struct timer_list watchdog_timer; | ||
718 | |||
719 | u32 curr_window; | ||
720 | |||
721 | u32 cmd_producer; | ||
722 | u32 cmd_consumer; | ||
723 | |||
724 | u32 last_cmd_consumer; | ||
725 | u32 max_tx_desc_count; | ||
726 | u32 max_rx_desc_count; | ||
727 | u32 max_jumbo_rx_desc_count; | ||
728 | /* Num of instances active on cmd buffer ring */ | ||
729 | u32 proc_cmd_buf_counter; | ||
730 | |||
731 | u32 num_threads, total_threads; /*Use to keep track of xmit threads */ | ||
732 | |||
733 | u32 flags; | ||
734 | u32 irq; | ||
735 | int driver_mismatch; | ||
736 | u32 temp; | ||
737 | |||
738 | struct netxen_adapter_stats stats; | ||
739 | |||
740 | struct netxen_cmd_buffer *cmd_buf_arr; /* Command buffers for xmit */ | ||
741 | |||
742 | /* | ||
743 | * Receive instances. These can be either one per port, | ||
744 | * or one per peg, etc. | ||
745 | */ | ||
746 | struct netxen_recv_context recv_ctx[MAX_RCV_CTX]; | ||
747 | |||
748 | int is_up; | ||
749 | int work_done; | ||
750 | struct netxen_drvops *ops; | ||
751 | }; /* netxen_adapter structure */ | ||
752 | |||
753 | /* Max number of xmit producer threads that can run simultaneously */ | ||
754 | #define MAX_XMIT_PRODUCERS 16 | ||
755 | |||
756 | struct netxen_port_stats { | ||
757 | u64 rcvdbadskb; | ||
758 | u64 xmitcalled; | ||
759 | u64 xmitedframes; | ||
760 | u64 xmitfinished; | ||
761 | u64 badskblen; | ||
762 | u64 nocmddescriptor; | ||
763 | u64 polled; | ||
764 | u64 uphappy; | ||
765 | u64 updropped; | ||
766 | u64 uplcong; | ||
767 | u64 uphcong; | ||
768 | u64 upmcong; | ||
769 | u64 updunno; | ||
770 | u64 skbfreed; | ||
771 | u64 txdropped; | ||
772 | u64 txnullskb; | ||
773 | u64 csummed; | ||
774 | u64 no_rcv; | ||
775 | u64 rxbytes; | ||
776 | u64 txbytes; | ||
777 | }; | ||
778 | |||
779 | struct netxen_port { | ||
780 | struct netxen_adapter *adapter; | ||
781 | |||
782 | u16 portnum; /* GBE port number */ | ||
783 | u16 link_speed; | ||
784 | u16 link_duplex; | ||
785 | u16 link_autoneg; | ||
786 | |||
787 | int flags; | ||
788 | |||
789 | struct net_device *netdev; | ||
790 | struct pci_dev *pdev; | ||
791 | struct net_device_stats net_stats; | ||
792 | struct netxen_port_stats stats; | ||
793 | }; | ||
794 | |||
795 | #define PCI_OFFSET_FIRST_RANGE(adapter, off) \ | ||
796 | ((adapter)->ahw.pci_base0 + (off)) | ||
797 | #define PCI_OFFSET_SECOND_RANGE(adapter, off) \ | ||
798 | ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START) | ||
799 | #define PCI_OFFSET_THIRD_RANGE(adapter, off) \ | ||
800 | ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START) | ||
801 | |||
802 | static inline void __iomem *pci_base_offset(struct netxen_adapter *adapter, | ||
803 | unsigned long off) | ||
804 | { | ||
805 | if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) { | ||
806 | return (adapter->ahw.pci_base0 + off); | ||
807 | } else if ((off < SECOND_PAGE_GROUP_END) && | ||
808 | (off >= SECOND_PAGE_GROUP_START)) { | ||
809 | return (adapter->ahw.pci_base1 + off - SECOND_PAGE_GROUP_START); | ||
810 | } else if ((off < THIRD_PAGE_GROUP_END) && | ||
811 | (off >= THIRD_PAGE_GROUP_START)) { | ||
812 | return (adapter->ahw.pci_base2 + off - THIRD_PAGE_GROUP_START); | ||
813 | } | ||
814 | return NULL; | ||
815 | } | ||
816 | |||
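The helper simply selects which of the three mapped BARs an absolute card offset falls into and applies the corresponding base: with the *_PAGE_GROUP_* limits above, 0x4000 resolves to pci_base0 + 0x4000, 0x5000000 to pci_base1 plus its offset into the second group, 0x70E4000 to the start of pci_base2, and anything outside the three windows yields NULL. A small usage sketch with a hypothetical function name:

    /* Sketch: read a register at the very start of the second page group. */
    static u32 netxen_example_read_second_group(struct netxen_adapter *adapter)
    {
            void __iomem *addr = pci_base_offset(adapter, SECOND_PAGE_GROUP_START);

            if (!addr)
                    return 0;       /* offset was outside all three windows */
            return readl(addr);
    }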
817 | static inline void __iomem *pci_base(struct netxen_adapter *adapter, | ||
818 | unsigned long off) | ||
819 | { | ||
820 | if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) { | ||
821 | return adapter->ahw.pci_base0; | ||
822 | } else if ((off < SECOND_PAGE_GROUP_END) && | ||
823 | (off >= SECOND_PAGE_GROUP_START)) { | ||
824 | return adapter->ahw.pci_base1; | ||
825 | } else if ((off < THIRD_PAGE_GROUP_END) && | ||
826 | (off >= THIRD_PAGE_GROUP_START)) { | ||
827 | return adapter->ahw.pci_base2; | ||
828 | } | ||
829 | return NULL; | ||
830 | } | ||
831 | |||
832 | struct netxen_drvops { | ||
833 | int (*enable_phy_interrupts) (struct netxen_adapter *, int); | ||
834 | int (*disable_phy_interrupts) (struct netxen_adapter *, int); | ||
835 | void (*handle_phy_intr) (struct netxen_adapter *); | ||
836 | int (*macaddr_set) (struct netxen_port *, netxen_ethernet_macaddr_t); | ||
837 | int (*set_mtu) (struct netxen_port *, int); | ||
838 | int (*set_promisc) (struct netxen_adapter *, int, | ||
839 | netxen_niu_prom_mode_t); | ||
840 | int (*unset_promisc) (struct netxen_adapter *, int, | ||
841 | netxen_niu_prom_mode_t); | ||
842 | int (*phy_read) (struct netxen_adapter *, long phy, long reg, u32 *); | ||
843 | int (*phy_write) (struct netxen_adapter *, long phy, long reg, u32 val); | ||
844 | int (*init_port) (struct netxen_adapter *, int); | ||
845 | void (*init_niu) (struct netxen_adapter *); | ||
846 | int (*stop_port) (struct netxen_adapter *, int); | ||
847 | }; | ||
848 | |||
849 | extern char netxen_nic_driver_name[]; | ||
850 | |||
851 | int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter, | ||
852 | int port); | ||
853 | int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter, | ||
854 | int port); | ||
855 | int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter, | ||
856 | int port); | ||
857 | int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter, | ||
858 | int port); | ||
859 | int netxen_niu_xgbe_clear_phy_interrupts(struct netxen_adapter *adapter, | ||
860 | int port); | ||
861 | int netxen_niu_gbe_clear_phy_interrupts(struct netxen_adapter *adapter, | ||
862 | int port); | ||
863 | void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter); | ||
864 | void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter); | ||
865 | void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter, int port, | ||
866 | long enable); | ||
867 | void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter, int port, | ||
868 | long enable); | ||
869 | int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy, long reg, | ||
870 | __le32 * readval); | ||
871 | int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long phy, | ||
872 | long reg, __le32 val); | ||
873 | |||
874 | /* Functions available from netxen_nic_hw.c */ | ||
875 | int netxen_nic_set_mtu_xgb(struct netxen_port *port, int new_mtu); | ||
876 | int netxen_nic_set_mtu_gb(struct netxen_port *port, int new_mtu); | ||
877 | void netxen_nic_init_niu_gb(struct netxen_adapter *adapter); | ||
878 | void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw); | ||
879 | void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val); | ||
880 | int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off); | ||
881 | void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value); | ||
882 | void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value); | ||
883 | |||
884 | int netxen_nic_get_board_info(struct netxen_adapter *adapter); | ||
885 | int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data, | ||
886 | int len); | ||
887 | int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data, | ||
888 | int len); | ||
889 | void netxen_crb_writelit_adapter(struct netxen_adapter *adapter, | ||
890 | unsigned long off, int data); | ||
891 | |||
892 | /* Functions from netxen_nic_init.c */ | ||
893 | void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); | ||
894 | void netxen_load_firmware(struct netxen_adapter *adapter); | ||
895 | int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); | ||
896 | int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); | ||
897 | int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data); | ||
898 | int netxen_rom_se(struct netxen_adapter *adapter, int addr); | ||
899 | int netxen_do_rom_se(struct netxen_adapter *adapter, int addr); | ||
900 | |||
901 | /* Functions from netxen_nic_isr.c */ | ||
902 | void netxen_nic_isr_other(struct netxen_adapter *adapter); | ||
903 | void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 port, | ||
904 | u32 link); | ||
905 | void netxen_handle_port_int(struct netxen_adapter *adapter, u32 port, | ||
906 | u32 enable); | ||
907 | void netxen_nic_stop_all_ports(struct netxen_adapter *adapter); | ||
908 | void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); | ||
909 | void netxen_initialize_adapter_hw(struct netxen_adapter *adapter); | ||
910 | void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, | ||
911 | struct pci_dev **used_dev); | ||
912 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter); | ||
913 | int netxen_init_firmware(struct netxen_adapter *adapter); | ||
914 | void netxen_free_hw_resources(struct netxen_adapter *adapter); | ||
915 | void netxen_tso_check(struct netxen_adapter *adapter, | ||
916 | struct cmd_desc_type0 *desc, struct sk_buff *skb); | ||
917 | int netxen_nic_hw_resources(struct netxen_adapter *adapter); | ||
918 | void netxen_nic_clear_stats(struct netxen_adapter *adapter); | ||
919 | int | ||
920 | netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data, | ||
921 | struct netxen_port *port); | ||
922 | int netxen_nic_rx_has_work(struct netxen_adapter *adapter); | ||
923 | int netxen_nic_tx_has_work(struct netxen_adapter *adapter); | ||
924 | void netxen_watchdog_task(unsigned long v); | ||
925 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, | ||
926 | u32 ringid); | ||
927 | void netxen_process_cmd_ring(unsigned long data); | ||
928 | u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); | ||
929 | void netxen_nic_set_multi(struct net_device *netdev); | ||
930 | int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); | ||
931 | int netxen_nic_set_mac(struct net_device *netdev, void *p); | ||
932 | struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev); | ||
933 | |||
934 | static inline void netxen_nic_disable_int(struct netxen_adapter *adapter) | ||
935 | { | ||
936 | /* | ||
937 | * ISR_INT_MASK: Can be read from window 0 or 1. | ||
938 | */ | ||
939 | writel(0x7ff, PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK)); | ||
940 | |||
941 | } | ||
942 | |||
943 | static inline void netxen_nic_enable_int(struct netxen_adapter *adapter) | ||
944 | { | ||
945 | u32 mask; | ||
946 | |||
947 | switch (adapter->ahw.board_type) { | ||
948 | case NETXEN_NIC_GBE: | ||
949 | mask = 0x77b; | ||
950 | break; | ||
951 | case NETXEN_NIC_XGBE: | ||
952 | mask = 0x77f; | ||
953 | break; | ||
954 | default: | ||
955 | mask = 0x7ff; | ||
956 | break; | ||
957 | } | ||
958 | |||
959 | writel(mask, PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK)); | ||
960 | |||
961 | if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { | ||
962 | mask = 0xbff; | ||
963 | writel(mask, PCI_OFFSET_SECOND_RANGE(adapter, | ||
964 | ISR_INT_TARGET_MASK)); | ||
965 | } | ||
966 | } | ||
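/*
 * Illustrative usage only (the real interrupt paths live elsewhere in the
 * driver): a handler would typically mask the chip with
 * netxen_nic_disable_int() before servicing the rings and re-arm it with
 * netxen_nic_enable_int() once done, e.g.
 *
 *	netxen_nic_disable_int(adapter);
 *	... process rx/tx completions ...
 *	netxen_nic_enable_int(adapter);
 */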
967 | |||
968 | /* | ||
969 | * NetXen Board information | ||
970 | */ | ||
971 | |||
972 | #define NETXEN_MAX_SHORT_NAME 16 | ||
973 | struct netxen_brdinfo { | ||
974 | netxen_brdtype_t brdtype; /* type of board */ | ||
975 | long ports; /* max no of physical ports */ | ||
976 | char short_name[NETXEN_MAX_SHORT_NAME]; | ||
977 | }; | ||
978 | |||
979 | static const struct netxen_brdinfo netxen_boards[] = { | ||
980 | {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"}, | ||
981 | {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"}, | ||
982 | {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"}, | ||
983 | {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"}, | ||
984 | {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"}, | ||
985 | {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, | ||
986 | }; | ||
987 | |||
988 | #define NUM_SUPPORTED_BOARDS (sizeof(netxen_boards)/sizeof(struct netxen_brdinfo)) | ||
989 | |||
990 | static inline void get_brd_port_by_type(u32 type, int *ports) | ||
991 | { | ||
992 | int i, found = 0; | ||
993 | for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { | ||
994 | if (netxen_boards[i].brdtype == type) { | ||
995 | *ports = netxen_boards[i].ports; | ||
996 | found = 1; | ||
997 | break; | ||
998 | } | ||
999 | } | ||
1000 | if (!found) | ||
1001 | *ports = 0; | ||
1002 | } | ||
1003 | |||
1004 | static inline void get_brd_name_by_type(u32 type, char *name) | ||
1005 | { | ||
1006 | int i, found = 0; | ||
1007 | for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { | ||
1008 | if (netxen_boards[i].brdtype == type) { | ||
1009 | strcpy(name, netxen_boards[i].short_name); | ||
1010 | found = 1; | ||
1011 | break; | ||
1012 | } | ||
1013 | |||
1014 | } | ||
1015 | if (!found) | ||
1016 | strcpy(name, "Unknown"); | ||
1017 | } | ||
1018 | |||
1019 | int netxen_is_flash_supported(struct netxen_adapter *adapter); | ||
1020 | int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[]); | ||
1021 | |||
1022 | extern void netxen_change_ringparam(struct netxen_adapter *adapter); | ||
1023 | extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, | ||
1024 | int *valp); | ||
1025 | |||
1026 | extern struct ethtool_ops netxen_nic_ethtool_ops; | ||
1027 | |||
1028 | #endif /* __NETXEN_NIC_H_ */ | ||
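As a rough illustration of how the board-table helpers declared above are meant to be used (a sketch only; the variable names are hypothetical and the real probe path lives elsewhere in the driver):

	char brd_name[NETXEN_MAX_SHORT_NAME];
	int nports;

	get_brd_name_by_type(adapter->ahw.boardcfg.board_type, brd_name);
	get_brd_port_by_type(adapter->ahw.boardcfg.board_type, &nports);
	printk(KERN_INFO "netxen: %s board, %d port(s)\n", brd_name, nports);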
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c new file mode 100644 index 000000000000..9a914aeba5bc --- /dev/null +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
@@ -0,0 +1,741 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | * | ||
29 | * | ||
30 | * ethtool support for netxen nic | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/types.h> | ||
35 | #include <asm/uaccess.h> | ||
36 | #include <linux/pci.h> | ||
37 | #include <asm/io.h> | ||
38 | #include <linux/netdevice.h> | ||
39 | #include <linux/ethtool.h> | ||
40 | #include <linux/version.h> | ||
41 | |||
42 | #include "netxen_nic_hw.h" | ||
43 | #include "netxen_nic.h" | ||
44 | #include "netxen_nic_phan_reg.h" | ||
45 | #include "netxen_nic_ioctl.h" | ||
46 | |||
47 | struct netxen_nic_stats { | ||
48 | char stat_string[ETH_GSTRING_LEN]; | ||
49 | int sizeof_stat; | ||
50 | int stat_offset; | ||
51 | }; | ||
52 | |||
53 | #define NETXEN_NIC_STAT(m) sizeof(((struct netxen_port *)0)->m), \ | ||
54 | offsetof(struct netxen_port, m) | ||
55 | |||
56 | #define NETXEN_NIC_PORT_WINDOW 0x10000 | ||
57 | #define NETXEN_NIC_INVALID_DATA 0xDEADBEEF | ||
58 | |||
59 | static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = { | ||
60 | {"rcvd_bad_skb", NETXEN_NIC_STAT(stats.rcvdbadskb)}, | ||
61 | {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)}, | ||
62 | {"xmited_frames", NETXEN_NIC_STAT(stats.xmitedframes)}, | ||
63 | {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)}, | ||
64 | {"bad_skb_len", NETXEN_NIC_STAT(stats.badskblen)}, | ||
65 | {"no_cmd_desc", NETXEN_NIC_STAT(stats.nocmddescriptor)}, | ||
66 | {"polled", NETXEN_NIC_STAT(stats.polled)}, | ||
67 | {"uphappy", NETXEN_NIC_STAT(stats.uphappy)}, | ||
68 | {"updropped", NETXEN_NIC_STAT(stats.updropped)}, | ||
69 | {"uplcong", NETXEN_NIC_STAT(stats.uplcong)}, | ||
70 | {"uphcong", NETXEN_NIC_STAT(stats.uphcong)}, | ||
71 | {"upmcong", NETXEN_NIC_STAT(stats.upmcong)}, | ||
72 | {"updunno", NETXEN_NIC_STAT(stats.updunno)}, | ||
73 | {"skb_freed", NETXEN_NIC_STAT(stats.skbfreed)}, | ||
74 | {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, | ||
75 | {"tx_null_skb", NETXEN_NIC_STAT(stats.txnullskb)}, | ||
76 | {"csummed", NETXEN_NIC_STAT(stats.csummed)}, | ||
77 | {"no_rcv", NETXEN_NIC_STAT(stats.no_rcv)}, | ||
78 | {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)}, | ||
79 | {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)}, | ||
80 | }; | ||
81 | |||
82 | #define NETXEN_NIC_STATS_LEN \ | ||
83 | sizeof(netxen_nic_gstrings_stats) / sizeof(struct netxen_nic_stats) | ||
84 | |||
85 | static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = { | ||
86 | "Register_Test_offline", "EEPROM_Test_offline", | ||
87 | "Interrupt_Test_offline", "Loopback_Test_offline", | ||
88 | "Link_Test_on_offline" | ||
89 | }; | ||
90 | |||
91 | #define NETXEN_NIC_TEST_LEN sizeof(netxen_nic_gstrings_test) / ETH_GSTRING_LEN | ||
92 | |||
93 | #define NETXEN_NIC_REGS_COUNT 42 | ||
94 | #define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) | ||
95 | #define NETXEN_MAX_EEPROM_LEN 1024 | ||
96 | |||
97 | static int netxen_nic_get_eeprom_len(struct net_device *dev) | ||
98 | { | ||
99 | struct netxen_port *port = netdev_priv(dev); | ||
100 | struct netxen_adapter *adapter = port->adapter; | ||
101 | int n; | ||
102 | |||
103 | if ((netxen_rom_fast_read(adapter, 0, &n) == 0) | ||
104 | && (n & NETXEN_ROM_ROUNDUP)) { | ||
105 | n &= ~NETXEN_ROM_ROUNDUP; | ||
106 | if (n < NETXEN_MAX_EEPROM_LEN) | ||
107 | return n; | ||
108 | } | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static void | ||
113 | netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) | ||
114 | { | ||
115 | struct netxen_port *port = netdev_priv(dev); | ||
116 | struct netxen_adapter *adapter = port->adapter; | ||
117 | u32 fw_major = 0; | ||
118 | u32 fw_minor = 0; | ||
119 | u32 fw_build = 0; | ||
120 | |||
121 | strncpy(drvinfo->driver, "netxen_nic", 32); | ||
122 | strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); | ||
123 | fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
124 | NETXEN_FW_VERSION_MAJOR)); | ||
125 | fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
126 | NETXEN_FW_VERSION_MINOR)); | ||
127 | fw_build = readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB)); | ||
128 | sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); | ||
129 | |||
130 | strncpy(drvinfo->bus_info, pci_name(port->pdev), 32); | ||
131 | drvinfo->n_stats = NETXEN_NIC_STATS_LEN; | ||
132 | drvinfo->testinfo_len = NETXEN_NIC_TEST_LEN; | ||
133 | drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; | ||
134 | drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); | ||
135 | } | ||
136 | |||
137 | static int | ||
138 | netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
139 | { | ||
140 | struct netxen_port *port = netdev_priv(dev); | ||
141 | struct netxen_adapter *adapter = port->adapter; | ||
142 | struct netxen_board_info *boardinfo = &adapter->ahw.boardcfg; | ||
143 | |||
144 | /* read which mode */ | ||
145 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | ||
146 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
147 | SUPPORTED_10baseT_Full | | ||
148 | SUPPORTED_100baseT_Half | | ||
149 | SUPPORTED_100baseT_Full | | ||
150 | SUPPORTED_1000baseT_Half | | ||
151 | SUPPORTED_1000baseT_Full); | ||
152 | |||
153 | ecmd->advertising = (ADVERTISED_100baseT_Half | | ||
154 | ADVERTISED_100baseT_Full | | ||
155 | ADVERTISED_1000baseT_Half | | ||
156 | ADVERTISED_1000baseT_Full); | ||
157 | |||
158 | ecmd->port = PORT_TP; | ||
159 | |||
160 | if (netif_running(dev)) { | ||
161 | ecmd->speed = port->link_speed; | ||
162 | ecmd->duplex = port->link_duplex; | ||
163 | } else | ||
164 | return -EIO; /* link absent */ | ||
165 | } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { | ||
166 | ecmd->supported = (SUPPORTED_TP | | ||
167 | SUPPORTED_1000baseT_Full | | ||
168 | SUPPORTED_10000baseT_Full); | ||
169 | ecmd->advertising = (ADVERTISED_TP | | ||
170 | ADVERTISED_1000baseT_Full | | ||
171 | ADVERTISED_10000baseT_Full); | ||
172 | ecmd->port = PORT_TP; | ||
173 | |||
174 | ecmd->speed = SPEED_10000; | ||
175 | ecmd->duplex = DUPLEX_FULL; | ||
176 | ecmd->autoneg = AUTONEG_DISABLE; | ||
177 | } else | ||
178 | return -EIO; | ||
179 | |||
180 | ecmd->phy_address = port->portnum; | ||
181 | ecmd->transceiver = XCVR_EXTERNAL; | ||
182 | |||
183 | switch ((netxen_brdtype_t) boardinfo->board_type) { | ||
184 | case NETXEN_BRDTYPE_P2_SB35_4G: | ||
185 | case NETXEN_BRDTYPE_P2_SB31_2G: | ||
186 | ecmd->supported |= SUPPORTED_Autoneg; | ||
187 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
188 | case NETXEN_BRDTYPE_P2_SB31_10G_CX4: | ||
189 | ecmd->supported |= SUPPORTED_TP; | ||
190 | ecmd->advertising |= ADVERTISED_TP; | ||
191 | ecmd->port = PORT_TP; | ||
192 | ecmd->autoneg = (boardinfo->board_type == | ||
193 | NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? | ||
194 | (AUTONEG_DISABLE) : (port->link_autoneg); | ||
195 | break; | ||
196 | case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: | ||
197 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: | ||
198 | ecmd->supported |= SUPPORTED_MII; | ||
199 | ecmd->advertising |= ADVERTISED_MII; | ||
200 | ecmd->port = PORT_FIBRE; | ||
201 | ecmd->autoneg = AUTONEG_DISABLE; | ||
202 | break; | ||
203 | case NETXEN_BRDTYPE_P2_SB31_10G: | ||
204 | ecmd->supported |= SUPPORTED_FIBRE; | ||
205 | ecmd->advertising |= ADVERTISED_FIBRE; | ||
206 | ecmd->port = PORT_FIBRE; | ||
207 | ecmd->autoneg = AUTONEG_DISABLE; | ||
208 | break; | ||
209 | default: | ||
210 | printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", | ||
211 | (netxen_brdtype_t) boardinfo->board_type); | ||
212 | return -EIO; | ||
213 | |||
214 | } | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static int | ||
220 | netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
221 | { | ||
222 | struct netxen_port *port = netdev_priv(dev); | ||
223 | struct netxen_adapter *adapter = port->adapter; | ||
224 | __le32 status; | ||
225 | |||
226 | /* read which mode */ | ||
227 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | ||
228 | /* autonegotiation */ | ||
229 | if (adapter->ops->phy_write | ||
230 | && adapter->ops->phy_write(adapter, port->portnum, | ||
231 | NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, | ||
232 | (__le32) ecmd->autoneg) != 0) | ||
233 | return -EIO; | ||
234 | else | ||
235 | port->link_autoneg = ecmd->autoneg; | ||
236 | |||
237 | if (adapter->ops->phy_read | ||
238 | && adapter->ops->phy_read(adapter, port->portnum, | ||
239 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
240 | &status) != 0) | ||
241 | return -EIO; | ||
242 | |||
243 | /* speed */ | ||
244 | switch (ecmd->speed) { | ||
245 | case SPEED_10: | ||
246 | netxen_set_phy_speed(status, 0); | ||
247 | break; | ||
248 | case SPEED_100: | ||
249 | netxen_set_phy_speed(status, 1); | ||
250 | break; | ||
251 | case SPEED_1000: | ||
252 | netxen_set_phy_speed(status, 2); | ||
253 | break; | ||
254 | } | ||
255 | /* set duplex mode */ | ||
256 | if (ecmd->duplex == DUPLEX_HALF) | ||
257 | netxen_clear_phy_duplex(status); | ||
258 | if (ecmd->duplex == DUPLEX_FULL) | ||
259 | netxen_set_phy_duplex(status); | ||
260 | if (adapter->ops->phy_write | ||
261 | && adapter->ops->phy_write(adapter, port->portnum, | ||
262 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
263 | *((int *)&status)) != 0) | ||
264 | return -EIO; | ||
265 | else { | ||
266 | port->link_speed = ecmd->speed; | ||
267 | port->link_duplex = ecmd->duplex; | ||
268 | } | ||
269 | } else | ||
270 | return -EOPNOTSUPP; | ||
271 | |||
272 | if (netif_running(dev)) { | ||
273 | dev->stop(dev); | ||
274 | dev->open(dev); | ||
275 | } | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | static int netxen_nic_get_regs_len(struct net_device *dev) | ||
280 | { | ||
281 | return NETXEN_NIC_REGS_LEN; | ||
282 | } | ||
283 | |||
284 | struct netxen_niu_regs { | ||
285 | __le32 reg[NETXEN_NIC_REGS_COUNT]; | ||
286 | }; | ||
287 | |||
288 | static struct netxen_niu_regs niu_registers[] = { | ||
289 | { | ||
290 | /* GB Mode */ | ||
291 | { | ||
292 | NETXEN_NIU_GB_SERDES_RESET, | ||
293 | NETXEN_NIU_GB0_MII_MODE, | ||
294 | NETXEN_NIU_GB1_MII_MODE, | ||
295 | NETXEN_NIU_GB2_MII_MODE, | ||
296 | NETXEN_NIU_GB3_MII_MODE, | ||
297 | NETXEN_NIU_GB0_GMII_MODE, | ||
298 | NETXEN_NIU_GB1_GMII_MODE, | ||
299 | NETXEN_NIU_GB2_GMII_MODE, | ||
300 | NETXEN_NIU_GB3_GMII_MODE, | ||
301 | NETXEN_NIU_REMOTE_LOOPBACK, | ||
302 | NETXEN_NIU_GB0_HALF_DUPLEX, | ||
303 | NETXEN_NIU_GB1_HALF_DUPLEX, | ||
304 | NETXEN_NIU_RESET_SYS_FIFOS, | ||
305 | NETXEN_NIU_GB_CRC_DROP, | ||
306 | NETXEN_NIU_GB_DROP_WRONGADDR, | ||
307 | NETXEN_NIU_TEST_MUX_CTL, | ||
308 | |||
309 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | ||
310 | NETXEN_NIU_GB_MAC_CONFIG_1(0), | ||
311 | NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0), | ||
312 | NETXEN_NIU_GB_MAX_FRAME_SIZE(0), | ||
313 | NETXEN_NIU_GB_TEST_REG(0), | ||
314 | NETXEN_NIU_GB_MII_MGMT_CONFIG(0), | ||
315 | NETXEN_NIU_GB_MII_MGMT_COMMAND(0), | ||
316 | NETXEN_NIU_GB_MII_MGMT_ADDR(0), | ||
317 | NETXEN_NIU_GB_MII_MGMT_CTRL(0), | ||
318 | NETXEN_NIU_GB_MII_MGMT_STATUS(0), | ||
319 | NETXEN_NIU_GB_MII_MGMT_INDICATE(0), | ||
320 | NETXEN_NIU_GB_INTERFACE_CTRL(0), | ||
321 | NETXEN_NIU_GB_INTERFACE_STATUS(0), | ||
322 | NETXEN_NIU_GB_STATION_ADDR_0(0), | ||
323 | NETXEN_NIU_GB_STATION_ADDR_1(0), | ||
324 | -1, | ||
325 | } | ||
326 | }, | ||
327 | { | ||
328 | /* XG Mode */ | ||
329 | { | ||
330 | NETXEN_NIU_XG_SINGLE_TERM, | ||
331 | NETXEN_NIU_XG_DRIVE_HI, | ||
332 | NETXEN_NIU_XG_DRIVE_LO, | ||
333 | NETXEN_NIU_XG_DTX, | ||
334 | NETXEN_NIU_XG_DEQ, | ||
335 | NETXEN_NIU_XG_WORD_ALIGN, | ||
336 | NETXEN_NIU_XG_RESET, | ||
337 | NETXEN_NIU_XG_POWER_DOWN, | ||
338 | NETXEN_NIU_XG_RESET_PLL, | ||
339 | NETXEN_NIU_XG_SERDES_LOOPBACK, | ||
340 | NETXEN_NIU_XG_DO_BYTE_ALIGN, | ||
341 | NETXEN_NIU_XG_TX_ENABLE, | ||
342 | NETXEN_NIU_XG_RX_ENABLE, | ||
343 | NETXEN_NIU_XG_STATUS, | ||
344 | NETXEN_NIU_XG_PAUSE_THRESHOLD, | ||
345 | NETXEN_NIU_XGE_CONFIG_0, | ||
346 | NETXEN_NIU_XGE_CONFIG_1, | ||
347 | NETXEN_NIU_XGE_IPG, | ||
348 | NETXEN_NIU_XGE_STATION_ADDR_0_HI, | ||
349 | NETXEN_NIU_XGE_STATION_ADDR_0_1, | ||
350 | NETXEN_NIU_XGE_STATION_ADDR_1_LO, | ||
351 | NETXEN_NIU_XGE_STATUS, | ||
352 | NETXEN_NIU_XGE_MAX_FRAME_SIZE, | ||
353 | NETXEN_NIU_XGE_PAUSE_FRAME_VALUE, | ||
354 | NETXEN_NIU_XGE_TX_BYTE_CNT, | ||
355 | NETXEN_NIU_XGE_TX_FRAME_CNT, | ||
356 | NETXEN_NIU_XGE_RX_BYTE_CNT, | ||
357 | NETXEN_NIU_XGE_RX_FRAME_CNT, | ||
358 | NETXEN_NIU_XGE_AGGR_ERROR_CNT, | ||
359 | NETXEN_NIU_XGE_MULTICAST_FRAME_CNT, | ||
360 | NETXEN_NIU_XGE_UNICAST_FRAME_CNT, | ||
361 | NETXEN_NIU_XGE_CRC_ERROR_CNT, | ||
362 | NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR, | ||
363 | NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR, | ||
364 | NETXEN_NIU_XGE_LOCAL_ERROR_CNT, | ||
365 | NETXEN_NIU_XGE_REMOTE_ERROR_CNT, | ||
366 | NETXEN_NIU_XGE_CONTROL_CHAR_CNT, | ||
367 | NETXEN_NIU_XGE_PAUSE_FRAME_CNT, | ||
368 | -1, | ||
369 | } | ||
370 | } | ||
371 | }; | ||
372 | |||
373 | static void | ||
374 | netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) | ||
375 | { | ||
376 | struct netxen_port *port = netdev_priv(dev); | ||
377 | struct netxen_adapter *adapter = port->adapter; | ||
378 | __le32 mode, *regs_buff = p; | ||
379 | void __iomem *addr; | ||
380 | int i, window; | ||
381 | |||
382 | memset(p, 0, NETXEN_NIC_REGS_LEN); | ||
383 | regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | | ||
384 | (port->pdev)->device; | ||
385 | /* which mode */ | ||
386 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_MODE, ®s_buff[0]); | ||
387 | mode = regs_buff[0]; | ||
388 | |||
389 | /* Common registers to all the modes */ | ||
390 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER, | ||
391 | ®s_buff[2]); | ||
392 | /* GB/XGB Mode */ | ||
393 | mode = (mode / 2) - 1; | ||
394 | window = 0; | ||
395 | if (mode <= 1) { | ||
396 | for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) { | ||
397 | /* GB: port specific registers */ | ||
398 | if (mode == 0 && i >= 19) | ||
399 | window = port->portnum * NETXEN_NIC_PORT_WINDOW; | ||
400 | |||
401 | NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode]. | ||
402 | reg[i - 3] + window, | ||
403 | ®s_buff[i]); | ||
404 | } | ||
405 | |||
406 | } | ||
407 | } | ||
408 | |||
409 | static void | ||
410 | netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
411 | { | ||
412 | wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; | ||
413 | /* options can be added depending upon the mode */ | ||
414 | wol->wolopts = 0; | ||
415 | } | ||
416 | |||
417 | static u32 netxen_nic_get_link(struct net_device *dev) | ||
418 | { | ||
419 | struct netxen_port *port = netdev_priv(dev); | ||
420 | struct netxen_adapter *adapter = port->adapter; | ||
421 | __le32 status; | ||
422 | |||
423 | /* read which mode */ | ||
424 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | ||
425 | if (adapter->ops->phy_read | ||
426 | && adapter->ops->phy_read(adapter, port->portnum, | ||
427 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
428 | &status) != 0) | ||
429 | return -EIO; | ||
430 | else | ||
431 | return (netxen_get_phy_link(status)); | ||
432 | } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { | ||
433 | int val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); | ||
434 | return val == XG_LINK_UP; | ||
435 | } | ||
436 | return -EIO; | ||
437 | } | ||
438 | |||
439 | static int | ||
440 | netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | ||
441 | u8 * bytes) | ||
442 | { | ||
443 | struct netxen_port *port = netdev_priv(dev); | ||
444 | struct netxen_adapter *adapter = port->adapter; | ||
445 | int offset; | ||
446 | |||
447 | if (eeprom->len == 0) | ||
448 | return -EINVAL; | ||
449 | |||
450 | eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16); | ||
451 | for (offset = 0; offset < eeprom->len; offset++) | ||
452 | if (netxen_rom_fast_read | ||
453 | (adapter, (8 * offset) + 8, (int *)eeprom->data) == -1) | ||
454 | return -EIO; | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static void | ||
459 | netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) | ||
460 | { | ||
461 | struct netxen_port *port = netdev_priv(dev); | ||
462 | struct netxen_adapter *adapter = port->adapter; | ||
463 | int i, j; | ||
464 | |||
465 | ring->rx_pending = 0; | ||
466 | for (i = 0; i < MAX_RCV_CTX; ++i) { | ||
467 | for (j = 0; j < NUM_RCV_DESC_RINGS; j++) | ||
468 | ring->rx_pending += | ||
469 | adapter->recv_ctx[i].rcv_desc[j].rcv_pending; | ||
470 | } | ||
471 | |||
472 | ring->rx_max_pending = adapter->max_rx_desc_count; | ||
473 | ring->tx_max_pending = adapter->max_tx_desc_count; | ||
474 | ring->rx_mini_max_pending = 0; | ||
475 | ring->rx_mini_pending = 0; | ||
476 | ring->rx_jumbo_max_pending = 0; | ||
477 | ring->rx_jumbo_pending = 0; | ||
478 | } | ||
479 | |||
480 | static void | ||
481 | netxen_nic_get_pauseparam(struct net_device *dev, | ||
482 | struct ethtool_pauseparam *pause) | ||
483 | { | ||
484 | struct netxen_port *port = netdev_priv(dev); | ||
485 | struct netxen_adapter *adapter = port->adapter; | ||
486 | __le32 val; | ||
487 | |||
488 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | ||
489 | /* get flow control settings */ | ||
490 | netxen_nic_read_w0(adapter, | ||
491 | NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum), | ||
492 | (u32 *) & val); | ||
493 | pause->rx_pause = netxen_gb_get_rx_flowctl(val); | ||
494 | pause->tx_pause = netxen_gb_get_tx_flowctl(val); | ||
495 | /* get autoneg settings */ | ||
496 | pause->autoneg = port->link_autoneg; | ||
497 | } | ||
498 | } | ||
499 | |||
500 | static int | ||
501 | netxen_nic_set_pauseparam(struct net_device *dev, | ||
502 | struct ethtool_pauseparam *pause) | ||
503 | { | ||
504 | struct netxen_port *port = netdev_priv(dev); | ||
505 | struct netxen_adapter *adapter = port->adapter; | ||
506 | __le32 val; | ||
507 | unsigned int autoneg; | ||
508 | |||
509 | /* read mode */ | ||
510 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | ||
511 | /* set flow control */ | ||
512 | netxen_nic_read_w0(adapter, | ||
513 | NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum), | ||
514 | (u32 *) & val); | ||
515 | if (pause->tx_pause) | ||
516 | netxen_gb_tx_flowctl(val); | ||
517 | else | ||
518 | netxen_gb_unset_tx_flowctl(val); | ||
519 | if (pause->rx_pause) | ||
520 | netxen_gb_rx_flowctl(val); | ||
521 | else | ||
522 | netxen_gb_unset_rx_flowctl(val); | ||
523 | |||
524 | netxen_nic_write_w0(adapter, | ||
525 | NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum), | ||
526 | *(u32 *) (&val)); | ||
527 | /* set autoneg */ | ||
528 | autoneg = pause->autoneg; | ||
529 | if (adapter->ops->phy_write | ||
530 | && adapter->ops->phy_write(adapter, port->portnum, | ||
531 | NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, | ||
532 | (__le32) autoneg) != 0) | ||
533 | return -EIO; | ||
534 | else { | ||
535 | port->link_autoneg = pause->autoneg; | ||
536 | return 0; | ||
537 | } | ||
538 | } else | ||
539 | return -EOPNOTSUPP; | ||
540 | } | ||
541 | |||
542 | static int netxen_nic_reg_test(struct net_device *dev) | ||
543 | { | ||
544 | struct netxen_port *port = netdev_priv(dev); | ||
545 | struct netxen_adapter *adapter = port->adapter; | ||
546 | u32 data_read, data_written, save; | ||
547 | __le32 mode; | ||
548 | |||
549 | /* | ||
550 | * first read the NIU mode, then test the "Read Only" registers by writing to them | ||
551 | */ | ||
552 | netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); | ||
553 | if (netxen_get_niu_enable_ge(mode)) { /* GB Mode */ | ||
554 | netxen_nic_read_w0(adapter, | ||
555 | NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum), | ||
556 | &data_read); | ||
557 | |||
558 | save = data_read; | ||
559 | if (data_read) | ||
560 | data_written = data_read & NETXEN_NIC_INVALID_DATA; | ||
561 | else | ||
562 | data_written = NETXEN_NIC_INVALID_DATA; | ||
563 | netxen_nic_write_w0(adapter, | ||
564 | NETXEN_NIU_GB_MII_MGMT_STATUS(port-> | ||
565 | portnum), | ||
566 | data_written); | ||
567 | netxen_nic_read_w0(adapter, | ||
568 | NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum), | ||
569 | &data_read); | ||
570 | |||
571 | if (data_written == data_read) { | ||
572 | netxen_nic_write_w0(adapter, | ||
573 | NETXEN_NIU_GB_MII_MGMT_STATUS(port-> | ||
574 | portnum), | ||
575 | save); | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | /* netxen_niu_gb_mii_mgmt_indicators is read only */ | ||
581 | netxen_nic_read_w0(adapter, | ||
582 | NETXEN_NIU_GB_MII_MGMT_INDICATE(port-> | ||
583 | portnum), | ||
584 | &data_read); | ||
585 | |||
586 | save = data_read; | ||
587 | if (data_read) | ||
588 | data_written = data_read & NETXEN_NIC_INVALID_DATA; | ||
589 | else | ||
590 | data_written = NETXEN_NIC_INVALID_DATA; | ||
591 | netxen_nic_write_w0(adapter, | ||
592 | NETXEN_NIU_GB_MII_MGMT_INDICATE(port-> | ||
593 | portnum), | ||
594 | data_written); | ||
595 | |||
596 | netxen_nic_read_w0(adapter, | ||
597 | NETXEN_NIU_GB_MII_MGMT_INDICATE(port-> | ||
598 | portnum), | ||
599 | &data_read); | ||
600 | |||
601 | if (data_written == data_read) { | ||
602 | netxen_nic_write_w0(adapter, | ||
603 | NETXEN_NIU_GB_MII_MGMT_INDICATE | ||
604 | (port->portnum), save); | ||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | /* netxen_niu_gb_interface_status is read only */ | ||
609 | netxen_nic_read_w0(adapter, | ||
610 | NETXEN_NIU_GB_INTERFACE_STATUS(port-> | ||
611 | portnum), | ||
612 | &data_read); | ||
613 | |||
614 | save = data_read; | ||
615 | if (data_read) | ||
616 | data_written = data_read & NETXEN_NIC_INVALID_DATA; | ||
617 | else | ||
618 | data_written = NETXEN_NIC_INVALID_DATA; | ||
619 | netxen_nic_write_w0(adapter, | ||
620 | NETXEN_NIU_GB_INTERFACE_STATUS(port-> | ||
621 | portnum), | ||
622 | data_written); | ||
623 | |||
624 | netxen_nic_read_w0(adapter, | ||
625 | NETXEN_NIU_GB_INTERFACE_STATUS(port-> | ||
626 | portnum), | ||
627 | &data_read); | ||
628 | |||
629 | if (data_written == data_read) { | ||
630 | netxen_nic_write_w0(adapter, | ||
631 | NETXEN_NIU_GB_INTERFACE_STATUS | ||
632 | (port->portnum), save); | ||
633 | |||
634 | return 0; | ||
635 | } | ||
636 | } /* GB Mode */ | ||
637 | return 1; | ||
638 | } | ||
639 | |||
640 | static int netxen_nic_diag_test_count(struct net_device *dev) | ||
641 | { | ||
642 | return NETXEN_NIC_TEST_LEN; | ||
643 | } | ||
644 | |||
645 | static void | ||
646 | netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, | ||
647 | u64 * data) | ||
648 | { | ||
649 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* offline tests */ | ||
650 | /* link test */ | ||
651 | if (!(data[4] = (u64) netxen_nic_get_link(dev))) | ||
652 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
653 | |||
654 | if (netif_running(dev)) | ||
655 | dev->stop(dev); | ||
656 | |||
657 | /* register tests */ | ||
658 | if (!(data[0] = netxen_nic_reg_test(dev))) | ||
659 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
660 | /* remaining tests are not implemented; report them as passing */ | ||
661 | data[1] = data[2] = data[3] = 1; | ||
662 | if (netif_running(dev)) | ||
663 | dev->open(dev); | ||
664 | } else { /* online tests */ | ||
665 | /* link test */ | ||
666 | if (!(data[4] = (u64) netxen_nic_get_link(dev))) | ||
667 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
668 | |||
669 | /* other tests pass by default */ | ||
670 | data[0] = data[1] = data[2] = data[3] = 1; | ||
671 | } | ||
672 | } | ||
673 | |||
674 | static void | ||
675 | netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) | ||
676 | { | ||
677 | int index; | ||
678 | |||
679 | switch (stringset) { | ||
680 | case ETH_SS_TEST: | ||
681 | memcpy(data, *netxen_nic_gstrings_test, | ||
682 | NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN); | ||
683 | break; | ||
684 | case ETH_SS_STATS: | ||
685 | for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { | ||
686 | memcpy(data + index * ETH_GSTRING_LEN, | ||
687 | netxen_nic_gstrings_stats[index].stat_string, | ||
688 | ETH_GSTRING_LEN); | ||
689 | } | ||
690 | break; | ||
691 | } | ||
692 | } | ||
693 | |||
694 | static int netxen_nic_get_stats_count(struct net_device *dev) | ||
695 | { | ||
696 | return NETXEN_NIC_STATS_LEN; | ||
697 | } | ||
698 | |||
699 | static void | ||
700 | netxen_nic_get_ethtool_stats(struct net_device *dev, | ||
701 | struct ethtool_stats *stats, u64 * data) | ||
702 | { | ||
703 | struct netxen_port *port = netdev_priv(dev); | ||
704 | int index; | ||
705 | |||
706 | for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { | ||
707 | char *p = | ||
708 | (char *)port + netxen_nic_gstrings_stats[index].stat_offset; | ||
709 | data[index] = | ||
710 | (netxen_nic_gstrings_stats[index].sizeof_stat == | ||
711 | sizeof(u64)) ? *(u64 *) p : *(u32 *) p; | ||
712 | } | ||
713 | |||
714 | } | ||
715 | |||
716 | struct ethtool_ops netxen_nic_ethtool_ops = { | ||
717 | .get_settings = netxen_nic_get_settings, | ||
718 | .set_settings = netxen_nic_set_settings, | ||
719 | .get_drvinfo = netxen_nic_get_drvinfo, | ||
720 | .get_regs_len = netxen_nic_get_regs_len, | ||
721 | .get_regs = netxen_nic_get_regs, | ||
722 | .get_wol = netxen_nic_get_wol, | ||
723 | .get_link = netxen_nic_get_link, | ||
724 | .get_eeprom_len = netxen_nic_get_eeprom_len, | ||
725 | .get_eeprom = netxen_nic_get_eeprom, | ||
726 | .get_ringparam = netxen_nic_get_ringparam, | ||
727 | .get_pauseparam = netxen_nic_get_pauseparam, | ||
728 | .set_pauseparam = netxen_nic_set_pauseparam, | ||
729 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
730 | .set_tx_csum = ethtool_op_set_tx_csum, | ||
731 | .get_sg = ethtool_op_get_sg, | ||
732 | .set_sg = ethtool_op_set_sg, | ||
733 | .get_tso = ethtool_op_get_tso, | ||
734 | .set_tso = ethtool_op_set_tso, | ||
735 | .self_test_count = netxen_nic_diag_test_count, | ||
736 | .self_test = netxen_nic_diag_test, | ||
737 | .get_strings = netxen_nic_get_strings, | ||
738 | .get_stats_count = netxen_nic_get_stats_count, | ||
739 | .get_ethtool_stats = netxen_nic_get_ethtool_stats, | ||
740 | .get_perm_addr = ethtool_op_get_perm_addr, | ||
741 | }; | ||
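The ops table above only takes effect once it is attached to the net_device. A minimal sketch of the expected hookup (the netdev variable is hypothetical here; the real wiring is done in the driver's probe path):

	SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);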
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h new file mode 100644 index 000000000000..72c6ec4ee2a0 --- /dev/null +++ b/drivers/net/netxen/netxen_nic_hdr.h | |||
@@ -0,0 +1,678 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | */ | ||
29 | |||
30 | #ifndef __NETXEN_NIC_HDR_H_ | ||
31 | #define __NETXEN_NIC_HDR_H_ | ||
32 | |||
33 | #include <linux/module.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/version.h> | ||
36 | |||
37 | #include <asm/semaphore.h> | ||
38 | #include <linux/spinlock.h> | ||
39 | #include <asm/irq.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/errno.h> | ||
42 | #include <linux/pci.h> | ||
43 | #include <linux/types.h> | ||
44 | #include <asm/uaccess.h> | ||
45 | #include <asm/string.h> /* for memset */ | ||
46 | |||
47 | /* | ||
48 | * The basic unit of access when reading/writing control registers. | ||
49 | */ | ||
50 | |||
51 | typedef __le32 netxen_crbword_t; /* single word in CRB space */ | ||
52 | |||
53 | enum { | ||
54 | NETXEN_HW_H0_CH_HUB_ADR = 0x05, | ||
55 | NETXEN_HW_H1_CH_HUB_ADR = 0x0E, | ||
56 | NETXEN_HW_H2_CH_HUB_ADR = 0x03, | ||
57 | NETXEN_HW_H3_CH_HUB_ADR = 0x01, | ||
58 | NETXEN_HW_H4_CH_HUB_ADR = 0x06, | ||
59 | NETXEN_HW_H5_CH_HUB_ADR = 0x07, | ||
60 | NETXEN_HW_H6_CH_HUB_ADR = 0x08 | ||
61 | }; | ||
62 | |||
63 | /* Hub 0 */ | ||
64 | enum { | ||
65 | NETXEN_HW_MN_CRB_AGT_ADR = 0x15, | ||
66 | NETXEN_HW_MS_CRB_AGT_ADR = 0x25 | ||
67 | }; | ||
68 | |||
69 | /* Hub 1 */ | ||
70 | enum { | ||
71 | NETXEN_HW_PS_CRB_AGT_ADR = 0x73, | ||
72 | NETXEN_HW_SS_CRB_AGT_ADR = 0x20, | ||
73 | NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b, | ||
74 | NETXEN_HW_QMS_CRB_AGT_ADR = 0x00, | ||
75 | NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01, | ||
76 | NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02, | ||
77 | NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03, | ||
78 | NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04, | ||
79 | NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58, | ||
80 | NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59, | ||
81 | NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a, | ||
82 | NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a, | ||
83 | NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c, | ||
84 | NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f, | ||
85 | NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12, | ||
86 | NETXEN_HW_SMB_CRB_AGT_ADR = 0x18 | ||
87 | }; | ||
88 | |||
89 | /* Hub 2 */ | ||
90 | enum { | ||
91 | NETXEN_HW_NIU_CRB_AGT_ADR = 0x31, | ||
92 | NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19, | ||
93 | NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29, | ||
94 | |||
95 | NETXEN_HW_SN_CRB_AGT_ADR = 0x10, | ||
96 | NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20, | ||
97 | NETXEN_HW_LPC_CRB_AGT_ADR = 0x22, | ||
98 | NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21, | ||
99 | NETXEN_HW_QM_CRB_AGT_ADR = 0x66, | ||
100 | NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60, | ||
101 | NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61, | ||
102 | NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62, | ||
103 | NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63, | ||
104 | NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09, | ||
105 | NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d, | ||
106 | NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e, | ||
107 | NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11 | ||
108 | }; | ||
109 | |||
110 | /* Hub 3 */ | ||
111 | enum { | ||
112 | NETXEN_HW_PH_CRB_AGT_ADR = 0x1A, | ||
113 | NETXEN_HW_SRE_CRB_AGT_ADR = 0x50, | ||
114 | NETXEN_HW_EG_CRB_AGT_ADR = 0x51, | ||
115 | NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08 | ||
116 | }; | ||
117 | |||
118 | /* Hub 4 */ | ||
119 | enum { | ||
120 | NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40, | ||
121 | NETXEN_HW_PEGN1_CRB_AGT_ADR, | ||
122 | NETXEN_HW_PEGN2_CRB_AGT_ADR, | ||
123 | NETXEN_HW_PEGN3_CRB_AGT_ADR, | ||
124 | NETXEN_HW_PEGNI_CRB_AGT_ADR, | ||
125 | NETXEN_HW_PEGND_CRB_AGT_ADR, | ||
126 | NETXEN_HW_PEGNC_CRB_AGT_ADR, | ||
127 | NETXEN_HW_PEGR0_CRB_AGT_ADR, | ||
128 | NETXEN_HW_PEGR1_CRB_AGT_ADR, | ||
129 | NETXEN_HW_PEGR2_CRB_AGT_ADR, | ||
130 | NETXEN_HW_PEGR3_CRB_AGT_ADR | ||
131 | }; | ||
132 | |||
133 | /* Hub 5 */ | ||
134 | enum { | ||
135 | NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40, | ||
136 | NETXEN_HW_PEGS1_CRB_AGT_ADR, | ||
137 | NETXEN_HW_PEGS2_CRB_AGT_ADR, | ||
138 | NETXEN_HW_PEGS3_CRB_AGT_ADR, | ||
139 | NETXEN_HW_PEGSI_CRB_AGT_ADR, | ||
140 | NETXEN_HW_PEGSD_CRB_AGT_ADR, | ||
141 | NETXEN_HW_PEGSC_CRB_AGT_ADR | ||
142 | }; | ||
143 | |||
144 | /* Hub 6 */ | ||
145 | enum { | ||
146 | NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46, | ||
147 | NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47, | ||
148 | NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48, | ||
149 | NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49, | ||
150 | NETXEN_HW_NCM_CRB_AGT_ADR = 0x16, | ||
151 | NETXEN_HW_TMR_CRB_AGT_ADR = 0x17, | ||
152 | NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05, | ||
153 | NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06, | ||
154 | NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07 | ||
155 | }; | ||
156 | |||
157 | /* Floaters - non existent modules */ | ||
158 | #define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 | ||
159 | |||
160 | /* This field defines PCI/X adr [25:20] of agents on the CRB */ | ||
161 | enum { | ||
162 | NETXEN_HW_PX_MAP_CRB_PH = 0, | ||
163 | NETXEN_HW_PX_MAP_CRB_PS, | ||
164 | NETXEN_HW_PX_MAP_CRB_MN, | ||
165 | NETXEN_HW_PX_MAP_CRB_MS, | ||
166 | NETXEN_HW_PX_MAP_CRB_PGR1, | ||
167 | NETXEN_HW_PX_MAP_CRB_SRE, | ||
168 | NETXEN_HW_PX_MAP_CRB_NIU, | ||
169 | NETXEN_HW_PX_MAP_CRB_QMN, | ||
170 | NETXEN_HW_PX_MAP_CRB_SQN0, | ||
171 | NETXEN_HW_PX_MAP_CRB_SQN1, | ||
172 | NETXEN_HW_PX_MAP_CRB_SQN2, | ||
173 | NETXEN_HW_PX_MAP_CRB_SQN3, | ||
174 | NETXEN_HW_PX_MAP_CRB_QMS, | ||
175 | NETXEN_HW_PX_MAP_CRB_SQS0, | ||
176 | NETXEN_HW_PX_MAP_CRB_SQS1, | ||
177 | NETXEN_HW_PX_MAP_CRB_SQS2, | ||
178 | NETXEN_HW_PX_MAP_CRB_SQS3, | ||
179 | NETXEN_HW_PX_MAP_CRB_PGN0, | ||
180 | NETXEN_HW_PX_MAP_CRB_PGN1, | ||
181 | NETXEN_HW_PX_MAP_CRB_PGN2, | ||
182 | NETXEN_HW_PX_MAP_CRB_PGN3, | ||
183 | NETXEN_HW_PX_MAP_CRB_PGND, | ||
184 | NETXEN_HW_PX_MAP_CRB_PGNI, | ||
185 | NETXEN_HW_PX_MAP_CRB_PGS0, | ||
186 | NETXEN_HW_PX_MAP_CRB_PGS1, | ||
187 | NETXEN_HW_PX_MAP_CRB_PGS2, | ||
188 | NETXEN_HW_PX_MAP_CRB_PGS3, | ||
189 | NETXEN_HW_PX_MAP_CRB_PGSD, | ||
190 | NETXEN_HW_PX_MAP_CRB_PGSI, | ||
191 | NETXEN_HW_PX_MAP_CRB_SN, | ||
192 | NETXEN_HW_PX_MAP_CRB_PGR2, | ||
193 | NETXEN_HW_PX_MAP_CRB_EG, | ||
194 | NETXEN_HW_PX_MAP_CRB_PH2, | ||
195 | NETXEN_HW_PX_MAP_CRB_PS2, | ||
196 | NETXEN_HW_PX_MAP_CRB_CAM, | ||
197 | NETXEN_HW_PX_MAP_CRB_CAS0, | ||
198 | NETXEN_HW_PX_MAP_CRB_CAS1, | ||
199 | NETXEN_HW_PX_MAP_CRB_CAS2, | ||
200 | NETXEN_HW_PX_MAP_CRB_C2C0, | ||
201 | NETXEN_HW_PX_MAP_CRB_C2C1, | ||
202 | NETXEN_HW_PX_MAP_CRB_TIMR, | ||
203 | NETXEN_HW_PX_MAP_CRB_PGR3, | ||
204 | NETXEN_HW_PX_MAP_CRB_RPMX1, | ||
205 | NETXEN_HW_PX_MAP_CRB_RPMX2, | ||
206 | NETXEN_HW_PX_MAP_CRB_RPMX3, | ||
207 | NETXEN_HW_PX_MAP_CRB_RPMX4, | ||
208 | NETXEN_HW_PX_MAP_CRB_RPMX5, | ||
209 | NETXEN_HW_PX_MAP_CRB_RPMX6, | ||
210 | NETXEN_HW_PX_MAP_CRB_RPMX7, | ||
211 | NETXEN_HW_PX_MAP_CRB_XDMA, | ||
212 | NETXEN_HW_PX_MAP_CRB_I2Q, | ||
213 | NETXEN_HW_PX_MAP_CRB_ROMUSB, | ||
214 | NETXEN_HW_PX_MAP_CRB_CAS3, | ||
215 | NETXEN_HW_PX_MAP_CRB_RPMX0, | ||
216 | NETXEN_HW_PX_MAP_CRB_RPMX8, | ||
217 | NETXEN_HW_PX_MAP_CRB_RPMX9, | ||
218 | NETXEN_HW_PX_MAP_CRB_OCM0, | ||
219 | NETXEN_HW_PX_MAP_CRB_OCM1, | ||
220 | NETXEN_HW_PX_MAP_CRB_SMB, | ||
221 | NETXEN_HW_PX_MAP_CRB_I2C0, | ||
222 | NETXEN_HW_PX_MAP_CRB_I2C1, | ||
223 | NETXEN_HW_PX_MAP_CRB_LPC, | ||
224 | NETXEN_HW_PX_MAP_CRB_PGNC, | ||
225 | NETXEN_HW_PX_MAP_CRB_PGR0 | ||
226 | }; | ||
227 | |||
228 | /* This field defines CRB adr [31:20] of the agents */ | ||
229 | |||
230 | #define NETXEN_HW_CRB_HUB_AGT_ADR_MN \ | ||
231 | ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR) | ||
232 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PH \ | ||
233 | ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR) | ||
234 | #define NETXEN_HW_CRB_HUB_AGT_ADR_MS \ | ||
235 | ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR) | ||
236 | |||
237 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PS \ | ||
238 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR) | ||
239 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SS \ | ||
240 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR) | ||
241 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \ | ||
242 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR) | ||
243 | #define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \ | ||
244 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR) | ||
245 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \ | ||
246 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR) | ||
247 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \ | ||
248 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR) | ||
249 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \ | ||
250 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR) | ||
251 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \ | ||
252 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR) | ||
253 | #define NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \ | ||
254 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR) | ||
255 | #define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \ | ||
256 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR) | ||
257 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \ | ||
258 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR) | ||
259 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \ | ||
260 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR) | ||
261 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \ | ||
262 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR) | ||
263 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \ | ||
264 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR) | ||
265 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \ | ||
266 | ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR) | ||
267 | |||
268 | #define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \ | ||
269 | ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR) | ||
270 | #define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \ | ||
271 | ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR) | ||
272 | #define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \ | ||
273 | ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR) | ||
274 | |||
275 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \ | ||
276 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR) | ||
277 | #define NETXEN_HW_CRB_HUB_AGT_ADR_EG \ | ||
278 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR) | ||
279 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \ | ||
280 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR) | ||
281 | #define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \ | ||
282 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR) | ||
283 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \ | ||
284 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR) | ||
285 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \ | ||
286 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR) | ||
287 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \ | ||
288 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR) | ||
289 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \ | ||
290 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR) | ||
291 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \ | ||
292 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR) | ||
293 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \ | ||
294 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR) | ||
295 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \ | ||
296 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR) | ||
297 | #define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \ | ||
298 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR) | ||
299 | #define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \ | ||
300 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR) | ||
301 | #define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \ | ||
302 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR) | ||
303 | #define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \ | ||
304 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR) | ||
305 | #define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \ | ||
306 | ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR) | ||
307 | |||
308 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \ | ||
309 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR) | ||
310 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \ | ||
311 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR) | ||
312 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \ | ||
313 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR) | ||
314 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \ | ||
315 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR) | ||
316 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \ | ||
317 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR) | ||
318 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \ | ||
319 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR) | ||
320 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \ | ||
321 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR) | ||
322 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \ | ||
323 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR) | ||
324 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \ | ||
325 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR) | ||
326 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \ | ||
327 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR) | ||
328 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \ | ||
329 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR) | ||
330 | |||
331 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \ | ||
332 | ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR) | ||
333 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \ | ||
334 | ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR) | ||
335 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \ | ||
336 | ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR) | ||
337 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \ | ||
338 | ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR) | ||
339 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \ | ||
340 | ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR) | ||
341 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \ | ||
342 | ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR) | ||
343 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \ | ||
344 | ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR) | ||
345 | |||
346 | #define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \ | ||
347 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR) | ||
348 | #define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \ | ||
349 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR) | ||
350 | #define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \ | ||
351 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR) | ||
352 | #define NETXEN_HW_CRB_HUB_AGT_ADR_SN \ | ||
353 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR) | ||
354 | #define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \ | ||
355 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR) | ||
356 | #define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \ | ||
357 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR) | ||
358 | #define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \ | ||
359 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR) | ||
360 | #define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \ | ||
361 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR) | ||
362 | #define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \ | ||
363 | ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR) | ||
364 | |||
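/*
 * Worked example of the composition above: the NIU agent hangs off hub 2
 * (NETXEN_HW_H2_CH_HUB_ADR = 0x03, NETXEN_HW_NIU_CRB_AGT_ADR = 0x31), so
 *
 *   NETXEN_HW_CRB_HUB_AGT_ADR_NIU = (0x03 << 7) | 0x31 = 0x1b1
 *
 * i.e. the hub number lands in the upper bits of the agent field and the
 * agent id in the low 7 bits.
 */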
365 | /* | ||
366 | * MAX_RCV_CTX : The number of receive contexts that are available on | ||
367 | * the phantom. | ||
368 | */ | ||
369 | #define MAX_RCV_CTX 1 | ||
370 | |||
371 | #define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034) | ||
372 | #define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014) | ||
373 | #define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000) | ||
374 | #define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000) | ||
375 | #define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000) | ||
376 | |||
377 | #define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16)) | ||
378 | #define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008) | ||
379 | |||
380 | #define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034) | ||
381 | |||
382 | #define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20)) | ||
383 | #define CRB_REG_EX_PC 0x3c | ||
384 | |||
385 | #define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000) | ||
386 | #define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000) | ||
387 | |||
388 | #define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) | ||
389 | #define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) | ||
390 | #define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) | ||
391 | #define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) | ||
392 | #define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) | ||
393 | #define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) | ||
394 | #define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) | ||
395 | |||
396 | #define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) | ||
397 | |||
398 | #define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) | ||
399 | #define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) | ||
400 | #define NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) | ||
401 | #define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) | ||
402 | #define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) | ||
403 | #define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) | ||
404 | |||
405 | /* Lock IDs for ROM lock */ | ||
406 | #define ROM_LOCK_DRIVER 0x0d417340 | ||
407 | |||
408 | /****************************************************************************** | ||
409 | * | ||
410 | * Definitions specific to M25P flash | ||
411 | * | ||
412 | ******************************************************************************* | ||
413 | * Instructions | ||
414 | */ | ||
415 | #define M25P_INSTR_WREN 0x06 | ||
416 | #define M25P_INSTR_WRDI 0x04 | ||
417 | #define M25P_INSTR_RDID 0x9f | ||
418 | #define M25P_INSTR_RDSR 0x05 | ||
419 | #define M25P_INSTR_WRSR 0x01 | ||
420 | #define M25P_INSTR_READ 0x03 | ||
421 | #define M25P_INSTR_FAST_READ 0x0b | ||
422 | #define M25P_INSTR_PP 0x02 | ||
423 | #define M25P_INSTR_SE 0xd8 | ||
424 | #define M25P_INSTR_BE 0xc7 | ||
425 | #define M25P_INSTR_DP 0xb9 | ||
426 | #define M25P_INSTR_RES 0xab | ||
427 | |||
428 | /* all are 1MB windows */ | ||
429 | |||
430 | #define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000 | ||
431 | #define NETXEN_PCI_CRB_WINDOW(A) \ | ||
432 | (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE) | ||
433 | |||
434 | #define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU) | ||
435 | #define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE) | ||
436 | #define NETXEN_CRB_ROMUSB \ | ||
437 | NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) | ||
438 | #define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) | ||
439 | #define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) | ||
440 | |||
441 | #define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH) | ||
442 | #define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2) | ||
443 | #define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0) | ||
444 | #define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1) | ||
445 | #define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2) | ||
446 | #define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3) | ||
447 | #define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND) | ||
448 | #define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI) | ||
449 | #define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN) | ||
450 | |||
451 | #define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS) | ||
452 | #define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD | ||
453 | |||
454 | #define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR)) | ||
455 | #define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) | ||
456 | #define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) | ||
457 | #define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS)) | ||
458 | #define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK)) | ||
459 | |||
460 | #define NETXEN_PCI_MAPSIZE 128 | ||
461 | #define NETXEN_PCI_DDR_NET (0x00000000UL) | ||
462 | #define NETXEN_PCI_QDR_NET (0x04000000UL) | ||
463 | #define NETXEN_PCI_DIRECT_CRB (0x04400000UL) | ||
464 | #define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL) | ||
465 | #define NETXEN_PCI_OCM0 (0x05000000UL) | ||
466 | #define NETXEN_PCI_OCM0_MAX (0x050fffffUL) | ||
467 | #define NETXEN_PCI_OCM1 (0x05100000UL) | ||
468 | #define NETXEN_PCI_OCM1_MAX (0x051fffffUL) | ||
469 | #define NETXEN_PCI_CRBSPACE (0x06000000UL) | ||
470 | |||
471 | #define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM) | ||
472 | |||
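/*
 * Worked example of the window macros above: NETXEN_HW_PX_MAP_CRB_NIU is
 * entry 6 of the PCI/X map enum, so
 *
 *   NETXEN_CRB_NIU = NETXEN_PCI_CRB_WINDOW(6)
 *                  = NETXEN_PCI_CRBSPACE + 6 * NETXEN_PCI_CRB_WINDOWSIZE
 *                  = 0x06000000 + 6 * 0x00100000
 *                  = 0x06600000
 *
 * and the NETXEN_NIU_* registers further below are offsets inside that
 * 1MB window.
 */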
473 | #define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL) | ||
474 | #define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL) | ||
475 | #define NETXEN_ADDR_OCM0 (0x0000000200000000ULL) | ||
476 | #define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL) | ||
477 | #define NETXEN_ADDR_OCM1 (0x0000000200400000ULL) | ||
478 | #define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL) | ||
479 | #define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL) | ||
480 | #define NETXEN_ADDR_QDR_NET_MAX (0x00000003003fffffULL) | ||
481 | |||
482 | /* 200ms delay in each loop */ | ||
483 | #define NETXEN_NIU_PHY_WAITLEN 200000 | ||
484 | /* 10 seconds before we give up */ | ||
485 | #define NETXEN_NIU_PHY_WAITMAX 50 | ||
486 | #define NETXEN_NIU_MAX_GBE_PORTS 4 | ||
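/*
 * Arithmetic note on the two wait constants above: NETXEN_NIU_PHY_WAITLEN
 * is the per-loop delay (200000, presumably in microseconds, i.e. 200ms)
 * and NETXEN_NIU_PHY_WAITMAX caps the polling at 50 iterations, giving
 * 50 * 200ms = 10 seconds overall.
 */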
487 | |||
488 | #define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000) | ||
489 | |||
490 | #define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004) | ||
491 | #define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008) | ||
492 | #define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c) | ||
493 | #define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010) | ||
494 | #define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014) | ||
495 | #define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018) | ||
496 | #define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c) | ||
497 | #define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020) | ||
498 | #define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024) | ||
499 | #define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028) | ||
500 | #define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c) | ||
501 | #define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030) | ||
502 | #define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034) | ||
503 | #define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038) | ||
504 | #define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c) | ||
505 | #define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040) | ||
506 | #define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044) | ||
507 | #define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048) | ||
508 | |||
509 | #define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c) | ||
510 | |||
511 | #define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050) | ||
512 | #define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054) | ||
513 | #define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058) | ||
514 | #define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c) | ||
515 | #define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060) | ||
516 | #define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064) | ||
517 | #define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068) | ||
518 | #define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c) | ||
519 | #define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070) | ||
520 | #define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074) | ||
521 | #define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078) | ||
522 | #define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c) | ||
523 | #define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088) | ||
524 | #define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c) | ||
525 | #define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090) | ||
526 | #define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094) | ||
527 | #define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098) | ||
528 | #define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc) | ||
529 | #define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128) | ||
530 | |||
531 | #define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450) | ||
532 | |||
533 | #define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c) | ||
534 | #define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120) | ||
535 | #define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124) | ||
536 | |||
537 | #define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000) | ||
538 | |||
539 | #define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010) | ||
540 | #define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014) | ||
541 | #define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018) | ||
542 | #define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c) | ||
543 | |||
544 | #define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ | ||
545 | (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) | ||
546 | #define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ | ||
547 | (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000) | ||
548 | #define NETXEN_NIU_GB_MAC_IPG_IFG(I) \ | ||
549 | (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000) | ||
550 | #define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \ | ||
551 | (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000) | ||
552 | #define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \ | ||
553 | (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000) | ||
554 | #define NETXEN_NIU_GB_TEST_REG(I) \ | ||
555 | (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000) | ||
556 | #define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \ | ||
557 | (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000) | ||
558 | #define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \ | ||
559 | (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000) | ||
560 | #define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \ | ||
561 | (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000) | ||
562 | #define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \ | ||
563 | (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000) | ||
564 | #define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \ | ||
565 | (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000) | ||
566 | #define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \ | ||
567 | (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000) | ||
568 | #define NETXEN_NIU_GB_INTERFACE_CTRL(I) \ | ||
569 | (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000) | ||
570 | #define NETXEN_NIU_GB_INTERFACE_STATUS(I) \ | ||
571 | (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000) | ||
572 | #define NETXEN_NIU_GB_STATION_ADDR_0(I) \ | ||
573 | (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000) | ||
574 | #define NETXEN_NIU_GB_STATION_ADDR_1(I) \ | ||
575 | (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000) | ||
576 | |||
577 | #define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000) | ||
578 | #define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004) | ||
579 | #define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008) | ||
580 | #define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c) | ||
581 | #define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010) | ||
582 | #define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014) | ||
583 | #define NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018) | ||
584 | #define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c) | ||
585 | #define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020) | ||
586 | #define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024) | ||
587 | #define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028) | ||
588 | #define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c) | ||
589 | #define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030) | ||
590 | #define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034) | ||
591 | #define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038) | ||
592 | #define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c) | ||
593 | #define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040) | ||
594 | #define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044) | ||
595 | #define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048) | ||
596 | #define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c) | ||
597 | #define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050) | ||
598 | #define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054) | ||
599 | #define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058) | ||
600 | #define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000) | ||
601 | #define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004) | ||
602 | #define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008) | ||
603 | #define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c) | ||
604 | #define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010) | ||
605 | #define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014) | ||
606 | #define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018) | ||
607 | #define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c) | ||
608 | #define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020) | ||
609 | #define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024) | ||
610 | #define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028) | ||
611 | #define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c) | ||
612 | #define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030) | ||
613 | #define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034) | ||
614 | #define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038) | ||
615 | #define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c) | ||
616 | #define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040) | ||
617 | #define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044) | ||
618 | #define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048) | ||
619 | #define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c) | ||
620 | #define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050) | ||
621 | #define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054) | ||
622 | #define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058) | ||
623 | |||
624 | /* XG Link status */ | ||
625 | #define XG_LINK_UP 0x10 | ||
626 | #define XG_LINK_DOWN 0x20 | ||
627 | |||
628 | #define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) | ||
629 | #define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) | ||
630 | #define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) | ||
631 | #define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154)) | ||
632 | #define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158)) | ||
633 | #define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100)) | ||
634 | |||
635 | #define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120)) | ||
636 | |||
637 | /* Lock IDs for PHY lock */ | ||
638 | #define PHY_LOCK_DRIVER 0x44524956 | ||
639 | |||
640 | /* Used for PS PCI Memory access */ | ||
641 | #define PCIX_PS_OP_ADDR_LO (0x10000) | ||
642 | /* via CRB (PS side only) */ | ||
643 | #define PCIX_PS_OP_ADDR_HI (0x10004) | ||
644 | |||
645 | #define PCIX_INT_VECTOR (0x10100) | ||
646 | #define PCIX_INT_MASK (0x10104) | ||
647 | |||
648 | #define PCIX_MN_WINDOW (0x10200) | ||
649 | #define PCIX_MS_WINDOW (0x10204) | ||
650 | #define PCIX_SN_WINDOW (0x10208) | ||
651 | #define PCIX_CRB_WINDOW (0x10210) | ||
652 | |||
653 | #define PCIX_TARGET_STATUS (0x10118) | ||
654 | #define PCIX_TARGET_MASK (0x10128) | ||
655 | |||
656 | #define PCIX_MSI_F0 (0x13000) | ||
657 | |||
658 | #define PCIX_PS_MEM_SPACE (0x90000) | ||
659 | |||
660 | #define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg)) | ||
661 | #define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg)) | ||
662 | |||
663 | #define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg)) | ||
664 | |||
665 | #define PCIE_MAX_DMA_XFER_SIZE (0x1404c) | ||
666 | |||
667 | #define PCIE_DCR 0x00d8 | ||
668 | |||
669 | #define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */ | ||
670 | #define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ | ||
671 | #define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */ | ||
672 | #define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */ | ||
673 | |||
674 | #define PCIE_TGT_SPLIT_CHICKEN (0x12080) | ||
675 | |||
676 | #define PCIE_MAX_MASTER_SPLIT (0x14048) | ||
677 | |||
678 | #endif /* __NETXEN_NIC_HDR_H_ */ | ||
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c new file mode 100644 index 000000000000..105c24f0ad4c --- /dev/null +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -0,0 +1,1010 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | * | ||
29 | * | ||
30 | * Source file for NIC routines to access the Phantom hardware | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include "netxen_nic.h" | ||
35 | #include "netxen_nic_hw.h" | ||
36 | #include "netxen_nic_phan_reg.h" | ||
37 | |||
38 | /* PCI Windowing for DDR regions. */ | ||
39 | |||
40 | #define ADDR_IN_RANGE(addr, low, high) \ | ||
41 | (((addr) <= (high)) && ((addr) >= (low))) | ||
42 | |||
43 | #define NETXEN_FLASH_BASE (BOOTLD_START) | ||
44 | #define NETXEN_PHANTOM_MEM_BASE (NETXEN_FLASH_BASE) | ||
45 | #define NETXEN_MAX_MTU 8000 | ||
46 | #define NETXEN_MIN_MTU 64 | ||
47 | #define NETXEN_ETH_FCS_SIZE 4 | ||
48 | #define NETXEN_ENET_HEADER_SIZE 14 | ||
49 | #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ | ||
50 | #define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4) | ||
51 | #define NETXEN_NIU_HDRSIZE (0x1 << 6) | ||
52 | #define NETXEN_NIU_TLRSIZE (0x1 << 5) | ||
53 | |||
54 | #define lower32(x) ((u32)((x) & 0xffffffff)) | ||
55 | #define upper32(x) \ | ||
56 | ((u32)(((unsigned long long)(x) >> 32) & 0xffffffff)) | ||
57 | |||
58 | #define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL | ||
59 | #define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL | ||
60 | #define NETXEN_NIC_EPG_PAUSE_ADDR1 0x2200010000c28001ULL | ||
61 | #define NETXEN_NIC_EPG_PAUSE_ADDR2 0x0100088866554433ULL | ||
62 | |||
63 | #define NETXEN_NIC_WINDOW_MARGIN 0x100000 | ||
64 | |||
65 | unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter, | ||
66 | unsigned long long addr); | ||
67 | void netxen_free_hw_resources(struct netxen_adapter *adapter); | ||
68 | |||
69 | int netxen_nic_set_mac(struct net_device *netdev, void *p) | ||
70 | { | ||
71 | struct netxen_port *port = netdev_priv(netdev); | ||
72 | struct netxen_adapter *adapter = port->adapter; | ||
73 | struct sockaddr *addr = p; | ||
74 | |||
75 | if (netif_running(netdev)) | ||
76 | return -EBUSY; | ||
77 | |||
78 | if (!is_valid_ether_addr(addr->sa_data)) | ||
79 | return -EADDRNOTAVAIL; | ||
80 | |||
81 | DPRINTK(INFO, "valid ether addr\n"); | ||
82 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
83 | |||
84 | if (adapter->ops->macaddr_set) | ||
85 | adapter->ops->macaddr_set(port, addr->sa_data); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * netxen_nic_set_multi - Configure multicast addressing and promiscuous mode | ||
92 | */ | ||
93 | void netxen_nic_set_multi(struct net_device *netdev) | ||
94 | { | ||
95 | struct netxen_port *port = netdev_priv(netdev); | ||
96 | struct netxen_adapter *adapter = port->adapter; | ||
97 | struct dev_mc_list *mc_ptr; | ||
98 | __le32 netxen_mac_addr_cntl_data = 0; | ||
99 | |||
100 | mc_ptr = netdev->mc_list; | ||
101 | if (netdev->flags & IFF_PROMISC) { | ||
102 | if (adapter->ops->set_promisc) | ||
103 | adapter->ops->set_promisc(adapter, | ||
104 | port->portnum, | ||
105 | NETXEN_NIU_PROMISC_MODE); | ||
106 | } else { | ||
107 | if (adapter->ops->unset_promisc && | ||
108 | adapter->ahw.boardcfg.board_type | ||
109 | != NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) | ||
110 | adapter->ops->unset_promisc(adapter, | ||
111 | port->portnum, | ||
112 | NETXEN_NIU_NON_PROMISC_MODE); | ||
113 | } | ||
114 | if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { | ||
115 | netxen_nic_mcr_set_mode_select(netxen_mac_addr_cntl_data, 0x03); | ||
116 | netxen_nic_mcr_set_id_pool0(netxen_mac_addr_cntl_data, 0x00); | ||
117 | netxen_nic_mcr_set_id_pool1(netxen_mac_addr_cntl_data, 0x00); | ||
118 | netxen_nic_mcr_set_id_pool2(netxen_mac_addr_cntl_data, 0x00); | ||
119 | netxen_nic_mcr_set_id_pool3(netxen_mac_addr_cntl_data, 0x00); | ||
120 | netxen_nic_mcr_set_enable_xtnd0(netxen_mac_addr_cntl_data); | ||
121 | netxen_nic_mcr_set_enable_xtnd1(netxen_mac_addr_cntl_data); | ||
122 | netxen_nic_mcr_set_enable_xtnd2(netxen_mac_addr_cntl_data); | ||
123 | netxen_nic_mcr_set_enable_xtnd3(netxen_mac_addr_cntl_data); | ||
124 | } else { | ||
125 | netxen_nic_mcr_set_mode_select(netxen_mac_addr_cntl_data, 0x00); | ||
126 | netxen_nic_mcr_set_id_pool0(netxen_mac_addr_cntl_data, 0x00); | ||
127 | netxen_nic_mcr_set_id_pool1(netxen_mac_addr_cntl_data, 0x01); | ||
128 | netxen_nic_mcr_set_id_pool2(netxen_mac_addr_cntl_data, 0x02); | ||
129 | netxen_nic_mcr_set_id_pool3(netxen_mac_addr_cntl_data, 0x03); | ||
130 | } | ||
131 | writel(netxen_mac_addr_cntl_data, | ||
132 | NETXEN_CRB_NORMALIZE(adapter, NETXEN_MAC_ADDR_CNTL_REG)); | ||
133 | if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { | ||
134 | writel(netxen_mac_addr_cntl_data, | ||
135 | NETXEN_CRB_NORMALIZE(adapter, | ||
136 | NETXEN_MULTICAST_ADDR_HI_0)); | ||
137 | } else { | ||
138 | writel(netxen_mac_addr_cntl_data, | ||
139 | NETXEN_CRB_NORMALIZE(adapter, | ||
140 | NETXEN_MULTICAST_ADDR_HI_1)); | ||
141 | } | ||
142 | netxen_mac_addr_cntl_data = 0; | ||
143 | writel(netxen_mac_addr_cntl_data, | ||
144 | NETXEN_CRB_NORMALIZE(adapter, NETXEN_NIU_GB_DROP_WRONGADDR)); | ||
145 | } | ||
146 | |||
147 | /* | ||
148 | * netxen_nic_change_mtu - Change the Maximum Transmission Unit (MTU) | ||
149 | * @returns 0 on success, negative on failure | ||
150 | */ | ||
151 | int netxen_nic_change_mtu(struct net_device *netdev, int mtu) | ||
152 | { | ||
153 | struct netxen_port *port = netdev_priv(netdev); | ||
154 | struct netxen_adapter *adapter = port->adapter; | ||
155 | int eff_mtu = mtu + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE; | ||
156 | |||
157 | if ((eff_mtu > NETXEN_MAX_MTU) || (eff_mtu < NETXEN_MIN_MTU)) { | ||
158 | printk(KERN_ERR "%s: %s %d is not supported.\n", | ||
159 | netxen_nic_driver_name, netdev->name, mtu); | ||
160 | return -EINVAL; | ||
161 | } | ||
162 | |||
163 | if (adapter->ops->set_mtu) | ||
164 | adapter->ops->set_mtu(port, mtu); | ||
165 | netdev->mtu = mtu; | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Check whether the firmware has been downloaded and is ready to run, and | ||
172 | * set up the descriptor addresses in the adapter. | ||
173 | */ | ||
174 | int netxen_nic_hw_resources(struct netxen_adapter *adapter) | ||
175 | { | ||
176 | struct netxen_hardware_context *hw = &adapter->ahw; | ||
177 | u32 state = 0; | ||
178 | void *addr; | ||
179 | void *pause_addr; | ||
180 | int loops = 0, err = 0; | ||
181 | int ctx, ring; | ||
182 | u32 card_cmdring = 0; | ||
183 | struct netxen_rcv_desc_crb *rcv_desc_crb = NULL; | ||
184 | struct netxen_recv_context *recv_ctx; | ||
185 | struct netxen_rcv_desc_ctx *rcv_desc; | ||
186 | |||
187 | DPRINTK(INFO, "crb_base: %lx %lx", NETXEN_PCI_CRBSPACE, | ||
188 | PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCI_CRBSPACE)); | ||
189 | DPRINTK(INFO, "cam base: %lx %lx", NETXEN_CRB_CAM, | ||
190 | pci_base_offset(adapter, NETXEN_CRB_CAM)); | ||
191 | DPRINTK(INFO, "cam RAM: %lx %lx", NETXEN_CAM_RAM_BASE, | ||
192 | pci_base_offset(adapter, NETXEN_CAM_RAM_BASE)); | ||
193 | DPRINTK(INFO, "NIC base:%lx %lx\n", NIC_CRB_BASE_PORT1, | ||
194 | pci_base_offset(adapter, NIC_CRB_BASE_PORT1)); | ||
195 | |||
196 | /* Window 1 call */ | ||
197 | card_cmdring = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_CMDRING)); | ||
198 | |||
199 | DPRINTK(INFO, "Command Peg sends 0x%x for cmdring base\n", | ||
200 | card_cmdring); | ||
201 | |||
202 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
203 | DPRINTK(INFO, "Command Peg ready..waiting for rcv peg\n"); | ||
204 | loops = 0; | ||
205 | state = 0; | ||
206 | /* Window 1 call */ | ||
207 | state = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
208 | recv_crb_registers[ctx]. | ||
209 | crb_rcvpeg_state)); | ||
210 | while (state != PHAN_PEG_RCV_INITIALIZED && loops < 20) { | ||
211 | udelay(100); | ||
212 | /* Window 1 call */ | ||
213 | state = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
214 | recv_crb_registers | ||
215 | [ctx]. | ||
216 | crb_rcvpeg_state)); | ||
217 | loops++; | ||
218 | } | ||
219 | if (loops >= 20) { | ||
220 | printk(KERN_ERR "Rcv Peg initialization not complete:" | ||
221 | "%x.\n", state); | ||
222 | err = -EIO; | ||
223 | return err; | ||
224 | } | ||
225 | } | ||
226 | DPRINTK(INFO, "Recieve Peg ready too. starting stuff\n"); | ||
227 | |||
228 | addr = netxen_alloc(adapter->ahw.pdev, | ||
229 | sizeof(struct cmd_desc_type0) * | ||
230 | adapter->max_tx_desc_count, | ||
231 | &hw->cmd_desc_phys_addr, &hw->cmd_desc_pdev); | ||
232 | |||
233 | if (addr == NULL) { | ||
234 | DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); | ||
235 | return -ENOMEM; | ||
236 | } | ||
237 | |||
238 | pause_addr = netxen_alloc(adapter->ahw.pdev, 512, | ||
239 | (dma_addr_t *) & hw->pause_physaddr, | ||
240 | &hw->pause_pdev); | ||
241 | if (pause_addr == NULL) { | ||
242 | DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); | ||
243 | return -ENOMEM; | ||
244 | } | ||
245 | |||
246 | hw->pauseaddr = (char *)pause_addr; | ||
247 | { | ||
248 | u64 *ptr = (u64 *) pause_addr; | ||
249 | *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR; | ||
250 | *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR; | ||
251 | *ptr++ = NETXEN_NIC_UNIT_PAUSE_ADDR; | ||
252 | *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR; | ||
253 | *ptr++ = NETXEN_NIC_EPG_PAUSE_ADDR1; | ||
254 | *ptr++ = NETXEN_NIC_EPG_PAUSE_ADDR2; | ||
255 | } | ||
256 | |||
257 | hw->cmd_desc_head = (struct cmd_desc_type0 *)addr; | ||
258 | |||
259 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
260 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
261 | |||
262 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
263 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
264 | addr = netxen_alloc(adapter->ahw.pdev, | ||
265 | RCV_DESC_RINGSIZE, | ||
266 | &rcv_desc->phys_addr, | ||
267 | &rcv_desc->phys_pdev); | ||
268 | if (addr == NULL) { | ||
269 | DPRINTK(ERR, "bad return from " | ||
270 | "pci_alloc_consistent\n"); | ||
271 | netxen_free_hw_resources(adapter); | ||
272 | err = -ENOMEM; | ||
273 | return err; | ||
274 | } | ||
275 | rcv_desc->desc_head = (struct rcv_desc *)addr; | ||
276 | } | ||
277 | |||
278 | addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE, | ||
279 | &recv_ctx->rcv_status_desc_phys_addr, | ||
280 | &recv_ctx->rcv_status_desc_pdev); | ||
281 | if (addr == NULL) { | ||
282 | DPRINTK(ERR, "bad return from" | ||
283 | " pci_alloc_consistent\n"); | ||
284 | netxen_free_hw_resources(adapter); | ||
285 | err = -ENOMEM; | ||
286 | return err; | ||
287 | } | ||
288 | recv_ctx->rcv_status_desc_head = (struct status_desc *)addr; | ||
289 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
290 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
291 | rcv_desc_crb = | ||
292 | &recv_crb_registers[ctx].rcv_desc_crb[ring]; | ||
293 | DPRINTK(INFO, "ring #%d crb global ring reg 0x%x\n", | ||
294 | ring, rcv_desc_crb->crb_globalrcv_ring); | ||
295 | /* Window = 1 */ | ||
296 | writel(lower32(rcv_desc->phys_addr), | ||
297 | NETXEN_CRB_NORMALIZE(adapter, | ||
298 | rcv_desc_crb-> | ||
299 | crb_globalrcv_ring)); | ||
300 | DPRINTK(INFO, "GLOBAL_RCV_RING ctx %d, addr 0x%x" | ||
301 | " val 0x%llx," | ||
302 | " virt %p\n", ctx, | ||
303 | rcv_desc_crb->crb_globalrcv_ring, | ||
304 | (unsigned long long)rcv_desc->phys_addr, | ||
305 | rcv_desc->desc_head); | ||
306 | } | ||
307 | |||
308 | /* Window = 1 */ | ||
309 | writel(lower32(recv_ctx->rcv_status_desc_phys_addr), | ||
310 | NETXEN_CRB_NORMALIZE(adapter, | ||
311 | recv_crb_registers[ctx]. | ||
312 | crb_rcvstatus_ring)); | ||
313 | DPRINTK(INFO, "RCVSTATUS_RING, ctx %d, addr 0x%x," | ||
314 | " val 0x%x,virt%p\n", | ||
315 | ctx, | ||
316 | recv_crb_registers[ctx].crb_rcvstatus_ring, | ||
317 | (unsigned long long)recv_ctx->rcv_status_desc_phys_addr, | ||
318 | recv_ctx->rcv_status_desc_head); | ||
319 | } | ||
320 | /* Window = 1 */ | ||
321 | writel(lower32(hw->pause_physaddr), | ||
322 | NETXEN_CRB_NORMALIZE(adapter, CRB_PAUSE_ADDR_LO)); | ||
323 | writel(upper32(hw->pause_physaddr), | ||
324 | NETXEN_CRB_NORMALIZE(adapter, CRB_PAUSE_ADDR_HI)); | ||
325 | |||
326 | writel(lower32(hw->cmd_desc_phys_addr), | ||
327 | NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO)); | ||
328 | writel(upper32(hw->cmd_desc_phys_addr), | ||
329 | NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_HI)); | ||
330 | return err; | ||
331 | } | ||
332 | |||
333 | void netxen_free_hw_resources(struct netxen_adapter *adapter) | ||
334 | { | ||
335 | struct netxen_recv_context *recv_ctx; | ||
336 | struct netxen_rcv_desc_ctx *rcv_desc; | ||
337 | int ctx, ring; | ||
338 | |||
339 | if (adapter->ahw.cmd_desc_head != NULL) { | ||
340 | pci_free_consistent(adapter->ahw.cmd_desc_pdev, | ||
341 | sizeof(struct cmd_desc_type0) * | ||
342 | adapter->max_tx_desc_count, | ||
343 | adapter->ahw.cmd_desc_head, | ||
344 | adapter->ahw.cmd_desc_phys_addr); | ||
345 | adapter->ahw.cmd_desc_head = NULL; | ||
346 | } | ||
347 | if (adapter->ahw.pauseaddr != NULL) { | ||
348 | pci_free_consistent(adapter->ahw.pause_pdev, 512, | ||
349 | adapter->ahw.pauseaddr, | ||
350 | adapter->ahw.pause_physaddr); | ||
351 | adapter->ahw.pauseaddr = NULL; | ||
352 | } | ||
353 | |||
354 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
355 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
356 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
357 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
358 | |||
359 | if (rcv_desc->desc_head != NULL) { | ||
360 | pci_free_consistent(rcv_desc->phys_pdev, | ||
361 | RCV_DESC_RINGSIZE, | ||
362 | rcv_desc->desc_head, | ||
363 | rcv_desc->phys_addr); | ||
364 | rcv_desc->desc_head = NULL; | ||
365 | } | ||
366 | } | ||
367 | |||
368 | if (recv_ctx->rcv_status_desc_head != NULL) { | ||
369 | pci_free_consistent(recv_ctx->rcv_status_desc_pdev, | ||
370 | STATUS_DESC_RINGSIZE, | ||
371 | recv_ctx->rcv_status_desc_head, | ||
372 | recv_ctx-> | ||
373 | rcv_status_desc_phys_addr); | ||
374 | recv_ctx->rcv_status_desc_head = NULL; | ||
375 | } | ||
376 | } | ||
377 | } | ||
378 | |||
379 | void netxen_tso_check(struct netxen_adapter *adapter, | ||
380 | struct cmd_desc_type0 *desc, struct sk_buff *skb) | ||
381 | { | ||
382 | if (desc->mss) { | ||
383 | desc->total_hdr_length = sizeof(struct ethhdr) + | ||
384 | ((skb->nh.iph)->ihl * sizeof(u32)) + | ||
385 | ((skb->h.th)->doff * sizeof(u32)); | ||
386 | desc->opcode = TX_TCP_LSO; | ||
387 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { | ||
388 | if (skb->nh.iph->protocol == IPPROTO_TCP) { | ||
389 | desc->opcode = TX_TCP_PKT; | ||
390 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { | ||
391 | desc->opcode = TX_UDP_PKT; | ||
392 | } else { | ||
393 | return; | ||
394 | } | ||
395 | } | ||
396 | adapter->stats.xmitcsummed++; | ||
397 | CMD_DESC_TCP_HDR_OFFSET_WRT(desc, skb->h.raw - skb->data); | ||
398 | desc->length_tcp_hdr = cpu_to_le32(desc->length_tcp_hdr); | ||
399 | desc->ip_hdr_offset = skb->nh.raw - skb->data; | ||
400 | } | ||
401 | |||
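As a worked illustration of the header-length arithmetic in netxen_tso_check() above: for a plain IPv4/TCP frame with no options, sizeof(struct ethhdr) is 14 bytes, ihl = 5 gives 5 * 4 = 20 bytes of IP header, and doff = 5 gives 5 * 4 = 20 bytes of TCP header, so total_hdr_length works out to 14 + 20 + 20 = 54 bytes in the LSO descriptor.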
402 | int netxen_is_flash_supported(struct netxen_adapter *adapter) | ||
403 | { | ||
404 | const int locs[] = { 0, 0x4, 0x100, 0x4000, 0x4128 }; | ||
405 | int addr, val01, val02, i, j; | ||
406 | |||
407 | /* If the flash is smaller than 4MB, reads at 1MB-window offsets alias back to the same data; detect that and fail. */ | ||
408 | for (j = 1; j < 4; j++) { | ||
409 | addr = j * NETXEN_NIC_WINDOW_MARGIN; | ||
410 | for (i = 0; i < (sizeof(locs) / sizeof(locs[0])); i++) { | ||
411 | if (netxen_rom_fast_read(adapter, locs[i], &val01) == 0 | ||
412 | && netxen_rom_fast_read(adapter, (addr + locs[i]), | ||
413 | &val02) == 0) { | ||
414 | if (val01 == val02) | ||
415 | return -1; | ||
416 | } else | ||
417 | return -1; | ||
418 | } | ||
419 | } | ||
420 | |||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, | ||
425 | int size, u32 * buf) | ||
426 | { | ||
427 | int i, addr; | ||
428 | u32 *ptr32; | ||
429 | |||
430 | addr = base; | ||
431 | ptr32 = buf; | ||
432 | for (i = 0; i < size / sizeof(u32); i++) { | ||
433 | if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) | ||
434 | return -1; | ||
435 | ptr32++; | ||
436 | addr += sizeof(u32); | ||
437 | } | ||
438 | if ((char *)buf + size > (char *)ptr32) { | ||
439 | u32 local; | ||
440 | |||
441 | if (netxen_rom_fast_read(adapter, addr, &local) == -1) | ||
442 | return -1; | ||
443 | memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); | ||
444 | } | ||
445 | |||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[]) | ||
450 | { | ||
451 | u32 *pmac = (u32 *) & mac[0]; | ||
452 | |||
453 | if (netxen_get_flash_block(adapter, | ||
454 | USER_START + | ||
455 | offsetof(struct netxen_new_user_info, | ||
456 | mac_addr), | ||
457 | FLASH_NUM_PORTS * sizeof(u64), pmac) == -1) { | ||
458 | return -1; | ||
459 | } | ||
460 | if (*mac == ~0ULL) { | ||
461 | if (netxen_get_flash_block(adapter, | ||
462 | USER_START_OLD + | ||
463 | offsetof(struct netxen_user_old_info, | ||
464 | mac_addr), | ||
465 | FLASH_NUM_PORTS * sizeof(u64), | ||
466 | pmac) == -1) | ||
467 | return -1; | ||
468 | if (*mac == ~0ULL) | ||
469 | return -1; | ||
470 | } | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | /* | ||
475 | * Changes the CRB window to the specified window. | ||
476 | */ | ||
477 | void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw) | ||
478 | { | ||
479 | void __iomem *offset; | ||
480 | u32 tmp; | ||
481 | int count = 0; | ||
482 | |||
483 | if (adapter->curr_window == wndw) | ||
484 | return; | ||
485 | |||
486 | /* | ||
487 | * Move the CRB window. | ||
488 | * We need to write to the "direct access" region of PCI | ||
489 | * to avoid a race condition where the window register has | ||
490 | * not been successfully written across CRB before the target | ||
491 | * register address is received by PCI. The direct region bypasses | ||
492 | * the CRB bus. | ||
493 | */ | ||
494 | offset = | ||
495 | PCI_OFFSET_SECOND_RANGE(adapter, | ||
496 | NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW)); | ||
497 | |||
498 | if (wndw & 0x1) | ||
499 | wndw = NETXEN_WINDOW_ONE; | ||
500 | |||
501 | writel(wndw, offset); | ||
502 | |||
503 | /* MUST make sure window is set before we forge on... */ | ||
504 | while ((tmp = readl(offset)) != wndw) { | ||
505 | printk(KERN_WARNING "%s: %s WARNING: CRB window value not " | ||
506 | "registered properly: 0x%08x.\n", | ||
507 | netxen_nic_driver_name, __FUNCTION__, tmp); | ||
508 | mdelay(1); | ||
509 | if (count >= 10) | ||
510 | break; | ||
511 | count++; | ||
512 | } | ||
513 | |||
514 | adapter->curr_window = wndw; | ||
515 | } | ||
516 | |||
517 | void netxen_load_firmware(struct netxen_adapter *adapter) | ||
518 | { | ||
519 | int i; | ||
520 | long data, size = 0; | ||
521 | long flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE; | ||
522 | u64 off; | ||
523 | void __iomem *addr; | ||
524 | |||
525 | size = NETXEN_FIRMWARE_LEN; | ||
526 | writel(1, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST)); | ||
527 | |||
528 | for (i = 0; i < size; i++) { | ||
529 | if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0) { | ||
530 | DPRINTK(ERR, | ||
531 | "Error in netxen_rom_fast_read(). Will skip" | ||
532 | "loading flash image\n"); | ||
533 | return; | ||
534 | } | ||
535 | off = netxen_nic_pci_set_window(adapter, memaddr); | ||
536 | addr = pci_base_offset(adapter, off); | ||
537 | writel(data, addr); | ||
538 | flashaddr += 4; | ||
539 | memaddr += 4; | ||
540 | } | ||
541 | udelay(100); | ||
542 | /* make sure Casper is powered on */ | ||
543 | writel(0x3fff, | ||
544 | NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL)); | ||
545 | writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST)); | ||
546 | |||
547 | udelay(100); | ||
548 | } | ||
549 | |||
550 | int | ||
551 | netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data, | ||
552 | int len) | ||
553 | { | ||
554 | void __iomem *addr; | ||
555 | |||
556 | if (ADDR_IN_WINDOW1(off)) { | ||
557 | addr = NETXEN_CRB_NORMALIZE(adapter, off); | ||
558 | } else { /* Window 0 */ | ||
559 | addr = pci_base_offset(adapter, off); | ||
560 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
561 | } | ||
562 | |||
563 | DPRINTK(INFO, "writing to base %lx offset %llx addr %p" | ||
564 | " data %llx len %d\n", | ||
565 | pci_base(adapter, off), off, addr, | ||
566 | *(unsigned long long *)data, len); | ||
567 | if (!addr) { | ||
568 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
569 | return 1; | ||
570 | } | ||
571 | |||
572 | switch (len) { | ||
573 | case 1: | ||
574 | writeb(*(u8 *) data, addr); | ||
575 | break; | ||
576 | case 2: | ||
577 | writew(*(u16 *) data, addr); | ||
578 | break; | ||
579 | case 4: | ||
580 | writel(*(u32 *) data, addr); | ||
581 | break; | ||
582 | case 8: | ||
583 | writeq(*(u64 *) data, addr); | ||
584 | break; | ||
585 | default: | ||
586 | DPRINTK(INFO, | ||
587 | "writing data %lx to offset %llx, num words=%d\n", | ||
588 | *(unsigned long *)data, off, (len >> 3)); | ||
589 | |||
590 | netxen_nic_hw_block_write64((u64 __iomem *) data, addr, | ||
591 | (len >> 3)); | ||
592 | break; | ||
593 | } | ||
594 | if (!ADDR_IN_WINDOW1(off)) | ||
595 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
596 | |||
597 | return 0; | ||
598 | } | ||
599 | |||
600 | int | ||
601 | netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data, | ||
602 | int len) | ||
603 | { | ||
604 | void __iomem *addr; | ||
605 | |||
606 | if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ | ||
607 | addr = NETXEN_CRB_NORMALIZE(adapter, off); | ||
608 | } else { /* Window 0 */ | ||
609 | addr = pci_base_offset(adapter, off); | ||
610 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
611 | } | ||
612 | |||
613 | DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n", | ||
614 | pci_base(adapter, off), off, addr); | ||
615 | if (!addr) { | ||
616 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
617 | return 1; | ||
618 | } | ||
619 | switch (len) { | ||
620 | case 1: | ||
621 | *(u8 *) data = readb(addr); | ||
622 | break; | ||
623 | case 2: | ||
624 | *(u16 *) data = readw(addr); | ||
625 | break; | ||
626 | case 4: | ||
627 | *(u32 *) data = readl(addr); | ||
628 | break; | ||
629 | case 8: | ||
630 | *(u64 *) data = readq(addr); | ||
631 | break; | ||
632 | default: | ||
633 | netxen_nic_hw_block_read64((u64 __iomem *) data, addr, | ||
634 | (len >> 3)); | ||
635 | break; | ||
636 | } | ||
637 | DPRINTK(INFO, "read %lx\n", *(unsigned long *)data); | ||
638 | |||
639 | if (!ADDR_IN_WINDOW1(off)) | ||
640 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
641 | |||
642 | return 0; | ||
643 | } | ||
644 | |||
645 | void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val) | ||
646 | { /* Only for window 1 */ | ||
647 | void __iomem *addr; | ||
648 | |||
649 | addr = NETXEN_CRB_NORMALIZE(adapter, off); | ||
650 | DPRINTK(INFO, "writing to base %lx offset %llx addr %p data %x\n", | ||
651 | pci_base(adapter, off), off, addr, val); | ||
652 | writel(val, addr); | ||
653 | |||
654 | } | ||
655 | |||
656 | int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off) | ||
657 | { /* Only for window 1 */ | ||
658 | void __iomem *addr; | ||
659 | int val; | ||
660 | |||
661 | addr = NETXEN_CRB_NORMALIZE(adapter, off); | ||
662 | DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n", | ||
663 | adapter->ahw.pci_base, off, addr); | ||
664 | val = readl(addr); | ||
665 | writel(val, addr); | ||
666 | |||
667 | return val; | ||
668 | } | ||
669 | |||
670 | /* Change the window to 0, write and change back to window 1. */ | ||
671 | void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value) | ||
672 | { | ||
673 | void __iomem *addr; | ||
674 | |||
675 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
676 | addr = pci_base_offset(adapter, index); | ||
677 | writel(value, addr); | ||
678 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
679 | } | ||
680 | |||
681 | /* Change the window to 0, read and change back to window 1. */ | ||
682 | void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value) | ||
683 | { | ||
684 | void __iomem *addr; | ||
685 | |||
686 | addr = pci_base_offset(adapter, index); | ||
687 | |||
688 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
689 | *value = readl(addr); | ||
690 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
691 | } | ||
692 | |||
693 | int netxen_pci_set_window_warning_count = 0; | ||
694 | |||
695 | unsigned long | ||
696 | netxen_nic_pci_set_window(struct netxen_adapter *adapter, | ||
697 | unsigned long long addr) | ||
698 | { | ||
699 | static int ddr_mn_window = -1; | ||
700 | static int qdr_sn_window = -1; | ||
701 | int window; | ||
702 | |||
703 | if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { | ||
704 | /* DDR network side */ | ||
705 | addr -= NETXEN_ADDR_DDR_NET; | ||
706 | window = (addr >> 25) & 0x3ff; | ||
707 | if (ddr_mn_window != window) { | ||
708 | ddr_mn_window = window; | ||
709 | writel(window, PCI_OFFSET_SECOND_RANGE(adapter, | ||
710 | NETXEN_PCIX_PH_REG | ||
711 | (PCIX_MN_WINDOW))); | ||
712 | /* MUST make sure window is set before we forge on... */ | ||
713 | readl(PCI_OFFSET_SECOND_RANGE(adapter, | ||
714 | NETXEN_PCIX_PH_REG | ||
715 | (PCIX_MN_WINDOW))); | ||
716 | } | ||
717 | addr -= (window * NETXEN_WINDOW_ONE); | ||
718 | addr += NETXEN_PCI_DDR_NET; | ||
719 | } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { | ||
720 | addr -= NETXEN_ADDR_OCM0; | ||
721 | addr += NETXEN_PCI_OCM0; | ||
722 | } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { | ||
723 | addr -= NETXEN_ADDR_OCM1; | ||
724 | addr += NETXEN_PCI_OCM1; | ||
725 | } else | ||
726 | if (ADDR_IN_RANGE | ||
727 | (addr, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX)) { | ||
728 | /* QDR network side */ | ||
729 | addr -= NETXEN_ADDR_QDR_NET; | ||
730 | window = (addr >> 22) & 0x3f; | ||
731 | if (qdr_sn_window != window) { | ||
732 | qdr_sn_window = window; | ||
733 | writel((window << 22), | ||
734 | PCI_OFFSET_SECOND_RANGE(adapter, | ||
735 | NETXEN_PCIX_PH_REG | ||
736 | (PCIX_SN_WINDOW))); | ||
737 | /* MUST make sure window is set before we forge on... */ | ||
738 | readl(PCI_OFFSET_SECOND_RANGE(adapter, | ||
739 | NETXEN_PCIX_PH_REG | ||
740 | (PCIX_SN_WINDOW))); | ||
741 | } | ||
742 | addr -= (window * 0x400000); | ||
743 | addr += NETXEN_PCI_QDR_NET; | ||
744 | } else { | ||
745 | /* | ||
746 | * peg gdb frequently accesses memory that doesn't exist, | ||
747 | * this limits the chit chat so debugging isn't slowed down. | ||
748 | */ | ||
749 | if ((netxen_pci_set_window_warning_count++ < 8) | ||
750 | || (netxen_pci_set_window_warning_count % 64 == 0)) | ||
751 | printk("%s: Warning:netxen_nic_pci_set_window()" | ||
752 | " Unknown address range!\n", | ||
753 | netxen_nic_driver_name); | ||
754 | |||
755 | } | ||
756 | return addr; | ||
757 | } | ||
758 | |||
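The window arithmetic in netxen_nic_pci_set_window() is easier to follow with a concrete value. The sketch below is illustrative only (plain user-space C with a made-up DDR offset, not driver code); it mirrors the shift-by-25 computation of the DDR branch, where NETXEN_WINDOW_ONE (0x2000000) is one 32 MB window. The QDR branch works the same way with 4 MB windows (shift by 22), except that the window index is written to PCIX_SN_WINDOW pre-shifted as (window << 22).

#include <stdio.h>

int main(void)
{
	/* Hypothetical card-relative DDR offset, i.e. addr - NETXEN_ADDR_DDR_NET. */
	unsigned long long ddr_offset = 0x04800000ULL;	/* 72 MB into DDR space */
	unsigned long long window_one = 0x2000000ULL;	/* 32 MB per MN window */

	unsigned long long window = (ddr_offset >> 25) & 0x3ff;		/* -> 2 */
	unsigned long long in_window = ddr_offset - window * window_one;	/* -> 0x800000 */

	/* The driver programs PCIX_MN_WINDOW with 'window', reads it back to make
	 * sure the write has taken effect, then accesses NETXEN_PCI_DDR_NET + in_window. */
	printf("window=%llu, offset within window=0x%llx\n", window, in_window);
	return 0;
}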
759 | int netxen_nic_get_board_info(struct netxen_adapter *adapter) | ||
760 | { | ||
761 | int rv = 0; | ||
762 | int addr = BRDCFG_START; | ||
763 | struct netxen_board_info *boardinfo; | ||
764 | int index; | ||
765 | u32 *ptr32; | ||
766 | |||
767 | boardinfo = &adapter->ahw.boardcfg; | ||
768 | ptr32 = (u32 *) boardinfo; | ||
769 | |||
770 | for (index = 0; index < sizeof(struct netxen_board_info) / sizeof(u32); | ||
771 | index++) { | ||
772 | if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) { | ||
773 | return -EIO; | ||
774 | } | ||
775 | ptr32++; | ||
776 | addr += sizeof(u32); | ||
777 | } | ||
778 | if (boardinfo->magic != NETXEN_BDINFO_MAGIC) { | ||
779 | printk("%s: ERROR reading %s board config." | ||
780 | " Read %x, expected %x\n", netxen_nic_driver_name, | ||
781 | netxen_nic_driver_name, | ||
782 | boardinfo->magic, NETXEN_BDINFO_MAGIC); | ||
783 | rv = -1; | ||
784 | } | ||
785 | if (boardinfo->header_version != NETXEN_BDINFO_VERSION) { | ||
786 | printk("%s: Unknown board config version." | ||
787 | " Read %x, expected %x\n", netxen_nic_driver_name, | ||
788 | boardinfo->header_version, NETXEN_BDINFO_VERSION); | ||
789 | rv = -1; | ||
790 | } | ||
791 | |||
792 | DPRINTK(INFO, "Discovered board type:0x%x ", boardinfo->board_type); | ||
793 | switch ((netxen_brdtype_t) boardinfo->board_type) { | ||
794 | case NETXEN_BRDTYPE_P2_SB35_4G: | ||
795 | adapter->ahw.board_type = NETXEN_NIC_GBE; | ||
796 | break; | ||
797 | case NETXEN_BRDTYPE_P2_SB31_10G: | ||
798 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: | ||
799 | case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: | ||
800 | case NETXEN_BRDTYPE_P2_SB31_10G_CX4: | ||
801 | adapter->ahw.board_type = NETXEN_NIC_XGBE; | ||
802 | break; | ||
803 | case NETXEN_BRDTYPE_P1_BD: | ||
804 | case NETXEN_BRDTYPE_P1_SB: | ||
805 | case NETXEN_BRDTYPE_P1_SMAX: | ||
806 | case NETXEN_BRDTYPE_P1_SOCK: | ||
807 | adapter->ahw.board_type = NETXEN_NIC_GBE; | ||
808 | break; | ||
809 | default: | ||
810 | printk("%s: Unknown(%x)\n", netxen_nic_driver_name, | ||
811 | boardinfo->board_type); | ||
812 | break; | ||
813 | } | ||
814 | |||
815 | return rv; | ||
816 | } | ||
817 | |||
818 | /* NIU access sections */ | ||
819 | |||
820 | int netxen_nic_set_mtu_gb(struct netxen_port *port, int new_mtu) | ||
821 | { | ||
822 | struct netxen_adapter *adapter = port->adapter; | ||
823 | netxen_nic_write_w0(adapter, | ||
824 | NETXEN_NIU_GB_MAX_FRAME_SIZE(port->portnum), | ||
825 | new_mtu); | ||
826 | return 0; | ||
827 | } | ||
828 | |||
829 | int netxen_nic_set_mtu_xgb(struct netxen_port *port, int new_mtu) | ||
830 | { | ||
831 | struct netxen_adapter *adapter = port->adapter; | ||
832 | new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; | ||
833 | netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); | ||
834 | return 0; | ||
835 | } | ||
836 | |||
837 | void netxen_nic_init_niu_gb(struct netxen_adapter *adapter) | ||
838 | { | ||
839 | int portno; | ||
840 | for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++) | ||
841 | netxen_niu_gbe_init_port(adapter, portno); | ||
842 | } | ||
843 | |||
844 | void netxen_nic_stop_all_ports(struct netxen_adapter *adapter) | ||
845 | { | ||
846 | int port_nr; | ||
847 | struct netxen_port *port; | ||
848 | |||
849 | for (port_nr = 0; port_nr < adapter->ahw.max_ports; port_nr++) { | ||
850 | port = adapter->port[port_nr]; | ||
851 | if (adapter->ops->stop_port) | ||
852 | adapter->ops->stop_port(adapter, port->portnum); | ||
853 | } | ||
854 | } | ||
855 | |||
856 | void | ||
857 | netxen_crb_writelit_adapter(struct netxen_adapter *adapter, unsigned long off, | ||
858 | int data) | ||
859 | { | ||
860 | void __iomem *addr; | ||
861 | |||
862 | if (ADDR_IN_WINDOW1(off)) { | ||
863 | writel(data, NETXEN_CRB_NORMALIZE(adapter, off)); | ||
864 | } else { | ||
865 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
866 | addr = pci_base_offset(adapter, off); | ||
867 | writel(data, addr); | ||
868 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
869 | } | ||
870 | } | ||
871 | |||
872 | void netxen_nic_set_link_parameters(struct netxen_port *port) | ||
873 | { | ||
874 | struct netxen_adapter *adapter = port->adapter; | ||
875 | __le32 status; | ||
876 | u16 autoneg; | ||
877 | __le32 mode; | ||
878 | |||
879 | netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); | ||
880 | if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ | ||
881 | if (adapter->ops->phy_read | ||
882 | && adapter->ops-> | ||
883 | phy_read(adapter, port->portnum, | ||
884 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
885 | &status) == 0) { | ||
886 | if (netxen_get_phy_link(status)) { | ||
887 | switch (netxen_get_phy_speed(status)) { | ||
888 | case 0: | ||
889 | port->link_speed = SPEED_10; | ||
890 | break; | ||
891 | case 1: | ||
892 | port->link_speed = SPEED_100; | ||
893 | break; | ||
894 | case 2: | ||
895 | port->link_speed = SPEED_1000; | ||
896 | break; | ||
897 | default: | ||
898 | port->link_speed = -1; | ||
899 | break; | ||
900 | } | ||
901 | switch (netxen_get_phy_duplex(status)) { | ||
902 | case 0: | ||
903 | port->link_duplex = DUPLEX_HALF; | ||
904 | break; | ||
905 | case 1: | ||
906 | port->link_duplex = DUPLEX_FULL; | ||
907 | break; | ||
908 | default: | ||
909 | port->link_duplex = -1; | ||
910 | break; | ||
911 | } | ||
912 | if (adapter->ops->phy_read | ||
913 | && adapter->ops-> | ||
914 | phy_read(adapter, port->portnum, | ||
915 | NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, | ||
916 | (__le32 *) & autoneg) != 0) | ||
917 | port->link_autoneg = autoneg; | ||
918 | } else | ||
919 | goto link_down; | ||
920 | } else { | ||
921 | link_down: | ||
922 | port->link_speed = -1; | ||
923 | port->link_duplex = -1; | ||
924 | } | ||
925 | } | ||
926 | } | ||
927 | |||
928 | void netxen_nic_flash_print(struct netxen_adapter *adapter) | ||
929 | { | ||
930 | int valid = 1; | ||
931 | u32 fw_major = 0; | ||
932 | u32 fw_minor = 0; | ||
933 | u32 fw_build = 0; | ||
934 | char brd_name[NETXEN_MAX_SHORT_NAME]; | ||
935 | struct netxen_new_user_info user_info; | ||
936 | int i, addr = USER_START; | ||
937 | u32 *ptr32; | ||
938 | |||
939 | struct netxen_board_info *board_info = &(adapter->ahw.boardcfg); | ||
940 | if (board_info->magic != NETXEN_BDINFO_MAGIC) { | ||
941 | printk | ||
942 | ("NetXen: unknown board config magic; read 0x%x, expected 0x%x\n", | ||
943 | board_info->magic, NETXEN_BDINFO_MAGIC); | ||
944 | valid = 0; | ||
945 | } | ||
946 | if (board_info->header_version != NETXEN_BDINFO_VERSION) { | ||
947 | printk("NetXen Unknown board config version." | ||
948 | " Read %x, expected %x\n", | ||
949 | board_info->header_version, NETXEN_BDINFO_VERSION); | ||
950 | valid = 0; | ||
951 | } | ||
952 | if (valid) { | ||
953 | ptr32 = (u32 *) & user_info; | ||
954 | for (i = 0; | ||
955 | i < sizeof(struct netxen_new_user_info) / sizeof(u32); | ||
956 | i++) { | ||
957 | if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) { | ||
958 | printk("%s: ERROR reading %s board userarea.\n", | ||
959 | netxen_nic_driver_name, | ||
960 | netxen_nic_driver_name); | ||
961 | return; | ||
962 | } | ||
963 | ptr32++; | ||
964 | addr += sizeof(u32); | ||
965 | } | ||
966 | get_brd_name_by_type(board_info->board_type, brd_name); | ||
967 | |||
968 | printk("NetXen %s Board S/N %s Chip id 0x%x\n", | ||
969 | brd_name, user_info.serial_num, board_info->chip_id); | ||
970 | |||
971 | printk("NetXen %s Board #%d, Chip id 0x%x\n", | ||
972 | board_info->board_type == 0x0b ? "XGB" : "GBE", | ||
973 | board_info->board_num, board_info->chip_id); | ||
974 | fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
975 | NETXEN_FW_VERSION_MAJOR)); | ||
976 | fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
977 | NETXEN_FW_VERSION_MINOR)); | ||
978 | fw_build = | ||
979 | readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB)); | ||
980 | |||
981 | printk("NetXen Firmware version %d.%d.%d\n", fw_major, fw_minor, | ||
982 | fw_build); | ||
983 | } | ||
984 | if (fw_major != _NETXEN_NIC_LINUX_MAJOR) { | ||
985 | printk(KERN_ERR "The mismatch in driver version and firmware " | ||
986 | "version major number\n" | ||
987 | "Driver version major number = %d \t" | ||
988 | "Firmware version major number = %d \n", | ||
989 | _NETXEN_NIC_LINUX_MAJOR, fw_major); | ||
990 | adapter->driver_mismatch = 1; | ||
991 | } | ||
992 | if (fw_minor != _NETXEN_NIC_LINUX_MINOR) { | ||
993 | printk(KERN_ERR "The mismatch in driver version and firmware " | ||
994 | "version minor number\n" | ||
995 | "Driver version minor number = %d \t" | ||
996 | "Firmware version minor number = %d \n", | ||
997 | _NETXEN_NIC_LINUX_MINOR, fw_minor); | ||
998 | adapter->driver_mismatch = 1; | ||
999 | } | ||
1000 | if (adapter->driver_mismatch) | ||
1001 | printk(KERN_INFO "Use the driver with version no %d.%d.xxx\n", | ||
1002 | fw_major, fw_minor); | ||
1003 | } | ||
1004 | |||
1005 | int netxen_crb_read_val(struct netxen_adapter *adapter, unsigned long off) | ||
1006 | { | ||
1007 | int data; | ||
1008 | netxen_nic_hw_read_wx(adapter, off, &data, 4); | ||
1009 | return data; | ||
1010 | } | ||
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h new file mode 100644 index 000000000000..201a636b7ab8 --- /dev/null +++ b/drivers/net/netxen/netxen_nic_hw.h | |||
@@ -0,0 +1,482 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | * | ||
29 | * | ||
30 | * Structures, enums, and macros for the MAC | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #ifndef __NETXEN_NIC_HW_H_ | ||
35 | #define __NETXEN_NIC_HW_H_ | ||
36 | |||
37 | #include "netxen_nic_hdr.h" | ||
38 | |||
39 | /* Hardware memory size of 128 meg */ | ||
40 | #define NETXEN_MEMADDR_MAX (128 * 1024 * 1024) | ||
41 | |||
42 | #ifndef readq | ||
43 | static inline u64 readq(void __iomem * addr) | ||
44 | { | ||
45 | return readl(addr) | (((u64) readl(addr + 4)) << 32LL); | ||
46 | } | ||
47 | #endif | ||
48 | |||
49 | #ifndef writeq | ||
50 | static inline void writeq(u64 val, void __iomem * addr) | ||
51 | { | ||
52 | writel(((u32) (val)), (addr)); | ||
53 | writel(((u32) (val >> 32)), (addr + 4)); | ||
54 | } | ||
55 | #endif | ||
56 | |||
57 | static inline void netxen_nic_hw_block_write64(u64 __iomem * data_ptr, | ||
58 | u64 __iomem * addr, | ||
59 | int num_words) | ||
60 | { | ||
61 | int num; | ||
62 | for (num = 0; num < num_words; num++) { | ||
63 | writeq(readq((void __iomem *)data_ptr), addr); | ||
64 | addr++; | ||
65 | data_ptr++; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static inline void netxen_nic_hw_block_read64(u64 __iomem * data_ptr, | ||
70 | u64 __iomem * addr, int num_words) | ||
71 | { | ||
72 | int num; | ||
73 | for (num = 0; num < num_words; num++) { | ||
74 | writeq(readq((void __iomem *)addr), data_ptr); | ||
75 | addr++; | ||
76 | data_ptr++; | ||
77 | } | ||
78 | |||
79 | } | ||
80 | |||
81 | struct netxen_adapter; | ||
82 | |||
83 | #define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) | ||
84 | |||
85 | #define NETXEN_NIC_LOCKED_READ_REG(X, Y) \ | ||
86 | addr = pci_base_offset(adapter, (X)); \ | ||
87 | *(u32 *)Y = readl(addr); | ||
88 | |||
89 | struct netxen_port; | ||
90 | void netxen_nic_set_link_parameters(struct netxen_port *port); | ||
91 | void netxen_nic_flash_print(struct netxen_adapter *adapter); | ||
92 | int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, | ||
93 | void *data, int len); | ||
94 | void netxen_crb_writelit_adapter(struct netxen_adapter *adapter, | ||
95 | unsigned long off, int data); | ||
96 | int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, | ||
97 | void *data, int len); | ||
98 | |||
99 | typedef u8 netxen_ethernet_macaddr_t[6]; | ||
100 | |||
101 | /* Nibble or Byte mode for phy interface (GbE mode only) */ | ||
102 | typedef enum { | ||
103 | NETXEN_NIU_10_100_MB = 0, | ||
104 | NETXEN_NIU_1000_MB | ||
105 | } netxen_niu_gbe_ifmode_t; | ||
106 | |||
107 | #define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) | ||
108 | |||
109 | /* | ||
110 | * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) | ||
111 | * | ||
112 | * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable | ||
113 | * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream | ||
114 | * Bit 2 : enable_rx => 1:enable frame recv, 0:disable | ||
115 | * Bit 3 : rx_synced => R/O: recv enable synched to recv stream | ||
116 | * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable | ||
117 | * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore | ||
118 | * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal | ||
119 | * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op | ||
120 | * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op | ||
121 | * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op | ||
122 | * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op | ||
123 | * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op | ||
124 | */ | ||
125 | |||
126 | #define netxen_gb_enable_tx(config_word) \ | ||
127 | set_bit(0, (unsigned long*)(&config_word)) | ||
128 | #define netxen_gb_enable_rx(config_word) \ | ||
129 | set_bit(2, (unsigned long*)(&config_word)) | ||
130 | #define netxen_gb_tx_flowctl(config_word) \ | ||
131 | set_bit(4, (unsigned long*)(&config_word)) | ||
132 | #define netxen_gb_rx_flowctl(config_word) \ | ||
133 | set_bit(5, (unsigned long*)(&config_word)) | ||
134 | #define netxen_gb_tx_reset_pb(config_word) \ | ||
135 | set_bit(16, (unsigned long*)(&config_word)) | ||
136 | #define netxen_gb_rx_reset_pb(config_word) \ | ||
137 | set_bit(17, (unsigned long*)(&config_word)) | ||
138 | #define netxen_gb_tx_reset_mac(config_word) \ | ||
139 | set_bit(18, (unsigned long*)(&config_word)) | ||
140 | #define netxen_gb_rx_reset_mac(config_word) \ | ||
141 | set_bit(19, (unsigned long*)(&config_word)) | ||
142 | #define netxen_gb_soft_reset(config_word) \ | ||
143 | set_bit(31, (unsigned long*)(&config_word)) | ||
144 | |||
145 | #define netxen_gb_unset_tx_flowctl(config_word) \ | ||
146 | clear_bit(4, (unsigned long *)(&config_word)) | ||
147 | #define netxen_gb_unset_rx_flowctl(config_word) \ | ||
148 | clear_bit(5, (unsigned long*)(&config_word)) | ||
149 | |||
150 | #define netxen_gb_get_tx_synced(config_word) \ | ||
151 | _netxen_crb_get_bit((config_word), 1) | ||
152 | #define netxen_gb_get_rx_synced(config_word) \ | ||
153 | _netxen_crb_get_bit((config_word), 3) | ||
154 | #define netxen_gb_get_tx_flowctl(config_word) \ | ||
155 | _netxen_crb_get_bit((config_word), 4) | ||
156 | #define netxen_gb_get_rx_flowctl(config_word) \ | ||
157 | _netxen_crb_get_bit((config_word), 5) | ||
158 | #define netxen_gb_get_soft_reset(config_word) \ | ||
159 | _netxen_crb_get_bit((config_word), 31) | ||
160 | |||
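As a usage illustration of the Config Register 0 accessors above, the hedged sketch below (not taken from the driver) builds a config word with these macros and writes it through the window-0 helper from netxen_nic_hw.c; the adapter and port arguments are assumed to come from the caller.

/* Illustrative only: enable TX/RX and flow control on GbE port 'port'. */
static void example_gb_mac_enable(struct netxen_adapter *adapter, int port)
{
	u32 mac_cfg0 = 0;

	netxen_gb_enable_tx(mac_cfg0);		/* bit 0: enable frame transmit */
	netxen_gb_enable_rx(mac_cfg0);		/* bit 2: enable frame receive */
	netxen_gb_tx_flowctl(mac_cfg0);		/* bit 4: generate pause frames */
	netxen_gb_rx_flowctl(mac_cfg0);		/* bit 5: act on received pause frames */

	/* MAC Config 0 sits in CRB window 0, hence the _w0 helper. */
	netxen_nic_write_w0(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), mac_cfg0);
}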
161 | /* | ||
162 | * NIU GB MAC Config Register 1 (applies to GB0, GB1, GB2, GB3) | ||
163 | * | ||
164 | * Bit 0 : duplex => 1:full duplex mode, 0:half duplex | ||
165 | * Bit 1 : crc_enable => 1:append CRC to xmit frames, 0:dont append | ||
166 | * Bit 2 : padshort => 1:pad short frames and add CRC, 0:dont pad | ||
167 | * Bit 4 : checklength => 1:check framelen with actual,0:dont check | ||
168 | * Bit 5 : hugeframes => 1:allow oversize xmit frames, 0:dont allow | ||
169 | * Bits 8-9 : intfmode => 01:nibble (10/100), 10:byte (1000) | ||
170 | * Bits 12-15 : preamblelen => preamble field length in bytes, default 7 | ||
171 | */ | ||
172 | |||
173 | #define netxen_gb_set_duplex(config_word) \ | ||
174 | set_bit(0, (unsigned long*)&config_word) | ||
175 | #define netxen_gb_set_crc_enable(config_word) \ | ||
176 | set_bit(1, (unsigned long*)&config_word) | ||
177 | #define netxen_gb_set_padshort(config_word) \ | ||
178 | set_bit(2, (unsigned long*)&config_word) | ||
179 | #define netxen_gb_set_checklength(config_word) \ | ||
180 | set_bit(4, (unsigned long*)&config_word) | ||
181 | #define netxen_gb_set_hugeframes(config_word) \ | ||
182 | set_bit(5, (unsigned long*)&config_word) | ||
183 | #define netxen_gb_set_preamblelen(config_word, val) \ | ||
184 | ((config_word) |= ((val) << 12) & 0xF000) | ||
185 | #define netxen_gb_set_intfmode(config_word, val) \ | ||
186 | ((config_word) |= ((val) << 8) & 0x300) | ||
187 | |||
188 | #define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16) | ||
189 | |||
190 | #define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \ | ||
191 | ((config_word) |= ((val) & 0x07)) | ||
192 | #define netxen_gb_mii_mgmt_reset(config_word) \ | ||
193 | set_bit(31, (unsigned long*)&config_word) | ||
194 | #define netxen_gb_mii_mgmt_unset(config_word) \ | ||
195 | clear_bit(31, (unsigned long*)&config_word) | ||
196 | |||
197 | /* | ||
198 | * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3) | ||
199 | * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op | ||
200 | * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op | ||
201 | */ | ||
202 | |||
203 | #define netxen_gb_mii_mgmt_set_read_cycle(config_word) \ | ||
204 | set_bit(0, (unsigned long*)&config_word) | ||
205 | #define netxen_gb_mii_mgmt_reg_addr(config_word, val) \ | ||
206 | ((config_word) |= ((val) & 0x1F)) | ||
207 | #define netxen_gb_mii_mgmt_phy_addr(config_word, val) \ | ||
208 | ((config_word) |= (((val) & 0x1F) << 8)) | ||
209 | |||
210 | /* | ||
211 | * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3) | ||
212 | * Read-only register. | ||
213 | * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle | ||
214 | * Bit 1 : scanning => 1:scan operation in progress, 0:idle | ||
215 | * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle | ||
216 | */ | ||
217 | #define netxen_get_gb_mii_mgmt_busy(config_word) \ | ||
218 | _netxen_crb_get_bit(config_word, 0) | ||
219 | #define netxen_get_gb_mii_mgmt_scanning(config_word) \ | ||
220 | _netxen_crb_get_bit(config_word, 1) | ||
221 | #define netxen_get_gb_mii_mgmt_notvalid(config_word) \ | ||
222 | _netxen_crb_get_bit(config_word, 2) | ||
223 | |||
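Putting the MII management registers together, the hedged sketch below (not the driver's actual PHY read routine) shows how a single PHY register read could be sequenced with the macros above: program the address register, trigger a read cycle, poll the indicators until the result is valid, then fetch the data from the status register.

/* Illustrative only; error handling and delays are reduced to a minimum. */
static int example_gb_phy_read(struct netxen_adapter *adapter, int port,
			       u32 phy, u32 reg, u32 *value)
{
	u32 address = 0, command = 0, status = 0;
	int retries = 100;

	netxen_gb_mii_mgmt_reg_addr(address, reg);	/* bits 0-4: PHY register */
	netxen_gb_mii_mgmt_phy_addr(address, phy);	/* bits 8-12: PHY address */
	netxen_nic_write_w0(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(port), address);

	netxen_gb_mii_mgmt_set_read_cycle(command);	/* bit 0: single read cycle */
	netxen_nic_write_w0(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(port), command);

	do {	/* wait until the mgmt block is idle and the data is valid */
		netxen_nic_read_w0(adapter,
				   NETXEN_NIU_GB_MII_MGMT_INDICATE(port), &status);
	} while ((netxen_get_gb_mii_mgmt_busy(status) ||
		  netxen_get_gb_mii_mgmt_notvalid(status)) && --retries);

	/* Stop the read cycle before checking for timeout. */
	netxen_nic_write_w0(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(port), 0);
	if (!retries)
		return -EIO;

	netxen_nic_read_w0(adapter, NETXEN_NIU_GB_MII_MGMT_STATUS(port), value);
	return 0;
}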
224 | /* | ||
225 | * PHY-Specific MII control/status registers. | ||
226 | */ | ||
227 | typedef enum { | ||
228 | NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL = 0, | ||
229 | NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS = 1, | ||
230 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 = 2, | ||
231 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 = 3, | ||
232 | NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG = 4, | ||
233 | NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART = 5, | ||
234 | NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE = 6, | ||
235 | NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT = 7, | ||
236 | NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE = 8, | ||
237 | NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL = 9, | ||
238 | NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS = 10, | ||
239 | NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS = 15, | ||
240 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL = 16, | ||
241 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS = 17, | ||
242 | NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE = 18, | ||
243 | NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS = 19, | ||
244 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE = 20, | ||
245 | NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT = 21, | ||
246 | NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL = 24, | ||
247 | NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE = 25, | ||
248 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET = 26, | ||
249 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE = 27 | ||
250 | } netxen_niu_phy_register_t; | ||
251 | |||
252 | /* | ||
253 | * PHY-Specific Status Register (reg 17). | ||
254 | * | ||
255 | * Bit 0 : jabber => 1:jabber detected, 0:not | ||
256 | * Bit 1 : polarity => 1:polarity reversed, 0:normal | ||
257 | * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled | ||
258 | * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled | ||
259 | * Bit 4 : energydetect => 1:sleep, 0:active | ||
260 | * Bit 5 : downshift => 1:downshift, 0:no downshift | ||
261 | * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) | ||
262 | * Bits 7-9 : cablelen => not valid in 10Mb/s mode | ||
263 | * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m | ||
264 | * Bit 10 : link => 1:link up, 0:link down | ||
265 | * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet | ||
266 | * Bit 12 : pagercvd => 1:page received, 0:page not received | ||
267 | * Bit 13 : duplex => 1:full duplex, 0:half duplex | ||
268 | * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd | ||
269 | */ | ||
270 | |||
271 | #define netxen_get_phy_cablelen(config_word) (((config_word) >> 7) & 0x07) | ||
272 | #define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) | ||
273 | |||
274 | #define netxen_set_phy_speed(config_word, val) \ | ||
275 | ((config_word) |= ((val & 0x03) << 14)) | ||
276 | #define netxen_set_phy_duplex(config_word) \ | ||
277 | set_bit(13, (unsigned long*)&config_word) | ||
278 | #define netxen_clear_phy_duplex(config_word) \ | ||
279 | clear_bit(13, (unsigned long*)&config_word) | ||
280 | |||
281 | #define netxen_get_phy_jabber(config_word) \ | ||
282 | _netxen_crb_get_bit(config_word, 0) | ||
283 | #define netxen_get_phy_polarity(config_word) \ | ||
284 | _netxen_crb_get_bit(config_word, 1) | ||
285 | #define netxen_get_phy_recvpause(config_word) \ | ||
286 | _netxen_crb_get_bit(config_word, 2) | ||
287 | #define netxen_get_phy_xmitpause(config_word) \ | ||
288 | _netxen_crb_get_bit(config_word, 3) | ||
289 | #define netxen_get_phy_energydetect(config_word) \ | ||
290 | _netxen_crb_get_bit(config_word, 4) | ||
291 | #define netxen_get_phy_downshift(config_word) \ | ||
292 | _netxen_crb_get_bit(config_word, 5) | ||
293 | #define netxen_get_phy_crossover(config_word) \ | ||
294 | _netxen_crb_get_bit(config_word, 6) | ||
295 | #define netxen_get_phy_link(config_word) \ | ||
296 | _netxen_crb_get_bit(config_word, 10) | ||
297 | #define netxen_get_phy_resolved(config_word) \ | ||
298 | _netxen_crb_get_bit(config_word, 11) | ||
299 | #define netxen_get_phy_pagercvd(config_word) \ | ||
300 | _netxen_crb_get_bit(config_word, 12) | ||
301 | #define netxen_get_phy_duplex(config_word) \ | ||
302 | _netxen_crb_get_bit(config_word, 13) | ||
303 | |||
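Editor's note: taken together, the getters above let a caller decode a status word read from register 17 in a single pass. A small illustrative sketch follows (editorial only; the printk formatting is arbitrary).

/*
 * Illustrative decode of a PHY-Specific Status (reg 17) value that the
 * caller has already read from the PHY; editorial sketch only.
 */
static void example_dump_phy_status(u32 status)
{
	static const char *cable[] = {
		"<50m", "50-80m", "80-110m", "110-140m", ">140m"
	};
	static const int speed[] = { 10, 100, 1000, 0 };
	u32 len = netxen_get_phy_cablelen(status);

	if (!netxen_get_phy_link(status)) {
		printk(KERN_INFO "link down\n");
		return;
	}
	if (!netxen_get_phy_resolved(status)) {
		printk(KERN_INFO "link up, speed/duplex not yet resolved\n");
		return;
	}
	printk(KERN_INFO "link up: %d Mb/s, %s duplex, cable %s\n",
	       speed[netxen_get_phy_speed(status)],
	       netxen_get_phy_duplex(status) ? "full" : "half",
	       cable[len > 4 ? 4 : len]);
}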
304 | /* | ||
305 | * Interrupt Register definition | ||
306 | * This definition applies to registers 18 and 19 (int enable and int status). | ||
307 | * Bit 0 : jabber | ||
308 | * Bit 1 : polarity_changed | ||
309 | * Bit 4 : energy_detect | ||
310 | * Bit 5 : downshift | ||
311 | * Bit 6 : mdi_xover_changed | ||
312 | * Bit 7 : fifo_over_underflow | ||
313 | * Bit 8 : false_carrier | ||
314 | * Bit 9 : symbol_error | ||
315 | * Bit 10: link_status_changed | ||
316 | * Bit 11: autoneg_completed | ||
317 | * Bit 12: page_received | ||
318 | * Bit 13: duplex_changed | ||
319 | * Bit 14: speed_changed | ||
320 | * Bit 15: autoneg_error | ||
321 | */ | ||
322 | |||
323 | #define netxen_get_phy_int_jabber(config_word) \ | ||
324 | _netxen_crb_get_bit(config_word, 0) | ||
325 | #define netxen_get_phy_int_polarity_changed(config_word) \ | ||
326 | _netxen_crb_get_bit(config_word, 1) | ||
327 | #define netxen_get_phy_int_energy_detect(config_word) \ | ||
328 | _netxen_crb_get_bit(config_word, 4) | ||
329 | #define netxen_get_phy_int_downshift(config_word) \ | ||
330 | _netxen_crb_get_bit(config_word, 5) | ||
331 | #define netxen_get_phy_int_mdi_xover_changed(config_word) \ | ||
332 | _netxen_crb_get_bit(config_word, 6) | ||
333 | #define netxen_get_phy_int_fifo_over_underflow(config_word) \ | ||
334 | _netxen_crb_get_bit(config_word, 7) | ||
335 | #define netxen_get_phy_int_false_carrier(config_word) \ | ||
336 | _netxen_crb_get_bit(config_word, 8) | ||
337 | #define netxen_get_phy_int_symbol_error(config_word) \ | ||
338 | _netxen_crb_get_bit(config_word, 9) | ||
339 | #define netxen_get_phy_int_link_status_changed(config_word) \ | ||
340 | _netxen_crb_get_bit(config_word, 10) | ||
341 | #define netxen_get_phy_int_autoneg_completed(config_word) \ | ||
342 | _netxen_crb_get_bit(config_word, 11) | ||
343 | #define netxen_get_phy_int_page_received(config_word) \ | ||
344 | _netxen_crb_get_bit(config_word, 12) | ||
345 | #define netxen_get_phy_int_duplex_changed(config_word) \ | ||
346 | _netxen_crb_get_bit(config_word, 13) | ||
347 | #define netxen_get_phy_int_speed_changed(config_word) \ | ||
348 | _netxen_crb_get_bit(config_word, 14) | ||
349 | #define netxen_get_phy_int_autoneg_error(config_word) \ | ||
350 | _netxen_crb_get_bit(config_word, 15) | ||
351 | |||
352 | #define netxen_set_phy_int_link_status_changed(config_word) \ | ||
353 | set_bit(10, (unsigned long*)&config_word) | ||
354 | #define netxen_set_phy_int_autoneg_completed(config_word) \ | ||
355 | set_bit(11, (unsigned long*)&config_word) | ||
356 | #define netxen_set_phy_int_speed_changed(config_word) \ | ||
357 | set_bit(14, (unsigned long*)&config_word) | ||
358 | |||
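Editor's note: only the three setters needed for link management are provided; a sketch of how they build an interrupt-enable value for register 18 follows (how the value reaches the PHY, an MDIO write, is outside this header and left out).

/*
 * Editorial sketch: interrupt-enable mask (reg 18) that reports link,
 * autoneg-complete and speed changes.  The caller would write the
 * result to PHY register 18 over MDIO.
 */
static u32 example_phy_int_enable_mask(void)
{
	u32 enable = 0;

	netxen_set_phy_int_link_status_changed(enable);	/* bit 10 */
	netxen_set_phy_int_autoneg_completed(enable);	/* bit 11 */
	netxen_set_phy_int_speed_changed(enable);	/* bit 14 */
	return enable;
}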
359 | /* | ||
360 | * NIU Mode Register. | ||
361 | * Bit 0 : enable FibreChannel | ||
362 | * Bit 1 : enable 10/100/1000 Ethernet | ||
363 | * Bit 2 : enable 10Gb Ethernet | ||
364 | */ | ||
365 | |||
366 | #define netxen_get_niu_enable_ge(config_word) \ | ||
367 | _netxen_crb_get_bit(config_word, 1) | ||
368 | |||
369 | /* Promiscuous mode options (GbE mode only) */ | ||
370 | typedef enum { | ||
371 | NETXEN_NIU_PROMISC_MODE = 0, | ||
372 | NETXEN_NIU_NON_PROMISC_MODE | ||
373 | } netxen_niu_prom_mode_t; | ||
374 | |||
375 | /* | ||
376 | * NIU GB Drop CRC Register | ||
377 | * | ||
378 | * Bit 0 : drop_gb0 => 1:drop pkts with bad CRCs, 0:pass them on | ||
379 | * Bit 1 : drop_gb1 => 1:drop pkts with bad CRCs, 0:pass them on | ||
380 | * Bit 2 : drop_gb2 => 1:drop pkts with bad CRCs, 0:pass them on | ||
381 | * Bit 3 : drop_gb3 => 1:drop pkts with bad CRCs, 0:pass them on | ||
382 | */ | ||
383 | |||
384 | #define netxen_set_gb_drop_gb0(config_word) \ | ||
385 | set_bit(0, (unsigned long*)&config_word) | ||
386 | #define netxen_set_gb_drop_gb1(config_word) \ | ||
387 | set_bit(1, (unsigned long*)&config_word) | ||
388 | #define netxen_set_gb_drop_gb2(config_word) \ | ||
389 | set_bit(2, (unsigned long*)&config_word) | ||
390 | #define netxen_set_gb_drop_gb3(config_word) \ | ||
391 | set_bit(3, (unsigned long*)&config_word) | ||
392 | |||
393 | #define netxen_clear_gb_drop_gb0(config_word) \ | ||
394 | clear_bit(0, (unsigned long*)&config_word) | ||
395 | #define netxen_clear_gb_drop_gb1(config_word) \ | ||
396 | clear_bit(1, (unsigned long*)&config_word) | ||
397 | #define netxen_clear_gb_drop_gb2(config_word) \ | ||
398 | clear_bit(2, (unsigned long*)&config_word) | ||
399 | #define netxen_clear_gb_drop_gb3(config_word) \ | ||
400 | clear_bit(3, (unsigned long*)&config_word) | ||
401 | |||
402 | /* | ||
403 | * NIU XG MAC Config Register | ||
404 | * | ||
405 | * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable | ||
406 | * Bit 2 : rx_enable => 1:enable frame recv, 0:disable | ||
407 | * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op | ||
408 | * Bit 27: xaui_framer_reset | ||
409 | * Bit 28: xaui_rx_reset | ||
410 | * Bit 29: xaui_tx_reset | ||
411 | * Bit 30: xg_ingress_afifo_reset | ||
412 | * Bit 31: xg_egress_afifo_reset | ||
413 | */ | ||
414 | |||
415 | #define netxen_xg_soft_reset(config_word) \ | ||
416 | set_bit(4, (unsigned long*)&config_word) | ||
417 | |||
418 | /* | ||
419 | * MAC Control Register | ||
420 | * | ||
421 | * Bit 0-1 : id_pool0 | ||
422 | * Bit 2 : enable_xtnd0 | ||
423 | * Bit 4-5 : id_pool1 | ||
424 | * Bit 6 : enable_xtnd1 | ||
425 | * Bit 8-9 : id_pool2 | ||
426 | * Bit 10 : enable_xtnd2 | ||
427 | * Bit 12-13 : id_pool3 | ||
428 | * Bit 14 : enable_xtnd3 | ||
429 | * Bit 24-25 : mode_select | ||
430 | * Bit 28-31 : enable_pool | ||
431 | */ | ||
432 | |||
433 | #define netxen_nic_mcr_set_id_pool0(config, val) \ | ||
434 | ((config) |= ((val) & 0x03)) | ||
435 | #define netxen_nic_mcr_set_enable_xtnd0(config) \ | ||
436 | (set_bit(3, (unsigned long *)&(config))) | ||
437 | #define netxen_nic_mcr_set_id_pool1(config, val) \ | ||
438 | ((config) |= (((val) & 0x03) << 4)) | ||
439 | #define netxen_nic_mcr_set_enable_xtnd1(config) \ | ||
440 | (set_bit(6, (unsigned long *)&(config))) | ||
441 | #define netxen_nic_mcr_set_id_pool2(config, val) \ | ||
442 | ((config) |= (((val) & 0x03) << 8)) | ||
443 | #define netxen_nic_mcr_set_enable_xtnd2(config) \ | ||
444 | (set_bit(10, (unsigned long *)&(config))) | ||
445 | #define netxen_nic_mcr_set_id_pool3(config, val) \ | ||
446 | ((config) |= (((val) & 0x03) << 12)) | ||
447 | #define netxen_nic_mcr_set_enable_xtnd3(config) \ | ||
448 | (set_bit(14, (unsigned long *)&(config))) | ||
449 | #define netxen_nic_mcr_set_mode_select(config, val) \ | ||
450 | ((config) |= (((val) & 0x03) << 24)) | ||
451 | #define netxen_nic_mcr_set_enable_pool(config, val) \ | ||
452 | ((config) |= (((val) & 0x0f) << 28)) | ||
453 | |||
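Editor's note: the MCR helpers are write-only composers, so a caller builds the whole word locally and then writes it out; a minimal sketch with made-up pool and mode numbers follows.

/*
 * Editorial sketch: compose a MAC Control Register value with the
 * helpers above.  The pool id, mode and pool-enable values are
 * arbitrary examples, not recommended settings.
 */
static u32 example_build_mcr(void)
{
	u32 mcr = 0;

	netxen_nic_mcr_set_id_pool0(mcr, 1);		/* bits 0-1  */
	netxen_nic_mcr_set_enable_xtnd0(mcr);		/* extended mode, pool 0 */
	netxen_nic_mcr_set_mode_select(mcr, 2);		/* bits 24-25 */
	netxen_nic_mcr_set_enable_pool(mcr, 0xf);	/* bits 28-31 */
	return mcr;
}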
454 | /* Set promiscuous mode for a GbE interface */ | ||
455 | int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port, | ||
456 | netxen_niu_prom_mode_t mode); | ||
457 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | ||
458 | int port, netxen_niu_prom_mode_t mode); | ||
459 | |||
460 | /* get/set the MAC address for a given MAC */ | ||
461 | int netxen_niu_macaddr_get(struct netxen_adapter *adapter, int port, | ||
462 | netxen_ethernet_macaddr_t * addr); | ||
463 | int netxen_niu_macaddr_set(struct netxen_port *port, | ||
464 | netxen_ethernet_macaddr_t addr); | ||
465 | |||
466 | /* XG versions */ | ||
467 | int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int port, | ||
468 | netxen_ethernet_macaddr_t * addr); | ||
469 | int netxen_niu_xg_macaddr_set(struct netxen_port *port, | ||
470 | netxen_ethernet_macaddr_t addr); | ||
471 | |||
472 | /* Generic enable for GbE ports. Will detect the speed of the link. */ | ||
473 | int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port); | ||
474 | |||
475 | int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port); | ||
476 | |||
477 | /* Disable a GbE interface */ | ||
478 | int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port); | ||
479 | |||
480 | int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port); | ||
481 | |||
482 | #endif /* __NETXEN_NIC_HW_H_ */ | ||
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c new file mode 100644 index 000000000000..0dca029bc3e5 --- /dev/null +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -0,0 +1,1304 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | * | ||
29 | * | ||
30 | * Source file for NIC routines to initialize the Phantom Hardware | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/netdevice.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include "netxen_nic.h" | ||
37 | #include "netxen_nic_hw.h" | ||
38 | #include "netxen_nic_ioctl.h" | ||
39 | #include "netxen_nic_phan_reg.h" | ||
40 | |||
41 | struct crb_addr_pair { | ||
42 | long addr; | ||
43 | long data; | ||
44 | }; | ||
45 | |||
46 | #define NETXEN_MAX_CRB_XFORM 60 | ||
47 | static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; | ||
48 | #define NETXEN_ADDR_ERROR ((unsigned long) 0xffffffff) | ||
49 | |||
50 | #define crb_addr_transform(name) \ | ||
51 | crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \ | ||
52 | NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20 | ||
53 | |||
54 | #define NETXEN_NIC_XDMA_RESET 0x8000ff | ||
55 | |||
56 | static inline void | ||
57 | netxen_nic_locked_write_reg(struct netxen_adapter *adapter, | ||
58 | unsigned long off, int *data) | ||
59 | { | ||
60 | void __iomem *addr = pci_base_offset(adapter, off); | ||
61 | writel(*data, addr); | ||
62 | } | ||
63 | |||
64 | static void crb_addr_transform_setup(void) | ||
65 | { | ||
66 | crb_addr_transform(XDMA); | ||
67 | crb_addr_transform(TIMR); | ||
68 | crb_addr_transform(SRE); | ||
69 | crb_addr_transform(SQN3); | ||
70 | crb_addr_transform(SQN2); | ||
71 | crb_addr_transform(SQN1); | ||
72 | crb_addr_transform(SQN0); | ||
73 | crb_addr_transform(SQS3); | ||
74 | crb_addr_transform(SQS2); | ||
75 | crb_addr_transform(SQS1); | ||
76 | crb_addr_transform(SQS0); | ||
77 | crb_addr_transform(RPMX7); | ||
78 | crb_addr_transform(RPMX6); | ||
79 | crb_addr_transform(RPMX5); | ||
80 | crb_addr_transform(RPMX4); | ||
81 | crb_addr_transform(RPMX3); | ||
82 | crb_addr_transform(RPMX2); | ||
83 | crb_addr_transform(RPMX1); | ||
84 | crb_addr_transform(RPMX0); | ||
85 | crb_addr_transform(ROMUSB); | ||
86 | crb_addr_transform(SN); | ||
87 | crb_addr_transform(QMN); | ||
88 | crb_addr_transform(QMS); | ||
89 | crb_addr_transform(PGNI); | ||
90 | crb_addr_transform(PGND); | ||
91 | crb_addr_transform(PGN3); | ||
92 | crb_addr_transform(PGN2); | ||
93 | crb_addr_transform(PGN1); | ||
94 | crb_addr_transform(PGN0); | ||
95 | crb_addr_transform(PGSI); | ||
96 | crb_addr_transform(PGSD); | ||
97 | crb_addr_transform(PGS3); | ||
98 | crb_addr_transform(PGS2); | ||
99 | crb_addr_transform(PGS1); | ||
100 | crb_addr_transform(PGS0); | ||
101 | crb_addr_transform(PS); | ||
102 | crb_addr_transform(PH); | ||
103 | crb_addr_transform(NIU); | ||
104 | crb_addr_transform(I2Q); | ||
105 | crb_addr_transform(EG); | ||
106 | crb_addr_transform(MN); | ||
107 | crb_addr_transform(MS); | ||
108 | crb_addr_transform(CAS2); | ||
109 | crb_addr_transform(CAS1); | ||
110 | crb_addr_transform(CAS0); | ||
111 | crb_addr_transform(CAM); | ||
112 | crb_addr_transform(C2C1); | ||
113 | crb_addr_transform(C2C0); | ||
114 | } | ||
115 | |||
116 | int netxen_init_firmware(struct netxen_adapter *adapter) | ||
117 | { | ||
118 | u32 state = 0, loops = 0, err = 0; | ||
119 | |||
120 | /* Window 1 call */ | ||
121 | state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | ||
122 | |||
123 | if (state == PHAN_INITIALIZE_ACK) | ||
124 | return 0; | ||
125 | |||
126 | while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) { | ||
127 | udelay(100); | ||
128 | /* Window 1 call */ | ||
129 | state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | ||
130 | |||
131 | loops++; | ||
132 | } | ||
133 | if (loops >= 2000) { | ||
134 | printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n", | ||
135 | state); | ||
136 | err = -EIO; | ||
137 | return err; | ||
138 | } | ||
139 | /* Window 1 call */ | ||
140 | writel(PHAN_INITIALIZE_ACK, | ||
141 | NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | ||
142 | |||
143 | return err; | ||
144 | } | ||
145 | |||
146 | #define NETXEN_ADDR_LIMIT 0xffffffffULL | ||
147 | |||
148 | void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, | ||
149 | struct pci_dev **used_dev) | ||
150 | { | ||
151 | void *addr; | ||
152 | |||
153 | addr = pci_alloc_consistent(pdev, sz, ptr); | ||
154 | if ((unsigned long long)(*ptr) < NETXEN_ADDR_LIMIT) { | ||
155 | *used_dev = pdev; | ||
156 | return addr; | ||
157 | } | ||
158 | pci_free_consistent(pdev, sz, addr, *ptr); | ||
159 | addr = pci_alloc_consistent(NULL, sz, ptr); | ||
160 | *used_dev = NULL; | ||
161 | return addr; | ||
162 | } | ||
163 | |||
164 | void netxen_initialize_adapter_sw(struct netxen_adapter *adapter) | ||
165 | { | ||
166 | int ctxid, ring; | ||
167 | u32 i; | ||
168 | u32 num_rx_bufs = 0; | ||
169 | struct netxen_rcv_desc_ctx *rcv_desc; | ||
170 | |||
171 | DPRINTK(INFO, "initializing some queues: %p\n", adapter); | ||
172 | for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) { | ||
173 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
174 | struct netxen_rx_buffer *rx_buf; | ||
175 | rcv_desc = &adapter->recv_ctx[ctxid].rcv_desc[ring]; | ||
176 | rcv_desc->rcv_free = rcv_desc->max_rx_desc_count; | ||
177 | rcv_desc->begin_alloc = 0; | ||
178 | rx_buf = rcv_desc->rx_buf_arr; | ||
179 | num_rx_bufs = rcv_desc->max_rx_desc_count; | ||
180 | /* | ||
181 | * Now go through all of them, set reference handles | ||
182 | * and put them in the queues. | ||
183 | */ | ||
184 | for (i = 0; i < num_rx_bufs; i++) { | ||
185 | rx_buf->ref_handle = i; | ||
186 | rx_buf->state = NETXEN_BUFFER_FREE; | ||
187 | |||
188 | DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:" | ||
189 | "%p\n", ctxid, i, rx_buf); | ||
190 | rx_buf++; | ||
191 | } | ||
192 | } | ||
193 | } | ||
194 | DPRINTK(INFO, "initialized buffers for %s and %s\n", | ||
195 | "adapter->free_cmd_buf_list", "adapter->free_rxbuf"); | ||
196 | } | ||
197 | |||
198 | void netxen_initialize_adapter_hw(struct netxen_adapter *adapter) | ||
199 | { | ||
200 | int ports = 0; | ||
201 | struct netxen_board_info *board_info = &(adapter->ahw.boardcfg); | ||
202 | |||
203 | if (netxen_nic_get_board_info(adapter) != 0) | ||
204 | printk("%s: Error getting board config info.\n", | ||
205 | netxen_nic_driver_name); | ||
206 | get_brd_port_by_type(board_info->board_type, &ports); | ||
207 | if (ports == 0) | ||
208 | printk(KERN_ERR "%s: Unknown board type\n", | ||
209 | netxen_nic_driver_name); | ||
210 | adapter->ahw.max_ports = ports; | ||
211 | } | ||
212 | |||
213 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | ||
214 | { | ||
215 | struct netxen_drvops *ops = adapter->ops; | ||
216 | switch (adapter->ahw.board_type) { | ||
217 | case NETXEN_NIC_GBE: | ||
218 | ops->enable_phy_interrupts = | ||
219 | netxen_niu_gbe_enable_phy_interrupts; | ||
220 | ops->disable_phy_interrupts = | ||
221 | netxen_niu_gbe_disable_phy_interrupts; | ||
222 | ops->handle_phy_intr = netxen_nic_gbe_handle_phy_intr; | ||
223 | ops->macaddr_set = netxen_niu_macaddr_set; | ||
224 | ops->set_mtu = netxen_nic_set_mtu_gb; | ||
225 | ops->set_promisc = netxen_niu_set_promiscuous_mode; | ||
226 | ops->unset_promisc = netxen_niu_set_promiscuous_mode; | ||
227 | ops->phy_read = netxen_niu_gbe_phy_read; | ||
228 | ops->phy_write = netxen_niu_gbe_phy_write; | ||
229 | ops->init_port = netxen_niu_gbe_init_port; | ||
230 | ops->init_niu = netxen_nic_init_niu_gb; | ||
231 | ops->stop_port = netxen_niu_disable_gbe_port; | ||
232 | break; | ||
233 | |||
234 | case NETXEN_NIC_XGBE: | ||
235 | ops->enable_phy_interrupts = | ||
236 | netxen_niu_xgbe_enable_phy_interrupts; | ||
237 | ops->disable_phy_interrupts = | ||
238 | netxen_niu_xgbe_disable_phy_interrupts; | ||
239 | ops->handle_phy_intr = netxen_nic_xgbe_handle_phy_intr; | ||
240 | ops->macaddr_set = netxen_niu_xg_macaddr_set; | ||
241 | ops->set_mtu = netxen_nic_set_mtu_xgb; | ||
242 | ops->init_port = netxen_niu_xg_init_port; | ||
243 | ops->set_promisc = netxen_niu_xg_set_promiscuous_mode; | ||
244 | ops->unset_promisc = netxen_niu_xg_set_promiscuous_mode; | ||
245 | ops->stop_port = netxen_niu_disable_xg_port; | ||
246 | break; | ||
247 | |||
248 | default: | ||
249 | break; | ||
250 | } | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB | ||
255 | * address to external PCI CRB address. | ||
256 | */ | ||
257 | unsigned long netxen_decode_crb_addr(unsigned long addr) | ||
258 | { | ||
259 | int i; | ||
260 | unsigned long base_addr, offset, pci_base; | ||
261 | |||
262 | crb_addr_transform_setup(); | ||
263 | |||
264 | pci_base = NETXEN_ADDR_ERROR; | ||
265 | base_addr = addr & 0xfff00000; | ||
266 | offset = addr & 0x000fffff; | ||
267 | |||
268 | for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) { | ||
269 | if (crb_addr_xform[i] == base_addr) { | ||
270 | pci_base = i << 20; | ||
271 | break; | ||
272 | } | ||
273 | } | ||
274 | if (pci_base == NETXEN_ADDR_ERROR) | ||
275 | return pci_base; | ||
276 | else | ||
277 | return (pci_base + offset); | ||
278 | } | ||
279 | |||
280 | static long rom_max_timeout = 10000; | ||
281 | static long rom_lock_timeout = 1000000; | ||
282 | |||
283 | static inline int rom_lock(struct netxen_adapter *adapter) | ||
284 | { | ||
285 | int iter; | ||
286 | u32 done = 0; | ||
287 | int timeout = 0; | ||
288 | |||
289 | while (!done) { | ||
290 | /* acquire semaphore2 from PCI HW block */ | ||
291 | netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK), | ||
292 | &done); | ||
293 | if (done == 1) | ||
294 | break; | ||
295 | if (timeout >= rom_lock_timeout) | ||
296 | return -EIO; | ||
297 | |||
298 | timeout++; | ||
299 | /* | ||
300 | * Yield CPU | ||
301 | */ | ||
302 | if (!in_atomic()) | ||
303 | schedule(); | ||
304 | else { | ||
305 | for (iter = 0; iter < 20; iter++) | ||
306 | cpu_relax(); /* This is a nop instruction on i386 */ | ||
307 | } | ||
308 | } | ||
309 | netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER); | ||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | int netxen_wait_rom_done(struct netxen_adapter *adapter) | ||
314 | { | ||
315 | long timeout = 0; | ||
316 | long done = 0; | ||
317 | |||
318 | while (done == 0) { | ||
319 | done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS); | ||
320 | done &= 2; | ||
321 | timeout++; | ||
322 | if (timeout >= rom_max_timeout) { | ||
323 | printk("Timeout reached waiting for rom done"); | ||
324 | return -EIO; | ||
325 | } | ||
326 | } | ||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | static inline int netxen_rom_wren(struct netxen_adapter *adapter) | ||
331 | { | ||
332 | /* Set write enable latch in ROM status register */ | ||
333 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); | ||
334 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, | ||
335 | M25P_INSTR_WREN); | ||
336 | if (netxen_wait_rom_done(adapter)) { | ||
337 | return -1; | ||
338 | } | ||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | static inline unsigned int netxen_rdcrbreg(struct netxen_adapter *adapter, | ||
343 | unsigned int addr) | ||
344 | { | ||
345 | unsigned int data = 0xdeaddead; | ||
346 | data = netxen_nic_reg_read(adapter, addr); | ||
347 | return data; | ||
348 | } | ||
349 | |||
350 | static inline int netxen_do_rom_rdsr(struct netxen_adapter *adapter) | ||
351 | { | ||
352 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, | ||
353 | M25P_INSTR_RDSR); | ||
354 | if (netxen_wait_rom_done(adapter)) { | ||
355 | return -1; | ||
356 | } | ||
357 | return netxen_rdcrbreg(adapter, NETXEN_ROMUSB_ROM_RDATA); | ||
358 | } | ||
359 | |||
360 | static inline void netxen_rom_unlock(struct netxen_adapter *adapter) | ||
361 | { | ||
362 | u32 val; | ||
363 | |||
364 | /* release semaphore2 */ | ||
365 | netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val); | ||
366 | |||
367 | } | ||
368 | |||
369 | int netxen_rom_wip_poll(struct netxen_adapter *adapter) | ||
370 | { | ||
371 | long timeout = 0; | ||
372 | long wip = 1; | ||
373 | int val; | ||
374 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); | ||
375 | while (wip != 0) { | ||
376 | val = netxen_do_rom_rdsr(adapter); | ||
377 | wip = val & 1; | ||
378 | timeout++; | ||
379 | if (timeout > rom_max_timeout) { | ||
380 | return -1; | ||
381 | } | ||
382 | } | ||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static inline int do_rom_fast_write(struct netxen_adapter *adapter, | ||
387 | int addr, int data) | ||
388 | { | ||
389 | if (netxen_rom_wren(adapter)) { | ||
390 | return -1; | ||
391 | } | ||
392 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_WDATA, data); | ||
393 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); | ||
394 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); | ||
395 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, | ||
396 | M25P_INSTR_PP); | ||
397 | if (netxen_wait_rom_done(adapter)) { | ||
398 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); | ||
399 | return -1; | ||
400 | } | ||
401 | |||
402 | return netxen_rom_wip_poll(adapter); | ||
403 | } | ||
404 | |||
405 | static inline int | ||
406 | do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) | ||
407 | { | ||
408 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); | ||
409 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); | ||
410 | udelay(100); /* prevent bursting on CRB */ | ||
411 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); | ||
412 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); | ||
413 | if (netxen_wait_rom_done(adapter)) { | ||
414 | printk("Error waiting for rom done\n"); | ||
415 | return -EIO; | ||
416 | } | ||
417 | /* reset abyte_cnt and dummy_byte_cnt */ | ||
418 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); | ||
419 | udelay(100); /* prevent bursting on CRB */ | ||
420 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); | ||
421 | |||
422 | *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA); | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) | ||
427 | { | ||
428 | int ret; | ||
429 | |||
430 | if (rom_lock(adapter) != 0) | ||
431 | return -EIO; | ||
432 | |||
433 | ret = do_rom_fast_read(adapter, addr, valp); | ||
434 | netxen_rom_unlock(adapter); | ||
435 | return ret; | ||
436 | } | ||
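Editor's note: netxen_rom_fast_read() is the primitive the rest of the init path builds on (it takes the ROM semaphore, runs the fast-read instruction and releases the lock). A minimal usage sketch follows; the 0x4008 offset mirrors NETXEN_BOARDTYPE, which is defined further down in this file.

/*
 * Editorial sketch: read one 32-bit word from serial flash.  0x4008 is
 * the board-type offset (NETXEN_BOARDTYPE below); any 4-byte-aligned
 * flash offset is read the same way.
 */
static int example_read_board_type(struct netxen_adapter *adapter, int *val)
{
	if (netxen_rom_fast_read(adapter, 0x4008, val) != 0)
		return -EIO;	/* semaphore or ROM-done timeout */
	return 0;
}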
437 | |||
438 | int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data) | ||
439 | { | ||
440 | int ret = 0; | ||
441 | |||
442 | if (rom_lock(adapter) != 0) { | ||
443 | return -1; | ||
444 | } | ||
445 | ret = do_rom_fast_write(adapter, addr, data); | ||
446 | netxen_rom_unlock(adapter); | ||
447 | return ret; | ||
448 | } | ||
449 | int netxen_do_rom_se(struct netxen_adapter *adapter, int addr) | ||
450 | { | ||
451 | netxen_rom_wren(adapter); | ||
452 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); | ||
453 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); | ||
454 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, | ||
455 | M25P_INSTR_SE); | ||
456 | if (netxen_wait_rom_done(adapter)) { | ||
457 | netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); | ||
458 | return -1; | ||
459 | } | ||
460 | return netxen_rom_wip_poll(adapter); | ||
461 | } | ||
462 | |||
463 | int netxen_rom_se(struct netxen_adapter *adapter, int addr) | ||
464 | { | ||
465 | int ret = 0; | ||
466 | if (rom_lock(adapter) != 0) { | ||
467 | return -1; | ||
468 | } | ||
469 | ret = netxen_do_rom_se(adapter, addr); | ||
470 | netxen_rom_unlock(adapter); | ||
471 | return ret; | ||
472 | } | ||
473 | |||
474 | #define NETXEN_BOARDTYPE 0x4008 | ||
475 | #define NETXEN_BOARDNUM 0x400c | ||
476 | #define NETXEN_CHIPNUM 0x4010 | ||
477 | #define NETXEN_ROMBUS_RESET 0xFFFFFFFF | ||
478 | #define NETXEN_ROM_FIRST_BARRIER 0x800000000ULL | ||
479 | #define NETXEN_ROM_FOUND_INIT 0x400 | ||
480 | |||
481 | int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) | ||
482 | { | ||
483 | int addr, val, status; | ||
484 | int n, i; | ||
485 | int init_delay = 0; | ||
486 | struct crb_addr_pair *buf; | ||
487 | unsigned long off; | ||
488 | |||
489 | /* resetall */ | ||
490 | status = netxen_nic_get_board_info(adapter); | ||
491 | if (status) | ||
492 | printk("%s: netxen_pinit_from_rom: Error getting board info\n", | ||
493 | netxen_nic_driver_name); | ||
494 | |||
495 | netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, | ||
496 | NETXEN_ROMBUS_RESET); | ||
497 | |||
498 | if (verbose) { | ||
499 | int val; | ||
500 | if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0) | ||
501 | printk("P2 ROM board type: 0x%08x\n", val); | ||
502 | else | ||
503 | printk("Could not read board type\n"); | ||
504 | if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0) | ||
505 | printk("P2 ROM board num: 0x%08x\n", val); | ||
506 | else | ||
507 | printk("Could not read board number\n"); | ||
508 | if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0) | ||
509 | printk("P2 ROM chip num: 0x%08x\n", val); | ||
510 | else | ||
511 | printk("Could not read chip number\n"); | ||
512 | } | ||
513 | |||
514 | if (netxen_rom_fast_read(adapter, 0, &n) == 0 | ||
515 | && (n & NETXEN_ROM_FIRST_BARRIER)) { | ||
516 | n &= ~NETXEN_ROM_ROUNDUP; | ||
517 | if (n < NETXEN_ROM_FOUND_INIT) { | ||
518 | if (verbose) | ||
519 | printk("%s: %d CRB init values found" | ||
520 | " in ROM.\n", netxen_nic_driver_name, n); | ||
521 | } else { | ||
522 | printk("%s:n=0x%x Error! NetXen card flash not" | ||
523 | " initialized.\n", __FUNCTION__, n); | ||
524 | return -EIO; | ||
525 | } | ||
526 | buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); | ||
527 | if (buf == NULL) { | ||
528 | printk("%s: netxen_pinit_from_rom: Unable to calloc " | ||
529 | "memory.\n", netxen_nic_driver_name); | ||
530 | return -ENOMEM; | ||
531 | } | ||
532 | for (i = 0; i < n; i++) { | ||
533 | if (netxen_rom_fast_read(adapter, 8 * i + 4, &val) != 0 | ||
534 | || netxen_rom_fast_read(adapter, 8 * i + 8, | ||
535 | &addr) != 0) | ||
536 | return -EIO; | ||
537 | |||
538 | buf[i].addr = addr; | ||
539 | buf[i].data = val; | ||
540 | |||
541 | if (verbose) | ||
542 | printk("%s: PCI: 0x%08x == 0x%08x\n", | ||
543 | netxen_nic_driver_name, (unsigned int) | ||
544 | netxen_decode_crb_addr((unsigned long) | ||
545 | addr), val); | ||
546 | } | ||
547 | for (i = 0; i < n; i++) { | ||
548 | |||
549 | off = | ||
550 | netxen_decode_crb_addr((unsigned long)buf[i].addr) + | ||
551 | NETXEN_PCI_CRBSPACE; | ||
552 | /* skipping cold reboot MAGIC */ | ||
553 | if (off == NETXEN_CAM_RAM(0x1fc)) | ||
554 | continue; | ||
555 | |||
556 | /* After writing this register, HW needs time for CRB */ | ||
557 | /* to quiet down (else crb_window returns 0xffffffff) */ | ||
558 | if (off == NETXEN_ROMUSB_GLB_SW_RESET) { | ||
559 | init_delay = 1; | ||
560 | /* hold xdma in reset also */ | ||
561 | buf[i].data = NETXEN_NIC_XDMA_RESET; | ||
562 | } | ||
563 | |||
564 | if (ADDR_IN_WINDOW1(off)) { | ||
565 | writel(buf[i].data, | ||
566 | NETXEN_CRB_NORMALIZE(adapter, off)); | ||
567 | } else { | ||
568 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
569 | writel(buf[i].data, | ||
570 | pci_base_offset(adapter, off)); | ||
571 | |||
572 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
573 | } | ||
574 | if (init_delay == 1) { | ||
575 | ssleep(1); | ||
576 | init_delay = 0; | ||
577 | } | ||
578 | msleep(1); | ||
579 | } | ||
580 | kfree(buf); | ||
581 | |||
582 | /* disable_peg_cache_all */ | ||
583 | |||
584 | /* unreset_net_cache */ | ||
585 | netxen_nic_hw_read_wx(adapter, NETXEN_ROMUSB_GLB_SW_RESET, &val, | ||
586 | 4); | ||
587 | netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, | ||
588 | (val & 0xffffff0f)); | ||
589 | /* p2dn replyCount */ | ||
590 | netxen_crb_writelit_adapter(adapter, | ||
591 | NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); | ||
592 | /* disable_peg_cache 0 */ | ||
593 | netxen_crb_writelit_adapter(adapter, | ||
594 | NETXEN_CRB_PEG_NET_D + 0x4c, 8); | ||
595 | /* disable_peg_cache 1 */ | ||
596 | netxen_crb_writelit_adapter(adapter, | ||
597 | NETXEN_CRB_PEG_NET_I + 0x4c, 8); | ||
598 | |||
599 | /* peg_clr_all */ | ||
600 | |||
601 | /* peg_clr 0 */ | ||
602 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, | ||
603 | 0); | ||
604 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, | ||
605 | 0); | ||
606 | /* peg_clr 1 */ | ||
607 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, | ||
608 | 0); | ||
609 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, | ||
610 | 0); | ||
611 | /* peg_clr 2 */ | ||
612 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, | ||
613 | 0); | ||
614 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, | ||
615 | 0); | ||
616 | /* peg_clr 3 */ | ||
617 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, | ||
618 | 0); | ||
619 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, | ||
620 | 0); | ||
621 | } | ||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) | ||
626 | { | ||
627 | u32 val = 0; | ||
628 | int loops = 0; | ||
629 | |||
630 | if (!pegtune_val) { | ||
631 | while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) { | ||
632 | udelay(100); | ||
633 | schedule(); | ||
634 | val = | ||
635 | readl(NETXEN_CRB_NORMALIZE | ||
636 | (adapter, CRB_CMDPEG_STATE)); | ||
637 | loops++; | ||
638 | } | ||
639 | if (val != PHAN_INITIALIZE_COMPLETE) | ||
640 | printk("WARNING: Initial boot wait loop failed...\n"); | ||
641 | } | ||
642 | } | ||
643 | |||
644 | int netxen_nic_rx_has_work(struct netxen_adapter *adapter) | ||
645 | { | ||
646 | int ctx; | ||
647 | |||
648 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
649 | struct netxen_recv_context *recv_ctx = | ||
650 | &(adapter->recv_ctx[ctx]); | ||
651 | u32 consumer; | ||
652 | struct status_desc *desc_head; | ||
653 | struct status_desc *desc; | ||
654 | |||
655 | consumer = recv_ctx->status_rx_consumer; | ||
656 | desc_head = recv_ctx->rcv_status_desc_head; | ||
657 | desc = &desc_head[consumer]; | ||
658 | |||
659 | if (((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST)) | ||
660 | return 1; | ||
661 | } | ||
662 | |||
663 | return 0; | ||
664 | } | ||
665 | |||
666 | static inline int netxen_nic_check_temp(struct netxen_adapter *adapter) | ||
667 | { | ||
668 | int port_num; | ||
669 | struct netxen_port *port; | ||
670 | struct net_device *netdev; | ||
671 | uint32_t temp, temp_state, temp_val; | ||
672 | int rv = 0; | ||
673 | |||
674 | temp = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_TEMP_STATE)); | ||
675 | |||
676 | temp_state = nx_get_temp_state(temp); | ||
677 | temp_val = nx_get_temp_val(temp); | ||
678 | |||
679 | if (temp_state == NX_TEMP_PANIC) { | ||
680 | printk(KERN_ALERT | ||
681 | "%s: Device temperature %d degrees C exceeds" | ||
682 | " maximum allowed. Hardware has been shut down.\n", | ||
683 | netxen_nic_driver_name, temp_val); | ||
684 | for (port_num = 0; port_num < adapter->ahw.max_ports; | ||
685 | port_num++) { | ||
686 | port = adapter->port[port_num]; | ||
687 | netdev = port->netdev; | ||
688 | |||
689 | netif_carrier_off(netdev); | ||
690 | netif_stop_queue(netdev); | ||
691 | } | ||
692 | rv = 1; | ||
693 | } else if (temp_state == NX_TEMP_WARN) { | ||
694 | if (adapter->temp == NX_TEMP_NORMAL) { | ||
695 | printk(KERN_ALERT | ||
696 | "%s: Device temperature %d degrees C " | ||
697 | "exceeds operating range." | ||
698 | " Immediate action needed.\n", | ||
699 | netxen_nic_driver_name, temp_val); | ||
700 | } | ||
701 | } else { | ||
702 | if (adapter->temp == NX_TEMP_WARN) { | ||
703 | printk(KERN_INFO | ||
704 | "%s: Device temperature is now %d degrees C" | ||
705 | " in normal range.\n", netxen_nic_driver_name, | ||
706 | temp_val); | ||
707 | } | ||
708 | } | ||
709 | adapter->temp = temp_state; | ||
710 | return rv; | ||
711 | } | ||
712 | |||
713 | void netxen_watchdog_task(unsigned long v) | ||
714 | { | ||
715 | int port_num; | ||
716 | struct netxen_port *port; | ||
717 | struct net_device *netdev; | ||
718 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; | ||
719 | |||
720 | if (netxen_nic_check_temp(adapter)) | ||
721 | return; | ||
722 | |||
723 | for (port_num = 0; port_num < adapter->ahw.max_ports; port_num++) { | ||
724 | port = adapter->port[port_num]; | ||
725 | netdev = port->netdev; | ||
726 | |||
727 | if ((netif_running(netdev)) && !netif_carrier_ok(netdev)) { | ||
728 | printk(KERN_INFO "%s port %d, %s carrier is now ok\n", | ||
729 | netxen_nic_driver_name, port_num, netdev->name); | ||
730 | netif_carrier_on(netdev); | ||
731 | } | ||
732 | |||
733 | if (netif_queue_stopped(netdev)) | ||
734 | netif_wake_queue(netdev); | ||
735 | } | ||
736 | |||
737 | if (adapter->ops->handle_phy_intr) | ||
738 | adapter->ops->handle_phy_intr(adapter); | ||
739 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | ||
740 | } | ||
741 | |||
742 | /* | ||
743 | * netxen_process_rcv() sends the received packet to the protocol stack, | ||
744 | * and if the number of receives exceeds RX_BUFFERS_REFILL, then we | ||
745 | * invoke the routine to send more rx buffers to the Phantom... | ||
746 | */ | ||
747 | void | ||
748 | netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | ||
749 | struct status_desc *desc) | ||
750 | { | ||
751 | struct netxen_port *port = adapter->port[STATUS_DESC_PORT(desc)]; | ||
752 | struct pci_dev *pdev = port->pdev; | ||
753 | struct net_device *netdev = port->netdev; | ||
754 | int index = le16_to_cpu(desc->reference_handle); | ||
755 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]); | ||
756 | struct netxen_rx_buffer *buffer; | ||
757 | struct sk_buff *skb; | ||
758 | u32 length = le16_to_cpu(desc->total_length); | ||
759 | u32 desc_ctx; | ||
760 | struct netxen_rcv_desc_ctx *rcv_desc; | ||
761 | int ret; | ||
762 | |||
763 | desc_ctx = STATUS_DESC_TYPE(desc); | ||
764 | if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) { | ||
765 | printk("%s: %s Bad Rcv descriptor ring\n", | ||
766 | netxen_nic_driver_name, netdev->name); | ||
767 | return; | ||
768 | } | ||
769 | |||
770 | rcv_desc = &recv_ctx->rcv_desc[desc_ctx]; | ||
771 | buffer = &rcv_desc->rx_buf_arr[index]; | ||
772 | |||
773 | pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size, | ||
774 | PCI_DMA_FROMDEVICE); | ||
775 | |||
776 | skb = (struct sk_buff *)buffer->skb; | ||
777 | |||
778 | if (likely(STATUS_DESC_STATUS(desc) == STATUS_CKSUM_OK)) { | ||
779 | port->stats.csummed++; | ||
780 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
781 | } else | ||
782 | skb->ip_summed = CHECKSUM_NONE; | ||
783 | skb->dev = netdev; | ||
784 | skb_put(skb, length); | ||
785 | skb->protocol = eth_type_trans(skb, netdev); | ||
786 | |||
787 | ret = netif_receive_skb(skb); | ||
788 | |||
789 | /* | ||
790 | * RH: Do we need these stats on a regular basis? Can we get them | ||
791 | * from the Linux stats? | ||
792 | */ | ||
793 | switch (ret) { | ||
794 | case NET_RX_SUCCESS: | ||
795 | port->stats.uphappy++; | ||
796 | break; | ||
797 | |||
798 | case NET_RX_CN_LOW: | ||
799 | port->stats.uplcong++; | ||
800 | break; | ||
801 | |||
802 | case NET_RX_CN_MOD: | ||
803 | port->stats.upmcong++; | ||
804 | break; | ||
805 | |||
806 | case NET_RX_CN_HIGH: | ||
807 | port->stats.uphcong++; | ||
808 | break; | ||
809 | |||
810 | case NET_RX_DROP: | ||
811 | port->stats.updropped++; | ||
812 | break; | ||
813 | |||
814 | default: | ||
815 | port->stats.updunno++; | ||
816 | break; | ||
817 | } | ||
818 | |||
819 | netdev->last_rx = jiffies; | ||
820 | |||
821 | rcv_desc->rcv_free++; | ||
822 | rcv_desc->rcv_pending--; | ||
823 | |||
824 | /* | ||
825 | * We just consumed one buffer so post a buffer. | ||
826 | */ | ||
827 | adapter->stats.post_called++; | ||
828 | buffer->skb = NULL; | ||
829 | buffer->state = NETXEN_BUFFER_FREE; | ||
830 | |||
831 | port->stats.no_rcv++; | ||
832 | port->stats.rxbytes += length; | ||
833 | } | ||
834 | |||
835 | /* Process Receive status ring */ | ||
836 | u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | ||
837 | { | ||
838 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]); | ||
839 | struct status_desc *desc_head = recv_ctx->rcv_status_desc_head; | ||
840 | struct status_desc *desc; /* used to read status desc here */ | ||
841 | u32 consumer = recv_ctx->status_rx_consumer; | ||
842 | int count = 0, ring; | ||
843 | |||
844 | DPRINTK(INFO, "procesing receive\n"); | ||
845 | /* | ||
846 | * we assume in this case that there is only one port and that is | ||
847 | * port #1...changes need to be done in firmware to indicate port | ||
848 | * number as part of the descriptor. This way we will be able to get | ||
849 | * the netdev which is associated with that device. | ||
850 | */ | ||
851 | while (count < max) { | ||
852 | desc = &desc_head[consumer]; | ||
853 | if (!((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST)) { | ||
854 | DPRINTK(ERR, "desc %p ownedby %x\n", desc, desc->owner); | ||
855 | break; | ||
856 | } | ||
857 | netxen_process_rcv(adapter, ctxid, desc); | ||
858 | desc->owner = STATUS_OWNER_PHANTOM; | ||
859 | consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1); | ||
860 | count++; | ||
861 | } | ||
862 | if (count) { | ||
863 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
864 | netxen_post_rx_buffers(adapter, ctxid, ring); | ||
865 | } | ||
866 | } | ||
867 | |||
868 | /* update the consumer index in phantom */ | ||
869 | if (count) { | ||
870 | adapter->stats.process_rcv++; | ||
871 | recv_ctx->status_rx_consumer = consumer; | ||
872 | |||
873 | /* Window = 1 */ | ||
874 | writel(consumer, | ||
875 | NETXEN_CRB_NORMALIZE(adapter, | ||
876 | recv_crb_registers[ctxid]. | ||
877 | crb_rcv_status_consumer)); | ||
878 | } | ||
879 | |||
880 | return count; | ||
881 | } | ||
882 | |||
883 | /* Process Command status ring */ | ||
884 | void netxen_process_cmd_ring(unsigned long data) | ||
885 | { | ||
886 | u32 last_consumer; | ||
887 | u32 consumer; | ||
888 | struct netxen_adapter *adapter = (struct netxen_adapter *)data; | ||
889 | int count = 0; | ||
890 | struct netxen_cmd_buffer *buffer; | ||
891 | struct netxen_port *port; /* port #1 */ | ||
892 | struct netxen_port *nport; | ||
893 | struct pci_dev *pdev; | ||
894 | struct netxen_skb_frag *frag; | ||
895 | u32 i; | ||
896 | struct sk_buff *skb = NULL; | ||
897 | int p; | ||
898 | |||
899 | spin_lock(&adapter->tx_lock); | ||
900 | last_consumer = adapter->last_cmd_consumer; | ||
901 | DPRINTK(INFO, "procesing xmit complete\n"); | ||
902 | /* we assume in this case that there is only one port and that is | ||
903 | * port #1...changes need to be done in firmware to indicate port | ||
904 | * number as part of the descriptor. This way we will be able to get | ||
905 | * the netdev which is associated with that device. | ||
906 | */ | ||
907 | consumer = | ||
908 | readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET)); | ||
909 | |||
910 | if (last_consumer == consumer) { /* Ring is empty */ | ||
911 | DPRINTK(INFO, "last_consumer %d == consumer %d\n", | ||
912 | last_consumer, consumer); | ||
913 | spin_unlock(&adapter->tx_lock); | ||
914 | return; | ||
915 | } | ||
916 | |||
917 | adapter->proc_cmd_buf_counter++; | ||
918 | adapter->stats.process_xmit++; | ||
919 | /* | ||
920 | * Not needed - does not seem to be used anywhere. | ||
921 | * adapter->cmd_consumer = consumer; | ||
922 | */ | ||
923 | spin_unlock(&adapter->tx_lock); | ||
924 | |||
925 | while ((last_consumer != consumer) && (count < MAX_STATUS_HANDLE)) { | ||
926 | buffer = &adapter->cmd_buf_arr[last_consumer]; | ||
927 | port = adapter->port[buffer->port]; | ||
928 | pdev = port->pdev; | ||
929 | frag = &buffer->frag_array[0]; | ||
930 | skb = buffer->skb; | ||
931 | if (skb && (cmpxchg(&buffer->skb, skb, 0) == skb)) { | ||
932 | pci_unmap_single(pdev, frag->dma, frag->length, | ||
933 | PCI_DMA_TODEVICE); | ||
934 | for (i = 1; i < buffer->frag_count; i++) { | ||
935 | DPRINTK(INFO, "getting fragment no %d\n", i); | ||
936 | frag++; /* Get the next frag */ | ||
937 | pci_unmap_page(pdev, frag->dma, frag->length, | ||
938 | PCI_DMA_TODEVICE); | ||
939 | } | ||
940 | |||
941 | port->stats.skbfreed++; | ||
942 | dev_kfree_skb_any(skb); | ||
943 | skb = NULL; | ||
944 | } else if (adapter->proc_cmd_buf_counter == 1) { | ||
945 | port->stats.txnullskb++; | ||
946 | } | ||
947 | if (unlikely(netif_queue_stopped(port->netdev) | ||
948 | && netif_carrier_ok(port->netdev)) | ||
949 | && ((jiffies - port->netdev->trans_start) > | ||
950 | port->netdev->watchdog_timeo)) { | ||
951 | schedule_work(&port->adapter->tx_timeout_task); | ||
952 | } | ||
953 | |||
954 | last_consumer = get_next_index(last_consumer, | ||
955 | adapter->max_tx_desc_count); | ||
956 | count++; | ||
957 | } | ||
958 | adapter->stats.noxmitdone += count; | ||
959 | |||
960 | count = 0; | ||
961 | spin_lock(&adapter->tx_lock); | ||
962 | if ((--adapter->proc_cmd_buf_counter) == 0) { | ||
963 | adapter->last_cmd_consumer = last_consumer; | ||
964 | while ((adapter->last_cmd_consumer != consumer) | ||
965 | && (count < MAX_STATUS_HANDLE)) { | ||
966 | buffer = | ||
967 | &adapter->cmd_buf_arr[adapter->last_cmd_consumer]; | ||
968 | count++; | ||
969 | if (buffer->skb) | ||
970 | break; | ||
971 | else | ||
972 | adapter->last_cmd_consumer = | ||
973 | get_next_index(adapter->last_cmd_consumer, | ||
974 | adapter->max_tx_desc_count); | ||
975 | } | ||
976 | } | ||
977 | if (count) { | ||
978 | for (p = 0; p < adapter->ahw.max_ports; p++) { | ||
979 | nport = adapter->port[p]; | ||
980 | if (netif_queue_stopped(nport->netdev) | ||
981 | && (nport->flags & NETXEN_NETDEV_STATUS)) { | ||
982 | netif_wake_queue(nport->netdev); | ||
983 | nport->flags &= ~NETXEN_NETDEV_STATUS; | ||
984 | } | ||
985 | } | ||
986 | } | ||
987 | |||
988 | spin_unlock(&adapter->tx_lock); | ||
989 | DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer, | ||
990 | __FUNCTION__); | ||
991 | } | ||
992 | |||
993 | /* | ||
994 | * netxen_post_rx_buffers puts buffer in the Phantom memory | ||
995 | */ | ||
996 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | ||
997 | { | ||
998 | struct pci_dev *pdev = adapter->ahw.pdev; | ||
999 | struct sk_buff *skb; | ||
1000 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); | ||
1001 | struct netxen_rcv_desc_ctx *rcv_desc = NULL; | ||
1002 | struct netxen_recv_crb *crbarea = &recv_crb_registers[ctx]; | ||
1003 | struct netxen_rcv_desc_crb *rcv_desc_crb = NULL; | ||
1004 | u32 producer; | ||
1005 | struct rcv_desc *pdesc; | ||
1006 | struct netxen_rx_buffer *buffer; | ||
1007 | int count = 0; | ||
1008 | int index = 0; | ||
1009 | |||
1010 | adapter->stats.post_called++; | ||
1011 | rcv_desc = &recv_ctx->rcv_desc[ringid]; | ||
1012 | rcv_desc_crb = &crbarea->rcv_desc_crb[ringid]; | ||
1013 | |||
1014 | producer = rcv_desc->producer; | ||
1015 | index = rcv_desc->begin_alloc; | ||
1016 | buffer = &rcv_desc->rx_buf_arr[index]; | ||
1017 | /* We can start writing rx descriptors into the phantom memory. */ | ||
1018 | while (buffer->state == NETXEN_BUFFER_FREE) { | ||
1019 | skb = dev_alloc_skb(rcv_desc->skb_size); | ||
1020 | if (unlikely(!skb)) { | ||
1021 | /* | ||
1022 | * We need to schedule the posting of buffers to the pegs. | ||
1023 | */ | ||
1024 | rcv_desc->begin_alloc = index; | ||
1025 | DPRINTK(ERR, "netxen_post_rx_buffers: " | ||
1026 | " allocated only %d buffers\n", count); | ||
1027 | break; | ||
1028 | } | ||
1029 | count++; /* now there should be no failure */ | ||
1030 | pdesc = &rcv_desc->desc_head[producer]; | ||
1031 | skb_reserve(skb, NET_IP_ALIGN); | ||
1032 | /* | ||
1033 | * This will be setup when we receive the | ||
1034 | * buffer after it has been filled | ||
1035 | * skb->dev = netdev; | ||
1036 | */ | ||
1037 | buffer->skb = skb; | ||
1038 | buffer->state = NETXEN_BUFFER_BUSY; | ||
1039 | buffer->dma = pci_map_single(pdev, skb->data, | ||
1040 | rcv_desc->dma_size, | ||
1041 | PCI_DMA_FROMDEVICE); | ||
1042 | /* make a rcv descriptor */ | ||
1043 | pdesc->reference_handle = le16_to_cpu(buffer->ref_handle); | ||
1044 | pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size); | ||
1045 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); | ||
1046 | DPRINTK(INFO, "done writing descripter\n"); | ||
1047 | producer = | ||
1048 | get_next_index(producer, rcv_desc->max_rx_desc_count); | ||
1049 | index = get_next_index(index, rcv_desc->max_rx_desc_count); | ||
1050 | buffer = &rcv_desc->rx_buf_arr[index]; | ||
1051 | } | ||
1052 | |||
1053 | /* if we did allocate buffers, then write the count to Phantom */ | ||
1054 | if (count) { | ||
1055 | rcv_desc->begin_alloc = index; | ||
1056 | rcv_desc->rcv_pending += count; | ||
1057 | adapter->stats.lastposted = count; | ||
1058 | adapter->stats.posted += count; | ||
1059 | rcv_desc->producer = producer; | ||
1060 | if (rcv_desc->rcv_free >= 32) { | ||
1061 | rcv_desc->rcv_free = 0; | ||
1062 | /* Window = 1 */ | ||
1063 | writel((producer - 1) & | ||
1064 | (rcv_desc->max_rx_desc_count - 1), | ||
1065 | NETXEN_CRB_NORMALIZE(adapter, | ||
1066 | rcv_desc_crb-> | ||
1067 | crb_rcv_producer_offset)); | ||
1068 | wmb(); | ||
1069 | } | ||
1070 | } | ||
1071 | } | ||
1072 | |||
1073 | int netxen_nic_tx_has_work(struct netxen_adapter *adapter) | ||
1074 | { | ||
1075 | if (find_diff_among(adapter->last_cmd_consumer, | ||
1076 | adapter->cmd_producer, | ||
1077 | adapter->max_tx_desc_count) > 0) | ||
1078 | return 1; | ||
1079 | |||
1080 | return 0; | ||
1081 | } | ||
1082 | |||
1083 | int | ||
1084 | netxen_nic_fill_statistics(struct netxen_adapter *adapter, | ||
1085 | struct netxen_port *port, | ||
1086 | struct netxen_statistics *netxen_stats) | ||
1087 | { | ||
1088 | void __iomem *addr; | ||
1089 | |||
1090 | if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { | ||
1091 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
1092 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_TX_BYTE_CNT, | ||
1093 | &(netxen_stats->tx_bytes)); | ||
1094 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_TX_FRAME_CNT, | ||
1095 | &(netxen_stats->tx_packets)); | ||
1096 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_RX_BYTE_CNT, | ||
1097 | &(netxen_stats->rx_bytes)); | ||
1098 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_RX_FRAME_CNT, | ||
1099 | &(netxen_stats->rx_packets)); | ||
1100 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_AGGR_ERROR_CNT, | ||
1101 | &(netxen_stats->rx_errors)); | ||
1102 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_CRC_ERROR_CNT, | ||
1103 | &(netxen_stats->rx_crc_errors)); | ||
1104 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR, | ||
1105 | &(netxen_stats-> | ||
1106 | rx_long_length_error)); | ||
1107 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR, | ||
1108 | &(netxen_stats-> | ||
1109 | rx_short_length_error)); | ||
1110 | |||
1111 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
1112 | } else { | ||
1113 | spin_lock_bh(&adapter->tx_lock); | ||
1114 | netxen_stats->tx_bytes = port->stats.txbytes; | ||
1115 | netxen_stats->tx_packets = port->stats.xmitedframes + | ||
1116 | port->stats.xmitfinished; | ||
1117 | netxen_stats->rx_bytes = port->stats.rxbytes; | ||
1118 | netxen_stats->rx_packets = port->stats.no_rcv; | ||
1119 | netxen_stats->rx_errors = port->stats.rcvdbadskb; | ||
1120 | netxen_stats->tx_errors = port->stats.nocmddescriptor; | ||
1121 | netxen_stats->rx_short_length_error = port->stats.uplcong; | ||
1122 | netxen_stats->rx_long_length_error = port->stats.uphcong; | ||
1123 | netxen_stats->rx_crc_errors = 0; | ||
1124 | netxen_stats->rx_mac_errors = 0; | ||
1125 | spin_unlock_bh(&adapter->tx_lock); | ||
1126 | } | ||
1127 | return 0; | ||
1128 | } | ||
1129 | |||
1130 | void netxen_nic_clear_stats(struct netxen_adapter *adapter) | ||
1131 | { | ||
1132 | struct netxen_port *port; | ||
1133 | int port_num; | ||
1134 | |||
1135 | memset(&adapter->stats, 0, sizeof(adapter->stats)); | ||
1136 | for (port_num = 0; port_num < adapter->ahw.max_ports; port_num++) { | ||
1137 | port = adapter->port[port_num]; | ||
1138 | memset(&port->stats, 0, sizeof(port->stats)); | ||
1139 | } | ||
1140 | } | ||
1141 | |||
1142 | int | ||
1143 | netxen_nic_clear_statistics(struct netxen_adapter *adapter, | ||
1144 | struct netxen_port *port) | ||
1145 | { | ||
1146 | int data = 0; | ||
1147 | |||
1148 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
1149 | |||
1150 | netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_TX_BYTE_CNT, &data); | ||
1151 | netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_TX_FRAME_CNT, | ||
1152 | &data); | ||
1153 | netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_RX_BYTE_CNT, &data); | ||
1154 | netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_RX_FRAME_CNT, | ||
1155 | &data); | ||
1156 | netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_AGGR_ERROR_CNT, | ||
1157 | &data); | ||
1158 | netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_CRC_ERROR_CNT, | ||
1159 | &data); | ||
1160 | netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR, | ||
1161 | &data); | ||
1162 | netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR, | ||
1163 | &data); | ||
1164 | |||
1165 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
1166 | netxen_nic_clear_stats(adapter); | ||
1167 | return 0; | ||
1168 | } | ||
1169 | |||
1170 | int | ||
1171 | netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data, | ||
1172 | struct netxen_port *port) | ||
1173 | { | ||
1174 | struct netxen_nic_ioctl_data data; | ||
1175 | struct netxen_nic_ioctl_data *up_data; | ||
1176 | int retval = 0; | ||
1177 | struct netxen_statistics netxen_stats; | ||
1178 | |||
1179 | up_data = (void *)u_data; | ||
1180 | |||
1181 | DPRINTK(INFO, "doing ioctl for %p\n", adapter); | ||
1182 | if (copy_from_user(&data, (void __user *)up_data, sizeof(data))) { | ||
1183 | /* evil user tried to crash the kernel */ | ||
1184 | DPRINTK(ERR, "bad copy from userland: %d\n", (int)sizeof(data)); | ||
1185 | retval = -EFAULT; | ||
1186 | goto error_out; | ||
1187 | } | ||
1188 | |||
1189 | /* Shouldn't access beyond legal limits of "char u[64];" member */ | ||
1190 | if (!data.ptr && (data.size > sizeof(data.u))) { | ||
1191 | /* evil user tried to crash the kernel */ | ||
1192 | DPRINTK(ERR, "bad size: %d\n", data.size); | ||
1193 | retval = -EFAULT; | ||
1194 | goto error_out; | ||
1195 | } | ||
1196 | |||
1197 | switch (data.cmd) { | ||
1198 | case netxen_nic_cmd_pci_read: | ||
1199 | if ((retval = netxen_nic_hw_read_wx(adapter, data.off, | ||
1200 | &(data.u), data.size))) | ||
1201 | goto error_out; | ||
1202 | if (copy_to_user | ||
1203 | ((void __user *)&(up_data->u), &(data.u), data.size)) { | ||
1204 | DPRINTK(ERR, "bad copy to userland: %d\n", | ||
1205 | (int)sizeof(data)); | ||
1206 | retval = -EFAULT; | ||
1207 | goto error_out; | ||
1208 | } | ||
1209 | data.rv = 0; | ||
1210 | break; | ||
1211 | |||
1212 | case netxen_nic_cmd_pci_write: | ||
1213 | data.rv = netxen_nic_hw_write_wx(adapter, data.off, &(data.u), | ||
1214 | data.size); | ||
1215 | break; | ||
1216 | |||
1217 | case netxen_nic_cmd_pci_config_read: | ||
1218 | switch (data.size) { | ||
1219 | case 1: | ||
1220 | data.rv = pci_read_config_byte(adapter->ahw.pdev, | ||
1221 | data.off, | ||
1222 | (char *)&(data.u)); | ||
1223 | break; | ||
1224 | case 2: | ||
1225 | data.rv = pci_read_config_word(adapter->ahw.pdev, | ||
1226 | data.off, | ||
1227 | (short *)&(data.u)); | ||
1228 | break; | ||
1229 | case 4: | ||
1230 | data.rv = pci_read_config_dword(adapter->ahw.pdev, | ||
1231 | data.off, | ||
1232 | (u32 *)&(data.u)); | ||
1233 | break; | ||
1234 | } | ||
1235 | if (copy_to_user | ||
1236 | ((void __user *)&(up_data->u), &(data.u), data.size)) { | ||
1237 | DPRINTK(ERR, "bad copy to userland: %d\n", | ||
1238 | (int)sizeof(data)); | ||
1239 | retval = -EFAULT; | ||
1240 | goto error_out; | ||
1241 | } | ||
1242 | break; | ||
1243 | |||
1244 | case netxen_nic_cmd_pci_config_write: | ||
1245 | switch (data.size) { | ||
1246 | case 1: | ||
1247 | data.rv = pci_write_config_byte(adapter->ahw.pdev, | ||
1248 | data.off, | ||
1249 | *(char *)&(data.u)); | ||
1250 | break; | ||
1251 | case 2: | ||
1252 | data.rv = pci_write_config_word(adapter->ahw.pdev, | ||
1253 | data.off, | ||
1254 | *(short *)&(data.u)); | ||
1255 | break; | ||
1256 | case 4: | ||
1257 | data.rv = pci_write_config_dword(adapter->ahw.pdev, | ||
1258 | data.off, | ||
1259 | *(u32 *)&(data.u)); | ||
1260 | break; | ||
1261 | } | ||
1262 | break; | ||
1263 | |||
1264 | case netxen_nic_cmd_get_stats: | ||
1265 | data.rv = | ||
1266 | netxen_nic_fill_statistics(adapter, port, &netxen_stats); | ||
1267 | if (copy_to_user | ||
1268 | ((void __user *)(up_data->ptr), (void *)&netxen_stats, | ||
1269 | sizeof(struct netxen_statistics))) { | ||
1270 | DPRINTK(ERR, "bad copy to userland: %d\n", | ||
1271 | (int)sizeof(netxen_stats)); | ||
1272 | retval = -EFAULT; | ||
1273 | goto error_out; | ||
1274 | } | ||
1275 | up_data->rv = data.rv; | ||
1276 | break; | ||
1277 | |||
1278 | case netxen_nic_cmd_clear_stats: | ||
1279 | data.rv = netxen_nic_clear_statistics(adapter, port); | ||
1280 | up_data->rv = data.rv; | ||
1281 | break; | ||
1282 | |||
1283 | case netxen_nic_cmd_get_version: | ||
1284 | if (copy_to_user | ||
1285 | ((void __user *)&(up_data->u), NETXEN_NIC_LINUX_VERSIONID, | ||
1286 | sizeof(NETXEN_NIC_LINUX_VERSIONID))) { | ||
1287 | DPRINTK(ERR, "bad copy to userland: %d\n", | ||
1288 | (int)sizeof(data)); | ||
1289 | retval = -EFAULT; | ||
1290 | goto error_out; | ||
1291 | } | ||
1292 | break; | ||
1293 | |||
1294 | default: | ||
1295 | DPRINTK(INFO, "bad command %d for %p\n", data.cmd, adapter); | ||
1296 | retval = -EOPNOTSUPP; | ||
1297 | goto error_out; | ||
1298 | } | ||
1299 | put_user(data.rv, (u16 __user *) (&(up_data->rv))); | ||
1300 | DPRINTK(INFO, "ioctl for %p done.\n", adapter); | ||
1301 | |||
1302 | error_out: | ||
1303 | return retval; | ||
1304 | } | ||
diff --git a/drivers/net/netxen/netxen_nic_ioctl.h b/drivers/net/netxen/netxen_nic_ioctl.h new file mode 100644 index 000000000000..23e53adbf123 --- /dev/null +++ b/drivers/net/netxen/netxen_nic_ioctl.h | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | */ | ||
29 | |||
30 | #ifndef __NETXEN_NIC_IOCTL_H__ | ||
31 | #define __NETXEN_NIC_IOCTL_H__ | ||
32 | |||
33 | #include <linux/sockios.h> | ||
34 | |||
35 | #define NETXEN_CMD_START SIOCDEVPRIVATE | ||
36 | #define NETXEN_NIC_CMD (NETXEN_CMD_START + 1) | ||
37 | #define NETXEN_NIC_NAME (NETXEN_CMD_START + 2) | ||
38 | #define NETXEN_NIC_NAME_LEN 16 | ||
39 | #define NETXEN_NIC_NAME_RSP "NETXEN" | ||
40 | |||
41 | typedef enum { | ||
42 | netxen_nic_cmd_none = 0, | ||
43 | netxen_nic_cmd_pci_read, | ||
44 | netxen_nic_cmd_pci_write, | ||
45 | netxen_nic_cmd_pci_mem_read, | ||
46 | netxen_nic_cmd_pci_mem_write, | ||
47 | netxen_nic_cmd_pci_config_read, | ||
48 | netxen_nic_cmd_pci_config_write, | ||
49 | netxen_nic_cmd_get_stats, | ||
50 | netxen_nic_cmd_clear_stats, | ||
51 | netxen_nic_cmd_get_version | ||
52 | } netxen_nic_ioctl_cmd_t; | ||
53 | |||
54 | struct netxen_nic_ioctl_data { | ||
55 | u32 cmd; | ||
56 | u32 unused1; | ||
57 | u64 off; | ||
58 | u32 size; | ||
59 | u32 rv; | ||
60 | char u[64]; | ||
61 | void *ptr; | ||
62 | }; | ||
63 | |||
64 | struct netxen_statistics { | ||
65 | u64 rx_packets; | ||
66 | u64 tx_packets; | ||
67 | u64 rx_bytes; | ||
68 | u64 rx_errors; | ||
69 | u64 tx_bytes; | ||
70 | u64 tx_errors; | ||
71 | u64 rx_crc_errors; | ||
72 | u64 rx_short_length_error; | ||
73 | u64 rx_long_length_error; | ||
74 | u64 rx_mac_errors; | ||
75 | }; | ||
76 | |||
77 | #endif /* __NETXEN_NIC_IOCTL_H__ */ | ||
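A rough illustration of how a user-space monitoring tool (such as the tcl/phanmon utilities referenced later in netxen_nic_main.c) might drive this private ioctl interface. This is an editorial sketch only, not code from the patch; the interface name "eth0", the fixed-width mirror of the ioctl structure, the register offset, and the assumption of a 64-bit build whose structure layout matches the kernel's are all illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

/* User-space mirror of struct netxen_nic_ioctl_data, assuming u32/u64
 * correspond to uint32_t/uint64_t and the layout matches the kernel's. */
struct netxen_nic_ioctl_data {
	uint32_t cmd;
	uint32_t unused1;
	uint64_t off;
	uint32_t size;
	uint32_t rv;
	char u[64];
	void *ptr;
};

#define NETXEN_NIC_CMD (SIOCDEVPRIVATE + 1)

int main(void)
{
	struct netxen_nic_ioctl_data data;
	struct ifreq ifr;
	uint32_t value;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.cmd = 1;		/* netxen_nic_cmd_pci_read */
	data.off = 0x0;		/* hypothetical register offset */
	data.size = 4;		/* read one 32-bit word into data.u */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (char *)&data;

	if (ioctl(fd, NETXEN_NIC_CMD, &ifr) == 0) {
		memcpy(&value, data.u, sizeof(value));
		printf("rv=%u value=0x%08x\n", data.rv, value);
	}
	close(fd);
	return 0;
}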
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c new file mode 100644 index 000000000000..ae180fee8008 --- /dev/null +++ b/drivers/net/netxen/netxen_nic_isr.c | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | */ | ||
29 | |||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/delay.h> | ||
32 | |||
33 | #include "netxen_nic.h" | ||
34 | #include "netxen_nic_hw.h" | ||
35 | #include "netxen_nic_phan_reg.h" | ||
36 | |||
37 | /* | ||
38 | * netxen_nic_get_stats - Get System Network Statistics | ||
39 | * @netdev: network interface device structure | ||
40 | */ | ||
41 | struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) | ||
42 | { | ||
43 | struct netxen_port *port = netdev_priv(netdev); | ||
44 | struct net_device_stats *stats = &port->net_stats; | ||
45 | |||
46 | memset(stats, 0, sizeof(*stats)); | ||
47 | |||
48 | /* total packets received */ | ||
49 | stats->rx_packets = port->stats.no_rcv; | ||
50 | /* total packets transmitted */ | ||
51 | stats->tx_packets = port->stats.xmitedframes + port->stats.xmitfinished; | ||
52 | /* total bytes received */ | ||
53 | stats->rx_bytes = port->stats.rxbytes; | ||
54 | /* total bytes transmitted */ | ||
55 | stats->tx_bytes = port->stats.txbytes; | ||
56 | /* bad packets received */ | ||
57 | stats->rx_errors = port->stats.rcvdbadskb; | ||
58 | /* packet transmit problems */ | ||
59 | stats->tx_errors = port->stats.nocmddescriptor; | ||
60 | /* no space in linux buffers */ | ||
61 | stats->rx_dropped = port->stats.updropped; | ||
62 | /* no space available in linux */ | ||
63 | stats->tx_dropped = port->stats.txdropped; | ||
64 | |||
65 | return stats; | ||
66 | } | ||
67 | |||
68 | void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 portno, | ||
69 | u32 link) | ||
70 | { | ||
71 | struct netxen_port *pport = adapter->port[portno]; | ||
72 | struct net_device *netdev = pport->netdev; | ||
73 | |||
74 | if (link) | ||
75 | netif_carrier_on(netdev); | ||
76 | else | ||
77 | netif_carrier_off(netdev); | ||
78 | } | ||
79 | |||
80 | void netxen_handle_port_int(struct netxen_adapter *adapter, u32 portno, | ||
81 | u32 enable) | ||
82 | { | ||
83 | __le32 int_src; | ||
84 | struct netxen_port *port; | ||
85 | |||
86 | /* This should clear the interrupt source */ | ||
87 | if (adapter->ops->phy_read) | ||
88 | adapter->ops->phy_read(adapter, portno, | ||
89 | NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS, | ||
90 | &int_src); | ||
91 | if (int_src == 0) { | ||
92 | DPRINTK(INFO, "No phy interrupts for port #%d\n", portno); | ||
93 | return; | ||
94 | } | ||
95 | if (adapter->ops->disable_phy_interrupts) | ||
96 | adapter->ops->disable_phy_interrupts(adapter, portno); | ||
97 | |||
98 | port = adapter->port[portno]; | ||
99 | |||
100 | if (netxen_get_phy_int_jabber(int_src)) | ||
101 | DPRINTK(INFO, "NetXen: %s Jabber interrupt \n", | ||
102 | port->netdev->name); | ||
103 | |||
104 | if (netxen_get_phy_int_polarity_changed(int_src)) | ||
105 | DPRINTK(INFO, "NetXen: %s POLARITY CHANGED int \n", | ||
106 | port->netdev->name); | ||
107 | |||
108 | if (netxen_get_phy_int_energy_detect(int_src)) | ||
109 | DPRINTK(INFO, "NetXen: %s ENERGY DETECT INT \n", | ||
110 | port->netdev->name); | ||
111 | |||
112 | if (netxen_get_phy_int_downshift(int_src)) | ||
113 | DPRINTK(INFO, "NetXen: %s DOWNSHIFT INT \n", | ||
114 | port->netdev->name); | ||
115 | /* write it down later.. */ | ||
116 | if ((netxen_get_phy_int_speed_changed(int_src)) | ||
117 | || (netxen_get_phy_int_link_status_changed(int_src))) { | ||
118 | __le32 status; | ||
119 | |||
120 | DPRINTK(INFO, "NetXen: %s SPEED CHANGED OR" | ||
121 | " LINK STATUS CHANGED \n", port->netdev->name); | ||
122 | |||
123 | if (adapter->ops->phy_read | ||
124 | && adapter->ops->phy_read(adapter, portno, | ||
125 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
126 | &status) == 0) { | ||
127 | if (netxen_get_phy_int_link_status_changed(int_src)) { | ||
128 | if (netxen_get_phy_link(status)) { | ||
129 | netxen_niu_gbe_init_port(adapter, | ||
130 | portno); | ||
131 | printk("%s: %s Link UP\n", | ||
132 | netxen_nic_driver_name, | ||
133 | port->netdev->name); | ||
134 | |||
135 | } else { | ||
136 | printk("%s: %s Link DOWN\n", | ||
137 | netxen_nic_driver_name, | ||
138 | port->netdev->name); | ||
139 | } | ||
140 | netxen_indicate_link_status(adapter, portno, | ||
141 | netxen_get_phy_link | ||
142 | (status)); | ||
143 | } | ||
144 | } | ||
145 | } | ||
146 | if (adapter->ops->enable_phy_interrupts) | ||
147 | adapter->ops->enable_phy_interrupts(adapter, portno); | ||
148 | } | ||
149 | |||
150 | void netxen_nic_isr_other(struct netxen_adapter *adapter) | ||
151 | { | ||
152 | u32 portno; | ||
153 | u32 val, linkup, qg_linksup; | ||
154 | |||
155 | /* verify the offset */ | ||
156 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); | ||
157 | if (val == adapter->ahw.qg_linksup) | ||
158 | return; | ||
159 | |||
160 | qg_linksup = adapter->ahw.qg_linksup; | ||
161 | adapter->ahw.qg_linksup = val; | ||
162 | DPRINTK(1, INFO, "%s: link update 0x%08x\n", netxen_nic_driver_name, | ||
163 | val); | ||
164 | for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++) { | ||
165 | linkup = val & 1; | ||
166 | if (linkup != (qg_linksup & 1)) { | ||
167 | printk(KERN_INFO "%s: PORT %d link %s\n", | ||
168 | netxen_nic_driver_name, portno, | ||
169 | ((linkup == 0) ? "down" : "up")); | ||
170 | netxen_indicate_link_status(adapter, portno, linkup); | ||
171 | if (linkup) | ||
172 | netxen_nic_set_link_parameters(adapter-> | ||
173 | port[portno]); | ||
174 | |||
175 | } | ||
176 | val = val >> 1; | ||
177 | qg_linksup = qg_linksup >> 1; | ||
178 | } | ||
179 | |||
180 | adapter->stats.otherints++; | ||
181 | |||
182 | } | ||
183 | |||
184 | void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter) | ||
185 | { | ||
186 | netxen_nic_isr_other(adapter); | ||
187 | } | ||
188 | |||
189 | void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter) | ||
190 | { | ||
191 | struct net_device *netdev = adapter->port[0]->netdev; | ||
192 | u32 val; | ||
193 | |||
194 | /* WINDOW = 1 */ | ||
195 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); | ||
196 | |||
197 | if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) { | ||
198 | printk(KERN_INFO "%s: %s NIC Link is down\n", | ||
199 | netxen_nic_driver_name, netdev->name); | ||
200 | adapter->ahw.xg_linkup = 0; | ||
201 | /* read twice to clear sticky bits */ | ||
202 | /* WINDOW = 0 */ | ||
203 | netxen_nic_read_w0(adapter, NETXEN_NIU_XG_STATUS, &val); | ||
204 | netxen_nic_read_w0(adapter, NETXEN_NIU_XG_STATUS, &val); | ||
205 | |||
206 | if ((val & 0xffb) != 0xffb) { | ||
207 | printk(KERN_INFO "%s ISR: Sync/Align BAD: 0x%08x\n", | ||
208 | netxen_nic_driver_name, val); | ||
209 | } | ||
210 | } else if (adapter->ahw.xg_linkup == 0 && val == XG_LINK_UP) { | ||
211 | printk(KERN_INFO "%s: %s NIC Link is up\n", | ||
212 | netxen_nic_driver_name, netdev->name); | ||
213 | adapter->ahw.xg_linkup = 1; | ||
214 | } | ||
215 | } | ||
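netxen_nic_isr_other() above treats the CRB_XG_STATE word as a per-port link bitmap and reports only those GbE ports whose bit actually changed since the last interrupt. A minimal standalone sketch of that compare-and-walk pattern (the names below are illustrative, not driver API):

#include <stdio.h>
#include <stdint.h>

#define MAX_GBE_PORTS 4

/* Report only the ports whose link bit differs between the old and new
 * state words, mirroring the shift-and-compare loop in the ISR. */
static void report_link_changes(uint32_t old_state, uint32_t new_state)
{
	int port;

	for (port = 0; port < MAX_GBE_PORTS; port++) {
		int was_up = (old_state >> port) & 1;
		int is_up = (new_state >> port) & 1;

		if (was_up != is_up)
			printf("port %d link %s\n", port, is_up ? "up" : "down");
	}
}

int main(void)
{
	report_link_changes(0x0, 0x5);	/* ports 0 and 2 came up */
	return 0;
}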
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c new file mode 100644 index 000000000000..1cb662d5bd76 --- /dev/null +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -0,0 +1,1161 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | * | ||
29 | * | ||
30 | * Main source file for NetXen NIC Driver on Linux | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/vmalloc.h> | ||
35 | #include "netxen_nic_hw.h" | ||
36 | |||
37 | #include "netxen_nic.h" | ||
38 | #define DEFINE_GLOBAL_RECV_CRB | ||
39 | #include "netxen_nic_phan_reg.h" | ||
40 | #include "netxen_nic_ioctl.h" | ||
41 | |||
42 | #include <linux/dma-mapping.h> | ||
43 | #include <linux/vmalloc.h> | ||
44 | |||
45 | #define PHAN_VENDOR_ID 0x4040 | ||
46 | |||
47 | MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); | ||
48 | MODULE_LICENSE("GPL"); | ||
49 | MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); | ||
50 | |||
51 | char netxen_nic_driver_name[] = "netxen"; | ||
52 | static char netxen_nic_driver_string[] = "NetXen Network Driver version " | ||
53 | NETXEN_NIC_LINUX_VERSIONID; | ||
54 | |||
55 | #define NETXEN_NETDEV_WEIGHT 120 | ||
56 | #define NETXEN_ADAPTER_UP_MAGIC 777 | ||
57 | #define NETXEN_NIC_PEG_TUNE 0 | ||
58 | |||
59 | /* Local functions to NetXen NIC driver */ | ||
60 | static int __devinit netxen_nic_probe(struct pci_dev *pdev, | ||
61 | const struct pci_device_id *ent); | ||
62 | static void __devexit netxen_nic_remove(struct pci_dev *pdev); | ||
63 | static int netxen_nic_open(struct net_device *netdev); | ||
64 | static int netxen_nic_close(struct net_device *netdev); | ||
65 | static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); | ||
66 | static void netxen_tx_timeout(struct net_device *netdev); | ||
67 | static void netxen_tx_timeout_task(struct net_device *netdev); | ||
68 | static void netxen_watchdog(unsigned long); | ||
69 | static int netxen_handle_int(struct netxen_adapter *, struct net_device *); | ||
70 | static int netxen_nic_ioctl(struct net_device *netdev, | ||
71 | struct ifreq *ifr, int cmd); | ||
72 | static int netxen_nic_poll(struct net_device *dev, int *budget); | ||
73 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
74 | static void netxen_nic_poll_controller(struct net_device *netdev); | ||
75 | #endif | ||
76 | static irqreturn_t netxen_intr(int irq, void *data); | ||
77 | |||
78 | /* PCI Device ID Table */ | ||
79 | static struct pci_device_id netxen_pci_tbl[] __devinitdata = { | ||
80 | {PCI_DEVICE(0x4040, 0x0001)}, | ||
81 | {PCI_DEVICE(0x4040, 0x0002)}, | ||
82 | {PCI_DEVICE(0x4040, 0x0003)}, | ||
83 | {PCI_DEVICE(0x4040, 0x0004)}, | ||
84 | {PCI_DEVICE(0x4040, 0x0005)}, | ||
85 | {0,} | ||
86 | }; | ||
87 | |||
88 | MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); | ||
89 | |||
90 | /* | ||
91 | * netxen_nic_probe() | ||
92 | * | ||
93 | * The Linux system will invoke this after identifying the vendor ID and | ||
94 | * device ID in the pci_tbl supported by this module. | ||
95 | * | ||
96 | * A quad port card has one operational PCI config space, (function 0), | ||
97 | * which is used to access all four ports. | ||
98 | * | ||
99 | * This routine will initialize the adapter and set up the global parameters | ||
100 | * along with each port's specific structure. | ||
101 | */ | ||
102 | static int __devinit | ||
103 | netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
104 | { | ||
105 | struct net_device *netdev = NULL; | ||
106 | struct netxen_adapter *adapter = NULL; | ||
107 | struct netxen_port *port = NULL; | ||
108 | u8 *mem_ptr0 = NULL; | ||
109 | u8 *mem_ptr1 = NULL; | ||
110 | u8 *mem_ptr2 = NULL; | ||
111 | |||
112 | unsigned long mem_base, mem_len; | ||
113 | int pci_using_dac, i, err; | ||
114 | int ring; | ||
115 | struct netxen_recv_context *recv_ctx = NULL; | ||
116 | struct netxen_rcv_desc_ctx *rcv_desc = NULL; | ||
117 | struct netxen_cmd_buffer *cmd_buf_arr = NULL; | ||
118 | u64 mac_addr[FLASH_NUM_PORTS + 1]; | ||
119 | int valid_mac; | ||
120 | |||
121 | printk(KERN_INFO "%s \n", netxen_nic_driver_string); | ||
122 | if ((err = pci_enable_device(pdev))) | ||
123 | return err; | ||
124 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
125 | err = -ENODEV; | ||
126 | goto err_out_disable_pdev; | ||
127 | } | ||
128 | |||
129 | if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) | ||
130 | goto err_out_disable_pdev; | ||
131 | |||
132 | pci_set_master(pdev); | ||
133 | if ((pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) && | ||
134 | (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) == 0)) | ||
135 | pci_using_dac = 1; | ||
136 | else { | ||
137 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || | ||
138 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) | ||
139 | goto err_out_free_res; | ||
140 | |||
141 | pci_using_dac = 0; | ||
142 | } | ||
143 | |||
144 | /* remap phys address */ | ||
145 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ | ||
146 | mem_len = pci_resource_len(pdev, 0); | ||
147 | |||
148 | /* 128 Meg of memory */ | ||
149 | mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); | ||
150 | mem_ptr1 = | ||
151 | ioremap(mem_base + SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_SIZE); | ||
152 | mem_ptr2 = | ||
153 | ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); | ||
154 | |||
155 | if ((mem_ptr0 == 0UL) || (mem_ptr1 == 0UL) || (mem_ptr2 == 0UL)) { | ||
156 | DPRINTK(1, ERR, | ||
157 | "Cannot remap adapter memory aborting.:" | ||
158 | "0 -> %p, 1 -> %p, 2 -> %p\n", | ||
159 | mem_ptr0, mem_ptr1, mem_ptr2); | ||
160 | |||
161 | err = -EIO; | ||
162 | if (mem_ptr0) | ||
163 | iounmap(mem_ptr0); | ||
164 | if (mem_ptr1) | ||
165 | iounmap(mem_ptr1); | ||
166 | if (mem_ptr2) | ||
167 | iounmap(mem_ptr2); | ||
168 | |||
169 | goto err_out_free_res; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * Allocate an adapter structure which will manage all the initialization | ||
174 | * as well as the common resources for all ports. | ||
175 | * Each port holds a pointer to this adapter, and the adapter in turn | ||
176 | * holds pointers to all the port structures. | ||
177 | */ | ||
178 | |||
179 | /* One adapter structure for all 4 ports.... */ | ||
180 | adapter = kzalloc(sizeof(struct netxen_adapter), GFP_KERNEL); | ||
181 | if (adapter == NULL) { | ||
182 | printk(KERN_ERR "%s: Could not allocate adapter memory:%d\n", | ||
183 | netxen_nic_driver_name, | ||
184 | (int)sizeof(struct netxen_adapter)); | ||
185 | err = -ENOMEM; | ||
186 | goto err_out_iounmap; | ||
187 | } | ||
188 | |||
189 | adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS; | ||
190 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS; | ||
191 | adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS; | ||
192 | |||
193 | pci_set_drvdata(pdev, adapter); | ||
194 | |||
195 | cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE); | ||
196 | if (cmd_buf_arr == NULL) { | ||
197 | err = -ENOMEM; | ||
198 | goto err_out_free_adapter; | ||
199 | } | ||
200 | memset(cmd_buf_arr, 0, TX_RINGSIZE); | ||
201 | |||
202 | for (i = 0; i < MAX_RCV_CTX; ++i) { | ||
203 | recv_ctx = &adapter->recv_ctx[i]; | ||
204 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
205 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
206 | switch (RCV_DESC_TYPE(ring)) { | ||
207 | case RCV_DESC_NORMAL: | ||
208 | rcv_desc->max_rx_desc_count = | ||
209 | adapter->max_rx_desc_count; | ||
210 | rcv_desc->flags = RCV_DESC_NORMAL; | ||
211 | rcv_desc->dma_size = RX_DMA_MAP_LEN; | ||
212 | rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH; | ||
213 | break; | ||
214 | |||
215 | case RCV_DESC_JUMBO: | ||
216 | rcv_desc->max_rx_desc_count = | ||
217 | adapter->max_jumbo_rx_desc_count; | ||
218 | rcv_desc->flags = RCV_DESC_JUMBO; | ||
219 | rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN; | ||
220 | rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH; | ||
221 | break; | ||
222 | |||
223 | } | ||
224 | rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *) | ||
225 | vmalloc(RCV_BUFFSIZE); | ||
226 | |||
227 | if (rcv_desc->rx_buf_arr == NULL) { | ||
228 | err = -ENOMEM; | ||
229 | goto err_out_free_rx_buffer; | ||
230 | } | ||
231 | memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE); | ||
232 | } | ||
233 | |||
234 | } | ||
235 | |||
236 | adapter->ops = kzalloc(sizeof(struct netxen_drvops), GFP_KERNEL); | ||
237 | if (adapter->ops == NULL) { | ||
238 | printk(KERN_ERR | ||
239 | "%s: Could not allocate memory for adapter->ops:%d\n", | ||
240 | netxen_nic_driver_name, | ||
241 | (int)sizeof(struct netxen_drvops)); | ||
242 | err = -ENOMEM; | ||
243 | goto err_out_free_rx_buffer; | ||
244 | } | ||
245 | |||
246 | adapter->cmd_buf_arr = cmd_buf_arr; | ||
247 | adapter->ahw.pci_base0 = mem_ptr0; | ||
248 | adapter->ahw.pci_base1 = mem_ptr1; | ||
249 | adapter->ahw.pci_base2 = mem_ptr2; | ||
250 | spin_lock_init(&adapter->tx_lock); | ||
251 | spin_lock_init(&adapter->lock); | ||
252 | #ifdef CONFIG_IA64 | ||
253 | netxen_pinit_from_rom(adapter, 0); | ||
254 | udelay(500); | ||
255 | netxen_load_firmware(adapter); | ||
256 | #endif | ||
257 | |||
258 | /* initialize the buffers in adapter */ | ||
259 | netxen_initialize_adapter_sw(adapter); | ||
260 | /* | ||
261 | * Set the CRB window to invalid. If any register in window 0 is | ||
262 | * accessed it should set the window to 0 and then reset it to 1. | ||
263 | */ | ||
264 | adapter->curr_window = 255; | ||
265 | /* | ||
266 | * Adapter in our case is quad port so initialize it before | ||
267 | * initializing the ports | ||
268 | */ | ||
269 | netxen_initialize_adapter_hw(adapter); /* initialize the adapter */ | ||
270 | |||
271 | netxen_initialize_adapter_ops(adapter); | ||
272 | |||
273 | init_timer(&adapter->watchdog_timer); | ||
274 | adapter->ahw.xg_linkup = 0; | ||
275 | adapter->watchdog_timer.function = &netxen_watchdog; | ||
276 | adapter->watchdog_timer.data = (unsigned long)adapter; | ||
277 | INIT_WORK(&adapter->watchdog_task, | ||
278 | (void (*)(void *))netxen_watchdog_task, adapter); | ||
279 | adapter->ahw.pdev = pdev; | ||
280 | adapter->proc_cmd_buf_counter = 0; | ||
281 | pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id); | ||
282 | |||
283 | if (pci_enable_msi(pdev)) { | ||
284 | adapter->flags &= ~NETXEN_NIC_MSI_ENABLED; | ||
285 | printk(KERN_WARNING "%s: unable to allocate MSI interrupt" | ||
286 | " error\n", netxen_nic_driver_name); | ||
287 | } else | ||
288 | adapter->flags |= NETXEN_NIC_MSI_ENABLED; | ||
289 | |||
290 | if (netxen_is_flash_supported(adapter) == 0 && | ||
291 | netxen_get_flash_mac_addr(adapter, mac_addr) == 0) | ||
292 | valid_mac = 1; | ||
293 | else | ||
294 | valid_mac = 0; | ||
295 | |||
296 | /* | ||
297 | * Initialize all the CRB registers here. | ||
298 | */ | ||
299 | writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET)); | ||
300 | writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET)); | ||
301 | writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO)); | ||
302 | |||
303 | /* Unlock the HW, prompting the boot sequence */ | ||
304 | writel(1, | ||
305 | NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE)); | ||
306 | |||
307 | /* Handshake with the card before we register the devices. */ | ||
308 | netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | ||
309 | |||
310 | /* initialize all the ports */ | ||
311 | |||
312 | for (i = 0; i < adapter->ahw.max_ports; i++) { | ||
313 | netdev = alloc_etherdev(sizeof(struct netxen_port)); | ||
314 | if (!netdev) { | ||
315 | printk(KERN_ERR "%s: could not allocate netdev for port" | ||
316 | " %d\n", netxen_nic_driver_name, i + 1); | ||
317 | goto err_out_free_dev; | ||
318 | } | ||
319 | |||
320 | SET_MODULE_OWNER(netdev); | ||
321 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
322 | |||
323 | port = netdev_priv(netdev); | ||
324 | port->netdev = netdev; | ||
325 | port->pdev = pdev; | ||
326 | port->adapter = adapter; | ||
327 | port->portnum = i; /* Gigabit port number from 0-3 */ | ||
328 | |||
329 | netdev->open = netxen_nic_open; | ||
330 | netdev->stop = netxen_nic_close; | ||
331 | netdev->hard_start_xmit = netxen_nic_xmit_frame; | ||
332 | netdev->get_stats = netxen_nic_get_stats; | ||
333 | netdev->set_multicast_list = netxen_nic_set_multi; | ||
334 | netdev->set_mac_address = netxen_nic_set_mac; | ||
335 | netdev->change_mtu = netxen_nic_change_mtu; | ||
336 | netdev->do_ioctl = netxen_nic_ioctl; | ||
337 | netdev->tx_timeout = netxen_tx_timeout; | ||
338 | netdev->watchdog_timeo = HZ; | ||
339 | |||
340 | SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); | ||
341 | netdev->poll = netxen_nic_poll; | ||
342 | netdev->weight = NETXEN_NETDEV_WEIGHT; | ||
343 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
344 | netdev->poll_controller = netxen_nic_poll_controller; | ||
345 | #endif | ||
346 | /* ScatterGather support */ | ||
347 | netdev->features = NETIF_F_SG; | ||
348 | netdev->features |= NETIF_F_IP_CSUM; | ||
349 | netdev->features |= NETIF_F_TSO; | ||
350 | |||
351 | if (pci_using_dac) | ||
352 | netdev->features |= NETIF_F_HIGHDMA; | ||
353 | |||
354 | if (valid_mac) { | ||
355 | unsigned char *p = (unsigned char *)&mac_addr[i]; | ||
356 | netdev->dev_addr[0] = *(p + 5); | ||
357 | netdev->dev_addr[1] = *(p + 4); | ||
358 | netdev->dev_addr[2] = *(p + 3); | ||
359 | netdev->dev_addr[3] = *(p + 2); | ||
360 | netdev->dev_addr[4] = *(p + 1); | ||
361 | netdev->dev_addr[5] = *(p + 0); | ||
362 | |||
363 | memcpy(netdev->perm_addr, netdev->dev_addr, | ||
364 | netdev->addr_len); | ||
365 | if (!is_valid_ether_addr(netdev->perm_addr)) { | ||
366 | printk(KERN_ERR "%s: Bad MAC address " | ||
367 | "%02x:%02x:%02x:%02x:%02x:%02x.\n", | ||
368 | netxen_nic_driver_name, | ||
369 | netdev->dev_addr[0], | ||
370 | netdev->dev_addr[1], | ||
371 | netdev->dev_addr[2], | ||
372 | netdev->dev_addr[3], | ||
373 | netdev->dev_addr[4], | ||
374 | netdev->dev_addr[5]); | ||
375 | } else { | ||
376 | if (adapter->ops->macaddr_set) | ||
377 | adapter->ops->macaddr_set(port, | ||
378 | netdev-> | ||
379 | dev_addr); | ||
380 | } | ||
381 | } | ||
382 | INIT_WORK(&adapter->tx_timeout_task, | ||
383 | (void (*)(void *))netxen_tx_timeout_task, netdev); | ||
384 | netif_carrier_off(netdev); | ||
385 | netif_stop_queue(netdev); | ||
386 | |||
387 | if ((err = register_netdev(netdev))) { | ||
388 | printk(KERN_ERR "%s: register_netdev failed port #%d" | ||
389 | " aborting\n", netxen_nic_driver_name, i + 1); | ||
390 | err = -EIO; | ||
391 | free_netdev(netdev); | ||
392 | goto err_out_free_dev; | ||
393 | } | ||
394 | adapter->port_count++; | ||
395 | adapter->active_ports = 0; | ||
396 | adapter->port[i] = port; | ||
397 | } | ||
398 | |||
399 | /* | ||
400 | * delay a while to ensure that the Pegs are up & running. | ||
401 | * Otherwise, we might see some flaky behaviour. | ||
402 | */ | ||
403 | udelay(100); | ||
404 | |||
405 | switch (adapter->ahw.board_type) { | ||
406 | case NETXEN_NIC_GBE: | ||
407 | printk("%s: QUAD GbE board initialized\n", | ||
408 | netxen_nic_driver_name); | ||
409 | break; | ||
410 | |||
411 | case NETXEN_NIC_XGBE: | ||
412 | printk("%s: XGbE board initialized\n", netxen_nic_driver_name); | ||
413 | break; | ||
414 | } | ||
415 | |||
416 | adapter->driver_mismatch = 0; | ||
417 | |||
418 | return 0; | ||
419 | |||
420 | err_out_free_dev: | ||
421 | if (adapter->flags & NETXEN_NIC_MSI_ENABLED) | ||
422 | pci_disable_msi(pdev); | ||
423 | for (i = 0; i < adapter->port_count; i++) { | ||
424 | port = adapter->port[i]; | ||
425 | if ((port) && (port->netdev)) { | ||
426 | unregister_netdev(port->netdev); | ||
427 | free_netdev(port->netdev); | ||
428 | } | ||
429 | } | ||
430 | kfree(adapter->ops); | ||
431 | |||
432 | err_out_free_rx_buffer: | ||
433 | for (i = 0; i < MAX_RCV_CTX; ++i) { | ||
434 | recv_ctx = &adapter->recv_ctx[i]; | ||
435 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
436 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
437 | if (rcv_desc->rx_buf_arr != NULL) { | ||
438 | vfree(rcv_desc->rx_buf_arr); | ||
439 | rcv_desc->rx_buf_arr = NULL; | ||
440 | } | ||
441 | } | ||
442 | } | ||
443 | |||
444 | vfree(cmd_buf_arr); | ||
445 | |||
446 | kfree(adapter->port); | ||
447 | |||
448 | err_out_free_adapter: | ||
449 | pci_set_drvdata(pdev, NULL); | ||
450 | kfree(adapter); | ||
451 | |||
452 | err_out_iounmap: | ||
453 | iounmap(mem_ptr0); | ||
454 | iounmap(mem_ptr1); | ||
455 | iounmap(mem_ptr2); | ||
456 | |||
457 | err_out_free_res: | ||
458 | pci_release_regions(pdev); | ||
459 | err_out_disable_pdev: | ||
460 | pci_disable_device(pdev); | ||
461 | return err; | ||
462 | } | ||
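The probe path above extracts each port's MAC address from a 64-bit word read out of flash, copying the low six bytes into dev_addr[] in reverse order. A small sketch of that byte reversal, using an assumed sample value (the flash layout itself is taken on faith from the driver code):

#include <stdio.h>
#include <stdint.h>

/* Reverse the low six bytes of a 64-bit flash word into a MAC address,
 * mirroring the dev_addr[] assignments in netxen_nic_probe(). */
static void flash_word_to_mac(uint64_t flash_word, unsigned char mac[6])
{
	const unsigned char *p = (const unsigned char *)&flash_word;
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = p[5 - i];
}

int main(void)
{
	unsigned char mac[6];

	/* Assumed sample value; on a little-endian host (as in the driver's
	 * byte-pointer access) this yields 12:34:56:78:9a:bc. */
	flash_word_to_mac(0x0000123456789abcULL, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}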
463 | |||
464 | static void __devexit netxen_nic_remove(struct pci_dev *pdev) | ||
465 | { | ||
466 | struct netxen_adapter *adapter; | ||
467 | struct netxen_port *port; | ||
468 | struct netxen_rx_buffer *buffer; | ||
469 | struct netxen_recv_context *recv_ctx; | ||
470 | struct netxen_rcv_desc_ctx *rcv_desc; | ||
471 | int i; | ||
472 | int ctxid, ring; | ||
473 | |||
474 | adapter = pci_get_drvdata(pdev); | ||
475 | if (adapter == NULL) | ||
476 | return; | ||
477 | |||
478 | netxen_nic_stop_all_ports(adapter); | ||
479 | /* leave the hw in the same state as reboot */ | ||
480 | netxen_pinit_from_rom(adapter, 0); | ||
481 | udelay(500); | ||
482 | netxen_load_firmware(adapter); | ||
483 | |||
484 | if ((adapter->flags & NETXEN_NIC_MSI_ENABLED)) | ||
485 | netxen_nic_disable_int(adapter); | ||
486 | |||
487 | udelay(500); /* Delay for a while to drain the DMA engines */ | ||
488 | for (i = 0; i < adapter->port_count; i++) { | ||
489 | port = adapter->port[i]; | ||
490 | if ((port) && (port->netdev)) { | ||
491 | unregister_netdev(port->netdev); | ||
492 | free_netdev(port->netdev); | ||
493 | } | ||
494 | } | ||
495 | |||
496 | if ((adapter->flags & NETXEN_NIC_MSI_ENABLED)) | ||
497 | pci_disable_msi(pdev); | ||
498 | pci_set_drvdata(pdev, NULL); | ||
499 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) | ||
500 | netxen_free_hw_resources(adapter); | ||
501 | |||
502 | iounmap(adapter->ahw.pci_base0); | ||
503 | iounmap(adapter->ahw.pci_base1); | ||
504 | iounmap(adapter->ahw.pci_base2); | ||
505 | |||
506 | pci_release_regions(pdev); | ||
507 | pci_disable_device(pdev); | ||
508 | |||
509 | for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) { | ||
510 | recv_ctx = &adapter->recv_ctx[ctxid]; | ||
511 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
512 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
513 | for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) { | ||
514 | buffer = &(rcv_desc->rx_buf_arr[i]); | ||
515 | if (buffer->state == NETXEN_BUFFER_FREE) | ||
516 | continue; | ||
517 | pci_unmap_single(pdev, buffer->dma, | ||
518 | rcv_desc->dma_size, | ||
519 | PCI_DMA_FROMDEVICE); | ||
520 | if (buffer->skb != NULL) | ||
521 | dev_kfree_skb_any(buffer->skb); | ||
522 | } | ||
523 | vfree(rcv_desc->rx_buf_arr); | ||
524 | } | ||
525 | } | ||
526 | |||
527 | vfree(adapter->cmd_buf_arr); | ||
528 | kfree(adapter->ops); | ||
529 | kfree(adapter); | ||
530 | } | ||
531 | |||
532 | /* | ||
533 | * Called when a network interface is made active | ||
534 | * @returns 0 on success, negative value on failure | ||
535 | */ | ||
536 | static int netxen_nic_open(struct net_device *netdev) | ||
537 | { | ||
538 | struct netxen_port *port = netdev_priv(netdev); | ||
539 | struct netxen_adapter *adapter = port->adapter; | ||
540 | int err = 0; | ||
541 | int ctx, ring; | ||
542 | |||
543 | if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) { | ||
544 | err = netxen_init_firmware(adapter); | ||
545 | if (err != 0) { | ||
546 | printk(KERN_ERR "Failed to init firmware\n"); | ||
547 | return -EIO; | ||
548 | } | ||
549 | netxen_nic_flash_print(adapter); | ||
550 | |||
551 | /* setup all the resources for the Phantom... */ | ||
552 | /* this includes the descriptors for rcv, tx, and status */ | ||
553 | netxen_nic_clear_stats(adapter); | ||
554 | err = netxen_nic_hw_resources(adapter); | ||
555 | if (err) { | ||
556 | printk(KERN_ERR "Error in setting hw resources:%d\n", | ||
557 | err); | ||
558 | return err; | ||
559 | } | ||
560 | if (adapter->ops->init_port | ||
561 | && adapter->ops->init_port(adapter, port->portnum) != 0) { | ||
562 | printk(KERN_ERR "%s: Failed to initialize port %d\n", | ||
563 | netxen_nic_driver_name, port->portnum); | ||
564 | netxen_free_hw_resources(adapter); | ||
565 | return -EIO; | ||
566 | } | ||
567 | if (adapter->ops->init_niu) | ||
568 | adapter->ops->init_niu(adapter); | ||
569 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
570 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) | ||
571 | netxen_post_rx_buffers(adapter, ctx, ring); | ||
572 | } | ||
573 | adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; | ||
574 | } | ||
575 | adapter->active_ports++; | ||
576 | if (adapter->active_ports == 1) { | ||
577 | err = request_irq(adapter->ahw.pdev->irq, &netxen_intr, | ||
578 | SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name, | ||
579 | adapter); | ||
580 | if (err) { | ||
581 | printk(KERN_ERR "request_irq failed with: %d\n", err); | ||
582 | adapter->active_ports--; | ||
583 | return err; | ||
584 | } | ||
585 | adapter->irq = adapter->ahw.pdev->irq; | ||
586 | if (!adapter->driver_mismatch) | ||
587 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
588 | |||
589 | netxen_nic_enable_int(adapter); | ||
590 | } | ||
591 | |||
592 | /* Set the MAC address again here so that our value sticks even if | ||
593 | * the phantom firmware overwrote it */ | ||
594 | if (adapter->ops->macaddr_set) | ||
595 | adapter->ops->macaddr_set(port, netdev->dev_addr); | ||
596 | netxen_nic_set_link_parameters(port); | ||
597 | |||
598 | netxen_nic_set_multi(netdev); | ||
599 | if (!adapter->driver_mismatch) | ||
600 | netif_start_queue(netdev); | ||
601 | |||
602 | return 0; | ||
603 | } | ||
604 | |||
605 | /* | ||
606 | * netxen_nic_close - Disables a network interface entry point | ||
607 | */ | ||
608 | static int netxen_nic_close(struct net_device *netdev) | ||
609 | { | ||
610 | struct netxen_port *port = netdev_priv(netdev); | ||
611 | struct netxen_adapter *adapter = port->adapter; | ||
612 | int i, j; | ||
613 | struct netxen_cmd_buffer *cmd_buff; | ||
614 | struct netxen_skb_frag *buffrag; | ||
615 | |||
616 | netif_carrier_off(netdev); | ||
617 | netif_stop_queue(netdev); | ||
618 | |||
619 | adapter->active_ports--; | ||
620 | |||
621 | if (!adapter->active_ports) { | ||
622 | netxen_nic_disable_int(adapter); | ||
623 | if (adapter->irq) | ||
624 | free_irq(adapter->irq, adapter); | ||
625 | cmd_buff = adapter->cmd_buf_arr; | ||
626 | for (i = 0; i < adapter->max_tx_desc_count; i++) { | ||
627 | buffrag = cmd_buff->frag_array; | ||
628 | if (buffrag->dma) { | ||
629 | pci_unmap_single(port->pdev, buffrag->dma, | ||
630 | buffrag->length, | ||
631 | PCI_DMA_TODEVICE); | ||
632 | buffrag->dma = (u64) NULL; | ||
633 | } | ||
634 | for (j = 0; j < cmd_buff->frag_count; j++) { | ||
635 | buffrag++; | ||
636 | if (buffrag->dma) { | ||
637 | pci_unmap_page(port->pdev, | ||
638 | buffrag->dma, | ||
639 | buffrag->length, | ||
640 | PCI_DMA_TODEVICE); | ||
641 | buffrag->dma = (u64) NULL; | ||
642 | } | ||
643 | } | ||
644 | /* Free the skb we received in netxen_nic_xmit_frame */ | ||
645 | if (cmd_buff->skb) { | ||
646 | dev_kfree_skb_any(cmd_buff->skb); | ||
647 | cmd_buff->skb = NULL; | ||
648 | } | ||
649 | cmd_buff++; | ||
650 | } | ||
651 | del_timer_sync(&adapter->watchdog_timer); | ||
652 | } | ||
653 | |||
654 | return 0; | ||
655 | } | ||
656 | |||
657 | static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
658 | { | ||
659 | struct netxen_port *port = netdev_priv(netdev); | ||
660 | struct netxen_adapter *adapter = port->adapter; | ||
661 | struct netxen_hardware_context *hw = &adapter->ahw; | ||
662 | unsigned int first_seg_len = skb->len - skb->data_len; | ||
663 | struct netxen_skb_frag *buffrag; | ||
664 | unsigned int i; | ||
665 | |||
666 | u32 producer = 0; | ||
667 | u32 saved_producer = 0; | ||
668 | struct cmd_desc_type0 *hwdesc; | ||
669 | int k; | ||
670 | struct netxen_cmd_buffer *pbuf = NULL; | ||
671 | unsigned int tries = 0; | ||
672 | static int dropped_packet = 0; | ||
673 | int frag_count; | ||
674 | u32 local_producer = 0; | ||
675 | u32 max_tx_desc_count = 0; | ||
676 | u32 last_cmd_consumer = 0; | ||
677 | int no_of_desc; | ||
678 | |||
679 | port->stats.xmitcalled++; | ||
680 | frag_count = skb_shinfo(skb)->nr_frags + 1; | ||
681 | |||
682 | if (unlikely(skb->len <= 0)) { | ||
683 | dev_kfree_skb_any(skb); | ||
684 | port->stats.badskblen++; | ||
685 | return NETDEV_TX_OK; | ||
686 | } | ||
687 | |||
688 | if (frag_count > MAX_BUFFERS_PER_CMD) { | ||
689 | printk("%s: %s netxen_nic_xmit_frame: frag_count (%d)" | ||
690 | "too large, can handle only %d frags\n", | ||
691 | netxen_nic_driver_name, netdev->name, | ||
692 | frag_count, MAX_BUFFERS_PER_CMD); | ||
693 | port->stats.txdropped++; | ||
694 | if ((++dropped_packet & 0xff) == 0xff) | ||
695 | printk("%s: %s dropped packets = %d\n", | ||
696 | netxen_nic_driver_name, netdev->name, | ||
697 | dropped_packet); | ||
698 | |||
699 | return NETDEV_TX_OK; | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * Everything is set up. Now, we just need to transmit it out. | ||
704 | * Note that we have to copy the contents of the buffer over to the | ||
705 | * right place. Later on, this can be optimized out by de-coupling the | ||
706 | * producer index from the buffer index. | ||
707 | */ | ||
708 | retry_getting_window: | ||
709 | spin_lock_bh(&adapter->tx_lock); | ||
710 | if (adapter->total_threads == MAX_XMIT_PRODUCERS) { | ||
711 | spin_unlock_bh(&adapter->tx_lock); | ||
712 | /* | ||
713 | * Yield CPU | ||
714 | */ | ||
715 | if (!in_atomic()) | ||
716 | schedule(); | ||
717 | else { | ||
718 | for (i = 0; i < 20; i++) | ||
719 | cpu_relax(); /* This is a nop instruction on i386 */ | ||
720 | } | ||
721 | goto retry_getting_window; | ||
722 | } | ||
723 | local_producer = adapter->cmd_producer; | ||
724 | /* There are 4 fragments per descriptor */ | ||
725 | no_of_desc = (frag_count + 3) >> 2; | ||
726 | if (netdev->features & NETIF_F_TSO) { | ||
727 | if (skb_shinfo(skb)->gso_size > 0) { | ||
728 | |||
729 | no_of_desc++; | ||
730 | if (((skb->nh.iph)->ihl * sizeof(u32)) + | ||
731 | ((skb->h.th)->doff * sizeof(u32)) + | ||
732 | sizeof(struct ethhdr) > | ||
733 | (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) { | ||
734 | no_of_desc++; | ||
735 | } | ||
736 | } | ||
737 | } | ||
738 | k = adapter->cmd_producer; | ||
739 | max_tx_desc_count = adapter->max_tx_desc_count; | ||
740 | last_cmd_consumer = adapter->last_cmd_consumer; | ||
741 | if ((k + no_of_desc) >= | ||
742 | ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count : | ||
743 | last_cmd_consumer)) { | ||
744 | spin_unlock_bh(&adapter->tx_lock); | ||
745 | if (tries == 0) { | ||
746 | local_bh_disable(); | ||
747 | netxen_process_cmd_ring((unsigned long)adapter); | ||
748 | local_bh_enable(); | ||
749 | ++tries; | ||
750 | goto retry_getting_window; | ||
751 | } else { | ||
752 | port->stats.nocmddescriptor++; | ||
753 | DPRINTK(ERR, "No command descriptors available," | ||
754 | " producer = %d, consumer = %d count=%llu," | ||
755 | " dropping packet\n", producer, | ||
756 | adapter->last_cmd_consumer, | ||
757 | port->stats.nocmddescriptor); | ||
758 | |||
759 | spin_lock_bh(&adapter->tx_lock); | ||
760 | netif_stop_queue(netdev); | ||
761 | port->flags |= NETXEN_NETDEV_STATUS; | ||
762 | spin_unlock_bh(&adapter->tx_lock); | ||
763 | return NETDEV_TX_BUSY; | ||
764 | } | ||
765 | } | ||
766 | k = get_index_range(k, max_tx_desc_count, no_of_desc); | ||
767 | adapter->cmd_producer = k; | ||
768 | adapter->total_threads++; | ||
769 | adapter->num_threads++; | ||
770 | |||
771 | spin_unlock_bh(&adapter->tx_lock); | ||
772 | /* Copy the descriptors into the hardware */ | ||
773 | producer = local_producer; | ||
774 | saved_producer = producer; | ||
775 | hwdesc = &hw->cmd_desc_head[producer]; | ||
776 | memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); | ||
777 | /* Take skb->data itself */ | ||
778 | pbuf = &adapter->cmd_buf_arr[producer]; | ||
779 | if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) { | ||
780 | pbuf->mss = skb_shinfo(skb)->gso_size; | ||
781 | hwdesc->mss = skb_shinfo(skb)->gso_size; | ||
782 | } else { | ||
783 | pbuf->mss = 0; | ||
784 | hwdesc->mss = 0; | ||
785 | } | ||
786 | pbuf->no_of_descriptors = no_of_desc; | ||
787 | pbuf->total_length = skb->len; | ||
788 | pbuf->skb = skb; | ||
789 | pbuf->cmd = TX_ETHER_PKT; | ||
790 | pbuf->frag_count = frag_count; | ||
791 | pbuf->port = port->portnum; | ||
792 | buffrag = &pbuf->frag_array[0]; | ||
793 | buffrag->dma = pci_map_single(port->pdev, skb->data, first_seg_len, | ||
794 | PCI_DMA_TODEVICE); | ||
795 | buffrag->length = first_seg_len; | ||
796 | CMD_DESC_TOTAL_LENGTH_WRT(hwdesc, skb->len); | ||
797 | hwdesc->num_of_buffers = frag_count; | ||
798 | hwdesc->opcode = TX_ETHER_PKT; | ||
799 | |||
800 | CMD_DESC_PORT_WRT(hwdesc, port->portnum); | ||
801 | hwdesc->buffer1_length = cpu_to_le16(first_seg_len); | ||
802 | hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); | ||
803 | |||
804 | for (i = 1, k = 1; i < frag_count; i++, k++) { | ||
805 | struct skb_frag_struct *frag; | ||
806 | int len, temp_len; | ||
807 | unsigned long offset; | ||
808 | dma_addr_t temp_dma; | ||
809 | |||
810 | /* move to next desc. if there is a need */ | ||
811 | if ((i & 0x3) == 0) { | ||
812 | k = 0; | ||
813 | producer = get_next_index(producer, | ||
814 | adapter->max_tx_desc_count); | ||
815 | hwdesc = &hw->cmd_desc_head[producer]; | ||
816 | memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); | ||
817 | } | ||
818 | frag = &skb_shinfo(skb)->frags[i - 1]; | ||
819 | len = frag->size; | ||
820 | offset = frag->page_offset; | ||
821 | |||
822 | temp_len = len; | ||
823 | temp_dma = pci_map_page(port->pdev, frag->page, offset, | ||
824 | len, PCI_DMA_TODEVICE); | ||
825 | |||
826 | buffrag++; | ||
827 | buffrag->dma = temp_dma; | ||
828 | buffrag->length = temp_len; | ||
829 | |||
830 | DPRINTK(INFO, "for loop. i=%d k=%d\n", i, k); | ||
831 | switch (k) { | ||
832 | case 0: | ||
833 | hwdesc->buffer1_length = cpu_to_le16(temp_len); | ||
834 | hwdesc->addr_buffer1 = cpu_to_le64(temp_dma); | ||
835 | break; | ||
836 | case 1: | ||
837 | hwdesc->buffer2_length = cpu_to_le16(temp_len); | ||
838 | hwdesc->addr_buffer2 = cpu_to_le64(temp_dma); | ||
839 | break; | ||
840 | case 2: | ||
841 | hwdesc->buffer3_length = cpu_to_le16(temp_len); | ||
842 | hwdesc->addr_buffer3 = cpu_to_le64(temp_dma); | ||
843 | break; | ||
844 | case 3: | ||
845 | hwdesc->buffer4_length = temp_len; | ||
846 | hwdesc->addr_buffer4 = cpu_to_le64(temp_dma); | ||
847 | break; | ||
848 | } | ||
849 | frag++; | ||
850 | } | ||
851 | producer = get_next_index(producer, adapter->max_tx_desc_count); | ||
852 | |||
853 | /* might change opcode to TX_TCP_LSO */ | ||
854 | netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb); | ||
855 | |||
856 | /* For LSO, we need to copy the MAC/IP/TCP headers into | ||
857 | * the descriptor ring | ||
858 | */ | ||
859 | if (hw->cmd_desc_head[saved_producer].opcode == TX_TCP_LSO) { | ||
860 | int hdr_len, first_hdr_len, more_hdr; | ||
861 | hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length; | ||
862 | if (hdr_len > (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) { | ||
863 | first_hdr_len = | ||
864 | sizeof(struct cmd_desc_type0) - NET_IP_ALIGN; | ||
865 | more_hdr = 1; | ||
866 | } else { | ||
867 | first_hdr_len = hdr_len; | ||
868 | more_hdr = 0; | ||
869 | } | ||
870 | /* copy the MAC/IP/TCP headers to the cmd descriptor list */ | ||
871 | hwdesc = &hw->cmd_desc_head[producer]; | ||
872 | |||
873 | /* copy the first 64 bytes */ | ||
874 | memcpy(((void *)hwdesc) + NET_IP_ALIGN, | ||
875 | (void *)(skb->data), first_hdr_len); | ||
876 | producer = get_next_index(producer, max_tx_desc_count); | ||
877 | |||
878 | if (more_hdr) { | ||
879 | hwdesc = &hw->cmd_desc_head[producer]; | ||
880 | /* copy the next 64 bytes - should be enough except | ||
881 | * for pathological case | ||
882 | */ | ||
883 | memcpy((void *)hwdesc, (void *)(skb->data) + | ||
884 | first_hdr_len, hdr_len - first_hdr_len); | ||
885 | producer = get_next_index(producer, max_tx_desc_count); | ||
886 | } | ||
887 | } | ||
888 | spin_lock_bh(&adapter->tx_lock); | ||
889 | port->stats.txbytes += | ||
890 | CMD_DESC_TOTAL_LENGTH(&hw->cmd_desc_head[saved_producer]); | ||
891 | /* Code to update the adapter considering how many producer threads | ||
892 | are currently working */ | ||
893 | if ((--adapter->num_threads) == 0) { | ||
894 | /* This is the last thread */ | ||
895 | u32 crb_producer = adapter->cmd_producer; | ||
896 | writel(crb_producer, | ||
897 | NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET)); | ||
898 | wmb(); | ||
899 | adapter->total_threads = 0; | ||
900 | } else { | ||
901 | u32 crb_producer = 0; | ||
902 | crb_producer = | ||
903 | readl(NETXEN_CRB_NORMALIZE | ||
904 | (adapter, CRB_CMD_PRODUCER_OFFSET)); | ||
905 | if (crb_producer == local_producer) { | ||
906 | crb_producer = get_index_range(crb_producer, | ||
907 | max_tx_desc_count, | ||
908 | no_of_desc); | ||
909 | writel(crb_producer, | ||
910 | NETXEN_CRB_NORMALIZE(adapter, | ||
911 | CRB_CMD_PRODUCER_OFFSET)); | ||
912 | wmb(); | ||
913 | } | ||
914 | } | ||
915 | |||
916 | port->stats.xmitfinished++; | ||
917 | spin_unlock_bh(&adapter->tx_lock); | ||
918 | |||
919 | netdev->trans_start = jiffies; | ||
920 | |||
921 | DPRINTK(INFO, "wrote CMD producer %x to phantom\n", producer); | ||
922 | |||
923 | DPRINTK(INFO, "Done. Send\n"); | ||
924 | return NETDEV_TX_OK; | ||
925 | } | ||
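netxen_nic_xmit_frame() above packs up to four buffer fragments into each command descriptor and then checks, with wrap-around, whether the ring still has room between the producer and the last consumed slot. A standalone sketch of that arithmetic (illustrative helpers, not driver API):

#include <stdio.h>

/* Descriptors needed for a frame: four buffer pointers fit in each command
 * descriptor, matching the (frag_count + 3) >> 2 computation in the driver. */
static int descs_needed(int frag_count)
{
	return (frag_count + 3) >> 2;
}

/* Wrap-around ring-space check: the request fits only if the advanced
 * producer index stays strictly behind the last consumed slot. */
static int ring_has_room(int producer, int last_consumer, int ring_size, int wanted)
{
	int limit = (last_consumer <= producer)
		    ? last_consumer + ring_size
		    : last_consumer;

	return (producer + wanted) < limit;
}

int main(void)
{
	printf("5 frags -> %d descriptors\n", descs_needed(5));	/* 2 */
	printf("room: %d\n", ring_has_room(250, 10, 256, 8));	/* 1: wraps and fits */
	printf("room: %d\n", ring_has_room(250, 252, 256, 8));	/* 0: would overrun */
	return 0;
}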
926 | |||
927 | static void netxen_watchdog(unsigned long v) | ||
928 | { | ||
929 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; | ||
930 | schedule_work(&adapter->watchdog_task); | ||
931 | } | ||
932 | |||
933 | static void netxen_tx_timeout(struct net_device *netdev) | ||
934 | { | ||
935 | struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); | ||
936 | struct netxen_adapter *adapter = port->adapter; | ||
937 | |||
938 | schedule_work(&adapter->tx_timeout_task); | ||
939 | } | ||
940 | |||
941 | static void netxen_tx_timeout_task(struct net_device *netdev) | ||
942 | { | ||
943 | struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); | ||
944 | unsigned long flags; | ||
945 | |||
946 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", | ||
947 | netxen_nic_driver_name, netdev->name); | ||
948 | |||
949 | spin_lock_irqsave(&port->adapter->lock, flags); | ||
950 | netxen_nic_close(netdev); | ||
951 | netxen_nic_open(netdev); | ||
952 | spin_unlock_irqrestore(&port->adapter->lock, flags); | ||
953 | netdev->trans_start = jiffies; | ||
954 | netif_wake_queue(netdev); | ||
955 | } | ||
956 | |||
957 | static int | ||
958 | netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev) | ||
959 | { | ||
960 | u32 ret = 0; | ||
961 | |||
962 | DPRINTK(INFO, "Entered handle ISR\n"); | ||
963 | |||
964 | adapter->stats.ints++; | ||
965 | |||
966 | if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { | ||
967 | int count = 0; | ||
968 | u32 mask; | ||
969 | netxen_nic_disable_int(adapter); | ||
970 | /* Window = 0 or 1 */ | ||
971 | do { | ||
972 | writel(0xffffffff, PCI_OFFSET_SECOND_RANGE(adapter, | ||
973 | ISR_INT_TARGET_STATUS)); | ||
974 | mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR)); | ||
975 | } while (((mask & 0x80) != 0) && (++count < 32)); | ||
976 | if ((mask & 0x80) != 0) | ||
977 | printk(KERN_ERR "Could not disable interrupt completely\n"); | ||
978 | |||
979 | } | ||
980 | adapter->stats.hostints++; | ||
981 | |||
982 | if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) { | ||
983 | if (netif_rx_schedule_prep(netdev)) { | ||
984 | /* | ||
985 | * Interrupts are already disabled. | ||
986 | */ | ||
987 | __netif_rx_schedule(netdev); | ||
988 | } else { | ||
989 | static unsigned int intcount = 0; | ||
990 | if ((++intcount & 0xfff) == 0xfff) | ||
991 | printk(KERN_ERR | ||
992 | "%s: %s interrupt %d while in poll\n", | ||
993 | netxen_nic_driver_name, netdev->name, | ||
994 | intcount); | ||
995 | } | ||
996 | ret = 1; | ||
997 | } | ||
998 | |||
999 | if (ret == 0) { | ||
1000 | netxen_nic_enable_int(adapter); | ||
1001 | } | ||
1002 | |||
1003 | return ret; | ||
1004 | } | ||
1005 | |||
1006 | /* | ||
1007 | * netxen_intr - Interrupt Handler | ||
1008 | * @irq: interrupt number | ||
1009 | * @data: points to the adapter structure (which may be handling more than one port) | ||
1010 | */ | ||
1011 | irqreturn_t netxen_intr(int irq, void *data) | ||
1012 | { | ||
1013 | struct netxen_adapter *adapter; | ||
1014 | struct netxen_port *port; | ||
1015 | struct net_device *netdev; | ||
1016 | int i; | ||
1017 | |||
1018 | if (unlikely(!irq)) { | ||
1019 | return IRQ_NONE; /* Not our interrupt */ | ||
1020 | } | ||
1021 | |||
1022 | adapter = (struct netxen_adapter *)data; | ||
1023 | for (i = 0; i < adapter->ahw.max_ports; i++) { | ||
1024 | port = adapter->port[i]; | ||
1025 | netdev = port->netdev; | ||
1026 | |||
1027 | /* process our status queue (for all 4 ports) */ | ||
1028 | netxen_handle_int(adapter, netdev); | ||
1029 | } | ||
1030 | |||
1031 | return IRQ_HANDLED; | ||
1032 | } | ||
1033 | |||
1034 | static int netxen_nic_poll(struct net_device *netdev, int *budget) | ||
1035 | { | ||
1036 | struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); | ||
1037 | struct netxen_adapter *adapter = port->adapter; | ||
1038 | int work_to_do = min(*budget, netdev->quota); | ||
1039 | int done = 1; | ||
1040 | int ctx; | ||
1041 | int this_work_done; | ||
1042 | |||
1043 | DPRINTK(INFO, "polling for %d descriptors\n", *budget); | ||
1044 | port->stats.polled++; | ||
1045 | |||
1046 | adapter->work_done = 0; | ||
1047 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
1048 | /* | ||
1049 | * Fairness issue. This will give undue weight to the | ||
1050 | * receive context 0. | ||
1051 | */ | ||
1052 | |||
1053 | /* | ||
1054 | * To avoid starvation, we give each of our receivers, | ||
1055 | * a fraction of the quota. Sometimes, it might happen that we | ||
1056 | * have enough quota to process every packet, but since all the | ||
1057 | * packets are on one context, it gets only half of the quota, | ||
1058 | * and ends up not processing it. | ||
1059 | */ | ||
1060 | this_work_done = netxen_process_rcv_ring(adapter, ctx, | ||
1061 | work_to_do / | ||
1062 | MAX_RCV_CTX); | ||
1063 | adapter->work_done += this_work_done; | ||
1064 | } | ||
1065 | |||
1066 | netdev->quota -= adapter->work_done; | ||
1067 | *budget -= adapter->work_done; | ||
1068 | |||
1069 | if (adapter->work_done >= work_to_do | ||
1070 | && netxen_nic_rx_has_work(adapter) != 0) | ||
1071 | done = 0; | ||
1072 | |||
1073 | netxen_process_cmd_ring((unsigned long)adapter); | ||
1074 | |||
1075 | DPRINTK(INFO, "new work_done: %d work_to_do: %d\n", | ||
1076 | adapter->work_done, work_to_do); | ||
1077 | if (done) { | ||
1078 | netif_rx_complete(netdev); | ||
1079 | netxen_nic_enable_int(adapter); | ||
1080 | } | ||
1081 | |||
1082 | return !done; | ||
1083 | } | ||
1084 | |||
1085 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1086 | static void netxen_nic_poll_controller(struct net_device *netdev) | ||
1087 | { | ||
1088 | struct netxen_port *port = netdev_priv(netdev); | ||
1089 | struct netxen_adapter *adapter = port->adapter; | ||
1090 | disable_irq(adapter->irq); | ||
1091 | netxen_intr(adapter->irq, adapter); | ||
1092 | enable_irq(adapter->irq); | ||
1093 | } | ||
1094 | #endif | ||
1095 | /* | ||
1096 | * netxen_nic_ioctl() - provides the tcl/phanmon support through these | ||
1097 | * ioctls. | ||
1098 | */ | ||
1099 | static int | ||
1100 | netxen_nic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
1101 | { | ||
1102 | int err = 0; | ||
1103 | unsigned long nr_bytes = 0; | ||
1104 | struct netxen_port *port = netdev_priv(netdev); | ||
1105 | struct netxen_adapter *adapter = port->adapter; | ||
1106 | char dev_name[NETXEN_NIC_NAME_LEN]; | ||
1107 | |||
1108 | DPRINTK(INFO, "doing ioctl for %s\n", netdev->name); | ||
1109 | switch (cmd) { | ||
1110 | case NETXEN_NIC_CMD: | ||
1111 | err = netxen_nic_do_ioctl(adapter, (void *)ifr->ifr_data, port); | ||
1112 | break; | ||
1113 | |||
1114 | case NETXEN_NIC_NAME: | ||
1115 | DPRINTK(INFO, "ioctl cmd for NetXen\n"); | ||
1116 | if (ifr->ifr_data) { | ||
1117 | sprintf(dev_name, "%s-%d", NETXEN_NIC_NAME_RSP, | ||
1118 | port->portnum); | ||
1119 | nr_bytes = copy_to_user((char *)ifr->ifr_data, dev_name, | ||
1120 | NETXEN_NIC_NAME_LEN); | ||
1121 | if (nr_bytes) | ||
1122 | err = -EIO; | ||
1123 | |||
1124 | } | ||
1125 | break; | ||
1126 | |||
1127 | default: | ||
1128 | DPRINTK(INFO, "ioctl cmd %x not supported\n", cmd); | ||
1129 | err = -EOPNOTSUPP; | ||
1130 | break; | ||
1131 | } | ||
1132 | |||
1133 | return err; | ||
1134 | } | ||
1135 | |||
1136 | static struct pci_driver netxen_driver = { | ||
1137 | .name = netxen_nic_driver_name, | ||
1138 | .id_table = netxen_pci_tbl, | ||
1139 | .probe = netxen_nic_probe, | ||
1140 | .remove = __devexit_p(netxen_nic_remove) | ||
1141 | }; | ||
1142 | |||
1143 | /* Driver Registration on NetXen card */ | ||
1144 | |||
1145 | static int __init netxen_init_module(void) | ||
1146 | { | ||
1147 | return pci_module_init(&netxen_driver); | ||
1148 | } | ||
1149 | |||
1150 | module_init(netxen_init_module); | ||
1151 | |||
1152 | static void __exit netxen_exit_module(void) | ||
1153 | { | ||
1154 | /* | ||
1155 | * Wait a while to allow any in-flight DMA to drain. | ||
1156 | */ | ||
1157 | mdelay(5); | ||
1158 | pci_unregister_driver(&netxen_driver); | ||
1159 | } | ||
1160 | |||
1161 | module_exit(netxen_exit_module); | ||
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c new file mode 100644 index 000000000000..7950a04532e6 --- /dev/null +++ b/drivers/net/netxen/netxen_nic_niu.c | |||
@@ -0,0 +1,894 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | * | ||
29 | * | ||
30 | * Provides access to the Network Interface Unit h/w block. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include "netxen_nic.h" | ||
35 | |||
36 | #define NETXEN_GB_MAC_SOFT_RESET 0x80000000 | ||
37 | #define NETXEN_GB_MAC_RESET_PROT_BLK 0x000F0000 | ||
38 | #define NETXEN_GB_MAC_ENABLE_TX_RX 0x00000005 | ||
39 | #define NETXEN_GB_MAC_PAUSED_FRMS 0x00000020 | ||
40 | |||
41 | static long phy_lock_timeout = 100000000; | ||
42 | |||
43 | static inline int phy_lock(void) | ||
44 | { | ||
45 | int i; | ||
46 | int done = 0, timeout = 0; | ||
47 | |||
48 | while (!done) { | ||
49 | done = readl((void __iomem *)NETXEN_PCIE_REG(PCIE_SEM3_LOCK)); | ||
50 | if (done == 1) | ||
51 | break; | ||
52 | if (timeout >= phy_lock_timeout) { | ||
53 | return -1; | ||
54 | } | ||
55 | timeout++; | ||
56 | if (!in_atomic()) | ||
57 | schedule(); | ||
58 | else { | ||
59 | for (i = 0; i < 20; i++) | ||
60 | cpu_relax(); | ||
61 | } | ||
62 | } | ||
63 | |||
64 | writel(NETXEN_PHY_LOCK_ID, (void __iomem *)PHY_LOCK_DRIVER); | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static inline int phy_unlock(void) | ||
69 | { | ||
70 | readl((void __iomem *)NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK)); | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * netxen_niu_gbe_phy_read - read a register from the GbE PHY via | ||
76 | * mii management interface. | ||
77 | * | ||
78 | * Note: The MII management interface goes through port 0. | ||
79 | * Individual phys are addressed as follows: | ||
80 | * @param phy [15:8] phy id | ||
81 | * @param reg [7:0] register number | ||
82 | * | ||
83 | * @returns 0 on success | ||
84 | * -1 on error | ||
85 | * | ||
86 | */ | ||
87 | int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy, | ||
88 | long reg, __le32 * readval) | ||
89 | { | ||
90 | long timeout = 0; | ||
91 | long result = 0; | ||
92 | long restore = 0; | ||
93 | __le32 address; | ||
94 | __le32 command; | ||
95 | __le32 status; | ||
96 | __le32 mac_cfg0; | ||
97 | |||
98 | if (phy_lock() != 0) { | ||
99 | return -1; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * MII mgmt all goes through port 0 MAC interface, | ||
104 | * so it cannot be in reset | ||
105 | */ | ||
106 | |||
107 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), | ||
108 | &mac_cfg0, 4)) | ||
109 | return -EIO; | ||
110 | if (netxen_gb_get_soft_reset(mac_cfg0)) { | ||
111 | __le32 temp; | ||
112 | temp = 0; | ||
113 | netxen_gb_tx_reset_pb(temp); | ||
114 | netxen_gb_rx_reset_pb(temp); | ||
115 | netxen_gb_tx_reset_mac(temp); | ||
116 | netxen_gb_rx_reset_mac(temp); | ||
117 | if (netxen_nic_hw_write_wx(adapter, | ||
118 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | ||
119 | &temp, 4)) | ||
120 | return -EIO; | ||
121 | restore = 1; | ||
122 | } | ||
123 | |||
124 | address = 0; | ||
125 | netxen_gb_mii_mgmt_reg_addr(address, reg); | ||
126 | netxen_gb_mii_mgmt_phy_addr(address, phy); | ||
127 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), | ||
128 | &address, 4)) | ||
129 | return -EIO; | ||
130 | command = 0; /* turn off any prior activity */ | ||
131 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), | ||
132 | &command, 4)) | ||
133 | return -EIO; | ||
134 | /* send read command */ | ||
135 | netxen_gb_mii_mgmt_set_read_cycle(command); | ||
136 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), | ||
137 | &command, 4)) | ||
138 | return -EIO; | ||
139 | |||
140 | status = 0; | ||
141 | do { | ||
142 | if (netxen_nic_hw_read_wx(adapter, | ||
143 | NETXEN_NIU_GB_MII_MGMT_INDICATE(0), | ||
144 | &status, 4)) | ||
145 | return -EIO; | ||
146 | timeout++; | ||
147 | } while ((netxen_get_gb_mii_mgmt_busy(status) | ||
148 | || netxen_get_gb_mii_mgmt_notvalid(status)) | ||
149 | && (timeout++ < NETXEN_NIU_PHY_WAITMAX)); | ||
150 | |||
151 | if (timeout < NETXEN_NIU_PHY_WAITMAX) { | ||
152 | if (netxen_nic_hw_read_wx(adapter, | ||
153 | NETXEN_NIU_GB_MII_MGMT_STATUS(0), | ||
154 | readval, 4)) | ||
155 | return -EIO; | ||
156 | result = 0; | ||
157 | } else | ||
158 | result = -1; | ||
159 | |||
160 | if (restore) | ||
161 | if (netxen_nic_hw_write_wx(adapter, | ||
162 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | ||
163 | &mac_cfg0, 4)) | ||
164 | return -EIO; | ||
165 | phy_unlock(); | ||
166 | return result; | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * netxen_niu_gbe_phy_write - write a register to the GbE PHY via | ||
171 | * mii management interface. | ||
172 | * | ||
173 | * Note: The MII management interface goes through port 0. | ||
174 | * Individual phys are addressed as follows: | ||
175 | * @param phy [15:8] phy id | ||
176 | * @param reg [7:0] register number | ||
177 | * | ||
178 | * @returns 0 on success | ||
179 | * -1 on error | ||
180 | * | ||
181 | */ | ||
182 | int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, | ||
183 | long phy, long reg, __le32 val) | ||
184 | { | ||
185 | long timeout = 0; | ||
186 | long result = 0; | ||
187 | long restore = 0; | ||
188 | __le32 address; | ||
189 | __le32 command; | ||
190 | __le32 status; | ||
191 | __le32 mac_cfg0; | ||
192 | |||
193 | /* | ||
194 | * MII mgmt all goes through port 0 MAC interface, so it | ||
195 | * cannot be in reset | ||
196 | */ | ||
197 | |||
198 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), | ||
199 | &mac_cfg0, 4)) | ||
200 | return -EIO; | ||
201 | if (netxen_gb_get_soft_reset(mac_cfg0)) { | ||
202 | __le32 temp; | ||
203 | temp = 0; | ||
204 | netxen_gb_tx_reset_pb(temp); | ||
205 | netxen_gb_rx_reset_pb(temp); | ||
206 | netxen_gb_tx_reset_mac(temp); | ||
207 | netxen_gb_rx_reset_mac(temp); | ||
208 | |||
209 | if (netxen_nic_hw_write_wx(adapter, | ||
210 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | ||
211 | &temp, 4)) | ||
212 | return -EIO; | ||
213 | restore = 1; | ||
214 | } | ||
215 | |||
216 | command = 0; /* turn off any prior activity */ | ||
217 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), | ||
218 | &command, 4)) | ||
219 | return -EIO; | ||
220 | |||
221 | address = 0; | ||
222 | netxen_gb_mii_mgmt_reg_addr(address, reg); | ||
223 | netxen_gb_mii_mgmt_phy_addr(address, phy); | ||
224 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), | ||
225 | &address, 4)) | ||
226 | return -EIO; | ||
227 | |||
228 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0), | ||
229 | &val, 4)) | ||
230 | return -EIO; | ||
231 | |||
232 | status = 0; | ||
233 | do { | ||
234 | if (netxen_nic_hw_read_wx(adapter, | ||
235 | NETXEN_NIU_GB_MII_MGMT_INDICATE(0), | ||
236 | &status, 4)) | ||
237 | return -EIO; | ||
238 | timeout++; | ||
239 | } while ((netxen_get_gb_mii_mgmt_busy(status)) | ||
240 | && (timeout++ < NETXEN_NIU_PHY_WAITMAX)); | ||
241 | |||
242 | if (timeout < NETXEN_NIU_PHY_WAITMAX) | ||
243 | result = 0; | ||
244 | else | ||
245 | result = -EIO; | ||
246 | |||
247 | /* restore the state of port 0 MAC in case we tampered with it */ | ||
248 | if (restore) | ||
249 | if (netxen_nic_hw_write_wx(adapter, | ||
250 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | ||
251 | &mac_cfg0, 4)) | ||
252 | return -EIO; | ||
253 | |||
254 | return result; | ||
255 | } | ||
256 | |||
257 | int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter, | ||
258 | int port) | ||
259 | { | ||
260 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x3f); | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter, | ||
265 | int port) | ||
266 | { | ||
267 | int result = 0; | ||
268 | __le32 enable = 0; | ||
269 | netxen_set_phy_int_link_status_changed(enable); | ||
270 | netxen_set_phy_int_autoneg_completed(enable); | ||
271 | netxen_set_phy_int_speed_changed(enable); | ||
272 | |||
273 | if (0 != | ||
274 | netxen_niu_gbe_phy_write(adapter, port, | ||
275 | NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE, | ||
276 | enable)) | ||
277 | result = -EIO; | ||
278 | |||
279 | return result; | ||
280 | } | ||
281 | |||
282 | int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter, | ||
283 | int port) | ||
284 | { | ||
285 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x7f); | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter, | ||
290 | int port) | ||
291 | { | ||
292 | int result = 0; | ||
293 | if (0 != | ||
294 | netxen_niu_gbe_phy_write(adapter, port, | ||
295 | NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE, 0)) | ||
296 | result = -EIO; | ||
297 | |||
298 | return result; | ||
299 | } | ||
300 | |||
301 | int netxen_niu_xgbe_clear_phy_interrupts(struct netxen_adapter *adapter, | ||
302 | int port) | ||
303 | { | ||
304 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_ACTIVE_INT, -1); | ||
305 | return 0; | ||
306 | } | ||
307 | |||
308 | int netxen_niu_gbe_clear_phy_interrupts(struct netxen_adapter *adapter, | ||
309 | int port) | ||
310 | { | ||
311 | int result = 0; | ||
312 | if (0 != | ||
313 | netxen_niu_gbe_phy_write(adapter, port, | ||
314 | NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS, | ||
315 | -EIO)) | ||
316 | result = -EIO; | ||
317 | |||
318 | return result; | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * netxen_niu_gbe_set_mii_mode - Set 10/100 Mbit Mode for GbE MAC | ||
323 | * | ||
324 | */ | ||
325 | void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter, | ||
326 | int port, long enable) | ||
327 | { | ||
328 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2); | ||
329 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
330 | 0x80000000); | ||
331 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
332 | 0x0000f0025); | ||
333 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port), | ||
334 | 0xf1ff); | ||
335 | netxen_crb_writelit_adapter(adapter, | ||
336 | NETXEN_NIU_GB0_GMII_MODE + (port << 3), 0); | ||
337 | netxen_crb_writelit_adapter(adapter, | ||
338 | NETXEN_NIU_GB0_MII_MODE + (port << 3), 1); | ||
339 | netxen_crb_writelit_adapter(adapter, | ||
340 | (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0); | ||
341 | netxen_crb_writelit_adapter(adapter, | ||
342 | NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7); | ||
343 | |||
344 | if (enable) { | ||
345 | /* | ||
346 | * Do NOT enable flow control until a suitable solution for | ||
347 | * shutting down pause frames is found. | ||
348 | */ | ||
349 | netxen_crb_writelit_adapter(adapter, | ||
350 | NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
351 | 0x5); | ||
352 | } | ||
353 | |||
354 | if (netxen_niu_gbe_enable_phy_interrupts(adapter, port)) | ||
355 | printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n"); | ||
356 | if (netxen_niu_gbe_clear_phy_interrupts(adapter, port)) | ||
357 | printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n"); | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * netxen_niu_gbe_set_gmii_mode - Set GbE Mode for GbE MAC | ||
362 | */ | ||
363 | void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter, | ||
364 | int port, long enable) | ||
365 | { | ||
366 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2); | ||
367 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
368 | 0x80000000); | ||
369 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
370 | 0x0000f0025); | ||
371 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port), | ||
372 | 0xf2ff); | ||
373 | netxen_crb_writelit_adapter(adapter, | ||
374 | NETXEN_NIU_GB0_MII_MODE + (port << 3), 0); | ||
375 | netxen_crb_writelit_adapter(adapter, | ||
376 | NETXEN_NIU_GB0_GMII_MODE + (port << 3), 1); | ||
377 | netxen_crb_writelit_adapter(adapter, | ||
378 | (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0); | ||
379 | netxen_crb_writelit_adapter(adapter, | ||
380 | NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7); | ||
381 | |||
382 | if (enable) { | ||
383 | /* | ||
384 | * Do NOT enable flow control until a suitable solution for | ||
385 | * shutting down pause frames is found. | ||
386 | */ | ||
387 | netxen_crb_writelit_adapter(adapter, | ||
388 | NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
389 | 0x5); | ||
390 | } | ||
391 | |||
392 | if (netxen_niu_gbe_enable_phy_interrupts(adapter, port)) | ||
393 | printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n"); | ||
394 | if (netxen_niu_gbe_clear_phy_interrupts(adapter, port)) | ||
395 | printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n"); | ||
396 | } | ||
397 | |||
398 | int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port) | ||
399 | { | ||
400 | int result = 0; | ||
401 | __le32 status; | ||
402 | if (adapter->ops->disable_phy_interrupts) | ||
403 | adapter->ops->disable_phy_interrupts(adapter, port); | ||
404 | mdelay(2); | ||
405 | |||
406 | if (0 == | ||
407 | netxen_niu_gbe_phy_read(adapter, port, | ||
408 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
409 | (__le32 *) & status)) { | ||
410 | if (netxen_get_phy_link(status)) { | ||
411 | if (netxen_get_phy_speed(status) == 2) { | ||
412 | netxen_niu_gbe_set_gmii_mode(adapter, port, 1); | ||
413 | } else if ((netxen_get_phy_speed(status) == 1) | ||
414 | || (netxen_get_phy_speed(status) == 0)) { | ||
415 | netxen_niu_gbe_set_mii_mode(adapter, port, 1); | ||
416 | } else { | ||
417 | result = -1; | ||
418 | } | ||
419 | |||
420 | } else { | ||
421 | /* | ||
422 | * We don't have link. Cable must be unconnected. | ||
423 | * Enable phy interrupts so we take action when | ||
424 | * plugged in. | ||
425 | */ | ||
426 | |||
427 | netxen_crb_writelit_adapter(adapter, | ||
428 | NETXEN_NIU_GB_MAC_CONFIG_0 | ||
429 | (port), | ||
430 | NETXEN_GB_MAC_SOFT_RESET); | ||
431 | netxen_crb_writelit_adapter(adapter, | ||
432 | NETXEN_NIU_GB_MAC_CONFIG_0 | ||
433 | (port), | ||
434 | NETXEN_GB_MAC_RESET_PROT_BLK | ||
435 | | NETXEN_GB_MAC_ENABLE_TX_RX | ||
436 | | | ||
437 | NETXEN_GB_MAC_PAUSED_FRMS); | ||
438 | if (netxen_niu_gbe_clear_phy_interrupts(adapter, port)) | ||
439 | printk(KERN_ERR PFX | ||
440 | "ERROR clearing PHY interrupts\n"); | ||
441 | if (netxen_niu_gbe_enable_phy_interrupts(adapter, port)) | ||
442 | printk(KERN_ERR PFX | ||
443 | "ERROR enabling PHY interrupts\n"); | ||
444 | if (netxen_niu_gbe_clear_phy_interrupts(adapter, port)) | ||
445 | printk(KERN_ERR PFX | ||
446 | "ERROR clearing PHY interrupts\n"); | ||
447 | result = -1; | ||
448 | } | ||
449 | } else { | ||
450 | result = -EIO; | ||
451 | } | ||
452 | return result; | ||
453 | } | ||
454 | |||
455 | int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) | ||
456 | { | ||
457 | long reg = 0, ret = 0; | ||
458 | |||
459 | if (adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) { | ||
460 | netxen_crb_writelit_adapter(adapter, | ||
461 | NETXEN_NIU_XG1_CONFIG_0, 0x5); | ||
462 | /* XXX hack for Mez cards: both ports in promisc mode */ | ||
463 | netxen_nic_hw_read_wx(adapter, | ||
464 | NETXEN_NIU_XGE_CONFIG_1, ®, 4); | ||
465 | reg = (reg | 0x2000UL); | ||
466 | netxen_crb_writelit_adapter(adapter, | ||
467 | NETXEN_NIU_XGE_CONFIG_1, reg); | ||
468 | reg = 0; | ||
469 | netxen_nic_hw_read_wx(adapter, | ||
470 | NETXEN_NIU_XG1_CONFIG_1, ®, 4); | ||
471 | reg = (reg | 0x2000UL); | ||
472 | netxen_crb_writelit_adapter(adapter, | ||
473 | NETXEN_NIU_XG1_CONFIG_1, reg); | ||
474 | } | ||
475 | |||
476 | return ret; | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * netxen_niu_gbe_handle_phy_interrupt - Handles GbE PHY interrupts | ||
481 | * @param enable 0 means don't enable the port | ||
482 | * 1 means enable (or re-enable) the port | ||
483 | */ | ||
484 | int netxen_niu_gbe_handle_phy_interrupt(struct netxen_adapter *adapter, | ||
485 | int port, long enable) | ||
486 | { | ||
487 | int result = 0; | ||
488 | __le32 int_src; | ||
489 | |||
490 | printk(KERN_INFO PFX "NETXEN: Handling PHY interrupt on port %d" | ||
491 | " (device enable = %d)\n", (int)port, (int)enable); | ||
492 | |||
493 | /* | ||
494 | * The read of the PHY INT status will clear the pending | ||
495 | * interrupt status | ||
496 | */ | ||
497 | if (netxen_niu_gbe_phy_read(adapter, port, | ||
498 | NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS, | ||
499 | &int_src) != 0) | ||
500 | result = -EINVAL; | ||
501 | else { | ||
502 | printk(KERN_INFO PFX "PHY Interrupt source = 0x%x \n", int_src); | ||
503 | if (netxen_get_phy_int_jabber(int_src)) | ||
504 | printk(KERN_INFO PFX "jabber Interrupt "); | ||
505 | if (netxen_get_phy_int_polarity_changed(int_src)) | ||
506 | printk(KERN_INFO PFX "polarity changed "); | ||
507 | if (netxen_get_phy_int_energy_detect(int_src)) | ||
508 | printk(KERN_INFO PFX "energy detect \n"); | ||
509 | if (netxen_get_phy_int_downshift(int_src)) | ||
510 | printk(KERN_INFO PFX "downshift \n"); | ||
511 | if (netxen_get_phy_int_mdi_xover_changed(int_src)) | ||
512 | printk(KERN_INFO PFX "mdi_xover_changed "); | ||
513 | if (netxen_get_phy_int_fifo_over_underflow(int_src)) | ||
514 | printk(KERN_INFO PFX "fifo_over_underflow "); | ||
515 | if (netxen_get_phy_int_false_carrier(int_src)) | ||
516 | printk(KERN_INFO PFX "false_carrier "); | ||
517 | if (netxen_get_phy_int_symbol_error(int_src)) | ||
518 | printk(KERN_INFO PFX "symbol_error "); | ||
519 | if (netxen_get_phy_int_autoneg_completed(int_src)) | ||
520 | printk(KERN_INFO PFX "autoneg_completed "); | ||
521 | if (netxen_get_phy_int_page_received(int_src)) | ||
522 | printk(KERN_INFO PFX "page_received "); | ||
523 | if (netxen_get_phy_int_duplex_changed(int_src)) | ||
524 | printk(KERN_INFO PFX "duplex_changed "); | ||
525 | if (netxen_get_phy_int_autoneg_error(int_src)) | ||
526 | printk(KERN_INFO PFX "autoneg_error "); | ||
527 | if ((netxen_get_phy_int_speed_changed(int_src)) | ||
528 | || (netxen_get_phy_int_link_status_changed(int_src))) { | ||
529 | __le32 status; | ||
530 | |||
531 | printk(KERN_INFO PFX | ||
532 | "speed_changed or link status changed"); | ||
533 | if (netxen_niu_gbe_phy_read | ||
534 | (adapter, port, | ||
535 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
536 | &status) == 0) { | ||
537 | if (netxen_get_phy_speed(status) == 2) { | ||
538 | printk | ||
539 | (KERN_INFO PFX "Link speed changed" | ||
540 | " to 1000 Mbps\n"); | ||
541 | netxen_niu_gbe_set_gmii_mode(adapter, | ||
542 | port, | ||
543 | enable); | ||
544 | } else if (netxen_get_phy_speed(status) == 1) { | ||
545 | printk | ||
546 | (KERN_INFO PFX "Link speed changed" | ||
547 | " to 100 Mbps\n"); | ||
548 | netxen_niu_gbe_set_mii_mode(adapter, | ||
549 | port, | ||
550 | enable); | ||
551 | } else if (netxen_get_phy_speed(status) == 0) { | ||
552 | printk | ||
553 | (KERN_INFO PFX "Link speed changed" | ||
554 | " to 10 Mbps\n"); | ||
555 | netxen_niu_gbe_set_mii_mode(adapter, | ||
556 | port, | ||
557 | enable); | ||
558 | } else { | ||
559 | printk(KERN_ERR PFX "ERROR reading " | ||
560 | "PHY status. Illegal speed.\n"); | ||
561 | result = -1; | ||
562 | } | ||
563 | } else { | ||
564 | printk(KERN_ERR PFX | ||
565 | "ERROR reading PHY status.\n"); | ||
566 | result = -1; | ||
567 | } | ||
568 | |||
569 | } | ||
570 | printk(KERN_INFO "\n"); | ||
571 | } | ||
572 | return result; | ||
573 | } | ||
574 | |||
575 | /* | ||
576 | * Return the current station MAC address. | ||
577 | * Note that the value returned through *addr is in network byte order. | ||
578 | */ | ||
579 | int netxen_niu_macaddr_get(struct netxen_adapter *adapter, | ||
580 | int phy, netxen_ethernet_macaddr_t * addr) | ||
581 | { | ||
582 | u64 result = 0; | ||
583 | __le32 stationhigh; | ||
584 | __le32 stationlow; | ||
585 | |||
586 | if (addr == NULL) | ||
587 | return -EINVAL; | ||
588 | if ((phy < 0) || (phy > 3)) | ||
589 | return -EINVAL; | ||
590 | |||
591 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), | ||
592 | &stationhigh, 4)) | ||
593 | return -EIO; | ||
594 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), | ||
595 | &stationlow, 4)) | ||
596 | return -EIO; | ||
597 | |||
598 | result = (u64) netxen_gb_get_stationaddress_low(stationlow); | ||
599 | result |= (u64) stationhigh << 16; | ||
600 | memcpy(*addr, &result, sizeof(netxen_ethernet_macaddr_t)); | ||
601 | |||
602 | return 0; | ||
603 | } | ||
604 | |||
605 | /* | ||
606 | * Set the station MAC address. | ||
607 | * Note that the passed-in value must already be in network byte order. | ||
608 | */ | ||
609 | int netxen_niu_macaddr_set(struct netxen_port *port, | ||
610 | netxen_ethernet_macaddr_t addr) | ||
611 | { | ||
612 | __le32 temp = 0; | ||
613 | struct netxen_adapter *adapter = port->adapter; | ||
614 | int phy = port->portnum; | ||
615 | unsigned char mac_addr[MAX_ADDR_LEN]; | ||
616 | int i; | ||
617 | |||
618 | for (i = 0; i < 10; i++) { | ||
619 | memcpy(&temp, addr, 2); | ||
620 | temp <<= 16; | ||
621 | if (netxen_nic_hw_write_wx | ||
622 | (adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), &temp, 4)) | ||
623 | return -EIO; | ||
624 | |||
625 | temp = 0; | ||
626 | |||
627 | memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); | ||
628 | if (netxen_nic_hw_write_wx | ||
629 | (adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), &temp, 4)) | ||
630 | return -2; | ||
631 | |||
632 | netxen_niu_macaddr_get(adapter, phy, | ||
633 | (netxen_ethernet_macaddr_t *) mac_addr); | ||
634 | if (memcmp(mac_addr, addr, sizeof(netxen_ethernet_macaddr_t)) == 0) | ||
635 | break; | ||
636 | } | ||
637 | |||
638 | if (i == 10) { | ||
639 | printk(KERN_ERR "%s: cannot set Mac addr for %s\n", | ||
640 | netxen_nic_driver_name, port->netdev->name); | ||
641 | printk(KERN_ERR "MAC address set: " | ||
642 | "%02x:%02x:%02x:%02x:%02x:%02x.\n", | ||
643 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); | ||
644 | |||
645 | printk(KERN_ERR "MAC address get: " | ||
646 | "%02x:%02x:%02x:%02x:%02x:%02x.\n", | ||
647 | mac_addr[0], | ||
648 | mac_addr[1], | ||
649 | mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); | ||
650 | } | ||
651 | return 0; | ||
652 | } | ||
653 | |||
654 | /* Enable a GbE interface */ | ||
655 | int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, | ||
656 | int port, netxen_niu_gbe_ifmode_t mode) | ||
657 | { | ||
658 | __le32 mac_cfg0; | ||
659 | __le32 mac_cfg1; | ||
660 | __le32 mii_cfg; | ||
661 | |||
662 | if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) | ||
663 | return -EINVAL; | ||
664 | |||
665 | mac_cfg0 = 0; | ||
666 | netxen_gb_soft_reset(mac_cfg0); | ||
667 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
668 | &mac_cfg0, 4)) | ||
669 | return -EIO; | ||
670 | mac_cfg0 = 0; | ||
671 | netxen_gb_enable_tx(mac_cfg0); | ||
672 | netxen_gb_enable_rx(mac_cfg0); | ||
673 | netxen_gb_unset_rx_flowctl(mac_cfg0); | ||
674 | netxen_gb_tx_reset_pb(mac_cfg0); | ||
675 | netxen_gb_rx_reset_pb(mac_cfg0); | ||
676 | netxen_gb_tx_reset_mac(mac_cfg0); | ||
677 | netxen_gb_rx_reset_mac(mac_cfg0); | ||
678 | |||
679 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
680 | &mac_cfg0, 4)) | ||
681 | return -EIO; | ||
682 | mac_cfg1 = 0; | ||
683 | netxen_gb_set_preamblelen(mac_cfg1, 0xf); | ||
684 | netxen_gb_set_duplex(mac_cfg1); | ||
685 | netxen_gb_set_crc_enable(mac_cfg1); | ||
686 | netxen_gb_set_padshort(mac_cfg1); | ||
687 | netxen_gb_set_checklength(mac_cfg1); | ||
688 | netxen_gb_set_hugeframes(mac_cfg1); | ||
689 | |||
690 | if (mode == NETXEN_NIU_10_100_MB) { | ||
691 | netxen_gb_set_intfmode(mac_cfg1, 1); | ||
692 | if (netxen_nic_hw_write_wx(adapter, | ||
693 | NETXEN_NIU_GB_MAC_CONFIG_1(port), | ||
694 | &mac_cfg1, 4)) | ||
695 | return -EIO; | ||
696 | |||
697 | /* set mii mode */ | ||
698 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_GMII_MODE + | ||
699 | (port << 3), 0); | ||
700 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_MII_MODE + | ||
701 | (port << 3), 1); | ||
702 | |||
703 | } else if (mode == NETXEN_NIU_1000_MB) { | ||
704 | netxen_gb_set_intfmode(mac_cfg1, 2); | ||
705 | if (netxen_nic_hw_write_wx(adapter, | ||
706 | NETXEN_NIU_GB_MAC_CONFIG_1(port), | ||
707 | &mac_cfg1, 4)) | ||
708 | return -EIO; | ||
709 | /* set gmii mode */ | ||
710 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_MII_MODE + | ||
711 | (port << 3), 0); | ||
712 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_GMII_MODE + | ||
713 | (port << 3), 1); | ||
714 | } | ||
715 | mii_cfg = 0; | ||
716 | netxen_gb_set_mii_mgmt_clockselect(mii_cfg, 7); | ||
717 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port), | ||
718 | &mii_cfg, 4)) | ||
719 | return -EIO; | ||
720 | mac_cfg0 = 0; | ||
721 | netxen_gb_enable_tx(mac_cfg0); | ||
722 | netxen_gb_enable_rx(mac_cfg0); | ||
723 | netxen_gb_unset_rx_flowctl(mac_cfg0); | ||
724 | netxen_gb_unset_tx_flowctl(mac_cfg0); | ||
725 | |||
726 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
727 | &mac_cfg0, 4)) | ||
728 | return -EIO; | ||
729 | return 0; | ||
730 | } | ||
731 | |||
732 | /* Disable a GbE interface */ | ||
733 | int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port) | ||
734 | { | ||
735 | __le32 mac_cfg0; | ||
736 | |||
737 | if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) | ||
738 | return -EINVAL; | ||
739 | |||
740 | mac_cfg0 = 0; | ||
741 | netxen_gb_soft_reset(mac_cfg0); | ||
742 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | ||
743 | &mac_cfg0, 4)) | ||
744 | return -EIO; | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | /* Disable an XG interface */ | ||
749 | int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port) | ||
750 | { | ||
751 | __le32 mac_cfg; | ||
752 | |||
753 | if (port != 0) | ||
754 | return -EINVAL; | ||
755 | |||
756 | mac_cfg = 0; | ||
757 | netxen_xg_soft_reset(mac_cfg); | ||
758 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_CONFIG_0, | ||
759 | &mac_cfg, 4)) | ||
760 | return -EIO; | ||
761 | return 0; | ||
762 | } | ||
763 | |||
764 | /* Set promiscuous mode for a GbE interface */ | ||
765 | int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port, | ||
766 | netxen_niu_prom_mode_t mode) | ||
767 | { | ||
768 | __le32 reg; | ||
769 | |||
770 | if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) | ||
771 | return -EINVAL; | ||
772 | |||
773 | /* save previous contents */ | ||
774 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, | ||
775 | ®, 4)) | ||
776 | return -EIO; | ||
777 | if (mode == NETXEN_NIU_PROMISC_MODE) { | ||
778 | switch (port) { | ||
779 | case 0: | ||
780 | netxen_clear_gb_drop_gb0(reg); | ||
781 | break; | ||
782 | case 1: | ||
783 | netxen_clear_gb_drop_gb1(reg); | ||
784 | break; | ||
785 | case 2: | ||
786 | netxen_clear_gb_drop_gb2(reg); | ||
787 | break; | ||
788 | case 3: | ||
789 | netxen_clear_gb_drop_gb3(reg); | ||
790 | break; | ||
791 | default: | ||
792 | return -EIO; | ||
793 | } | ||
794 | } else { | ||
795 | switch (port) { | ||
796 | case 0: | ||
797 | netxen_set_gb_drop_gb0(reg); | ||
798 | break; | ||
799 | case 1: | ||
800 | netxen_set_gb_drop_gb1(reg); | ||
801 | break; | ||
802 | case 2: | ||
803 | netxen_set_gb_drop_gb2(reg); | ||
804 | break; | ||
805 | case 3: | ||
806 | netxen_set_gb_drop_gb3(reg); | ||
807 | break; | ||
808 | default: | ||
809 | return -EIO; | ||
810 | } | ||
811 | } | ||
812 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, | ||
813 | ®, 4)) | ||
814 | return -EIO; | ||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | /* | ||
819 | * Set the MAC address for an XG port | ||
820 | * Note that the passed-in value must already be in network byte order. | ||
821 | */ | ||
822 | int netxen_niu_xg_macaddr_set(struct netxen_port *port, | ||
823 | netxen_ethernet_macaddr_t addr) | ||
824 | { | ||
825 | __le32 temp = 0; | ||
826 | struct netxen_adapter *adapter = port->adapter; | ||
827 | |||
828 | memcpy(&temp, addr, 2); | ||
829 | temp = cpu_to_le32(temp); | ||
830 | temp <<= 16; | ||
831 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, | ||
832 | &temp, 4)) | ||
833 | return -EIO; | ||
834 | |||
835 | temp = 0; | ||
836 | |||
837 | memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); | ||
838 | temp = cpu_to_le32(temp); | ||
839 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, | ||
840 | &temp, 4)) | ||
841 | return -EIO; | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | /* | ||
847 | * Return the current station MAC address. | ||
848 | * Note that the value returned through *addr is in network byte order. | ||
849 | */ | ||
850 | int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int phy, | ||
851 | netxen_ethernet_macaddr_t * addr) | ||
852 | { | ||
853 | __le32 stationhigh; | ||
854 | __le32 stationlow; | ||
855 | u64 result; | ||
856 | |||
857 | if (addr == NULL) | ||
858 | return -EINVAL; | ||
859 | if (phy != 0) | ||
860 | return -EINVAL; | ||
861 | |||
862 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, | ||
863 | &stationhigh, 4)) | ||
864 | return -EIO; | ||
865 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, | ||
866 | &stationlow, 4)) | ||
867 | return -EIO; | ||
868 | |||
869 | result = ((u64) stationlow) >> 16; | ||
870 | result |= (u64) stationhigh << 16; | ||
871 | memcpy(*addr, &result, sizeof(netxen_ethernet_macaddr_t)); | ||
872 | |||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | ||
877 | int port, netxen_niu_prom_mode_t mode) | ||
878 | { | ||
879 | __le32 reg; | ||
880 | |||
881 | if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) | ||
882 | return -EINVAL; | ||
883 | |||
884 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_CONFIG_1, ®, 4)) | ||
885 | return -EIO; | ||
886 | if (mode == NETXEN_NIU_PROMISC_MODE) | ||
887 | reg = (reg | 0x2000UL); | ||
888 | else | ||
889 | reg = (reg & ~0x2000UL); | ||
890 | |||
891 | netxen_crb_writelit_adapter(adapter, NETXEN_NIU_XGE_CONFIG_1, reg); | ||
892 | |||
893 | return 0; | ||
894 | } | ||
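Stepping back from the register-level detail, the GbE PHY access in this file boils down to: take the PCIe semaphore (phy_lock), program the MII address and command registers through the port-0 MAC, poll the INDICATE register until the transaction completes, then read the result. A condensed caller-side sketch follows; probe_gbe_link() is a hypothetical wrapper name, but the helpers it calls and the speed encoding (0 = 10, 1 = 100, 2 = 1000 Mbps) are the ones used in this file.

/* Hedged sketch: decoding link state via the PHY read helper defined above.
 * probe_gbe_link() is an illustrative name, not part of the driver. */
static int probe_gbe_link(struct netxen_adapter *adapter, int port)
{
	__le32 status;

	if (netxen_niu_gbe_phy_read(adapter, port,
				    NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
				    &status) != 0)
		return -EIO;			/* MII transaction failed or timed out */

	if (!netxen_get_phy_link(status))
		return 0;			/* no link; caller may enable PHY interrupts */

	return netxen_get_phy_speed(status);	/* 0 = 10, 1 = 100, 2 = 1000 Mbps */
}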
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h new file mode 100644 index 000000000000..8181d436783f --- /dev/null +++ b/drivers/net/netxen/netxen_nic_phan_reg.h | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | */ | ||
29 | |||
30 | #ifndef __NIC_PHAN_REG_H_ | ||
31 | #define __NIC_PHAN_REG_H_ | ||
32 | |||
33 | /* | ||
34 | * CRB register / queue-message setup that is done only at initialization time. | ||
35 | */ | ||
36 | |||
37 | /* | ||
38 | * The following 2 are the base addresses for the CRB registers; offsets | ||
39 | * are added to them to form the addresses of the index registers defined below. | ||
40 | */ | ||
41 | #define NIC_CRB_BASE_PORT1 NETXEN_CAM_RAM(0x200) | ||
42 | #define NIC_CRB_BASE_PORT2 NETXEN_CAM_RAM(0x250) | ||
43 | |||
44 | #define NETXEN_NIC_REG(X) (NIC_CRB_BASE_PORT1+(X)) | ||
45 | |||
46 | /* | ||
47 | * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex, | ||
48 | * which can be read by the Phantom host to get producer/consumer indexes from | ||
49 | * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following | ||
50 | * registers will be used for the addresses of the ring's shared memory | ||
51 | * on the Phantom. | ||
52 | */ | ||
53 | |||
54 | #define CRB_PHAN_CNTRL_LO_OFFSET NETXEN_NIC_REG(0x00) | ||
55 | #define CRB_PHAN_CNTRL_HI_OFFSET NETXEN_NIC_REG(0x04) | ||
56 | |||
57 | /* point to the indexes */ | ||
58 | #define CRB_CMD_PRODUCER_OFFSET NETXEN_NIC_REG(0x08) | ||
59 | #define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c) | ||
60 | |||
61 | #define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10) | ||
62 | #define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14) | ||
63 | |||
64 | /* address of command descriptors in the host memory */ | ||
65 | #define CRB_HOST_CMD_ADDR_HI NETXEN_NIC_REG(0x30) | ||
66 | #define CRB_HOST_CMD_ADDR_LO NETXEN_NIC_REG(0x34) | ||
67 | |||
68 | /* The following 4 CRB registers are used for performance coalescing */ | ||
69 | #define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x38) | ||
70 | #define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x3c) | ||
71 | #define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x40) | ||
72 | #define CRB_RCV_DMA_LOOP NETXEN_NIC_REG(0x44) | ||
73 | |||
74 | /* Needed by the host to find out the state of Phantom's initialization */ | ||
75 | #define CRB_ENABLE_TX_INTR NETXEN_NIC_REG(0x4c) | ||
76 | #define CRB_CMDPEG_STATE NETXEN_NIC_REG(0x50) | ||
77 | #define CRB_CMDPEG_CMDRING NETXEN_NIC_REG(0x54) | ||
78 | |||
79 | /* Interrupt coalescing parameters */ | ||
80 | #define CRB_GLOBAL_INT_COAL NETXEN_NIC_REG(0x80) | ||
81 | #define CRB_INT_COAL_MODE NETXEN_NIC_REG(0x84) | ||
82 | #define CRB_MAX_RCV_BUFS NETXEN_NIC_REG(0x88) | ||
83 | #define CRB_TX_INT_THRESHOLD NETXEN_NIC_REG(0x8c) | ||
84 | #define CRB_RX_PKT_TIMER NETXEN_NIC_REG(0x90) | ||
85 | #define CRB_TX_PKT_TIMER NETXEN_NIC_REG(0x94) | ||
86 | #define CRB_RX_PKT_CNT NETXEN_NIC_REG(0x98) | ||
87 | #define CRB_RX_TMR_CNT NETXEN_NIC_REG(0x9c) | ||
88 | #define CRB_INT_THRESH NETXEN_NIC_REG(0xa4) | ||
89 | |||
90 | /* Register for communicating XG link status */ | ||
91 | #define CRB_XG_STATE NETXEN_NIC_REG(0xa0) | ||
92 | |||
93 | /* Register for communicating card temperature */ | ||
94 | /* Upper 16 bits are temperature value. Lower 16 bits are the state */ | ||
95 | #define CRB_TEMP_STATE NETXEN_NIC_REG(0xa8) | ||
96 | #define nx_get_temp_val(x) ((x) >> 16) | ||
97 | #define nx_get_temp_state(x) ((x) & 0xffff) | ||
98 | #define nx_encode_temp(val, state) (((val) << 16) | (state)) | ||
99 | |||
100 | /* Debug registers for controlling NIC pkt gen agent */ | ||
101 | #define CRB_AGENT_GO NETXEN_NIC_REG(0xb0) | ||
102 | #define CRB_AGENT_TX_SIZE NETXEN_NIC_REG(0xb4) | ||
103 | #define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xb8) | ||
104 | #define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xbc) | ||
105 | #define CRB_AGENT_TX_MSS NETXEN_NIC_REG(0xc0) | ||
106 | |||
107 | /* Debug registers for observing NIC performance */ | ||
108 | #define CRB_TX_STATE NETXEN_NIC_REG(0xd0) | ||
109 | #define CRB_TX_COUNT NETXEN_NIC_REG(0xd4) | ||
110 | #define CRB_RX_STATE NETXEN_NIC_REG(0xd8) | ||
111 | |||
112 | /* CRB registers per Rcv Descriptor ring */ | ||
113 | struct netxen_rcv_desc_crb { | ||
114 | u32 crb_rcv_producer_offset __attribute__ ((aligned(512))); | ||
115 | u32 crb_rcv_consumer_offset; | ||
116 | u32 crb_globalrcv_ring; | ||
117 | }; | ||
118 | |||
119 | /* | ||
120 | * CRB registers used by the receive peg logic. One instance of these | ||
121 | * needs to be instantiated per instance of the receive peg. | ||
122 | */ | ||
123 | |||
124 | struct netxen_recv_crb { | ||
125 | struct netxen_rcv_desc_crb rcv_desc_crb[NUM_RCV_DESC_RINGS]; | ||
126 | u32 crb_rcvstatus_ring; | ||
127 | u32 crb_rcv_status_producer; | ||
128 | u32 crb_rcv_status_consumer; | ||
129 | u32 crb_rcvpeg_state; | ||
130 | }; | ||
131 | |||
132 | #if defined(DEFINE_GLOBAL_RECV_CRB) | ||
133 | struct netxen_recv_crb recv_crb_registers[] = { | ||
134 | /* | ||
135 | * Instance 0. | ||
136 | */ | ||
137 | { | ||
138 | /* rcv_desc_crb: */ | ||
139 | { | ||
140 | { | ||
141 | /* crb_rcv_producer_offset: */ | ||
142 | NETXEN_NIC_REG(0x18), | ||
143 | /* crb_rcv_consumer_offset: */ | ||
144 | NETXEN_NIC_REG(0x1c), | ||
145 | /* crb_globalrcv_ring: */ | ||
146 | NETXEN_NIC_REG(0x20), | ||
147 | }, | ||
148 | /* Jumbo frames */ | ||
149 | { | ||
150 | /* crb_rcv_producer_offset: */ | ||
151 | NETXEN_NIC_REG(0x100), | ||
152 | /* crb_rcv_consumer_offset: */ | ||
153 | NETXEN_NIC_REG(0x104), | ||
154 | /* crb_globalrcv_ring: */ | ||
155 | NETXEN_NIC_REG(0x108), | ||
156 | } | ||
157 | }, | ||
158 | /* crb_rcvstatus_ring: */ | ||
159 | NETXEN_NIC_REG(0x24), | ||
160 | /* crb_rcv_status_producer: */ | ||
161 | NETXEN_NIC_REG(0x28), | ||
162 | /* crb_rcv_status_consumer: */ | ||
163 | NETXEN_NIC_REG(0x2c), | ||
164 | /* crb_rcvpeg_state: */ | ||
165 | NETXEN_NIC_REG(0x48), | ||
166 | |||
167 | }, | ||
168 | /* | ||
169 | * Instance 1. | ||
170 | */ | ||
171 | { | ||
172 | /* rcv_desc_crb: */ | ||
173 | { | ||
174 | { | ||
175 | /* crb_rcv_producer_offset: */ | ||
176 | NETXEN_NIC_REG(0x80), | ||
177 | /* crb_rcv_consumer_offset: */ | ||
178 | NETXEN_NIC_REG(0x84), | ||
179 | /* crb_globalrcv_ring: */ | ||
180 | NETXEN_NIC_REG(0x88), | ||
181 | }, | ||
182 | /* Jumbo frames */ | ||
183 | { | ||
184 | /* crb_rcv_producer_offset: */ | ||
185 | NETXEN_NIC_REG(0x10C), | ||
186 | /* crb_rcv_consumer_offset: */ | ||
187 | NETXEN_NIC_REG(0x110), | ||
188 | /* crb_globalrcv_ring: */ | ||
189 | NETXEN_NIC_REG(0x114), | ||
190 | } | ||
191 | }, | ||
192 | /* crb_rcvstatus_ring: */ | ||
193 | NETXEN_NIC_REG(0x8c), | ||
194 | /* crb_rcv_status_producer: */ | ||
195 | NETXEN_NIC_REG(0x90), | ||
196 | /* crb_rcv_status_consumer: */ | ||
197 | NETXEN_NIC_REG(0x94), | ||
198 | /* crb_rcvpeg_state: */ | ||
199 | NETXEN_NIC_REG(0x98), | ||
200 | }, | ||
201 | }; | ||
202 | #else | ||
203 | extern struct netxen_recv_crb recv_crb_registers[]; | ||
204 | #endif /* DEFINE_GLOBAL_RECV_CRB */ | ||
205 | |||
206 | /* | ||
207 | * Temperature control. | ||
208 | */ | ||
209 | enum { | ||
210 | NX_TEMP_NORMAL = 0x1, /* Normal operating range */ | ||
211 | NX_TEMP_WARN, /* Sound alert, temperature getting high */ | ||
212 | NX_TEMP_PANIC /* Fatal error, hardware has shut down. */ | ||
213 | }; | ||
214 | |||
215 | #endif /* __NIC_PHAN_REG_H_ */ | ||
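The CRB_TEMP_STATE word above packs the temperature reading and the NX_TEMP_* state into a single 32-bit register, so a monitoring path only needs the two accessor macros. A brief sketch follows; read_crb32() is a stand-in for whatever CRB read helper the driver actually uses, so only the macros and the NX_TEMP_* values are taken from this header.

/* Hedged sketch: consuming CRB_TEMP_STATE with the accessors defined above.
 * read_crb32() is a hypothetical helper standing in for the driver's CRB
 * register read routine. */
static void netxen_check_temp_sketch(struct netxen_adapter *adapter)
{
	u32 word = read_crb32(adapter, CRB_TEMP_STATE);	/* hypothetical read */
	u32 temp = nx_get_temp_val(word);		/* upper 16 bits */
	u32 state = nx_get_temp_state(word);		/* lower 16 bits */

	if (state == NX_TEMP_PANIC)
		printk(KERN_ERR "netxen: %u C, hardware has shut down\n", temp);
	else if (state == NX_TEMP_WARN)
		printk(KERN_WARNING "netxen: %u C, temperature getting high\n", temp);
}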
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 0c00d182e7fd..c51cc5d8789a 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -1096,7 +1096,6 @@ static void ei_watchdog(u_long arg) | |||
1096 | 1096 | ||
1097 | /* Check for pending interrupt with expired latency timer: with | 1097 | /* Check for pending interrupt with expired latency timer: with |
1098 | this, we can limp along even if the interrupt is blocked */ | 1098 | this, we can limp along even if the interrupt is blocked */ |
1099 | outb_p(E8390_NODMA+E8390_PAGE0, nic_base + E8390_CMD); | ||
1100 | if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { | 1099 | if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { |
1101 | if (!info->fast_poll) | 1100 | if (!info->fast_poll) |
1102 | printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); | 1101 | printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index ecb61f876f27..f994f129f3d8 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -56,6 +56,12 @@ config SMSC_PHY | |||
56 | ---help--- | 56 | ---help--- |
57 | Currently supports the LAN83C185 PHY | 57 | Currently supports the LAN83C185 PHY |
58 | 58 | ||
59 | config BROADCOM_PHY | ||
60 | tristate "Drivers for Broadcom PHYs" | ||
61 | depends on PHYLIB | ||
62 | ---help--- | ||
63 | Currently supports the BCM5411, BCM5421 and BCM5461 PHYs. | ||
64 | |||
59 | config FIXED_PHY | 65 | config FIXED_PHY |
60 | tristate "Drivers for PHY emulation on fixed speed/link" | 66 | tristate "Drivers for PHY emulation on fixed speed/link" |
61 | depends on PHYLIB | 67 | depends on PHYLIB |
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 320f8323123f..bcd1efbd2a18 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile | |||
@@ -10,4 +10,5 @@ obj-$(CONFIG_LXT_PHY) += lxt.o | |||
10 | obj-$(CONFIG_QSEMI_PHY) += qsemi.o | 10 | obj-$(CONFIG_QSEMI_PHY) += qsemi.o |
11 | obj-$(CONFIG_SMSC_PHY) += smsc.o | 11 | obj-$(CONFIG_SMSC_PHY) += smsc.o |
12 | obj-$(CONFIG_VITESSE_PHY) += vitesse.o | 12 | obj-$(CONFIG_VITESSE_PHY) += vitesse.o |
13 | obj-$(CONFIG_BROADCOM_PHY) += broadcom.o | ||
13 | obj-$(CONFIG_FIXED_PHY) += fixed.o | 14 | obj-$(CONFIG_FIXED_PHY) += fixed.o |
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c new file mode 100644 index 000000000000..29666c85ed55 --- /dev/null +++ b/drivers/net/phy/broadcom.c | |||
@@ -0,0 +1,175 @@ | |||
1 | /* | ||
2 | * drivers/net/phy/broadcom.c | ||
3 | * | ||
4 | * Broadcom BCM5411, BCM5421 and BCM5461 Gigabit Ethernet | ||
5 | * transceivers. | ||
6 | * | ||
7 | * Copyright (c) 2006 Maciej W. Rozycki | ||
8 | * | ||
9 | * Inspired by code written by Amy Fong. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <linux/phy.h> | ||
19 | |||
20 | #define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ | ||
21 | #define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ | ||
22 | #define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */ | ||
23 | |||
24 | #define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ | ||
25 | #define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */ | ||
26 | |||
27 | #define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ | ||
28 | #define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */ | ||
29 | #define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */ | ||
30 | #define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */ | ||
31 | #define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */ | ||
32 | #define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */ | ||
33 | #define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */ | ||
34 | #define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */ | ||
35 | #define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */ | ||
36 | #define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */ | ||
37 | #define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */ | ||
38 | #define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */ | ||
39 | #define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */ | ||
40 | #define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */ | ||
41 | #define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */ | ||
42 | #define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */ | ||
43 | #define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */ | ||
44 | |||
45 | MODULE_DESCRIPTION("Broadcom PHY driver"); | ||
46 | MODULE_AUTHOR("Maciej W. Rozycki"); | ||
47 | MODULE_LICENSE("GPL"); | ||
48 | |||
49 | static int bcm54xx_config_init(struct phy_device *phydev) | ||
50 | { | ||
51 | int reg, err; | ||
52 | |||
53 | reg = phy_read(phydev, MII_BCM54XX_ECR); | ||
54 | if (reg < 0) | ||
55 | return reg; | ||
56 | |||
57 | /* Mask interrupts globally. */ | ||
58 | reg |= MII_BCM54XX_ECR_IM; | ||
59 | err = phy_write(phydev, MII_BCM54XX_ECR, reg); | ||
60 | if (err < 0) | ||
61 | return err; | ||
62 | |||
63 | /* Unmask events we are interested in. */ | ||
64 | reg = ~(MII_BCM54XX_INT_DUPLEX | | ||
65 | MII_BCM54XX_INT_SPEED | | ||
66 | MII_BCM54XX_INT_LINK); | ||
67 | err = phy_write(phydev, MII_BCM54XX_IMR, reg); | ||
68 | if (err < 0) | ||
69 | return err; | ||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static int bcm54xx_ack_interrupt(struct phy_device *phydev) | ||
74 | { | ||
75 | int reg; | ||
76 | |||
77 | /* Clear pending interrupts. */ | ||
78 | reg = phy_read(phydev, MII_BCM54XX_ISR); | ||
79 | if (reg < 0) | ||
80 | return reg; | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static int bcm54xx_config_intr(struct phy_device *phydev) | ||
86 | { | ||
87 | int reg, err; | ||
88 | |||
89 | reg = phy_read(phydev, MII_BCM54XX_ECR); | ||
90 | if (reg < 0) | ||
91 | return reg; | ||
92 | |||
93 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | ||
94 | reg &= ~MII_BCM54XX_ECR_IM; | ||
95 | else | ||
96 | reg |= MII_BCM54XX_ECR_IM; | ||
97 | |||
98 | err = phy_write(phydev, MII_BCM54XX_ECR, reg); | ||
99 | return err; | ||
100 | } | ||
101 | |||
102 | static struct phy_driver bcm5411_driver = { | ||
103 | .phy_id = 0x00206070, | ||
104 | .phy_id_mask = 0xfffffff0, | ||
105 | .name = "Broadcom BCM5411", | ||
106 | .features = PHY_GBIT_FEATURES, | ||
107 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | ||
108 | .config_init = bcm54xx_config_init, | ||
109 | .config_aneg = genphy_config_aneg, | ||
110 | .read_status = genphy_read_status, | ||
111 | .ack_interrupt = bcm54xx_ack_interrupt, | ||
112 | .config_intr = bcm54xx_config_intr, | ||
113 | .driver = { .owner = THIS_MODULE }, | ||
114 | }; | ||
115 | |||
116 | static struct phy_driver bcm5421_driver = { | ||
117 | .phy_id = 0x002060e0, | ||
118 | .phy_id_mask = 0xfffffff0, | ||
119 | .name = "Broadcom BCM5421", | ||
120 | .features = PHY_GBIT_FEATURES, | ||
121 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | ||
122 | .config_init = bcm54xx_config_init, | ||
123 | .config_aneg = genphy_config_aneg, | ||
124 | .read_status = genphy_read_status, | ||
125 | .ack_interrupt = bcm54xx_ack_interrupt, | ||
126 | .config_intr = bcm54xx_config_intr, | ||
127 | .driver = { .owner = THIS_MODULE }, | ||
128 | }; | ||
129 | |||
130 | static struct phy_driver bcm5461_driver = { | ||
131 | .phy_id = 0x002060c0, | ||
132 | .phy_id_mask = 0xfffffff0, | ||
133 | .name = "Broadcom BCM5461", | ||
134 | .features = PHY_GBIT_FEATURES, | ||
135 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | ||
136 | .config_init = bcm54xx_config_init, | ||
137 | .config_aneg = genphy_config_aneg, | ||
138 | .read_status = genphy_read_status, | ||
139 | .ack_interrupt = bcm54xx_ack_interrupt, | ||
140 | .config_intr = bcm54xx_config_intr, | ||
141 | .driver = { .owner = THIS_MODULE }, | ||
142 | }; | ||
143 | |||
144 | static int __init broadcom_init(void) | ||
145 | { | ||
146 | int ret; | ||
147 | |||
148 | ret = phy_driver_register(&bcm5411_driver); | ||
149 | if (ret) | ||
150 | goto out_5411; | ||
151 | ret = phy_driver_register(&bcm5421_driver); | ||
152 | if (ret) | ||
153 | goto out_5421; | ||
154 | ret = phy_driver_register(&bcm5461_driver); | ||
155 | if (ret) | ||
156 | goto out_5461; | ||
157 | return ret; | ||
158 | |||
159 | out_5461: | ||
160 | phy_driver_unregister(&bcm5421_driver); | ||
161 | out_5421: | ||
162 | phy_driver_unregister(&bcm5411_driver); | ||
163 | out_5411: | ||
164 | return ret; | ||
165 | } | ||
166 | |||
167 | static void __exit broadcom_exit(void) | ||
168 | { | ||
169 | phy_driver_unregister(&bcm5461_driver); | ||
170 | phy_driver_unregister(&bcm5421_driver); | ||
171 | phy_driver_unregister(&bcm5411_driver); | ||
172 | } | ||
173 | |||
174 | module_init(broadcom_init); | ||
175 | module_exit(broadcom_exit); | ||
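For context on the three phy_driver entries above: the PHY core selects a driver by masking the ID it reads from the PHY against phy_id_mask and comparing with phy_id, which is why the BCM5411/5421/5461 entries differ only in the upper ID bits. A one-function sketch of that test is given below; the function name is hypothetical, and the matching itself is done by the PHY core, not by this driver.

/* Hedged sketch of the ID match the PHY core performs for the entries above.
 * bcm_id_matches() is an illustrative name only. */
static int bcm_id_matches(u32 hw_phy_id, const struct phy_driver *drv)
{
	/* e.g. a BCM5411 reads back 0x0020607x, which matches phy_id 0x00206070
	 * under the 0xfffffff0 mask */
	return (hw_phy_id & drv->phy_id_mask) == (drv->phy_id & drv->phy_id_mask);
}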
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 3af9fcf76c81..88237bdb5255 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Author: Andy Fleming | 7 | * Author: Andy Fleming |
8 | * | 8 | * |
9 | * Copyright (c) 2004 Freescale Semiconductor, Inc. | 9 | * Copyright (c) 2004 Freescale Semiconductor, Inc. |
10 | * Copyright (c) 2006 Maciej W. Rozycki | ||
10 | * | 11 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 12 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the | 13 | * under the terms of the GNU General Public License as published by the |
@@ -32,6 +33,8 @@ | |||
32 | #include <linux/mii.h> | 33 | #include <linux/mii.h> |
33 | #include <linux/ethtool.h> | 34 | #include <linux/ethtool.h> |
34 | #include <linux/phy.h> | 35 | #include <linux/phy.h> |
36 | #include <linux/timer.h> | ||
37 | #include <linux/workqueue.h> | ||
35 | 38 | ||
36 | #include <asm/io.h> | 39 | #include <asm/io.h> |
37 | #include <asm/irq.h> | 40 | #include <asm/irq.h> |
@@ -484,6 +487,9 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) | |||
484 | { | 487 | { |
485 | struct phy_device *phydev = phy_dat; | 488 | struct phy_device *phydev = phy_dat; |
486 | 489 | ||
490 | if (PHY_HALTED == phydev->state) | ||
491 | return IRQ_NONE; /* It can't be ours. */ | ||
492 | |||
487 | /* The MDIO bus is not allowed to be written in interrupt | 493 | /* The MDIO bus is not allowed to be written in interrupt |
488 | * context, so we need to disable the irq here. A work | 494 | * context, so we need to disable the irq here. A work |
489 | * queue will write the PHY to disable and clear the | 495 | * queue will write the PHY to disable and clear the |
@@ -577,6 +583,13 @@ int phy_stop_interrupts(struct phy_device *phydev) | |||
577 | if (err) | 583 | if (err) |
578 | phy_error(phydev); | 584 | phy_error(phydev); |
579 | 585 | ||
586 | /* | ||
587 | * Finish any pending work; we might have been scheduled | ||
588 | * to be called from keventd ourselves, though. | ||
589 | */ | ||
590 | if (!current_is_keventd()) | ||
591 | flush_scheduled_work(); | ||
592 | |||
580 | free_irq(phydev->irq, phydev); | 593 | free_irq(phydev->irq, phydev); |
581 | 594 | ||
582 | return err; | 595 | return err; |
@@ -603,7 +616,8 @@ static void phy_change(void *data) | |||
603 | enable_irq(phydev->irq); | 616 | enable_irq(phydev->irq); |
604 | 617 | ||
605 | /* Reenable interrupts */ | 618 | /* Reenable interrupts */ |
606 | err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); | 619 | if (PHY_HALTED != phydev->state) |
620 | err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); | ||
607 | 621 | ||
608 | if (err) | 622 | if (err) |
609 | goto irq_enable_err; | 623 | goto irq_enable_err; |
@@ -624,18 +638,24 @@ void phy_stop(struct phy_device *phydev) | |||
624 | if (PHY_HALTED == phydev->state) | 638 | if (PHY_HALTED == phydev->state) |
625 | goto out_unlock; | 639 | goto out_unlock; |
626 | 640 | ||
627 | if (phydev->irq != PHY_POLL) { | 641 | phydev->state = PHY_HALTED; |
628 | /* Clear any pending interrupts */ | ||
629 | phy_clear_interrupt(phydev); | ||
630 | 642 | ||
643 | if (phydev->irq != PHY_POLL) { | ||
631 | /* Disable PHY Interrupts */ | 644 | /* Disable PHY Interrupts */ |
632 | phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); | 645 | phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); |
633 | } | ||
634 | 646 | ||
635 | phydev->state = PHY_HALTED; | 647 | /* Clear any pending interrupts */ |
648 | phy_clear_interrupt(phydev); | ||
649 | } | ||
636 | 650 | ||
637 | out_unlock: | 651 | out_unlock: |
638 | spin_unlock(&phydev->lock); | 652 | spin_unlock(&phydev->lock); |
653 | |||
654 | /* | ||
655 | * Cannot call flush_scheduled_work() here as desired because | ||
656 | * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change() | ||
657 | * will not reenable interrupts. | ||
658 | */ | ||
639 | } | 659 | } |
640 | 660 | ||
641 | 661 | ||
@@ -693,60 +713,57 @@ static void phy_timer(unsigned long data) | |||
693 | 713 | ||
694 | break; | 714 | break; |
695 | case PHY_AN: | 715 | case PHY_AN: |
716 | err = phy_read_status(phydev); | ||
717 | |||
718 | if (err < 0) | ||
719 | break; | ||
720 | |||
721 | /* If the link is down, give up on | ||
722 | * negotiation for now */ | ||
723 | if (!phydev->link) { | ||
724 | phydev->state = PHY_NOLINK; | ||
725 | netif_carrier_off(phydev->attached_dev); | ||
726 | phydev->adjust_link(phydev->attached_dev); | ||
727 | break; | ||
728 | } | ||
729 | |||
696 | /* Check if negotiation is done. Break | 730 | /* Check if negotiation is done. Break |
697 | * if there's an error */ | 731 | * if there's an error */ |
698 | err = phy_aneg_done(phydev); | 732 | err = phy_aneg_done(phydev); |
699 | if (err < 0) | 733 | if (err < 0) |
700 | break; | 734 | break; |
701 | 735 | ||
702 | /* If auto-negotiation is done, we change to | 736 | /* If AN is done, we're running */ |
703 | * either RUNNING, or NOLINK */ | ||
704 | if (err > 0) { | 737 | if (err > 0) { |
705 | err = phy_read_status(phydev); | 738 | phydev->state = PHY_RUNNING; |
739 | netif_carrier_on(phydev->attached_dev); | ||
740 | phydev->adjust_link(phydev->attached_dev); | ||
741 | |||
742 | } else if (0 == phydev->link_timeout--) { | ||
743 | int idx; | ||
706 | 744 | ||
707 | if (err) | 745 | needs_aneg = 1; |
746 | /* If we have the magic_aneg bit, | ||
747 | * we try again */ | ||
748 | if (phydev->drv->flags & PHY_HAS_MAGICANEG) | ||
708 | break; | 749 | break; |
709 | 750 | ||
710 | if (phydev->link) { | 751 | /* The timer expired, and we still |
711 | phydev->state = PHY_RUNNING; | 752 | * don't have a setting, so we try |
712 | netif_carrier_on(phydev->attached_dev); | 753 | * forcing it until we find one that |
713 | } else { | 754 | * works, starting from the fastest speed, |
714 | phydev->state = PHY_NOLINK; | 755 | * and working our way down */ |
715 | netif_carrier_off(phydev->attached_dev); | 756 | idx = phy_find_valid(0, phydev->supported); |
716 | } | ||
717 | 757 | ||
718 | phydev->adjust_link(phydev->attached_dev); | 758 | phydev->speed = settings[idx].speed; |
759 | phydev->duplex = settings[idx].duplex; | ||
719 | 760 | ||
720 | } else if (0 == phydev->link_timeout--) { | 761 | phydev->autoneg = AUTONEG_DISABLE; |
721 | /* The counter expired, so either we | ||
722 | * switch to forced mode, or the | ||
723 | * magic_aneg bit exists, and we try aneg | ||
724 | * again */ | ||
725 | if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) { | ||
726 | int idx; | ||
727 | |||
728 | /* We'll start from the | ||
729 | * fastest speed, and work | ||
730 | * our way down */ | ||
731 | idx = phy_find_valid(0, | ||
732 | phydev->supported); | ||
733 | |||
734 | phydev->speed = settings[idx].speed; | ||
735 | phydev->duplex = settings[idx].duplex; | ||
736 | |||
737 | phydev->autoneg = AUTONEG_DISABLE; | ||
738 | phydev->state = PHY_FORCING; | ||
739 | phydev->link_timeout = | ||
740 | PHY_FORCE_TIMEOUT; | ||
741 | |||
742 | pr_info("Trying %d/%s\n", | ||
743 | phydev->speed, | ||
744 | DUPLEX_FULL == | ||
745 | phydev->duplex ? | ||
746 | "FULL" : "HALF"); | ||
747 | } | ||
748 | 762 | ||
749 | needs_aneg = 1; | 763 | pr_info("Trying %d/%s\n", phydev->speed, |
764 | DUPLEX_FULL == | ||
765 | phydev->duplex ? | ||
766 | "FULL" : "HALF"); | ||
750 | } | 767 | } |
751 | break; | 768 | break; |
752 | case PHY_NOLINK: | 769 | case PHY_NOLINK: |
@@ -762,7 +779,7 @@ static void phy_timer(unsigned long data) | |||
762 | } | 779 | } |
763 | break; | 780 | break; |
764 | case PHY_FORCING: | 781 | case PHY_FORCING: |
765 | err = phy_read_status(phydev); | 782 | err = genphy_update_link(phydev); |
766 | 783 | ||
767 | if (err) | 784 | if (err) |
768 | break; | 785 | break; |
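The common thread in the phy.c changes above is shutdown ordering: phy_stop() now sets PHY_HALTED before touching the interrupt hardware, and both phy_interrupt() and phy_change() back off once that state is set, so a late-running work item can no longer re-enable a freed IRQ. A hedged sketch of the caller-side ordering this protects is shown below; the surrounding MAC driver function is illustrative, and only phy_stop() and phy_stop_interrupts() come from this file.

/* Hedged sketch of the teardown ordering the PHY_HALTED checks protect.
 * example_mac_down() is illustrative; error handling is omitted. */
static void example_mac_down(struct phy_device *phydev)
{
	phy_stop(phydev);		/* marks PHY_HALTED, masks PHY interrupts */
	phy_stop_interrupts(phydev);	/* flushes pending work, then frees the IRQ */
}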
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 3bbd5e70c209..b01fc70a57db 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -59,6 +59,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) | |||
59 | dev->duplex = -1; | 59 | dev->duplex = -1; |
60 | dev->pause = dev->asym_pause = 0; | 60 | dev->pause = dev->asym_pause = 0; |
61 | dev->link = 1; | 61 | dev->link = 1; |
62 | dev->interface = PHY_INTERFACE_MODE_GMII; | ||
62 | 63 | ||
63 | dev->autoneg = AUTONEG_ENABLE; | 64 | dev->autoneg = AUTONEG_ENABLE; |
64 | 65 | ||
@@ -137,11 +138,12 @@ void phy_prepare_link(struct phy_device *phydev, | |||
137 | * the desired functionality. | 138 | * the desired functionality. |
138 | */ | 139 | */ |
139 | struct phy_device * phy_connect(struct net_device *dev, const char *phy_id, | 140 | struct phy_device * phy_connect(struct net_device *dev, const char *phy_id, |
140 | void (*handler)(struct net_device *), u32 flags) | 141 | void (*handler)(struct net_device *), u32 flags, |
142 | u32 interface) | ||
141 | { | 143 | { |
142 | struct phy_device *phydev; | 144 | struct phy_device *phydev; |
143 | 145 | ||
144 | phydev = phy_attach(dev, phy_id, flags); | 146 | phydev = phy_attach(dev, phy_id, flags, interface); |
145 | 147 | ||
146 | if (IS_ERR(phydev)) | 148 | if (IS_ERR(phydev)) |
147 | return phydev; | 149 | return phydev; |
@@ -186,7 +188,7 @@ static int phy_compare_id(struct device *dev, void *data) | |||
186 | } | 188 | } |
187 | 189 | ||
188 | struct phy_device *phy_attach(struct net_device *dev, | 190 | struct phy_device *phy_attach(struct net_device *dev, |
189 | const char *phy_id, u32 flags) | 191 | const char *phy_id, u32 flags, u32 interface) |
190 | { | 192 | { |
191 | struct bus_type *bus = &mdio_bus_type; | 193 | struct bus_type *bus = &mdio_bus_type; |
192 | struct phy_device *phydev; | 194 | struct phy_device *phydev; |
@@ -231,6 +233,20 @@ struct phy_device *phy_attach(struct net_device *dev, | |||
231 | 233 | ||
232 | phydev->dev_flags = flags; | 234 | phydev->dev_flags = flags; |
233 | 235 | ||
236 | phydev->interface = interface; | ||
237 | |||
238 | /* Do initial configuration here, now that | ||
239 | * we have certain key parameters | ||
240 | * (dev_flags and interface) */ | ||
241 | if (phydev->drv->config_init) { | ||
242 | int err; | ||
243 | |||
244 | err = phydev->drv->config_init(phydev); | ||
245 | |||
246 | if (err < 0) | ||
247 | return ERR_PTR(err); | ||
248 | } | ||
249 | |||
234 | return phydev; | 250 | return phydev; |
235 | } | 251 | } |
236 | EXPORT_SYMBOL(phy_attach); | 252 | EXPORT_SYMBOL(phy_attach); |
@@ -427,6 +443,7 @@ int genphy_update_link(struct phy_device *phydev) | |||
427 | 443 | ||
428 | return 0; | 444 | return 0; |
429 | } | 445 | } |
446 | EXPORT_SYMBOL(genphy_update_link); | ||
430 | 447 | ||
431 | /* genphy_read_status | 448 | /* genphy_read_status |
432 | * | 449 | * |
@@ -611,13 +628,8 @@ static int phy_probe(struct device *dev) | |||
611 | 628 | ||
612 | spin_unlock(&phydev->lock); | 629 | spin_unlock(&phydev->lock); |
613 | 630 | ||
614 | if (err < 0) | ||
615 | return err; | ||
616 | |||
617 | if (phydev->drv->config_init) | ||
618 | err = phydev->drv->config_init(phydev); | ||
619 | |||
620 | return err; | 631 | return err; |
632 | |||
621 | } | 633 | } |
622 | 634 | ||
623 | static int phy_remove(struct device *dev) | 635 | static int phy_remove(struct device *dev) |
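With this series, phy_connect() and phy_attach() grow a final argument describing the PHY connection interface, and config_init() now runs at attach time once dev_flags and interface are known. The fragment below is a minimal, hypothetical sketch of a MAC driver call site under the new signature; the bus id "0:01", my_adjust_link() and the GMII choice are assumptions made for illustration, not taken from this patch set.

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Placeholder link-change callback. */
static void my_adjust_link(struct net_device *dev)
{
	/* react to phydev->link / speed / duplex changes here */
}

static int my_attach_phy(struct net_device *dev)
{
	struct phy_device *phydev;

	/* The fifth argument is new: the PHY connection interface. */
	phydev = phy_connect(dev, "0:01", &my_adjust_link, 0,
			     PHY_INTERFACE_MODE_GMII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	return 0;
}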
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index b977ed85ff39..45d3ca431957 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -571,8 +571,8 @@ static void rtl8169_xmii_reset_enable(void __iomem *ioaddr) | |||
571 | { | 571 | { |
572 | unsigned int val; | 572 | unsigned int val; |
573 | 573 | ||
574 | val = (mdio_read(ioaddr, MII_BMCR) | BMCR_RESET) & 0xffff; | 574 | mdio_write(ioaddr, MII_BMCR, BMCR_RESET); |
575 | mdio_write(ioaddr, MII_BMCR, val); | 575 | val = mdio_read(ioaddr, MII_BMCR); |
576 | } | 576 | } |
577 | 577 | ||
578 | static void rtl8169_check_link_status(struct net_device *dev, | 578 | static void rtl8169_check_link_status(struct net_device *dev, |
@@ -1406,6 +1406,22 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, | |||
1406 | free_netdev(dev); | 1406 | free_netdev(dev); |
1407 | } | 1407 | } |
1408 | 1408 | ||
1409 | static void rtl8169_phy_reset(struct net_device *dev, | ||
1410 | struct rtl8169_private *tp) | ||
1411 | { | ||
1412 | void __iomem *ioaddr = tp->mmio_addr; | ||
1413 | int i; | ||
1414 | |||
1415 | tp->phy_reset_enable(ioaddr); | ||
1416 | for (i = 0; i < 100; i++) { | ||
1417 | if (!tp->phy_reset_pending(ioaddr)) | ||
1418 | return; | ||
1419 | msleep(1); | ||
1420 | } | ||
1421 | if (netif_msg_link(tp)) | ||
1422 | printk(KERN_ERR "%s: PHY reset failed.\n", dev->name); | ||
1423 | } | ||
1424 | |||
1409 | static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | 1425 | static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) |
1410 | { | 1426 | { |
1411 | void __iomem *ioaddr = tp->mmio_addr; | 1427 | void __iomem *ioaddr = tp->mmio_addr; |
@@ -1434,6 +1450,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | |||
1434 | 1450 | ||
1435 | rtl8169_link_option(board_idx, &autoneg, &speed, &duplex); | 1451 | rtl8169_link_option(board_idx, &autoneg, &speed, &duplex); |
1436 | 1452 | ||
1453 | rtl8169_phy_reset(dev, tp); | ||
1454 | |||
1437 | rtl8169_set_speed(dev, autoneg, speed, duplex); | 1455 | rtl8169_set_speed(dev, autoneg, speed, duplex); |
1438 | 1456 | ||
1439 | if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp)) | 1457 | if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp)) |
diff --git a/drivers/net/sk98lin/skethtool.c b/drivers/net/sk98lin/skethtool.c index e5cb5b548b88..36460694eb82 100644 --- a/drivers/net/sk98lin/skethtool.c +++ b/drivers/net/sk98lin/skethtool.c | |||
@@ -581,6 +581,30 @@ static int setRxCsum(struct net_device *dev, u32 data) | |||
581 | return 0; | 581 | return 0; |
582 | } | 582 | } |
583 | 583 | ||
584 | static int getRegsLen(struct net_device *dev) | ||
585 | { | ||
586 | return 0x4000; | ||
587 | } | ||
588 | |||
589 | /* | ||
590 | * Returns copy of whole control register region | ||
591 | * Note: skip RAM address register because accessing it will | ||
592 | * cause bus hangs! | ||
593 | */ | ||
594 | static void getRegs(struct net_device *dev, struct ethtool_regs *regs, | ||
595 | void *p) | ||
596 | { | ||
597 | DEV_NET *pNet = netdev_priv(dev); | ||
598 | const void __iomem *io = pNet->pAC->IoBase; | ||
599 | |||
600 | regs->version = 1; | ||
601 | memset(p, 0, regs->len); | ||
602 | memcpy_fromio(p, io, B3_RAM_ADDR); | ||
603 | |||
604 | memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, | ||
605 | regs->len - B3_RI_WTO_R1); | ||
606 | } | ||
607 | |||
584 | const struct ethtool_ops SkGeEthtoolOps = { | 608 | const struct ethtool_ops SkGeEthtoolOps = { |
585 | .get_settings = getSettings, | 609 | .get_settings = getSettings, |
586 | .set_settings = setSettings, | 610 | .set_settings = setSettings, |
@@ -599,4 +623,6 @@ const struct ethtool_ops SkGeEthtoolOps = { | |||
599 | .set_tx_csum = setTxCsum, | 623 | .set_tx_csum = setTxCsum, |
600 | .get_rx_csum = getRxCsum, | 624 | .get_rx_csum = getRxCsum, |
601 | .set_rx_csum = setRxCsum, | 625 | .set_rx_csum = setRxCsum, |
626 | .get_regs = getRegs, | ||
627 | .get_regs_len = getRegsLen, | ||
602 | }; | 628 | }; |
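A register dump served by the new getRegsLen()/getRegs() hooks is reached from user space through the SIOCETHTOOL ioctl (the same path `ethtool -d eth0` takes). Below is a minimal, hypothetical userspace sketch, assuming an interface named eth0; it first asks ETHTOOL_GDRVINFO for regdump_len and then requests ETHTOOL_GREGS.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct ethtool_drvinfo drvinfo;
	struct ethtool_regs *regs;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */

	memset(&drvinfo, 0, sizeof(drvinfo));
	drvinfo.cmd = ETHTOOL_GDRVINFO;
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* For this driver, getRegsLen() reports 0x4000 bytes. */
	regs = calloc(1, sizeof(*regs) + drvinfo.regdump_len);
	if (!regs)
		return 1;
	regs->cmd = ETHTOOL_GREGS;
	regs->len = drvinfo.regdump_len;

	ifr.ifr_data = (char *)regs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	printf("dumped %u register bytes (version %u)\n",
	       regs->len, regs->version);
	free(regs);
	return 0;
}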
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index d4913c3de2a1..a5d41ebc9fb4 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c | |||
@@ -113,6 +113,7 @@ | |||
113 | #include <linux/init.h> | 113 | #include <linux/init.h> |
114 | #include <linux/dma-mapping.h> | 114 | #include <linux/dma-mapping.h> |
115 | #include <linux/ip.h> | 115 | #include <linux/ip.h> |
116 | #include <linux/mii.h> | ||
116 | 117 | ||
117 | #include "h/skdrv1st.h" | 118 | #include "h/skdrv1st.h" |
118 | #include "h/skdrv2nd.h" | 119 | #include "h/skdrv2nd.h" |
@@ -2843,6 +2844,56 @@ unsigned long Flags; /* for spin lock */ | |||
2843 | return(&pAC->stats); | 2844 | return(&pAC->stats); |
2844 | } /* SkGeStats */ | 2845 | } /* SkGeStats */ |
2845 | 2846 | ||
2847 | /* | ||
2848 | * Basic MII register access | ||
2849 | */ | ||
2850 | static int SkGeMiiIoctl(struct net_device *dev, | ||
2851 | struct mii_ioctl_data *data, int cmd) | ||
2852 | { | ||
2853 | DEV_NET *pNet = netdev_priv(dev); | ||
2854 | SK_AC *pAC = pNet->pAC; | ||
2855 | SK_IOC IoC = pAC->IoBase; | ||
2856 | int Port = pNet->PortNr; | ||
2857 | SK_GEPORT *pPrt = &pAC->GIni.GP[Port]; | ||
2858 | unsigned long Flags; | ||
2859 | int err = 0; | ||
2860 | int reg = data->reg_num & 0x1f; | ||
2861 | SK_U16 val = data->val_in; | ||
2862 | |||
2863 | if (!netif_running(dev)) | ||
2864 | return -ENODEV; /* Phy still in reset */ | ||
2865 | |||
2866 | spin_lock_irqsave(&pAC->SlowPathLock, Flags); | ||
2867 | switch(cmd) { | ||
2868 | case SIOCGMIIPHY: | ||
2869 | data->phy_id = pPrt->PhyAddr; | ||
2870 | |||
2871 | /* fallthru */ | ||
2872 | case SIOCGMIIREG: | ||
2873 | if (pAC->GIni.GIGenesis) | ||
2874 | SkXmPhyRead(pAC, IoC, Port, reg, &val); | ||
2875 | else | ||
2876 | SkGmPhyRead(pAC, IoC, Port, reg, &val); | ||
2877 | |||
2878 | data->val_out = val; | ||
2879 | break; | ||
2880 | |||
2881 | case SIOCSMIIREG: | ||
2882 | if (!capable(CAP_NET_ADMIN)) | ||
2883 | err = -EPERM; | ||
2884 | |||
2885 | else if (pAC->GIni.GIGenesis) | ||
2886 | SkXmPhyWrite(pAC, IoC, Port, reg, val); | ||
2887 | else | ||
2888 | SkGmPhyWrite(pAC, IoC, Port, reg, val); | ||
2889 | break; | ||
2890 | default: | ||
2891 | err = -EOPNOTSUPP; | ||
2892 | } | ||
2893 | spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); | ||
2894 | return err; | ||
2895 | } | ||
2896 | |||
2846 | 2897 | ||
2847 | /***************************************************************************** | 2898 | /***************************************************************************** |
2848 | * | 2899 | * |
@@ -2876,6 +2927,9 @@ int HeaderLength = sizeof(SK_U32) + sizeof(SK_U32); | |||
2876 | pNet = netdev_priv(dev); | 2927 | pNet = netdev_priv(dev); |
2877 | pAC = pNet->pAC; | 2928 | pAC = pNet->pAC; |
2878 | 2929 | ||
2930 | if (cmd == SIOCGMIIPHY || cmd == SIOCSMIIREG || cmd == SIOCGMIIREG) | ||
2931 | return SkGeMiiIoctl(dev, if_mii(rq), cmd); | ||
2932 | |||
2879 | if(copy_from_user(&Ioctl, rq->ifr_data, sizeof(SK_GE_IOCTL))) { | 2933 | if(copy_from_user(&Ioctl, rq->ifr_data, sizeof(SK_GE_IOCTL))) { |
2880 | return -EFAULT; | 2934 | return -EFAULT; |
2881 | } | 2935 | } |
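The new SkGeMiiIoctl() handler lets the standard MII ioctls reach the PHY, so generic tools such as mii-tool work against this driver. As a minimal illustration, the hypothetical userspace sketch below reads the PHY's basic status register; it assumes an interface named eth0 that is already up (the handler returns -ENODEV while the device is down).

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */

	/* SIOCGMIIPHY fills in the PHY address (mii->phy_id). */
	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)
		return 1;

	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return 1;

	printf("PHY %d: BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
	return 0;
}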
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index b2949035f66a..27b537c8d5e3 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -2154,8 +2154,6 @@ static void yukon_link_down(struct skge_port *skge) | |||
2154 | int port = skge->port; | 2154 | int port = skge->port; |
2155 | u16 ctrl; | 2155 | u16 ctrl; |
2156 | 2156 | ||
2157 | gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); | ||
2158 | |||
2159 | ctrl = gma_read16(hw, port, GM_GP_CTRL); | 2157 | ctrl = gma_read16(hw, port, GM_GP_CTRL); |
2160 | ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); | 2158 | ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); |
2161 | gma_write16(hw, port, GM_GP_CTRL, ctrl); | 2159 | gma_write16(hw, port, GM_GP_CTRL, ctrl); |
@@ -2167,7 +2165,6 @@ static void yukon_link_down(struct skge_port *skge) | |||
2167 | gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); | 2165 | gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); |
2168 | } | 2166 | } |
2169 | 2167 | ||
2170 | yukon_reset(hw, port); | ||
2171 | skge_link_down(skge); | 2168 | skge_link_down(skge); |
2172 | 2169 | ||
2173 | yukon_init(hw, port); | 2170 | yukon_init(hw, port); |
@@ -2255,6 +2252,7 @@ static void skge_phy_reset(struct skge_port *skge) | |||
2255 | { | 2252 | { |
2256 | struct skge_hw *hw = skge->hw; | 2253 | struct skge_hw *hw = skge->hw; |
2257 | int port = skge->port; | 2254 | int port = skge->port; |
2255 | struct net_device *dev = hw->dev[port]; | ||
2258 | 2256 | ||
2259 | netif_stop_queue(skge->netdev); | 2257 | netif_stop_queue(skge->netdev); |
2260 | netif_carrier_off(skge->netdev); | 2258 | netif_carrier_off(skge->netdev); |
@@ -2268,6 +2266,8 @@ static void skge_phy_reset(struct skge_port *skge) | |||
2268 | yukon_init(hw, port); | 2266 | yukon_init(hw, port); |
2269 | } | 2267 | } |
2270 | mutex_unlock(&hw->phy_mutex); | 2268 | mutex_unlock(&hw->phy_mutex); |
2269 | |||
2270 | dev->set_multicast_list(dev); | ||
2271 | } | 2271 | } |
2272 | 2272 | ||
2273 | /* Basic MII support */ | 2273 | /* Basic MII support */ |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 16616f5440d0..0ef1848b9761 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -104,6 +104,7 @@ static const struct pci_device_id sky2_id_table[] = { | |||
104 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, | 104 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, |
105 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ | 105 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ |
106 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ | 106 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ |
107 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ | ||
107 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, | 108 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, |
108 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, | 109 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, |
109 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, | 110 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, |
@@ -676,17 +677,15 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
676 | /* Flush Rx MAC FIFO on any flow control or error */ | 677 | /* Flush Rx MAC FIFO on any flow control or error */ |
677 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); | 678 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); |
678 | 679 | ||
679 | /* Set threshold to 0xa (64 bytes) | 680 | /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ |
680 | * ASF disabled so no need to do WA dev #4.30 | 681 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); |
681 | */ | ||
682 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); | ||
683 | 682 | ||
684 | /* Configure Tx MAC FIFO */ | 683 | /* Configure Tx MAC FIFO */ |
685 | sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); | 684 | sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); |
686 | sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); | 685 | sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); |
687 | 686 | ||
688 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { | 687 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { |
689 | sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8); | 688 | sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); |
690 | sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); | 689 | sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); |
691 | if (hw->dev[port]->mtu > ETH_DATA_LEN) { | 690 | if (hw->dev[port]->mtu > ETH_DATA_LEN) { |
692 | /* set Tx GMAC FIFO Almost Empty Threshold */ | 691 | /* set Tx GMAC FIFO Almost Empty Threshold */ |
@@ -1060,7 +1059,8 @@ static int sky2_rx_start(struct sky2_port *sky2) | |||
1060 | sky2->rx_put = sky2->rx_next = 0; | 1059 | sky2->rx_put = sky2->rx_next = 0; |
1061 | sky2_qset(hw, rxq); | 1060 | sky2_qset(hw, rxq); |
1062 | 1061 | ||
1063 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { | 1062 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && |
1063 | (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0)) { | ||
1064 | /* MAC Rx RAM Read is controlled by hardware */ | 1064 | /* MAC Rx RAM Read is controlled by hardware */ |
1065 | sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS); | 1065 | sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS); |
1066 | } | 1066 | } |
@@ -1453,7 +1453,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1453 | if (unlikely(netif_msg_tx_done(sky2))) | 1453 | if (unlikely(netif_msg_tx_done(sky2))) |
1454 | printk(KERN_DEBUG "%s: tx done %u\n", | 1454 | printk(KERN_DEBUG "%s: tx done %u\n", |
1455 | dev->name, idx); | 1455 | dev->name, idx); |
1456 | dev_kfree_skb(re->skb); | 1456 | dev_kfree_skb_any(re->skb); |
1457 | } | 1457 | } |
1458 | 1458 | ||
1459 | le->opcode = 0; /* paranoia */ | 1459 | le->opcode = 0; /* paranoia */ |
@@ -1509,7 +1509,7 @@ static int sky2_down(struct net_device *dev) | |||
1509 | 1509 | ||
1510 | /* WA for dev. #4.209 */ | 1510 | /* WA for dev. #4.209 */ |
1511 | if (hw->chip_id == CHIP_ID_YUKON_EC_U | 1511 | if (hw->chip_id == CHIP_ID_YUKON_EC_U |
1512 | && hw->chip_rev == CHIP_REV_YU_EC_U_A1) | 1512 | && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0)) |
1513 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1513 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
1514 | sky2->speed != SPEED_1000 ? | 1514 | sky2->speed != SPEED_1000 ? |
1515 | TX_STFW_ENA : TX_STFW_DIS); | 1515 | TX_STFW_ENA : TX_STFW_DIS); |
@@ -2065,7 +2065,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
2065 | case OP_RXSTAT: | 2065 | case OP_RXSTAT: |
2066 | skb = sky2_receive(dev, length, status); | 2066 | skb = sky2_receive(dev, length, status); |
2067 | if (!skb) | 2067 | if (!skb) |
2068 | break; | 2068 | goto force_update; |
2069 | 2069 | ||
2070 | skb->protocol = eth_type_trans(skb, dev); | 2070 | skb->protocol = eth_type_trans(skb, dev); |
2071 | dev->last_rx = jiffies; | 2071 | dev->last_rx = jiffies; |
@@ -2081,8 +2081,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
2081 | 2081 | ||
2082 | /* Update receiver after 16 frames */ | 2082 | /* Update receiver after 16 frames */ |
2083 | if (++buf_write[le->link] == RX_BUF_WRITE) { | 2083 | if (++buf_write[le->link] == RX_BUF_WRITE) { |
2084 | sky2_put_idx(hw, rxqaddr[le->link], | 2084 | force_update: |
2085 | sky2->rx_put); | 2085 | sky2_put_idx(hw, rxqaddr[le->link], sky2->rx_put); |
2086 | buf_write[le->link] = 0; | 2086 | buf_write[le->link] = 0; |
2087 | } | 2087 | } |
2088 | 2088 | ||
@@ -3311,7 +3311,7 @@ static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id) | |||
3311 | return IRQ_NONE; | 3311 | return IRQ_NONE; |
3312 | 3312 | ||
3313 | if (status & Y2_IS_IRQ_SW) { | 3313 | if (status & Y2_IS_IRQ_SW) { |
3314 | hw->msi_detected = 1; | 3314 | hw->msi = 1; |
3315 | wake_up(&hw->msi_wait); | 3315 | wake_up(&hw->msi_wait); |
3316 | sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); | 3316 | sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); |
3317 | } | 3317 | } |
@@ -3330,7 +3330,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw) | |||
3330 | 3330 | ||
3331 | sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); | 3331 | sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); |
3332 | 3332 | ||
3333 | err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw); | 3333 | err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); |
3334 | if (err) { | 3334 | if (err) { |
3335 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | 3335 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", |
3336 | pci_name(pdev), pdev->irq); | 3336 | pci_name(pdev), pdev->irq); |
@@ -3340,9 +3340,9 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw) | |||
3340 | sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); | 3340 | sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); |
3341 | sky2_read8(hw, B0_CTST); | 3341 | sky2_read8(hw, B0_CTST); |
3342 | 3342 | ||
3343 | wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10); | 3343 | wait_event_timeout(hw->msi_wait, hw->msi, HZ/10); |
3344 | 3344 | ||
3345 | if (!hw->msi_detected) { | 3345 | if (!hw->msi) { |
3346 | /* MSI test failed, go back to INTx mode */ | 3346 | /* MSI test failed, go back to INTx mode */ |
3347 | printk(KERN_INFO PFX "%s: No interrupt generated using MSI, " | 3347 | printk(KERN_INFO PFX "%s: No interrupt generated using MSI, " |
3348 | "switching to INTx mode.\n", | 3348 | "switching to INTx mode.\n", |
@@ -3475,7 +3475,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3475 | goto err_out_free_netdev; | 3475 | goto err_out_free_netdev; |
3476 | } | 3476 | } |
3477 | 3477 | ||
3478 | err = request_irq(pdev->irq, sky2_intr, IRQF_SHARED, dev->name, hw); | 3478 | err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED, |
3479 | dev->name, hw); | ||
3479 | if (err) { | 3480 | if (err) { |
3480 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | 3481 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", |
3481 | pci_name(pdev), pdev->irq); | 3482 | pci_name(pdev), pdev->irq); |
@@ -3505,7 +3506,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3505 | return 0; | 3506 | return 0; |
3506 | 3507 | ||
3507 | err_out_unregister: | 3508 | err_out_unregister: |
3508 | pci_disable_msi(pdev); | 3509 | if (hw->msi) |
3510 | pci_disable_msi(pdev); | ||
3509 | unregister_netdev(dev); | 3511 | unregister_netdev(dev); |
3510 | err_out_free_netdev: | 3512 | err_out_free_netdev: |
3511 | free_netdev(dev); | 3513 | free_netdev(dev); |
@@ -3548,7 +3550,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev) | |||
3548 | sky2_read8(hw, B0_CTST); | 3550 | sky2_read8(hw, B0_CTST); |
3549 | 3551 | ||
3550 | free_irq(pdev->irq, hw); | 3552 | free_irq(pdev->irq, hw); |
3551 | pci_disable_msi(pdev); | 3553 | if (hw->msi) |
3554 | pci_disable_msi(pdev); | ||
3552 | pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); | 3555 | pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); |
3553 | pci_release_regions(pdev); | 3556 | pci_release_regions(pdev); |
3554 | pci_disable_device(pdev); | 3557 | pci_disable_device(pdev); |
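Taken together, the MSI hunks above follow one pattern: enable MSI, verify with a software self-interrupt that a message actually arrives, fall back to legacy INTx if it does not, pass IRQF_SHARED only on the INTx path (an MSI vector is never shared), and disable MSI again only if it was enabled. The sketch below merely restates that shape; struct my_hw, my_test_msi() and the handler are placeholders, not the driver's code, and my_test_msi() is assumed to do what sky2_test_msi() does (raise a software IRQ and wait for it).

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_hw {
	int msi;		/* set once MSI delivery has been verified */
};

/* Assumed helper: fires a self-interrupt and returns 0 if it arrived. */
static int my_test_msi(struct pci_dev *pdev, struct my_hw *hw);

static int my_setup_irq(struct pci_dev *pdev, struct my_hw *hw,
			irqreturn_t (*handler)(int, void *))
{
	if (pci_enable_msi(pdev) == 0) {
		if (my_test_msi(pdev, hw) == 0)
			hw->msi = 1;
		else
			pci_disable_msi(pdev);	/* fall back to INTx */
	}

	/* IRQF_SHARED only makes sense for the legacy INTx line. */
	return request_irq(pdev->irq, handler,
			   hw->msi ? 0 : IRQF_SHARED, "my_driver", hw);
}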
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 6d2a23f66c9a..7760545edbf2 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -383,8 +383,13 @@ enum { | |||
383 | CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ | 383 | CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ |
384 | CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ | 384 | CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ |
385 | 385 | ||
386 | CHIP_REV_YU_EC_U_A0 = 0, | 386 | CHIP_REV_YU_EC_U_A0 = 1, |
387 | CHIP_REV_YU_EC_U_A1 = 1, | 387 | CHIP_REV_YU_EC_U_A1 = 2, |
388 | CHIP_REV_YU_EC_U_B0 = 3, | ||
389 | |||
390 | CHIP_REV_YU_FE_A1 = 1, | ||
391 | CHIP_REV_YU_FE_A2 = 2, | ||
392 | |||
388 | }; | 393 | }; |
389 | 394 | ||
390 | /* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ | 395 | /* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ |
@@ -1895,7 +1900,7 @@ struct sky2_hw { | |||
1895 | dma_addr_t st_dma; | 1900 | dma_addr_t st_dma; |
1896 | 1901 | ||
1897 | struct timer_list idle_timer; | 1902 | struct timer_list idle_timer; |
1898 | int msi_detected; | 1903 | int msi; |
1899 | wait_queue_head_t msi_wait; | 1904 | wait_queue_head_t msi_wait; |
1900 | }; | 1905 | }; |
1901 | 1906 | ||
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index 41c503d8bac4..c06ecc8002b9 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c | |||
@@ -264,8 +264,6 @@ enum alta_offsets { | |||
264 | ASICCtrl = 0x30, | 264 | ASICCtrl = 0x30, |
265 | EEData = 0x34, | 265 | EEData = 0x34, |
266 | EECtrl = 0x36, | 266 | EECtrl = 0x36, |
267 | TxStartThresh = 0x3c, | ||
268 | RxEarlyThresh = 0x3e, | ||
269 | FlashAddr = 0x40, | 267 | FlashAddr = 0x40, |
270 | FlashData = 0x44, | 268 | FlashData = 0x44, |
271 | TxStatus = 0x46, | 269 | TxStatus = 0x46, |
@@ -790,6 +788,7 @@ static int netdev_open(struct net_device *dev) | |||
790 | { | 788 | { |
791 | struct netdev_private *np = netdev_priv(dev); | 789 | struct netdev_private *np = netdev_priv(dev); |
792 | void __iomem *ioaddr = np->base; | 790 | void __iomem *ioaddr = np->base; |
791 | unsigned long flags; | ||
793 | int i; | 792 | int i; |
794 | 793 | ||
795 | /* Do we need to reset the chip??? */ | 794 | /* Do we need to reset the chip??? */ |
@@ -834,6 +833,10 @@ static int netdev_open(struct net_device *dev) | |||
834 | iowrite8(0x01, ioaddr + DebugCtrl1); | 833 | iowrite8(0x01, ioaddr + DebugCtrl1); |
835 | netif_start_queue(dev); | 834 | netif_start_queue(dev); |
836 | 835 | ||
836 | spin_lock_irqsave(&np->lock, flags); | ||
837 | reset_tx(dev); | ||
838 | spin_unlock_irqrestore(&np->lock, flags); | ||
839 | |||
837 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); | 840 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); |
838 | 841 | ||
839 | if (netif_msg_ifup(np)) | 842 | if (netif_msg_ifup(np)) |
@@ -1081,6 +1084,8 @@ reset_tx (struct net_device *dev) | |||
1081 | 1084 | ||
1082 | /* free all tx skbuff */ | 1085 | /* free all tx skbuff */ |
1083 | for (i = 0; i < TX_RING_SIZE; i++) { | 1086 | for (i = 0; i < TX_RING_SIZE; i++) { |
1087 | np->tx_ring[i].next_desc = 0; | ||
1088 | |||
1084 | skb = np->tx_skbuff[i]; | 1089 | skb = np->tx_skbuff[i]; |
1085 | if (skb) { | 1090 | if (skb) { |
1086 | pci_unmap_single(np->pci_dev, | 1091 | pci_unmap_single(np->pci_dev, |
@@ -1096,6 +1101,10 @@ reset_tx (struct net_device *dev) | |||
1096 | } | 1101 | } |
1097 | np->cur_tx = np->dirty_tx = 0; | 1102 | np->cur_tx = np->dirty_tx = 0; |
1098 | np->cur_task = 0; | 1103 | np->cur_task = 0; |
1104 | |||
1105 | np->last_tx = NULL; | ||
1106 | iowrite8(127, ioaddr + TxDMAPollPeriod); | ||
1107 | |||
1099 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); | 1108 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); |
1100 | return 0; | 1109 | return 0; |
1101 | } | 1110 | } |
@@ -1111,6 +1120,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) | |||
1111 | int tx_cnt; | 1120 | int tx_cnt; |
1112 | int tx_status; | 1121 | int tx_status; |
1113 | int handled = 0; | 1122 | int handled = 0; |
1123 | int i; | ||
1114 | 1124 | ||
1115 | 1125 | ||
1116 | do { | 1126 | do { |
@@ -1153,21 +1163,24 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) | |||
1153 | np->stats.tx_fifo_errors++; | 1163 | np->stats.tx_fifo_errors++; |
1154 | if (tx_status & 0x02) | 1164 | if (tx_status & 0x02) |
1155 | np->stats.tx_window_errors++; | 1165 | np->stats.tx_window_errors++; |
1166 | |||
1156 | /* | 1167 | /* |
1157 | ** This reset has been verified on | 1168 | ** This reset has been verified on |
1158 | ** DFE-580TX boards ! phdm@macqel.be. | 1169 | ** DFE-580TX boards ! phdm@macqel.be. |
1159 | */ | 1170 | */ |
1160 | if (tx_status & 0x10) { /* TxUnderrun */ | 1171 | if (tx_status & 0x10) { /* TxUnderrun */ |
1161 | unsigned short txthreshold; | ||
1162 | |||
1163 | txthreshold = ioread16 (ioaddr + TxStartThresh); | ||
1164 | /* Restart Tx FIFO and transmitter */ | 1172 | /* Restart Tx FIFO and transmitter */ |
1165 | sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16); | 1173 | sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16); |
1166 | iowrite16 (txthreshold, ioaddr + TxStartThresh); | ||
1167 | /* No need to reset the Tx pointer here */ | 1174 | /* No need to reset the Tx pointer here */ |
1168 | } | 1175 | } |
1169 | /* Restart the Tx. */ | 1176 | /* Restart the Tx. Need to make sure Tx is enabled */ |
1170 | iowrite16 (TxEnable, ioaddr + MACCtrl1); | 1177 | i = 10; |
1178 | do { | ||
1179 | iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1); | ||
1180 | if (ioread16(ioaddr + MACCtrl1) & TxEnabled) | ||
1181 | break; | ||
1182 | mdelay(1); | ||
1183 | } while (--i); | ||
1171 | } | 1184 | } |
1172 | /* Yup, this is a documentation bug. It cost me *hours*. */ | 1185 | /* Yup, this is a documentation bug. It cost me *hours*. */ |
1173 | iowrite16 (0, ioaddr + TxStatus); | 1186 | iowrite16 (0, ioaddr + TxStatus); |
@@ -1629,6 +1642,14 @@ static int netdev_close(struct net_device *dev) | |||
1629 | struct sk_buff *skb; | 1642 | struct sk_buff *skb; |
1630 | int i; | 1643 | int i; |
1631 | 1644 | ||
1645 | /* Wait and kill tasklet */ | ||
1646 | tasklet_kill(&np->rx_tasklet); | ||
1647 | tasklet_kill(&np->tx_tasklet); | ||
1648 | np->cur_tx = 0; | ||
1649 | np->dirty_tx = 0; | ||
1650 | np->cur_task = 0; | ||
1651 | np->last_tx = NULL; | ||
1652 | |||
1632 | netif_stop_queue(dev); | 1653 | netif_stop_queue(dev); |
1633 | 1654 | ||
1634 | if (netif_msg_ifdown(np)) { | 1655 | if (netif_msg_ifdown(np)) { |
@@ -1643,12 +1664,26 @@ static int netdev_close(struct net_device *dev) | |||
1643 | /* Disable interrupts by clearing the interrupt mask. */ | 1664 | /* Disable interrupts by clearing the interrupt mask. */ |
1644 | iowrite16(0x0000, ioaddr + IntrEnable); | 1665 | iowrite16(0x0000, ioaddr + IntrEnable); |
1645 | 1666 | ||
1667 | /* Disable Rx and Tx DMA so resources can be released safely */ | ||
1668 | iowrite32(0x500, ioaddr + DMACtrl); | ||
1669 | |||
1646 | /* Stop the chip's Tx and Rx processes. */ | 1670 | /* Stop the chip's Tx and Rx processes. */ |
1647 | iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1); | 1671 | iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1); |
1648 | 1672 | ||
1649 | /* Wait and kill tasklet */ | 1673 | for (i = 2000; i > 0; i--) { |
1650 | tasklet_kill(&np->rx_tasklet); | 1674 | if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0) |
1651 | tasklet_kill(&np->tx_tasklet); | 1675 | break; |
1676 | mdelay(1); | ||
1677 | } | ||
1678 | |||
1679 | iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset, | ||
1680 | ioaddr + ASICCtrl + 2); | ||
1681 | |||
1682 | for (i = 2000; i > 0; i--) { | ||
1683 | if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0) | ||
1684 | break; | ||
1685 | mdelay(1); | ||
1686 | } | ||
1652 | 1687 | ||
1653 | #ifdef __i386__ | 1688 | #ifdef __i386__ |
1654 | if (netif_msg_hw(np)) { | 1689 | if (netif_msg_hw(np)) { |
@@ -1686,6 +1721,7 @@ static int netdev_close(struct net_device *dev) | |||
1686 | } | 1721 | } |
1687 | } | 1722 | } |
1688 | for (i = 0; i < TX_RING_SIZE; i++) { | 1723 | for (i = 0; i < TX_RING_SIZE; i++) { |
1724 | np->tx_ring[i].next_desc = 0; | ||
1689 | skb = np->tx_skbuff[i]; | 1725 | skb = np->tx_skbuff[i]; |
1690 | if (skb) { | 1726 | if (skb) { |
1691 | pci_unmap_single(np->pci_dev, | 1727 | pci_unmap_single(np->pci_dev, |
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index cd142d0302bc..8f4ecc1109cb 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c | |||
@@ -1771,7 +1771,7 @@ static struct pci_driver olympic_driver = { | |||
1771 | 1771 | ||
1772 | static int __init olympic_pci_init(void) | 1772 | static int __init olympic_pci_init(void) |
1773 | { | 1773 | { |
1774 | return pci_module_init (&olympic_driver) ; | 1774 | return pci_register_driver(&olympic_driver) ; |
1775 | } | 1775 | } |
1776 | 1776 | ||
1777 | static void __exit olympic_pci_cleanup(void) | 1777 | static void __exit olympic_pci_cleanup(void) |
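pci_module_init() was a legacy alias for pci_register_driver(), and drivers are being converted to call the latter directly, as above. For reference, a generic (hypothetical) init/exit pair after the conversion looks like the sketch below; the driver name, device IDs and callbacks are placeholders.

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id my_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, my_pci_tbl);

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void my_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver my_pci_driver = {
	.name		= "my_driver",
	.id_table	= my_pci_tbl,
	.probe		= my_probe,
	.remove		= my_remove,
};

static int __init my_pci_init(void)
{
	return pci_register_driver(&my_pci_driver);
}

static void __exit my_pci_exit(void)
{
	pci_unregister_driver(&my_pci_driver);
}

module_init(my_pci_init);
module_exit(my_pci_exit);
MODULE_LICENSE("GPL");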
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c new file mode 100644 index 000000000000..893808ab3742 --- /dev/null +++ b/drivers/net/tsi108_eth.c | |||
@@ -0,0 +1,1708 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Copyright(c) 2006 Tundra Semiconductor Corporation. | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify it | ||
6 | under the terms of the GNU General Public License as published by the Free | ||
7 | Software Foundation; either version 2 of the License, or (at your option) | ||
8 | any later version. | ||
9 | |||
10 | This program is distributed in the hope that it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | |||
19 | *******************************************************************************/ | ||
20 | |||
21 | /* This driver is based on the driver code originally developed | ||
22 | * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by | ||
23 | * scott.wood@timesys.com * Copyright (C) 2003 TimeSys Corporation | ||
24 | * | ||
25 | * Current changes from the original version are: | ||
26 | * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com) | ||
27 | * - modifications to handle two ports independently and support for | ||
28 | * additional PHY devices (alexandre.bounine@tundra.com) | ||
29 | * - Get hardware information from platform device. (tie-fei.zang@freescale.com) | ||
30 | * | ||
31 | */ | ||
32 | |||
33 | #include <linux/module.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/net.h> | ||
37 | #include <linux/netdevice.h> | ||
38 | #include <linux/etherdevice.h> | ||
39 | #include <linux/skbuff.h> | ||
40 | #include <linux/slab.h> | ||
41 | #include <linux/sched.h> | ||
42 | #include <linux/spinlock.h> | ||
43 | #include <linux/delay.h> | ||
44 | #include <linux/crc32.h> | ||
45 | #include <linux/mii.h> | ||
46 | #include <linux/device.h> | ||
47 | #include <linux/pci.h> | ||
48 | #include <linux/rtnetlink.h> | ||
49 | #include <linux/timer.h> | ||
50 | #include <linux/platform_device.h> | ||
51 | #include <linux/etherdevice.h> | ||
52 | |||
53 | #include <asm/system.h> | ||
54 | #include <asm/io.h> | ||
55 | #include <asm/tsi108.h> | ||
56 | |||
57 | #include "tsi108_eth.h" | ||
58 | |||
59 | #define MII_READ_DELAY 10000 /* max link wait time in msec */ | ||
60 | |||
61 | #define TSI108_RXRING_LEN 256 | ||
62 | |||
63 | /* NOTE: The driver currently does not support receiving packets | ||
64 | * larger than the buffer size, so don't decrease this (unless you | ||
65 | * want to add such support). | ||
66 | */ | ||
67 | #define TSI108_RXBUF_SIZE 1536 | ||
68 | |||
69 | #define TSI108_TXRING_LEN 256 | ||
70 | |||
71 | #define TSI108_TX_INT_FREQ 64 | ||
72 | |||
73 | /* Check the phy status every half a second. */ | ||
74 | #define CHECK_PHY_INTERVAL (HZ/2) | ||
75 | |||
76 | static int tsi108_init_one(struct platform_device *pdev); | ||
77 | static int tsi108_ether_remove(struct platform_device *pdev); | ||
78 | |||
79 | struct tsi108_prv_data { | ||
80 | void __iomem *regs; /* Base of normal regs */ | ||
81 | void __iomem *phyregs; /* Base of register bank used for PHY access */ | ||
82 | |||
83 | unsigned int phy; /* Index of PHY for this interface */ | ||
84 | unsigned int irq_num; | ||
85 | unsigned int id; | ||
86 | |||
87 | struct timer_list timer;/* Timer that triggers the check phy function */ | ||
88 | unsigned int rxtail; /* Next entry in rxring to read */ | ||
89 | unsigned int rxhead; /* Next entry in rxring to give a new buffer */ | ||
90 | unsigned int rxfree; /* Number of free, allocated RX buffers */ | ||
91 | |||
92 | unsigned int rxpending; /* Non-zero if there are still descriptors | ||
93 | * to be processed from a previous descriptor | ||
94 | * interrupt condition that has been cleared */ | ||
95 | |||
96 | unsigned int txtail; /* Next TX descriptor to check status on */ | ||
97 | unsigned int txhead; /* Next TX descriptor to use */ | ||
98 | |||
99 | /* Number of free TX descriptors. This could be calculated from | ||
100 | * txhead and txtail if one descriptor were left unused to disambiguate | ||
101 | * full and empty conditions, but it's simpler to just keep track | ||
102 | * explicitly. */ | ||
103 | |||
104 | unsigned int txfree; | ||
105 | |||
106 | unsigned int phy_ok; /* The PHY is currently powered on. */ | ||
107 | |||
108 | /* PHY status (duplex is 1 for half, 2 for full, | ||
109 | * so that the default 0 indicates that neither has | ||
110 | * yet been configured). */ | ||
111 | |||
112 | unsigned int link_up; | ||
113 | unsigned int speed; | ||
114 | unsigned int duplex; | ||
115 | |||
116 | tx_desc *txring; | ||
117 | rx_desc *rxring; | ||
118 | struct sk_buff *txskbs[TSI108_TXRING_LEN]; | ||
119 | struct sk_buff *rxskbs[TSI108_RXRING_LEN]; | ||
120 | |||
121 | dma_addr_t txdma, rxdma; | ||
122 | |||
123 | /* txlock nests in misclock and phy_lock */ | ||
124 | |||
125 | spinlock_t txlock, misclock; | ||
126 | |||
127 | /* stats is used to hold the upper bits of each hardware counter, | ||
128 | * and tmpstats is used to hold the full values for returning | ||
129 | * to the caller of get_stats(). They must be separate in case | ||
130 | * an overflow interrupt occurs before the stats are consumed. | ||
131 | */ | ||
132 | |||
133 | struct net_device_stats stats; | ||
134 | struct net_device_stats tmpstats; | ||
135 | |||
136 | /* These stats are kept separate in hardware, thus require individual | ||
137 | * fields for handling carry. They are combined in get_stats. | ||
138 | */ | ||
139 | |||
140 | unsigned long rx_fcs; /* Add to rx_frame_errors */ | ||
141 | unsigned long rx_short_fcs; /* Add to rx_frame_errors */ | ||
142 | unsigned long rx_long_fcs; /* Add to rx_frame_errors */ | ||
143 | unsigned long rx_underruns; /* Add to rx_length_errors */ | ||
144 | unsigned long rx_overruns; /* Add to rx_length_errors */ | ||
145 | |||
146 | unsigned long tx_coll_abort; /* Add to tx_aborted_errors/collisions */ | ||
147 | unsigned long tx_pause_drop; /* Add to tx_aborted_errors */ | ||
148 | |||
149 | unsigned long mc_hash[16]; | ||
150 | u32 msg_enable; /* debug message level */ | ||
151 | struct mii_if_info mii_if; | ||
152 | unsigned int init_media; | ||
153 | }; | ||
154 | |||
155 | /* Structure for a device driver */ | ||
156 | |||
157 | static struct platform_driver tsi_eth_driver = { | ||
158 | .probe = tsi108_init_one, | ||
159 | .remove = tsi108_ether_remove, | ||
160 | .driver = { | ||
161 | .name = "tsi-ethernet", | ||
162 | }, | ||
163 | }; | ||
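The driver binds by name to a "tsi-ethernet" platform device that the board support code is expected to register. The sketch below is purely illustrative of such a registration; the register base, IRQ number and resource layout are invented for the example, and the exact hardware description this driver consumes comes from the board code and asm/tsi108.h rather than from this file.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource my_tsi_eth_resources[] = {
	{
		.start	= 0xc0006000,	/* hypothetical register base */
		.end	= 0xc0006fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 16,		/* hypothetical IRQ */
		.end	= 16,
		.flags	= IORESOURCE_IRQ,
	},
};

/* Typically called from the board's arch setup code. */
static int __init my_board_add_tsi_eth(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("tsi-ethernet", 0,
					my_tsi_eth_resources,
					ARRAY_SIZE(my_tsi_eth_resources));
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}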
164 | |||
165 | static void tsi108_timed_checker(unsigned long dev_ptr); | ||
166 | |||
167 | static void dump_eth_one(struct net_device *dev) | ||
168 | { | ||
169 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
170 | |||
171 | printk("Dumping %s...\n", dev->name); | ||
172 | printk("intstat %x intmask %x phy_ok %d" | ||
173 | " link %d speed %d duplex %d\n", | ||
174 | TSI_READ(TSI108_EC_INTSTAT), | ||
175 | TSI_READ(TSI108_EC_INTMASK), data->phy_ok, | ||
176 | data->link_up, data->speed, data->duplex); | ||
177 | |||
178 | printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n", | ||
179 | data->txhead, data->txtail, data->txfree, | ||
180 | TSI_READ(TSI108_EC_TXSTAT), | ||
181 | TSI_READ(TSI108_EC_TXESTAT), | ||
182 | TSI_READ(TSI108_EC_TXERR)); | ||
183 | |||
184 | printk("RX: head %d, tail %d, free %d, stat %x," | ||
185 | " estat %x, err %x, pending %d\n\n", | ||
186 | data->rxhead, data->rxtail, data->rxfree, | ||
187 | TSI_READ(TSI108_EC_RXSTAT), | ||
188 | TSI_READ(TSI108_EC_RXESTAT), | ||
189 | TSI_READ(TSI108_EC_RXERR), data->rxpending); | ||
190 | } | ||
191 | |||
192 | /* Synchronization is needed between the thread and up/down events. | ||
193 | * Note that the PHY is accessed through the same registers for both | ||
194 | * interfaces, so this can't be made interface-specific. | ||
195 | */ | ||
196 | |||
197 | static DEFINE_SPINLOCK(phy_lock); | ||
198 | |||
199 | static int tsi108_read_mii(struct tsi108_prv_data *data, int reg) | ||
200 | { | ||
201 | unsigned i; | ||
202 | |||
203 | TSI_WRITE_PHY(TSI108_MAC_MII_ADDR, | ||
204 | (data->phy << TSI108_MAC_MII_ADDR_PHY) | | ||
205 | (reg << TSI108_MAC_MII_ADDR_REG)); | ||
206 | TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0); | ||
207 | TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ); | ||
208 | for (i = 0; i < 100; i++) { | ||
209 | if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) & | ||
210 | (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY))) | ||
211 | break; | ||
212 | udelay(10); | ||
213 | } | ||
214 | |||
215 | if (i == 100) | ||
216 | return 0xffff; | ||
217 | else | ||
218 | return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN)); | ||
219 | } | ||
220 | |||
221 | static void tsi108_write_mii(struct tsi108_prv_data *data, | ||
222 | int reg, u16 val) | ||
223 | { | ||
224 | unsigned i = 100; | ||
225 | TSI_WRITE_PHY(TSI108_MAC_MII_ADDR, | ||
226 | (data->phy << TSI108_MAC_MII_ADDR_PHY) | | ||
227 | (reg << TSI108_MAC_MII_ADDR_REG)); | ||
228 | TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val); | ||
229 | while (i--) { | ||
230 | if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) & | ||
231 | TSI108_MAC_MII_IND_BUSY)) | ||
232 | break; | ||
233 | udelay(10); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | static int tsi108_mdio_read(struct net_device *dev, int addr, int reg) | ||
238 | { | ||
239 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
240 | return tsi108_read_mii(data, reg); | ||
241 | } | ||
242 | |||
243 | static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val) | ||
244 | { | ||
245 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
246 | tsi108_write_mii(data, reg, val); | ||
247 | } | ||
248 | |||
249 | static inline void tsi108_write_tbi(struct tsi108_prv_data *data, | ||
250 | int reg, u16 val) | ||
251 | { | ||
252 | unsigned i = 1000; | ||
253 | TSI_WRITE(TSI108_MAC_MII_ADDR, | ||
254 | (0x1e << TSI108_MAC_MII_ADDR_PHY) | ||
255 | | (reg << TSI108_MAC_MII_ADDR_REG)); | ||
256 | TSI_WRITE(TSI108_MAC_MII_DATAOUT, val); | ||
257 | while(i--) { | ||
258 | if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY)) | ||
259 | return; | ||
260 | udelay(10); | ||
261 | } | ||
262 | printk(KERN_ERR "%s: function timed out\n", __FUNCTION__); | ||
263 | } | ||
264 | |||
265 | static int mii_speed(struct mii_if_info *mii) | ||
266 | { | ||
267 | int advert, lpa, val, media; | ||
268 | int lpa2 = 0; | ||
269 | int speed; | ||
270 | |||
271 | if (!mii_link_ok(mii)) | ||
272 | return 0; | ||
273 | |||
274 | val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR); | ||
275 | if ((val & BMSR_ANEGCOMPLETE) == 0) | ||
276 | return 0; | ||
277 | |||
278 | advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE); | ||
279 | lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA); | ||
280 | media = mii_nway_result(advert & lpa); | ||
281 | |||
282 | if (mii->supports_gmii) | ||
283 | lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000); | ||
284 | |||
285 | speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : | ||
286 | (media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10); | ||
287 | return speed; | ||
288 | } | ||
289 | |||
290 | static void tsi108_check_phy(struct net_device *dev) | ||
291 | { | ||
292 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
293 | u32 mac_cfg2_reg, portctrl_reg; | ||
294 | u32 duplex; | ||
295 | u32 speed; | ||
296 | unsigned long flags; | ||
297 | |||
298 | /* Do a dummy read, as for some reason the first read | ||
299 | * after a link becomes up returns link down, even if | ||
300 | * it's been a while since the link came up. | ||
301 | */ | ||
302 | |||
303 | spin_lock_irqsave(&phy_lock, flags); | ||
304 | |||
305 | if (!data->phy_ok) | ||
306 | goto out; | ||
307 | |||
308 | tsi108_read_mii(data, MII_BMSR); | ||
309 | |||
310 | duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media); | ||
311 | data->init_media = 0; | ||
312 | |||
313 | if (netif_carrier_ok(dev)) { | ||
314 | |||
315 | speed = mii_speed(&data->mii_if); | ||
316 | |||
317 | if ((speed != data->speed) || duplex) { | ||
318 | |||
319 | mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2); | ||
320 | portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL); | ||
321 | |||
322 | mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK; | ||
323 | |||
324 | if (speed == 1000) { | ||
325 | mac_cfg2_reg |= TSI108_MAC_CFG2_GIG; | ||
326 | portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG; | ||
327 | } else { | ||
328 | mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG; | ||
329 | portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG; | ||
330 | } | ||
331 | |||
332 | data->speed = speed; | ||
333 | |||
334 | if (data->mii_if.full_duplex) { | ||
335 | mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX; | ||
336 | portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX; | ||
337 | data->duplex = 2; | ||
338 | } else { | ||
339 | mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX; | ||
340 | portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX; | ||
341 | data->duplex = 1; | ||
342 | } | ||
343 | |||
344 | TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg); | ||
345 | TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg); | ||
346 | |||
347 | if (data->link_up == 0) { | ||
348 | /* The manual says it can take 3-4 usecs for the speed change | ||
349 | * to take effect. | ||
350 | */ | ||
351 | udelay(5); | ||
352 | |||
353 | spin_lock(&data->txlock); | ||
354 | if (is_valid_ether_addr(dev->dev_addr) && data->txfree) | ||
355 | netif_wake_queue(dev); | ||
356 | |||
357 | data->link_up = 1; | ||
358 | spin_unlock(&data->txlock); | ||
359 | } | ||
360 | } | ||
361 | |||
362 | } else { | ||
363 | if (data->link_up == 1) { | ||
364 | netif_stop_queue(dev); | ||
365 | data->link_up = 0; | ||
366 | printk(KERN_NOTICE "%s: link is down\n", dev->name); | ||
367 | } | ||
368 | |||
369 | goto out; | ||
370 | } | ||
371 | |||
372 | |||
373 | out: | ||
374 | spin_unlock_irqrestore(&phy_lock, flags); | ||
375 | } | ||
376 | |||
377 | static inline void | ||
378 | tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift, | ||
379 | unsigned long *upper) | ||
380 | { | ||
381 | if (carry & carry_bit) | ||
382 | *upper += carry_shift; | ||
383 | } | ||
384 | |||
385 | static void tsi108_stat_carry(struct net_device *dev) | ||
386 | { | ||
387 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
388 | u32 carry1, carry2; | ||
389 | |||
390 | spin_lock_irq(&data->misclock); | ||
391 | |||
392 | carry1 = TSI_READ(TSI108_STAT_CARRY1); | ||
393 | carry2 = TSI_READ(TSI108_STAT_CARRY2); | ||
394 | |||
395 | TSI_WRITE(TSI108_STAT_CARRY1, carry1); | ||
396 | TSI_WRITE(TSI108_STAT_CARRY2, carry2); | ||
397 | |||
398 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES, | ||
399 | TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes); | ||
400 | |||
401 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS, | ||
402 | TSI108_STAT_RXPKTS_CARRY, | ||
403 | &data->stats.rx_packets); | ||
404 | |||
405 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS, | ||
406 | TSI108_STAT_RXFCS_CARRY, &data->rx_fcs); | ||
407 | |||
408 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST, | ||
409 | TSI108_STAT_RXMCAST_CARRY, | ||
410 | &data->stats.multicast); | ||
411 | |||
412 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN, | ||
413 | TSI108_STAT_RXALIGN_CARRY, | ||
414 | &data->stats.rx_frame_errors); | ||
415 | |||
416 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH, | ||
417 | TSI108_STAT_RXLENGTH_CARRY, | ||
418 | &data->stats.rx_length_errors); | ||
419 | |||
420 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT, | ||
421 | TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns); | ||
422 | |||
423 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO, | ||
424 | TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns); | ||
425 | |||
426 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG, | ||
427 | TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs); | ||
428 | |||
429 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER, | ||
430 | TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs); | ||
431 | |||
432 | tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP, | ||
433 | TSI108_STAT_RXDROP_CARRY, | ||
434 | &data->stats.rx_missed_errors); | ||
435 | |||
436 | tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES, | ||
437 | TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes); | ||
438 | |||
439 | tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS, | ||
440 | TSI108_STAT_TXPKTS_CARRY, | ||
441 | &data->stats.tx_packets); | ||
442 | |||
443 | tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF, | ||
444 | TSI108_STAT_TXEXDEF_CARRY, | ||
445 | &data->stats.tx_aborted_errors); | ||
446 | |||
447 | tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL, | ||
448 | TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort); | ||
449 | |||
450 | tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL, | ||
451 | TSI108_STAT_TXTCOL_CARRY, | ||
452 | &data->stats.collisions); | ||
453 | |||
454 | tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE, | ||
455 | TSI108_STAT_TXPAUSEDROP_CARRY, | ||
456 | &data->tx_pause_drop); | ||
457 | |||
458 | spin_unlock_irq(&data->misclock); | ||
459 | } | ||
460 | |||
461 | /* Read a stat counter atomically with respect to carries. | ||
462 | * data->misclock must be held. | ||
463 | */ | ||
464 | static inline unsigned long | ||
465 | tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit, | ||
466 | int carry_shift, unsigned long *upper) | ||
467 | { | ||
468 | int carryreg; | ||
469 | unsigned long val; | ||
470 | |||
471 | if (reg < 0xb0) | ||
472 | carryreg = TSI108_STAT_CARRY1; | ||
473 | else | ||
474 | carryreg = TSI108_STAT_CARRY2; | ||
475 | |||
476 | again: | ||
477 | val = TSI_READ(reg) | *upper; | ||
478 | |||
479 | /* Check to see if it overflowed, but the interrupt hasn't | ||
480 | * been serviced yet. If so, handle the carry here, and | ||
481 | * try again. | ||
482 | */ | ||
483 | |||
484 | if (unlikely(TSI_READ(carryreg) & carry_bit)) { | ||
485 | *upper += carry_shift; | ||
486 | TSI_WRITE(carryreg, carry_bit); | ||
487 | goto again; | ||
488 | } | ||
489 | |||
490 | return val; | ||
491 | } | ||
492 | |||
493 | static struct net_device_stats *tsi108_get_stats(struct net_device *dev) | ||
494 | { | ||
495 | unsigned long excol; | ||
496 | |||
497 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
498 | spin_lock_irq(&data->misclock); | ||
499 | |||
500 | data->tmpstats.rx_packets = | ||
501 | tsi108_read_stat(data, TSI108_STAT_RXPKTS, | ||
502 | TSI108_STAT_CARRY1_RXPKTS, | ||
503 | TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets); | ||
504 | |||
505 | data->tmpstats.tx_packets = | ||
506 | tsi108_read_stat(data, TSI108_STAT_TXPKTS, | ||
507 | TSI108_STAT_CARRY2_TXPKTS, | ||
508 | TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets); | ||
509 | |||
510 | data->tmpstats.rx_bytes = | ||
511 | tsi108_read_stat(data, TSI108_STAT_RXBYTES, | ||
512 | TSI108_STAT_CARRY1_RXBYTES, | ||
513 | TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes); | ||
514 | |||
515 | data->tmpstats.tx_bytes = | ||
516 | tsi108_read_stat(data, TSI108_STAT_TXBYTES, | ||
517 | TSI108_STAT_CARRY2_TXBYTES, | ||
518 | TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes); | ||
519 | |||
520 | data->tmpstats.multicast = | ||
521 | tsi108_read_stat(data, TSI108_STAT_RXMCAST, | ||
522 | TSI108_STAT_CARRY1_RXMCAST, | ||
523 | TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast); | ||
524 | |||
525 | excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL, | ||
526 | TSI108_STAT_CARRY2_TXEXCOL, | ||
527 | TSI108_STAT_TXEXCOL_CARRY, | ||
528 | &data->tx_coll_abort); | ||
529 | |||
530 | data->tmpstats.collisions = | ||
531 | tsi108_read_stat(data, TSI108_STAT_TXTCOL, | ||
532 | TSI108_STAT_CARRY2_TXTCOL, | ||
533 | TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions); | ||
534 | |||
535 | data->tmpstats.collisions += excol; | ||
536 | |||
537 | data->tmpstats.rx_length_errors = | ||
538 | tsi108_read_stat(data, TSI108_STAT_RXLENGTH, | ||
539 | TSI108_STAT_CARRY1_RXLENGTH, | ||
540 | TSI108_STAT_RXLENGTH_CARRY, | ||
541 | &data->stats.rx_length_errors); | ||
542 | |||
543 | data->tmpstats.rx_length_errors += | ||
544 | tsi108_read_stat(data, TSI108_STAT_RXRUNT, | ||
545 | TSI108_STAT_CARRY1_RXRUNT, | ||
546 | TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns); | ||
547 | |||
548 | data->tmpstats.rx_length_errors += | ||
549 | tsi108_read_stat(data, TSI108_STAT_RXJUMBO, | ||
550 | TSI108_STAT_CARRY1_RXJUMBO, | ||
551 | TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns); | ||
552 | |||
553 | data->tmpstats.rx_frame_errors = | ||
554 | tsi108_read_stat(data, TSI108_STAT_RXALIGN, | ||
555 | TSI108_STAT_CARRY1_RXALIGN, | ||
556 | TSI108_STAT_RXALIGN_CARRY, | ||
557 | &data->stats.rx_frame_errors); | ||
558 | |||
559 | data->tmpstats.rx_frame_errors += | ||
560 | tsi108_read_stat(data, TSI108_STAT_RXFCS, | ||
561 | TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY, | ||
562 | &data->rx_fcs); | ||
563 | |||
564 | data->tmpstats.rx_frame_errors += | ||
565 | tsi108_read_stat(data, TSI108_STAT_RXFRAG, | ||
566 | TSI108_STAT_CARRY1_RXFRAG, | ||
567 | TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs); | ||
568 | |||
569 | data->tmpstats.rx_missed_errors = | ||
570 | tsi108_read_stat(data, TSI108_STAT_RXDROP, | ||
571 | TSI108_STAT_CARRY1_RXDROP, | ||
572 | TSI108_STAT_RXDROP_CARRY, | ||
573 | &data->stats.rx_missed_errors); | ||
574 | |||
575 | /* These three are maintained by software. */ | ||
576 | data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors; | ||
577 | data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors; | ||
578 | |||
579 | data->tmpstats.tx_aborted_errors = | ||
580 | tsi108_read_stat(data, TSI108_STAT_TXEXDEF, | ||
581 | TSI108_STAT_CARRY2_TXEXDEF, | ||
582 | TSI108_STAT_TXEXDEF_CARRY, | ||
583 | &data->stats.tx_aborted_errors); | ||
584 | |||
585 | data->tmpstats.tx_aborted_errors += | ||
586 | tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP, | ||
587 | TSI108_STAT_CARRY2_TXPAUSE, | ||
588 | TSI108_STAT_TXPAUSEDROP_CARRY, | ||
589 | &data->tx_pause_drop); | ||
590 | |||
591 | data->tmpstats.tx_aborted_errors += excol; | ||
592 | |||
593 | data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors; | ||
594 | data->tmpstats.rx_errors = data->tmpstats.rx_length_errors + | ||
595 | data->tmpstats.rx_crc_errors + | ||
596 | data->tmpstats.rx_frame_errors + | ||
597 | data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors; | ||
598 | |||
599 | spin_unlock_irq(&data->misclock); | ||
600 | return &data->tmpstats; | ||
601 | } | ||
602 | |||
603 | static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev) | ||
604 | { | ||
605 | TSI_WRITE(TSI108_EC_RXQ_PTRHIGH, | ||
606 | TSI108_EC_RXQ_PTRHIGH_VALID); | ||
607 | |||
608 | TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO | ||
609 | | TSI108_EC_RXCTRL_QUEUE0); | ||
610 | } | ||
611 | |||
612 | static void tsi108_restart_tx(struct tsi108_prv_data * data) | ||
613 | { | ||
614 | TSI_WRITE(TSI108_EC_TXQ_PTRHIGH, | ||
615 | TSI108_EC_TXQ_PTRHIGH_VALID); | ||
616 | |||
617 | TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT | | ||
618 | TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0); | ||
619 | } | ||
620 | |||
621 | /* txlock must be held by caller, with IRQs disabled, and | ||
622 | * with permission to re-enable them when the lock is dropped. | ||
623 | */ | ||
624 | static void tsi108_complete_tx(struct net_device *dev) | ||
625 | { | ||
626 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
627 | int tx; | ||
628 | struct sk_buff *skb; | ||
629 | int release = 0; | ||
630 | |||
631 | while (!data->txfree || data->txhead != data->txtail) { | ||
632 | tx = data->txtail; | ||
633 | |||
634 | if (data->txring[tx].misc & TSI108_TX_OWN) | ||
635 | break; | ||
636 | |||
637 | skb = data->txskbs[tx]; | ||
638 | |||
639 | if (!(data->txring[tx].misc & TSI108_TX_OK)) | ||
640 | printk("%s: bad tx packet, misc %x\n", | ||
641 | dev->name, data->txring[tx].misc); | ||
642 | |||
643 | data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN; | ||
644 | data->txfree++; | ||
645 | |||
646 | if (data->txring[tx].misc & TSI108_TX_EOF) { | ||
647 | dev_kfree_skb_any(skb); | ||
648 | release++; | ||
649 | } | ||
650 | } | ||
651 | |||
652 | if (release) { | ||
653 | if (is_valid_ether_addr(dev->dev_addr) && data->link_up) | ||
654 | netif_wake_queue(dev); | ||
655 | } | ||
656 | } | ||
657 | |||
658 | static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev) | ||
659 | { | ||
660 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
661 | int frags = skb_shinfo(skb)->nr_frags + 1; | ||
662 | int i; | ||
663 | |||
664 | if (!data->phy_ok && net_ratelimit()) | ||
665 | printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name); | ||
666 | |||
667 | if (!data->link_up) { | ||
668 | printk(KERN_ERR "%s: Transmit while link is down!\n", | ||
669 | dev->name); | ||
670 | netif_stop_queue(dev); | ||
671 | return NETDEV_TX_BUSY; | ||
672 | } | ||
673 | |||
674 | if (data->txfree < MAX_SKB_FRAGS + 1) { | ||
675 | netif_stop_queue(dev); | ||
676 | |||
677 | if (net_ratelimit()) | ||
678 | printk(KERN_ERR "%s: Transmit with full tx ring!\n", | ||
679 | dev->name); | ||
680 | return NETDEV_TX_BUSY; | ||
681 | } | ||
682 | |||
683 | if (data->txfree - frags < MAX_SKB_FRAGS + 1) { | ||
684 | netif_stop_queue(dev); | ||
685 | } | ||
686 | |||
687 | spin_lock_irq(&data->txlock); | ||
688 | |||
689 | for (i = 0; i < frags; i++) { | ||
690 | int misc = 0; | ||
691 | int tx = data->txhead; | ||
692 | |||
693 | /* This is done to mark every TSI108_TX_INT_FREQ tx buffers with | ||
694 | * the interrupt bit. TX descriptor-complete interrupts are | ||
695 | * enabled when the queue fills up, and masked when there is | ||
696 | * still free space. This way, when saturating the outbound | ||
697 | * link, the tx interrupts are kept to a reasonable level. | ||
698 | * When the queue is not full, reclamation of skbs still occurs | ||
699 | * as new packets are transmitted, or on a queue-empty | ||
700 | * interrupt. | ||
701 | */ | ||
702 | |||
703 | if ((tx % TSI108_TX_INT_FREQ == 0) && | ||
704 | ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ)) | ||
705 | misc = TSI108_TX_INT; | ||
706 | |||
707 | data->txskbs[tx] = skb; | ||
708 | |||
709 | if (i == 0) { | ||
710 | data->txring[tx].buf0 = dma_map_single(NULL, skb->data, | ||
711 | skb->len - skb->data_len, DMA_TO_DEVICE); | ||
712 | data->txring[tx].len = skb->len - skb->data_len; | ||
713 | misc |= TSI108_TX_SOF; | ||
714 | } else { | ||
715 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; | ||
716 | |||
717 | data->txring[tx].buf0 = | ||
718 | dma_map_page(NULL, frag->page, frag->page_offset, | ||
719 | frag->size, DMA_TO_DEVICE); | ||
720 | data->txring[tx].len = frag->size; | ||
721 | } | ||
722 | |||
723 | if (i == frags - 1) | ||
724 | misc |= TSI108_TX_EOF; | ||
725 | |||
726 | if (netif_msg_pktdata(data)) { | ||
727 | int i; | ||
728 | printk("%s: Tx Frame contents (%d)\n", dev->name, | ||
729 | skb->len); | ||
730 | for (i = 0; i < skb->len; i++) | ||
731 | printk(" %2.2x", skb->data[i]); | ||
732 | printk(".\n"); | ||
733 | } | ||
734 | data->txring[tx].misc = misc | TSI108_TX_OWN; | ||
735 | |||
736 | data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN; | ||
737 | data->txfree--; | ||
738 | } | ||
739 | |||
740 | tsi108_complete_tx(dev); | ||
741 | |||
742 | /* This must be done after the check for completed tx descriptors, | ||
743 | * so that the tail pointer is correct. | ||
744 | */ | ||
745 | |||
746 | if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0)) | ||
747 | tsi108_restart_tx(data); | ||
748 | |||
749 | spin_unlock_irq(&data->txlock); | ||
750 | return NETDEV_TX_OK; | ||
751 | } | ||
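Editor's aside on the descriptor-fill loop above: a completion interrupt is requested only on every TSI108_TX_INT_FREQ-th ring slot, and only once at least that many descriptors are outstanding. A minimal stand-alone C sketch of just that decision follows; the helper name and the two constants are made up for the example and need not match the driver's real values.

#include <stdio.h>

/* Placeholder values for illustration; the real TSI108_TXRING_LEN and
 * TSI108_TX_INT_FREQ are defined by the driver and may differ. */
#define TXRING_LEN   256
#define TX_INT_FREQ   64

/* Should the descriptor placed in slot 'tx' request a completion IRQ,
 * given 'txfree' free slots remaining in the ring? */
static int tx_slot_wants_irq(int tx, int txfree)
{
        int outstanding = TXRING_LEN - txfree;

        return (tx % TX_INT_FREQ == 0) && (outstanding >= TX_INT_FREQ);
}

int main(void)
{
        /* Lightly loaded ring: no completion IRQs are requested. */
        printf("%d\n", tx_slot_wants_irq(64, TXRING_LEN - 3));    /* prints 0 */
        /* Busy ring: every TX_INT_FREQ-th slot asks for one. */
        printf("%d\n", tx_slot_wants_irq(128, TXRING_LEN - 200)); /* prints 1 */
        return 0;
}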
752 | |||
753 | static int tsi108_complete_rx(struct net_device *dev, int budget) | ||
754 | { | ||
755 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
756 | int done = 0; | ||
757 | |||
758 | while (data->rxfree && done != budget) { | ||
759 | int rx = data->rxtail; | ||
760 | struct sk_buff *skb; | ||
761 | |||
762 | if (data->rxring[rx].misc & TSI108_RX_OWN) | ||
763 | break; | ||
764 | |||
765 | skb = data->rxskbs[rx]; | ||
766 | data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN; | ||
767 | data->rxfree--; | ||
768 | done++; | ||
769 | |||
770 | if (data->rxring[rx].misc & TSI108_RX_BAD) { | ||
771 | spin_lock_irq(&data->misclock); | ||
772 | |||
773 | if (data->rxring[rx].misc & TSI108_RX_CRC) | ||
774 | data->stats.rx_crc_errors++; | ||
775 | if (data->rxring[rx].misc & TSI108_RX_OVER) | ||
776 | data->stats.rx_fifo_errors++; | ||
777 | |||
778 | spin_unlock_irq(&data->misclock); | ||
779 | |||
780 | dev_kfree_skb_any(skb); | ||
781 | continue; | ||
782 | } | ||
783 | if (netif_msg_pktdata(data)) { | ||
784 | int i; | ||
785 | printk("%s: Rx Frame contents (%d)\n", | ||
786 | dev->name, data->rxring[rx].len); | ||
787 | for (i = 0; i < data->rxring[rx].len; i++) | ||
788 | printk(" %2.2x", skb->data[i]); | ||
789 | printk(".\n"); | ||
790 | } | ||
791 | |||
792 | skb->dev = dev; | ||
793 | skb_put(skb, data->rxring[rx].len); | ||
794 | skb->protocol = eth_type_trans(skb, dev); | ||
795 | netif_receive_skb(skb); | ||
796 | dev->last_rx = jiffies; | ||
797 | } | ||
798 | |||
799 | return done; | ||
800 | } | ||
801 | |||
802 | static int tsi108_refill_rx(struct net_device *dev, int budget) | ||
803 | { | ||
804 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
805 | int done = 0; | ||
806 | |||
807 | while (data->rxfree != TSI108_RXRING_LEN && done != budget) { | ||
808 | int rx = data->rxhead; | ||
809 | struct sk_buff *skb; | ||
810 | |||
811 | data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2); | ||
812 | if (!skb) | ||
813 | break; | ||
814 | |||
815 | skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */ | ||
816 | |||
817 | data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, | ||
818 | TSI108_RX_SKB_SIZE, | ||
819 | DMA_FROM_DEVICE); | ||
820 | |||
821 | /* Sometimes the hardware sets blen to zero after packet | ||
822 | * reception, even though the manual says that it's only ever | ||
823 | * modified by the driver. | ||
824 | */ | ||
825 | |||
826 | data->rxring[rx].blen = TSI108_RX_SKB_SIZE; | ||
827 | data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT; | ||
828 | |||
829 | data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN; | ||
830 | data->rxfree++; | ||
831 | done++; | ||
832 | } | ||
833 | |||
834 | if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) & | ||
835 | TSI108_EC_RXSTAT_QUEUE0)) | ||
836 | tsi108_restart_rx(data, dev); | ||
837 | |||
838 | return done; | ||
839 | } | ||
840 | |||
841 | static int tsi108_poll(struct net_device *dev, int *budget) | ||
842 | { | ||
843 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
844 | u32 estat = TSI_READ(TSI108_EC_RXESTAT); | ||
845 | u32 intstat = TSI_READ(TSI108_EC_INTSTAT); | ||
846 | int total_budget = min(*budget, dev->quota); | ||
847 | int num_received = 0, num_filled = 0, budget_used; | ||
848 | |||
849 | intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH | | ||
850 | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT; | ||
851 | |||
852 | TSI_WRITE(TSI108_EC_RXESTAT, estat); | ||
853 | TSI_WRITE(TSI108_EC_INTSTAT, intstat); | ||
854 | |||
855 | if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT)) | ||
856 | num_received = tsi108_complete_rx(dev, total_budget); | ||
857 | |||
858 | /* This should normally fill no more slots than the number of | ||
859 | * packets received in tsi108_complete_rx(). The exception | ||
860 | * is when we previously ran out of memory for RX SKBs. In that | ||
861 | * case, it's helpful to obey the budget, not only so that the | ||
862 | * CPU isn't hogged, but so that memory (which may still be low) | ||
863 | * is not hogged by one device. | ||
864 | * | ||
865 | * A work unit is considered to be two SKBs to allow us to catch | ||
866 | * up when the ring has shrunk due to out-of-memory but we're | ||
867 | * still removing the full budget's worth of packets each time. | ||
868 | */ | ||
869 | |||
870 | if (data->rxfree < TSI108_RXRING_LEN) | ||
871 | num_filled = tsi108_refill_rx(dev, total_budget * 2); | ||
872 | |||
873 | if (intstat & TSI108_INT_RXERROR) { | ||
874 | u32 err = TSI_READ(TSI108_EC_RXERR); | ||
875 | TSI_WRITE(TSI108_EC_RXERR, err); | ||
876 | |||
877 | if (err) { | ||
878 | if (net_ratelimit()) | ||
879 | printk(KERN_DEBUG "%s: RX error %x\n", | ||
880 | dev->name, err); | ||
881 | |||
882 | if (!(TSI_READ(TSI108_EC_RXSTAT) & | ||
883 | TSI108_EC_RXSTAT_QUEUE0)) | ||
884 | tsi108_restart_rx(data, dev); | ||
885 | } | ||
886 | } | ||
887 | |||
888 | if (intstat & TSI108_INT_RXOVERRUN) { | ||
889 | spin_lock_irq(&data->misclock); | ||
890 | data->stats.rx_fifo_errors++; | ||
891 | spin_unlock_irq(&data->misclock); | ||
892 | } | ||
893 | |||
894 | budget_used = max(num_received, num_filled / 2); | ||
895 | |||
896 | *budget -= budget_used; | ||
897 | dev->quota -= budget_used; | ||
898 | |||
899 | if (budget_used != total_budget) { | ||
900 | data->rxpending = 0; | ||
901 | netif_rx_complete(dev); | ||
902 | |||
903 | TSI_WRITE(TSI108_EC_INTMASK, | ||
904 | TSI_READ(TSI108_EC_INTMASK) | ||
905 | & ~(TSI108_INT_RXQUEUE0 | ||
906 | | TSI108_INT_RXTHRESH | | ||
907 | TSI108_INT_RXOVERRUN | | ||
908 | TSI108_INT_RXERROR | | ||
909 | TSI108_INT_RXWAIT)); | ||
910 | |||
911 | /* IRQs are level-triggered, so no need to re-check */ | ||
912 | return 0; | ||
913 | } else { | ||
914 | data->rxpending = 1; | ||
915 | } | ||
916 | |||
917 | return 1; | ||
918 | } | ||
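A note on the budget arithmetic in tsi108_poll() above: one work unit is either a received packet or two refilled RX slots, so the budget charge is max(num_received, num_filled / 2), with the refill pass allowed up to twice the budget. The tiny stand-alone sketch below only illustrates that accounting; the function name work_units is invented for the example.

#include <stdio.h>

static int max_int(int a, int b)
{
        return a > b ? a : b;
}

/* One work unit == one completed RX packet or two refilled slots, so a
 * poll that is mostly catching up on refills (after an earlier skb
 * allocation failure) still charges itself against the budget. */
static int work_units(int num_received, int num_filled)
{
        return max_int(num_received, num_filled / 2);
}

int main(void)
{
        /* Steady state: each received packet is refilled right away. */
        printf("%d\n", work_units(16, 16)); /* prints 16 */
        /* Catch-up after an out-of-memory episode: refills dominate. */
        printf("%d\n", work_units(4, 32));  /* prints 16 */
        return 0;
}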
919 | |||
920 | static void tsi108_rx_int(struct net_device *dev) | ||
921 | { | ||
922 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
923 | |||
924 | /* A race could cause dev to already be scheduled, so it's not an | ||
925 | * error if that happens (and interrupts shouldn't be re-masked, | ||
926 | * because that can cause harmful races, if poll has already | ||
927 | * unmasked them but not cleared LINK_STATE_SCHED). | ||
928 | * | ||
929 | * This can happen if this code races with tsi108_poll(), which masks | ||
930 | * the interrupts after tsi108_irq_one() read the mask, but before | ||
931 | * netif_rx_schedule is called. It could also happen due to calls | ||
932 | * from tsi108_check_rxring(). | ||
933 | */ | ||
934 | |||
935 | if (netif_rx_schedule_prep(dev)) { | ||
936 | /* Mask, rather than ack, the receive interrupts. The ack | ||
937 | * will happen in tsi108_poll(). | ||
938 | */ | ||
939 | |||
940 | TSI_WRITE(TSI108_EC_INTMASK, | ||
941 | TSI_READ(TSI108_EC_INTMASK) | | ||
942 | TSI108_INT_RXQUEUE0 | ||
943 | | TSI108_INT_RXTHRESH | | ||
944 | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | | ||
945 | TSI108_INT_RXWAIT); | ||
946 | __netif_rx_schedule(dev); | ||
947 | } else { | ||
948 | if (!netif_running(dev)) { | ||
949 | /* This can happen if an interrupt occurs while the | ||
950 | * interface is being brought down, as the START | ||
951 | * bit is cleared before the stop function is called. | ||
952 | * | ||
953 | * In this case, the interrupts must be masked, or | ||
954 | * they will continue indefinitely. | ||
955 | * | ||
956 | * There's a race here if the interface is brought down | ||
957 | * and then up in rapid succession, as the device could | ||
958 | * be made running after the above check and before | ||
959 | * the masking below. This will only happen if the IRQ | ||
960 | * thread has a lower priority than the task bringing | ||
961 | * up the interface. Fixing this race would likely | ||
962 | * require changes in generic code. | ||
963 | */ | ||
964 | |||
965 | TSI_WRITE(TSI108_EC_INTMASK, | ||
966 | TSI_READ | ||
967 | (TSI108_EC_INTMASK) | | ||
968 | TSI108_INT_RXQUEUE0 | | ||
969 | TSI108_INT_RXTHRESH | | ||
970 | TSI108_INT_RXOVERRUN | | ||
971 | TSI108_INT_RXERROR | | ||
972 | TSI108_INT_RXWAIT); | ||
973 | } | ||
974 | } | ||
975 | } | ||
976 | |||
977 | /* If the RX ring has run out of memory, try periodically | ||
978 | * to allocate some more, as otherwise poll would never | ||
979 | * get called (apart from the initial end-of-queue condition). | ||
980 | * | ||
981 | * This is called once per second (by default) from the thread. | ||
982 | */ | ||
983 | |||
984 | static void tsi108_check_rxring(struct net_device *dev) | ||
985 | { | ||
986 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
987 | |||
988 | /* A poll is scheduled, as opposed to calling tsi108_refill_rx | ||
989 | * directly, so as to keep the receive path single-threaded | ||
990 | * (and thus not needing a lock). | ||
991 | */ | ||
992 | |||
993 | if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4) | ||
994 | tsi108_rx_int(dev); | ||
995 | } | ||
996 | |||
997 | static void tsi108_tx_int(struct net_device *dev) | ||
998 | { | ||
999 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1000 | u32 estat = TSI_READ(TSI108_EC_TXESTAT); | ||
1001 | |||
1002 | TSI_WRITE(TSI108_EC_TXESTAT, estat); | ||
1003 | TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 | | ||
1004 | TSI108_INT_TXIDLE | TSI108_INT_TXERROR); | ||
1005 | if (estat & TSI108_EC_TXESTAT_Q0_ERR) { | ||
1006 | u32 err = TSI_READ(TSI108_EC_TXERR); | ||
1007 | TSI_WRITE(TSI108_EC_TXERR, err); | ||
1008 | |||
1009 | if (err && net_ratelimit()) | ||
1010 | printk(KERN_ERR "%s: TX error %x\n", dev->name, err); | ||
1011 | } | ||
1012 | |||
1013 | if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) { | ||
1014 | spin_lock(&data->txlock); | ||
1015 | tsi108_complete_tx(dev); | ||
1016 | spin_unlock(&data->txlock); | ||
1017 | } | ||
1018 | } | ||
1019 | |||
1020 | |||
1021 | static irqreturn_t tsi108_irq(int irq, void *dev_id) | ||
1022 | { | ||
1023 | struct net_device *dev = dev_id; | ||
1024 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1025 | u32 stat = TSI_READ(TSI108_EC_INTSTAT); | ||
1026 | |||
1027 | if (!(stat & TSI108_INT_ANY)) | ||
1028 | return IRQ_NONE; /* Not our interrupt */ | ||
1029 | |||
1030 | stat &= ~TSI_READ(TSI108_EC_INTMASK); | ||
1031 | |||
1032 | if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE | | ||
1033 | TSI108_INT_TXERROR)) | ||
1034 | tsi108_tx_int(dev); | ||
1035 | if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH | | ||
1036 | TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN | | ||
1037 | TSI108_INT_RXERROR)) | ||
1038 | tsi108_rx_int(dev); | ||
1039 | |||
1040 | if (stat & TSI108_INT_SFN) { | ||
1041 | if (net_ratelimit()) | ||
1042 | printk(KERN_DEBUG "%s: SFN error\n", dev->name); | ||
1043 | TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN); | ||
1044 | } | ||
1045 | |||
1046 | if (stat & TSI108_INT_STATCARRY) { | ||
1047 | tsi108_stat_carry(dev); | ||
1048 | TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY); | ||
1049 | } | ||
1050 | |||
1051 | return IRQ_HANDLED; | ||
1052 | } | ||
1053 | |||
1054 | static void tsi108_stop_ethernet(struct net_device *dev) | ||
1055 | { | ||
1056 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1057 | int i = 1000; | ||
1058 | /* Disable all TX and RX queues ... */ | ||
1059 | TSI_WRITE(TSI108_EC_TXCTRL, 0); | ||
1060 | TSI_WRITE(TSI108_EC_RXCTRL, 0); | ||
1061 | |||
1062 | /* ...and wait for them to become idle */ | ||
1063 | while(i--) { | ||
1064 | if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE)) | ||
1065 | break; | ||
1066 | udelay(10); | ||
1067 | } | ||
1068 | i = 1000; | ||
1069 | while(i--){ | ||
1070 | if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE)) | ||
1071 | return; | ||
1072 | udelay(10); | ||
1073 | } | ||
1074 | printk(KERN_ERR "%s: function timed out\n", __FUNCTION__); | ||
1075 | } | ||
1076 | |||
1077 | static void tsi108_reset_ether(struct tsi108_prv_data * data) | ||
1078 | { | ||
1079 | TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST); | ||
1080 | udelay(100); | ||
1081 | TSI_WRITE(TSI108_MAC_CFG1, 0); | ||
1082 | |||
1083 | TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST); | ||
1084 | udelay(100); | ||
1085 | TSI_WRITE(TSI108_EC_PORTCTRL, | ||
1086 | TSI_READ(TSI108_EC_PORTCTRL) & | ||
1087 | ~TSI108_EC_PORTCTRL_STATRST); | ||
1088 | |||
1089 | TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST); | ||
1090 | udelay(100); | ||
1091 | TSI_WRITE(TSI108_EC_TXCFG, | ||
1092 | TSI_READ(TSI108_EC_TXCFG) & | ||
1093 | ~TSI108_EC_TXCFG_RST); | ||
1094 | |||
1095 | TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST); | ||
1096 | udelay(100); | ||
1097 | TSI_WRITE(TSI108_EC_RXCFG, | ||
1098 | TSI_READ(TSI108_EC_RXCFG) & | ||
1099 | ~TSI108_EC_RXCFG_RST); | ||
1100 | |||
1101 | TSI_WRITE(TSI108_MAC_MII_MGMT_CFG, | ||
1102 | TSI_READ(TSI108_MAC_MII_MGMT_CFG) | | ||
1103 | TSI108_MAC_MII_MGMT_RST); | ||
1104 | udelay(100); | ||
1105 | TSI_WRITE(TSI108_MAC_MII_MGMT_CFG, | ||
1106 | (TSI_READ(TSI108_MAC_MII_MGMT_CFG) & | ||
1107 | ~(TSI108_MAC_MII_MGMT_RST | | ||
1108 | TSI108_MAC_MII_MGMT_CLK)) | 0x07); | ||
1109 | } | ||
1110 | |||
1111 | static int tsi108_get_mac(struct net_device *dev) | ||
1112 | { | ||
1113 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1114 | u32 word1 = TSI_READ(TSI108_MAC_ADDR1); | ||
1115 | u32 word2 = TSI_READ(TSI108_MAC_ADDR2); | ||
1116 | |||
1117 | /* Note that the octets are reversed from what the manual says, | ||
1118 | * producing an even weirder ordering... | ||
1119 | */ | ||
1120 | if (word2 == 0 && word1 == 0) { | ||
1121 | dev->dev_addr[0] = 0x00; | ||
1122 | dev->dev_addr[1] = 0x06; | ||
1123 | dev->dev_addr[2] = 0xd2; | ||
1124 | dev->dev_addr[3] = 0x00; | ||
1125 | dev->dev_addr[4] = 0x00; | ||
1126 | if (0x8 == data->phy) | ||
1127 | dev->dev_addr[5] = 0x01; | ||
1128 | else | ||
1129 | dev->dev_addr[5] = 0x02; | ||
1130 | |||
1131 | word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24); | ||
1132 | |||
1133 | word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) | | ||
1134 | (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24); | ||
1135 | |||
1136 | TSI_WRITE(TSI108_MAC_ADDR1, word1); | ||
1137 | TSI_WRITE(TSI108_MAC_ADDR2, word2); | ||
1138 | } else { | ||
1139 | dev->dev_addr[0] = (word2 >> 16) & 0xff; | ||
1140 | dev->dev_addr[1] = (word2 >> 24) & 0xff; | ||
1141 | dev->dev_addr[2] = (word1 >> 0) & 0xff; | ||
1142 | dev->dev_addr[3] = (word1 >> 8) & 0xff; | ||
1143 | dev->dev_addr[4] = (word1 >> 16) & 0xff; | ||
1144 | dev->dev_addr[5] = (word1 >> 24) & 0xff; | ||
1145 | } | ||
1146 | |||
1147 | if (!is_valid_ether_addr(dev->dev_addr)) { | ||
1148 | printk("KERN_ERR: word1: %08x, word2: %08x\n", word1, word2); | ||
1149 | return -EINVAL; | ||
1150 | } | ||
1151 | |||
1152 | return 0; | ||
1153 | } | ||
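As the comment in tsi108_get_mac() above warns, the address octets are packed into the two hardware words in reversed order within each word: octets 0-1 go into the upper half of word2, octets 2-5 fill word1. The stand-alone sketch below reproduces only that packing, using one of the driver's fallback addresses (00:06:d2:00:00:02) as sample input; pack_mac is an invented helper name.

#include <stdio.h>
#include <stdint.h>

/* Pack a MAC address the way tsi108_get_mac() writes MAC_ADDR1/ADDR2:
 * octets 0-1 go into the upper half of word2, octets 2-5 into word1,
 * each group in reversed byte order. */
static void pack_mac(const uint8_t mac[6], uint32_t *word1, uint32_t *word2)
{
        *word2 = ((uint32_t)mac[0] << 16) | ((uint32_t)mac[1] << 24);
        *word1 = ((uint32_t)mac[2] << 0)  | ((uint32_t)mac[3] << 8) |
                 ((uint32_t)mac[4] << 16) | ((uint32_t)mac[5] << 24);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x06, 0xd2, 0x00, 0x00, 0x02 };
        uint32_t w1, w2;

        pack_mac(mac, &w1, &w2);
        printf("word1=%08x word2=%08x\n", w1, w2);
        /* prints: word1=020000d2 word2=06000000 */
        return 0;
}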
1154 | |||
1155 | static int tsi108_set_mac(struct net_device *dev, void *addr) | ||
1156 | { | ||
1157 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1158 | u32 word1, word2; | ||
1159 | int i; | ||
1160 | |||
1161 | if (!is_valid_ether_addr(addr)) | ||
1162 | return -EINVAL; | ||
1163 | |||
1164 | for (i = 0; i < 6; i++) | ||
1165 | /* +2 is for the offset of the HW addr type */ | ||
1166 | dev->dev_addr[i] = ((unsigned char *)addr)[i + 2]; | ||
1167 | |||
1168 | word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24); | ||
1169 | |||
1170 | word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) | | ||
1171 | (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24); | ||
1172 | |||
1173 | spin_lock_irq(&data->misclock); | ||
1174 | TSI_WRITE(TSI108_MAC_ADDR1, word1); | ||
1175 | TSI_WRITE(TSI108_MAC_ADDR2, word2); | ||
1176 | spin_lock(&data->txlock); | ||
1177 | |||
1178 | if (data->txfree && data->link_up) | ||
1179 | netif_wake_queue(dev); | ||
1180 | |||
1181 | spin_unlock(&data->txlock); | ||
1182 | spin_unlock_irq(&data->misclock); | ||
1183 | return 0; | ||
1184 | } | ||
1185 | |||
1186 | /* Protected by dev->xmit_lock. */ | ||
1187 | static void tsi108_set_rx_mode(struct net_device *dev) | ||
1188 | { | ||
1189 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1190 | u32 rxcfg = TSI_READ(TSI108_EC_RXCFG); | ||
1191 | |||
1192 | if (dev->flags & IFF_PROMISC) { | ||
1193 | rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH); | ||
1194 | rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE; | ||
1195 | goto out; | ||
1196 | } | ||
1197 | |||
1198 | rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE); | ||
1199 | |||
1200 | if (dev->flags & IFF_ALLMULTI || dev->mc_count) { | ||
1201 | int i; | ||
1202 | struct dev_mc_list *mc = dev->mc_list; | ||
1203 | rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH; | ||
1204 | |||
1205 | memset(data->mc_hash, 0, sizeof(data->mc_hash)); | ||
1206 | |||
1207 | while (mc) { | ||
1208 | u32 hash, crc; | ||
1209 | |||
1210 | if (mc->dmi_addrlen == 6) { | ||
1211 | crc = ether_crc(6, mc->dmi_addr); | ||
1212 | hash = crc >> 23; | ||
1213 | |||
1214 | __set_bit(hash, &data->mc_hash[0]); | ||
1215 | } else { | ||
1216 | printk(KERN_ERR | ||
1217 | "%s: got multicast address of length %d " | ||
1218 | "instead of 6.\n", dev->name, | ||
1219 | mc->dmi_addrlen); | ||
1220 | } | ||
1221 | |||
1222 | mc = mc->next; | ||
1223 | } | ||
1224 | |||
1225 | TSI_WRITE(TSI108_EC_HASHADDR, | ||
1226 | TSI108_EC_HASHADDR_AUTOINC | | ||
1227 | TSI108_EC_HASHADDR_MCAST); | ||
1228 | |||
1229 | for (i = 0; i < 16; i++) { | ||
1230 | /* The manual says that the hardware may drop | ||
1231 | * back-to-back writes to the data register. | ||
1232 | */ | ||
1233 | udelay(1); | ||
1234 | TSI_WRITE(TSI108_EC_HASHDATA, | ||
1235 | data->mc_hash[i]); | ||
1236 | } | ||
1237 | } | ||
1238 | |||
1239 | out: | ||
1240 | TSI_WRITE(TSI108_EC_RXCFG, rxcfg); | ||
1241 | } | ||
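The multicast filter above uses the top nine bits of the Ethernet CRC to select one of 512 hash-table bits, held as sixteen 32-bit words that the loop then writes out through the auto-incrementing hash-address register. The sketch below shows only the bit selection; the CRC itself (ether_crc() in the kernel) is taken as an input rather than reimplemented, and set_hash_bit plus the example CRC value are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* The receive filter keeps a 512-bit hash table as 16 x 32-bit words.
 * The top nine bits of the Ethernet CRC pick one of those 512 bits. */
static void set_hash_bit(uint32_t hash_table[16], uint32_t crc)
{
        uint32_t hash = crc >> 23;               /* 9-bit index, 0..511 */

        hash_table[hash / 32] |= 1u << (hash % 32);
}

int main(void)
{
        uint32_t table[16] = { 0 };
        uint32_t example_crc = 0xdeadbeef;       /* placeholder CRC value */

        set_hash_bit(table, example_crc);
        /* 0xdeadbeef >> 23 == 445 -> word 13, bit 29 */
        printf("word13=%08x\n", table[13]);      /* prints 20000000 */
        return 0;
}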
1242 | |||
1243 | static void tsi108_init_phy(struct net_device *dev) | ||
1244 | { | ||
1245 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1246 | u32 i = 0; | ||
1247 | u16 phyval = 0; | ||
1248 | unsigned long flags; | ||
1249 | |||
1250 | spin_lock_irqsave(&phy_lock, flags); | ||
1251 | |||
1252 | tsi108_write_mii(data, MII_BMCR, BMCR_RESET); | ||
1253 | while (i--){ | ||
1254 | if(!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET)) | ||
1255 | break; | ||
1256 | udelay(10); | ||
1257 | } | ||
1258 | if (i == 0) | ||
1259 | printk(KERN_ERR "%s: function timed out\n", __FUNCTION__); | ||
1260 | |||
1261 | #if (TSI108_PHY_TYPE == PHY_BCM54XX) /* Broadcom BCM54xx PHY */ | ||
1262 | tsi108_write_mii(data, 0x09, 0x0300); | ||
1263 | tsi108_write_mii(data, 0x10, 0x1020); | ||
1264 | tsi108_write_mii(data, 0x1c, 0x8c00); | ||
1265 | #endif | ||
1266 | |||
1267 | tsi108_write_mii(data, | ||
1268 | MII_BMCR, | ||
1269 | BMCR_ANENABLE | BMCR_ANRESTART); | ||
1270 | while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART) | ||
1271 | cpu_relax(); | ||
1272 | |||
1273 | /* Set G/MII mode and receive clock select in TBI control #2. The | ||
1274 | * second port won't work if this isn't done, even though we don't | ||
1275 | * use TBI mode. | ||
1276 | */ | ||
1277 | |||
1278 | tsi108_write_tbi(data, 0x11, 0x30); | ||
1279 | |||
1280 | /* FIXME: It seems to take more than 2 back-to-back reads to the | ||
1281 | * PHY_STAT register before the link up status bit is set. | ||
1282 | */ | ||
1283 | |||
1284 | data->link_up = 1; | ||
1285 | |||
1286 | while (!((phyval = tsi108_read_mii(data, MII_BMSR)) & | ||
1287 | BMSR_LSTATUS)) { | ||
1288 | if (i++ > (MII_READ_DELAY / 10)) { | ||
1289 | data->link_up = 0; | ||
1290 | break; | ||
1291 | } | ||
1292 | spin_unlock_irqrestore(&phy_lock, flags); | ||
1293 | msleep(10); | ||
1294 | spin_lock_irqsave(&phy_lock, flags); | ||
1295 | } | ||
1296 | |||
1297 | printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval); | ||
1298 | data->phy_ok = 1; | ||
1299 | data->init_media = 1; | ||
1300 | spin_unlock_irqrestore(&phy_lock, flags); | ||
1301 | } | ||
1302 | |||
1303 | static void tsi108_kill_phy(struct net_device *dev) | ||
1304 | { | ||
1305 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1306 | unsigned long flags; | ||
1307 | |||
1308 | spin_lock_irqsave(&phy_lock, flags); | ||
1309 | tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN); | ||
1310 | data->phy_ok = 0; | ||
1311 | spin_unlock_irqrestore(&phy_lock, flags); | ||
1312 | } | ||
1313 | |||
1314 | static int tsi108_open(struct net_device *dev) | ||
1315 | { | ||
1316 | int i; | ||
1317 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1318 | unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc); | ||
1319 | unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc); | ||
1320 | |||
1321 | i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev); | ||
1322 | if (i != 0) { | ||
1323 | printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n", | ||
1324 | data->id, data->irq_num); | ||
1325 | return i; | ||
1326 | } else { | ||
1327 | dev->irq = data->irq_num; | ||
1328 | printk(KERN_NOTICE | ||
1329 | "tsi108_open : Port %d Assigned IRQ %d to %s\n", | ||
1330 | data->id, dev->irq, dev->name); | ||
1331 | } | ||
1332 | |||
1333 | data->rxring = dma_alloc_coherent(NULL, rxring_size, | ||
1334 | &data->rxdma, GFP_KERNEL); | ||
1335 | |||
1336 | if (!data->rxring) { | ||
1337 | printk(KERN_DEBUG | ||
1338 | "TSI108_ETH: failed to allocate memory for rxring!\n"); | ||
1339 | return -ENOMEM; | ||
1340 | } else { | ||
1341 | memset(data->rxring, 0, rxring_size); | ||
1342 | } | ||
1343 | |||
1344 | data->txring = dma_alloc_coherent(NULL, txring_size, | ||
1345 | &data->txdma, GFP_KERNEL); | ||
1346 | |||
1347 | if (!data->txring) { | ||
1348 | printk(KERN_DEBUG | ||
1349 | "TSI108_ETH: failed to allocate memory for txring!\n"); | ||
1350 | dma_free_coherent(NULL, rxring_size, data->rxring, data->rxdma); | ||
1351 | return -ENOMEM; | ||
1352 | } else { | ||
1353 | memset(data->txring, 0, txring_size); | ||
1354 | } | ||
1355 | |||
1356 | for (i = 0; i < TSI108_RXRING_LEN; i++) { | ||
1357 | data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc); | ||
1358 | data->rxring[i].blen = TSI108_RXBUF_SIZE; | ||
1359 | data->rxring[i].vlan = 0; | ||
1360 | } | ||
1361 | |||
1362 | data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma; | ||
1363 | |||
1364 | data->rxtail = 0; | ||
1365 | data->rxhead = 0; | ||
1366 | |||
1367 | for (i = 0; i < TSI108_RXRING_LEN; i++) { | ||
1368 | struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN); | ||
1369 | |||
1370 | if (!skb) { | ||
1371 | /* Bah. No memory for now, but maybe we'll get | ||
1372 | * some more later. | ||
1373 | * For now, we'll live with the smaller ring. | ||
1374 | */ | ||
1375 | printk(KERN_WARNING | ||
1376 | "%s: Could only allocate %d receive skb(s).\n", | ||
1377 | dev->name, i); | ||
1378 | data->rxhead = i; | ||
1379 | break; | ||
1380 | } | ||
1381 | |||
1382 | data->rxskbs[i] = skb; | ||
1383 | /* Align the payload on a 4-byte boundary */ | ||
1384 | skb_reserve(skb, 2); | ||
1385 | |||
1386 | data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); | ||
1387 | data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; | ||
1388 | } | ||
1389 | |||
1390 | data->rxfree = i; | ||
1391 | TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma); | ||
1392 | |||
1393 | for (i = 0; i < TSI108_TXRING_LEN; i++) { | ||
1394 | data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc); | ||
1395 | data->txring[i].misc = 0; | ||
1396 | } | ||
1397 | |||
1398 | data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma; | ||
1399 | data->txtail = 0; | ||
1400 | data->txhead = 0; | ||
1401 | data->txfree = TSI108_TXRING_LEN; | ||
1402 | TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma); | ||
1403 | tsi108_init_phy(dev); | ||
1404 | |||
1405 | setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev); | ||
1406 | mod_timer(&data->timer, jiffies + 1); | ||
1407 | |||
1408 | tsi108_restart_rx(data, dev); | ||
1409 | |||
1410 | TSI_WRITE(TSI108_EC_INTSTAT, ~0); | ||
1411 | |||
1412 | TSI_WRITE(TSI108_EC_INTMASK, | ||
1413 | ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR | | ||
1414 | TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 | | ||
1415 | TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT | | ||
1416 | TSI108_INT_SFN | TSI108_INT_STATCARRY)); | ||
1417 | |||
1418 | TSI_WRITE(TSI108_MAC_CFG1, | ||
1419 | TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN); | ||
1420 | netif_start_queue(dev); | ||
1421 | return 0; | ||
1422 | } | ||
1423 | |||
1424 | static int tsi108_close(struct net_device *dev) | ||
1425 | { | ||
1426 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1427 | |||
1428 | netif_stop_queue(dev); | ||
1429 | |||
1430 | del_timer_sync(&data->timer); | ||
1431 | |||
1432 | tsi108_stop_ethernet(dev); | ||
1433 | tsi108_kill_phy(dev); | ||
1434 | TSI_WRITE(TSI108_EC_INTMASK, ~0); | ||
1435 | TSI_WRITE(TSI108_MAC_CFG1, 0); | ||
1436 | |||
1437 | /* Check for any pending TX packets, and drop them. */ | ||
1438 | |||
1439 | while (!data->txfree || data->txhead != data->txtail) { | ||
1440 | int tx = data->txtail; | ||
1441 | struct sk_buff *skb; | ||
1442 | skb = data->txskbs[tx]; | ||
1443 | data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN; | ||
1444 | data->txfree++; | ||
1445 | dev_kfree_skb(skb); | ||
1446 | } | ||
1447 | |||
1448 | synchronize_irq(data->irq_num); | ||
1449 | free_irq(data->irq_num, dev); | ||
1450 | |||
1451 | /* Discard the RX ring. */ | ||
1452 | |||
1453 | while (data->rxfree) { | ||
1454 | int rx = data->rxtail; | ||
1455 | struct sk_buff *skb; | ||
1456 | |||
1457 | skb = data->rxskbs[rx]; | ||
1458 | data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN; | ||
1459 | data->rxfree--; | ||
1460 | dev_kfree_skb(skb); | ||
1461 | } | ||
1462 | |||
1463 | dma_free_coherent(0, | ||
1464 | TSI108_RXRING_LEN * sizeof(rx_desc), | ||
1465 | data->rxring, data->rxdma); | ||
1466 | dma_free_coherent(0, | ||
1467 | TSI108_TXRING_LEN * sizeof(tx_desc), | ||
1468 | data->txring, data->txdma); | ||
1469 | |||
1470 | return 0; | ||
1471 | } | ||
1472 | |||
1473 | static void tsi108_init_mac(struct net_device *dev) | ||
1474 | { | ||
1475 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1476 | |||
1477 | TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE | | ||
1478 | TSI108_MAC_CFG2_PADCRC); | ||
1479 | |||
1480 | TSI_WRITE(TSI108_EC_TXTHRESH, | ||
1481 | (192 << TSI108_EC_TXTHRESH_STARTFILL) | | ||
1482 | (192 << TSI108_EC_TXTHRESH_STOPFILL)); | ||
1483 | |||
1484 | TSI_WRITE(TSI108_STAT_CARRYMASK1, | ||
1485 | ~(TSI108_STAT_CARRY1_RXBYTES | | ||
1486 | TSI108_STAT_CARRY1_RXPKTS | | ||
1487 | TSI108_STAT_CARRY1_RXFCS | | ||
1488 | TSI108_STAT_CARRY1_RXMCAST | | ||
1489 | TSI108_STAT_CARRY1_RXALIGN | | ||
1490 | TSI108_STAT_CARRY1_RXLENGTH | | ||
1491 | TSI108_STAT_CARRY1_RXRUNT | | ||
1492 | TSI108_STAT_CARRY1_RXJUMBO | | ||
1493 | TSI108_STAT_CARRY1_RXFRAG | | ||
1494 | TSI108_STAT_CARRY1_RXJABBER | | ||
1495 | TSI108_STAT_CARRY1_RXDROP)); | ||
1496 | |||
1497 | TSI_WRITE(TSI108_STAT_CARRYMASK2, | ||
1498 | ~(TSI108_STAT_CARRY2_TXBYTES | | ||
1499 | TSI108_STAT_CARRY2_TXPKTS | | ||
1500 | TSI108_STAT_CARRY2_TXEXDEF | | ||
1501 | TSI108_STAT_CARRY2_TXEXCOL | | ||
1502 | TSI108_STAT_CARRY2_TXTCOL | | ||
1503 | TSI108_STAT_CARRY2_TXPAUSE)); | ||
1504 | |||
1505 | TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN); | ||
1506 | TSI_WRITE(TSI108_MAC_CFG1, 0); | ||
1507 | |||
1508 | TSI_WRITE(TSI108_EC_RXCFG, | ||
1509 | TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE); | ||
1510 | |||
1511 | TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT | | ||
1512 | TSI108_EC_TXQ_CFG_EOQ_OWN_INT | | ||
1513 | TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT << | ||
1514 | TSI108_EC_TXQ_CFG_SFNPORT)); | ||
1515 | |||
1516 | TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT | | ||
1517 | TSI108_EC_RXQ_CFG_EOQ_OWN_INT | | ||
1518 | TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT << | ||
1519 | TSI108_EC_RXQ_CFG_SFNPORT)); | ||
1520 | |||
1521 | TSI_WRITE(TSI108_EC_TXQ_BUFCFG, | ||
1522 | TSI108_EC_TXQ_BUFCFG_BURST256 | | ||
1523 | TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT << | ||
1524 | TSI108_EC_TXQ_BUFCFG_SFNPORT)); | ||
1525 | |||
1526 | TSI_WRITE(TSI108_EC_RXQ_BUFCFG, | ||
1527 | TSI108_EC_RXQ_BUFCFG_BURST256 | | ||
1528 | TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT << | ||
1529 | TSI108_EC_RXQ_BUFCFG_SFNPORT)); | ||
1530 | |||
1531 | TSI_WRITE(TSI108_EC_INTMASK, ~0); | ||
1532 | } | ||
1533 | |||
1534 | static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1535 | { | ||
1536 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1537 | return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL); | ||
1538 | } | ||
1539 | |||
1540 | static int | ||
1541 | tsi108_init_one(struct platform_device *pdev) | ||
1542 | { | ||
1543 | struct net_device *dev = NULL; | ||
1544 | struct tsi108_prv_data *data = NULL; | ||
1545 | hw_info *einfo; | ||
1546 | int err = 0; | ||
1547 | |||
1548 | einfo = pdev->dev.platform_data; | ||
1549 | |||
1550 | if (NULL == einfo) { | ||
1551 | printk(KERN_ERR "tsi-eth %d: Missing additional data!\n", | ||
1552 | pdev->id); | ||
1553 | return -ENODEV; | ||
1554 | } | ||
1555 | |||
1556 | /* Create an ethernet device instance */ | ||
1557 | |||
1558 | dev = alloc_etherdev(sizeof(struct tsi108_prv_data)); | ||
1559 | if (!dev) { | ||
1560 | printk("tsi108_eth: Could not allocate a device structure\n"); | ||
1561 | return -ENOMEM; | ||
1562 | } | ||
1563 | |||
1564 | printk("tsi108_eth%d: probe...\n", pdev->id); | ||
1565 | data = netdev_priv(dev); | ||
1566 | |||
1567 | pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n", | ||
1568 | pdev->id, einfo->regs, einfo->phyregs, | ||
1569 | einfo->phy, einfo->irq_num); | ||
1570 | |||
1571 | data->regs = ioremap(einfo->regs, 0x400); | ||
1572 | if (NULL == data->regs) { | ||
1573 | err = -ENOMEM; | ||
1574 | goto regs_fail; | ||
1575 | } | ||
1576 | |||
1577 | data->phyregs = ioremap(einfo->phyregs, 0x400); | ||
1578 | if (NULL == data->phyregs) { | ||
1579 | err = -ENOMEM; | ||
1580 | goto regs_fail; | ||
1581 | } | ||
1582 | /* MII setup */ | ||
1583 | data->mii_if.dev = dev; | ||
1584 | data->mii_if.mdio_read = tsi108_mdio_read; | ||
1585 | data->mii_if.mdio_write = tsi108_mdio_write; | ||
1586 | data->mii_if.phy_id = einfo->phy; | ||
1587 | data->mii_if.phy_id_mask = 0x1f; | ||
1588 | data->mii_if.reg_num_mask = 0x1f; | ||
1589 | data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if); | ||
1590 | |||
1591 | data->phy = einfo->phy; | ||
1592 | data->irq_num = einfo->irq_num; | ||
1593 | data->id = pdev->id; | ||
1594 | dev->open = tsi108_open; | ||
1595 | dev->stop = tsi108_close; | ||
1596 | dev->hard_start_xmit = tsi108_send_packet; | ||
1597 | dev->set_mac_address = tsi108_set_mac; | ||
1598 | dev->set_multicast_list = tsi108_set_rx_mode; | ||
1599 | dev->get_stats = tsi108_get_stats; | ||
1600 | dev->poll = tsi108_poll; | ||
1601 | dev->do_ioctl = tsi108_do_ioctl; | ||
1602 | dev->weight = 64; /* 64 is more suitable for GigE interface - klai */ | ||
1603 | |||
1604 | /* Apparently, the Linux networking code won't use scatter-gather | ||
1605 | * if the hardware doesn't do checksums. However, it's faster | ||
1606 | * to checksum in place and use SG, as (among other reasons) | ||
1607 | * the cache won't be dirtied (which then has to be flushed | ||
1608 | * before DMA). The checksumming is done by the driver (via | ||
1609 | * a new function skb_csum_dev() in net/core/skbuff.c). | ||
1610 | */ | ||
1611 | |||
1612 | dev->features = NETIF_F_HIGHDMA; | ||
1613 | SET_MODULE_OWNER(dev); | ||
1614 | |||
1615 | spin_lock_init(&data->txlock); | ||
1616 | spin_lock_init(&data->misclock); | ||
1617 | |||
1618 | tsi108_reset_ether(data); | ||
1619 | tsi108_kill_phy(dev); | ||
1620 | |||
1621 | if ((err = tsi108_get_mac(dev)) != 0) { | ||
1622 | printk(KERN_ERR "%s: Invalid MAC address. Please correct.\n", | ||
1623 | dev->name); | ||
1624 | goto register_fail; | ||
1625 | } | ||
1626 | |||
1627 | tsi108_init_mac(dev); | ||
1628 | err = register_netdev(dev); | ||
1629 | if (err) { | ||
1630 | printk(KERN_ERR "%s: Cannot register net device, aborting.\n", | ||
1631 | dev->name); | ||
1632 | goto register_fail; | ||
1633 | } | ||
1634 | |||
1635 | printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: " | ||
1636 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, | ||
1637 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], | ||
1638 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | ||
1639 | #ifdef DEBUG | ||
1640 | data->msg_enable = DEBUG; | ||
1641 | dump_eth_one(dev); | ||
1642 | #endif | ||
1643 | |||
1644 | return 0; | ||
1645 | |||
1646 | register_fail: | ||
1647 | iounmap(data->regs); | ||
1648 | iounmap(data->phyregs); | ||
1649 | |||
1650 | regs_fail: | ||
1651 | free_netdev(dev); | ||
1652 | return err; | ||
1653 | } | ||
1654 | |||
1655 | /* There's no way to either get interrupts from the PHY when | ||
1656 | * something changes, or to have the Tsi108 automatically communicate | ||
1657 | * with the PHY to reconfigure itself. | ||
1658 | * | ||
1659 | * Thus, we have to do it using a timer. | ||
1660 | */ | ||
1661 | |||
1662 | static void tsi108_timed_checker(unsigned long dev_ptr) | ||
1663 | { | ||
1664 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1665 | struct tsi108_prv_data *data = netdev_priv(dev); | ||
1666 | |||
1667 | tsi108_check_phy(dev); | ||
1668 | tsi108_check_rxring(dev); | ||
1669 | mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL); | ||
1670 | } | ||
1671 | |||
1672 | static int tsi108_ether_init(void) | ||
1673 | { | ||
1674 | int ret; | ||
1675 | ret = platform_driver_register (&tsi_eth_driver); | ||
1676 | if (ret < 0){ | ||
1677 | printk("tsi108_ether_init: error initializing ethernet " | ||
1678 | "device\n"); | ||
1679 | return ret; | ||
1680 | } | ||
1681 | return 0; | ||
1682 | } | ||
1683 | |||
1684 | static int tsi108_ether_remove(struct platform_device *pdev) | ||
1685 | { | ||
1686 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1687 | struct tsi108_prv_data *priv = netdev_priv(dev); | ||
1688 | |||
1689 | unregister_netdev(dev); | ||
1690 | tsi108_stop_ethernet(dev); | ||
1691 | platform_set_drvdata(pdev, NULL); | ||
1692 | iounmap(priv->regs); | ||
1693 | iounmap(priv->phyregs); | ||
1694 | free_netdev(dev); | ||
1695 | |||
1696 | return 0; | ||
1697 | } | ||
1698 | static void tsi108_ether_exit(void) | ||
1699 | { | ||
1700 | platform_driver_unregister(&tsi_eth_driver); | ||
1701 | } | ||
1702 | |||
1703 | module_init(tsi108_ether_init); | ||
1704 | module_exit(tsi108_ether_exit); | ||
1705 | |||
1706 | MODULE_AUTHOR("Tundra Semiconductor Corporation"); | ||
1707 | MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver"); | ||
1708 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/tsi108_eth.h b/drivers/net/tsi108_eth.h new file mode 100644 index 000000000000..77a769df228a --- /dev/null +++ b/drivers/net/tsi108_eth.h | |||
@@ -0,0 +1,365 @@ | |||
1 | /* | ||
2 | * (C) Copyright 2005 Tundra Semiconductor Corp. | ||
3 | * Kong Lai, <kong.lai@tundra.com>. | ||
4 | * | ||
5 | * See file CREDITS for list of people who contributed to this | ||
6 | * project. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation; either version 2 of | ||
11 | * the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | ||
21 | * MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * net/tsi108_eth.h - definitions for Tsi108 GIGE network controller. | ||
26 | */ | ||
27 | |||
28 | #ifndef __TSI108_ETH_H | ||
29 | #define __TSI108_ETH_H | ||
30 | |||
31 | #include <linux/types.h> | ||
32 | |||
33 | #define TSI_WRITE(offset, val) \ | ||
34 | out_be32((data->regs + (offset)), val) | ||
35 | |||
36 | #define TSI_READ(offset) \ | ||
37 | in_be32((data->regs + (offset))) | ||
38 | |||
39 | #define TSI_WRITE_PHY(offset, val) \ | ||
40 | out_be32((data->phyregs + (offset)), val) | ||
41 | |||
42 | #define TSI_READ_PHY(offset) \ | ||
43 | in_be32((data->phyregs + (offset))) | ||
44 | |||
45 | /* | ||
46 | * PHY Configuration Options | ||
47 | * | ||
48 | * NOTE: Enable the set of definitions corresponding to your board type | ||
49 | */ | ||
50 | #define PHY_MV88E 1 /* Marvell 88Exxxx PHY */ | ||
51 | #define PHY_BCM54XX 2 /* Broadcom BCM54xx PHY */ | ||
52 | #define TSI108_PHY_TYPE PHY_MV88E | ||
53 | |||
54 | /* | ||
55 | * TSI108 GIGE port registers | ||
56 | */ | ||
57 | |||
58 | #define TSI108_ETH_PORT_NUM 2 | ||
59 | #define TSI108_PBM_PORT 2 | ||
60 | #define TSI108_SDRAM_PORT 4 | ||
61 | |||
62 | #define TSI108_MAC_CFG1 (0x000) | ||
63 | #define TSI108_MAC_CFG1_SOFTRST (1 << 31) | ||
64 | #define TSI108_MAC_CFG1_LOOPBACK (1 << 8) | ||
65 | #define TSI108_MAC_CFG1_RXEN (1 << 2) | ||
66 | #define TSI108_MAC_CFG1_TXEN (1 << 0) | ||
67 | |||
68 | #define TSI108_MAC_CFG2 (0x004) | ||
69 | #define TSI108_MAC_CFG2_DFLT_PREAMBLE (7 << 12) | ||
70 | #define TSI108_MAC_CFG2_IFACE_MASK (3 << 8) | ||
71 | #define TSI108_MAC_CFG2_NOGIG (1 << 8) | ||
72 | #define TSI108_MAC_CFG2_GIG (2 << 8) | ||
73 | #define TSI108_MAC_CFG2_PADCRC (1 << 2) | ||
74 | #define TSI108_MAC_CFG2_FULLDUPLEX (1 << 0) | ||
75 | |||
76 | #define TSI108_MAC_MII_MGMT_CFG (0x020) | ||
77 | #define TSI108_MAC_MII_MGMT_CLK (7 << 0) | ||
78 | #define TSI108_MAC_MII_MGMT_RST (1 << 31) | ||
79 | |||
80 | #define TSI108_MAC_MII_CMD (0x024) | ||
81 | #define TSI108_MAC_MII_CMD_READ (1 << 0) | ||
82 | |||
83 | #define TSI108_MAC_MII_ADDR (0x028) | ||
84 | #define TSI108_MAC_MII_ADDR_REG 0 | ||
85 | #define TSI108_MAC_MII_ADDR_PHY 8 | ||
86 | |||
87 | #define TSI108_MAC_MII_DATAOUT (0x02c) | ||
88 | #define TSI108_MAC_MII_DATAIN (0x030) | ||
89 | |||
90 | #define TSI108_MAC_MII_IND (0x034) | ||
91 | #define TSI108_MAC_MII_IND_NOTVALID (1 << 2) | ||
92 | #define TSI108_MAC_MII_IND_SCANNING (1 << 1) | ||
93 | #define TSI108_MAC_MII_IND_BUSY (1 << 0) | ||
94 | |||
95 | #define TSI108_MAC_IFCTRL (0x038) | ||
96 | #define TSI108_MAC_IFCTRL_PHYMODE (1 << 24) | ||
97 | |||
98 | #define TSI108_MAC_ADDR1 (0x040) | ||
99 | #define TSI108_MAC_ADDR2 (0x044) | ||
100 | |||
101 | #define TSI108_STAT_RXBYTES (0x06c) | ||
102 | #define TSI108_STAT_RXBYTES_CARRY (1 << 24) | ||
103 | |||
104 | #define TSI108_STAT_RXPKTS (0x070) | ||
105 | #define TSI108_STAT_RXPKTS_CARRY (1 << 18) | ||
106 | |||
107 | #define TSI108_STAT_RXFCS (0x074) | ||
108 | #define TSI108_STAT_RXFCS_CARRY (1 << 12) | ||
109 | |||
110 | #define TSI108_STAT_RXMCAST (0x078) | ||
111 | #define TSI108_STAT_RXMCAST_CARRY (1 << 18) | ||
112 | |||
113 | #define TSI108_STAT_RXALIGN (0x08c) | ||
114 | #define TSI108_STAT_RXALIGN_CARRY (1 << 12) | ||
115 | |||
116 | #define TSI108_STAT_RXLENGTH (0x090) | ||
117 | #define TSI108_STAT_RXLENGTH_CARRY (1 << 12) | ||
118 | |||
119 | #define TSI108_STAT_RXRUNT (0x09c) | ||
120 | #define TSI108_STAT_RXRUNT_CARRY (1 << 12) | ||
121 | |||
122 | #define TSI108_STAT_RXJUMBO (0x0a0) | ||
123 | #define TSI108_STAT_RXJUMBO_CARRY (1 << 12) | ||
124 | |||
125 | #define TSI108_STAT_RXFRAG (0x0a4) | ||
126 | #define TSI108_STAT_RXFRAG_CARRY (1 << 12) | ||
127 | |||
128 | #define TSI108_STAT_RXJABBER (0x0a8) | ||
129 | #define TSI108_STAT_RXJABBER_CARRY (1 << 12) | ||
130 | |||
131 | #define TSI108_STAT_RXDROP (0x0ac) | ||
132 | #define TSI108_STAT_RXDROP_CARRY (1 << 12) | ||
133 | |||
134 | #define TSI108_STAT_TXBYTES (0x0b0) | ||
135 | #define TSI108_STAT_TXBYTES_CARRY (1 << 24) | ||
136 | |||
137 | #define TSI108_STAT_TXPKTS (0x0b4) | ||
138 | #define TSI108_STAT_TXPKTS_CARRY (1 << 18) | ||
139 | |||
140 | #define TSI108_STAT_TXEXDEF (0x0c8) | ||
141 | #define TSI108_STAT_TXEXDEF_CARRY (1 << 12) | ||
142 | |||
143 | #define TSI108_STAT_TXEXCOL (0x0d8) | ||
144 | #define TSI108_STAT_TXEXCOL_CARRY (1 << 12) | ||
145 | |||
146 | #define TSI108_STAT_TXTCOL (0x0dc) | ||
147 | #define TSI108_STAT_TXTCOL_CARRY (1 << 13) | ||
148 | |||
149 | #define TSI108_STAT_TXPAUSEDROP (0x0e4) | ||
150 | #define TSI108_STAT_TXPAUSEDROP_CARRY (1 << 12) | ||
151 | |||
152 | #define TSI108_STAT_CARRY1 (0x100) | ||
153 | #define TSI108_STAT_CARRY1_RXBYTES (1 << 16) | ||
154 | #define TSI108_STAT_CARRY1_RXPKTS (1 << 15) | ||
155 | #define TSI108_STAT_CARRY1_RXFCS (1 << 14) | ||
156 | #define TSI108_STAT_CARRY1_RXMCAST (1 << 13) | ||
157 | #define TSI108_STAT_CARRY1_RXALIGN (1 << 8) | ||
158 | #define TSI108_STAT_CARRY1_RXLENGTH (1 << 7) | ||
159 | #define TSI108_STAT_CARRY1_RXRUNT (1 << 4) | ||
160 | #define TSI108_STAT_CARRY1_RXJUMBO (1 << 3) | ||
161 | #define TSI108_STAT_CARRY1_RXFRAG (1 << 2) | ||
162 | #define TSI108_STAT_CARRY1_RXJABBER (1 << 1) | ||
163 | #define TSI108_STAT_CARRY1_RXDROP (1 << 0) | ||
164 | |||
165 | #define TSI108_STAT_CARRY2 (0x104) | ||
166 | #define TSI108_STAT_CARRY2_TXBYTES (1 << 13) | ||
167 | #define TSI108_STAT_CARRY2_TXPKTS (1 << 12) | ||
168 | #define TSI108_STAT_CARRY2_TXEXDEF (1 << 7) | ||
169 | #define TSI108_STAT_CARRY2_TXEXCOL (1 << 3) | ||
170 | #define TSI108_STAT_CARRY2_TXTCOL (1 << 2) | ||
171 | #define TSI108_STAT_CARRY2_TXPAUSE (1 << 0) | ||
172 | |||
173 | #define TSI108_STAT_CARRYMASK1 (0x108) | ||
174 | #define TSI108_STAT_CARRYMASK2 (0x10c) | ||
175 | |||
176 | #define TSI108_EC_PORTCTRL (0x200) | ||
177 | #define TSI108_EC_PORTCTRL_STATRST (1 << 31) | ||
178 | #define TSI108_EC_PORTCTRL_STATEN (1 << 28) | ||
179 | #define TSI108_EC_PORTCTRL_NOGIG (1 << 18) | ||
180 | #define TSI108_EC_PORTCTRL_HALFDUPLEX (1 << 16) | ||
181 | |||
182 | #define TSI108_EC_INTSTAT (0x204) | ||
183 | #define TSI108_EC_INTMASK (0x208) | ||
184 | |||
185 | #define TSI108_INT_ANY (1 << 31) | ||
186 | #define TSI108_INT_SFN (1 << 30) | ||
187 | #define TSI108_INT_RXIDLE (1 << 29) | ||
188 | #define TSI108_INT_RXABORT (1 << 28) | ||
189 | #define TSI108_INT_RXERROR (1 << 27) | ||
190 | #define TSI108_INT_RXOVERRUN (1 << 26) | ||
191 | #define TSI108_INT_RXTHRESH (1 << 25) | ||
192 | #define TSI108_INT_RXWAIT (1 << 24) | ||
193 | #define TSI108_INT_RXQUEUE0 (1 << 16) | ||
194 | #define TSI108_INT_STATCARRY (1 << 15) | ||
195 | #define TSI108_INT_TXIDLE (1 << 13) | ||
196 | #define TSI108_INT_TXABORT (1 << 12) | ||
197 | #define TSI108_INT_TXERROR (1 << 11) | ||
198 | #define TSI108_INT_TXUNDERRUN (1 << 10) | ||
199 | #define TSI108_INT_TXTHRESH (1 << 9) | ||
200 | #define TSI108_INT_TXWAIT (1 << 8) | ||
201 | #define TSI108_INT_TXQUEUE0 (1 << 0) | ||
202 | |||
203 | #define TSI108_EC_TXCFG (0x220) | ||
204 | #define TSI108_EC_TXCFG_RST (1 << 31) | ||
205 | |||
206 | #define TSI108_EC_TXCTRL (0x224) | ||
207 | #define TSI108_EC_TXCTRL_IDLEINT (1 << 31) | ||
208 | #define TSI108_EC_TXCTRL_ABORT (1 << 30) | ||
209 | #define TSI108_EC_TXCTRL_GO (1 << 15) | ||
210 | #define TSI108_EC_TXCTRL_QUEUE0 (1 << 0) | ||
211 | |||
212 | #define TSI108_EC_TXSTAT (0x228) | ||
213 | #define TSI108_EC_TXSTAT_ACTIVE (1 << 15) | ||
214 | #define TSI108_EC_TXSTAT_QUEUE0 (1 << 0) | ||
215 | |||
216 | #define TSI108_EC_TXESTAT (0x22c) | ||
217 | #define TSI108_EC_TXESTAT_Q0_ERR (1 << 24) | ||
218 | #define TSI108_EC_TXESTAT_Q0_DESCINT (1 << 16) | ||
219 | #define TSI108_EC_TXESTAT_Q0_EOF (1 << 8) | ||
220 | #define TSI108_EC_TXESTAT_Q0_EOQ (1 << 0) | ||
221 | |||
222 | #define TSI108_EC_TXERR (0x278) | ||
223 | |||
224 | #define TSI108_EC_TXQ_CFG (0x280) | ||
225 | #define TSI108_EC_TXQ_CFG_DESC_INT (1 << 20) | ||
226 | #define TSI108_EC_TXQ_CFG_EOQ_OWN_INT (1 << 19) | ||
227 | #define TSI108_EC_TXQ_CFG_WSWP (1 << 11) | ||
228 | #define TSI108_EC_TXQ_CFG_BSWP (1 << 10) | ||
229 | #define TSI108_EC_TXQ_CFG_SFNPORT 0 | ||
230 | |||
231 | #define TSI108_EC_TXQ_BUFCFG (0x284) | ||
232 | #define TSI108_EC_TXQ_BUFCFG_BURST8 (0 << 8) | ||
233 | #define TSI108_EC_TXQ_BUFCFG_BURST32 (1 << 8) | ||
234 | #define TSI108_EC_TXQ_BUFCFG_BURST128 (2 << 8) | ||
235 | #define TSI108_EC_TXQ_BUFCFG_BURST256 (3 << 8) | ||
236 | #define TSI108_EC_TXQ_BUFCFG_WSWP (1 << 11) | ||
237 | #define TSI108_EC_TXQ_BUFCFG_BSWP (1 << 10) | ||
238 | #define TSI108_EC_TXQ_BUFCFG_SFNPORT 0 | ||
239 | |||
240 | #define TSI108_EC_TXQ_PTRLOW (0x288) | ||
241 | |||
242 | #define TSI108_EC_TXQ_PTRHIGH (0x28c) | ||
243 | #define TSI108_EC_TXQ_PTRHIGH_VALID (1 << 31) | ||
244 | |||
245 | #define TSI108_EC_TXTHRESH (0x230) | ||
246 | #define TSI108_EC_TXTHRESH_STARTFILL 0 | ||
247 | #define TSI108_EC_TXTHRESH_STOPFILL 16 | ||
248 | |||
249 | #define TSI108_EC_RXCFG (0x320) | ||
250 | #define TSI108_EC_RXCFG_RST (1 << 31) | ||
251 | |||
252 | #define TSI108_EC_RXSTAT (0x328) | ||
253 | #define TSI108_EC_RXSTAT_ACTIVE (1 << 15) | ||
254 | #define TSI108_EC_RXSTAT_QUEUE0 (1 << 0) | ||
255 | |||
256 | #define TSI108_EC_RXESTAT (0x32c) | ||
257 | #define TSI108_EC_RXESTAT_Q0_ERR (1 << 24) | ||
258 | #define TSI108_EC_RXESTAT_Q0_DESCINT (1 << 16) | ||
259 | #define TSI108_EC_RXESTAT_Q0_EOF (1 << 8) | ||
260 | #define TSI108_EC_RXESTAT_Q0_EOQ (1 << 0) | ||
261 | |||
262 | #define TSI108_EC_HASHADDR (0x360) | ||
263 | #define TSI108_EC_HASHADDR_AUTOINC (1 << 31) | ||
264 | #define TSI108_EC_HASHADDR_DO1STREAD (1 << 30) | ||
265 | #define TSI108_EC_HASHADDR_UNICAST (0 << 4) | ||
266 | #define TSI108_EC_HASHADDR_MCAST (1 << 4) | ||
267 | |||
268 | #define TSI108_EC_HASHDATA (0x364) | ||
269 | |||
270 | #define TSI108_EC_RXQ_PTRLOW (0x388) | ||
271 | |||
272 | #define TSI108_EC_RXQ_PTRHIGH (0x38c) | ||
273 | #define TSI108_EC_RXQ_PTRHIGH_VALID (1 << 31) | ||
274 | |||
275 | /* Station Enable -- accept packets destined for us */ | ||
276 | #define TSI108_EC_RXCFG_SE (1 << 13) | ||
277 | /* Unicast Frame Enable -- for packets not destined for us */ | ||
278 | #define TSI108_EC_RXCFG_UFE (1 << 12) | ||
279 | /* Multicast Frame Enable */ | ||
280 | #define TSI108_EC_RXCFG_MFE (1 << 11) | ||
281 | /* Broadcast Frame Enable */ | ||
282 | #define TSI108_EC_RXCFG_BFE (1 << 10) | ||
283 | #define TSI108_EC_RXCFG_UC_HASH (1 << 9) | ||
284 | #define TSI108_EC_RXCFG_MC_HASH (1 << 8) | ||
285 | |||
286 | #define TSI108_EC_RXQ_CFG (0x380) | ||
287 | #define TSI108_EC_RXQ_CFG_DESC_INT (1 << 20) | ||
288 | #define TSI108_EC_RXQ_CFG_EOQ_OWN_INT (1 << 19) | ||
289 | #define TSI108_EC_RXQ_CFG_WSWP (1 << 11) | ||
290 | #define TSI108_EC_RXQ_CFG_BSWP (1 << 10) | ||
291 | #define TSI108_EC_RXQ_CFG_SFNPORT 0 | ||
292 | |||
293 | #define TSI108_EC_RXQ_BUFCFG (0x384) | ||
294 | #define TSI108_EC_RXQ_BUFCFG_BURST8 (0 << 8) | ||
295 | #define TSI108_EC_RXQ_BUFCFG_BURST32 (1 << 8) | ||
296 | #define TSI108_EC_RXQ_BUFCFG_BURST128 (2 << 8) | ||
297 | #define TSI108_EC_RXQ_BUFCFG_BURST256 (3 << 8) | ||
298 | #define TSI108_EC_RXQ_BUFCFG_WSWP (1 << 11) | ||
299 | #define TSI108_EC_RXQ_BUFCFG_BSWP (1 << 10) | ||
300 | #define TSI108_EC_RXQ_BUFCFG_SFNPORT 0 | ||
301 | |||
302 | #define TSI108_EC_RXCTRL (0x324) | ||
303 | #define TSI108_EC_RXCTRL_ABORT (1 << 30) | ||
304 | #define TSI108_EC_RXCTRL_GO (1 << 15) | ||
305 | #define TSI108_EC_RXCTRL_QUEUE0 (1 << 0) | ||
306 | |||
307 | #define TSI108_EC_RXERR (0x378) | ||
308 | |||
309 | #define TSI108_TX_EOF (1 << 0) /* End of frame; last fragment of packet */ | ||
310 | #define TSI108_TX_SOF (1 << 1) /* Start of frame; first frag. of packet */ | ||
311 | #define TSI108_TX_VLAN (1 << 2) /* Per-frame VLAN: enables VLAN override */ | ||
312 | #define TSI108_TX_HUGE (1 << 3) /* Huge frame enable */ | ||
313 | #define TSI108_TX_PAD (1 << 4) /* Pad the packet if too short */ | ||
314 | #define TSI108_TX_CRC (1 << 5) /* Generate CRC for this packet */ | ||
315 | #define TSI108_TX_INT (1 << 14) /* Generate an IRQ after frag. processed */ | ||
316 | #define TSI108_TX_RETRY (0xf << 16) /* 4 bit field indicating num. of retries */ | ||
317 | #define TSI108_TX_COL (1 << 20) /* Set if a collision occurred */ | ||
318 | #define TSI108_TX_LCOL (1 << 24) /* Set if a late collision occurred */ | ||
319 | #define TSI108_TX_UNDER (1 << 25) /* Set if a FIFO underrun occurred */ | ||
320 | #define TSI108_TX_RLIM (1 << 26) /* Set if the retry limit was reached */ | ||
321 | #define TSI108_TX_OK (1 << 30) /* Set if the frame TX was successful */ | ||
322 | #define TSI108_TX_OWN (1 << 31) /* Set if the device owns the descriptor */ | ||
323 | |||
324 | /* Note: the descriptor layouts assume big-endian byte order. */ | ||
325 | typedef struct { | ||
326 | u32 buf0; | ||
327 | u32 buf1; /* Base address of buffer */ | ||
328 | u32 next0; /* Address of next descriptor, if any */ | ||
329 | u32 next1; | ||
330 | u16 vlan; /* VLAN, if override enabled for this packet */ | ||
331 | u16 len; /* Length of buffer in bytes */ | ||
332 | u32 misc; /* See TSI108_TX_* above */ | ||
333 | u32 reserved0; /*reserved0 and reserved1 are added to make the desc */ | ||
334 | u32 reserved1; /* 32-byte aligned */ | ||
335 | } __attribute__ ((aligned(32))) tx_desc; | ||
336 | |||
337 | #define TSI108_RX_EOF (1 << 0) /* End of frame; last fragment of packet */ | ||
338 | #define TSI108_RX_SOF (1 << 1) /* Start of frame; first frag. of packet */ | ||
339 | #define TSI108_RX_VLAN (1 << 2) /* Set on SOF if packet has a VLAN */ | ||
340 | #define TSI108_RX_FTYPE (1 << 3) /* Length/Type field is type, not length */ | ||
341 | #define TSI108_RX_RUNT (1 << 4) /* Packet is less than minimum size */ | ||
342 | #define TSI108_RX_HASH (1 << 7) /* Hash table match */ | ||
343 | #define TSI108_RX_BAD (1 << 8) /* Bad frame */ | ||
344 | #define TSI108_RX_OVER (1 << 9) /* FIFO overrun occurred */ | ||
345 | #define TSI108_RX_TRUNC (1 << 11) /* Packet truncated due to excess length */ | ||
346 | #define TSI108_RX_CRC (1 << 12) /* Packet had a CRC error */ | ||
347 | #define TSI108_RX_INT (1 << 13) /* Generate an IRQ after frag. processed */ | ||
348 | #define TSI108_RX_OWN (1 << 15) /* Set if the device owns the descriptor */ | ||
349 | |||
350 | #define TSI108_RX_SKB_SIZE 1536 /* The RX skb length */ | ||
351 | |||
352 | typedef struct { | ||
353 | u32 buf0; /* Base address of buffer */ | ||
354 | u32 buf1; /* Base address of buffer */ | ||
355 | u32 next0; /* Address of next descriptor, if any */ | ||
356 | u32 next1; /* Address of next descriptor, if any */ | ||
357 | u16 vlan; /* VLAN of received packet, first frag only */ | ||
358 | u16 len; /* Length of received fragment in bytes */ | ||
359 | u16 blen; /* Length of buffer in bytes */ | ||
360 | u16 misc; /* See TSI108_RX_* above */ | ||
361 | u32 reserved0; /* reserved0 and reserved1 are added to make the desc */ | ||
362 | u32 reserved1; /* 32-byte aligned */ | ||
363 | } __attribute__ ((aligned(32))) rx_desc; | ||
364 | |||
365 | #endif /* __TSI108_ETH_H */ | ||
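The reserved words in the descriptor layouts above exist only to pad each descriptor to a 32-byte boundary, as the comments note. A quick stand-alone check of that claim, mirroring the tx_desc fields under the assumption that the 32-bit and 16-bit members pack with no holes (the struct name here is invented; it is not the driver's type):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the tx_desc layout: four pointers/words, the vlan/len pair,
 * the misc word, and two reserved words that pad the whole thing to a
 * 32-byte cache line. */
struct tx_desc_mirror {
        uint32_t buf0, buf1;
        uint32_t next0, next1;
        uint16_t vlan;
        uint16_t len;
        uint32_t misc;
        uint32_t reserved0, reserved1;
} __attribute__((aligned(32)));

int main(void)
{
        printf("sizeof(tx_desc) = %zu\n", sizeof(struct tx_desc_mirror)); /* prints 32 */
        return 0;
}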
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index f6b3a94e97bf..9d67f11422ec 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -1906,9 +1906,7 @@ fill_defaults: | |||
1906 | de->media[i].csr15 = t21041_csr15[i]; | 1906 | de->media[i].csr15 = t21041_csr15[i]; |
1907 | } | 1907 | } |
1908 | 1908 | ||
1909 | de->ee_data = kmalloc(DE_EEPROM_SIZE, GFP_KERNEL); | 1909 | de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL); |
1910 | if (de->ee_data) | ||
1911 | memcpy(de->ee_data, &ee_data[0], DE_EEPROM_SIZE); | ||
1912 | 1910 | ||
1913 | return; | 1911 | return; |
1914 | 1912 | ||
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index 4dd8a0bae860..7f59a3d4fda2 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c | |||
@@ -187,7 +187,7 @@ struct rx_desc { | |||
187 | struct dmfe_board_info { | 187 | struct dmfe_board_info { |
188 | u32 chip_id; /* Chip vendor/Device ID */ | 188 | u32 chip_id; /* Chip vendor/Device ID */ |
189 | u32 chip_revision; /* Chip revision */ | 189 | u32 chip_revision; /* Chip revision */ |
190 | struct DEVICE *next_dev; /* next device */ | 190 | struct DEVICE *dev; /* net device */ |
191 | struct pci_dev *pdev; /* PCI device */ | 191 | struct pci_dev *pdev; /* PCI device */ |
192 | spinlock_t lock; | 192 | spinlock_t lock; |
193 | 193 | ||
@@ -399,6 +399,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
399 | /* Init system & device */ | 399 | /* Init system & device */ |
400 | db = netdev_priv(dev); | 400 | db = netdev_priv(dev); |
401 | 401 | ||
402 | db->dev = dev; | ||
403 | |||
402 | /* Allocate Tx/Rx descriptor memory */ | 404 | /* Allocate Tx/Rx descriptor memory */ |
403 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); | 405 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); |
404 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); | 406 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); |
@@ -426,6 +428,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
426 | dev->poll_controller = &poll_dmfe; | 428 | dev->poll_controller = &poll_dmfe; |
427 | #endif | 429 | #endif |
428 | dev->ethtool_ops = &netdev_ethtool_ops; | 430 | dev->ethtool_ops = &netdev_ethtool_ops; |
431 | netif_carrier_off(db->dev); | ||
429 | spin_lock_init(&db->lock); | 432 | spin_lock_init(&db->lock); |
430 | 433 | ||
431 | pci_read_config_dword(pdev, 0x50, &pci_pmr); | 434 | pci_read_config_dword(pdev, 0x50, &pci_pmr); |
@@ -1050,6 +1053,7 @@ static void netdev_get_drvinfo(struct net_device *dev, | |||
1050 | 1053 | ||
1051 | static const struct ethtool_ops netdev_ethtool_ops = { | 1054 | static const struct ethtool_ops netdev_ethtool_ops = { |
1052 | .get_drvinfo = netdev_get_drvinfo, | 1055 | .get_drvinfo = netdev_get_drvinfo, |
1056 | .get_link = ethtool_op_get_link, | ||
1053 | }; | 1057 | }; |
1054 | 1058 | ||
1055 | /* | 1059 | /* |
@@ -1144,6 +1148,7 @@ static void dmfe_timer(unsigned long data) | |||
1144 | /* Link Failed */ | 1148 | /* Link Failed */ |
1145 | DMFE_DBUG(0, "Link Failed", tmp_cr12); | 1149 | DMFE_DBUG(0, "Link Failed", tmp_cr12); |
1146 | db->link_failed = 1; | 1150 | db->link_failed = 1; |
1151 | netif_carrier_off(db->dev); | ||
1147 | 1152 | ||
1148 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ | 1153 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ |
1149 | /* AUTO or force 1M Homerun/Longrun don't need */ | 1154 | /* AUTO or force 1M Homerun/Longrun don't need */ |
@@ -1166,6 +1171,8 @@ static void dmfe_timer(unsigned long data) | |||
1166 | if ( (db->media_mode & DMFE_AUTO) && | 1171 | if ( (db->media_mode & DMFE_AUTO) && |
1167 | dmfe_sense_speed(db) ) | 1172 | dmfe_sense_speed(db) ) |
1168 | db->link_failed = 1; | 1173 | db->link_failed = 1; |
1174 | else | ||
1175 | netif_carrier_on(db->dev); | ||
1169 | dmfe_process_mode(db); | 1176 | dmfe_process_mode(db); |
1170 | /* SHOW_MEDIA_TYPE(db->op_mode); */ | 1177 | /* SHOW_MEDIA_TYPE(db->op_mode); */ |
1171 | } | 1178 | } |
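The dmfe hunks above wire link state into ethtool: the media-sense timer calls netif_carrier_off()/netif_carrier_on(), and the newly added ethtool_op_get_link hook simply reports that carrier flag. The toy user-space model below illustrates the same pattern; the toy_* names and struct are invented and are not the kernel's data structures.

#include <stdio.h>

/* Toy model: a link-watch timer flips a carrier flag, and the
 * get_link hook just reports it. */
struct toy_netdev {
        int carrier_ok;
};

static void toy_carrier_on(struct toy_netdev *d)  { d->carrier_ok = 1; }
static void toy_carrier_off(struct toy_netdev *d) { d->carrier_ok = 0; }
static unsigned int toy_get_link(const struct toy_netdev *d) { return d->carrier_ok; }

int main(void)
{
        struct toy_netdev dev = { 0 };

        toy_carrier_off(&dev);                    /* link test failed in the timer */
        printf("link: %u\n", toy_get_link(&dev)); /* prints 0 */
        toy_carrier_on(&dev);                     /* media sense succeeded */
        printf("link: %u\n", toy_get_link(&dev)); /* prints 1 */
        return 0;
}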
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index b37888011067..1f05511fa390 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/ethtool.h> | 30 | #include <linux/ethtool.h> |
31 | #include <linux/mii.h> | 31 | #include <linux/mii.h> |
32 | 32 | ||
33 | #include <asm/of_device.h> | 33 | #include <asm/of_platform.h> |
34 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
35 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
36 | #include <asm/io.h> | 36 | #include <asm/io.h> |
@@ -4301,12 +4301,12 @@ static int __init ucc_geth_init(void) | |||
4301 | memcpy(&(ugeth_info[i]), &ugeth_primary_info, | 4301 | memcpy(&(ugeth_info[i]), &ugeth_primary_info, |
4302 | sizeof(ugeth_primary_info)); | 4302 | sizeof(ugeth_primary_info)); |
4303 | 4303 | ||
4304 | return of_register_driver(&ucc_geth_driver); | 4304 | return of_register_platform_driver(&ucc_geth_driver); |
4305 | } | 4305 | } |
4306 | 4306 | ||
4307 | static void __exit ucc_geth_exit(void) | 4307 | static void __exit ucc_geth_exit(void) |
4308 | { | 4308 | { |
4309 | of_unregister_driver(&ucc_geth_driver); | 4309 | of_unregister_platform_driver(&ucc_geth_driver); |
4310 | } | 4310 | } |
4311 | 4311 | ||
4312 | module_init(ucc_geth_init); | 4312 | module_init(ucc_geth_init); |
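The ucc_geth change follows the powerpc rename of the OF bus glue: the header moves from <asm/of_device.h> to <asm/of_platform.h> and registration goes through of_register_platform_driver()/of_unregister_platform_driver(). A sketch of that registration boilerplate is below; the struct layout and callback signatures follow the 2.6.19-era of_platform API as ucc_geth itself uses it, and everything named example_* (including the match strings) is invented for illustration.

#include <linux/module.h>
#include <asm/of_platform.h>

static struct of_device_id example_match[] = {
        { .type = "network", .compatible = "example" },
        {},
};

static int example_probe(struct of_device *ofdev,
                         const struct of_device_id *match)
{
        /* map registers, allocate and register the net_device, ... */
        return 0;
}

static int example_remove(struct of_device *ofdev)
{
        return 0;
}

static struct of_platform_driver example_driver = {
        .name        = "example",
        .match_table = example_match,
        .probe       = example_probe,
        .remove      = example_remove,
};

static int __init example_init(void)
{
        return of_register_platform_driver(&example_driver);
}

static void __exit example_exit(void)
{
        of_unregister_platform_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");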
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index b5d0d7fb647a..d5ab9cf13257 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig | |||
@@ -57,44 +57,6 @@ config COSA | |||
57 | The driver will be compiled as a module: the | 57 | The driver will be compiled as a module: the |
58 | module will be called cosa. | 58 | module will be called cosa. |
59 | 59 | ||
60 | config DSCC4 | ||
61 | tristate "Etinc PCISYNC serial board support" | ||
62 | depends on WAN && PCI && m | ||
63 | help | ||
64 | Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens) | ||
65 | DSCC4 chipset. | ||
66 | |||
67 | This is supposed to work with the four port card. Take a look at | ||
68 | <http://www.cogenit.fr/dscc4/> for further information about the | ||
69 | driver. | ||
70 | |||
71 | To compile this driver as a module, choose M here: the | ||
72 | module will be called dscc4. | ||
73 | |||
74 | config DSCC4_PCISYNC | ||
75 | bool "Etinc PCISYNC features" | ||
76 | depends on DSCC4 | ||
77 | help | ||
78 | Due to Etinc's design choice for its PCISYNC cards, some operations | ||
79 | are only allowed on specific ports of the DSCC4. This option is the | ||
80 | only way for the driver to know that it shouldn't return a success | ||
81 | code for these operations. | ||
82 | |||
83 | Please say Y if your card is an Etinc's PCISYNC. | ||
84 | |||
85 | config DSCC4_PCI_RST | ||
86 | bool "Hard reset support" | ||
87 | depends on DSCC4 | ||
88 | help | ||
89 | Various DSCC4 bugs forbid any reliable software reset of the ASIC. | ||
90 | As a replacement, some vendors provide a way to assert the PCI #RST | ||
91 | pin of DSCC4 through the GPIO port of the card. If you choose Y, | ||
92 | the driver will make use of this feature before module removal | ||
93 | (i.e. rmmod). The feature is known to be available on Commtech's | ||
94 | cards. Contact your manufacturer for details. | ||
95 | |||
96 | Say Y if your card supports this feature. | ||
97 | |||
98 | # | 60 | # |
99 | # Lan Media's board. Currently 1000, 1200, 5200, 5245 | 61 | # Lan Media's board. Currently 1000, 1200, 5200, 5245 |
100 | # | 62 | # |
@@ -323,6 +285,44 @@ config FARSYNC | |||
323 | To compile this driver as a module, choose M here: the | 285 | To compile this driver as a module, choose M here: the |
324 | module will be called farsync. | 286 | module will be called farsync. |
325 | 287 | ||
288 | config DSCC4 | ||
289 | tristate "Etinc PCISYNC serial board support" | ||
290 | depends on HDLC && PCI && m | ||
291 | help | ||
292 | Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens) | ||
293 | DSCC4 chipset. | ||
294 | |||
295 | This is supposed to work with the four port card. Take a look at | ||
296 | <http://www.cogenit.fr/dscc4/> for further information about the | ||
297 | driver. | ||
298 | |||
299 | To compile this driver as a module, choose M here: the | ||
300 | module will be called dscc4. | ||
301 | |||
302 | config DSCC4_PCISYNC | ||
303 | bool "Etinc PCISYNC features" | ||
304 | depends on DSCC4 | ||
305 | help | ||
306 | Due to Etinc's design choice for its PCISYNC cards, some operations | ||
307 | are only allowed on specific ports of the DSCC4. This option is the | ||
308 | only way for the driver to know that it shouldn't return a success | ||
309 | code for these operations. | ||
310 | |||
311 | Please say Y if your card is an Etinc's PCISYNC. | ||
312 | |||
313 | config DSCC4_PCI_RST | ||
314 | bool "Hard reset support" | ||
315 | depends on DSCC4 | ||
316 | help | ||
317 | Various DSCC4 bugs forbid any reliable software reset of the ASIC. | ||
318 | As a replacement, some vendors provide a way to assert the PCI #RST | ||
319 | pin of DSCC4 through the GPIO port of the card. If you choose Y, | ||
320 | the driver will make use of this feature before module removal | ||
321 | (i.e. rmmod). The feature is known to be available on Commtech's | ||
322 | cards. Contact your manufacturer for details. | ||
323 | |||
324 | Say Y if your card supports this feature. | ||
325 | |||
326 | config DLCI | 326 | config DLCI |
327 | tristate "Frame Relay DLCI support" | 327 | tristate "Frame Relay DLCI support" |
328 | depends on WAN | 328 | depends on WAN |
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 0c07b8b7250d..10bcb48e80d0 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c | |||
@@ -595,7 +595,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index); | |||
595 | static void atmel_smooth_qual(struct atmel_private *priv); | 595 | static void atmel_smooth_qual(struct atmel_private *priv); |
596 | static void atmel_writeAR(struct net_device *dev, u16 data); | 596 | static void atmel_writeAR(struct net_device *dev, u16 data); |
597 | static int probe_atmel_card(struct net_device *dev); | 597 | static int probe_atmel_card(struct net_device *dev); |
598 | static int reset_atmel_card(struct net_device *dev ); | 598 | static int reset_atmel_card(struct net_device *dev); |
599 | static void atmel_enter_state(struct atmel_private *priv, int new_state); | 599 | static void atmel_enter_state(struct atmel_private *priv, int new_state); |
600 | int atmel_open (struct net_device *dev); | 600 | int atmel_open (struct net_device *dev); |
601 | 601 | ||
@@ -784,11 +784,11 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, | |||
784 | 784 | ||
785 | static int start_tx(struct sk_buff *skb, struct net_device *dev) | 785 | static int start_tx(struct sk_buff *skb, struct net_device *dev) |
786 | { | 786 | { |
787 | static const u8 SNAP_RFC1024[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; | ||
787 | struct atmel_private *priv = netdev_priv(dev); | 788 | struct atmel_private *priv = netdev_priv(dev); |
788 | struct ieee80211_hdr_4addr header; | 789 | struct ieee80211_hdr_4addr header; |
789 | unsigned long flags; | 790 | unsigned long flags; |
790 | u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; | 791 | u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
791 | u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; | ||
792 | 792 | ||
793 | if (priv->card && priv->present_callback && | 793 | if (priv->card && priv->present_callback && |
794 | !(*priv->present_callback)(priv->card)) { | 794 | !(*priv->present_callback)(priv->card)) { |
@@ -1193,7 +1193,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id) | |||
1193 | 1193 | ||
1194 | atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */ | 1194 | atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */ |
1195 | 1195 | ||
1196 | for (i = 0; i < sizeof(irq_order)/sizeof(u8); i++) | 1196 | for (i = 0; i < ARRAY_SIZE(irq_order); i++) |
1197 | if (isr & irq_order[i]) | 1197 | if (isr & irq_order[i]) |
1198 | break; | 1198 | break; |
1199 | 1199 | ||
@@ -1345,10 +1345,10 @@ int atmel_open(struct net_device *dev) | |||
1345 | atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS, priv->reg_domain); | 1345 | atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS, priv->reg_domain); |
1346 | } else { | 1346 | } else { |
1347 | priv->reg_domain = atmel_get_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS); | 1347 | priv->reg_domain = atmel_get_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS); |
1348 | for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) | 1348 | for (i = 0; i < ARRAY_SIZE(channel_table); i++) |
1349 | if (priv->reg_domain == channel_table[i].reg_domain) | 1349 | if (priv->reg_domain == channel_table[i].reg_domain) |
1350 | break; | 1350 | break; |
1351 | if (i == sizeof(channel_table)/sizeof(channel_table[0])) { | 1351 | if (i == ARRAY_SIZE(channel_table)) { |
1352 | priv->reg_domain = REG_DOMAIN_MKK1; | 1352 | priv->reg_domain = REG_DOMAIN_MKK1; |
1353 | printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name); | 1353 | printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name); |
1354 | } | 1354 | } |
@@ -1393,7 +1393,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel) | |||
1393 | else return suitable default channel */ | 1393 | else return suitable default channel */ |
1394 | int i; | 1394 | int i; |
1395 | 1395 | ||
1396 | for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) | 1396 | for (i = 0; i < ARRAY_SIZE(channel_table); i++) |
1397 | if (priv->reg_domain == channel_table[i].reg_domain) { | 1397 | if (priv->reg_domain == channel_table[i].reg_domain) { |
1398 | if (channel >= channel_table[i].min && | 1398 | if (channel >= channel_table[i].min && |
1399 | channel <= channel_table[i].max) | 1399 | channel <= channel_table[i].max) |
@@ -1437,7 +1437,7 @@ static int atmel_proc_output (char *buf, struct atmel_private *priv) | |||
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | r = "<unknown>"; | 1439 | r = "<unknown>"; |
1440 | for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) | 1440 | for (i = 0; i < ARRAY_SIZE(channel_table); i++) |
1441 | if (priv->reg_domain == channel_table[i].reg_domain) | 1441 | if (priv->reg_domain == channel_table[i].reg_domain) |
1442 | r = channel_table[i].name; | 1442 | r = channel_table[i].name; |
1443 | 1443 | ||
@@ -1736,7 +1736,7 @@ static int atmel_set_encode(struct net_device *dev, | |||
1736 | /* Disable the key */ | 1736 | /* Disable the key */ |
1737 | priv->wep_key_len[index] = 0; | 1737 | priv->wep_key_len[index] = 0; |
1738 | /* Check if the key is not marked as invalid */ | 1738 | /* Check if the key is not marked as invalid */ |
1739 | if(!(dwrq->flags & IW_ENCODE_NOKEY)) { | 1739 | if (!(dwrq->flags & IW_ENCODE_NOKEY)) { |
1740 | /* Cleanup */ | 1740 | /* Cleanup */ |
1741 | memset(priv->wep_keys[index], 0, 13); | 1741 | memset(priv->wep_keys[index], 0, 13); |
1742 | /* Copy the key in the driver */ | 1742 | /* Copy the key in the driver */ |
@@ -1907,7 +1907,7 @@ static int atmel_get_encodeext(struct net_device *dev, | |||
1907 | 1907 | ||
1908 | encoding->flags = idx + 1; | 1908 | encoding->flags = idx + 1; |
1909 | memset(ext, 0, sizeof(*ext)); | 1909 | memset(ext, 0, sizeof(*ext)); |
1910 | 1910 | ||
1911 | if (!priv->wep_is_on) { | 1911 | if (!priv->wep_is_on) { |
1912 | ext->alg = IW_ENCODE_ALG_NONE; | 1912 | ext->alg = IW_ENCODE_ALG_NONE; |
1913 | ext->key_len = 0; | 1913 | ext->key_len = 0; |
@@ -2343,6 +2343,14 @@ static int atmel_get_scan(struct net_device *dev, | |||
2343 | iwe.u.freq.e = 0; | 2343 | iwe.u.freq.e = 0; |
2344 | current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN); | 2344 | current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN); |
2345 | 2345 | ||
2346 | /* Add quality statistics */ | ||
2347 | iwe.cmd = IWEVQUAL; | ||
2348 | iwe.u.qual.level = priv->BSSinfo[i].RSSI; | ||
2349 | iwe.u.qual.qual = iwe.u.qual.level; | ||
2350 | /* iwe.u.qual.noise = SOMETHING */ | ||
2351 | current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA , &iwe, IW_EV_QUAL_LEN); | ||
2352 | |||
2353 | |||
2346 | iwe.cmd = SIOCGIWENCODE; | 2354 | iwe.cmd = SIOCGIWENCODE; |
2347 | if (priv->BSSinfo[i].UsingWEP) | 2355 | if (priv->BSSinfo[i].UsingWEP) |
2348 | iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; | 2356 | iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; |
@@ -2373,7 +2381,7 @@ static int atmel_get_range(struct net_device *dev, | |||
2373 | range->min_nwid = 0x0000; | 2381 | range->min_nwid = 0x0000; |
2374 | range->max_nwid = 0x0000; | 2382 | range->max_nwid = 0x0000; |
2375 | range->num_channels = 0; | 2383 | range->num_channels = 0; |
2376 | for (j = 0; j < sizeof(channel_table)/sizeof(channel_table[0]); j++) | 2384 | for (j = 0; j < ARRAY_SIZE(channel_table); j++) |
2377 | if (priv->reg_domain == channel_table[j].reg_domain) { | 2385 | if (priv->reg_domain == channel_table[j].reg_domain) { |
2378 | range->num_channels = channel_table[j].max - channel_table[j].min + 1; | 2386 | range->num_channels = channel_table[j].max - channel_table[j].min + 1; |
2379 | break; | 2387 | break; |
@@ -2579,9 +2587,9 @@ static const struct iw_priv_args atmel_private_args[] = { | |||
2579 | 2587 | ||
2580 | static const struct iw_handler_def atmel_handler_def = | 2588 | static const struct iw_handler_def atmel_handler_def = |
2581 | { | 2589 | { |
2582 | .num_standard = sizeof(atmel_handler)/sizeof(iw_handler), | 2590 | .num_standard = ARRAY_SIZE(atmel_handler), |
2583 | .num_private = sizeof(atmel_private_handler)/sizeof(iw_handler), | 2591 | .num_private = ARRAY_SIZE(atmel_private_handler), |
2584 | .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args), | 2592 | .num_private_args = ARRAY_SIZE(atmel_private_args), |
2585 | .standard = (iw_handler *) atmel_handler, | 2593 | .standard = (iw_handler *) atmel_handler, |
2586 | .private = (iw_handler *) atmel_private_handler, | 2594 | .private = (iw_handler *) atmel_private_handler, |
2587 | .private_args = (struct iw_priv_args *) atmel_private_args, | 2595 | .private_args = (struct iw_priv_args *) atmel_private_args, |
@@ -2645,7 +2653,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
2645 | 2653 | ||
2646 | domain[REGDOMAINSZ] = 0; | 2654 | domain[REGDOMAINSZ] = 0; |
2647 | rc = -EINVAL; | 2655 | rc = -EINVAL; |
2648 | for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) { | 2656 | for (i = 0; i < ARRAY_SIZE(channel_table); i++) { |
2649 | /* strcasecmp doesn't exist in the library */ | 2657 | /* strcasecmp doesn't exist in the library */ |
2650 | char *a = channel_table[i].name; | 2658 | char *a = channel_table[i].name; |
2651 | char *b = domain; | 2659 | char *b = domain; |
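Most of the atmel.c hunks are one mechanical conversion: open-coded sizeof(table)/sizeof(table[0]) element counts become ARRAY_SIZE(table), and the SNAP header array built on the stack for every transmit becomes a single static const copy. A minimal sketch of both idioms; the table contents here are invented for illustration and are not the driver's real channel_table.

#include <linux/kernel.h>       /* ARRAY_SIZE() */
#include <linux/types.h>

struct channel_range {
        int reg_domain;
        const char *name;
};

static const struct channel_range channel_table[] = {
        { 0x10, "FCC" },
        { 0x30, "ETSI" },
};

static const char *domain_name(int reg_domain)
{
        int i;

        /* ARRAY_SIZE() expands to sizeof(arr)/sizeof((arr)[0]) */
        for (i = 0; i < ARRAY_SIZE(channel_table); i++)
                if (channel_table[i].reg_domain == reg_domain)
                        return channel_table[i].name;
        return "<unknown>";
}

/* static const: one read-only copy of the 802.2 SNAP/LLC header bytes,
 * instead of rebuilding the array on the stack on every call. */
static const u8 snap_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };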
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c index 785664090bb4..5c410989c4d7 100644 --- a/drivers/net/wireless/atmel_cs.c +++ b/drivers/net/wireless/atmel_cs.c | |||
@@ -5,12 +5,12 @@ | |||
5 | Copyright 2000-2001 ATMEL Corporation. | 5 | Copyright 2000-2001 ATMEL Corporation. |
6 | Copyright 2003 Simon Kelley. | 6 | Copyright 2003 Simon Kelley. |
7 | 7 | ||
8 | This code was developed from version 2.1.1 of the Atmel drivers, | 8 | This code was developed from version 2.1.1 of the Atmel drivers, |
9 | released by Atmel corp. under the GPL in December 2002. It also | 9 | released by Atmel corp. under the GPL in December 2002. It also |
10 | includes code from the Linux aironet drivers (C) Benjamin Reed, | 10 | includes code from the Linux aironet drivers (C) Benjamin Reed, |
11 | and the Linux PCMCIA package, (C) David Hinds. | 11 | and the Linux PCMCIA package, (C) David Hinds. |
12 | 12 | ||
13 | For all queries about this code, please contact the current author, | 13 | For all queries about this code, please contact the current author, |
14 | Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation. | 14 | Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation. |
15 | 15 | ||
16 | This program is free software; you can redistribute it and/or modify | 16 | This program is free software; you can redistribute it and/or modify |
@@ -87,7 +87,7 @@ MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards"); | |||
87 | event is received. The config() and release() entry points are | 87 | event is received. The config() and release() entry points are |
88 | used to configure or release a socket, in response to card | 88 | used to configure or release a socket, in response to card |
89 | insertion and ejection events. They are invoked from the atmel_cs | 89 | insertion and ejection events. They are invoked from the atmel_cs |
90 | event handler. | 90 | event handler. |
91 | */ | 91 | */ |
92 | 92 | ||
93 | static int atmel_config(struct pcmcia_device *link); | 93 | static int atmel_config(struct pcmcia_device *link); |
@@ -133,22 +133,22 @@ static void atmel_detach(struct pcmcia_device *p_dev); | |||
133 | device IO routines can use a flag like this to throttle IO to a | 133 | device IO routines can use a flag like this to throttle IO to a |
134 | card that is not ready to accept it. | 134 | card that is not ready to accept it. |
135 | */ | 135 | */ |
136 | 136 | ||
137 | typedef struct local_info_t { | 137 | typedef struct local_info_t { |
138 | dev_node_t node; | 138 | dev_node_t node; |
139 | struct net_device *eth_dev; | 139 | struct net_device *eth_dev; |
140 | } local_info_t; | 140 | } local_info_t; |
141 | 141 | ||
142 | /*====================================================================== | 142 | /*====================================================================== |
143 | 143 | ||
144 | atmel_attach() creates an "instance" of the driver, allocating | 144 | atmel_attach() creates an "instance" of the driver, allocating |
145 | local data structures for one device. The device is registered | 145 | local data structures for one device. The device is registered |
146 | with Card Services. | 146 | with Card Services. |
147 | 147 | ||
148 | The dev_link structure is initialized, but we don't actually | 148 | The dev_link structure is initialized, but we don't actually |
149 | configure the card at this point -- we wait until we receive a | 149 | configure the card at this point -- we wait until we receive a |
150 | card insertion event. | 150 | card insertion event. |
151 | 151 | ||
152 | ======================================================================*/ | 152 | ======================================================================*/ |
153 | 153 | ||
154 | static int atmel_probe(struct pcmcia_device *p_dev) | 154 | static int atmel_probe(struct pcmcia_device *p_dev) |
@@ -184,12 +184,12 @@ static int atmel_probe(struct pcmcia_device *p_dev) | |||
184 | } /* atmel_attach */ | 184 | } /* atmel_attach */ |
185 | 185 | ||
186 | /*====================================================================== | 186 | /*====================================================================== |
187 | 187 | ||
188 | This deletes a driver "instance". The device is de-registered | 188 | This deletes a driver "instance". The device is de-registered |
189 | with Card Services. If it has been released, all local data | 189 | with Card Services. If it has been released, all local data |
190 | structures are freed. Otherwise, the structures will be freed | 190 | structures are freed. Otherwise, the structures will be freed |
191 | when the device is released. | 191 | when the device is released. |
192 | 192 | ||
193 | ======================================================================*/ | 193 | ======================================================================*/ |
194 | 194 | ||
195 | static void atmel_detach(struct pcmcia_device *link) | 195 | static void atmel_detach(struct pcmcia_device *link) |
@@ -202,11 +202,11 @@ static void atmel_detach(struct pcmcia_device *link) | |||
202 | } | 202 | } |
203 | 203 | ||
204 | /*====================================================================== | 204 | /*====================================================================== |
205 | 205 | ||
206 | atmel_config() is scheduled to run after a CARD_INSERTION event | 206 | atmel_config() is scheduled to run after a CARD_INSERTION event |
207 | is received, to configure the PCMCIA socket, and to make the | 207 | is received, to configure the PCMCIA socket, and to make the |
208 | device available to the system. | 208 | device available to the system. |
209 | 209 | ||
210 | ======================================================================*/ | 210 | ======================================================================*/ |
211 | 211 | ||
212 | #define CS_CHECK(fn, ret) \ | 212 | #define CS_CHECK(fn, ret) \ |
@@ -237,12 +237,12 @@ static int atmel_config(struct pcmcia_device *link) | |||
237 | did = handle_to_dev(link).driver_data; | 237 | did = handle_to_dev(link).driver_data; |
238 | 238 | ||
239 | DEBUG(0, "atmel_config(0x%p)\n", link); | 239 | DEBUG(0, "atmel_config(0x%p)\n", link); |
240 | 240 | ||
241 | tuple.Attributes = 0; | 241 | tuple.Attributes = 0; |
242 | tuple.TupleData = buf; | 242 | tuple.TupleData = buf; |
243 | tuple.TupleDataMax = sizeof(buf); | 243 | tuple.TupleDataMax = sizeof(buf); |
244 | tuple.TupleOffset = 0; | 244 | tuple.TupleOffset = 0; |
245 | 245 | ||
246 | /* | 246 | /* |
247 | This reads the card's CONFIG tuple to find its configuration | 247 | This reads the card's CONFIG tuple to find its configuration |
248 | registers. | 248 | registers. |
@@ -258,7 +258,7 @@ static int atmel_config(struct pcmcia_device *link) | |||
258 | In this loop, we scan the CIS for configuration table entries, | 258 | In this loop, we scan the CIS for configuration table entries, |
259 | each of which describes a valid card configuration, including | 259 | each of which describes a valid card configuration, including |
260 | voltage, IO window, memory window, and interrupt settings. | 260 | voltage, IO window, memory window, and interrupt settings. |
261 | 261 | ||
262 | We make no assumptions about the card to be configured: we use | 262 | We make no assumptions about the card to be configured: we use |
263 | just the information available in the CIS. In an ideal world, | 263 | just the information available in the CIS. In an ideal world, |
264 | this would work for any PCMCIA card, but it requires a complete | 264 | this would work for any PCMCIA card, but it requires a complete |
@@ -274,17 +274,17 @@ static int atmel_config(struct pcmcia_device *link) | |||
274 | if (pcmcia_get_tuple_data(link, &tuple) != 0 || | 274 | if (pcmcia_get_tuple_data(link, &tuple) != 0 || |
275 | pcmcia_parse_tuple(link, &tuple, &parse) != 0) | 275 | pcmcia_parse_tuple(link, &tuple, &parse) != 0) |
276 | goto next_entry; | 276 | goto next_entry; |
277 | 277 | ||
278 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg; | 278 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg; |
279 | if (cfg->index == 0) goto next_entry; | 279 | if (cfg->index == 0) goto next_entry; |
280 | link->conf.ConfigIndex = cfg->index; | 280 | link->conf.ConfigIndex = cfg->index; |
281 | 281 | ||
282 | /* Does this card need audio output? */ | 282 | /* Does this card need audio output? */ |
283 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { | 283 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { |
284 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 284 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
285 | link->conf.Status = CCSR_AUDIO_ENA; | 285 | link->conf.Status = CCSR_AUDIO_ENA; |
286 | } | 286 | } |
287 | 287 | ||
288 | /* Use power settings for Vcc and Vpp if present */ | 288 | /* Use power settings for Vcc and Vpp if present */ |
289 | /* Note that the CIS values need to be rescaled */ | 289 | /* Note that the CIS values need to be rescaled */ |
290 | if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) | 290 | if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) |
@@ -293,11 +293,11 @@ static int atmel_config(struct pcmcia_device *link) | |||
293 | else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM)) | 293 | else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM)) |
294 | link->conf.Vpp = | 294 | link->conf.Vpp = |
295 | dflt.vpp1.param[CISTPL_POWER_VNOM]/10000; | 295 | dflt.vpp1.param[CISTPL_POWER_VNOM]/10000; |
296 | 296 | ||
297 | /* Do we need to allocate an interrupt? */ | 297 | /* Do we need to allocate an interrupt? */ |
298 | if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) | 298 | if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) |
299 | link->conf.Attributes |= CONF_ENABLE_IRQ; | 299 | link->conf.Attributes |= CONF_ENABLE_IRQ; |
300 | 300 | ||
301 | /* IO window settings */ | 301 | /* IO window settings */ |
302 | link->io.NumPorts1 = link->io.NumPorts2 = 0; | 302 | link->io.NumPorts1 = link->io.NumPorts2 = 0; |
303 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { | 303 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { |
@@ -315,18 +315,18 @@ static int atmel_config(struct pcmcia_device *link) | |||
315 | link->io.NumPorts2 = io->win[1].len; | 315 | link->io.NumPorts2 = io->win[1].len; |
316 | } | 316 | } |
317 | } | 317 | } |
318 | 318 | ||
319 | /* This reserves IO space but doesn't actually enable it */ | 319 | /* This reserves IO space but doesn't actually enable it */ |
320 | if (pcmcia_request_io(link, &link->io) != 0) | 320 | if (pcmcia_request_io(link, &link->io) != 0) |
321 | goto next_entry; | 321 | goto next_entry; |
322 | 322 | ||
323 | /* If we got this far, we're cool! */ | 323 | /* If we got this far, we're cool! */ |
324 | break; | 324 | break; |
325 | 325 | ||
326 | next_entry: | 326 | next_entry: |
327 | CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple)); | 327 | CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple)); |
328 | } | 328 | } |
329 | 329 | ||
330 | /* | 330 | /* |
331 | Allocate an interrupt line. Note that this does not assign a | 331 | Allocate an interrupt line. Note that this does not assign a |
332 | handler to the interrupt, unless the 'Handler' member of the | 332 | handler to the interrupt, unless the 'Handler' member of the |
@@ -334,31 +334,31 @@ static int atmel_config(struct pcmcia_device *link) | |||
334 | */ | 334 | */ |
335 | if (link->conf.Attributes & CONF_ENABLE_IRQ) | 335 | if (link->conf.Attributes & CONF_ENABLE_IRQ) |
336 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 336 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); |
337 | 337 | ||
338 | /* | 338 | /* |
339 | This actually configures the PCMCIA socket -- setting up | 339 | This actually configures the PCMCIA socket -- setting up |
340 | the I/O windows and the interrupt mapping, and putting the | 340 | the I/O windows and the interrupt mapping, and putting the |
341 | card and host interface into "Memory and IO" mode. | 341 | card and host interface into "Memory and IO" mode. |
342 | */ | 342 | */ |
343 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 343 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); |
344 | 344 | ||
345 | if (link->irq.AssignedIRQ == 0) { | 345 | if (link->irq.AssignedIRQ == 0) { |
346 | printk(KERN_ALERT | 346 | printk(KERN_ALERT |
347 | "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config."); | 347 | "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config."); |
348 | goto cs_failed; | 348 | goto cs_failed; |
349 | } | 349 | } |
350 | 350 | ||
351 | ((local_info_t*)link->priv)->eth_dev = | 351 | ((local_info_t*)link->priv)->eth_dev = |
352 | init_atmel_card(link->irq.AssignedIRQ, | 352 | init_atmel_card(link->irq.AssignedIRQ, |
353 | link->io.BasePort1, | 353 | link->io.BasePort1, |
354 | did ? did->driver_info : ATMEL_FW_TYPE_NONE, | 354 | did ? did->driver_info : ATMEL_FW_TYPE_NONE, |
355 | &handle_to_dev(link), | 355 | &handle_to_dev(link), |
356 | card_present, | 356 | card_present, |
357 | link); | 357 | link); |
358 | if (!((local_info_t*)link->priv)->eth_dev) | 358 | if (!((local_info_t*)link->priv)->eth_dev) |
359 | goto cs_failed; | 359 | goto cs_failed; |
360 | 360 | ||
361 | 361 | ||
362 | /* | 362 | /* |
363 | At this point, the dev_node_t structure(s) need to be | 363 | At this point, the dev_node_t structure(s) need to be |
364 | initialized and arranged in a linked list at link->dev_node. | 364 | initialized and arranged in a linked list at link->dev_node. |
@@ -376,11 +376,11 @@ static int atmel_config(struct pcmcia_device *link) | |||
376 | } | 376 | } |
377 | 377 | ||
378 | /*====================================================================== | 378 | /*====================================================================== |
379 | 379 | ||
380 | After a card is removed, atmel_release() will unregister the | 380 | After a card is removed, atmel_release() will unregister the |
381 | device, and release the PCMCIA configuration. If the device is | 381 | device, and release the PCMCIA configuration. If the device is |
382 | still open, this will be postponed until it is closed. | 382 | still open, this will be postponed until it is closed. |
383 | 383 | ||
384 | ======================================================================*/ | 384 | ======================================================================*/ |
385 | 385 | ||
386 | static void atmel_release(struct pcmcia_device *link) | 386 | static void atmel_release(struct pcmcia_device *link) |
@@ -517,7 +517,7 @@ static void atmel_cs_cleanup(void) | |||
517 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | 517 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
518 | STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | 518 | STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
519 | IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 519 | IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
520 | POSSIBILITY OF SUCH DAMAGE. | 520 | POSSIBILITY OF SUCH DAMAGE. |
521 | */ | 521 | */ |
522 | 522 | ||
523 | module_init(atmel_cs_init); | 523 | module_init(atmel_cs_init); |
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c index 3bfa791c323d..92f87fbe750f 100644 --- a/drivers/net/wireless/atmel_pci.c +++ b/drivers/net/wireless/atmel_pci.c | |||
@@ -53,18 +53,18 @@ static int __devinit atmel_pci_probe(struct pci_dev *pdev, | |||
53 | const struct pci_device_id *pent) | 53 | const struct pci_device_id *pent) |
54 | { | 54 | { |
55 | struct net_device *dev; | 55 | struct net_device *dev; |
56 | 56 | ||
57 | if (pci_enable_device(pdev)) | 57 | if (pci_enable_device(pdev)) |
58 | return -ENODEV; | 58 | return -ENODEV; |
59 | 59 | ||
60 | pci_set_master(pdev); | 60 | pci_set_master(pdev); |
61 | 61 | ||
62 | dev = init_atmel_card(pdev->irq, pdev->resource[1].start, | 62 | dev = init_atmel_card(pdev->irq, pdev->resource[1].start, |
63 | ATMEL_FW_TYPE_506, | 63 | ATMEL_FW_TYPE_506, |
64 | &pdev->dev, NULL, NULL); | 64 | &pdev->dev, NULL, NULL); |
65 | if (!dev) | 65 | if (!dev) |
66 | return -ENODEV; | 66 | return -ENODEV; |
67 | 67 | ||
68 | pci_set_drvdata(pdev, dev); | 68 | pci_set_drvdata(pdev, dev); |
69 | return 0; | 69 | return 0; |
70 | } | 70 | } |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h index d6a8bf09878e..94dfb92fab5c 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx.h | |||
@@ -159,6 +159,7 @@ | |||
159 | 159 | ||
160 | /* Chipcommon registers. */ | 160 | /* Chipcommon registers. */ |
161 | #define BCM43xx_CHIPCOMMON_CAPABILITIES 0x04 | 161 | #define BCM43xx_CHIPCOMMON_CAPABILITIES 0x04 |
162 | #define BCM43xx_CHIPCOMMON_CTL 0x28 | ||
162 | #define BCM43xx_CHIPCOMMON_PLLONDELAY 0xB0 | 163 | #define BCM43xx_CHIPCOMMON_PLLONDELAY 0xB0 |
163 | #define BCM43xx_CHIPCOMMON_FREFSELDELAY 0xB4 | 164 | #define BCM43xx_CHIPCOMMON_FREFSELDELAY 0xB4 |
164 | #define BCM43xx_CHIPCOMMON_SLOWCLKCTL 0xB8 | 165 | #define BCM43xx_CHIPCOMMON_SLOWCLKCTL 0xB8 |
@@ -172,6 +173,33 @@ | |||
172 | /* SBTOPCI2 values. */ | 173 | /* SBTOPCI2 values. */ |
173 | #define BCM43xx_SBTOPCI2_PREFETCH 0x4 | 174 | #define BCM43xx_SBTOPCI2_PREFETCH 0x4 |
174 | #define BCM43xx_SBTOPCI2_BURST 0x8 | 175 | #define BCM43xx_SBTOPCI2_BURST 0x8 |
176 | #define BCM43xx_SBTOPCI2_MEMREAD_MULTI 0x20 | ||
177 | |||
178 | /* PCI-E core registers. */ | ||
179 | #define BCM43xx_PCIECORE_REG_ADDR 0x0130 | ||
180 | #define BCM43xx_PCIECORE_REG_DATA 0x0134 | ||
181 | #define BCM43xx_PCIECORE_MDIO_CTL 0x0128 | ||
182 | #define BCM43xx_PCIECORE_MDIO_DATA 0x012C | ||
183 | |||
184 | /* PCI-E registers. */ | ||
185 | #define BCM43xx_PCIE_TLP_WORKAROUND 0x0004 | ||
186 | #define BCM43xx_PCIE_DLLP_LINKCTL 0x0100 | ||
187 | |||
188 | /* PCI-E MDIO bits. */ | ||
189 | #define BCM43xx_PCIE_MDIO_ST 0x40000000 | ||
190 | #define BCM43xx_PCIE_MDIO_WT 0x10000000 | ||
191 | #define BCM43xx_PCIE_MDIO_DEV 22 | ||
192 | #define BCM43xx_PCIE_MDIO_REG 18 | ||
193 | #define BCM43xx_PCIE_MDIO_TA 0x00020000 | ||
194 | #define BCM43xx_PCIE_MDIO_TC 0x0100 | ||
195 | |||
196 | /* MDIO devices. */ | ||
197 | #define BCM43xx_MDIO_SERDES_RX 0x1F | ||
198 | |||
199 | /* SERDES RX registers. */ | ||
200 | #define BCM43xx_SERDES_RXTIMER 0x2 | ||
201 | #define BCM43xx_SERDES_CDR 0x6 | ||
202 | #define BCM43xx_SERDES_CDR_BW 0x7 | ||
175 | 203 | ||
176 | /* Chipcommon capabilities. */ | 204 | /* Chipcommon capabilities. */ |
177 | #define BCM43xx_CAPABILITIES_PCTL 0x00040000 | 205 | #define BCM43xx_CAPABILITIES_PCTL 0x00040000 |
@@ -221,6 +249,7 @@ | |||
221 | #define BCM43xx_COREID_USB20_HOST 0x819 | 249 | #define BCM43xx_COREID_USB20_HOST 0x819 |
222 | #define BCM43xx_COREID_USB20_DEV 0x81a | 250 | #define BCM43xx_COREID_USB20_DEV 0x81a |
223 | #define BCM43xx_COREID_SDIO_HOST 0x81b | 251 | #define BCM43xx_COREID_SDIO_HOST 0x81b |
252 | #define BCM43xx_COREID_PCIE 0x820 | ||
224 | 253 | ||
225 | /* Core Information Registers */ | 254 | /* Core Information Registers */ |
226 | #define BCM43xx_CIR_BASE 0xf00 | 255 | #define BCM43xx_CIR_BASE 0xf00 |
@@ -365,6 +394,9 @@ | |||
365 | #define BCM43xx_DEFAULT_SHORT_RETRY_LIMIT 7 | 394 | #define BCM43xx_DEFAULT_SHORT_RETRY_LIMIT 7 |
366 | #define BCM43xx_DEFAULT_LONG_RETRY_LIMIT 4 | 395 | #define BCM43xx_DEFAULT_LONG_RETRY_LIMIT 4 |
367 | 396 | ||
397 | /* FIXME: the next line is a guess as to what the maximum RSSI value might be */ | ||
398 | #define RX_RSSI_MAX 60 | ||
399 | |||
368 | /* Max size of a security key */ | 400 | /* Max size of a security key */ |
369 | #define BCM43xx_SEC_KEYSIZE 16 | 401 | #define BCM43xx_SEC_KEYSIZE 16 |
370 | /* Security algorithms. */ | 402 | /* Security algorithms. */ |
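The new BCM43xx_PCIECORE_* constants describe an indirect register window: the PCI-E core exposes an address register and a data register, and a target register is reached by writing its offset to the address register first. The helpers added later in bcm43xx_main.c (bcm43xx_pcie_reg_read/write) do exactly this; the stripped-down sketch below shows only the access pattern, with a bare void __iomem base and the generic ioread32()/iowrite32() accessors standing in for the driver's own MMIO wrappers.

#include <linux/types.h>
#include <linux/io.h>

#define PCIECORE_REG_ADDR       0x0130
#define PCIECORE_REG_DATA       0x0134

static u32 pcie_reg_read(void __iomem *mmio, u32 address)
{
        iowrite32(address, mmio + PCIECORE_REG_ADDR);   /* select target */
        return ioread32(mmio + PCIECORE_REG_DATA);      /* then transfer */
}

static void pcie_reg_write(void __iomem *mmio, u32 address, u32 data)
{
        iowrite32(address, mmio + PCIECORE_REG_ADDR);
        iowrite32(data, mmio + PCIECORE_REG_DATA);
}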
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index a1b783813d8e..5b3c27359a18 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -130,6 +130,10 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for debugging."); | |||
130 | { PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 130 | { PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
131 | /* Broadcom 4307 802.11b */ | 131 | /* Broadcom 4307 802.11b */ |
132 | { PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 132 | { PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
133 | /* Broadcom 4311 802.11(a)/b/g */ | ||
134 | { PCI_VENDOR_ID_BROADCOM, 0x4311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
135 | /* Broadcom 4312 802.11a/b/g */ | ||
136 | { PCI_VENDOR_ID_BROADCOM, 0x4312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
133 | /* Broadcom 4318 802.11b/g */ | 137 | /* Broadcom 4318 802.11b/g */ |
134 | { PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 138 | { PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
135 | /* Broadcom 4319 802.11a/b/g */ | 139 | /* Broadcom 4319 802.11a/b/g */ |
@@ -2600,8 +2604,9 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm) | |||
2600 | /* fetch sb_id_hi from core information registers */ | 2604 | /* fetch sb_id_hi from core information registers */ |
2601 | sb_id_hi = bcm43xx_read32(bcm, BCM43xx_CIR_SB_ID_HI); | 2605 | sb_id_hi = bcm43xx_read32(bcm, BCM43xx_CIR_SB_ID_HI); |
2602 | 2606 | ||
2603 | core_id = (sb_id_hi & 0xFFF0) >> 4; | 2607 | core_id = (sb_id_hi & 0x8FF0) >> 4; |
2604 | core_rev = (sb_id_hi & 0xF); | 2608 | core_rev = (sb_id_hi & 0x7000) >> 8; |
2609 | core_rev |= (sb_id_hi & 0xF); | ||
2605 | core_vendor = (sb_id_hi & 0xFFFF0000) >> 16; | 2610 | core_vendor = (sb_id_hi & 0xFFFF0000) >> 16; |
2606 | 2611 | ||
2607 | /* if present, chipcommon is always core 0; read the chipid from it */ | 2612 | /* if present, chipcommon is always core 0; read the chipid from it */ |
@@ -2679,14 +2684,10 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm) | |||
2679 | bcm->chip_id, bcm->chip_rev); | 2684 | bcm->chip_id, bcm->chip_rev); |
2680 | dprintk(KERN_INFO PFX "Number of cores: %d\n", core_count); | 2685 | dprintk(KERN_INFO PFX "Number of cores: %d\n", core_count); |
2681 | if (bcm->core_chipcommon.available) { | 2686 | if (bcm->core_chipcommon.available) { |
2682 | dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n", | 2687 | dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x\n", |
2683 | core_id, core_rev, core_vendor, | 2688 | core_id, core_rev, core_vendor); |
2684 | bcm43xx_core_enabled(bcm) ? "enabled" : "disabled"); | ||
2685 | } | ||
2686 | |||
2687 | if (bcm->core_chipcommon.available) | ||
2688 | current_core = 1; | 2689 | current_core = 1; |
2689 | else | 2690 | } else |
2690 | current_core = 0; | 2691 | current_core = 0; |
2691 | for ( ; current_core < core_count; current_core++) { | 2692 | for ( ; current_core < core_count; current_core++) { |
2692 | struct bcm43xx_coreinfo *core; | 2693 | struct bcm43xx_coreinfo *core; |
@@ -2704,13 +2705,13 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm) | |||
2704 | core_rev = (sb_id_hi & 0xF); | 2705 | core_rev = (sb_id_hi & 0xF); |
2705 | core_vendor = (sb_id_hi & 0xFFFF0000) >> 16; | 2706 | core_vendor = (sb_id_hi & 0xFFFF0000) >> 16; |
2706 | 2707 | ||
2707 | dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n", | 2708 | dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x\n", |
2708 | current_core, core_id, core_rev, core_vendor, | 2709 | current_core, core_id, core_rev, core_vendor); |
2709 | bcm43xx_core_enabled(bcm) ? "enabled" : "disabled" ); | ||
2710 | 2710 | ||
2711 | core = NULL; | 2711 | core = NULL; |
2712 | switch (core_id) { | 2712 | switch (core_id) { |
2713 | case BCM43xx_COREID_PCI: | 2713 | case BCM43xx_COREID_PCI: |
2714 | case BCM43xx_COREID_PCIE: | ||
2714 | core = &bcm->core_pci; | 2715 | core = &bcm->core_pci; |
2715 | if (core->available) { | 2716 | if (core->available) { |
2716 | printk(KERN_WARNING PFX "Multiple PCI cores found.\n"); | 2717 | printk(KERN_WARNING PFX "Multiple PCI cores found.\n"); |
@@ -2749,12 +2750,12 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm) | |||
2749 | case 6: | 2750 | case 6: |
2750 | case 7: | 2751 | case 7: |
2751 | case 9: | 2752 | case 9: |
2753 | case 10: | ||
2752 | break; | 2754 | break; |
2753 | default: | 2755 | default: |
2754 | printk(KERN_ERR PFX "Error: Unsupported 80211 core revision %u\n", | 2756 | printk(KERN_WARNING PFX |
2757 | "Unsupported 80211 core revision %u\n", | ||
2755 | core_rev); | 2758 | core_rev); |
2756 | err = -ENODEV; | ||
2757 | goto out; | ||
2758 | } | 2759 | } |
2759 | bcm->nr_80211_available++; | 2760 | bcm->nr_80211_available++; |
2760 | core->priv = ext_80211; | 2761 | core->priv = ext_80211; |
@@ -2868,16 +2869,11 @@ static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm, | |||
2868 | u32 sbimconfiglow; | 2869 | u32 sbimconfiglow; |
2869 | u8 limit; | 2870 | u8 limit; |
2870 | 2871 | ||
2871 | if (bcm->chip_rev < 5) { | 2872 | if (bcm->core_pci.rev <= 5 && bcm->core_pci.id != BCM43xx_COREID_PCIE) { |
2872 | sbimconfiglow = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW); | 2873 | sbimconfiglow = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW); |
2873 | sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK; | 2874 | sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK; |
2874 | sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK; | 2875 | sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK; |
2875 | if (bcm->bustype == BCM43xx_BUSTYPE_PCI) | 2876 | sbimconfiglow |= 0x32; |
2876 | sbimconfiglow |= 0x32; | ||
2877 | else if (bcm->bustype == BCM43xx_BUSTYPE_SB) | ||
2878 | sbimconfiglow |= 0x53; | ||
2879 | else | ||
2880 | assert(0); | ||
2881 | bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, sbimconfiglow); | 2877 | bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, sbimconfiglow); |
2882 | } | 2878 | } |
2883 | 2879 | ||
@@ -3004,22 +3000,64 @@ static void bcm43xx_pcicore_broadcast_value(struct bcm43xx_private *bcm, | |||
3004 | 3000 | ||
3005 | static int bcm43xx_pcicore_commit_settings(struct bcm43xx_private *bcm) | 3001 | static int bcm43xx_pcicore_commit_settings(struct bcm43xx_private *bcm) |
3006 | { | 3002 | { |
3007 | int err; | 3003 | int err = 0; |
3008 | struct bcm43xx_coreinfo *old_core; | ||
3009 | 3004 | ||
3010 | old_core = bcm->current_core; | 3005 | bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); |
3011 | err = bcm43xx_switch_core(bcm, &bcm->core_pci); | ||
3012 | if (err) | ||
3013 | goto out; | ||
3014 | 3006 | ||
3015 | bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000); | 3007 | if (bcm->core_chipcommon.available) { |
3008 | err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon); | ||
3009 | if (err) | ||
3010 | goto out; | ||
3011 | |||
3012 | bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000); | ||
3013 | |||
3014 | /* this function is always called when a PCI core is mapped */ | ||
3015 | err = bcm43xx_switch_core(bcm, &bcm->core_pci); | ||
3016 | if (err) | ||
3017 | goto out; | ||
3018 | } else | ||
3019 | bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000); | ||
3020 | |||
3021 | bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate); | ||
3016 | 3022 | ||
3017 | bcm43xx_switch_core(bcm, old_core); | ||
3018 | assert(err == 0); | ||
3019 | out: | 3023 | out: |
3020 | return err; | 3024 | return err; |
3021 | } | 3025 | } |
3022 | 3026 | ||
3027 | static u32 bcm43xx_pcie_reg_read(struct bcm43xx_private *bcm, u32 address) | ||
3028 | { | ||
3029 | bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address); | ||
3030 | return bcm43xx_read32(bcm, BCM43xx_PCIECORE_REG_DATA); | ||
3031 | } | ||
3032 | |||
3033 | static void bcm43xx_pcie_reg_write(struct bcm43xx_private *bcm, u32 address, | ||
3034 | u32 data) | ||
3035 | { | ||
3036 | bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address); | ||
3037 | bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_DATA, data); | ||
3038 | } | ||
3039 | |||
3040 | static void bcm43xx_pcie_mdio_write(struct bcm43xx_private *bcm, u8 dev, u8 reg, | ||
3041 | u16 data) | ||
3042 | { | ||
3043 | int i; | ||
3044 | |||
3045 | bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0x0082); | ||
3046 | bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_DATA, BCM43xx_PCIE_MDIO_ST | | ||
3047 | BCM43xx_PCIE_MDIO_WT | (dev << BCM43xx_PCIE_MDIO_DEV) | | ||
3048 | (reg << BCM43xx_PCIE_MDIO_REG) | BCM43xx_PCIE_MDIO_TA | | ||
3049 | data); | ||
3050 | udelay(10); | ||
3051 | |||
3052 | for (i = 0; i < 10; i++) { | ||
3053 | if (bcm43xx_read32(bcm, BCM43xx_PCIECORE_MDIO_CTL) & | ||
3054 | BCM43xx_PCIE_MDIO_TC) | ||
3055 | break; | ||
3056 | msleep(1); | ||
3057 | } | ||
3058 | bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0); | ||
3059 | } | ||
3060 | |||
3023 | /* Make an I/O Core usable. "core_mask" is the bitmask of the cores to enable. | 3061 | /* Make an I/O Core usable. "core_mask" is the bitmask of the cores to enable. |
3024 | * To enable core 0, pass a core_mask of 1<<0 | 3062 | * To enable core 0, pass a core_mask of 1<<0 |
3025 | */ | 3063 | */ |
@@ -3039,7 +3077,8 @@ static int bcm43xx_setup_backplane_pci_connection(struct bcm43xx_private *bcm, | |||
3039 | if (err) | 3077 | if (err) |
3040 | goto out; | 3078 | goto out; |
3041 | 3079 | ||
3042 | if (bcm->core_pci.rev < 6) { | 3080 | if (bcm->current_core->rev < 6 || |
3081 | bcm->current_core->id == BCM43xx_COREID_PCI) { | ||
3043 | value = bcm43xx_read32(bcm, BCM43xx_CIR_SBINTVEC); | 3082 | value = bcm43xx_read32(bcm, BCM43xx_CIR_SBINTVEC); |
3044 | value |= (1 << backplane_flag_nr); | 3083 | value |= (1 << backplane_flag_nr); |
3045 | bcm43xx_write32(bcm, BCM43xx_CIR_SBINTVEC, value); | 3084 | bcm43xx_write32(bcm, BCM43xx_CIR_SBINTVEC, value); |
@@ -3057,21 +3096,46 @@ static int bcm43xx_setup_backplane_pci_connection(struct bcm43xx_private *bcm, | |||
3057 | } | 3096 | } |
3058 | } | 3097 | } |
3059 | 3098 | ||
3060 | value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2); | 3099 | if (bcm->current_core->id == BCM43xx_COREID_PCI) { |
3061 | value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST; | 3100 | value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2); |
3062 | bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value); | 3101 | value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST; |
3063 | 3102 | bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value); | |
3064 | if (bcm->core_pci.rev < 5) { | 3103 | |
3065 | value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW); | 3104 | if (bcm->current_core->rev < 5) { |
3066 | value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT) | 3105 | value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW); |
3067 | & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK; | 3106 | value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT) |
3068 | value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT) | 3107 | & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK; |
3069 | & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK; | 3108 | value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT) |
3070 | bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value); | 3109 | & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK; |
3071 | err = bcm43xx_pcicore_commit_settings(bcm); | 3110 | bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value); |
3072 | assert(err == 0); | 3111 | err = bcm43xx_pcicore_commit_settings(bcm); |
3112 | assert(err == 0); | ||
3113 | } else if (bcm->current_core->rev >= 11) { | ||
3114 | value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2); | ||
3115 | value |= BCM43xx_SBTOPCI2_MEMREAD_MULTI; | ||
3116 | bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value); | ||
3117 | } | ||
3118 | } else { | ||
3119 | if (bcm->current_core->rev == 0 || bcm->current_core->rev == 1) { | ||
3120 | value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_TLP_WORKAROUND); | ||
3121 | value |= 0x8; | ||
3122 | bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_TLP_WORKAROUND, | ||
3123 | value); | ||
3124 | } | ||
3125 | if (bcm->current_core->rev == 0) { | ||
3126 | bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX, | ||
3127 | BCM43xx_SERDES_RXTIMER, 0x8128); | ||
3128 | bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX, | ||
3129 | BCM43xx_SERDES_CDR, 0x0100); | ||
3130 | bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX, | ||
3131 | BCM43xx_SERDES_CDR_BW, 0x1466); | ||
3132 | } else if (bcm->current_core->rev == 1) { | ||
3133 | value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_DLLP_LINKCTL); | ||
3134 | value |= 0x40; | ||
3135 | bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_DLLP_LINKCTL, | ||
3136 | value); | ||
3137 | } | ||
3073 | } | 3138 | } |
3074 | |||
3075 | out_switch_back: | 3139 | out_switch_back: |
3076 | err = bcm43xx_switch_core(bcm, old_core); | 3140 | err = bcm43xx_switch_core(bcm, old_core); |
3077 | out: | 3141 | out: |
@@ -3140,55 +3204,27 @@ static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm) | |||
3140 | 3204 | ||
3141 | static void do_periodic_work(struct bcm43xx_private *bcm) | 3205 | static void do_periodic_work(struct bcm43xx_private *bcm) |
3142 | { | 3206 | { |
3143 | unsigned int state; | 3207 | if (bcm->periodic_state % 8 == 0) |
3144 | |||
3145 | state = bcm->periodic_state; | ||
3146 | if (state % 8 == 0) | ||
3147 | bcm43xx_periodic_every120sec(bcm); | 3208 | bcm43xx_periodic_every120sec(bcm); |
3148 | if (state % 4 == 0) | 3209 | if (bcm->periodic_state % 4 == 0) |
3149 | bcm43xx_periodic_every60sec(bcm); | 3210 | bcm43xx_periodic_every60sec(bcm); |
3150 | if (state % 2 == 0) | 3211 | if (bcm->periodic_state % 2 == 0) |
3151 | bcm43xx_periodic_every30sec(bcm); | 3212 | bcm43xx_periodic_every30sec(bcm); |
3152 | if (state % 1 == 0) | 3213 | bcm43xx_periodic_every15sec(bcm); |
3153 | bcm43xx_periodic_every15sec(bcm); | ||
3154 | bcm->periodic_state = state + 1; | ||
3155 | 3214 | ||
3156 | schedule_delayed_work(&bcm->periodic_work, HZ * 15); | 3215 | schedule_delayed_work(&bcm->periodic_work, HZ * 15); |
3157 | } | 3216 | } |
3158 | 3217 | ||
3159 | /* Estimate a "Badness" value based on the periodic work | ||
3160 | * state-machine state. "Badness" is worse (bigger), if the | ||
3161 | * periodic work will take longer. | ||
3162 | */ | ||
3163 | static int estimate_periodic_work_badness(unsigned int state) | ||
3164 | { | ||
3165 | int badness = 0; | ||
3166 | |||
3167 | if (state % 8 == 0) /* every 120 sec */ | ||
3168 | badness += 10; | ||
3169 | if (state % 4 == 0) /* every 60 sec */ | ||
3170 | badness += 5; | ||
3171 | if (state % 2 == 0) /* every 30 sec */ | ||
3172 | badness += 1; | ||
3173 | if (state % 1 == 0) /* every 15 sec */ | ||
3174 | badness += 1; | ||
3175 | |||
3176 | #define BADNESS_LIMIT 4 | ||
3177 | return badness; | ||
3178 | } | ||
3179 | |||
3180 | static void bcm43xx_periodic_work_handler(void *d) | 3218 | static void bcm43xx_periodic_work_handler(void *d) |
3181 | { | 3219 | { |
3182 | struct bcm43xx_private *bcm = d; | 3220 | struct bcm43xx_private *bcm = d; |
3183 | struct net_device *net_dev = bcm->net_dev; | 3221 | struct net_device *net_dev = bcm->net_dev; |
3184 | unsigned long flags; | 3222 | unsigned long flags; |
3185 | u32 savedirqs = 0; | 3223 | u32 savedirqs = 0; |
3186 | int badness; | ||
3187 | unsigned long orig_trans_start = 0; | 3224 | unsigned long orig_trans_start = 0; |
3188 | 3225 | ||
3189 | mutex_lock(&bcm->mutex); | 3226 | mutex_lock(&bcm->mutex); |
3190 | badness = estimate_periodic_work_badness(bcm->periodic_state); | 3227 | if (unlikely(bcm->periodic_state % 4 == 0)) { |
3191 | if (badness > BADNESS_LIMIT) { | ||
3192 | /* Periodic work will take a long time, so we want it to | 3228 | /* Periodic work will take a long time, so we want it to |
3193 | * be preemtible. | 3229 | * be preemtible. |
3194 | */ | 3230 | */ |
@@ -3220,7 +3256,7 @@ static void bcm43xx_periodic_work_handler(void *d) | |||
3220 | 3256 | ||
3221 | do_periodic_work(bcm); | 3257 | do_periodic_work(bcm); |
3222 | 3258 | ||
3223 | if (badness > BADNESS_LIMIT) { | 3259 | if (unlikely(bcm->periodic_state % 4 == 0)) { |
3224 | spin_lock_irqsave(&bcm->irq_lock, flags); | 3260 | spin_lock_irqsave(&bcm->irq_lock, flags); |
3225 | tasklet_enable(&bcm->isr_tasklet); | 3261 | tasklet_enable(&bcm->isr_tasklet); |
3226 | bcm43xx_interrupt_enable(bcm, savedirqs); | 3262 | bcm43xx_interrupt_enable(bcm, savedirqs); |
@@ -3231,6 +3267,7 @@ static void bcm43xx_periodic_work_handler(void *d) | |||
3231 | net_dev->trans_start = orig_trans_start; | 3267 | net_dev->trans_start = orig_trans_start; |
3232 | } | 3268 | } |
3233 | mmiowb(); | 3269 | mmiowb(); |
3270 | bcm->periodic_state++; | ||
3234 | spin_unlock_irqrestore(&bcm->irq_lock, flags); | 3271 | spin_unlock_irqrestore(&bcm->irq_lock, flags); |
3235 | mutex_unlock(&bcm->mutex); | 3272 | mutex_unlock(&bcm->mutex); |
3236 | } | 3273 | } |
@@ -3676,7 +3713,7 @@ static int bcm43xx_read_phyinfo(struct bcm43xx_private *bcm) | |||
3676 | bcm->ieee->freq_band = IEEE80211_24GHZ_BAND; | 3713 | bcm->ieee->freq_band = IEEE80211_24GHZ_BAND; |
3677 | break; | 3714 | break; |
3678 | case BCM43xx_PHYTYPE_G: | 3715 | case BCM43xx_PHYTYPE_G: |
3679 | if (phy_rev > 7) | 3716 | if (phy_rev > 8) |
3680 | phy_rev_ok = 0; | 3717 | phy_rev_ok = 0; |
3681 | bcm->ieee->modulation = IEEE80211_OFDM_MODULATION | | 3718 | bcm->ieee->modulation = IEEE80211_OFDM_MODULATION | |
3682 | IEEE80211_CCK_MODULATION; | 3719 | IEEE80211_CCK_MODULATION; |
@@ -3688,6 +3725,8 @@ static int bcm43xx_read_phyinfo(struct bcm43xx_private *bcm) | |||
3688 | phy_type); | 3725 | phy_type); |
3689 | return -ENODEV; | 3726 | return -ENODEV; |
3690 | }; | 3727 | }; |
3728 | bcm->ieee->perfect_rssi = RX_RSSI_MAX; | ||
3729 | bcm->ieee->worst_rssi = 0; | ||
3691 | if (!phy_rev_ok) { | 3730 | if (!phy_rev_ok) { |
3692 | printk(KERN_WARNING PFX "Invalid PHY Revision %x\n", | 3731 | printk(KERN_WARNING PFX "Invalid PHY Revision %x\n", |
3693 | phy_rev); | 3732 | phy_rev); |
@@ -3974,11 +4013,6 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb, | |||
3974 | return NETDEV_TX_OK; | 4013 | return NETDEV_TX_OK; |
3975 | } | 4014 | } |
3976 | 4015 | ||
3977 | static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev) | ||
3978 | { | ||
3979 | return &(bcm43xx_priv(net_dev)->ieee->stats); | ||
3980 | } | ||
3981 | |||
3982 | static void bcm43xx_net_tx_timeout(struct net_device *net_dev) | 4016 | static void bcm43xx_net_tx_timeout(struct net_device *net_dev) |
3983 | { | 4017 | { |
3984 | struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); | 4018 | struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); |
@@ -4092,7 +4126,6 @@ static int __devinit bcm43xx_init_one(struct pci_dev *pdev, | |||
4092 | 4126 | ||
4093 | net_dev->open = bcm43xx_net_open; | 4127 | net_dev->open = bcm43xx_net_open; |
4094 | net_dev->stop = bcm43xx_net_stop; | 4128 | net_dev->stop = bcm43xx_net_stop; |
4095 | net_dev->get_stats = bcm43xx_net_get_stats; | ||
4096 | net_dev->tx_timeout = bcm43xx_net_tx_timeout; | 4129 | net_dev->tx_timeout = bcm43xx_net_tx_timeout; |
4097 | #ifdef CONFIG_NET_POLL_CONTROLLER | 4130 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4098 | net_dev->poll_controller = bcm43xx_net_poll_controller; | 4131 | net_dev->poll_controller = bcm43xx_net_poll_controller; |
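The periodic-work rewrite above drops the separate "badness" estimator: bcm->periodic_state now just increments once per 15-second run, so state % 2 == 0 fires every 30 s, % 4 every 60 s, % 8 every 120 s, and the "will this run take long" question collapses to the same % 4 == 0 test. A compact, self-contained sketch of the counter-driven schedule; the work_every_*() stubs are placeholders, not kernel functions.

/* Placeholders for the per-interval work items. */
static void work_every_120sec(void) { }
static void work_every_60sec(void) { }
static void work_every_30sec(void) { }
static void work_every_15sec(void) { }

/* One tick every 15 s drives the slower work via a modulo counter. */
static void periodic_tick(unsigned int *state)
{
        if (*state % 8 == 0)
                work_every_120sec();    /* 8 * 15 s */
        if (*state % 4 == 0)
                work_every_60sec();     /* 4 * 15 s */
        if (*state % 2 == 0)
                work_every_30sec();     /* 2 * 15 s */
        work_every_15sec();             /* every tick */

        (*state)++;
}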
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_power.c b/drivers/net/wireless/bcm43xx/bcm43xx_power.c index 6569da3a7a39..7e774f410953 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_power.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_power.c | |||
@@ -153,8 +153,6 @@ int bcm43xx_pctl_init(struct bcm43xx_private *bcm) | |||
153 | int err, maxfreq; | 153 | int err, maxfreq; |
154 | struct bcm43xx_coreinfo *old_core; | 154 | struct bcm43xx_coreinfo *old_core; |
155 | 155 | ||
156 | if (!(bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL)) | ||
157 | return 0; | ||
158 | old_core = bcm->current_core; | 156 | old_core = bcm->current_core; |
159 | err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon); | 157 | err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon); |
160 | if (err == -ENODEV) | 158 | if (err == -ENODEV) |
@@ -162,11 +160,27 @@ int bcm43xx_pctl_init(struct bcm43xx_private *bcm) | |||
162 | if (err) | 160 | if (err) |
163 | goto out; | 161 | goto out; |
164 | 162 | ||
165 | maxfreq = bcm43xx_pctl_clockfreqlimit(bcm, 1); | 163 | if (bcm->chip_id == 0x4321) { |
166 | bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_PLLONDELAY, | 164 | if (bcm->chip_rev == 0) |
167 | (maxfreq * 150 + 999999) / 1000000); | 165 | bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_CTL, 0x03A4); |
168 | bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_FREFSELDELAY, | 166 | if (bcm->chip_rev == 1) |
169 | (maxfreq * 15 + 999999) / 1000000); | 167 | bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_CTL, 0x00A4); |
168 | } | ||
169 | |||
170 | if (bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL) { | ||
171 | if (bcm->current_core->rev >= 10) { | ||
172 | /* Set Idle Power clock rate to 1Mhz */ | ||
173 | bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL, | ||
174 | (bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL) | ||
175 | & 0x0000FFFF) | 0x40000); | ||
176 | } else { | ||
177 | maxfreq = bcm43xx_pctl_clockfreqlimit(bcm, 1); | ||
178 | bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_PLLONDELAY, | ||
179 | (maxfreq * 150 + 999999) / 1000000); | ||
180 | bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_FREFSELDELAY, | ||
181 | (maxfreq * 15 + 999999) / 1000000); | ||
182 | } | ||
183 | } | ||
170 | 184 | ||
171 | err = bcm43xx_switch_core(bcm, old_core); | 185 | err = bcm43xx_switch_core(bcm, old_core); |
172 | assert(err == 0); | 186 | assert(err == 0); |
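The PLLONDELAY/FREFSELDELAY writes kept for pre-rev-10 chipcommon cores use round-up integer division: (maxfreq * 150 + 999999) / 1000000 is the ceiling of maxfreq * 150 / 1000000, so the programmed delay is never rounded short. A one-liner sketch of that idiom, written generically rather than as the driver's exact helper:

/* (x + d - 1) / d rounds an integer division up; the delay writes
 * above use it with d = 1000000. */
static inline unsigned long div_round_up(unsigned long x, unsigned long d)
{
        return (x + d - 1) / d;
}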
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c index d27016f8c736..a659442b9c15 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c | |||
@@ -47,9 +47,6 @@ | |||
47 | #define BCM43xx_WX_VERSION 18 | 47 | #define BCM43xx_WX_VERSION 18 |
48 | 48 | ||
49 | #define MAX_WX_STRING 80 | 49 | #define MAX_WX_STRING 80 |
50 | /* FIXME: the next line is a guess as to what the maximum RSSI value might be */ | ||
51 | #define RX_RSSI_MAX 60 | ||
52 | |||
53 | 50 | ||
54 | static int bcm43xx_wx_get_name(struct net_device *net_dev, | 51 | static int bcm43xx_wx_get_name(struct net_device *net_dev, |
55 | struct iw_request_info *info, | 52 | struct iw_request_info *info, |
@@ -693,6 +690,7 @@ static int bcm43xx_wx_set_swencryption(struct net_device *net_dev, | |||
693 | bcm->ieee->host_encrypt = !!on; | 690 | bcm->ieee->host_encrypt = !!on; |
694 | bcm->ieee->host_decrypt = !!on; | 691 | bcm->ieee->host_decrypt = !!on; |
695 | bcm->ieee->host_build_iv = !on; | 692 | bcm->ieee->host_build_iv = !on; |
693 | bcm->ieee->host_strip_iv_icv = !on; | ||
696 | spin_unlock_irqrestore(&bcm->irq_lock, flags); | 694 | spin_unlock_irqrestore(&bcm->irq_lock, flags); |
697 | mutex_unlock(&bcm->mutex); | 695 | mutex_unlock(&bcm->mutex); |
698 | 696 | ||
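Note: the host_strip_iv_icv assignment added above follows the same pattern as the surrounding lines: !!on normalises any nonzero value to 1 and !on gives its logical opposite, so the ieee80211 flags always hold clean 0/1 values. A tiny standalone illustration (variable names are made up):

#include <stdio.h>

int main(void)
{
	int on = 42;			/* any nonzero "enable software crypto" request */

	int host_encrypt      = !!on;	/* 1: the stack/driver handles the crypto */
	int host_strip_iv_icv = !on;	/* 0: stack should strip IV/ICV itself */

	printf("host_encrypt=%d host_strip_iv_icv=%d\n",
	       host_encrypt, host_strip_iv_icv);
	return 0;
}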
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c index 0159e4e93201..3e2462671690 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c | |||
@@ -544,24 +544,6 @@ int bcm43xx_rx(struct bcm43xx_private *bcm, | |||
544 | } | 544 | } |
545 | 545 | ||
546 | frame_ctl = le16_to_cpu(wlhdr->frame_ctl); | 546 | frame_ctl = le16_to_cpu(wlhdr->frame_ctl); |
547 | if ((frame_ctl & IEEE80211_FCTL_PROTECTED) && !bcm->ieee->host_decrypt) { | ||
548 | frame_ctl &= ~IEEE80211_FCTL_PROTECTED; | ||
549 | wlhdr->frame_ctl = cpu_to_le16(frame_ctl); | ||
550 | /* trim IV and ICV */ | ||
551 | /* FIXME: this must be done only for WEP encrypted packets */ | ||
552 | if (skb->len < 32) { | ||
553 | dprintkl(KERN_ERR PFX "RX packet dropped (PROTECTED flag " | ||
554 | "set and length < 32)\n"); | ||
555 | return -EINVAL; | ||
556 | } else { | ||
557 | memmove(skb->data + 4, skb->data, 24); | ||
558 | skb_pull(skb, 4); | ||
559 | skb_trim(skb, skb->len - 4); | ||
560 | stats.len -= 8; | ||
561 | } | ||
562 | wlhdr = (struct ieee80211_hdr_4addr *)(skb->data); | ||
563 | } | ||
564 | |||
565 | switch (WLAN_FC_GET_TYPE(frame_ctl)) { | 547 | switch (WLAN_FC_GET_TYPE(frame_ctl)) { |
566 | case IEEE80211_FTYPE_MGMT: | 548 | case IEEE80211_FTYPE_MGMT: |
567 | ieee80211_rx_mgt(bcm->ieee, wlhdr, &stats); | 549 | ieee80211_rx_mgt(bcm->ieee, wlhdr, &stats); |
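Note: for reference, the block deleted above stripped WEP framing inside the driver: it slid the 24-byte 802.11 header forward over the 4-byte IV, dropped the IV from the head and the 4-byte ICV from the tail, shrinking the frame by 8 bytes. With host_strip_iv_icv now handed to the ieee80211 layer (previous hunk), that work leaves the driver. A kernel-style sketch of the removed logic, with a made-up function name:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Mirrors the deleted code: assumes a 24-byte 802.11 header followed by a
 * 4-byte IV, the payload, and a trailing 4-byte ICV. The caller must have
 * checked that skb->len is large enough (the old code required >= 32). */
static void strip_wep_iv_icv(struct sk_buff *skb)
{
	memmove(skb->data + 4, skb->data, 24);	/* slide header over the IV */
	skb_pull(skb, 4);			/* drop the IV from the front */
	skb_trim(skb, skb->len - 4);		/* drop the ICV from the tail */
}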
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c index c2fa011be291..d1de9766c831 100644 --- a/drivers/net/wireless/hostap/hostap_pci.c +++ b/drivers/net/wireless/hostap/hostap_pci.c | |||
@@ -425,8 +425,14 @@ static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state) | |||
425 | static int prism2_pci_resume(struct pci_dev *pdev) | 425 | static int prism2_pci_resume(struct pci_dev *pdev) |
426 | { | 426 | { |
427 | struct net_device *dev = pci_get_drvdata(pdev); | 427 | struct net_device *dev = pci_get_drvdata(pdev); |
428 | int err; | ||
428 | 429 | ||
429 | pci_enable_device(pdev); | 430 | err = pci_enable_device(pdev); |
431 | if (err) { | ||
432 | printk(KERN_ERR "%s: pci_enable_device failed on resume\n", | ||
433 | dev->name); | ||
434 | return err; | ||
435 | } | ||
430 | pci_restore_state(pdev); | 436 | pci_restore_state(pdev); |
431 | prism2_hw_config(dev, 0); | 437 | prism2_hw_config(dev, 0); |
432 | if (netif_running(dev)) { | 438 | if (netif_running(dev)) { |
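Note: this hunk, and the ipw2100, ipw2200, orinoco and prism54 hunks further down, apply the same fix: pci_enable_device() can fail during resume and its return value was previously ignored. The generic shape of the change, with driver-specific details omitted and the function name invented:

#include <linux/pci.h>

static int example_pci_resume(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed on resume\n");
		return err;	/* abort the resume instead of touching dead hardware */
	}
	pci_restore_state(pdev);
	/* ... driver-specific re-initialisation continues here ... */
	return 0;
}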
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 4e4eaa2a99ca..79607b8b877c 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -5827,19 +5827,6 @@ static void ipw2100_tx_timeout(struct net_device *dev) | |||
5827 | schedule_reset(priv); | 5827 | schedule_reset(priv); |
5828 | } | 5828 | } |
5829 | 5829 | ||
5830 | /* | ||
5831 | * TODO: reimplement it so that it reads statistics | ||
5832 | * from the adapter using ordinal tables | ||
5833 | * instead of/in addition to collecting them | ||
5834 | * in the driver | ||
5835 | */ | ||
5836 | static struct net_device_stats *ipw2100_stats(struct net_device *dev) | ||
5837 | { | ||
5838 | struct ipw2100_priv *priv = ieee80211_priv(dev); | ||
5839 | |||
5840 | return &priv->ieee->stats; | ||
5841 | } | ||
5842 | |||
5843 | static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) | 5830 | static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) |
5844 | { | 5831 | { |
5845 | /* This is called when wpa_supplicant loads and closes the driver | 5832 | /* This is called when wpa_supplicant loads and closes the driver |
@@ -6022,7 +6009,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, | |||
6022 | dev->open = ipw2100_open; | 6009 | dev->open = ipw2100_open; |
6023 | dev->stop = ipw2100_close; | 6010 | dev->stop = ipw2100_close; |
6024 | dev->init = ipw2100_net_init; | 6011 | dev->init = ipw2100_net_init; |
6025 | dev->get_stats = ipw2100_stats; | ||
6026 | dev->ethtool_ops = &ipw2100_ethtool_ops; | 6012 | dev->ethtool_ops = &ipw2100_ethtool_ops; |
6027 | dev->tx_timeout = ipw2100_tx_timeout; | 6013 | dev->tx_timeout = ipw2100_tx_timeout; |
6028 | dev->wireless_handlers = &ipw2100_wx_handler_def; | 6014 | dev->wireless_handlers = &ipw2100_wx_handler_def; |
@@ -6423,6 +6409,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev) | |||
6423 | { | 6409 | { |
6424 | struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); | 6410 | struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); |
6425 | struct net_device *dev = priv->net_dev; | 6411 | struct net_device *dev = priv->net_dev; |
6412 | int err; | ||
6426 | u32 val; | 6413 | u32 val; |
6427 | 6414 | ||
6428 | if (IPW2100_PM_DISABLED) | 6415 | if (IPW2100_PM_DISABLED) |
@@ -6433,7 +6420,12 @@ static int ipw2100_resume(struct pci_dev *pci_dev) | |||
6433 | IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); | 6420 | IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); |
6434 | 6421 | ||
6435 | pci_set_power_state(pci_dev, PCI_D0); | 6422 | pci_set_power_state(pci_dev, PCI_D0); |
6436 | pci_enable_device(pci_dev); | 6423 | err = pci_enable_device(pci_dev); |
6424 | if (err) { | ||
6425 | printk(KERN_ERR "%s: pci_enable_device failed on resume\n", | ||
6426 | dev->name); | ||
6427 | return err; | ||
6428 | } | ||
6437 | pci_restore_state(pci_dev); | 6429 | pci_restore_state(pci_dev); |
6438 | 6430 | ||
6439 | /* | 6431 | /* |
@@ -7568,11 +7560,10 @@ static int ipw2100_wx_set_genie(struct net_device *dev, | |||
7568 | return -EINVAL; | 7560 | return -EINVAL; |
7569 | 7561 | ||
7570 | if (wrqu->data.length) { | 7562 | if (wrqu->data.length) { |
7571 | buf = kmalloc(wrqu->data.length, GFP_KERNEL); | 7563 | buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); |
7572 | if (buf == NULL) | 7564 | if (buf == NULL) |
7573 | return -ENOMEM; | 7565 | return -ENOMEM; |
7574 | 7566 | ||
7575 | memcpy(buf, extra, wrqu->data.length); | ||
7576 | kfree(ieee->wpa_ie); | 7567 | kfree(ieee->wpa_ie); |
7577 | ieee->wpa_ie = buf; | 7568 | ieee->wpa_ie = buf; |
7578 | ieee->wpa_ie_len = wrqu->data.length; | 7569 | ieee->wpa_ie_len = wrqu->data.length; |
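Note: kmemdup(), used above in place of the kmalloc()+memcpy() pair, allocates len bytes and copies the source into them in one call, returning NULL on allocation failure. A small kernel-style sketch (the wrapper name is illustrative):

#include <linux/slab.h>
#include <linux/types.h>

/* Duplicate a WPA IE buffer; the caller frees it with kfree() and maps a
 * NULL return to -ENOMEM, as the ioctl handler above does. */
static u8 *dup_wpa_ie(const u8 *ie, size_t len)
{
	return kmemdup(ie, len, GFP_KERNEL);
}

Since kfree(NULL) is a no-op, the replace-old-IE sequence in the hunk (kfree(ieee->wpa_ie); ieee->wpa_ie = buf;) is safe whether or not an IE was already set.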
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 72120d5c2f7b..c692d01a76ca 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -11727,12 +11727,18 @@ static int ipw_pci_resume(struct pci_dev *pdev) | |||
11727 | { | 11727 | { |
11728 | struct ipw_priv *priv = pci_get_drvdata(pdev); | 11728 | struct ipw_priv *priv = pci_get_drvdata(pdev); |
11729 | struct net_device *dev = priv->net_dev; | 11729 | struct net_device *dev = priv->net_dev; |
11730 | int err; | ||
11730 | u32 val; | 11731 | u32 val; |
11731 | 11732 | ||
11732 | printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name); | 11733 | printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name); |
11733 | 11734 | ||
11734 | pci_set_power_state(pdev, PCI_D0); | 11735 | pci_set_power_state(pdev, PCI_D0); |
11735 | pci_enable_device(pdev); | 11736 | err = pci_enable_device(pdev); |
11737 | if (err) { | ||
11738 | printk(KERN_ERR "%s: pci_enable_device failed on resume\n", | ||
11739 | dev->name); | ||
11740 | return err; | ||
11741 | } | ||
11736 | pci_restore_state(pdev); | 11742 | pci_restore_state(pdev); |
11737 | 11743 | ||
11738 | /* | 11744 | /* |
diff --git a/drivers/net/wireless/orinoco_pci.h b/drivers/net/wireless/orinoco_pci.h index be1abea4b64f..f4e5e06760c1 100644 --- a/drivers/net/wireless/orinoco_pci.h +++ b/drivers/net/wireless/orinoco_pci.h | |||
@@ -60,7 +60,12 @@ static int orinoco_pci_resume(struct pci_dev *pdev) | |||
60 | int err; | 60 | int err; |
61 | 61 | ||
62 | pci_set_power_state(pdev, 0); | 62 | pci_set_power_state(pdev, 0); |
63 | pci_enable_device(pdev); | 63 | err = pci_enable_device(pdev); |
64 | if (err) { | ||
65 | printk(KERN_ERR "%s: pci_enable_device failed on resume\n", | ||
66 | dev->name); | ||
67 | return err; | ||
68 | } | ||
64 | pci_restore_state(pdev); | 69 | pci_restore_state(pdev); |
65 | 70 | ||
66 | err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, | 71 | err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, |
diff --git a/drivers/net/wireless/prism54/isl_38xx.c b/drivers/net/wireless/prism54/isl_38xx.c index 23deee69974b..02fc67bccbd0 100644 --- a/drivers/net/wireless/prism54/isl_38xx.c +++ b/drivers/net/wireless/prism54/isl_38xx.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>_ | 3 | * Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>_ |
5 | * | 4 | * |
@@ -38,7 +37,7 @@ | |||
38 | * isl38xx_disable_interrupts - disable all interrupts | 37 | * isl38xx_disable_interrupts - disable all interrupts |
39 | * @device: pci memory base address | 38 | * @device: pci memory base address |
40 | * | 39 | * |
41 | * Instructs the device to disable all interrupt reporting by asserting | 40 | * Instructs the device to disable all interrupt reporting by asserting |
42 | * the IRQ line. New events may still show up in the interrupt identification | 41 | * the IRQ line. New events may still show up in the interrupt identification |
43 | * register located at offset %ISL38XX_INT_IDENT_REG. | 42 | * register located at offset %ISL38XX_INT_IDENT_REG. |
44 | */ | 43 | */ |
@@ -204,17 +203,19 @@ isl38xx_interface_reset(void __iomem *device_base, dma_addr_t host_address) | |||
204 | /* enable the interrupt for detecting initialization */ | 203 | /* enable the interrupt for detecting initialization */ |
205 | 204 | ||
206 | /* Note: Do not enable other interrupts here. We want the | 205 | /* Note: Do not enable other interrupts here. We want the |
207 | * device to have come up first 100% before allowing any other | 206 | * device to have come up first 100% before allowing any other |
208 | * interrupts. */ | 207 | * interrupts. */ |
209 | isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG); | 208 | isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG); |
210 | udelay(ISL38XX_WRITEIO_DELAY); /* allow complete full reset */ | 209 | udelay(ISL38XX_WRITEIO_DELAY); /* allow complete full reset */ |
211 | } | 210 | } |
212 | 211 | ||
213 | void | 212 | void |
214 | isl38xx_enable_common_interrupts(void __iomem *device_base) { | 213 | isl38xx_enable_common_interrupts(void __iomem *device_base) |
214 | { | ||
215 | u32 reg; | 215 | u32 reg; |
216 | reg = ( ISL38XX_INT_IDENT_UPDATE | | 216 | |
217 | ISL38XX_INT_IDENT_SLEEP | ISL38XX_INT_IDENT_WAKEUP); | 217 | reg = ISL38XX_INT_IDENT_UPDATE | ISL38XX_INT_IDENT_SLEEP | |
218 | ISL38XX_INT_IDENT_WAKEUP; | ||
218 | isl38xx_w32_flush(device_base, reg, ISL38XX_INT_EN_REG); | 219 | isl38xx_w32_flush(device_base, reg, ISL38XX_INT_EN_REG); |
219 | udelay(ISL38XX_WRITEIO_DELAY); | 220 | udelay(ISL38XX_WRITEIO_DELAY); |
220 | } | 221 | } |
@@ -234,23 +235,21 @@ isl38xx_in_queue(isl38xx_control_block *cb, int queue) | |||
234 | /* send queues */ | 235 | /* send queues */ |
235 | case ISL38XX_CB_TX_MGMTQ: | 236 | case ISL38XX_CB_TX_MGMTQ: |
236 | BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE); | 237 | BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE); |
238 | |||
237 | case ISL38XX_CB_TX_DATA_LQ: | 239 | case ISL38XX_CB_TX_DATA_LQ: |
238 | case ISL38XX_CB_TX_DATA_HQ: | 240 | case ISL38XX_CB_TX_DATA_HQ: |
239 | BUG_ON(delta > ISL38XX_CB_TX_QSIZE); | 241 | BUG_ON(delta > ISL38XX_CB_TX_QSIZE); |
240 | return delta; | 242 | return delta; |
241 | break; | ||
242 | 243 | ||
243 | /* receive queues */ | 244 | /* receive queues */ |
244 | case ISL38XX_CB_RX_MGMTQ: | 245 | case ISL38XX_CB_RX_MGMTQ: |
245 | BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE); | 246 | BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE); |
246 | return ISL38XX_CB_MGMT_QSIZE - delta; | 247 | return ISL38XX_CB_MGMT_QSIZE - delta; |
247 | break; | ||
248 | 248 | ||
249 | case ISL38XX_CB_RX_DATA_LQ: | 249 | case ISL38XX_CB_RX_DATA_LQ: |
250 | case ISL38XX_CB_RX_DATA_HQ: | 250 | case ISL38XX_CB_RX_DATA_HQ: |
251 | BUG_ON(delta > ISL38XX_CB_RX_QSIZE); | 251 | BUG_ON(delta > ISL38XX_CB_RX_QSIZE); |
252 | return ISL38XX_CB_RX_QSIZE - delta; | 252 | return ISL38XX_CB_RX_QSIZE - delta; |
253 | break; | ||
254 | } | 253 | } |
255 | BUG(); | 254 | BUG(); |
256 | return 0; | 255 | return 0; |
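Note: two cleanups above: the break statements that followed a return were unreachable and are dropped, and the blank line added after the MGMT-TX BUG_ON makes the intentional fall-through into the data-TX cases easier to see (the send queues share the same return). A standalone sketch of both points, with made-up queue numbers and sizes:

#include <stdio.h>

static int queue_space(int queue, int delta)
{
	switch (queue) {
	case 0:			/* TX mgmt: intentionally falls through */
	case 1:			/* TX data low/high */
		return delta;	/* a break after this return could never run */
	case 2:			/* RX mgmt */
		return 16 - delta;
	}
	return 0;		/* unknown queue */
}

int main(void)
{
	printf("%d %d\n", queue_space(0, 3), queue_space(2, 3));
	return 0;
}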
diff --git a/drivers/net/wireless/prism54/isl_38xx.h b/drivers/net/wireless/prism54/isl_38xx.h index 8af20980af8d..3fadcb6f5297 100644 --- a/drivers/net/wireless/prism54/isl_38xx.h +++ b/drivers/net/wireless/prism54/isl_38xx.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * | 3 | * |
5 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
@@ -67,10 +66,10 @@ | |||
67 | * @base: (host) memory base address of the device | 66 | * @base: (host) memory base address of the device |
68 | * @val: 32bit value (host order) to write | 67 | * @val: 32bit value (host order) to write |
69 | * @offset: byte offset into @base to write value to | 68 | * @offset: byte offset into @base to write value to |
70 | * | 69 | * |
71 | * This helper takes care of writing a 32bit datum to the | 70 | * This helper takes care of writing a 32bit datum to the |
72 | * specified offset into the device's pci memory space, and making sure | 71 | * specified offset into the device's pci memory space, and making sure |
73 | * the pci memory buffers get flushed by performing one harmless read | 72 | * the pci memory buffers get flushed by performing one harmless read |
74 | * from the %ISL38XX_PCI_POSTING_FLUSH offset. | 73 | * from the %ISL38XX_PCI_POSTING_FLUSH offset. |
75 | */ | 74 | */ |
76 | static inline void | 75 | static inline void |
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 286325ca3293..4a20e45de3ca 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * (C) 2003,2004 Aurelien Alleaume <slts@free.fr> | 3 | * (C) 2003,2004 Aurelien Alleaume <slts@free.fr> |
5 | * (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> | 4 | * (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> |
@@ -55,12 +54,12 @@ static const unsigned char scan_rate_list[] = { 2, 4, 11, 22, | |||
55 | * prism54_mib_mode_helper - MIB change mode helper function | 54 | * prism54_mib_mode_helper - MIB change mode helper function |
56 | * @mib: the &struct islpci_mib object to modify | 55 | * @mib: the &struct islpci_mib object to modify |
57 | * @iw_mode: new mode (%IW_MODE_*) | 56 | * @iw_mode: new mode (%IW_MODE_*) |
58 | * | 57 | * |
59 | * This is a helper function, hence it does not lock. Make sure | 58 | * This is a helper function, hence it does not lock. Make sure |
60 | * caller deals with locking *if* necessary. This function sets the | 59 | * caller deals with locking *if* necessary. This function sets the |
61 | * mode-dependent mib values and does the mapping of the Linux | 60 | * mode-dependent mib values and does the mapping of the Linux |
62 | * Wireless API modes to Device firmware modes. It also checks for | 61 | * Wireless API modes to Device firmware modes. It also checks for |
63 | * correct valid Linux wireless modes. | 62 | * correct valid Linux wireless modes. |
64 | */ | 63 | */ |
65 | static int | 64 | static int |
66 | prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode) | 65 | prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode) |
@@ -118,7 +117,7 @@ prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode) | |||
118 | * | 117 | * |
119 | * this function initializes the struct given as @mib with defaults, | 118 | * this function initializes the struct given as @mib with defaults, |
120 | * of which many are retrieved from the global module parameter | 119 | * of which many are retrieved from the global module parameter |
121 | * variables. | 120 | * variables. |
122 | */ | 121 | */ |
123 | 122 | ||
124 | void | 123 | void |
@@ -134,7 +133,7 @@ prism54_mib_init(islpci_private *priv) | |||
134 | authen = CARD_DEFAULT_AUTHEN; | 133 | authen = CARD_DEFAULT_AUTHEN; |
135 | wep = CARD_DEFAULT_WEP; | 134 | wep = CARD_DEFAULT_WEP; |
136 | filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */ | 135 | filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */ |
137 | dot1x = CARD_DEFAULT_DOT1X; | 136 | dot1x = CARD_DEFAULT_DOT1X; |
138 | mlme = CARD_DEFAULT_MLME_MODE; | 137 | mlme = CARD_DEFAULT_MLME_MODE; |
139 | conformance = CARD_DEFAULT_CONFORMANCE; | 138 | conformance = CARD_DEFAULT_CONFORMANCE; |
140 | power = 127; | 139 | power = 127; |
@@ -228,7 +227,7 @@ prism54_get_wireless_stats(struct net_device *ndev) | |||
228 | } else | 227 | } else |
229 | priv->iwstatistics.qual.updated = 0; | 228 | priv->iwstatistics.qual.updated = 0; |
230 | 229 | ||
231 | /* Update our wireless stats, but do not schedule too often | 230 | /* Update our wireless stats, but do not schedule too often |
232 | * (max 1 HZ) */ | 231 | * (max 1 HZ) */ |
233 | if ((priv->stats_timestamp == 0) || | 232 | if ((priv->stats_timestamp == 0) || |
234 | time_after(jiffies, priv->stats_timestamp + 1 * HZ)) { | 233 | time_after(jiffies, priv->stats_timestamp + 1 * HZ)) { |
@@ -705,7 +704,7 @@ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info, | |||
705 | * Starting with WE-17, the buffer can be as big as needed. | 704 | * Starting with WE-17, the buffer can be as big as needed. |
706 | * But the device won't report anything if you change the value | 705 | * But the device won't report anything if you change the value |
707 | * of IWMAX_BSS=24. */ | 706 | * of IWMAX_BSS=24. */ |
708 | 707 | ||
709 | rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r); | 708 | rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r); |
710 | bsslist = r.ptr; | 709 | bsslist = r.ptr; |
711 | 710 | ||
@@ -785,7 +784,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, | |||
785 | return rvalue; | 784 | return rvalue; |
786 | } | 785 | } |
787 | 786 | ||
788 | /* Provides no functionality, just completes the ioctl. In essence this is | 787 | /* Provides no functionality, just completes the ioctl. In essence this is |
789 | * just a cosmetic ioctl. | 788 | * just a cosmetic ioctl. |
790 | */ | 789 | */ |
791 | static int | 790 | static int |
@@ -1104,7 +1103,7 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info, | |||
1104 | &key); | 1103 | &key); |
1105 | } | 1104 | } |
1106 | /* | 1105 | /* |
1107 | * If a valid key is set, encryption should be enabled | 1106 | * If a valid key is set, encryption should be enabled |
1108 | * (user may turn it off later). | 1107 | * (user may turn it off later). |
1109 | * This is also how "iwconfig ethX key on" works | 1108 | * This is also how "iwconfig ethX key on" works |
1110 | */ | 1109 | */ |
@@ -1126,7 +1125,7 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info, | |||
1126 | } | 1125 | } |
1127 | /* now read the flags */ | 1126 | /* now read the flags */ |
1128 | if (dwrq->flags & IW_ENCODE_DISABLED) { | 1127 | if (dwrq->flags & IW_ENCODE_DISABLED) { |
1129 | /* Encoding disabled, | 1128 | /* Encoding disabled, |
1130 | * authen = DOT11_AUTH_OS; | 1129 | * authen = DOT11_AUTH_OS; |
1131 | * invoke = 0; | 1130 | * invoke = 0; |
1132 | * exunencrypt = 0; */ | 1131 | * exunencrypt = 0; */ |
@@ -1214,7 +1213,7 @@ prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info, | |||
1214 | vwrq->value = (s32) r.u / 4; | 1213 | vwrq->value = (s32) r.u / 4; |
1215 | vwrq->fixed = 1; | 1214 | vwrq->fixed = 1; |
1216 | /* radio is not turned off | 1215 | /* radio is not turned off |
1217 | * btw: how is it possible to turn off only the radio | 1216 | * btw: how is it possible to turn off only the radio |
1218 | */ | 1217 | */ |
1219 | vwrq->disabled = 0; | 1218 | vwrq->disabled = 0; |
1220 | 1219 | ||
@@ -2354,17 +2353,17 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, | |||
2354 | handle_request(priv, mlme, oid); | 2353 | handle_request(priv, mlme, oid); |
2355 | send_formatted_event(priv, "Authenticate request (ex)", mlme, 1); | 2354 | send_formatted_event(priv, "Authenticate request (ex)", mlme, 1); |
2356 | 2355 | ||
2357 | if (priv->iw_mode != IW_MODE_MASTER | 2356 | if (priv->iw_mode != IW_MODE_MASTER |
2358 | && mlmeex->state != DOT11_STATE_AUTHING) | 2357 | && mlmeex->state != DOT11_STATE_AUTHING) |
2359 | break; | 2358 | break; |
2360 | 2359 | ||
2361 | confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC); | 2360 | confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC); |
2362 | 2361 | ||
2363 | if (!confirm) | 2362 | if (!confirm) |
2364 | break; | 2363 | break; |
2365 | 2364 | ||
2366 | memcpy(&confirm->address, mlmeex->address, ETH_ALEN); | 2365 | memcpy(&confirm->address, mlmeex->address, ETH_ALEN); |
2367 | printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", | 2366 | printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", |
2368 | mlmeex->address[0], | 2367 | mlmeex->address[0], |
2369 | mlmeex->address[1], | 2368 | mlmeex->address[1], |
2370 | mlmeex->address[2], | 2369 | mlmeex->address[2], |
@@ -2398,10 +2397,10 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, | |||
2398 | handle_request(priv, mlme, oid); | 2397 | handle_request(priv, mlme, oid); |
2399 | send_formatted_event(priv, "Associate request (ex)", mlme, 1); | 2398 | send_formatted_event(priv, "Associate request (ex)", mlme, 1); |
2400 | 2399 | ||
2401 | if (priv->iw_mode != IW_MODE_MASTER | 2400 | if (priv->iw_mode != IW_MODE_MASTER |
2402 | && mlmeex->state != DOT11_STATE_ASSOCING) | 2401 | && mlmeex->state != DOT11_STATE_ASSOCING) |
2403 | break; | 2402 | break; |
2404 | 2403 | ||
2405 | confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); | 2404 | confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); |
2406 | 2405 | ||
2407 | if (!confirm) | 2406 | if (!confirm) |
@@ -2417,7 +2416,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, | |||
2417 | 2416 | ||
2418 | if (!wpa_ie_len) { | 2417 | if (!wpa_ie_len) { |
2419 | printk(KERN_DEBUG "No WPA IE found from " | 2418 | printk(KERN_DEBUG "No WPA IE found from " |
2420 | "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", | 2419 | "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", |
2421 | mlmeex->address[0], | 2420 | mlmeex->address[0], |
2422 | mlmeex->address[1], | 2421 | mlmeex->address[1], |
2423 | mlmeex->address[2], | 2422 | mlmeex->address[2], |
@@ -2435,14 +2434,14 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, | |||
2435 | mgt_set_varlen(priv, oid, confirm, wpa_ie_len); | 2434 | mgt_set_varlen(priv, oid, confirm, wpa_ie_len); |
2436 | 2435 | ||
2437 | kfree(confirm); | 2436 | kfree(confirm); |
2438 | 2437 | ||
2439 | break; | 2438 | break; |
2440 | 2439 | ||
2441 | case DOT11_OID_REASSOCIATEEX: | 2440 | case DOT11_OID_REASSOCIATEEX: |
2442 | handle_request(priv, mlme, oid); | 2441 | handle_request(priv, mlme, oid); |
2443 | send_formatted_event(priv, "Reassociate request (ex)", mlme, 1); | 2442 | send_formatted_event(priv, "Reassociate request (ex)", mlme, 1); |
2444 | 2443 | ||
2445 | if (priv->iw_mode != IW_MODE_MASTER | 2444 | if (priv->iw_mode != IW_MODE_MASTER |
2446 | && mlmeex->state != DOT11_STATE_ASSOCING) | 2445 | && mlmeex->state != DOT11_STATE_ASSOCING) |
2447 | break; | 2446 | break; |
2448 | 2447 | ||
@@ -2461,7 +2460,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, | |||
2461 | 2460 | ||
2462 | if (!wpa_ie_len) { | 2461 | if (!wpa_ie_len) { |
2463 | printk(KERN_DEBUG "No WPA IE found from " | 2462 | printk(KERN_DEBUG "No WPA IE found from " |
2464 | "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", | 2463 | "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", |
2465 | mlmeex->address[0], | 2464 | mlmeex->address[0], |
2466 | mlmeex->address[1], | 2465 | mlmeex->address[1], |
2467 | mlmeex->address[2], | 2466 | mlmeex->address[2], |
@@ -2473,13 +2472,13 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, | |||
2473 | break; | 2472 | break; |
2474 | } | 2473 | } |
2475 | 2474 | ||
2476 | confirm->size = wpa_ie_len; | 2475 | confirm->size = wpa_ie_len; |
2477 | memcpy(&confirm->data, wpa_ie, wpa_ie_len); | 2476 | memcpy(&confirm->data, wpa_ie, wpa_ie_len); |
2478 | 2477 | ||
2479 | mgt_set_varlen(priv, oid, confirm, wpa_ie_len); | 2478 | mgt_set_varlen(priv, oid, confirm, wpa_ie_len); |
2480 | 2479 | ||
2481 | kfree(confirm); | 2480 | kfree(confirm); |
2482 | 2481 | ||
2483 | break; | 2482 | break; |
2484 | 2483 | ||
2485 | default: | 2484 | default: |
@@ -2545,10 +2544,10 @@ enum { | |||
2545 | #define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \ | 2544 | #define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \ |
2546 | ((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data)) | 2545 | ((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data)) |
2547 | 2546 | ||
2548 | /* Maximum length for algorithm names (-1 for nul termination) | 2547 | /* Maximum length for algorithm names (-1 for nul termination) |
2549 | * used in ioctl() */ | 2548 | * used in ioctl() */ |
2550 | #define HOSTAP_CRYPT_ALG_NAME_LEN 16 | 2549 | #define HOSTAP_CRYPT_ALG_NAME_LEN 16 |
2551 | 2550 | ||
2552 | struct prism2_hostapd_param { | 2551 | struct prism2_hostapd_param { |
2553 | u32 cmd; | 2552 | u32 cmd; |
2554 | u8 sta_addr[ETH_ALEN]; | 2553 | u8 sta_addr[ETH_ALEN]; |
@@ -2621,7 +2620,7 @@ prism2_ioctl_set_encryption(struct net_device *dev, | |||
2621 | &key); | 2620 | &key); |
2622 | } | 2621 | } |
2623 | /* | 2622 | /* |
2624 | * If a valid key is set, encryption should be enabled | 2623 | * If a valid key is set, encryption should be enabled |
2625 | * (user may turn it off later). | 2624 | * (user may turn it off later). |
2626 | * This is also how "iwconfig ethX key on" works | 2625 | * This is also how "iwconfig ethX key on" works |
2627 | */ | 2626 | */ |
@@ -2643,7 +2642,7 @@ prism2_ioctl_set_encryption(struct net_device *dev, | |||
2643 | } | 2642 | } |
2644 | /* now read the flags */ | 2643 | /* now read the flags */ |
2645 | if (param->u.crypt.flags & IW_ENCODE_DISABLED) { | 2644 | if (param->u.crypt.flags & IW_ENCODE_DISABLED) { |
2646 | /* Encoding disabled, | 2645 | /* Encoding disabled, |
2647 | * authen = DOT11_AUTH_OS; | 2646 | * authen = DOT11_AUTH_OS; |
2648 | * invoke = 0; | 2647 | * invoke = 0; |
2649 | * exunencrypt = 0; */ | 2648 | * exunencrypt = 0; */ |
@@ -2710,7 +2709,7 @@ prism2_ioctl_set_generic_element(struct net_device *ndev, | |||
2710 | 2709 | ||
2711 | ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len); | 2710 | ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len); |
2712 | 2711 | ||
2713 | if (ret == 0) | 2712 | if (ret == 0) |
2714 | printk(KERN_DEBUG "%s: WPA IE Attachment was set\n", | 2713 | printk(KERN_DEBUG "%s: WPA IE Attachment was set\n", |
2715 | ndev->name); | 2714 | ndev->name); |
2716 | } | 2715 | } |
@@ -2870,7 +2869,7 @@ prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, | |||
2870 | mlme = DOT11_MLME_AUTO; | 2869 | mlme = DOT11_MLME_AUTO; |
2871 | printk("%s: Disabling WPA\n", ndev->name); | 2870 | printk("%s: Disabling WPA\n", ndev->name); |
2872 | break; | 2871 | break; |
2873 | case 2: | 2872 | case 2: |
2874 | case 1: /* WPA */ | 2873 | case 1: /* WPA */ |
2875 | printk("%s: Enabling WPA\n", ndev->name); | 2874 | printk("%s: Enabling WPA\n", ndev->name); |
2876 | break; | 2875 | break; |
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h index 65f33acd0a42..e8183d30c52e 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.h +++ b/drivers/net/wireless/prism54/isl_ioctl.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * (C) 2003 Aurelien Alleaume <slts@free.fr> | 3 | * (C) 2003 Aurelien Alleaume <slts@free.fr> |
5 | * (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> | 4 | * (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> |
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h index 419edf7ccf1a..b7534c2869c8 100644 --- a/drivers/net/wireless/prism54/isl_oid.h +++ b/drivers/net/wireless/prism54/isl_oid.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * | ||
4 | * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> | 2 | * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> |
5 | * Copyright (C) 2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> | 3 | * Copyright (C) 2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> |
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | 4 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> |
@@ -23,7 +21,7 @@ | |||
23 | #if !defined(_ISL_OID_H) | 21 | #if !defined(_ISL_OID_H) |
24 | #define _ISL_OID_H | 22 | #define _ISL_OID_H |
25 | 23 | ||
26 | /* | 24 | /* |
27 | * MIB related constant and structure definitions for communicating | 25 | * MIB related constant and structure definitions for communicating |
28 | * with the device firmware | 26 | * with the device firmware |
29 | */ | 27 | */ |
@@ -99,21 +97,21 @@ struct obj_attachment { | |||
99 | char data[0]; | 97 | char data[0]; |
100 | } __attribute__((packed)); | 98 | } __attribute__((packed)); |
101 | 99 | ||
102 | /* | 100 | /* |
103 | * in case everything's ok, the inlined function below will be | 101 | * in case everything's ok, the inlined function below will be |
104 | * optimized away by the compiler... | 102 | * optimized away by the compiler... |
105 | */ | 103 | */ |
106 | static inline void | 104 | static inline void |
107 | __bug_on_wrong_struct_sizes(void) | 105 | __bug_on_wrong_struct_sizes(void) |
108 | { | 106 | { |
109 | BUG_ON(sizeof (struct obj_ssid) != 34); | 107 | BUILD_BUG_ON(sizeof (struct obj_ssid) != 34); |
110 | BUG_ON(sizeof (struct obj_key) != 34); | 108 | BUILD_BUG_ON(sizeof (struct obj_key) != 34); |
111 | BUG_ON(sizeof (struct obj_mlme) != 12); | 109 | BUILD_BUG_ON(sizeof (struct obj_mlme) != 12); |
112 | BUG_ON(sizeof (struct obj_mlmeex) != 14); | 110 | BUILD_BUG_ON(sizeof (struct obj_mlmeex) != 14); |
113 | BUG_ON(sizeof (struct obj_buffer) != 8); | 111 | BUILD_BUG_ON(sizeof (struct obj_buffer) != 8); |
114 | BUG_ON(sizeof (struct obj_bss) != 60); | 112 | BUILD_BUG_ON(sizeof (struct obj_bss) != 60); |
115 | BUG_ON(sizeof (struct obj_bsslist) != 4); | 113 | BUILD_BUG_ON(sizeof (struct obj_bsslist) != 4); |
116 | BUG_ON(sizeof (struct obj_frequencies) != 2); | 114 | BUILD_BUG_ON(sizeof (struct obj_frequencies) != 2); |
117 | } | 115 | } |
118 | 116 | ||
119 | enum dot11_state_t { | 117 | enum dot11_state_t { |
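Note: the switch from BUG_ON to BUILD_BUG_ON above turns these wire-format size checks into compile-time failures: if a packed struct's layout ever changes, the build breaks instead of the driver crashing at runtime (the condition must be a compile-time constant). A kernel-style sketch with an invented struct:

#include <linux/kernel.h>
#include <linux/types.h>

struct example_wire_hdr {
	u8     type;
	u8     flags;
	__le16 len;
} __attribute__((packed));

static inline void example_check_sizes(void)
{
	/* Fails to compile if the packed layout stops being 4 bytes. */
	BUILD_BUG_ON(sizeof(struct example_wire_hdr) != 4);
}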
@@ -154,13 +152,13 @@ enum dot11_priv_t { | |||
154 | 152 | ||
155 | /* Prism "Nitro" / Frameburst / "Packet Frame Grouping" | 153 | /* Prism "Nitro" / Frameburst / "Packet Frame Grouping" |
156 | * Value is in microseconds. Represents the # microseconds | 154 | * Value is in microseconds. Represents the # microseconds |
157 | * the firmware will take to group frames before sending them out | 155 | * the firmware will take to group frames before sending them out |
158 | * together with a CSMA contention. Without this all frames are | 156 | * together with a CSMA contention. Without this all frames are |
159 | * sent with a CSMA contention. | 157 | * sent with a CSMA contention. |
160 | * Bibliography: | 158 | * Bibliography: |
161 | * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html | 159 | * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html |
162 | */ | 160 | */ |
163 | enum dot11_maxframeburst_t { | 161 | enum dot11_maxframeburst_t { |
164 | /* Values for DOT11_OID_MAXFRAMEBURST */ | 162 | /* Values for DOT11_OID_MAXFRAMEBURST */ |
165 | DOT11_MAXFRAMEBURST_OFF = 0, /* Card firmware default */ | 163 | DOT11_MAXFRAMEBURST_OFF = 0, /* Card firmware default */ |
166 | DOT11_MAXFRAMEBURST_MIXED_SAFE = 650, /* 802.11 a,b,g safe */ | 164 | DOT11_MAXFRAMEBURST_MIXED_SAFE = 650, /* 802.11 a,b,g safe */ |
@@ -176,9 +174,9 @@ enum dot11_maxframeburst_t { | |||
176 | /* Support for 802.11 long and short frame preambles. | 174 | /* Support for 802.11 long and short frame preambles. |
177 | * Long preamble uses 128-bit sync field, 8-bit CRC | 175 | * Long preamble uses 128-bit sync field, 8-bit CRC |
178 | * Short preamble uses 56-bit sync field, 16-bit CRC | 176 | * Short preamble uses 56-bit sync field, 16-bit CRC |
179 | * | 177 | * |
180 | * 802.11a -- not sure, both optionally ? | 178 | * 802.11a -- not sure, both optionally ? |
181 | * 802.11b supports long and optionally short | 179 | * 802.11b supports long and optionally short |
182 | * 802.11g supports both */ | 180 | * 802.11g supports both */ |
183 | enum dot11_preamblesettings_t { | 181 | enum dot11_preamblesettings_t { |
184 | DOT11_PREAMBLESETTING_LONG = 0, | 182 | DOT11_PREAMBLESETTING_LONG = 0, |
@@ -194,7 +192,7 @@ enum dot11_preamblesettings_t { | |||
194 | * Long uses 802.11a slot timing (9 usec ?) | 192 | * Long uses 802.11a slot timing (9 usec ?) |
195 | * Short uses 802.11b slot timing (20 usec ?) | 193 | * Short uses 802.11b slot timing (20 usec ?) |
196 | enum dot11_slotsettings_t { | 194 | enum dot11_slotsettings_t { |
197 | DOT11_SLOTSETTINGS_LONG = 0, | 195 | DOT11_SLOTSETTINGS_LONG = 0, |
198 | /* Allows *only* long 802.11b slot timing */ | 196 | /* Allows *only* long 802.11b slot timing */ |
199 | DOT11_SLOTSETTINGS_SHORT = 1, | 197 | DOT11_SLOTSETTINGS_SHORT = 1, |
200 | /* Allows *only* long 802.11a slot timing */ | 198 | /* Allows *only* long 802.11a slot timing */ |
@@ -203,7 +201,7 @@ enum dot11_slotsettings_t { | |||
203 | }; | 201 | }; |
204 | 202 | ||
205 | /* All you need to know, ERP is "Extended Rate PHY". | 203 | /* All you need to know, ERP is "Extended Rate PHY". |
206 | * An Extended Rate PHY (ERP) STA or AP shall support three different | 204 | * An Extended Rate PHY (ERP) STA or AP shall support three different |
207 | * preamble and header formats: | 205 | * preamble and header formats: |
208 | * Long preamble (refer to above) | 206 | * Long preamble (refer to above) |
209 | * Short preamble (refer to above) | 207 | * Short preamble (refer to above) |
@@ -221,7 +219,7 @@ enum do11_nonerpstatus_t { | |||
221 | /* (ERP is "Extended Rate PHY") Way to read NONERP is NON-ERP-* | 219 | /* (ERP is "Extended Rate PHY") Way to read NONERP is NON-ERP-* |
222 | * The key here is DOT11 NON ERP NEVER protects against | 220 | * The key here is DOT11 NON ERP NEVER protects against |
223 | * NON ERP STA's. You *don't* want this unless | 221 | * NON ERP STA's. You *don't* want this unless |
224 | * you know what you are doing. It means you will only | 222 | * you know what you are doing. It means you will only |
225 | * get Extended Rate capabilities */ | 223 | * get Extended Rate capabilities */ |
226 | enum dot11_nonerpprotection_t { | 224 | enum dot11_nonerpprotection_t { |
227 | DOT11_NONERP_NEVER = 0, | 225 | DOT11_NONERP_NEVER = 0, |
@@ -229,13 +227,13 @@ enum dot11_nonerpprotection_t { | |||
229 | DOT11_NONERP_DYNAMIC = 2 | 227 | DOT11_NONERP_DYNAMIC = 2 |
230 | }; | 228 | }; |
231 | 229 | ||
232 | /* Preset OID configuration for 802.11 modes | 230 | /* Preset OID configuration for 802.11 modes |
233 | * Note: DOT11_OID_CW[MIN|MAX] hold the values of the | 231 | * Note: DOT11_OID_CW[MIN|MAX] hold the values of the |
234 | * DCS MIN|MAX backoff used */ | 232 | * DCS MIN|MAX backoff used */ |
235 | enum dot11_profile_t { /* And set/allowed values */ | 233 | enum dot11_profile_t { /* And set/allowed values */ |
236 | /* Allowed values for DOT11_OID_PROFILES */ | 234 | /* Allowed values for DOT11_OID_PROFILES */ |
237 | DOT11_PROFILE_B_ONLY = 0, | 235 | DOT11_PROFILE_B_ONLY = 0, |
238 | /* DOT11_OID_RATES: 1, 2, 5.5, 11Mbps | 236 | /* DOT11_OID_RATES: 1, 2, 5.5, 11Mbps |
239 | * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_DYNAMIC | 237 | * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_DYNAMIC |
240 | * DOT11_OID_CWMIN: 31 | 238 | * DOT11_OID_CWMIN: 31 |
241 | * DOT11_OID_NONEPROTECTION: DOT11_NOERP_DYNAMIC | 239 | * DOT11_OID_NONEPROTECTION: DOT11_NOERP_DYNAMIC |
@@ -275,7 +273,7 @@ enum oid_inl_conformance_t { | |||
275 | OID_INL_CONFORMANCE_NONE = 0, /* Perform active scanning */ | 273 | OID_INL_CONFORMANCE_NONE = 0, /* Perform active scanning */ |
276 | OID_INL_CONFORMANCE_STRICT = 1, /* Strictly adhere to 802.11d */ | 274 | OID_INL_CONFORMANCE_STRICT = 1, /* Strictly adhere to 802.11d */ |
277 | OID_INL_CONFORMANCE_FLEXIBLE = 2, /* Use passed 802.11d info to | 275 | OID_INL_CONFORMANCE_FLEXIBLE = 2, /* Use passed 802.11d info to |
278 | * determine channel AND/OR just make assumption that active | 276 | * determine channel AND/OR just make assumption that active |
279 | * channels are valid channels */ | 277 | * channels are valid channels */ |
280 | }; | 278 | }; |
281 | 279 | ||
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index ec1c00f19eb3..1e0603ca436c 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> | 3 | * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> |
5 | * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> | 4 | * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> |
@@ -413,7 +412,7 @@ prism54_bring_down(islpci_private *priv) | |||
413 | islpci_set_state(priv, PRV_STATE_PREBOOT); | 412 | islpci_set_state(priv, PRV_STATE_PREBOOT); |
414 | 413 | ||
415 | /* disable all device interrupts in case they weren't */ | 414 | /* disable all device interrupts in case they weren't */ |
416 | isl38xx_disable_interrupts(priv->device_base); | 415 | isl38xx_disable_interrupts(priv->device_base); |
417 | 416 | ||
418 | /* For safety reasons, we may want to ensure that no DMA transfer is | 417 | /* For safety reasons, we may want to ensure that no DMA transfer is |
419 | * currently in progress by emptying the TX and RX queues. */ | 418 | * currently in progress by emptying the TX and RX queues. */ |
@@ -480,7 +479,7 @@ islpci_reset_if(islpci_private *priv) | |||
480 | 479 | ||
481 | DEFINE_WAIT(wait); | 480 | DEFINE_WAIT(wait); |
482 | prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE); | 481 | prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE); |
483 | 482 | ||
484 | /* now the last step is to reset the interface */ | 483 | /* now the last step is to reset the interface */ |
485 | isl38xx_interface_reset(priv->device_base, priv->device_host_address); | 484 | isl38xx_interface_reset(priv->device_base, priv->device_host_address); |
486 | islpci_set_state(priv, PRV_STATE_PREINIT); | 485 | islpci_set_state(priv, PRV_STATE_PREINIT); |
@@ -488,7 +487,7 @@ islpci_reset_if(islpci_private *priv) | |||
488 | for(count = 0; count < 2 && result; count++) { | 487 | for(count = 0; count < 2 && result; count++) { |
489 | /* The software reset acknowledge needs about 220 msec here. | 488 | /* The software reset acknowledge needs about 220 msec here. |
490 | * Be conservative and wait for up to one second. */ | 489 | * Be conservative and wait for up to one second. */ |
491 | 490 | ||
492 | remaining = schedule_timeout_uninterruptible(HZ); | 491 | remaining = schedule_timeout_uninterruptible(HZ); |
493 | 492 | ||
494 | if(remaining > 0) { | 493 | if(remaining > 0) { |
@@ -496,7 +495,7 @@ islpci_reset_if(islpci_private *priv) | |||
496 | break; | 495 | break; |
497 | } | 496 | } |
498 | 497 | ||
499 | /* If we're here it's because our IRQ hasn't yet gone through. | 498 | /* If we're here it's because our IRQ hasn't yet gone through. |
500 | * Retry a bit more... | 499 | * Retry a bit more... |
501 | */ | 500 | */ |
502 | printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n", | 501 | printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n", |
@@ -514,7 +513,7 @@ islpci_reset_if(islpci_private *priv) | |||
514 | 513 | ||
515 | /* Now that the device is 100% up, let's allow | 514 | /* Now that the device is 100% up, let's allow |
516 | * for the other interrupts -- | 515 | * for the other interrupts -- |
517 | * NOTE: this is not *yet* true since we've only allowed the | 516 | * NOTE: this is not *yet* true since we've only allowed the |
518 | * INIT interrupt on the IRQ line. We can perhaps poll | 517 | * INIT interrupt on the IRQ line. We can perhaps poll |
519 | * the IRQ line until we know for sure the reset went through */ | 518 | * the IRQ line until we know for sure the reset went through */ |
520 | isl38xx_enable_common_interrupts(priv->device_base); | 519 | isl38xx_enable_common_interrupts(priv->device_base); |
@@ -716,7 +715,7 @@ islpci_alloc_memory(islpci_private *priv) | |||
716 | 715 | ||
717 | prism54_acl_init(&priv->acl); | 716 | prism54_acl_init(&priv->acl); |
718 | prism54_wpa_bss_ie_init(priv); | 717 | prism54_wpa_bss_ie_init(priv); |
719 | if (mgt_init(priv)) | 718 | if (mgt_init(priv)) |
720 | goto out_free; | 719 | goto out_free; |
721 | 720 | ||
722 | return 0; | 721 | return 0; |
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h index 2f7e525d0cf6..a9aa1662eaa4 100644 --- a/drivers/net/wireless/prism54/islpci_dev.h +++ b/drivers/net/wireless/prism54/islpci_dev.h | |||
@@ -1,6 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
3 | * Copyright (C) 2002 Intersil Americas Inc. | ||
4 | * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> | 3 | * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> |
5 | * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> | 4 | * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> |
6 | * Copyright (C) 2003 Aurelien Alleaume <slts@free.fr> | 5 | * Copyright (C) 2003 Aurelien Alleaume <slts@free.fr> |
@@ -72,12 +71,12 @@ struct islpci_bss_wpa_ie { | |||
72 | u8 bssid[ETH_ALEN]; | 71 | u8 bssid[ETH_ALEN]; |
73 | u8 wpa_ie[MAX_WPA_IE_LEN]; | 72 | u8 wpa_ie[MAX_WPA_IE_LEN]; |
74 | size_t wpa_ie_len; | 73 | size_t wpa_ie_len; |
75 | 74 | ||
76 | }; | 75 | }; |
77 | 76 | ||
78 | typedef struct { | 77 | typedef struct { |
79 | spinlock_t slock; /* generic spinlock; */ | 78 | spinlock_t slock; /* generic spinlock; */ |
80 | 79 | ||
81 | u32 priv_oid; | 80 | u32 priv_oid; |
82 | 81 | ||
83 | /* our mib cache */ | 82 | /* our mib cache */ |
@@ -85,7 +84,7 @@ typedef struct { | |||
85 | struct rw_semaphore mib_sem; | 84 | struct rw_semaphore mib_sem; |
86 | void **mib; | 85 | void **mib; |
87 | char nickname[IW_ESSID_MAX_SIZE+1]; | 86 | char nickname[IW_ESSID_MAX_SIZE+1]; |
88 | 87 | ||
89 | /* Take care of the wireless stats */ | 88 | /* Take care of the wireless stats */ |
90 | struct work_struct stats_work; | 89 | struct work_struct stats_work; |
91 | struct semaphore stats_sem; | 90 | struct semaphore stats_sem; |
@@ -120,7 +119,7 @@ typedef struct { | |||
120 | struct net_device *ndev; | 119 | struct net_device *ndev; |
121 | 120 | ||
122 | /* device queue interface members */ | 121 | /* device queue interface members */ |
123 | struct isl38xx_cb *control_block; /* device control block | 122 | struct isl38xx_cb *control_block; /* device control block |
124 | (== driver_mem_address!) */ | 123 | (== driver_mem_address!) */ |
125 | 124 | ||
126 | /* Each queue has three indexes: | 125 | /* Each queue has three indexes: |
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index a8261d8454dd..676d83813dc8 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | 3 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> |
5 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
@@ -48,7 +47,7 @@ islpci_eth_cleanup_transmit(islpci_private *priv, | |||
48 | /* read the index of the first fragment to be freed */ | 47 | /* read the index of the first fragment to be freed */ |
49 | index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE; | 48 | index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE; |
50 | 49 | ||
51 | /* check for holes in the arrays caused by multi fragment frames | 50 | /* check for holes in the arrays caused by multi fragment frames |
52 | * searching for the last fragment of a frame */ | 51 | * searching for the last fragment of a frame */ |
53 | if (priv->pci_map_tx_address[index] != (dma_addr_t) NULL) { | 52 | if (priv->pci_map_tx_address[index] != (dma_addr_t) NULL) { |
54 | /* entry is the last fragment of a frame | 53 | /* entry is the last fragment of a frame |
@@ -253,6 +252,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb) | |||
253 | * header and without the FCS. But there is a bit that | 252 | * header and without the FCS. But there is a bit that |
254 | * indicates if the packet is corrupted :-) */ | 253 | * indicates if the packet is corrupted :-) */ |
255 | struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data; | 254 | struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data; |
255 | |||
256 | if (hdr->flags & 0x01) | 256 | if (hdr->flags & 0x01) |
257 | /* This one is bad. Drop it ! */ | 257 | /* This one is bad. Drop it ! */ |
258 | return -1; | 258 | return -1; |
@@ -284,7 +284,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb) | |||
284 | (struct avs_80211_1_header *) skb_push(*skb, | 284 | (struct avs_80211_1_header *) skb_push(*skb, |
285 | sizeof (struct | 285 | sizeof (struct |
286 | avs_80211_1_header)); | 286 | avs_80211_1_header)); |
287 | 287 | ||
288 | avs->version = cpu_to_be32(P80211CAPTURE_VERSION); | 288 | avs->version = cpu_to_be32(P80211CAPTURE_VERSION); |
289 | avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header)); | 289 | avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header)); |
290 | avs->mactime = cpu_to_be64(le64_to_cpu(clock)); | 290 | avs->mactime = cpu_to_be64(le64_to_cpu(clock)); |
@@ -390,7 +390,7 @@ islpci_eth_receive(islpci_private *priv) | |||
390 | struct rx_annex_header *annex = | 390 | struct rx_annex_header *annex = |
391 | (struct rx_annex_header *) skb->data; | 391 | (struct rx_annex_header *) skb->data; |
392 | wstats.level = annex->rfmon.rssi; | 392 | wstats.level = annex->rfmon.rssi; |
393 | /* The noise value can be a bit outdated if nobody's | 393 | /* The noise value can be a bit outdated if nobody's |
394 | * reading wireless stats... */ | 394 | * reading wireless stats... */ |
395 | wstats.noise = priv->local_iwstatistics.qual.noise; | 395 | wstats.noise = priv->local_iwstatistics.qual.noise; |
396 | wstats.qual = wstats.level - wstats.noise; | 396 | wstats.qual = wstats.level - wstats.noise; |
@@ -464,10 +464,8 @@ islpci_eth_receive(islpci_private *priv) | |||
464 | break; | 464 | break; |
465 | } | 465 | } |
466 | /* update the fragment address */ | 466 | /* update the fragment address */ |
467 | control_block->rx_data_low[index].address = cpu_to_le32((u32) | 467 | control_block->rx_data_low[index].address = |
468 | priv-> | 468 | cpu_to_le32((u32)priv->pci_map_rx_address[index]); |
469 | pci_map_rx_address | ||
470 | [index]); | ||
471 | wmb(); | 469 | wmb(); |
472 | 470 | ||
473 | /* increment the driver read pointer */ | 471 | /* increment the driver read pointer */ |
@@ -484,10 +482,12 @@ islpci_eth_receive(islpci_private *priv) | |||
484 | void | 482 | void |
485 | islpci_do_reset_and_wake(void *data) | 483 | islpci_do_reset_and_wake(void *data) |
486 | { | 484 | { |
487 | islpci_private *priv = (islpci_private *) data; | 485 | islpci_private *priv = data; |
486 | |||
488 | islpci_reset(priv, 1); | 487 | islpci_reset(priv, 1); |
489 | netif_wake_queue(priv->ndev); | ||
490 | priv->reset_task_pending = 0; | 488 | priv->reset_task_pending = 0; |
489 | smp_wmb(); | ||
490 | netif_wake_queue(priv->ndev); | ||
491 | } | 491 | } |
492 | 492 | ||
493 | void | 493 | void |
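Note: the reordering above is about visibility: reset_task_pending is cleared first, smp_wmb() publishes that store, and only then is the TX queue woken, so the cleared flag is ordered before the wake-up (the transmit side pairs this with its own read of the flag). A condensed kernel-style sketch of the pattern, with invented struct and function names:

#include <linux/netdevice.h>

struct example_priv {
	struct net_device *ndev;
	int reset_task_pending;
};

static void example_reset_and_wake(struct example_priv *priv)
{
	/* ... perform the actual hardware reset here ... */
	priv->reset_task_pending = 0;	/* 1: mark the reset as finished */
	smp_wmb();			/* 2: order the store before the wake-up */
	netif_wake_queue(priv->ndev);	/* 3: let the transmit path run again */
}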
@@ -499,12 +499,14 @@ islpci_eth_tx_timeout(struct net_device *ndev) | |||
499 | /* increment the transmit error counter */ | 499 | /* increment the transmit error counter */ |
500 | statistics->tx_errors++; | 500 | statistics->tx_errors++; |
501 | 501 | ||
502 | printk(KERN_WARNING "%s: tx_timeout", ndev->name); | ||
503 | if (!priv->reset_task_pending) { | 502 | if (!priv->reset_task_pending) { |
504 | priv->reset_task_pending = 1; | 503 | printk(KERN_WARNING |
505 | printk(", scheduling a reset"); | 504 | "%s: tx_timeout, scheduling reset", ndev->name); |
506 | netif_stop_queue(ndev); | 505 | netif_stop_queue(ndev); |
506 | priv->reset_task_pending = 1; | ||
507 | schedule_work(&priv->reset_task); | 507 | schedule_work(&priv->reset_task); |
508 | } else { | ||
509 | printk(KERN_WARNING | ||
510 | "%s: tx_timeout, waiting for reset", ndev->name); | ||
508 | } | 511 | } |
509 | printk("\n"); | ||
510 | } | 512 | } |
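Note: the restructured tx_timeout above schedules the reset worker at most once; if a reset is already pending, a later timeout only logs and returns. A kernel-style sketch of that guard (names invented, serialisation of the flag left to the caller as in the driver):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_priv {
	struct work_struct reset_task;
	int reset_task_pending;
};

static void example_tx_timeout(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);

	if (priv->reset_task_pending)
		return;				/* a reset is already queued, just wait */

	netif_stop_queue(ndev);			/* no further TX until the reset has run */
	priv->reset_task_pending = 1;
	schedule_work(&priv->reset_task);
}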
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h index bc9d7a60b8d6..26789454067c 100644 --- a/drivers/net/wireless/prism54/islpci_eth.h +++ b/drivers/net/wireless/prism54/islpci_eth.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * | 3 | * |
5 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c index f692dccf0d07..58257b40c043 100644 --- a/drivers/net/wireless/prism54/islpci_hotplug.c +++ b/drivers/net/wireless/prism54/islpci_hotplug.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> | 3 | * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> |
5 | * | 4 | * |
@@ -40,8 +39,8 @@ static int init_pcitm = 0; | |||
40 | module_param(init_pcitm, int, 0); | 39 | module_param(init_pcitm, int, 0); |
41 | 40 | ||
42 | /* In this order: vendor, device, subvendor, subdevice, class, class_mask, | 41 | /* In this order: vendor, device, subvendor, subdevice, class, class_mask, |
43 | * driver_data | 42 | * driver_data |
44 | * If you have an update for this please contact prism54-devel@prism54.org | 43 | * If you have an update for this please contact prism54-devel@prism54.org |
45 | * The latest list can be found at http://prism54.org/supported_cards.php */ | 44 | * The latest list can be found at http://prism54.org/supported_cards.php */ |
46 | static const struct pci_device_id prism54_id_tbl[] = { | 45 | static const struct pci_device_id prism54_id_tbl[] = { |
47 | /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ | 46 | /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ |
@@ -132,15 +131,15 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
132 | 131 | ||
133 | /* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT) | 132 | /* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT) |
134 | * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT) | 133 | * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT) |
135 | * The RETRY_TIMEOUT is used to set the number of retries that the core, as a | 134 | * The RETRY_TIMEOUT is used to set the number of retries that the core, as a |
136 | * Master, will perform before abandoning a cycle. The default value for | 135 | * Master, will perform before abandoning a cycle. The default value for |
137 | * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new | 136 | * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new |
138 | * devices. A write of zero to the RETRY_TIMEOUT register disables this | 137 | * devices. A write of zero to the RETRY_TIMEOUT register disables this |
139 | * function to allow use with any non-compliant legacy devices that may | 138 | * function to allow use with any non-compliant legacy devices that may |
140 | * execute more retries. | 139 | * execute more retries. |
141 | * | 140 | * |
142 | * Writing zero to both these two registers will disable both timeouts and | 141 | * Writing zero to both these two registers will disable both timeouts and |
143 | * *can* solve problems caused by devices that are slow to respond. | 142 | * *can* solve problems caused by devices that are slow to respond. |
144 | * Make this configurable - MSW | 143 | * Make this configurable - MSW |
145 | */ | 144 | */ |
146 | if ( init_pcitm >= 0 ) { | 145 | if ( init_pcitm >= 0 ) { |
@@ -171,14 +170,15 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
171 | pci_set_master(pdev); | 170 | pci_set_master(pdev); |
172 | 171 | ||
173 | /* enable MWI */ | 172 | /* enable MWI */ |
174 | pci_set_mwi(pdev); | 173 | if (!pci_set_mwi(pdev)) |
174 | printk(KERN_INFO "%s: pci_set_mwi(pdev) succeeded\n", DRV_NAME); | ||
175 | 175 | ||
176 | /* setup the network device interface and its structure */ | 176 | /* setup the network device interface and its structure */ |
177 | if (!(ndev = islpci_setup(pdev))) { | 177 | if (!(ndev = islpci_setup(pdev))) { |
178 | /* error configuring the driver as a network device */ | 178 | /* error configuring the driver as a network device */ |
179 | printk(KERN_ERR "%s: could not configure network device\n", | 179 | printk(KERN_ERR "%s: could not configure network device\n", |
180 | DRV_NAME); | 180 | DRV_NAME); |
181 | goto do_pci_release_regions; | 181 | goto do_pci_clear_mwi; |
182 | } | 182 | } |
183 | 183 | ||
184 | priv = netdev_priv(ndev); | 184 | priv = netdev_priv(ndev); |
@@ -208,6 +208,8 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
208 | pci_set_drvdata(pdev, NULL); | 208 | pci_set_drvdata(pdev, NULL); |
209 | free_netdev(ndev); | 209 | free_netdev(ndev); |
210 | priv = NULL; | 210 | priv = NULL; |
211 | do_pci_clear_mwi: | ||
212 | pci_clear_mwi(pdev); | ||
211 | do_pci_release_regions: | 213 | do_pci_release_regions: |
212 | pci_release_regions(pdev); | 214 | pci_release_regions(pdev); |
213 | do_pci_disable_device: | 215 | do_pci_disable_device: |
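Note: the probe changes above extend the driver's goto-based unwind chain: pci_set_mwi() is best-effort (only its success is reported), but once MWI has been enabled a later failure must jump to the new do_pci_clear_mwi label so it is undone along with the regions and the device. A condensed sketch of the pattern; example_setup() is a stand-in for the real islpci_setup() step:

#include <linux/pci.h>

static int example_setup(struct pci_dev *pdev)
{
	return 0;	/* stub standing in for the driver's netdev setup */
}

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto do_pci_disable_device;

	if (!pci_set_mwi(pdev))			/* 0 means MWI was enabled */
		dev_info(&pdev->dev, "MWI enabled\n");

	err = example_setup(pdev);
	if (err)
		goto do_pci_clear_mwi;		/* undo MWI, regions, device */

	return 0;

do_pci_clear_mwi:
	pci_clear_mwi(pdev);
	pci_release_regions(pdev);
do_pci_disable_device:
	pci_disable_device(pdev);
	return err;
}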
@@ -241,7 +243,7 @@ prism54_remove(struct pci_dev *pdev) | |||
241 | isl38xx_disable_interrupts(priv->device_base); | 243 | isl38xx_disable_interrupts(priv->device_base); |
242 | islpci_set_state(priv, PRV_STATE_OFF); | 244 | islpci_set_state(priv, PRV_STATE_OFF); |
243 | /* The code below causes a lockup at rmmod time. It might be | 245 | /* The code below causes a lockup at rmmod time. It might be |
244 | * because some interrupts still linger after rmmod time, | 246 | * because some interrupts still linger after rmmod time, |
245 | * see bug #17 */ | 247 | * see bug #17 */ |
246 | /* pci_set_power_state(pdev, 3);*/ /* try to power-off */ | 248 | /* pci_set_power_state(pdev, 3);*/ /* try to power-off */ |
247 | } | 249 | } |
@@ -255,6 +257,8 @@ prism54_remove(struct pci_dev *pdev) | |||
255 | free_netdev(ndev); | 257 | free_netdev(ndev); |
256 | priv = NULL; | 258 | priv = NULL; |
257 | 259 | ||
260 | pci_clear_mwi(pdev); | ||
261 | |||
258 | pci_release_regions(pdev); | 262 | pci_release_regions(pdev); |
259 | 263 | ||
260 | pci_disable_device(pdev); | 264 | pci_disable_device(pdev); |
@@ -288,12 +292,19 @@ prism54_resume(struct pci_dev *pdev) | |||
288 | { | 292 | { |
289 | struct net_device *ndev = pci_get_drvdata(pdev); | 293 | struct net_device *ndev = pci_get_drvdata(pdev); |
290 | islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; | 294 | islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; |
291 | BUG_ON(!priv); | 295 | int err; |
292 | 296 | ||
293 | pci_enable_device(pdev); | 297 | BUG_ON(!priv); |
294 | 298 | ||
295 | printk(KERN_NOTICE "%s: got resume request\n", ndev->name); | 299 | printk(KERN_NOTICE "%s: got resume request\n", ndev->name); |
296 | 300 | ||
301 | err = pci_enable_device(pdev); | ||
302 | if (err) { | ||
303 | printk(KERN_ERR "%s: pci_enable_device failed on resume\n", | ||
304 | ndev->name); | ||
305 | return err; | ||
306 | } | ||
307 | |||
297 | pci_restore_state(pdev); | 308 | pci_restore_state(pdev); |
298 | 309 | ||
299 | /* alright let's go into the PREBOOT state */ | 310 | /* alright let's go into the PREBOOT state */ |
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c index 2e061a80b294..036a875054c9 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.c +++ b/drivers/net/wireless/prism54/islpci_mgt.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net> | 3 | * Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net> |
5 | * | 4 | * |
@@ -502,7 +501,7 @@ islpci_mgt_transaction(struct net_device *ndev, | |||
502 | printk(KERN_WARNING "%s: timeout waiting for mgmt response\n", | 501 | printk(KERN_WARNING "%s: timeout waiting for mgmt response\n", |
503 | ndev->name); | 502 | ndev->name); |
504 | 503 | ||
505 | /* TODO: we should reset the device here */ | 504 | /* TODO: we should reset the device here */ |
506 | out: | 505 | out: |
507 | finish_wait(&priv->mgmt_wqueue, &wait); | 506 | finish_wait(&priv->mgmt_wqueue, &wait); |
508 | up(&priv->mgmt_sem); | 507 | up(&priv->mgmt_sem); |
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h index 2982be3363ef..fc53b587b722 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.h +++ b/drivers/net/wireless/prism54/islpci_mgt.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Copyright (C) 2002 Intersil Americas Inc. | 2 | * Copyright (C) 2002 Intersil Americas Inc. |
4 | * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> | 3 | * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> |
5 | * | 4 | * |
@@ -36,8 +35,8 @@ extern int pc_debug; | |||
36 | 35 | ||
37 | 36 | ||
38 | /* General driver definitions */ | 37 | /* General driver definitions */ |
39 | #define PCIDEVICE_LATENCY_TIMER_MIN 0x40 | 38 | #define PCIDEVICE_LATENCY_TIMER_MIN 0x40 |
40 | #define PCIDEVICE_LATENCY_TIMER_VAL 0x50 | 39 | #define PCIDEVICE_LATENCY_TIMER_VAL 0x50 |
41 | 40 | ||
42 | /* Debugging verbose definitions */ | 41 | /* Debugging verbose definitions */ |
43 | #define SHOW_NOTHING 0x00 /* overrules everything */ | 42 | #define SHOW_NOTHING 0x00 /* overrules everything */ |
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c index ebb238785839..fbc52b6a3024 100644 --- a/drivers/net/wireless/prism54/oid_mgt.c +++ b/drivers/net/wireless/prism54/oid_mgt.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2003,2004 Aurelien Alleaume <slts@free.fr> | 2 | * Copyright (C) 2003,2004 Aurelien Alleaume <slts@free.fr> |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
@@ -503,7 +503,7 @@ mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len | |||
503 | } | 503 | } |
504 | if (ret || response_op == PIMFOR_OP_ERROR) | 504 | if (ret || response_op == PIMFOR_OP_ERROR) |
505 | ret = -EIO; | 505 | ret = -EIO; |
506 | } else | 506 | } else |
507 | ret = -EIO; | 507 | ret = -EIO; |
508 | 508 | ||
509 | /* re-set given data to what it was */ | 509 | /* re-set given data to what it was */ |
@@ -727,7 +727,7 @@ mgt_commit(islpci_private *priv) | |||
727 | * MEDIUMLIMIT,BEACONPERIOD,DTIMPERIOD,ATIMWINDOW,LISTENINTERVAL | 727 | * MEDIUMLIMIT,BEACONPERIOD,DTIMPERIOD,ATIMWINDOW,LISTENINTERVAL |
728 | * FREQUENCY,EXTENDEDRATES. | 728 | * FREQUENCY,EXTENDEDRATES. |
729 | * | 729 | * |
730 | * The way to do this is to set ESSID. Note though that they may get | 730 | * The way to do this is to set ESSID. Note though that they may get |
731 | * unlatched beforehand by setting another OID. */ | 731 | * unlatched beforehand by setting another OID. */ |
732 | #if 0 | 732 | #if 0 |
733 | void | 733 | void |
diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/prism54/prismcompat.h index d71eca55a302..aa1d1747784f 100644 --- a/drivers/net/wireless/prism54/prismcompat.h +++ b/drivers/net/wireless/prism54/prismcompat.h | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * (C) 2004 Margit Schubert-While <margitsw@t-online.de> | 2 | * (C) 2004 Margit Schubert-While <margitsw@t-online.de> |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
@@ -16,7 +16,7 @@ | |||
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * Compatibility header file to aid support of different kernel versions | 20 | * Compatibility header file to aid support of different kernel versions |
21 | */ | 21 | */ |
22 | 22 | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index aa661b2b76c7..8be99ebbe1cd 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c | |||
@@ -1076,6 +1076,31 @@ static int set_mandatory_rates(struct zd_chip *chip, enum ieee80211_std std) | |||
1076 | return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL); | 1076 | return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL); |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip, | ||
1080 | u8 rts_rate, int preamble) | ||
1081 | { | ||
1082 | int rts_mod = ZD_RX_CCK; | ||
1083 | u32 value = 0; | ||
1084 | |||
1085 | /* Modulation bit */ | ||
1086 | if (ZD_CS_TYPE(rts_rate) == ZD_CS_OFDM) | ||
1087 | rts_mod = ZD_RX_OFDM; | ||
1088 | |||
1089 | dev_dbg_f(zd_chip_dev(chip), "rts_rate=%x preamble=%x\n", | ||
1090 | rts_rate, preamble); | ||
1091 | |||
1092 | value |= rts_rate << RTSCTS_SH_RTS_RATE; | ||
1093 | value |= rts_mod << RTSCTS_SH_RTS_MOD_TYPE; | ||
1094 | value |= preamble << RTSCTS_SH_RTS_PMB_TYPE; | ||
1095 | value |= preamble << RTSCTS_SH_CTS_PMB_TYPE; | ||
1096 | |||
1097 | /* We always send 11M self-CTS messages, like the vendor driver. */ | ||
1098 | value |= ZD_CCK_RATE_11M << RTSCTS_SH_CTS_RATE; | ||
1099 | value |= ZD_RX_CCK << RTSCTS_SH_CTS_MOD_TYPE; | ||
1100 | |||
1101 | return zd_iowrite32_locked(chip, value, CR_RTS_CTS_RATE); | ||
1102 | } | ||
1103 | |||
1079 | int zd_chip_enable_hwint(struct zd_chip *chip) | 1104 | int zd_chip_enable_hwint(struct zd_chip *chip) |
1080 | { | 1105 | { |
1081 | int r; | 1106 | int r; |
@@ -1355,17 +1380,12 @@ out: | |||
1355 | return r; | 1380 | return r; |
1356 | } | 1381 | } |
1357 | 1382 | ||
1358 | int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates) | 1383 | int zd_chip_set_basic_rates_locked(struct zd_chip *chip, u16 cr_rates) |
1359 | { | 1384 | { |
1360 | int r; | 1385 | ZD_ASSERT((cr_rates & ~(CR_RATES_80211B | CR_RATES_80211G)) == 0); |
1361 | 1386 | dev_dbg_f(zd_chip_dev(chip), "%x\n", cr_rates); | |
1362 | if (cr_rates & ~(CR_RATES_80211B|CR_RATES_80211G)) | ||
1363 | return -EINVAL; | ||
1364 | 1387 | ||
1365 | mutex_lock(&chip->mutex); | 1388 | return zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL); |
1366 | r = zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL); | ||
1367 | mutex_unlock(&chip->mutex); | ||
1368 | return r; | ||
1369 | } | 1389 | } |
1370 | 1390 | ||
1371 | static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) | 1391 | static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) |
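With the _locked suffix, the convention is that the caller already holds chip->mutex; the old zd_chip_set_basic_rates() behaviour is preserved by the inline wrapper added to zd_chip.h below. A sketch of a caller that batches both register updates in one critical section (the helper name is illustrative, the called functions and their signatures are the ones added in this patch):

        /* Illustrative only: both writes happen under a single mutex hold. */
        static int example_update_rates(struct zd_chip *chip, u8 rts_rate,
                                        int preamble, u16 cr_rates)
        {
                int r;

                mutex_lock(&chip->mutex);
                r = zd_chip_set_rts_cts_rate_locked(chip, rts_rate, preamble);
                if (!r)
                        r = zd_chip_set_basic_rates_locked(chip, cr_rates);
                mutex_unlock(&chip->mutex);
                return r;
        }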
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index ae59597ce4e1..ca892b9a6448 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h | |||
@@ -337,24 +337,24 @@ | |||
337 | #define CR_MAC_PS_STATE CTL_REG(0x050C) | 337 | #define CR_MAC_PS_STATE CTL_REG(0x050C) |
338 | 338 | ||
339 | #define CR_INTERRUPT CTL_REG(0x0510) | 339 | #define CR_INTERRUPT CTL_REG(0x0510) |
340 | #define INT_TX_COMPLETE 0x00000001 | 340 | #define INT_TX_COMPLETE (1 << 0) |
341 | #define INT_RX_COMPLETE 0x00000002 | 341 | #define INT_RX_COMPLETE (1 << 1) |
342 | #define INT_RETRY_FAIL 0x00000004 | 342 | #define INT_RETRY_FAIL (1 << 2) |
343 | #define INT_WAKEUP 0x00000008 | 343 | #define INT_WAKEUP (1 << 3) |
344 | #define INT_DTIM_NOTIFY 0x00000020 | 344 | #define INT_DTIM_NOTIFY (1 << 5) |
345 | #define INT_CFG_NEXT_BCN 0x00000040 | 345 | #define INT_CFG_NEXT_BCN (1 << 6) |
346 | #define INT_BUS_ABORT 0x00000080 | 346 | #define INT_BUS_ABORT (1 << 7) |
347 | #define INT_TX_FIFO_READY 0x00000100 | 347 | #define INT_TX_FIFO_READY (1 << 8) |
348 | #define INT_UART 0x00000200 | 348 | #define INT_UART (1 << 9) |
349 | #define INT_TX_COMPLETE_EN 0x00010000 | 349 | #define INT_TX_COMPLETE_EN (1 << 16) |
350 | #define INT_RX_COMPLETE_EN 0x00020000 | 350 | #define INT_RX_COMPLETE_EN (1 << 17) |
351 | #define INT_RETRY_FAIL_EN 0x00040000 | 351 | #define INT_RETRY_FAIL_EN (1 << 18) |
352 | #define INT_WAKEUP_EN 0x00080000 | 352 | #define INT_WAKEUP_EN (1 << 19) |
353 | #define INT_DTIM_NOTIFY_EN 0x00200000 | 353 | #define INT_DTIM_NOTIFY_EN (1 << 21) |
354 | #define INT_CFG_NEXT_BCN_EN 0x00400000 | 354 | #define INT_CFG_NEXT_BCN_EN (1 << 22) |
355 | #define INT_BUS_ABORT_EN 0x00800000 | 355 | #define INT_BUS_ABORT_EN (1 << 23) |
356 | #define INT_TX_FIFO_READY_EN 0x01000000 | 356 | #define INT_TX_FIFO_READY_EN (1 << 24) |
357 | #define INT_UART_EN 0x02000000 | 357 | #define INT_UART_EN (1 << 25) |
358 | 358 | ||
359 | #define CR_TSF_LOW_PART CTL_REG(0x0514) | 359 | #define CR_TSF_LOW_PART CTL_REG(0x0514) |
360 | #define CR_TSF_HIGH_PART CTL_REG(0x0518) | 360 | #define CR_TSF_HIGH_PART CTL_REG(0x0518) |
@@ -398,18 +398,18 @@ | |||
398 | * device will use a rate in this table that is less than or equal to the rate | 398 | * device will use a rate in this table that is less than or equal to the rate |
399 | * of the incoming frame which prompted the response */ | 399 | * of the incoming frame which prompted the response */ |
400 | #define CR_BASIC_RATE_TBL CTL_REG(0x0630) | 400 | #define CR_BASIC_RATE_TBL CTL_REG(0x0630) |
401 | #define CR_RATE_1M 0x0001 /* 802.11b */ | 401 | #define CR_RATE_1M (1 << 0) /* 802.11b */ |
402 | #define CR_RATE_2M 0x0002 /* 802.11b */ | 402 | #define CR_RATE_2M (1 << 1) /* 802.11b */ |
403 | #define CR_RATE_5_5M 0x0004 /* 802.11b */ | 403 | #define CR_RATE_5_5M (1 << 2) /* 802.11b */ |
404 | #define CR_RATE_11M 0x0008 /* 802.11b */ | 404 | #define CR_RATE_11M (1 << 3) /* 802.11b */ |
405 | #define CR_RATE_6M 0x0100 /* 802.11g */ | 405 | #define CR_RATE_6M (1 << 8) /* 802.11g */ |
406 | #define CR_RATE_9M 0x0200 /* 802.11g */ | 406 | #define CR_RATE_9M (1 << 9) /* 802.11g */ |
407 | #define CR_RATE_12M 0x0400 /* 802.11g */ | 407 | #define CR_RATE_12M (1 << 10) /* 802.11g */ |
408 | #define CR_RATE_18M 0x0800 /* 802.11g */ | 408 | #define CR_RATE_18M (1 << 11) /* 802.11g */ |
409 | #define CR_RATE_24M 0x1000 /* 802.11g */ | 409 | #define CR_RATE_24M (1 << 12) /* 802.11g */ |
410 | #define CR_RATE_36M 0x2000 /* 802.11g */ | 410 | #define CR_RATE_36M (1 << 13) /* 802.11g */ |
411 | #define CR_RATE_48M 0x4000 /* 802.11g */ | 411 | #define CR_RATE_48M (1 << 14) /* 802.11g */ |
412 | #define CR_RATE_54M 0x8000 /* 802.11g */ | 412 | #define CR_RATE_54M (1 << 15) /* 802.11g */ |
413 | #define CR_RATES_80211G 0xff00 | 413 | #define CR_RATES_80211G 0xff00 |
414 | #define CR_RATES_80211B 0x000f | 414 | #define CR_RATES_80211B 0x000f |
415 | 415 | ||
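The group masks are unchanged: they are simply the OR of the individual rate bits now written as (1 << n). An illustrative compile-time check, not part of the patch, that documents the relationship:

        /* Assumes the CR_RATE_* and CR_RATES_* definitions above. */
        static inline void example_check_rate_masks(void)
        {
                BUILD_BUG_ON(CR_RATES_80211B !=
                             (CR_RATE_1M | CR_RATE_2M | CR_RATE_5_5M | CR_RATE_11M));
                BUILD_BUG_ON(CR_RATES_80211G !=
                             (CR_RATE_6M | CR_RATE_9M | CR_RATE_12M | CR_RATE_18M |
                              CR_RATE_24M | CR_RATE_36M | CR_RATE_48M | CR_RATE_54M));
        }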
@@ -420,15 +420,24 @@ | |||
420 | #define CR_MANDATORY_RATE_TBL CTL_REG(0x0634) | 420 | #define CR_MANDATORY_RATE_TBL CTL_REG(0x0634) |
421 | #define CR_RTS_CTS_RATE CTL_REG(0x0638) | 421 | #define CR_RTS_CTS_RATE CTL_REG(0x0638) |
422 | 422 | ||
423 | /* These are all bit indexes in CR_RTS_CTS_RATE, so remember to shift. */ | ||
424 | #define RTSCTS_SH_RTS_RATE 0 | ||
425 | #define RTSCTS_SH_EXP_CTS_RATE 4 | ||
426 | #define RTSCTS_SH_RTS_MOD_TYPE 8 | ||
427 | #define RTSCTS_SH_RTS_PMB_TYPE 9 | ||
428 | #define RTSCTS_SH_CTS_RATE 16 | ||
429 | #define RTSCTS_SH_CTS_MOD_TYPE 24 | ||
430 | #define RTSCTS_SH_CTS_PMB_TYPE 25 | ||
431 | |||
423 | #define CR_WEP_PROTECT CTL_REG(0x063C) | 432 | #define CR_WEP_PROTECT CTL_REG(0x063C) |
424 | #define CR_RX_THRESHOLD CTL_REG(0x0640) | 433 | #define CR_RX_THRESHOLD CTL_REG(0x0640) |
425 | 434 | ||
426 | /* register for controlling the LEDS */ | 435 | /* register for controlling the LEDS */ |
427 | #define CR_LED CTL_REG(0x0644) | 436 | #define CR_LED CTL_REG(0x0644) |
428 | /* masks for controlling LEDs */ | 437 | /* masks for controlling LEDs */ |
429 | #define LED1 0x0100 | 438 | #define LED1 (1 << 8) |
430 | #define LED2 0x0200 | 439 | #define LED2 (1 << 9) |
431 | #define LED_SW 0x0400 | 440 | #define LED_SW (1 << 10) |
432 | 441 | ||
433 | /* Seems to indicate that the configuration is over. | 442 | /* Seems to indicate that the configuration is over. |
434 | */ | 443 | */ |
@@ -455,18 +464,18 @@ | |||
455 | * registers, so one could argue it is a LOCK bit. But calling it | 464 | * registers, so one could argue it is a LOCK bit. But calling it |
456 | * LOCK_PHY_REGS makes it confusing. | 465 | * LOCK_PHY_REGS makes it confusing. |
457 | */ | 466 | */ |
458 | #define UNLOCK_PHY_REGS 0x0080 | 467 | #define UNLOCK_PHY_REGS (1 << 7) |
459 | 468 | ||
460 | #define CR_DEVICE_STATE CTL_REG(0x0684) | 469 | #define CR_DEVICE_STATE CTL_REG(0x0684) |
461 | #define CR_UNDERRUN_CNT CTL_REG(0x0688) | 470 | #define CR_UNDERRUN_CNT CTL_REG(0x0688) |
462 | 471 | ||
463 | #define CR_RX_FILTER CTL_REG(0x068c) | 472 | #define CR_RX_FILTER CTL_REG(0x068c) |
464 | #define RX_FILTER_ASSOC_RESPONSE 0x0002 | 473 | #define RX_FILTER_ASSOC_RESPONSE (1 << 1) |
465 | #define RX_FILTER_REASSOC_RESPONSE 0x0008 | 474 | #define RX_FILTER_REASSOC_RESPONSE (1 << 3) |
466 | #define RX_FILTER_PROBE_RESPONSE 0x0020 | 475 | #define RX_FILTER_PROBE_RESPONSE (1 << 5) |
467 | #define RX_FILTER_BEACON 0x0100 | 476 | #define RX_FILTER_BEACON (1 << 8) |
468 | #define RX_FILTER_DISASSOC 0x0400 | 477 | #define RX_FILTER_DISASSOC (1 << 10) |
469 | #define RX_FILTER_AUTH 0x0800 | 478 | #define RX_FILTER_AUTH (1 << 11) |
470 | #define AP_RX_FILTER 0x0400feff | 479 | #define AP_RX_FILTER 0x0400feff |
471 | #define STA_RX_FILTER 0x0000ffff | 480 | #define STA_RX_FILTER 0x0000ffff |
472 | 481 | ||
@@ -794,6 +803,9 @@ void zd_chip_disable_rx(struct zd_chip *chip); | |||
794 | int zd_chip_enable_hwint(struct zd_chip *chip); | 803 | int zd_chip_enable_hwint(struct zd_chip *chip); |
795 | int zd_chip_disable_hwint(struct zd_chip *chip); | 804 | int zd_chip_disable_hwint(struct zd_chip *chip); |
796 | 805 | ||
806 | int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip, | ||
807 | u8 rts_rate, int preamble); | ||
808 | |||
797 | static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type) | 809 | static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type) |
798 | { | 810 | { |
799 | return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type); | 811 | return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type); |
@@ -809,7 +821,17 @@ static inline int zd_chip_get_basic_rates(struct zd_chip *chip, u16 *cr_rates) | |||
809 | return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates); | 821 | return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates); |
810 | } | 822 | } |
811 | 823 | ||
812 | int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates); | 824 | int zd_chip_set_basic_rates_locked(struct zd_chip *chip, u16 cr_rates); |
825 | |||
826 | static inline int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates) | ||
827 | { | ||
828 | int r; | ||
829 | |||
830 | mutex_lock(&chip->mutex); | ||
831 | r = zd_chip_set_basic_rates_locked(chip, cr_rates); | ||
832 | mutex_unlock(&chip->mutex); | ||
833 | return r; | ||
834 | } | ||
813 | 835 | ||
814 | static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter) | 836 | static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter) |
815 | { | 837 | { |
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h index a13ec72eb304..fb22f62cf1f3 100644 --- a/drivers/net/wireless/zd1211rw/zd_def.h +++ b/drivers/net/wireless/zd1211rw/zd_def.h | |||
@@ -39,6 +39,7 @@ do { \ | |||
39 | if (!(x)) { \ | 39 | if (!(x)) { \ |
40 | pr_debug("%s:%d ASSERT %s VIOLATED!\n", \ | 40 | pr_debug("%s:%d ASSERT %s VIOLATED!\n", \ |
41 | __FILE__, __LINE__, __stringify(x)); \ | 41 | __FILE__, __LINE__, __stringify(x)); \ |
42 | dump_stack(); \ | ||
42 | } \ | 43 | } \ |
43 | } while (0) | 44 | } while (0) |
44 | #else | 45 | #else |
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c index 66905f7b61ff..189160efd2ae 100644 --- a/drivers/net/wireless/zd1211rw/zd_ieee80211.c +++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.c | |||
@@ -37,7 +37,12 @@ static const struct channel_range channel_ranges[] = { | |||
37 | [ZD_REGDOMAIN_JAPAN] = { 1, 14}, | 37 | [ZD_REGDOMAIN_JAPAN] = { 1, 14}, |
38 | [ZD_REGDOMAIN_SPAIN] = { 1, 14}, | 38 | [ZD_REGDOMAIN_SPAIN] = { 1, 14}, |
39 | [ZD_REGDOMAIN_FRANCE] = { 1, 14}, | 39 | [ZD_REGDOMAIN_FRANCE] = { 1, 14}, |
40 | [ZD_REGDOMAIN_JAPAN_ADD] = {14, 15}, | 40 | |
41 | /* Japan originally only had channel 14 available (see CHNL_ID 0x40 in | ||
42 | * 802.11). However, in 2001 the range was extended to include channels | ||
43 | * 1-13. The ZyDAS devices still use the old region code but are | ||
44 | * designed to allow the extra channel access in Japan. */ | ||
45 | [ZD_REGDOMAIN_JAPAN_ADD] = { 1, 15}, | ||
41 | }; | 46 | }; |
42 | 47 | ||
43 | const struct channel_range *zd_channel_range(u8 regdomain) | 48 | const struct channel_range *zd_channel_range(u8 regdomain) |
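The new {1, 15} entry covers channels 1-14 because the range end appears to be exclusive. A minimal membership check under that assumption (the start/end field names and the helper name are assumed for illustration):

        static int example_channel_in_range(const struct channel_range *range,
                                            u8 channel)
        {
                /* Assumes 'end' is exclusive, i.e. {1, 15} allows channels 1-14. */
                return channel >= range->start && channel < range->end;
        }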
@@ -133,9 +138,6 @@ int zd_find_channel(u8 *channel, const struct iw_freq *freq) | |||
133 | int i, r; | 138 | int i, r; |
134 | u32 mhz; | 139 | u32 mhz; |
135 | 140 | ||
136 | if (!(freq->flags & IW_FREQ_FIXED)) | ||
137 | return 0; | ||
138 | |||
139 | if (freq->m < 1000) { | 141 | if (freq->m < 1000) { |
140 | if (freq->m > NUM_CHANNELS || freq->m == 0) | 142 | if (freq->m > NUM_CHANNELS || freq->m == 0) |
141 | return -EINVAL; | 143 | return -EINVAL; |
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h index 36329890dfec..26b8298dff8c 100644 --- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h +++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h | |||
@@ -50,6 +50,7 @@ static inline u8 zd_ofdm_plcp_header_rate( | |||
50 | return header->prefix[0] & 0xf; | 50 | return header->prefix[0] & 0xf; |
51 | } | 51 | } |
52 | 52 | ||
53 | /* These are referred to as zd_rates */ | ||
53 | #define ZD_OFDM_RATE_6M 0xb | 54 | #define ZD_OFDM_RATE_6M 0xb |
54 | #define ZD_OFDM_RATE_9M 0xf | 55 | #define ZD_OFDM_RATE_9M 0xf |
55 | #define ZD_OFDM_RATE_12M 0xa | 56 | #define ZD_OFDM_RATE_12M 0xa |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index e5fedf968c19..2696f95b9278 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -32,6 +32,8 @@ | |||
32 | 32 | ||
33 | static void ieee_init(struct ieee80211_device *ieee); | 33 | static void ieee_init(struct ieee80211_device *ieee); |
34 | static void softmac_init(struct ieee80211softmac_device *sm); | 34 | static void softmac_init(struct ieee80211softmac_device *sm); |
35 | static void set_rts_cts_work(void *d); | ||
36 | static void set_basic_rates_work(void *d); | ||
35 | 37 | ||
36 | static void housekeeping_init(struct zd_mac *mac); | 38 | static void housekeeping_init(struct zd_mac *mac); |
37 | static void housekeeping_enable(struct zd_mac *mac); | 39 | static void housekeeping_enable(struct zd_mac *mac); |
@@ -46,6 +48,8 @@ int zd_mac_init(struct zd_mac *mac, | |||
46 | memset(mac, 0, sizeof(*mac)); | 48 | memset(mac, 0, sizeof(*mac)); |
47 | spin_lock_init(&mac->lock); | 49 | spin_lock_init(&mac->lock); |
48 | mac->netdev = netdev; | 50 | mac->netdev = netdev; |
51 | INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac); | ||
52 | INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac); | ||
49 | 53 | ||
50 | ieee_init(ieee); | 54 | ieee_init(ieee); |
51 | softmac_init(ieee80211_priv(netdev)); | 55 | softmac_init(ieee80211_priv(netdev)); |
@@ -213,6 +217,13 @@ int zd_mac_stop(struct net_device *netdev) | |||
213 | housekeeping_disable(mac); | 217 | housekeeping_disable(mac); |
214 | ieee80211softmac_stop(netdev); | 218 | ieee80211softmac_stop(netdev); |
215 | 219 | ||
220 | /* Ensure no work items are running or queued from this point */ | ||
221 | cancel_delayed_work(&mac->set_rts_cts_work); | ||
222 | cancel_delayed_work(&mac->set_basic_rates_work); | ||
223 | flush_workqueue(zd_workqueue); | ||
224 | mac->updating_rts_rate = 0; | ||
225 | mac->updating_basic_rates = 0; | ||
226 | |||
216 | zd_chip_disable_hwint(chip); | 227 | zd_chip_disable_hwint(chip); |
217 | zd_chip_switch_radio_off(chip); | 228 | zd_chip_switch_radio_off(chip); |
218 | zd_chip_disable_int(chip); | 229 | zd_chip_disable_int(chip); |
@@ -286,6 +297,186 @@ u8 zd_mac_get_regdomain(struct zd_mac *mac) | |||
286 | return regdomain; | 297 | return regdomain; |
287 | } | 298 | } |
288 | 299 | ||
300 | /* Fall back to the lowest rate if the rate is unknown. */ | ||
301 | static u8 rate_to_zd_rate(u8 rate) | ||
302 | { | ||
303 | switch (rate) { | ||
304 | case IEEE80211_CCK_RATE_2MB: | ||
305 | return ZD_CCK_RATE_2M; | ||
306 | case IEEE80211_CCK_RATE_5MB: | ||
307 | return ZD_CCK_RATE_5_5M; | ||
308 | case IEEE80211_CCK_RATE_11MB: | ||
309 | return ZD_CCK_RATE_11M; | ||
310 | case IEEE80211_OFDM_RATE_6MB: | ||
311 | return ZD_OFDM_RATE_6M; | ||
312 | case IEEE80211_OFDM_RATE_9MB: | ||
313 | return ZD_OFDM_RATE_9M; | ||
314 | case IEEE80211_OFDM_RATE_12MB: | ||
315 | return ZD_OFDM_RATE_12M; | ||
316 | case IEEE80211_OFDM_RATE_18MB: | ||
317 | return ZD_OFDM_RATE_18M; | ||
318 | case IEEE80211_OFDM_RATE_24MB: | ||
319 | return ZD_OFDM_RATE_24M; | ||
320 | case IEEE80211_OFDM_RATE_36MB: | ||
321 | return ZD_OFDM_RATE_36M; | ||
322 | case IEEE80211_OFDM_RATE_48MB: | ||
323 | return ZD_OFDM_RATE_48M; | ||
324 | case IEEE80211_OFDM_RATE_54MB: | ||
325 | return ZD_OFDM_RATE_54M; | ||
326 | } | ||
327 | return ZD_CCK_RATE_1M; | ||
328 | } | ||
329 | |||
330 | static u16 rate_to_cr_rate(u8 rate) | ||
331 | { | ||
332 | switch (rate) { | ||
333 | case IEEE80211_CCK_RATE_2MB: | ||
334 | return CR_RATE_1M; | ||
335 | case IEEE80211_CCK_RATE_5MB: | ||
336 | return CR_RATE_5_5M; | ||
337 | case IEEE80211_CCK_RATE_11MB: | ||
338 | return CR_RATE_11M; | ||
339 | case IEEE80211_OFDM_RATE_6MB: | ||
340 | return CR_RATE_6M; | ||
341 | case IEEE80211_OFDM_RATE_9MB: | ||
342 | return CR_RATE_9M; | ||
343 | case IEEE80211_OFDM_RATE_12MB: | ||
344 | return CR_RATE_12M; | ||
345 | case IEEE80211_OFDM_RATE_18MB: | ||
346 | return CR_RATE_18M; | ||
347 | case IEEE80211_OFDM_RATE_24MB: | ||
348 | return CR_RATE_24M; | ||
349 | case IEEE80211_OFDM_RATE_36MB: | ||
350 | return CR_RATE_36M; | ||
351 | case IEEE80211_OFDM_RATE_48MB: | ||
352 | return CR_RATE_48M; | ||
353 | case IEEE80211_OFDM_RATE_54MB: | ||
354 | return CR_RATE_54M; | ||
355 | } | ||
356 | return CR_RATE_1M; | ||
357 | } | ||
358 | |||
359 | static void try_enable_tx(struct zd_mac *mac) | ||
360 | { | ||
361 | unsigned long flags; | ||
362 | |||
363 | spin_lock_irqsave(&mac->lock, flags); | ||
364 | if (mac->updating_rts_rate == 0 && mac->updating_basic_rates == 0) | ||
365 | netif_wake_queue(mac->netdev); | ||
366 | spin_unlock_irqrestore(&mac->lock, flags); | ||
367 | } | ||
368 | |||
369 | static void set_rts_cts_work(void *d) | ||
370 | { | ||
371 | struct zd_mac *mac = d; | ||
372 | unsigned long flags; | ||
373 | u8 rts_rate; | ||
374 | unsigned int short_preamble; | ||
375 | |||
376 | mutex_lock(&mac->chip.mutex); | ||
377 | |||
378 | spin_lock_irqsave(&mac->lock, flags); | ||
379 | mac->updating_rts_rate = 0; | ||
380 | rts_rate = mac->rts_rate; | ||
381 | short_preamble = mac->short_preamble; | ||
382 | spin_unlock_irqrestore(&mac->lock, flags); | ||
383 | |||
384 | zd_chip_set_rts_cts_rate_locked(&mac->chip, rts_rate, short_preamble); | ||
385 | mutex_unlock(&mac->chip.mutex); | ||
386 | |||
387 | try_enable_tx(mac); | ||
388 | } | ||
389 | |||
390 | static void set_basic_rates_work(void *d) | ||
391 | { | ||
392 | struct zd_mac *mac = d; | ||
393 | unsigned long flags; | ||
394 | u16 basic_rates; | ||
395 | |||
396 | mutex_lock(&mac->chip.mutex); | ||
397 | |||
398 | spin_lock_irqsave(&mac->lock, flags); | ||
399 | mac->updating_basic_rates = 0; | ||
400 | basic_rates = mac->basic_rates; | ||
401 | spin_unlock_irqrestore(&mac->lock, flags); | ||
402 | |||
403 | zd_chip_set_basic_rates_locked(&mac->chip, basic_rates); | ||
404 | mutex_unlock(&mac->chip.mutex); | ||
405 | |||
406 | try_enable_tx(mac); | ||
407 | } | ||
408 | |||
409 | static void bssinfo_change(struct net_device *netdev, u32 changes) | ||
410 | { | ||
411 | struct zd_mac *mac = zd_netdev_mac(netdev); | ||
412 | struct ieee80211softmac_device *softmac = ieee80211_priv(netdev); | ||
413 | struct ieee80211softmac_bss_info *bssinfo = &softmac->bssinfo; | ||
414 | int need_set_rts_cts = 0; | ||
415 | int need_set_rates = 0; | ||
416 | u16 basic_rates; | ||
417 | unsigned long flags; | ||
418 | |||
419 | dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes); | ||
420 | |||
421 | if (changes & IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE) { | ||
422 | spin_lock_irqsave(&mac->lock, flags); | ||
423 | mac->short_preamble = bssinfo->short_preamble; | ||
424 | spin_unlock_irqrestore(&mac->lock, flags); | ||
425 | need_set_rts_cts = 1; | ||
426 | } | ||
427 | |||
428 | if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) { | ||
429 | /* Set RTS rate to highest available basic rate */ | ||
430 | u8 rate = ieee80211softmac_highest_supported_rate(softmac, | ||
431 | &bssinfo->supported_rates, 1); | ||
432 | rate = rate_to_zd_rate(rate); | ||
433 | |||
434 | spin_lock_irqsave(&mac->lock, flags); | ||
435 | if (rate != mac->rts_rate) { | ||
436 | mac->rts_rate = rate; | ||
437 | need_set_rts_cts = 1; | ||
438 | } | ||
439 | spin_unlock_irqrestore(&mac->lock, flags); | ||
440 | |||
441 | /* Set basic rates */ | ||
442 | need_set_rates = 1; | ||
443 | if (bssinfo->supported_rates.count == 0) { | ||
444 | /* Allow the device to be flexible */ | ||
445 | basic_rates = CR_RATES_80211B | CR_RATES_80211G; | ||
446 | } else { | ||
447 | int i = 0; | ||
448 | basic_rates = 0; | ||
449 | |||
450 | for (i = 0; i < bssinfo->supported_rates.count; i++) { | ||
451 | u16 rate = bssinfo->supported_rates.rates[i]; | ||
452 | if ((rate & IEEE80211_BASIC_RATE_MASK) == 0) | ||
453 | continue; | ||
454 | |||
455 | rate &= ~IEEE80211_BASIC_RATE_MASK; | ||
456 | basic_rates |= rate_to_cr_rate(rate); | ||
457 | } | ||
458 | } | ||
459 | spin_lock_irqsave(&mac->lock, flags); | ||
460 | mac->basic_rates = basic_rates; | ||
461 | spin_unlock_irqrestore(&mac->lock, flags); | ||
462 | } | ||
463 | |||
464 | /* Schedule any changes we made above */ | ||
465 | |||
466 | spin_lock_irqsave(&mac->lock, flags); | ||
467 | if (need_set_rts_cts && !mac->updating_rts_rate) { | ||
468 | mac->updating_rts_rate = 1; | ||
469 | netif_stop_queue(mac->netdev); | ||
470 | queue_work(zd_workqueue, &mac->set_rts_cts_work); | ||
471 | } | ||
472 | if (need_set_rates && !mac->updating_basic_rates) { | ||
473 | mac->updating_basic_rates = 1; | ||
474 | netif_stop_queue(mac->netdev); | ||
475 | queue_work(zd_workqueue, &mac->set_basic_rates_work); | ||
476 | } | ||
477 | spin_unlock_irqrestore(&mac->lock, flags); | ||
478 | } | ||
479 | |||
289 | static void set_channel(struct net_device *netdev, u8 channel) | 480 | static void set_channel(struct net_device *netdev, u8 channel) |
290 | { | 481 | { |
291 | struct zd_mac *mac = zd_netdev_mac(netdev); | 482 | struct zd_mac *mac = zd_netdev_mac(netdev); |
@@ -295,7 +486,6 @@ static void set_channel(struct net_device *netdev, u8 channel) | |||
295 | zd_chip_set_channel(&mac->chip, channel); | 486 | zd_chip_set_channel(&mac->chip, channel); |
296 | } | 487 | } |
297 | 488 | ||
298 | /* TODO: Should not work in Managed mode. */ | ||
299 | int zd_mac_request_channel(struct zd_mac *mac, u8 channel) | 489 | int zd_mac_request_channel(struct zd_mac *mac, u8 channel) |
300 | { | 490 | { |
301 | unsigned long lock_flags; | 491 | unsigned long lock_flags; |
@@ -317,31 +507,22 @@ int zd_mac_request_channel(struct zd_mac *mac, u8 channel) | |||
317 | return 0; | 507 | return 0; |
318 | } | 508 | } |
319 | 509 | ||
320 | int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags) | 510 | u8 zd_mac_get_channel(struct zd_mac *mac) |
321 | { | 511 | { |
322 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); | 512 | u8 channel = zd_chip_get_channel(&mac->chip); |
323 | 513 | ||
324 | *channel = zd_chip_get_channel(&mac->chip); | 514 | dev_dbg_f(zd_mac_dev(mac), "channel %u\n", channel); |
325 | if (ieee->iw_mode != IW_MODE_INFRA) { | 515 | return channel; |
326 | spin_lock_irq(&mac->lock); | ||
327 | *flags = *channel == mac->requested_channel ? | ||
328 | MAC_FIXED_CHANNEL : 0; | ||
329 | spin_unlock(&mac->lock); | ||
330 | } else { | ||
331 | *flags = 0; | ||
332 | } | ||
333 | dev_dbg_f(zd_mac_dev(mac), "channel %u flags %u\n", *channel, *flags); | ||
334 | return 0; | ||
335 | } | 516 | } |
336 | 517 | ||
337 | /* If a wrong rate is given, we fall back to the slowest rate: 1 MBit/s */ | 518 | /* If a wrong rate is given, we fall back to the slowest rate: 1 MBit/s */ |
338 | static u8 cs_typed_rate(u8 cs_rate) | 519 | static u8 zd_rate_typed(u8 zd_rate) |
339 | { | 520 | { |
340 | static const u8 typed_rates[16] = { | 521 | static const u8 typed_rates[16] = { |
341 | [ZD_CS_CCK_RATE_1M] = ZD_CS_CCK|ZD_CS_CCK_RATE_1M, | 522 | [ZD_CCK_RATE_1M] = ZD_CS_CCK|ZD_CCK_RATE_1M, |
342 | [ZD_CS_CCK_RATE_2M] = ZD_CS_CCK|ZD_CS_CCK_RATE_2M, | 523 | [ZD_CCK_RATE_2M] = ZD_CS_CCK|ZD_CCK_RATE_2M, |
343 | [ZD_CS_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CS_CCK_RATE_5_5M, | 524 | [ZD_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CCK_RATE_5_5M, |
344 | [ZD_CS_CCK_RATE_11M] = ZD_CS_CCK|ZD_CS_CCK_RATE_11M, | 525 | [ZD_CCK_RATE_11M] = ZD_CS_CCK|ZD_CCK_RATE_11M, |
345 | [ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M, | 526 | [ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M, |
346 | [ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M, | 527 | [ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M, |
347 | [ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M, | 528 | [ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M, |
@@ -353,37 +534,7 @@ static u8 cs_typed_rate(u8 cs_rate) | |||
353 | }; | 534 | }; |
354 | 535 | ||
355 | ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f); | 536 | ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f); |
356 | return typed_rates[cs_rate & ZD_CS_RATE_MASK]; | 537 | return typed_rates[zd_rate & ZD_CS_RATE_MASK]; |
357 | } | ||
358 | |||
359 | /* Fallback to lowest rate, if rate is unknown. */ | ||
360 | static u8 rate_to_cs_rate(u8 rate) | ||
361 | { | ||
362 | switch (rate) { | ||
363 | case IEEE80211_CCK_RATE_2MB: | ||
364 | return ZD_CS_CCK_RATE_2M; | ||
365 | case IEEE80211_CCK_RATE_5MB: | ||
366 | return ZD_CS_CCK_RATE_5_5M; | ||
367 | case IEEE80211_CCK_RATE_11MB: | ||
368 | return ZD_CS_CCK_RATE_11M; | ||
369 | case IEEE80211_OFDM_RATE_6MB: | ||
370 | return ZD_OFDM_RATE_6M; | ||
371 | case IEEE80211_OFDM_RATE_9MB: | ||
372 | return ZD_OFDM_RATE_9M; | ||
373 | case IEEE80211_OFDM_RATE_12MB: | ||
374 | return ZD_OFDM_RATE_12M; | ||
375 | case IEEE80211_OFDM_RATE_18MB: | ||
376 | return ZD_OFDM_RATE_18M; | ||
377 | case IEEE80211_OFDM_RATE_24MB: | ||
378 | return ZD_OFDM_RATE_24M; | ||
379 | case IEEE80211_OFDM_RATE_36MB: | ||
380 | return ZD_OFDM_RATE_36M; | ||
381 | case IEEE80211_OFDM_RATE_48MB: | ||
382 | return ZD_OFDM_RATE_48M; | ||
383 | case IEEE80211_OFDM_RATE_54MB: | ||
384 | return ZD_OFDM_RATE_54M; | ||
385 | } | ||
386 | return ZD_CS_CCK_RATE_1M; | ||
387 | } | 538 | } |
388 | 539 | ||
389 | int zd_mac_set_mode(struct zd_mac *mac, u32 mode) | 540 | int zd_mac_set_mode(struct zd_mac *mac, u32 mode) |
@@ -484,13 +635,13 @@ int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range) | |||
484 | return 0; | 635 | return 0; |
485 | } | 636 | } |
486 | 637 | ||
487 | static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length) | 638 | static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length) |
488 | { | 639 | { |
489 | static const u8 rate_divisor[] = { | 640 | static const u8 rate_divisor[] = { |
490 | [ZD_CS_CCK_RATE_1M] = 1, | 641 | [ZD_CCK_RATE_1M] = 1, |
491 | [ZD_CS_CCK_RATE_2M] = 2, | 642 | [ZD_CCK_RATE_2M] = 2, |
492 | [ZD_CS_CCK_RATE_5_5M] = 11, /* bits must be doubled */ | 643 | [ZD_CCK_RATE_5_5M] = 11, /* bits must be doubled */ |
493 | [ZD_CS_CCK_RATE_11M] = 11, | 644 | [ZD_CCK_RATE_11M] = 11, |
494 | [ZD_OFDM_RATE_6M] = 6, | 645 | [ZD_OFDM_RATE_6M] = 6, |
495 | [ZD_OFDM_RATE_9M] = 9, | 646 | [ZD_OFDM_RATE_9M] = 9, |
496 | [ZD_OFDM_RATE_12M] = 12, | 647 | [ZD_OFDM_RATE_12M] = 12, |
@@ -504,15 +655,15 @@ static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length) | |||
504 | u32 bits = (u32)tx_length * 8; | 655 | u32 bits = (u32)tx_length * 8; |
505 | u32 divisor; | 656 | u32 divisor; |
506 | 657 | ||
507 | divisor = rate_divisor[cs_rate]; | 658 | divisor = rate_divisor[zd_rate]; |
508 | if (divisor == 0) | 659 | if (divisor == 0) |
509 | return -EINVAL; | 660 | return -EINVAL; |
510 | 661 | ||
511 | switch (cs_rate) { | 662 | switch (zd_rate) { |
512 | case ZD_CS_CCK_RATE_5_5M: | 663 | case ZD_CCK_RATE_5_5M: |
513 | bits = (2*bits) + 10; /* round up to the next integer */ | 664 | bits = (2*bits) + 10; /* round up to the next integer */ |
514 | break; | 665 | break; |
515 | case ZD_CS_CCK_RATE_11M: | 666 | case ZD_CCK_RATE_11M: |
516 | if (service) { | 667 | if (service) { |
517 | u32 t = bits % 11; | 668 | u32 t = bits % 11; |
518 | *service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION; | 669 | *service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION; |
@@ -532,16 +683,16 @@ enum { | |||
532 | R2M_11A = 0x02, | 683 | R2M_11A = 0x02, |
533 | }; | 684 | }; |
534 | 685 | ||
535 | static u8 cs_rate_to_modulation(u8 cs_rate, int flags) | 686 | static u8 zd_rate_to_modulation(u8 zd_rate, int flags) |
536 | { | 687 | { |
537 | u8 modulation; | 688 | u8 modulation; |
538 | 689 | ||
539 | modulation = cs_typed_rate(cs_rate); | 690 | modulation = zd_rate_typed(zd_rate); |
540 | if (flags & R2M_SHORT_PREAMBLE) { | 691 | if (flags & R2M_SHORT_PREAMBLE) { |
541 | switch (ZD_CS_RATE(modulation)) { | 692 | switch (ZD_CS_RATE(modulation)) { |
542 | case ZD_CS_CCK_RATE_2M: | 693 | case ZD_CCK_RATE_2M: |
543 | case ZD_CS_CCK_RATE_5_5M: | 694 | case ZD_CCK_RATE_5_5M: |
544 | case ZD_CS_CCK_RATE_11M: | 695 | case ZD_CCK_RATE_11M: |
545 | modulation |= ZD_CS_CCK_PREA_SHORT; | 696 | modulation |= ZD_CS_CCK_PREA_SHORT; |
546 | return modulation; | 697 | return modulation; |
547 | } | 698 | } |
@@ -558,39 +709,36 @@ static void cs_set_modulation(struct zd_mac *mac, struct zd_ctrlset *cs, | |||
558 | { | 709 | { |
559 | struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev); | 710 | struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev); |
560 | u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl)); | 711 | u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl)); |
561 | u8 rate, cs_rate; | 712 | u8 rate, zd_rate; |
562 | int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0; | 713 | int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0; |
714 | int is_multicast = is_multicast_ether_addr(hdr->addr1); | ||
715 | int short_preamble = ieee80211softmac_short_preamble_ok(softmac, | ||
716 | is_multicast, is_mgt); | ||
717 | int flags = 0; | ||
718 | |||
719 | /* FIXME: 802.11a? */ | ||
720 | rate = ieee80211softmac_suggest_txrate(softmac, is_multicast, is_mgt); | ||
563 | 721 | ||
564 | /* FIXME: 802.11a? short preamble? */ | 722 | if (short_preamble) |
565 | rate = ieee80211softmac_suggest_txrate(softmac, | 723 | flags |= R2M_SHORT_PREAMBLE; |
566 | is_multicast_ether_addr(hdr->addr1), is_mgt); | ||
567 | 724 | ||
568 | cs_rate = rate_to_cs_rate(rate); | 725 | zd_rate = rate_to_zd_rate(rate); |
569 | cs->modulation = cs_rate_to_modulation(cs_rate, 0); | 726 | cs->modulation = zd_rate_to_modulation(zd_rate, flags); |
570 | } | 727 | } |
571 | 728 | ||
572 | static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, | 729 | static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, |
573 | struct ieee80211_hdr_4addr *header) | 730 | struct ieee80211_hdr_4addr *header) |
574 | { | 731 | { |
732 | struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev); | ||
575 | unsigned int tx_length = le16_to_cpu(cs->tx_length); | 733 | unsigned int tx_length = le16_to_cpu(cs->tx_length); |
576 | u16 fctl = le16_to_cpu(header->frame_ctl); | 734 | u16 fctl = le16_to_cpu(header->frame_ctl); |
577 | u16 ftype = WLAN_FC_GET_TYPE(fctl); | 735 | u16 ftype = WLAN_FC_GET_TYPE(fctl); |
578 | u16 stype = WLAN_FC_GET_STYPE(fctl); | 736 | u16 stype = WLAN_FC_GET_STYPE(fctl); |
579 | 737 | ||
580 | /* | 738 | /* |
581 | * CONTROL: | 739 | * CONTROL TODO: |
582 | * - start at 0x00 | ||
583 | * - if fragment 0, enable bit 0 | ||
584 | * - if backoff needed, enable bit 0 | 740 | * - if backoff needed, enable bit 0 |
585 | * - if burst (backoff not needed) disable bit 0 | 741 | * - if burst (backoff not needed) disable bit 0 |
586 | * - if multicast, enable bit 1 | ||
587 | * - if PS-POLL frame, enable bit 2 | ||
588 | * - if in INDEPENDENT_BSS mode and zd1205_DestPowerSave, then enable | ||
589 | * bit 4 (FIXME: wtf) | ||
590 | * - if frag_len > RTS threshold, set bit 5 as long if it isnt | ||
591 | * multicast or mgt | ||
592 | * - if bit 5 is set, and we are in OFDM mode, unset bit 5 and set bit | ||
593 | * 7 | ||
594 | */ | 742 | */ |
595 | 743 | ||
596 | cs->control = 0; | 744 | cs->control = 0; |
@@ -607,17 +755,18 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, | |||
607 | if (stype == IEEE80211_STYPE_PSPOLL) | 755 | if (stype == IEEE80211_STYPE_PSPOLL) |
608 | cs->control |= ZD_CS_PS_POLL_FRAME; | 756 | cs->control |= ZD_CS_PS_POLL_FRAME; |
609 | 757 | ||
758 | /* Unicast data frames over the threshold should have RTS */ | ||
610 | if (!is_multicast_ether_addr(header->addr1) && | 759 | if (!is_multicast_ether_addr(header->addr1) && |
611 | ftype != IEEE80211_FTYPE_MGMT && | 760 | ftype != IEEE80211_FTYPE_MGMT && |
612 | tx_length > zd_netdev_ieee80211(mac->netdev)->rts) | 761 | tx_length > zd_netdev_ieee80211(mac->netdev)->rts) |
613 | { | 762 | cs->control |= ZD_CS_RTS; |
614 | /* FIXME: check the logic */ | 763 | |
615 | if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM) { | 764 | /* Use CTS-to-self protection if required */ |
616 | /* 802.11g */ | 765 | if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM && |
617 | cs->control |= ZD_CS_SELF_CTS; | 766 | ieee80211softmac_protection_needed(softmac)) { |
618 | } else { /* 802.11b */ | 767 | /* FIXME: avoid sending RTS *and* self-CTS, is that correct? */ |
619 | cs->control |= ZD_CS_RTS; | 768 | cs->control &= ~ZD_CS_RTS; |
620 | } | 769 | cs->control |= ZD_CS_SELF_CTS; |
621 | } | 770 | } |
622 | 771 | ||
623 | /* FIXME: Management frame? */ | 772 | /* FIXME: Management frame? */ |
@@ -782,9 +931,11 @@ static int is_data_packet_for_us(struct ieee80211_device *ieee, | |||
782 | (netdev->flags & IFF_PROMISC); | 931 | (netdev->flags & IFF_PROMISC); |
783 | } | 932 | } |
784 | 933 | ||
785 | /* Filters receiving packets. If it returns 1 send it to ieee80211_rx, if 0 | 934 | /* Filters received packets. The function returns 1 if the packet should be |
786 | * return. If an error is detected -EINVAL is returned. ieee80211_rx_mgt() is | 935 | * forwarded to ieee80211_rx(). If the packet should be ignored the function |
787 | * called here. | 936 | * returns 0. If an invalid packet is found the function returns -EINVAL. |
937 | * | ||
938 | * The function calls ieee80211_rx_mgt() directly. | ||
788 | * | 939 | * |
789 | * It has been based on ieee80211_rx_any. | 940 | * It has been based on ieee80211_rx_any. |
790 | */ | 941 | */ |
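The rewritten comment spells out a three-way contract. A sketch of how a caller acts on it (the wrapper name is hypothetical, and filter_rx()'s parameters beyond the first are assumed here for illustration):

        static int example_handle_rx(struct ieee80211_device *ieee,
                                     const u8 *buffer, unsigned int length,
                                     struct ieee80211_rx_stats *stats)
        {
                int r = filter_rx(ieee, buffer, length, stats);

                if (r <= 0)
                        return r;       /* 0: ignore silently, -EINVAL: bad frame */

                /* r == 1: build an skb from buffer and hand it to ieee80211_rx() */
                return 0;
        }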
@@ -810,9 +961,9 @@ static int filter_rx(struct ieee80211_device *ieee, | |||
810 | ieee80211_rx_mgt(ieee, hdr, stats); | 961 | ieee80211_rx_mgt(ieee, hdr, stats); |
811 | return 0; | 962 | return 0; |
812 | case IEEE80211_FTYPE_CTL: | 963 | case IEEE80211_FTYPE_CTL: |
813 | /* Ignore invalid short buffers */ | ||
814 | return 0; | 964 | return 0; |
815 | case IEEE80211_FTYPE_DATA: | 965 | case IEEE80211_FTYPE_DATA: |
966 | /* Ignore invalid short buffers */ | ||
816 | if (length < sizeof(struct ieee80211_hdr_3addr)) | 967 | if (length < sizeof(struct ieee80211_hdr_3addr)) |
817 | return -EINVAL; | 968 | return -EINVAL; |
818 | return is_data_packet_for_us(ieee, hdr); | 969 | return is_data_packet_for_us(ieee, hdr); |
@@ -993,6 +1144,7 @@ static void ieee_init(struct ieee80211_device *ieee) | |||
993 | static void softmac_init(struct ieee80211softmac_device *sm) | 1144 | static void softmac_init(struct ieee80211softmac_device *sm) |
994 | { | 1145 | { |
995 | sm->set_channel = set_channel; | 1146 | sm->set_channel = set_channel; |
1147 | sm->bssinfo_change = bssinfo_change; | ||
996 | } | 1148 | } |
997 | 1149 | ||
998 | struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev) | 1150 | struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev) |
@@ -1028,66 +1180,6 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev) | |||
1028 | return iw_stats; | 1180 | return iw_stats; |
1029 | } | 1181 | } |
1030 | 1182 | ||
1031 | #ifdef DEBUG | ||
1032 | static const char* decryption_types[] = { | ||
1033 | [ZD_RX_NO_WEP] = "none", | ||
1034 | [ZD_RX_WEP64] = "WEP64", | ||
1035 | [ZD_RX_TKIP] = "TKIP", | ||
1036 | [ZD_RX_AES] = "AES", | ||
1037 | [ZD_RX_WEP128] = "WEP128", | ||
1038 | [ZD_RX_WEP256] = "WEP256", | ||
1039 | }; | ||
1040 | |||
1041 | static const char *decryption_type_string(u8 type) | ||
1042 | { | ||
1043 | const char *s; | ||
1044 | |||
1045 | if (type < ARRAY_SIZE(decryption_types)) { | ||
1046 | s = decryption_types[type]; | ||
1047 | } else { | ||
1048 | s = NULL; | ||
1049 | } | ||
1050 | return s ? s : "unknown"; | ||
1051 | } | ||
1052 | |||
1053 | static int is_ofdm(u8 frame_status) | ||
1054 | { | ||
1055 | return (frame_status & ZD_RX_OFDM); | ||
1056 | } | ||
1057 | |||
1058 | void zd_dump_rx_status(const struct rx_status *status) | ||
1059 | { | ||
1060 | const char* modulation; | ||
1061 | u8 quality; | ||
1062 | |||
1063 | if (is_ofdm(status->frame_status)) { | ||
1064 | modulation = "ofdm"; | ||
1065 | quality = status->signal_quality_ofdm; | ||
1066 | } else { | ||
1067 | modulation = "cck"; | ||
1068 | quality = status->signal_quality_cck; | ||
1069 | } | ||
1070 | pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n", | ||
1071 | modulation, status->signal_strength, quality, | ||
1072 | decryption_type_string(status->decryption_type)); | ||
1073 | if (status->frame_status & ZD_RX_ERROR) { | ||
1074 | pr_debug("rx error %s%s%s%s%s%s\n", | ||
1075 | (status->frame_status & ZD_RX_TIMEOUT_ERROR) ? | ||
1076 | "timeout " : "", | ||
1077 | (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ? | ||
1078 | "fifo " : "", | ||
1079 | (status->frame_status & ZD_RX_DECRYPTION_ERROR) ? | ||
1080 | "decryption " : "", | ||
1081 | (status->frame_status & ZD_RX_CRC32_ERROR) ? | ||
1082 | "crc32 " : "", | ||
1083 | (status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ? | ||
1084 | "addr1 " : "", | ||
1085 | (status->frame_status & ZD_RX_CRC16_ERROR) ? | ||
1086 | "crc16" : ""); | ||
1087 | } | ||
1088 | } | ||
1089 | #endif /* DEBUG */ | ||
1090 | |||
1091 | #define LINK_LED_WORK_DELAY HZ | 1183 | #define LINK_LED_WORK_DELAY HZ |
1092 | 1184 | ||
1093 | static void link_led_handler(void *p) | 1185 | static void link_led_handler(void *p) |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index e4dd40a6fec3..5dcfb251f02e 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | #include <linux/wireless.h> | 21 | #include <linux/wireless.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/workqueue.h> | ||
23 | #include <net/ieee80211.h> | 24 | #include <net/ieee80211.h> |
24 | #include <net/ieee80211softmac.h> | 25 | #include <net/ieee80211softmac.h> |
25 | 26 | ||
@@ -48,10 +49,11 @@ struct zd_ctrlset { | |||
48 | #define ZD_CS_CCK 0x00 | 49 | #define ZD_CS_CCK 0x00 |
49 | #define ZD_CS_OFDM 0x10 | 50 | #define ZD_CS_OFDM 0x10 |
50 | 51 | ||
51 | #define ZD_CS_CCK_RATE_1M 0x00 | 52 | /* These are referred to as zd_rates */ |
52 | #define ZD_CS_CCK_RATE_2M 0x01 | 53 | #define ZD_CCK_RATE_1M 0x00 |
53 | #define ZD_CS_CCK_RATE_5_5M 0x02 | 54 | #define ZD_CCK_RATE_2M 0x01 |
54 | #define ZD_CS_CCK_RATE_11M 0x03 | 55 | #define ZD_CCK_RATE_5_5M 0x02 |
56 | #define ZD_CCK_RATE_11M 0x03 | ||
55 | /* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*. | 57 | /* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*. |
56 | */ | 58 | */ |
57 | 59 | ||
@@ -116,10 +118,6 @@ struct rx_status { | |||
116 | #define ZD_RX_CRC16_ERROR 0x40 | 118 | #define ZD_RX_CRC16_ERROR 0x40 |
117 | #define ZD_RX_ERROR 0x80 | 119 | #define ZD_RX_ERROR 0x80 |
118 | 120 | ||
119 | enum mac_flags { | ||
120 | MAC_FIXED_CHANNEL = 0x01, | ||
121 | }; | ||
122 | |||
123 | struct housekeeping { | 121 | struct housekeeping { |
124 | struct work_struct link_led_work; | 122 | struct work_struct link_led_work; |
125 | }; | 123 | }; |
@@ -130,15 +128,33 @@ struct zd_mac { | |||
130 | struct zd_chip chip; | 128 | struct zd_chip chip; |
131 | spinlock_t lock; | 129 | spinlock_t lock; |
132 | struct net_device *netdev; | 130 | struct net_device *netdev; |
131 | |||
133 | /* Unlocked reading possible */ | 132 | /* Unlocked reading possible */ |
134 | struct iw_statistics iw_stats; | 133 | struct iw_statistics iw_stats; |
134 | |||
135 | struct housekeeping housekeeping; | 135 | struct housekeeping housekeeping; |
136 | struct work_struct set_rts_cts_work; | ||
137 | struct work_struct set_basic_rates_work; | ||
138 | |||
136 | unsigned int stats_count; | 139 | unsigned int stats_count; |
137 | u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; | 140 | u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; |
138 | u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE]; | 141 | u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE]; |
139 | u8 regdomain; | 142 | u8 regdomain; |
140 | u8 default_regdomain; | 143 | u8 default_regdomain; |
141 | u8 requested_channel; | 144 | u8 requested_channel; |
145 | |||
146 | /* A bitpattern of cr_rates */ | ||
147 | u16 basic_rates; | ||
148 | |||
149 | /* A zd_rate */ | ||
150 | u8 rts_rate; | ||
151 | |||
152 | /* Short preamble (used for RTS/CTS) */ | ||
153 | unsigned int short_preamble:1; | ||
154 | |||
155 | /* flags to indicate update in progress */ | ||
156 | unsigned int updating_rts_rate:1; | ||
157 | unsigned int updating_basic_rates:1; | ||
142 | }; | 158 | }; |
143 | 159 | ||
144 | static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac) | 160 | static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac) |
@@ -180,7 +196,7 @@ int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain); | |||
180 | u8 zd_mac_get_regdomain(struct zd_mac *zd_mac); | 196 | u8 zd_mac_get_regdomain(struct zd_mac *zd_mac); |
181 | 197 | ||
182 | int zd_mac_request_channel(struct zd_mac *mac, u8 channel); | 198 | int zd_mac_request_channel(struct zd_mac *mac, u8 channel); |
183 | int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags); | 199 | u8 zd_mac_get_channel(struct zd_mac *mac); |
184 | 200 | ||
185 | int zd_mac_set_mode(struct zd_mac *mac, u32 mode); | 201 | int zd_mac_set_mode(struct zd_mac *mac, u32 mode); |
186 | int zd_mac_get_mode(struct zd_mac *mac, u32 *mode); | 202 | int zd_mac_get_mode(struct zd_mac *mac, u32 *mode); |
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c index af3a7b36d078..60f1b0f6d45b 100644 --- a/drivers/net/wireless/zd1211rw/zd_netdev.c +++ b/drivers/net/wireless/zd1211rw/zd_netdev.c | |||
@@ -107,21 +107,10 @@ static int iw_get_freq(struct net_device *netdev, | |||
107 | struct iw_request_info *info, | 107 | struct iw_request_info *info, |
108 | union iwreq_data *req, char *extra) | 108 | union iwreq_data *req, char *extra) |
109 | { | 109 | { |
110 | int r; | ||
111 | struct zd_mac *mac = zd_netdev_mac(netdev); | 110 | struct zd_mac *mac = zd_netdev_mac(netdev); |
112 | struct iw_freq *freq = &req->freq; | 111 | struct iw_freq *freq = &req->freq; |
113 | u8 channel; | ||
114 | u8 flags; | ||
115 | |||
116 | r = zd_mac_get_channel(mac, &channel, &flags); | ||
117 | if (r) | ||
118 | return r; | ||
119 | 112 | ||
120 | freq->flags = (flags & MAC_FIXED_CHANNEL) ? | 113 | return zd_channel_to_freq(freq, zd_mac_get_channel(mac)); |
121 | IW_FREQ_FIXED : IW_FREQ_AUTO; | ||
122 | dev_dbg_f(zd_mac_dev(mac), "channel %s\n", | ||
123 | (flags & MAC_FIXED_CHANNEL) ? "fixed" : "auto"); | ||
124 | return zd_channel_to_freq(freq, channel); | ||
125 | } | 114 | } |
126 | 115 | ||
127 | static int iw_set_mode(struct net_device *netdev, | 116 | static int iw_set_mode(struct net_device *netdev, |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index a15b09549245..aa782e88754b 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -47,11 +47,17 @@ static struct usb_device_id usb_ids[] = { | |||
47 | { USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 }, | 47 | { USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 }, |
48 | { USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 }, | 48 | { USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 }, |
49 | { USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 }, | 49 | { USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 }, |
50 | { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 }, | ||
51 | { USB_DEVICE(0x0586, 0x3409), .driver_info = DEVICE_ZD1211 }, | ||
52 | { USB_DEVICE(0x0b3b, 0x1630), .driver_info = DEVICE_ZD1211 }, | ||
53 | { USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 }, | ||
54 | { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 }, | ||
50 | /* ZD1211B */ | 55 | /* ZD1211B */ |
51 | { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, | 56 | { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, |
52 | { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, | 57 | { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, |
53 | { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, | 58 | { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, |
54 | { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, | 59 | { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, |
60 | { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, | ||
55 | /* "Driverless" devices that need ejecting */ | 61 | /* "Driverless" devices that need ejecting */ |
56 | { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, | 62 | { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, |
57 | {} | 63 | {} |
@@ -587,6 +593,8 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, | |||
587 | unsigned int l, k, n; | 593 | unsigned int l, k, n; |
588 | for (i = 0, l = 0;; i++) { | 594 | for (i = 0, l = 0;; i++) { |
589 | k = le16_to_cpu(get_unaligned(&length_info->length[i])); | 595 | k = le16_to_cpu(get_unaligned(&length_info->length[i])); |
596 | if (k == 0) | ||
597 | return; | ||
590 | n = l+k; | 598 | n = l+k; |
591 | if (n > length) | 599 | if (n > length) |
592 | return; | 600 | return; |
@@ -1110,27 +1118,28 @@ static int __init usb_init(void) | |||
1110 | { | 1118 | { |
1111 | int r; | 1119 | int r; |
1112 | 1120 | ||
1113 | pr_debug("usb_init()\n"); | 1121 | pr_debug("%s usb_init()\n", driver.name); |
1114 | 1122 | ||
1115 | zd_workqueue = create_singlethread_workqueue(driver.name); | 1123 | zd_workqueue = create_singlethread_workqueue(driver.name); |
1116 | if (zd_workqueue == NULL) { | 1124 | if (zd_workqueue == NULL) { |
1117 | printk(KERN_ERR "%s: couldn't create workqueue\n", driver.name); | 1125 | printk(KERN_ERR "%s couldn't create workqueue\n", driver.name); |
1118 | return -ENOMEM; | 1126 | return -ENOMEM; |
1119 | } | 1127 | } |
1120 | 1128 | ||
1121 | r = usb_register(&driver); | 1129 | r = usb_register(&driver); |
1122 | if (r) { | 1130 | if (r) { |
1123 | printk(KERN_ERR "usb_register() failed. Error number %d\n", r); | 1131 | printk(KERN_ERR "%s usb_register() failed. Error number %d\n", |
1132 | driver.name, r); | ||
1124 | return r; | 1133 | return r; |
1125 | } | 1134 | } |
1126 | 1135 | ||
1127 | pr_debug("zd1211rw initialized\n"); | 1136 | pr_debug("%s initialized\n", driver.name); |
1128 | return 0; | 1137 | return 0; |
1129 | } | 1138 | } |
1130 | 1139 | ||
1131 | static void __exit usb_exit(void) | 1140 | static void __exit usb_exit(void) |
1132 | { | 1141 | { |
1133 | pr_debug("usb_exit()\n"); | 1142 | pr_debug("%s usb_exit()\n", driver.name); |
1134 | usb_deregister(&driver); | 1143 | usb_deregister(&driver); |
1135 | destroy_workqueue(zd_workqueue); | 1144 | destroy_workqueue(zd_workqueue); |
1136 | } | 1145 | } |
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index df04e050c647..d85e2ea0b6af 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c | |||
@@ -34,8 +34,16 @@ | |||
34 | #include <asm/amigaints.h> | 34 | #include <asm/amigaints.h> |
35 | #include <asm/amigahw.h> | 35 | #include <asm/amigahw.h> |
36 | 36 | ||
37 | #include "8390.h" | 37 | #define EI_SHIFT(x) (ei_local->reg_offset[x]) |
38 | #define ei_inb(port) in_8(port) | ||
39 | #define ei_outb(val,port) out_8(port,val) | ||
40 | #define ei_inb_p(port) in_8(port) | ||
41 | #define ei_outb_p(val,port) out_8(port,val) | ||
38 | 42 | ||
43 | static const char version[] = | ||
44 | "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; | ||
45 | |||
46 | #include "lib8390.c" | ||
39 | 47 | ||
40 | #define DRV_NAME "zorro8390" | 48 | #define DRV_NAME "zorro8390" |
41 | 49 | ||
@@ -114,7 +122,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, | |||
114 | break; | 122 | break; |
115 | board = z->resource.start; | 123 | board = z->resource.start; |
116 | ioaddr = board+cards[i].offset; | 124 | ioaddr = board+cards[i].offset; |
117 | dev = alloc_ei_netdev(); | 125 | dev = ____alloc_ei_netdev(0); |
118 | if (!dev) | 126 | if (!dev) |
119 | return -ENOMEM; | 127 | return -ENOMEM; |
120 | SET_MODULE_OWNER(dev); | 128 | SET_MODULE_OWNER(dev); |
@@ -201,7 +209,7 @@ static int __devinit zorro8390_init(struct net_device *dev, | |||
201 | dev->irq = IRQ_AMIGA_PORTS; | 209 | dev->irq = IRQ_AMIGA_PORTS; |
202 | 210 | ||
203 | /* Install the Interrupt handler */ | 211 | /* Install the Interrupt handler */ |
204 | i = request_irq(IRQ_AMIGA_PORTS, ei_interrupt, IRQF_SHARED, DRV_NAME, dev); | 212 | i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, DRV_NAME, dev); |
205 | if (i) return i; | 213 | if (i) return i; |
206 | 214 | ||
207 | for(i = 0; i < ETHER_ADDR_LEN; i++) { | 215 | for(i = 0; i < ETHER_ADDR_LEN; i++) { |
@@ -226,10 +234,10 @@ static int __devinit zorro8390_init(struct net_device *dev, | |||
226 | dev->open = &zorro8390_open; | 234 | dev->open = &zorro8390_open; |
227 | dev->stop = &zorro8390_close; | 235 | dev->stop = &zorro8390_close; |
228 | #ifdef CONFIG_NET_POLL_CONTROLLER | 236 | #ifdef CONFIG_NET_POLL_CONTROLLER |
229 | dev->poll_controller = ei_poll; | 237 | dev->poll_controller = __ei_poll; |
230 | #endif | 238 | #endif |
231 | 239 | ||
232 | NS8390_init(dev, 0); | 240 | __NS8390_init(dev, 0); |
233 | err = register_netdev(dev); | 241 | err = register_netdev(dev); |
234 | if (err) { | 242 | if (err) { |
235 | free_irq(IRQ_AMIGA_PORTS, dev); | 243 | free_irq(IRQ_AMIGA_PORTS, dev); |
@@ -246,7 +254,7 @@ static int __devinit zorro8390_init(struct net_device *dev, | |||
246 | 254 | ||
247 | static int zorro8390_open(struct net_device *dev) | 255 | static int zorro8390_open(struct net_device *dev) |
248 | { | 256 | { |
249 | ei_open(dev); | 257 | __ei_open(dev); |
250 | return 0; | 258 | return 0; |
251 | } | 259 | } |
252 | 260 | ||
@@ -254,7 +262,7 @@ static int zorro8390_close(struct net_device *dev) | |||
254 | { | 262 | { |
255 | if (ei_debug > 1) | 263 | if (ei_debug > 1) |
256 | printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); | 264 | printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); |
257 | ei_close(dev); | 265 | __ei_close(dev); |
258 | return 0; | 266 | return 0; |
259 | } | 267 | } |
260 | 268 | ||
@@ -405,7 +413,7 @@ static void zorro8390_block_output(struct net_device *dev, int count, | |||
405 | printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n", | 413 | printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n", |
406 | dev->name); | 414 | dev->name); |
407 | zorro8390_reset_8390(dev); | 415 | zorro8390_reset_8390(dev); |
408 | NS8390_init(dev,1); | 416 | __NS8390_init(dev,1); |
409 | break; | 417 | break; |
410 | } | 418 | } |
411 | 419 | ||
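The zorro8390 changes above follow the lib8390.c conversion: the driver defines its register accessors (EI_SHIFT, ei_inb/ei_outb and the _p variants) before textually including the shared 8390 core, and then calls the double-underscore entry points that the core generates (____alloc_ei_netdev, __ei_interrupt, __ei_open, __ei_close, __ei_poll, __NS8390_init). A self-contained userspace sketch of that configure-by-macro-then-include technique follows; my_in8/my_out8 and the generic_core_write stand-in for lib8390.c are illustrative only.

/* Illustrative only: the "define accessors, then include the generic
 * core" technique that zorro8390 applies to lib8390.c. */
#include <stdio.h>

static unsigned char regs[256];

static unsigned char my_in8(unsigned int addr)
{
        return regs[addr & 0xff];
}

static void my_out8(unsigned int addr, unsigned char val)
{
        regs[addr & 0xff] = val;
}

/* The driver picks the accessors before pulling in the shared code. */
#define ei_inb(port)            my_in8(port)
#define ei_outb(val, port)      my_out8(port, val)

/* Stand-in for '#include "lib8390.c"': generic code written purely in
 * terms of ei_inb()/ei_outb(). */
static void generic_core_write(unsigned int addr, unsigned char val)
{
        ei_outb(val, addr);
}

int main(void)
{
        generic_core_write(0x10, 0xab);
        printf("reg 0x10 = 0x%02x\n", ei_inb(0x10));
        return 0;
}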
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index edfa012fad3a..aff25c000abf 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h | |||
@@ -724,7 +724,7 @@ | |||
724 | #define MV643XX_ETH_RX_FIFO_URGENT_THRESHOLD_REG(port) (0x2470 + (port<<10)) | 724 | #define MV643XX_ETH_RX_FIFO_URGENT_THRESHOLD_REG(port) (0x2470 + (port<<10)) |
725 | #define MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(port) (0x2474 + (port<<10)) | 725 | #define MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(port) (0x2474 + (port<<10)) |
726 | #define MV643XX_ETH_RX_MINIMAL_FRAME_SIZE_REG(port) (0x247c + (port<<10)) | 726 | #define MV643XX_ETH_RX_MINIMAL_FRAME_SIZE_REG(port) (0x247c + (port<<10)) |
727 | #define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port) (0x2484 + (port<<10) | 727 | #define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port) (0x2484 + (port<<10)) |
728 | #define MV643XX_ETH_PORT_DEBUG_0_REG(port) (0x248c + (port<<10)) | 728 | #define MV643XX_ETH_PORT_DEBUG_0_REG(port) (0x248c + (port<<10)) |
729 | #define MV643XX_ETH_PORT_DEBUG_1_REG(port) (0x2490 + (port<<10)) | 729 | #define MV643XX_ETH_PORT_DEBUG_1_REG(port) (0x2490 + (port<<10)) |
730 | #define MV643XX_ETH_PORT_INTERNAL_ADDR_ERROR_REG(port) (0x2494 + (port<<10)) | 730 | #define MV643XX_ETH_PORT_INTERNAL_ADDR_ERROR_REG(port) (0x2494 + (port<<10)) |
@@ -1135,7 +1135,7 @@ struct mv64xxx_i2c_pdata { | |||
1135 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_1 (1<<19) | 1135 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_1 (1<<19) |
1136 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_2 (1<<20) | 1136 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_2 (1<<20) |
1137 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_3 ((1<<20) | (1<<19)) | 1137 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_3 ((1<<20) | (1<<19)) |
1138 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_4 ((1<<21) | 1138 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_4 (1<<21) |
1139 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_5 ((1<<21) | (1<<19)) | 1139 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_5 ((1<<21) | (1<<19)) |
1140 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_6 ((1<<21) | (1<<20)) | 1140 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_6 ((1<<21) | (1<<20)) |
1141 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_7 ((1<<21) | (1<<20) | (1<<19)) | 1141 | #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_7 ((1<<21) | (1<<20) | (1<<19)) |
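The mv643xx.h hunks above only balance parentheses; the macros themselves encode a 0x400-byte (port<<10) stride between per-port register banks. A small runnable check of that arithmetic, using the RX_DISCARDED_FRAMES_COUNTER offset exactly as written in the corrected line; the three-port loop is illustrative.

/* Offset macro copied from the corrected hunk above; the loop is illustrative. */
#include <stdio.h>

#define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port) (0x2484 + (port<<10))

int main(void)
{
        int port;

        for (port = 0; port < 3; port++)
                printf("port %d counter reg at 0x%04x\n", port,
                       MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port));
        return 0;
}

The other per-port register macros shown above use the same shift, so each port's bank sits 1 KiB after the previous one.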
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index e060a7637947..fd5033b8a927 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1213,6 +1213,10 @@ | |||
1213 | #define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 | 1213 | #define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 |
1214 | #define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 | 1214 | #define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 |
1215 | #define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453 | 1215 | #define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453 |
1216 | #define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054C | ||
1217 | #define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054D | ||
1218 | #define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054E | ||
1219 | #define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054F | ||
1216 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 | 1220 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 |
1217 | 1221 | ||
1218 | #define PCI_VENDOR_ID_IMS 0x10e0 | 1222 | #define PCI_VENDOR_ID_IMS 0x10e0 |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 9447a57ee8a9..edd4c88ca7d8 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -20,6 +20,10 @@ | |||
20 | 20 | ||
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/ethtool.h> | ||
24 | #include <linux/mii.h> | ||
25 | #include <linux/timer.h> | ||
26 | #include <linux/workqueue.h> | ||
23 | 27 | ||
24 | #define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ | 28 | #define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ |
25 | SUPPORTED_10baseT_Full | \ | 29 | SUPPORTED_10baseT_Full | \ |
@@ -43,15 +47,26 @@ | |||
43 | #define PHY_HAS_INTERRUPT 0x00000001 | 47 | #define PHY_HAS_INTERRUPT 0x00000001 |
44 | #define PHY_HAS_MAGICANEG 0x00000002 | 48 | #define PHY_HAS_MAGICANEG 0x00000002 |
45 | 49 | ||
50 | /* Interface Mode definitions */ | ||
51 | typedef enum { | ||
52 | PHY_INTERFACE_MODE_MII, | ||
53 | PHY_INTERFACE_MODE_GMII, | ||
54 | PHY_INTERFACE_MODE_SGMII, | ||
55 | PHY_INTERFACE_MODE_TBI, | ||
56 | PHY_INTERFACE_MODE_RMII, | ||
57 | PHY_INTERFACE_MODE_RGMII, | ||
58 | PHY_INTERFACE_MODE_RTBI | ||
59 | } phy_interface_t; | ||
60 | |||
46 | #define MII_BUS_MAX 4 | 61 | #define MII_BUS_MAX 4 |
47 | 62 | ||
48 | 63 | ||
49 | #define PHY_INIT_TIMEOUT 100000 | 64 | #define PHY_INIT_TIMEOUT 100000 |
50 | #define PHY_STATE_TIME 1 | 65 | #define PHY_STATE_TIME 1 |
51 | #define PHY_FORCE_TIMEOUT 10 | 66 | #define PHY_FORCE_TIMEOUT 10 |
52 | #define PHY_AN_TIMEOUT 10 | 67 | #define PHY_AN_TIMEOUT 10 |
53 | 68 | ||
54 | #define PHY_MAX_ADDR 32 | 69 | #define PHY_MAX_ADDR 32 |
55 | 70 | ||
56 | /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ | 71 | /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ |
57 | #define PHY_ID_FMT "%x:%02x" | 72 | #define PHY_ID_FMT "%x:%02x" |
@@ -83,8 +98,8 @@ struct mii_bus { | |||
83 | int *irq; | 98 | int *irq; |
84 | }; | 99 | }; |
85 | 100 | ||
86 | #define PHY_INTERRUPT_DISABLED 0x0 | 101 | #define PHY_INTERRUPT_DISABLED 0x0 |
87 | #define PHY_INTERRUPT_ENABLED 0x80000000 | 102 | #define PHY_INTERRUPT_ENABLED 0x80000000 |
88 | 103 | ||
89 | /* PHY state machine states: | 104 | /* PHY state machine states: |
90 | * | 105 | * |
@@ -226,6 +241,8 @@ struct phy_device { | |||
226 | 241 | ||
227 | u32 dev_flags; | 242 | u32 dev_flags; |
228 | 243 | ||
244 | phy_interface_t interface; | ||
245 | |||
229 | /* Bus address of the PHY (0-32) */ | 246 | /* Bus address of the PHY (0-32) */ |
230 | int addr; | 247 | int addr; |
231 | 248 | ||
@@ -341,9 +358,10 @@ struct phy_device* get_phy_device(struct mii_bus *bus, int addr); | |||
341 | int phy_clear_interrupt(struct phy_device *phydev); | 358 | int phy_clear_interrupt(struct phy_device *phydev); |
342 | int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); | 359 | int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); |
343 | struct phy_device * phy_attach(struct net_device *dev, | 360 | struct phy_device * phy_attach(struct net_device *dev, |
344 | const char *phy_id, u32 flags); | 361 | const char *phy_id, u32 flags, phy_interface_t interface); |
345 | struct phy_device * phy_connect(struct net_device *dev, const char *phy_id, | 362 | struct phy_device * phy_connect(struct net_device *dev, const char *phy_id, |
346 | void (*handler)(struct net_device *), u32 flags); | 363 | void (*handler)(struct net_device *), u32 flags, |
364 | phy_interface_t interface); | ||
347 | void phy_disconnect(struct phy_device *phydev); | 365 | void phy_disconnect(struct phy_device *phydev); |
348 | void phy_detach(struct phy_device *phydev); | 366 | void phy_detach(struct phy_device *phydev); |
349 | void phy_start(struct phy_device *phydev); | 367 | void phy_start(struct phy_device *phydev); |
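The phy.h hunks above thread a new phy_interface_t argument through phy_attach() and phy_connect(), so a MAC driver now states how it is wired to its PHY (MII, GMII, RGMII, and so on). A hedged sketch of a caller using the new signature follows; everything except phy_connect(), phy_start(), IS_ERR/PTR_ERR and PHY_INTERFACE_MODE_RGMII is made up, and the snippet only compiles against kernel headers of this era.

/* Hypothetical caller of the new phy_connect() signature. */
static void my_adjust_link(struct net_device *dev)
{
        /* phylib calls this back on link/speed/duplex changes */
}

static int my_attach_phy(struct net_device *dev)
{
        struct phy_device *phydev;

        /* "0:01" follows PHY_ID_FMT ("%x:%02x"): mii bus 0, PHY address 1 */
        phydev = phy_connect(dev, "0:01", &my_adjust_link, 0,
                             PHY_INTERFACE_MODE_RGMII);
        if (IS_ERR(phydev))
                return PTR_ERR(phydev);

        phy_start(phydev);
        return 0;
}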
diff --git a/include/linux/wireless.h b/include/linux/wireless.h index a50a0130fd9e..7c269f4992eb 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h | |||
@@ -546,6 +546,8 @@ | |||
546 | /* MLME requests (SIOCSIWMLME / struct iw_mlme) */ | 546 | /* MLME requests (SIOCSIWMLME / struct iw_mlme) */ |
547 | #define IW_MLME_DEAUTH 0 | 547 | #define IW_MLME_DEAUTH 0 |
548 | #define IW_MLME_DISASSOC 1 | 548 | #define IW_MLME_DISASSOC 1 |
549 | #define IW_MLME_AUTH 2 | ||
550 | #define IW_MLME_ASSOC 3 | ||
549 | 551 | ||
550 | /* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */ | 552 | /* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */ |
551 | #define IW_AUTH_INDEX 0x0FFF | 553 | #define IW_AUTH_INDEX 0x0FFF |
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index b174ebb277a9..e6af381e206d 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h | |||
@@ -1037,6 +1037,10 @@ struct ieee80211_device { | |||
1037 | /* host performs multicast decryption */ | 1037 | /* host performs multicast decryption */ |
1038 | int host_mc_decrypt; | 1038 | int host_mc_decrypt; |
1039 | 1039 | ||
1040 | /* host should strip IV and ICV from protected frames */ | ||
1041 | /* meaningful only when hardware decryption is being used */ | ||
1042 | int host_strip_iv_icv; | ||
1043 | |||
1040 | int host_open_frag; | 1044 | int host_open_frag; |
1041 | int host_build_iv; | 1045 | int host_build_iv; |
1042 | int ieee802_1x; /* is IEEE 802.1X used */ | 1046 | int ieee802_1x; /* is IEEE 802.1X used */ |
@@ -1076,6 +1080,8 @@ struct ieee80211_device { | |||
1076 | int perfect_rssi; | 1080 | int perfect_rssi; |
1077 | int worst_rssi; | 1081 | int worst_rssi; |
1078 | 1082 | ||
1083 | u16 prev_seq_ctl; /* used to drop duplicate frames */ | ||
1084 | |||
1079 | /* Callback functions */ | 1085 | /* Callback functions */ |
1080 | void (*set_security) (struct net_device * dev, | 1086 | void (*set_security) (struct net_device * dev, |
1081 | struct ieee80211_security * sec); | 1087 | struct ieee80211_security * sec); |
diff --git a/net/core/dev.c b/net/core/dev.c index 81c426adcd1e..411c2428d268 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3035,15 +3035,6 @@ int register_netdev(struct net_device *dev) | |||
3035 | goto out; | 3035 | goto out; |
3036 | } | 3036 | } |
3037 | 3037 | ||
3038 | /* | ||
3039 | * Back compatibility hook. Kill this one in 2.5 | ||
3040 | */ | ||
3041 | if (dev->name[0] == 0 || dev->name[0] == ' ') { | ||
3042 | err = dev_alloc_name(dev, "eth%d"); | ||
3043 | if (err < 0) | ||
3044 | goto out; | ||
3045 | } | ||
3046 | |||
3047 | err = register_netdevice(dev); | 3038 | err = register_netdevice(dev); |
3048 | out: | 3039 | out: |
3049 | rtnl_unlock(); | 3040 | rtnl_unlock(); |
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 13b1e5fff7e4..b1c6d1f717d9 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c | |||
@@ -67,7 +67,7 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee) | |||
67 | return 0; | 67 | return 0; |
68 | 68 | ||
69 | ieee->networks = | 69 | ieee->networks = |
70 | kmalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network), | 70 | kzalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network), |
71 | GFP_KERNEL); | 71 | GFP_KERNEL); |
72 | if (!ieee->networks) { | 72 | if (!ieee->networks) { |
73 | printk(KERN_WARNING "%s: Out of memory allocating beacons\n", | 73 | printk(KERN_WARNING "%s: Out of memory allocating beacons\n", |
@@ -75,9 +75,6 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee) | |||
75 | return -ENOMEM; | 75 | return -ENOMEM; |
76 | } | 76 | } |
77 | 77 | ||
78 | memset(ieee->networks, 0, | ||
79 | MAX_NETWORK_COUNT * sizeof(struct ieee80211_network)); | ||
80 | |||
81 | return 0; | 78 | return 0; |
82 | } | 79 | } |
83 | 80 | ||
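The two ieee80211_module.c hunks above are one cleanup: kzalloc() returns already-zeroed memory, so the follow-up memset() can go. The same shape exists in userspace as calloc() versus malloc()+memset(); a runnable illustration is below, with a made-up struct and element count standing in for struct ieee80211_network and MAX_NETWORK_COUNT.

/* Userspace analogue of the kmalloc+memset -> kzalloc cleanup above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct network {                /* stand-in for struct ieee80211_network */
        int channel;
        char ssid[32];
};

#define MAX_NETWORK_COUNT 8     /* illustrative value */

int main(void)
{
        /* Old shape: allocate, then clear by hand. */
        struct network *a = malloc(MAX_NETWORK_COUNT * sizeof(*a));
        if (!a)
                return 1;
        memset(a, 0, MAX_NETWORK_COUNT * sizeof(*a));

        /* New shape: one call that returns zeroed memory. */
        struct network *b = calloc(MAX_NETWORK_COUNT, sizeof(*b));
        if (!b) {
                free(a);
                return 1;
        }

        printf("a[0].channel=%d b[0].channel=%d\n", a[0].channel, b[0].channel);
        free(a);
        free(b);
        return 0;
}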
@@ -118,6 +115,21 @@ static void ieee80211_networks_initialize(struct ieee80211_device *ieee) | |||
118 | &ieee->network_free_list); | 115 | &ieee->network_free_list); |
119 | } | 116 | } |
120 | 117 | ||
118 | static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) | ||
119 | { | ||
120 | if ((new_mtu < 68) || (new_mtu > IEEE80211_DATA_LEN)) | ||
121 | return -EINVAL; | ||
122 | dev->mtu = new_mtu; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | static struct net_device_stats *ieee80211_generic_get_stats( | ||
127 | struct net_device *dev) | ||
128 | { | ||
129 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
130 | return &ieee->stats; | ||
131 | } | ||
132 | |||
121 | struct net_device *alloc_ieee80211(int sizeof_priv) | 133 | struct net_device *alloc_ieee80211(int sizeof_priv) |
122 | { | 134 | { |
123 | struct ieee80211_device *ieee; | 135 | struct ieee80211_device *ieee; |
@@ -133,6 +145,11 @@ struct net_device *alloc_ieee80211(int sizeof_priv) | |||
133 | } | 145 | } |
134 | ieee = netdev_priv(dev); | 146 | ieee = netdev_priv(dev); |
135 | dev->hard_start_xmit = ieee80211_xmit; | 147 | dev->hard_start_xmit = ieee80211_xmit; |
148 | dev->change_mtu = ieee80211_change_mtu; | ||
149 | |||
150 | /* Drivers are free to override this if the generic implementation | ||
151 | * does not meet their needs. */ | ||
152 | dev->get_stats = ieee80211_generic_get_stats; | ||
136 | 153 | ||
137 | ieee->dev = dev; | 154 | ieee->dev = dev; |
138 | 155 | ||
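alloc_ieee80211() now wires a default change_mtu handler that only accepts MTUs between 68 bytes and IEEE80211_DATA_LEN, plus a generic get_stats that drivers may override. The bounds check is simple enough to exercise standalone; IEEE80211_DATA_LEN is assumed to be 2304 here, as defined elsewhere in ieee80211.h, and the sample values are made up.

/* Standalone copy of the MTU bounds check added above. */
#include <stdio.h>

#define IEEE80211_DATA_LEN 2304   /* assumed value from ieee80211.h */

static int change_mtu_ok(int new_mtu)
{
        if ((new_mtu < 68) || (new_mtu > IEEE80211_DATA_LEN))
                return 0;         /* the driver callback returns -EINVAL */
        return 1;
}

int main(void)
{
        int samples[] = { 64, 68, 1500, 2304, 2305 };
        int i;

        for (i = 0; i < 5; i++)
                printf("mtu %4d -> %s\n", samples[i],
                       change_mtu_ok(samples[i]) ? "accepted" : "rejected");
        return 0;
}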
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 2759312a4204..d97e5412e31b 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -415,17 +415,16 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
415 | ieee->host_mc_decrypt : ieee->host_decrypt; | 415 | ieee->host_mc_decrypt : ieee->host_decrypt; |
416 | 416 | ||
417 | if (can_be_decrypted) { | 417 | if (can_be_decrypted) { |
418 | int idx = 0; | ||
419 | if (skb->len >= hdrlen + 3) { | 418 | if (skb->len >= hdrlen + 3) { |
420 | /* Top two-bits of byte 3 are the key index */ | 419 | /* Top two-bits of byte 3 are the key index */ |
421 | idx = skb->data[hdrlen + 3] >> 6; | 420 | keyidx = skb->data[hdrlen + 3] >> 6; |
422 | } | 421 | } |
423 | 422 | ||
424 | /* ieee->crypt[] is WEP_KEY (4) in length. Given that idx | 423 | /* ieee->crypt[] is WEP_KEY (4) in length. Given that keyidx |
425 | * is only allowed 2-bits of storage, no value of idx can | 424 | * is only allowed 2-bits of storage, no value of keyidx can |
426 | * be provided via above code that would result in idx | 425 | * be provided via above code that would result in keyidx |
427 | * being out of range */ | 426 | * being out of range */ |
428 | crypt = ieee->crypt[idx]; | 427 | crypt = ieee->crypt[keyidx]; |
429 | 428 | ||
430 | #ifdef NOT_YET | 429 | #ifdef NOT_YET |
431 | sta = NULL; | 430 | sta = NULL; |
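The rename above reuses the existing keyidx variable for the key index, which lives in the top two bits of the fourth byte after the 802.11 header. A quick standalone check of that extraction; the sample byte values are fabricated.

/* Key index extraction as in the hunk: top two bits of byte hdrlen+3. */
#include <stdio.h>

int main(void)
{
        /* Fabricated "byte 3" values; bits 7:6 hold the key index. */
        unsigned char samples[] = { 0x00, 0x40, 0x80, 0xc0, 0xe5 };
        int i;

        for (i = 0; i < 5; i++) {
                int keyidx = samples[i] >> 6;   /* same shift as the driver */
                printf("byte 0x%02x -> keyidx %d\n", samples[i], keyidx);
        }
        return 0;
}

Because the shift can never produce more than 3, the crypt[keyidx] lookup stays inside the four-entry array, which is exactly what the comment in the hunk argues.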
@@ -479,6 +478,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
479 | goto rx_exit; | 478 | goto rx_exit; |
480 | } | 479 | } |
481 | #endif | 480 | #endif |
481 | /* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.29) */ | ||
482 | if (sc == ieee->prev_seq_ctl) | ||
483 | goto rx_dropped; | ||
484 | else | ||
485 | ieee->prev_seq_ctl = sc; | ||
482 | 486 | ||
483 | /* Data frame - extract src/dst addresses */ | 487 | /* Data frame - extract src/dst addresses */ |
484 | if (skb->len < IEEE80211_3ADDR_LEN) | 488 | if (skb->len < IEEE80211_3ADDR_LEN) |
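The duplicate-drop hunk compares the incoming frame's sequence-control field against the last value seen (prev_seq_ctl, added to struct ieee80211_device in the include/net/ieee80211.h hunk above) and discards exact repeats, per the 802.11 retransmission rules cited in the comment. A standalone model of that filter follows; the frame list, the sentinel value and the u16 typedef are illustrative.

/* Minimal model of the prev_seq_ctl duplicate filter added above. */
#include <stdio.h>

typedef unsigned short u16;

int main(void)
{
        /* Fabricated sequence-control values; the repeat models a retransmit. */
        u16 frames[] = { 0x0100, 0x0110, 0x0110, 0x0120 };
        u16 prev_seq_ctl = 0xffff;   /* illustrative "nothing seen yet" sentinel */
        int i;

        for (i = 0; i < 4; i++) {
                if (frames[i] == prev_seq_ctl) {
                        printf("frame %d (0x%04x): dropped as duplicate\n",
                               i, frames[i]);
                        continue;
                }
                prev_seq_ctl = frames[i];
                printf("frame %d (0x%04x): accepted\n", i, frames[i]);
        }
        return 0;
}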
@@ -655,6 +659,51 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
655 | goto rx_dropped; | 659 | goto rx_dropped; |
656 | } | 660 | } |
657 | 661 | ||
662 | /* If the frame was decrypted in hardware, we may need to strip off | ||
663 | * any security data (IV, ICV, etc) that was left behind */ | ||
664 | if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) && | ||
665 | ieee->host_strip_iv_icv) { | ||
666 | int trimlen = 0; | ||
667 | |||
668 | /* Top two-bits of byte 3 are the key index */ | ||
669 | if (skb->len >= hdrlen + 3) | ||
670 | keyidx = skb->data[hdrlen + 3] >> 6; | ||
671 | |||
672 | /* To strip off any security data which appears before the | ||
673 | * payload, we simply increase hdrlen (as the header gets | ||
674 | * chopped off immediately below). For the security data which | ||
675 | * appears after the payload, we use skb_trim. */ | ||
676 | |||
677 | switch (ieee->sec.encode_alg[keyidx]) { | ||
678 | case SEC_ALG_WEP: | ||
679 | /* 4 byte IV */ | ||
680 | hdrlen += 4; | ||
681 | /* 4 byte ICV */ | ||
682 | trimlen = 4; | ||
683 | break; | ||
684 | case SEC_ALG_TKIP: | ||
685 | /* 4 byte IV, 4 byte ExtIV */ | ||
686 | hdrlen += 8; | ||
687 | /* 8 byte MIC, 4 byte ICV */ | ||
688 | trimlen = 12; | ||
689 | break; | ||
690 | case SEC_ALG_CCMP: | ||
691 | /* 8 byte CCMP header */ | ||
692 | hdrlen += 8; | ||
693 | /* 8 byte MIC */ | ||
694 | trimlen = 8; | ||
695 | break; | ||
696 | } | ||
697 | |||
698 | if (skb->len < trimlen) | ||
699 | goto rx_dropped; | ||
700 | |||
701 | __skb_trim(skb, skb->len - trimlen); | ||
702 | |||
703 | if (skb->len < hdrlen) | ||
704 | goto rx_dropped; | ||
705 | } | ||
706 | |||
658 | /* skb: hdr + (possible reassembled) full plaintext payload */ | 707 | /* skb: hdr + (possible reassembled) full plaintext payload */ |
659 | 708 | ||
660 | payload = skb->data + hdrlen; | 709 | payload = skb->data + hdrlen; |
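For hardware-decrypted frames, the new block strips the security overhead by widening hdrlen (leading IV, IV+ExtIV or CCMP header) and trimming the tail (ICV, MIC+ICV or MIC), with per-algorithm sizes taken from the switch above. A standalone recap of those head/tail amounts; the table layout is illustrative and only mirrors the SEC_ALG_* cases.

/* Head/tail security overhead per algorithm, as in the switch above. */
#include <stdio.h>

int main(void)
{
        static const struct {
                const char *name;   /* mirrors the SEC_ALG_* cases above */
                int head;           /* added to hdrlen before the payload */
                int tail;           /* trimmed from the end of the frame  */
        } tab[] = {
                { "WEP",  4,  4 },
                { "TKIP", 8, 12 },
                { "CCMP", 8,  8 },
        };
        int i;

        for (i = 0; i < 3; i++)
                printf("%-4s: strip %2d bytes before payload, %2d after\n",
                       tab[i].name, tab[i].head, tab[i].tail);
        return 0;
}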
@@ -1255,12 +1304,11 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1255 | case MFIE_TYPE_IBSS_DFS: | 1304 | case MFIE_TYPE_IBSS_DFS: |
1256 | if (network->ibss_dfs) | 1305 | if (network->ibss_dfs) |
1257 | break; | 1306 | break; |
1258 | network->ibss_dfs = | 1307 | network->ibss_dfs = kmemdup(info_element->data, |
1259 | kmalloc(info_element->len, GFP_ATOMIC); | 1308 | info_element->len, |
1309 | GFP_ATOMIC); | ||
1260 | if (!network->ibss_dfs) | 1310 | if (!network->ibss_dfs) |
1261 | return 1; | 1311 | return 1; |
1262 | memcpy(network->ibss_dfs, info_element->data, | ||
1263 | info_element->len); | ||
1264 | network->flags |= NETWORK_HAS_IBSS_DFS; | 1312 | network->flags |= NETWORK_HAS_IBSS_DFS; |
1265 | break; | 1313 | break; |
1266 | 1314 | ||
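kmemdup() collapses the kmalloc()+memcpy() pair into a single call (the softmac challenge buffer below gets the same treatment, with an added failure check). Userspace has no direct equivalent, so the sketch below rolls its own memdup() helper to show the shape; the helper name and the sample payload are made up.

/* Userspace sketch of the kmalloc+memcpy -> kmemdup consolidation. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the kernel's kmemdup(src, len, gfp). */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

int main(void)
{
        const char info_element[] = { 0x01, 0x02, 0x03, 0x04 };  /* fabricated */
        unsigned char *copy = memdup(info_element, sizeof(info_element));

        if (!copy)
                return 1;
        printf("copied %zu bytes, last = 0x%02x\n",
               sizeof(info_element), copy[3]);
        free(copy);
        return 0;
}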
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index 4cef39e171d0..0612015f1c78 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c | |||
@@ -158,7 +158,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
158 | /* Make sure that we've got an auth queue item for this request */ | 158 | /* Make sure that we've got an auth queue item for this request */ |
159 | if(aq == NULL) | 159 | if(aq == NULL) |
160 | { | 160 | { |
161 | printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2)); | 161 | dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2)); |
162 | /* Error #? */ | 162 | /* Error #? */ |
163 | return -1; | 163 | return -1; |
164 | } | 164 | } |
@@ -166,7 +166,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
166 | /* Check for out of order authentication */ | 166 | /* Check for out of order authentication */ |
167 | if(!net->authenticating) | 167 | if(!net->authenticating) |
168 | { | 168 | { |
169 | printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2)); | 169 | dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2)); |
170 | return -1; | 170 | return -1; |
171 | } | 171 | } |
172 | 172 | ||
@@ -216,10 +216,16 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
216 | net->challenge_len = *data++; | 216 | net->challenge_len = *data++; |
217 | if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) | 217 | if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) |
218 | net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; | 218 | net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; |
219 | if (net->challenge != NULL) | 219 | kfree(net->challenge); |
220 | kfree(net->challenge); | 220 | net->challenge = kmemdup(data, net->challenge_len, |
221 | net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC); | 221 | GFP_ATOMIC); |
222 | memcpy(net->challenge, data, net->challenge_len); | 222 | if (net->challenge == NULL) { |
223 | printkl(KERN_NOTICE PFX "Shared Key " | ||
224 | "Authentication failed due to " | ||
225 | "memory shortage.\n"); | ||
226 | spin_unlock_irqrestore(&mac->lock, flags); | ||
227 | break; | ||
228 | } | ||
223 | aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; | 229 | aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; |
224 | 230 | ||
225 | /* We reuse the work struct from the auth request here. | 231 | /* We reuse the work struct from the auth request here. |
@@ -342,7 +348,7 @@ ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac, | |||
342 | /* Make sure the network is authenticated */ | 348 | /* Make sure the network is authenticated */ |
343 | if (!net->authenticated) | 349 | if (!net->authenticated) |
344 | { | 350 | { |
345 | printkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n"); | 351 | dprintkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n"); |
346 | /* Error okay? */ | 352 | /* Error okay? */ |
347 | return -EPERM; | 353 | return -EPERM; |
348 | } | 354 | } |
@@ -376,7 +382,7 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de | |||
376 | net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2); | 382 | net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2); |
377 | 383 | ||
378 | if (net == NULL) { | 384 | if (net == NULL) { |
379 | printkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n", | 385 | dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n", |
380 | MAC_ARG(deauth->header.addr2)); | 386 | MAC_ARG(deauth->header.addr2)); |
381 | return 0; | 387 | return 0; |
382 | } | 388 | } |
@@ -384,7 +390,7 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de | |||
384 | /* Make sure the network is authenticated */ | 390 | /* Make sure the network is authenticated */ |
385 | if(!net->authenticated) | 391 | if(!net->authenticated) |
386 | { | 392 | { |
387 | printkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n"); | 393 | dprintkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n"); |
388 | /* Error okay? */ | 394 | /* Error okay? */ |
389 | return -EPERM; | 395 | return -EPERM; |
390 | } | 396 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c index ad67368b58ed..5507feab32de 100644 --- a/net/ieee80211/softmac/ieee80211softmac_scan.c +++ b/net/ieee80211/softmac/ieee80211softmac_scan.c | |||
@@ -134,7 +134,8 @@ void ieee80211softmac_scan(void *d) | |||
134 | si->started = 0; | 134 | si->started = 0; |
135 | spin_unlock_irqrestore(&sm->lock, flags); | 135 | spin_unlock_irqrestore(&sm->lock, flags); |
136 | 136 | ||
137 | dprintk(PFX "Scanning finished\n"); | 137 | dprintk(PFX "Scanning finished: scanned %d channels starting with channel %d\n", |
138 | sm->scaninfo->number_channels, sm->scaninfo->channels[0].channel); | ||
138 | ieee80211softmac_scan_finished(sm); | 139 | ieee80211softmac_scan_finished(sm); |
139 | complete_all(&sm->scaninfo->finished); | 140 | complete_all(&sm->scaninfo->finished); |
140 | } | 141 | } |
@@ -182,8 +183,6 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev) | |||
182 | sm->scaninfo->channels = sm->ieee->geo.bg; | 183 | sm->scaninfo->channels = sm->ieee->geo.bg; |
183 | sm->scaninfo->number_channels = sm->ieee->geo.bg_channels; | 184 | sm->scaninfo->number_channels = sm->ieee->geo.bg_channels; |
184 | } | 185 | } |
185 | dprintk(PFX "Start scanning with channel: %d\n", sm->scaninfo->channels[0].channel); | ||
186 | dprintk(PFX "Scanning %d channels\n", sm->scaninfo->number_channels); | ||
187 | sm->scaninfo->current_channel_idx = 0; | 186 | sm->scaninfo->current_channel_idx = 0; |
188 | sm->scaninfo->started = 1; | 187 | sm->scaninfo->started = 1; |
189 | sm->scaninfo->stop = 0; | 188 | sm->scaninfo->stop = 0; |