194 files changed, 6721 insertions, 5975 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 1c955883cf58..2975291e296a 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -9,7 +9,7 @@
9 | DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \ | 9 | DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \ |
10 | kernel-hacking.xml kernel-locking.xml deviceiobook.xml \ | 10 | kernel-hacking.xml kernel-locking.xml deviceiobook.xml \ |
11 | procfs-guide.xml writing_usb_driver.xml \ | 11 | procfs-guide.xml writing_usb_driver.xml \ |
12 | sis900.xml kernel-api.xml journal-api.xml lsm.xml usb.xml \ | 12 | kernel-api.xml journal-api.xml lsm.xml usb.xml \ |
13 | gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml | 13 | gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml |
14 | 14 | ||
15 | ### | 15 | ### |
diff --git a/Documentation/DocBook/sis900.tmpl b/Documentation/DocBook/sis900.tmpl
deleted file mode 100644
index 6c2cbac93c3f..000000000000
--- a/Documentation/DocBook/sis900.tmpl
+++ /dev/null
@@ -1,585 +0,0 @@
1 | <?xml version="1.0" encoding="UTF-8"?> | ||
2 | <!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" | ||
3 | "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []> | ||
4 | |||
5 | <book id="SiS900Guide"> | ||
6 | |||
7 | <bookinfo> | ||
8 | |||
9 | <title>SiS 900/7016 Fast Ethernet Device Driver</title> | ||
10 | |||
11 | <authorgroup> | ||
12 | <author> | ||
13 | <firstname>Ollie</firstname> | ||
14 | <surname>Lho</surname> | ||
15 | </author> | ||
16 | |||
17 | <author> | ||
18 | <firstname>Lei Chun</firstname> | ||
19 | <surname>Chang</surname> | ||
20 | </author> | ||
21 | </authorgroup> | ||
22 | |||
23 | <edition>Document Revision: 0.3 for SiS900 driver v1.06 & v1.07</edition> | ||
24 | <pubdate>November 16, 2000</pubdate> | ||
25 | |||
26 | <copyright> | ||
27 | <year>1999</year> | ||
28 | <holder>Silicon Integrated System Corp.</holder> | ||
29 | </copyright> | ||
30 | |||
31 | <legalnotice> | ||
32 | <para> | ||
33 | This program is free software; you can redistribute it and/or modify | ||
34 | it under the terms of the GNU General Public License as published by | ||
35 | the Free Software Foundation; either version 2 of the License, or | ||
36 | (at your option) any later version. | ||
37 | </para> | ||
38 | |||
39 | <para> | ||
40 | This program is distributed in the hope that it will be useful, | ||
41 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
42 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
43 | GNU General Public License for more details. | ||
44 | </para> | ||
45 | |||
46 | <para> | ||
47 | You should have received a copy of the GNU General Public License | ||
48 | along with this program; if not, write to the Free Software | ||
49 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
50 | </para> | ||
51 | </legalnotice> | ||
52 | |||
53 | <abstract> | ||
54 | <para> | ||
55 | This document gives some information on installation and usage of SiS 900/7016 | ||
56 | device driver under Linux. | ||
57 | </para> | ||
58 | </abstract> | ||
59 | |||
60 | </bookinfo> | ||
61 | |||
62 | <toc></toc> | ||
63 | |||
64 | <chapter id="intro"> | ||
65 | <title>Introduction</title> | ||
66 | |||
67 | <para> | ||
68 | This document describes the revision 1.06 and 1.07 of SiS 900/7016 Fast Ethernet | ||
69 | device driver under Linux. The driver is developed by Silicon Integrated | ||
70 | System Corp. and distributed freely under the GNU General Public License (GPL). | ||
71 | The driver can be compiled as a loadable module and used under Linux kernel | ||
72 | version 2.2.x. (rev. 1.06) | ||
73 | With minimal changes, the driver can also be used under 2.3.x and 2.4.x kernel | ||
74 | (rev. 1.07), please see | ||
75 | <xref linkend="install"/>. If you are intended to | ||
76 | use the driver for earlier kernels, you are on your own. | ||
77 | </para> | ||
78 | |||
79 | <para> | ||
80 | The driver is tested with usual TCP/IP applications including | ||
81 | FTP, Telnet, Netscape etc. and is used constantly by the developers. | ||
82 | </para> | ||
83 | |||
84 | <para> | ||
85 | Please send all comments/fixes/questions to | ||
86 | <ulink url="mailto:lcchang@sis.com.tw">Lei-Chun Chang</ulink>. | ||
87 | </para> | ||
88 | </chapter> | ||
89 | |||
90 | <chapter id="changes"> | ||
91 | <title>Changes</title> | ||
92 | |||
93 | <para> | ||
94 | Changes made in Revision 1.07 | ||
95 | |||
96 | <orderedlist> | ||
97 | <listitem> | ||
98 | <para> | ||
99 | Separation of sis900.c and sis900.h in order to move most | ||
100 | constant definition to sis900.h (many of those constants were | ||
101 | corrected) | ||
102 | </para> | ||
103 | </listitem> | ||
104 | |||
105 | <listitem> | ||
106 | <para> | ||
107 | Clean up PCI detection, the pci-scan from Donald Becker were not used, | ||
108 | just simple pci_find_*. | ||
109 | </para> | ||
110 | </listitem> | ||
111 | |||
112 | <listitem> | ||
113 | <para> | ||
114 | MII detection is modified to support multiple mii transceiver. | ||
115 | </para> | ||
116 | </listitem> | ||
117 | |||
118 | <listitem> | ||
119 | <para> | ||
120 | Bugs in read_eeprom, mdio_* were removed. | ||
121 | </para> | ||
122 | </listitem> | ||
123 | |||
124 | <listitem> | ||
125 | <para> | ||
126 | Lot of sis900 irrelevant comments were removed/changed and | ||
127 | more comments were added to reflect the real situation. | ||
128 | </para> | ||
129 | </listitem> | ||
130 | |||
131 | <listitem> | ||
132 | <para> | ||
133 | Clean up of physical/virtual address space mess in buffer | ||
134 | descriptors. | ||
135 | </para> | ||
136 | </listitem> | ||
137 | |||
138 | <listitem> | ||
139 | <para> | ||
140 | Better transmit/receive error handling. | ||
141 | </para> | ||
142 | </listitem> | ||
143 | |||
144 | <listitem> | ||
145 | <para> | ||
146 | The driver now uses zero-copy single buffer management | ||
147 | scheme to improve performance. | ||
148 | </para> | ||
149 | </listitem> | ||
150 | |||
151 | <listitem> | ||
152 | <para> | ||
153 | Names of variables were changed to be more consistent. | ||
154 | </para> | ||
155 | </listitem> | ||
156 | |||
157 | <listitem> | ||
158 | <para> | ||
159 | Clean up of auo-negotiation and timer code. | ||
160 | </para> | ||
161 | </listitem> | ||
162 | |||
163 | <listitem> | ||
164 | <para> | ||
165 | Automatic detection and change of PHY on the fly. | ||
166 | </para> | ||
167 | </listitem> | ||
168 | |||
169 | <listitem> | ||
170 | <para> | ||
171 | Bug in mac probing fixed. | ||
172 | </para> | ||
173 | </listitem> | ||
174 | |||
175 | <listitem> | ||
176 | <para> | ||
177 | Fix 630E equalier problem by modifying the equalizer workaround rule. | ||
178 | </para> | ||
179 | </listitem> | ||
180 | |||
181 | <listitem> | ||
182 | <para> | ||
183 | Support for ICS1893 10/100 Interated PHYceiver. | ||
184 | </para> | ||
185 | </listitem> | ||
186 | |||
187 | <listitem> | ||
188 | <para> | ||
189 | Support for media select by ifconfig. | ||
190 | </para> | ||
191 | </listitem> | ||
192 | |||
193 | <listitem> | ||
194 | <para> | ||
195 | Added kernel-doc extratable documentation. | ||
196 | </para> | ||
197 | </listitem> | ||
198 | |||
199 | </orderedlist> | ||
200 | </para> | ||
201 | </chapter> | ||
202 | |||
203 | <chapter id="tested"> | ||
204 | <title>Tested Environment</title> | ||
205 | |||
206 | <para> | ||
207 | This driver is developed on the following hardware | ||
208 | |||
209 | <itemizedlist> | ||
210 | <listitem> | ||
211 | |||
212 | <para> | ||
213 | Intel Celeron 500 with SiS 630 (rev 02) chipset | ||
214 | </para> | ||
215 | </listitem> | ||
216 | <listitem> | ||
217 | |||
218 | <para> | ||
219 | SiS 900 (rev 01) and SiS 7016/7014 Fast Ethernet Card | ||
220 | </para> | ||
221 | </listitem> | ||
222 | |||
223 | </itemizedlist> | ||
224 | |||
225 | and tested with these software environments | ||
226 | |||
227 | <itemizedlist> | ||
228 | <listitem> | ||
229 | |||
230 | <para> | ||
231 | Red Hat Linux version 6.2 | ||
232 | </para> | ||
233 | </listitem> | ||
234 | <listitem> | ||
235 | |||
236 | <para> | ||
237 | Linux kernel version 2.4.0 | ||
238 | </para> | ||
239 | </listitem> | ||
240 | <listitem> | ||
241 | |||
242 | <para> | ||
243 | Netscape version 4.6 | ||
244 | </para> | ||
245 | </listitem> | ||
246 | <listitem> | ||
247 | |||
248 | <para> | ||
249 | NcFTP 3.0.0 beta 18 | ||
250 | </para> | ||
251 | </listitem> | ||
252 | <listitem> | ||
253 | |||
254 | <para> | ||
255 | Samba version 2.0.3 | ||
256 | </para> | ||
257 | </listitem> | ||
258 | |||
259 | </itemizedlist> | ||
260 | |||
261 | </para> | ||
262 | |||
263 | </chapter> | ||
264 | |||
265 | <chapter id="files"> | ||
266 | <title>Files in This Package</title> | ||
267 | |||
268 | <para> | ||
269 | In the package you can find these files: | ||
270 | </para> | ||
271 | |||
272 | <para> | ||
273 | <variablelist> | ||
274 | |||
275 | <varlistentry> | ||
276 | <term>sis900.c</term> | ||
277 | <listitem> | ||
278 | <para> | ||
279 | Driver source file in C | ||
280 | </para> | ||
281 | </listitem> | ||
282 | </varlistentry> | ||
283 | |||
284 | <varlistentry> | ||
285 | <term>sis900.h</term> | ||
286 | <listitem> | ||
287 | <para> | ||
288 | Header file for sis900.c | ||
289 | </para> | ||
290 | </listitem> | ||
291 | </varlistentry> | ||
292 | |||
293 | <varlistentry> | ||
294 | <term>sis900.sgml</term> | ||
295 | <listitem> | ||
296 | <para> | ||
297 | DocBook SGML source of the document | ||
298 | </para> | ||
299 | </listitem> | ||
300 | </varlistentry> | ||
301 | |||
302 | <varlistentry> | ||
303 | <term>sis900.txt</term> | ||
304 | <listitem> | ||
305 | <para> | ||
306 | Driver document in plain text | ||
307 | </para> | ||
308 | </listitem> | ||
309 | </varlistentry> | ||
310 | |||
311 | </variablelist> | ||
312 | </para> | ||
313 | </chapter> | ||
314 | |||
315 | <chapter id="install"> | ||
316 | <title>Installation</title> | ||
317 | |||
318 | <para> | ||
319 | Silicon Integrated System Corp. is cooperating closely with core Linux Kernel | ||
320 | developers. The revisions of SiS 900 driver are distributed by the usuall channels | ||
321 | for kernel tar files and patches. Those kernel tar files for official kernel and | ||
322 | patches for kernel pre-release can be download at | ||
323 | <ulink url="http://ftp.kernel.org/pub/linux/kernel/">official kernel ftp site</ulink> | ||
324 | and its mirrors. | ||
325 | The 1.06 revision can be found in kernel version later than 2.3.15 and pre-2.2.14, | ||
326 | and 1.07 revision can be found in kernel version 2.4.0. | ||
327 | If you have no prior experience in networking under Linux, please read | ||
328 | <ulink url="http://www.tldp.org/">Ethernet HOWTO</ulink> and | ||
329 | <ulink url="http://www.tldp.org/">Networking HOWTO</ulink> available from | ||
330 | Linux Documentation Project (LDP). | ||
331 | </para> | ||
332 | |||
333 | <para> | ||
334 | The driver is bundled in release later than 2.2.11 and 2.3.15 so this | ||
335 | is the most easy case. | ||
336 | Be sure you have the appropriate packages for compiling kernel source. | ||
337 | Those packages are listed in Document/Changes in kernel source | ||
338 | distribution. If you have to install the driver other than those bundled | ||
339 | in kernel release, you should have your driver file | ||
340 | <filename>sis900.c</filename> and <filename>sis900.h</filename> | ||
341 | copied into <filename class="directory">/usr/src/linux/drivers/net/</filename> first. | ||
342 | There are two alternative ways to install the driver | ||
343 | </para> | ||
344 | |||
345 | <sect1> | ||
346 | <title>Building the driver as loadable module</title> | ||
347 | |||
348 | <para> | ||
349 | To build the driver as a loadable kernel module you have to reconfigure | ||
350 | the kernel to activate network support by | ||
351 | </para> | ||
352 | |||
353 | <para><screen> | ||
354 | make menuconfig | ||
355 | </screen></para> | ||
356 | |||
357 | <para> | ||
358 | Choose <quote>Loadable module support ---></quote>, | ||
359 | then select <quote>Enable loadable module support</quote>. | ||
360 | </para> | ||
361 | |||
362 | <para> | ||
363 | Choose <quote>Network Device Support ---></quote>, select | ||
364 | <quote>Ethernet (10 or 100Mbit)</quote>. | ||
365 | Then select <quote>EISA, VLB, PCI and on board controllers</quote>, | ||
366 | and choose <quote>SiS 900/7016 PCI Fast Ethernet Adapter support</quote> | ||
367 | to <quote>M</quote>. | ||
368 | </para> | ||
369 | |||
370 | <para> | ||
371 | After reconfiguring the kernel, you can make the driver module by | ||
372 | </para> | ||
373 | |||
374 | <para><screen> | ||
375 | make modules | ||
376 | </screen></para> | ||
377 | |||
378 | <para> | ||
379 | The driver should be compiled with no errors. After compiling the driver, | ||
380 | the driver can be installed to proper place by | ||
381 | </para> | ||
382 | |||
383 | <para><screen> | ||
384 | make modules_install | ||
385 | </screen></para> | ||
386 | |||
387 | <para> | ||
388 | Load the driver into kernel by | ||
389 | </para> | ||
390 | |||
391 | <para><screen> | ||
392 | insmod sis900 | ||
393 | </screen></para> | ||
394 | |||
395 | <para> | ||
396 | When loading the driver into memory, some information message can be view by | ||
397 | </para> | ||
398 | |||
399 | <para> | ||
400 | <screen> | ||
401 | dmesg | ||
402 | </screen> | ||
403 | |||
404 | or | ||
405 | |||
406 | <screen> | ||
407 | cat /var/log/message | ||
408 | </screen> | ||
409 | </para> | ||
410 | |||
411 | <para> | ||
412 | If the driver is loaded properly you will have messages similar to this: | ||
413 | </para> | ||
414 | |||
415 | <para><screen> | ||
416 | sis900.c: v1.07.06 11/07/2000 | ||
417 | eth0: SiS 900 PCI Fast Ethernet at 0xd000, IRQ 10, 00:00:e8:83:7f:a4. | ||
418 | eth0: SiS 900 Internal MII PHY transceiver found at address 1. | ||
419 | eth0: Using SiS 900 Internal MII PHY as default | ||
420 | </screen></para> | ||
421 | |||
422 | <para> | ||
423 | showing the version of the driver and the results of probing routine. | ||
424 | </para> | ||
425 | |||
426 | <para> | ||
427 | Once the driver is loaded, network can be brought up by | ||
428 | </para> | ||
429 | |||
430 | <para><screen> | ||
431 | /sbin/ifconfig eth0 IPADDR broadcast BROADCAST netmask NETMASK media TYPE | ||
432 | </screen></para> | ||
433 | |||
434 | <para> | ||
435 | where IPADDR, BROADCAST, NETMASK are your IP address, broadcast address and | ||
436 | netmask respectively. TYPE is used to set medium type used by the device. | ||
437 | Typical values are "10baseT"(twisted-pair 10Mbps Ethernet) or "100baseT" | ||
438 | (twisted-pair 100Mbps Ethernet). For more information on how to configure | ||
439 | network interface, please refer to | ||
440 | <ulink url="http://www.tldp.org/">Networking HOWTO</ulink>. | ||
441 | </para> | ||
442 | |||
443 | <para> | ||
444 | The link status is also shown by kernel messages. For example, after the | ||
445 | network interface is activated, you may have the message: | ||
446 | </para> | ||
447 | |||
448 | <para><screen> | ||
449 | eth0: Media Link On 100mbps full-duplex | ||
450 | </screen></para> | ||
451 | |||
452 | <para> | ||
453 | If you try to unplug the twist pair (TP) cable you will get | ||
454 | </para> | ||
455 | |||
456 | <para><screen> | ||
457 | eth0: Media Link Off | ||
458 | </screen></para> | ||
459 | |||
460 | <para> | ||
461 | indicating that the link is failed. | ||
462 | </para> | ||
463 | </sect1> | ||
464 | |||
465 | <sect1> | ||
466 | <title>Building the driver into kernel</title> | ||
467 | |||
468 | <para> | ||
469 | If you want to make the driver into kernel, choose <quote>Y</quote> | ||
470 | rather than <quote>M</quote> on | ||
471 | <quote>SiS 900/7016 PCI Fast Ethernet Adapter support</quote> | ||
472 | when configuring the kernel. Build the kernel image in the usual way | ||
473 | </para> | ||
474 | |||
475 | <para><screen> | ||
476 | make clean | ||
477 | |||
478 | make bzlilo | ||
479 | </screen></para> | ||
480 | |||
481 | <para> | ||
482 | Next time the system reboot, you have the driver in memory. | ||
483 | </para> | ||
484 | |||
485 | </sect1> | ||
486 | </chapter> | ||
487 | |||
488 | <chapter id="problems"> | ||
489 | <title>Known Problems and Bugs</title> | ||
490 | |||
491 | <para> | ||
492 | There are some known problems and bugs. If you find any other bugs please | ||
493 | mail to <ulink url="mailto:lcchang@sis.com.tw">lcchang@sis.com.tw</ulink> | ||
494 | |||
495 | <orderedlist> | ||
496 | |||
497 | <listitem> | ||
498 | <para> | ||
499 | AM79C901 HomePNA PHY is not thoroughly tested, there may be some | ||
500 | bugs in the <quote>on the fly</quote> change of transceiver. | ||
501 | </para> | ||
502 | </listitem> | ||
503 | |||
504 | <listitem> | ||
505 | <para> | ||
506 | A bug is hidden somewhere in the receive buffer management code, | ||
507 | the bug causes NULL pointer reference in the kernel. This fault is | ||
508 | caught before bad things happen and reported with the message: | ||
509 | |||
510 | <computeroutput> | ||
511 | eth0: NULL pointer encountered in Rx ring, skipping | ||
512 | </computeroutput> | ||
513 | |||
514 | which can be viewed with <literal remap="tt">dmesg</literal> or | ||
515 | <literal remap="tt">cat /var/log/message</literal>. | ||
516 | </para> | ||
517 | </listitem> | ||
518 | |||
519 | <listitem> | ||
520 | <para> | ||
521 | The media type change from 10Mbps to 100Mbps twisted-pair ethernet | ||
522 | by ifconfig causes the media link down. | ||
523 | </para> | ||
524 | </listitem> | ||
525 | |||
526 | </orderedlist> | ||
527 | </para> | ||
528 | </chapter> | ||
529 | |||
530 | <chapter id="RHistory"> | ||
531 | <title>Revision History</title> | ||
532 | |||
533 | <para> | ||
534 | <itemizedlist> | ||
535 | |||
536 | <listitem> | ||
537 | <para> | ||
538 | November 13, 2000, Revision 1.07, seventh release, 630E problem fixed | ||
539 | and further clean up. | ||
540 | </para> | ||
541 | </listitem> | ||
542 | |||
543 | <listitem> | ||
544 | <para> | ||
545 | November 4, 1999, Revision 1.06, Second release, lots of clean up | ||
546 | and optimization. | ||
547 | </para> | ||
548 | </listitem> | ||
549 | |||
550 | <listitem> | ||
551 | <para> | ||
552 | August 8, 1999, Revision 1.05, Initial Public Release | ||
553 | </para> | ||
554 | </listitem> | ||
555 | |||
556 | </itemizedlist> | ||
557 | </para> | ||
558 | </chapter> | ||
559 | |||
560 | <chapter id="acknowledgements"> | ||
561 | <title>Acknowledgements</title> | ||
562 | |||
563 | <para> | ||
564 | This driver was originally derived form | ||
565 | <ulink url="mailto:becker@cesdis1.gsfc.nasa.gov">Donald Becker</ulink>'s | ||
566 | <ulink url="ftp://cesdis.gsfc.nasa.gov/pub/linux/drivers/kern-2.3/pci-skeleton.c" | ||
567 | >pci-skeleton</ulink> and | ||
568 | <ulink url="ftp://cesdis.gsfc.nasa.gov/pub/linux/drivers/kern-2.3/rtl8139.c" | ||
569 | >rtl8139</ulink> drivers. Donald also provided various suggestion | ||
570 | regarded with improvements made in revision 1.06. | ||
571 | </para> | ||
572 | |||
573 | <para> | ||
574 | The 1.05 revision was created by | ||
575 | <ulink url="mailto:cmhuang@sis.com.tw">Jim Huang</ulink>, AMD 79c901 | ||
576 | support was added by <ulink url="mailto:lcs@sis.com.tw">Chin-Shan Li</ulink>. | ||
577 | </para> | ||
578 | </chapter> | ||
579 | |||
580 | <chapter id="functions"> | ||
581 | <title>List of Functions</title> | ||
582 | !Idrivers/net/sis900.c | ||
583 | </chapter> | ||
584 | |||
585 | </book> | ||
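The "List of Functions" chapter of the removed template was populated by the kernel-doc "!Idrivers/net/sis900.c" directive, which extracts specially formatted comments from the driver source. The fragment below is only a sketch of that comment style; the function sis900_example_open and its behaviour are invented for illustration and are not taken from sis900.c.

    #include <linux/netdevice.h>

    /**
     * sis900_example_open - bring up a hypothetical sis900-style interface
     * @net_dev: network device being opened
     *
     * A comment written in this form is what the "!Idrivers/net/sis900.c"
     * directive pulls into the generated DocBook chapter.
     */
    static int sis900_example_open(struct net_device *net_dev)
    {
            return 0;   /* sketch only; a real open would set up descriptor rings and the IRQ */
    }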
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 81bc51369f59..28a31c5e2289 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -151,6 +151,13 @@ Who: Ralf Baechle <ralf@linux-mips.org>
151 | 151 | ||
152 | --------------------------- | 152 | --------------------------- |
153 | 153 | ||
154 | What: eepro100 network driver | ||
155 | When: January 2007 | ||
156 | Why: replaced by the e100 driver | ||
157 | Who: Adrian Bunk <bunk@stusta.de> | ||
158 | |||
159 | --------------------------- | ||
160 | |||
154 | What: Legacy /proc/pci interface (PCI_LEGACY_PROC) | 161 | What: Legacy /proc/pci interface (PCI_LEGACY_PROC) |
155 | When: March 2006 | 162 | When: March 2006 |
156 | Why: deprecated since 2.5.53 in favor of lspci(8) | 163 | Why: deprecated since 2.5.53 in favor of lspci(8) |
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 5b01d5cc4e95..b1181ce232d9 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -92,8 +92,6 @@ routing.txt
92 | - the new routing mechanism | 92 | - the new routing mechanism |
93 | shaper.txt | 93 | shaper.txt |
94 | - info on the module that can shape/limit transmitted traffic. | 94 | - info on the module that can shape/limit transmitted traffic. |
95 | sis900.txt | ||
96 | - SiS 900/7016 Fast Ethernet device driver info. | ||
97 | sk98lin.txt | 95 | sk98lin.txt |
98 | - Marvell Yukon Chipset / SysKonnect SK-98xx compliant Gigabit | 96 | - Marvell Yukon Chipset / SysKonnect SK-98xx compliant Gigabit |
99 | Ethernet Adapter family driver info | 97 | Ethernet Adapter family driver info |
diff --git a/Documentation/networking/README.ipw2100 b/Documentation/networking/README.ipw2100
index 3ab40379d1cf..f3fcaa41f774 100644
--- a/Documentation/networking/README.ipw2100
+++ b/Documentation/networking/README.ipw2100
@@ -3,18 +3,18 @@ Intel(R) PRO/Wireless 2100 Driver for Linux in support of:
3 | 3 | ||
4 | Intel(R) PRO/Wireless 2100 Network Connection | 4 | Intel(R) PRO/Wireless 2100 Network Connection |
5 | 5 | ||
6 | Copyright (C) 2003-2005, Intel Corporation | 6 | Copyright (C) 2003-2006, Intel Corporation |
7 | 7 | ||
8 | README.ipw2100 | 8 | README.ipw2100 |
9 | 9 | ||
10 | Version: 1.1.3 | 10 | Version: git-1.1.5 |
11 | Date : October 17, 2005 | 11 | Date : January 25, 2006 |
12 | 12 | ||
13 | Index | 13 | Index |
14 | ----------------------------------------------- | 14 | ----------------------------------------------- |
15 | 0. IMPORTANT INFORMATION BEFORE USING THIS DRIVER | 15 | 0. IMPORTANT INFORMATION BEFORE USING THIS DRIVER |
16 | 1. Introduction | 16 | 1. Introduction |
17 | 2. Release 1.1.3 Current Features | 17 | 2. Release git-1.1.5 Current Features |
18 | 3. Command Line Parameters | 18 | 3. Command Line Parameters |
19 | 4. Sysfs Helper Files | 19 | 4. Sysfs Helper Files |
20 | 5. Radio Kill Switch | 20 | 5. Radio Kill Switch |
@@ -89,7 +89,7 @@ potential fixes and patches, as well as links to the development mailing list
89 | for the driver project. | 89 | for the driver project. |
90 | 90 | ||
91 | 91 | ||
92 | 2. Release 1.1.3 Current Supported Features | 92 | 2. Release git-1.1.5 Current Supported Features |
93 | ----------------------------------------------- | 93 | ----------------------------------------------- |
94 | - Managed (BSS) and Ad-Hoc (IBSS) | 94 | - Managed (BSS) and Ad-Hoc (IBSS) |
95 | - WEP (shared key and open) | 95 | - WEP (shared key and open) |
@@ -270,7 +270,7 @@ For installation support on the ipw2100 1.1.0 driver on Linux kernels
270 | 9. License | 270 | 9. License |
271 | ----------------------------------------------- | 271 | ----------------------------------------------- |
272 | 272 | ||
273 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 273 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
274 | 274 | ||
275 | This program is free software; you can redistribute it and/or modify it | 275 | This program is free software; you can redistribute it and/or modify it |
276 | under the terms of the GNU General Public License (version 2) as | 276 | under the terms of the GNU General Public License (version 2) as |
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
index c6492d3839fa..acb30c5dcff3 100644
--- a/Documentation/networking/README.ipw2200
+++ b/Documentation/networking/README.ipw2200
@@ -10,7 +10,7 @@ both hardware adapters listed above. In this document the Intel(R)
10 | PRO/Wireless 2915ABG Driver for Linux will be used to reference the | 10 | PRO/Wireless 2915ABG Driver for Linux will be used to reference the |
11 | unified driver. | 11 | unified driver. |
12 | 12 | ||
13 | Copyright (C) 2004-2005, Intel Corporation | 13 | Copyright (C) 2004-2006, Intel Corporation |
14 | 14 | ||
15 | README.ipw2200 | 15 | README.ipw2200 |
16 | 16 | ||
@@ -26,9 +26,11 @@ Index
26 | 1.2. Module parameters | 26 | 1.2. Module parameters |
27 | 1.3. Wireless Extension Private Methods | 27 | 1.3. Wireless Extension Private Methods |
28 | 1.4. Sysfs Helper Files | 28 | 1.4. Sysfs Helper Files |
29 | 1.5. Supported channels | ||
29 | 2. Ad-Hoc Networking | 30 | 2. Ad-Hoc Networking |
30 | 3. Interacting with Wireless Tools | 31 | 3. Interacting with Wireless Tools |
31 | 3.1. iwconfig mode | 32 | 3.1. iwconfig mode |
33 | 3.2. iwconfig sens | ||
32 | 4. About the Version Numbers | 34 | 4. About the Version Numbers |
33 | 5. Firmware installation | 35 | 5. Firmware installation |
34 | 6. Support | 36 | 6. Support |
@@ -314,6 +316,35 @@ For the device level files, see /sys/bus/pci/drivers/ipw2200:
314 | running ifconfig and is therefore disabled by default. | 316 | running ifconfig and is therefore disabled by default. |
315 | 317 | ||
316 | 318 | ||
319 | 1.5. Supported channels | ||
320 | ----------------------------------------------- | ||
321 | |||
322 | Upon loading the Intel(R) PRO/Wireless 2915ABG Driver for Linux, a | ||
323 | message stating the detected geography code and the number of 802.11 | ||
324 | channels supported by the card will be displayed in the log. | ||
325 | |||
326 | The geography code corresponds to a regulatory domain as shown in the | ||
327 | table below. | ||
328 | |||
329 | Supported channels | ||
330 | Code Geography 802.11bg 802.11a | ||
331 | |||
332 | --- Restricted 11 0 | ||
333 | ZZF Custom US/Canada 11 8 | ||
334 | ZZD Rest of World 13 0 | ||
335 | ZZA Custom USA & Europe & High 11 13 | ||
336 | ZZB Custom NA & Europe 11 13 | ||
337 | ZZC Custom Japan 11 4 | ||
338 | ZZM Custom 11 0 | ||
339 | ZZE Europe 13 19 | ||
340 | ZZJ Custom Japan 14 4 | ||
341 | ZZR Rest of World 14 0 | ||
342 | ZZH High Band 13 4 | ||
343 | ZZG Custom Europe 13 4 | ||
344 | ZZK Europe 13 24 | ||
345 | ZZL Europe 11 13 | ||
346 | |||
347 | |||
317 | 2. Ad-Hoc Networking | 348 | 2. Ad-Hoc Networking |
318 | ----------------------------------------------- | 349 | ----------------------------------------------- |
319 | 350 | ||
@@ -353,6 +384,15 @@ When configuring the mode of the adapter, all run-time configured parameters
353 | are reset to the value used when the module was loaded. This includes | 384 | are reset to the value used when the module was loaded. This includes |
354 | channels, rates, ESSID, etc. | 385 | channels, rates, ESSID, etc. |
355 | 386 | ||
387 | 3.2 iwconfig sens | ||
388 | ----------------------------------------------- | ||
389 | |||
390 | The 'iwconfig ethX sens XX' command will not set the signal sensitivity | ||
391 | threshold, as described in iwconfig documentation, but rather the number | ||
392 | of consecutive missed beacons that will trigger handover, i.e. roaming | ||
393 | to another access point. At the same time, it will set the disassociation | ||
394 | threshold to 3 times the given value. | ||
395 | |||
356 | 396 | ||
357 | 4. About the Version Numbers | 397 | 4. About the Version Numbers |
358 | ----------------------------------------------- | 398 | ----------------------------------------------- |
@@ -408,7 +448,7 @@ For general information and support, go to:
408 | 7. License | 448 | 7. License |
409 | ----------------------------------------------- | 449 | ----------------------------------------------- |
410 | 450 | ||
411 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 451 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
412 | 452 | ||
413 | This program is free software; you can redistribute it and/or modify it | 453 | This program is free software; you can redistribute it and/or modify it |
414 | under the terms of the GNU General Public License version 2 as | 454 | under the terms of the GNU General Public License version 2 as |
diff --git a/Documentation/networking/sis900.txt b/Documentation/networking/sis900.txt
deleted file mode 100644
index bddffd7385ae..000000000000
--- a/Documentation/networking/sis900.txt
+++ /dev/null
@@ -1,257 +0,0 @@
1 | |||
2 | SiS 900/7016 Fast Ethernet Device Driver | ||
3 | |||
4 | Ollie Lho | ||
5 | |||
6 | Lei Chun Chang | ||
7 | |||
8 | Copyright © 1999 by Silicon Integrated System Corp. | ||
9 | |||
10 | This document gives some information on installation and usage of SiS | ||
11 | 900/7016 device driver under Linux. | ||
12 | |||
13 | This program is free software; you can redistribute it and/or modify | ||
14 | it under the terms of the GNU General Public License as published by | ||
15 | the Free Software Foundation; either version 2 of the License, or (at | ||
16 | your option) any later version. | ||
17 | |||
18 | This program is distributed in the hope that it will be useful, but | ||
19 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
21 | General Public License for more details. | ||
22 | |||
23 | You should have received a copy of the GNU General Public License | ||
24 | along with this program; if not, write to the Free Software | ||
25 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
26 | USA | ||
27 | _________________________________________________________________ | ||
28 | |||
29 | Table of Contents | ||
30 | 1. Introduction | ||
31 | 2. Changes | ||
32 | 3. Tested Environment | ||
33 | 4. Files in This Package | ||
34 | 5. Installation | ||
35 | |||
36 | Building the driver as loadable module | ||
37 | Building the driver into kernel | ||
38 | |||
39 | 6. Known Problems and Bugs | ||
40 | 7. Revision History | ||
41 | 8. Acknowledgements | ||
42 | _________________________________________________________________ | ||
43 | |||
44 | Chapter 1. Introduction | ||
45 | |||
46 | This document describes the revision 1.06 and 1.07 of SiS 900/7016 | ||
47 | Fast Ethernet device driver under Linux. The driver is developed by | ||
48 | Silicon Integrated System Corp. and distributed freely under the GNU | ||
49 | General Public License (GPL). The driver can be compiled as a loadable | ||
50 | module and used under Linux kernel version 2.2.x. (rev. 1.06) With | ||
51 | minimal changes, the driver can also be used under 2.3.x and 2.4.x | ||
52 | kernel (rev. 1.07), please see Chapter 5. If you are intended to use | ||
53 | the driver for earlier kernels, you are on your own. | ||
54 | |||
55 | The driver is tested with usual TCP/IP applications including FTP, | ||
56 | Telnet, Netscape etc. and is used constantly by the developers. | ||
57 | |||
58 | Please send all comments/fixes/questions to Lei-Chun Chang. | ||
59 | _________________________________________________________________ | ||
60 | |||
61 | Chapter 2. Changes | ||
62 | |||
63 | Changes made in Revision 1.07 | ||
64 | |||
65 | 1. Separation of sis900.c and sis900.h in order to move most constant | ||
66 | definition to sis900.h (many of those constants were corrected) | ||
67 | 2. Clean up PCI detection, the pci-scan from Donald Becker were not | ||
68 | used, just simple pci_find_*. | ||
69 | 3. MII detection is modified to support multiple mii transceiver. | ||
70 | 4. Bugs in read_eeprom, mdio_* were removed. | ||
71 | 5. Lot of sis900 irrelevant comments were removed/changed and more | ||
72 | comments were added to reflect the real situation. | ||
73 | 6. Clean up of physical/virtual address space mess in buffer | ||
74 | descriptors. | ||
75 | 7. Better transmit/receive error handling. | ||
76 | 8. The driver now uses zero-copy single buffer management scheme to | ||
77 | improve performance. | ||
78 | 9. Names of variables were changed to be more consistent. | ||
79 | 10. Clean up of auo-negotiation and timer code. | ||
80 | 11. Automatic detection and change of PHY on the fly. | ||
81 | 12. Bug in mac probing fixed. | ||
82 | 13. Fix 630E equalier problem by modifying the equalizer workaround | ||
83 | rule. | ||
84 | 14. Support for ICS1893 10/100 Interated PHYceiver. | ||
85 | 15. Support for media select by ifconfig. | ||
86 | 16. Added kernel-doc extratable documentation. | ||
87 | _________________________________________________________________ | ||
88 | |||
89 | Chapter 3. Tested Environment | ||
90 | |||
91 | This driver is developed on the following hardware | ||
92 | |||
93 | * Intel Celeron 500 with SiS 630 (rev 02) chipset | ||
94 | * SiS 900 (rev 01) and SiS 7016/7014 Fast Ethernet Card | ||
95 | |||
96 | and tested with these software environments | ||
97 | |||
98 | * Red Hat Linux version 6.2 | ||
99 | * Linux kernel version 2.4.0 | ||
100 | * Netscape version 4.6 | ||
101 | * NcFTP 3.0.0 beta 18 | ||
102 | * Samba version 2.0.3 | ||
103 | _________________________________________________________________ | ||
104 | |||
105 | Chapter 4. Files in This Package | ||
106 | |||
107 | In the package you can find these files: | ||
108 | |||
109 | sis900.c | ||
110 | Driver source file in C | ||
111 | |||
112 | sis900.h | ||
113 | Header file for sis900.c | ||
114 | |||
115 | sis900.sgml | ||
116 | DocBook SGML source of the document | ||
117 | |||
118 | sis900.txt | ||
119 | Driver document in plain text | ||
120 | _________________________________________________________________ | ||
121 | |||
122 | Chapter 5. Installation | ||
123 | |||
124 | Silicon Integrated System Corp. is cooperating closely with core Linux | ||
125 | Kernel developers. The revisions of SiS 900 driver are distributed by | ||
126 | the usuall channels for kernel tar files and patches. Those kernel tar | ||
127 | files for official kernel and patches for kernel pre-release can be | ||
128 | download at official kernel ftp site and its mirrors. The 1.06 | ||
129 | revision can be found in kernel version later than 2.3.15 and | ||
130 | pre-2.2.14, and 1.07 revision can be found in kernel version 2.4.0. If | ||
131 | you have no prior experience in networking under Linux, please read | ||
132 | Ethernet HOWTO and Networking HOWTO available from Linux Documentation | ||
133 | Project (LDP). | ||
134 | |||
135 | The driver is bundled in release later than 2.2.11 and 2.3.15 so this | ||
136 | is the most easy case. Be sure you have the appropriate packages for | ||
137 | compiling kernel source. Those packages are listed in Document/Changes | ||
138 | in kernel source distribution. If you have to install the driver other | ||
139 | than those bundled in kernel release, you should have your driver file | ||
140 | sis900.c and sis900.h copied into /usr/src/linux/drivers/net/ first. | ||
141 | There are two alternative ways to install the driver | ||
142 | _________________________________________________________________ | ||
143 | |||
144 | Building the driver as loadable module | ||
145 | |||
146 | To build the driver as a loadable kernel module you have to | ||
147 | reconfigure the kernel to activate network support by | ||
148 | |||
149 | make menuconfig | ||
150 | |||
151 | Choose "Loadable module support --->", then select "Enable loadable | ||
152 | module support". | ||
153 | |||
154 | Choose "Network Device Support --->", select "Ethernet (10 or | ||
155 | 100Mbit)". Then select "EISA, VLB, PCI and on board controllers", and | ||
156 | choose "SiS 900/7016 PCI Fast Ethernet Adapter support" to "M". | ||
157 | |||
158 | After reconfiguring the kernel, you can make the driver module by | ||
159 | |||
160 | make modules | ||
161 | |||
162 | The driver should be compiled with no errors. After compiling the | ||
163 | driver, the driver can be installed to proper place by | ||
164 | |||
165 | make modules_install | ||
166 | |||
167 | Load the driver into kernel by | ||
168 | |||
169 | insmod sis900 | ||
170 | |||
171 | When loading the driver into memory, some information message can be | ||
172 | view by | ||
173 | |||
174 | dmesg | ||
175 | |||
176 | or | ||
177 | cat /var/log/message | ||
178 | |||
179 | If the driver is loaded properly you will have messages similar to | ||
180 | this: | ||
181 | |||
182 | sis900.c: v1.07.06 11/07/2000 | ||
183 | eth0: SiS 900 PCI Fast Ethernet at 0xd000, IRQ 10, 00:00:e8:83:7f:a4. | ||
184 | eth0: SiS 900 Internal MII PHY transceiver found at address 1. | ||
185 | eth0: Using SiS 900 Internal MII PHY as default | ||
186 | |||
187 | showing the version of the driver and the results of probing routine. | ||
188 | |||
189 | Once the driver is loaded, network can be brought up by | ||
190 | |||
191 | /sbin/ifconfig eth0 IPADDR broadcast BROADCAST netmask NETMASK media TYPE | ||
192 | |||
193 | where IPADDR, BROADCAST, NETMASK are your IP address, broadcast | ||
194 | address and netmask respectively. TYPE is used to set medium type used | ||
195 | by the device. Typical values are "10baseT"(twisted-pair 10Mbps | ||
196 | Ethernet) or "100baseT" (twisted-pair 100Mbps Ethernet). For more | ||
197 | information on how to configure network interface, please refer to | ||
198 | Networking HOWTO. | ||
199 | |||
200 | The link status is also shown by kernel messages. For example, after | ||
201 | the network interface is activated, you may have the message: | ||
202 | |||
203 | eth0: Media Link On 100mbps full-duplex | ||
204 | |||
205 | If you try to unplug the twist pair (TP) cable you will get | ||
206 | |||
207 | eth0: Media Link Off | ||
208 | |||
209 | indicating that the link is failed. | ||
210 | _________________________________________________________________ | ||
211 | |||
212 | Building the driver into kernel | ||
213 | |||
214 | If you want to make the driver into kernel, choose "Y" rather than "M" | ||
215 | on "SiS 900/7016 PCI Fast Ethernet Adapter support" when configuring | ||
216 | the kernel. Build the kernel image in the usual way | ||
217 | |||
218 | make clean | ||
219 | |||
220 | make bzlilo | ||
221 | |||
222 | Next time the system reboot, you have the driver in memory. | ||
223 | _________________________________________________________________ | ||
224 | |||
225 | Chapter 6. Known Problems and Bugs | ||
226 | |||
227 | There are some known problems and bugs. If you find any other bugs | ||
228 | please mail to lcchang@sis.com.tw | ||
229 | |||
230 | 1. AM79C901 HomePNA PHY is not thoroughly tested, there may be some | ||
231 | bugs in the "on the fly" change of transceiver. | ||
232 | 2. A bug is hidden somewhere in the receive buffer management code, | ||
233 | the bug causes NULL pointer reference in the kernel. This fault is | ||
234 | caught before bad things happen and reported with the message: | ||
235 | eth0: NULL pointer encountered in Rx ring, skipping which can be | ||
236 | viewed with dmesg or cat /var/log/message. | ||
237 | 3. The media type change from 10Mbps to 100Mbps twisted-pair ethernet | ||
238 | by ifconfig causes the media link down. | ||
239 | _________________________________________________________________ | ||
240 | |||
241 | Chapter 7. Revision History | ||
242 | |||
243 | * November 13, 2000, Revision 1.07, seventh release, 630E problem | ||
244 | fixed and further clean up. | ||
245 | * November 4, 1999, Revision 1.06, Second release, lots of clean up | ||
246 | and optimization. | ||
247 | * August 8, 1999, Revision 1.05, Initial Public Release | ||
248 | _________________________________________________________________ | ||
249 | |||
250 | Chapter 8. Acknowledgements | ||
251 | |||
252 | This driver was originally derived form Donald Becker's pci-skeleton | ||
253 | and rtl8139 drivers. Donald also provided various suggestion regarded | ||
254 | with improvements made in revision 1.06. | ||
255 | |||
256 | The 1.05 revision was created by Jim Huang, AMD 79c901 support was | ||
257 | added by Chin-Shan Li. | ||
diff --git a/arch/ppc/platforms/hdpu.c b/arch/ppc/platforms/hdpu.c
index 50039a204c24..f945416960e9 100644
--- a/arch/ppc/platforms/hdpu.c
+++ b/arch/ppc/platforms/hdpu.c
@@ -319,11 +319,10 @@ static void __init hdpu_fixup_eth_pdata(struct platform_device *pd)
319 | struct mv643xx_eth_platform_data *eth_pd; | 319 | struct mv643xx_eth_platform_data *eth_pd; |
320 | eth_pd = pd->dev.platform_data; | 320 | eth_pd = pd->dev.platform_data; |
321 | 321 | ||
322 | eth_pd->port_serial_control = | ||
323 | mv64x60_read(&bh, MV643XX_ETH_PORT_SERIAL_CONTROL_REG(pd->id) & ~1); | ||
324 | |||
325 | eth_pd->force_phy_addr = 1; | 322 | eth_pd->force_phy_addr = 1; |
326 | eth_pd->phy_addr = pd->id; | 323 | eth_pd->phy_addr = pd->id; |
324 | eth_pd->speed = SPEED_100; | ||
325 | eth_pd->duplex = DUPLEX_FULL; | ||
327 | eth_pd->tx_queue_size = 400; | 326 | eth_pd->tx_queue_size = 400; |
328 | eth_pd->rx_queue_size = 800; | 327 | eth_pd->rx_queue_size = 800; |
329 | } | 328 | } |
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 8da3cf66894c..296708ceceb2 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -182,6 +182,9 @@ struct as_rq {
182 | 182 | ||
183 | static kmem_cache_t *arq_pool; | 183 | static kmem_cache_t *arq_pool; |
184 | 184 | ||
185 | static atomic_t ioc_count = ATOMIC_INIT(0); | ||
186 | static struct completion *ioc_gone; | ||
187 | |||
185 | static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq); | 188 | static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq); |
186 | static void as_antic_stop(struct as_data *ad); | 189 | static void as_antic_stop(struct as_data *ad); |
187 | 190 | ||
@@ -193,6 +196,15 @@ static void as_antic_stop(struct as_data *ad);
193 | static void free_as_io_context(struct as_io_context *aic) | 196 | static void free_as_io_context(struct as_io_context *aic) |
194 | { | 197 | { |
195 | kfree(aic); | 198 | kfree(aic); |
199 | if (atomic_dec_and_test(&ioc_count) && ioc_gone) | ||
200 | complete(ioc_gone); | ||
201 | } | ||
202 | |||
203 | static void as_trim(struct io_context *ioc) | ||
204 | { | ||
205 | if (ioc->aic) | ||
206 | free_as_io_context(ioc->aic); | ||
207 | ioc->aic = NULL; | ||
196 | } | 208 | } |
197 | 209 | ||
198 | /* Called when the task exits */ | 210 | /* Called when the task exits */ |
@@ -220,6 +232,7 @@ static struct as_io_context *alloc_as_io_context(void)
220 | ret->seek_total = 0; | 232 | ret->seek_total = 0; |
221 | ret->seek_samples = 0; | 233 | ret->seek_samples = 0; |
222 | ret->seek_mean = 0; | 234 | ret->seek_mean = 0; |
235 | atomic_inc(&ioc_count); | ||
223 | } | 236 | } |
224 | 237 | ||
225 | return ret; | 238 | return ret; |
@@ -1696,11 +1709,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
1696 | /* | 1709 | /* |
1697 | * sysfs parts below | 1710 | * sysfs parts below |
1698 | */ | 1711 | */ |
1699 | struct as_fs_entry { | ||
1700 | struct attribute attr; | ||
1701 | ssize_t (*show)(struct as_data *, char *); | ||
1702 | ssize_t (*store)(struct as_data *, const char *, size_t); | ||
1703 | }; | ||
1704 | 1712 | ||
1705 | static ssize_t | 1713 | static ssize_t |
1706 | as_var_show(unsigned int var, char *page) | 1714 | as_var_show(unsigned int var, char *page) |
@@ -1717,8 +1725,9 @@ as_var_store(unsigned long *var, const char *page, size_t count)
1717 | return count; | 1725 | return count; |
1718 | } | 1726 | } |
1719 | 1727 | ||
1720 | static ssize_t as_est_show(struct as_data *ad, char *page) | 1728 | static ssize_t est_time_show(elevator_t *e, char *page) |
1721 | { | 1729 | { |
1730 | struct as_data *ad = e->elevator_data; | ||
1722 | int pos = 0; | 1731 | int pos = 0; |
1723 | 1732 | ||
1724 | pos += sprintf(page+pos, "%lu %% exit probability\n", | 1733 | pos += sprintf(page+pos, "%lu %% exit probability\n", |
@@ -1734,21 +1743,23 @@
1734 | } | 1743 | } |
1735 | 1744 | ||
1736 | #define SHOW_FUNCTION(__FUNC, __VAR) \ | 1745 | #define SHOW_FUNCTION(__FUNC, __VAR) \ |
1737 | static ssize_t __FUNC(struct as_data *ad, char *page) \ | 1746 | static ssize_t __FUNC(elevator_t *e, char *page) \ |
1738 | { \ | 1747 | { \ |
1748 | struct as_data *ad = e->elevator_data; \ | ||
1739 | return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ | 1749 | return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ |
1740 | } | 1750 | } |
1741 | SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]); | 1751 | SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); |
1742 | SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]); | 1752 | SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); |
1743 | SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire); | 1753 | SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); |
1744 | SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]); | 1754 | SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); |
1745 | SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]); | 1755 | SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); |
1746 | #undef SHOW_FUNCTION | 1756 | #undef SHOW_FUNCTION |
1747 | 1757 | ||
1748 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ | 1758 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ |
1749 | static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \ | 1759 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ |
1750 | { \ | 1760 | { \ |
1751 | int ret = as_var_store(__PTR, (page), count); \ | 1761 | struct as_data *ad = e->elevator_data; \ |
1762 | int ret = as_var_store(__PTR, (page), count); \ | ||
1752 | if (*(__PTR) < (MIN)) \ | 1763 | if (*(__PTR) < (MIN)) \ |
1753 | *(__PTR) = (MIN); \ | 1764 | *(__PTR) = (MIN); \ |
1754 | else if (*(__PTR) > (MAX)) \ | 1765 | else if (*(__PTR) > (MAX)) \ |
@@ -1756,90 +1767,26 @@ static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
1756 | *(__PTR) = msecs_to_jiffies(*(__PTR)); \ | 1767 | *(__PTR) = msecs_to_jiffies(*(__PTR)); \ |
1757 | return ret; \ | 1768 | return ret; \ |
1758 | } | 1769 | } |
1759 | STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); | 1770 | STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); |
1760 | STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); | 1771 | STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); |
1761 | STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX); | 1772 | STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); |
1762 | STORE_FUNCTION(as_read_batchexpire_store, | 1773 | STORE_FUNCTION(as_read_batch_expire_store, |
1763 | &ad->batch_expire[REQ_SYNC], 0, INT_MAX); | 1774 | &ad->batch_expire[REQ_SYNC], 0, INT_MAX); |
1764 | STORE_FUNCTION(as_write_batchexpire_store, | 1775 | STORE_FUNCTION(as_write_batch_expire_store, |
1765 | &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); | 1776 | &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); |
1766 | #undef STORE_FUNCTION | 1777 | #undef STORE_FUNCTION |
1767 | 1778 | ||
1768 | static struct as_fs_entry as_est_entry = { | 1779 | #define AS_ATTR(name) \ |
1769 | .attr = {.name = "est_time", .mode = S_IRUGO }, | 1780 | __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store) |
1770 | .show = as_est_show, | 1781 | |
1771 | }; | 1782 | static struct elv_fs_entry as_attrs[] = { |
1772 | static struct as_fs_entry as_readexpire_entry = { | 1783 | __ATTR_RO(est_time), |
1773 | .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR }, | 1784 | AS_ATTR(read_expire), |
1774 | .show = as_readexpire_show, | 1785 | AS_ATTR(write_expire), |
1775 | .store = as_readexpire_store, | 1786 | AS_ATTR(antic_expire), |
1776 | }; | 1787 | AS_ATTR(read_batch_expire), |
1777 | static struct as_fs_entry as_writeexpire_entry = { | 1788 | AS_ATTR(write_batch_expire), |
1778 | .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR }, | 1789 | __ATTR_NULL |
1779 | .show = as_writeexpire_show, | ||
1780 | .store = as_writeexpire_store, | ||
1781 | }; | ||
1782 | static struct as_fs_entry as_anticexpire_entry = { | ||
1783 | .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR }, | ||
1784 | .show = as_anticexpire_show, | ||
1785 | .store = as_anticexpire_store, | ||
1786 | }; | ||
1787 | static struct as_fs_entry as_read_batchexpire_entry = { | ||
1788 | .attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR }, | ||
1789 | .show = as_read_batchexpire_show, | ||
1790 | .store = as_read_batchexpire_store, | ||
1791 | }; | ||
1792 | static struct as_fs_entry as_write_batchexpire_entry = { | ||
1793 | .attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR }, | ||
1794 | .show = as_write_batchexpire_show, | ||
1795 | .store = as_write_batchexpire_store, | ||
1796 | }; | ||
1797 | |||
1798 | static struct attribute *default_attrs[] = { | ||
1799 | &as_est_entry.attr, | ||
1800 | &as_readexpire_entry.attr, | ||
1801 | &as_writeexpire_entry.attr, | ||
1802 | &as_anticexpire_entry.attr, | ||
1803 | &as_read_batchexpire_entry.attr, | ||
1804 | &as_write_batchexpire_entry.attr, | ||
1805 | NULL, | ||
1806 | }; | ||
1807 | |||
1808 | #define to_as(atr) container_of((atr), struct as_fs_entry, attr) | ||
1809 | |||
1810 | static ssize_t | ||
1811 | as_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
1812 | { | ||
1813 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
1814 | struct as_fs_entry *entry = to_as(attr); | ||
1815 | |||
1816 | if (!entry->show) | ||
1817 | return -EIO; | ||
1818 | |||
1819 | return entry->show(e->elevator_data, page); | ||
1820 | } | ||
1821 | |||
1822 | static ssize_t | ||
1823 | as_attr_store(struct kobject *kobj, struct attribute *attr, | ||
1824 | const char *page, size_t length) | ||
1825 | { | ||
1826 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
1827 | struct as_fs_entry *entry = to_as(attr); | ||
1828 | |||
1829 | if (!entry->store) | ||
1830 | return -EIO; | ||
1831 | |||
1832 | return entry->store(e->elevator_data, page, length); | ||
1833 | } | ||
1834 | |||
1835 | static struct sysfs_ops as_sysfs_ops = { | ||
1836 | .show = as_attr_show, | ||
1837 | .store = as_attr_store, | ||
1838 | }; | ||
1839 | |||
1840 | static struct kobj_type as_ktype = { | ||
1841 | .sysfs_ops = &as_sysfs_ops, | ||
1842 | .default_attrs = default_attrs, | ||
1843 | }; | 1790 | }; |
1844 | 1791 | ||
1845 | static struct elevator_type iosched_as = { | 1792 | static struct elevator_type iosched_as = { |
@@ -1860,9 +1807,10 @@ static struct elevator_type iosched_as = {
1860 | .elevator_may_queue_fn = as_may_queue, | 1807 | .elevator_may_queue_fn = as_may_queue, |
1861 | .elevator_init_fn = as_init_queue, | 1808 | .elevator_init_fn = as_init_queue, |
1862 | .elevator_exit_fn = as_exit_queue, | 1809 | .elevator_exit_fn = as_exit_queue, |
1810 | .trim = as_trim, | ||
1863 | }, | 1811 | }, |
1864 | 1812 | ||
1865 | .elevator_ktype = &as_ktype, | 1813 | .elevator_attrs = as_attrs, |
1866 | .elevator_name = "anticipatory", | 1814 | .elevator_name = "anticipatory", |
1867 | .elevator_owner = THIS_MODULE, | 1815 | .elevator_owner = THIS_MODULE, |
1868 | }; | 1816 | }; |
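The hunks above drop the scheduler-private as_fs_entry/kobj_type plumbing in favour of an elv_fs_entry table whose show/store methods receive the elevator_t directly, so the elevator core can provide the sysfs boilerplate once. A minimal sketch of the new pattern for a hypothetical scheduler follows; the foo_* names and the quantum tunable are invented for illustration, and only the interfaces visible in this diff are assumed.

    /* sketch: one tunable exported through the shared elevator sysfs code */
    static ssize_t foo_quantum_show(elevator_t *e, char *page)
    {
            struct foo_data *fd = e->elevator_data;     /* scheduler-private data */

            return sprintf(page, "%u\n", fd->quantum);
    }

    static ssize_t foo_quantum_store(elevator_t *e, const char *page, size_t count)
    {
            struct foo_data *fd = e->elevator_data;

            fd->quantum = simple_strtoul(page, NULL, 10);
            return count;
    }

    static struct elv_fs_entry foo_attrs[] = {
            __ATTR(quantum, S_IRUGO|S_IWUSR, foo_quantum_show, foo_quantum_store),
            __ATTR_NULL
    };

    static struct elevator_type iosched_foo = {
            /* .ops = { ... }, */
            .elevator_attrs = foo_attrs,
            .elevator_name  = "foo",
            .elevator_owner = THIS_MODULE,
    };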
@@ -1893,7 +1841,13 @@
1893 | 1841 | ||
1894 | static void __exit as_exit(void) | 1842 | static void __exit as_exit(void) |
1895 | { | 1843 | { |
1844 | DECLARE_COMPLETION(all_gone); | ||
1896 | elv_unregister(&iosched_as); | 1845 | elv_unregister(&iosched_as); |
1846 | ioc_gone = &all_gone; | ||
1847 | barrier(); | ||
1848 | if (atomic_read(&ioc_count)) | ||
1849 | complete(ioc_gone); | ||
1850 | synchronize_rcu(); | ||
1897 | kmem_cache_destroy(arq_pool); | 1851 | kmem_cache_destroy(arq_pool); |
1898 | } | 1852 | } |
1899 | 1853 | ||
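Taken together, the new ioc_count/ioc_gone pair and the as_exit() change let the module learn when the last anticipatory io context has been freed before arq_pool is destroyed: allocation bumps the counter, freeing drops it and signals a completion once the exit path has armed one. The sketch below restates that idiom with invented names (obj_count, obj_gone, free_obj); it shows the waiting form of the exit side and is not a copy of the code above.

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/completion.h>
    #include <asm/atomic.h>

    static atomic_t obj_count = ATOMIC_INIT(0);
    static struct completion *obj_gone;

    static void free_obj(void *obj)
    {
            kfree(obj);
            /* the last object out signals whoever armed obj_gone */
            if (atomic_dec_and_test(&obj_count) && obj_gone)
                    complete(obj_gone);
    }

    static void __exit obj_module_exit(void)
    {
            DECLARE_COMPLETION(all_gone);

            obj_gone = &all_gone;
            barrier();      /* publish obj_gone before sampling the count */
            if (atomic_read(&obj_count))
                    wait_for_completion(&all_gone);
    }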
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c8dbe38c81c8..c4a0d5d8d7f0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -6,21 +6,13 @@
6 | * | 6 | * |
7 | * Copyright (C) 2003 Jens Axboe <axboe@suse.de> | 7 | * Copyright (C) 2003 Jens Axboe <axboe@suse.de> |
8 | */ | 8 | */ |
9 | #include <linux/kernel.h> | ||
10 | #include <linux/fs.h> | ||
11 | #include <linux/blkdev.h> | ||
12 | #include <linux/elevator.h> | ||
13 | #include <linux/bio.h> | ||
14 | #include <linux/config.h> | 9 | #include <linux/config.h> |
15 | #include <linux/module.h> | 10 | #include <linux/module.h> |
16 | #include <linux/slab.h> | 11 | #include <linux/blkdev.h> |
17 | #include <linux/init.h> | 12 | #include <linux/elevator.h> |
18 | #include <linux/compiler.h> | ||
19 | #include <linux/hash.h> | 13 | #include <linux/hash.h> |
20 | #include <linux/rbtree.h> | 14 | #include <linux/rbtree.h> |
21 | #include <linux/mempool.h> | ||
22 | #include <linux/ioprio.h> | 15 | #include <linux/ioprio.h> |
23 | #include <linux/writeback.h> | ||
24 | 16 | ||
25 | /* | 17 | /* |
26 | * tunables | 18 | * tunables |
@@ -47,6 +39,8 @@ static int cfq_slice_idle = HZ / 100;
47 | */ | 39 | */ |
48 | static const int cfq_max_depth = 2; | 40 | static const int cfq_max_depth = 2; |
49 | 41 | ||
42 | static DEFINE_RWLOCK(cfq_exit_lock); | ||
43 | |||
50 | /* | 44 | /* |
51 | * for the hash of cfqq inside the cfqd | 45 | * for the hash of cfqq inside the cfqd |
52 | */ | 46 | */ |
@@ -89,6 +83,9 @@ static kmem_cache_t *crq_pool;
89 | static kmem_cache_t *cfq_pool; | 83 | static kmem_cache_t *cfq_pool; |
90 | static kmem_cache_t *cfq_ioc_pool; | 84 | static kmem_cache_t *cfq_ioc_pool; |
91 | 85 | ||
86 | static atomic_t ioc_count = ATOMIC_INIT(0); | ||
87 | static struct completion *ioc_gone; | ||
88 | |||
92 | #define CFQ_PRIO_LISTS IOPRIO_BE_NR | 89 | #define CFQ_PRIO_LISTS IOPRIO_BE_NR |
93 | #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) | 90 | #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) |
94 | #define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) | 91 | #define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) |
@@ -109,7 +106,6 @@ static kmem_cache_t *cfq_ioc_pool;
109 | * Per block device queue structure | 106 | * Per block device queue structure |
110 | */ | 107 | */ |
111 | struct cfq_data { | 108 | struct cfq_data { |
112 | atomic_t ref; | ||
113 | request_queue_t *queue; | 109 | request_queue_t *queue; |
114 | 110 | ||
115 | /* | 111 | /* |
@@ -175,6 +171,8 @@ struct cfq_data {
175 | unsigned int cfq_slice_async_rq; | 171 | unsigned int cfq_slice_async_rq; |
176 | unsigned int cfq_slice_idle; | 172 | unsigned int cfq_slice_idle; |
177 | unsigned int cfq_max_depth; | 173 | unsigned int cfq_max_depth; |
174 | |||
175 | struct list_head cic_list; | ||
178 | }; | 176 | }; |
179 | 177 | ||
180 | /* | 178 | /* |
@@ -288,7 +286,7 @@ CFQ_CRQ_FNS(is_sync);
288 | 286 | ||
289 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); | 287 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); |
290 | static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); | 288 | static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); |
291 | static void cfq_put_cfqd(struct cfq_data *cfqd); | 289 | static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask); |
292 | 290 | ||
293 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) | 291 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) |
294 | 292 | ||
@@ -1160,8 +1158,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
1160 | if (unlikely(cfqd->active_queue == cfqq)) | 1158 | if (unlikely(cfqd->active_queue == cfqq)) |
1161 | __cfq_slice_expired(cfqd, cfqq, 0); | 1159 | __cfq_slice_expired(cfqd, cfqq, 0); |
1162 | 1160 | ||
1163 | cfq_put_cfqd(cfqq->cfqd); | ||
1164 | |||
1165 | /* | 1161 | /* |
1166 | * it's on the empty list and still hashed | 1162 | * it's on the empty list and still hashed |
1167 | */ | 1163 | */ |
@@ -1179,7 +1175,7 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1179 | 1175 | ||
1180 | hlist_for_each_safe(entry, next, hash_list) { | 1176 | hlist_for_each_safe(entry, next, hash_list) { |
1181 | struct cfq_queue *__cfqq = list_entry_qhash(entry); | 1177 | struct cfq_queue *__cfqq = list_entry_qhash(entry); |
1182 | const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio); | 1178 | const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio); |
1183 | 1179 | ||
1184 | if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) | 1180 | if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) |
1185 | return __cfqq; | 1181 | return __cfqq; |
@@ -1198,13 +1194,24 @@ static void cfq_free_io_context(struct cfq_io_context *cic) | |||
1198 | { | 1194 | { |
1199 | struct cfq_io_context *__cic; | 1195 | struct cfq_io_context *__cic; |
1200 | struct list_head *entry, *next; | 1196 | struct list_head *entry, *next; |
1197 | int freed = 1; | ||
1201 | 1198 | ||
1202 | list_for_each_safe(entry, next, &cic->list) { | 1199 | list_for_each_safe(entry, next, &cic->list) { |
1203 | __cic = list_entry(entry, struct cfq_io_context, list); | 1200 | __cic = list_entry(entry, struct cfq_io_context, list); |
1204 | kmem_cache_free(cfq_ioc_pool, __cic); | 1201 | kmem_cache_free(cfq_ioc_pool, __cic); |
1202 | freed++; | ||
1205 | } | 1203 | } |
1206 | 1204 | ||
1207 | kmem_cache_free(cfq_ioc_pool, cic); | 1205 | kmem_cache_free(cfq_ioc_pool, cic); |
1206 | if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone) | ||
1207 | complete(ioc_gone); | ||
1208 | } | ||
1209 | |||
1210 | static void cfq_trim(struct io_context *ioc) | ||
1211 | { | ||
1212 | ioc->set_ioprio = NULL; | ||
1213 | if (ioc->cic) | ||
1214 | cfq_free_io_context(ioc->cic); | ||
1208 | } | 1215 | } |
1209 | 1216 | ||
1210 | /* | 1217 | /* |
@@ -1212,25 +1219,37 @@ static void cfq_free_io_context(struct cfq_io_context *cic) | |||
1212 | */ | 1219 | */ |
1213 | static void cfq_exit_single_io_context(struct cfq_io_context *cic) | 1220 | static void cfq_exit_single_io_context(struct cfq_io_context *cic) |
1214 | { | 1221 | { |
1215 | struct cfq_data *cfqd = cic->cfqq->cfqd; | 1222 | struct cfq_data *cfqd = cic->key; |
1216 | request_queue_t *q = cfqd->queue; | 1223 | request_queue_t *q; |
1224 | |||
1225 | if (!cfqd) | ||
1226 | return; | ||
1227 | |||
1228 | q = cfqd->queue; | ||
1217 | 1229 | ||
1218 | WARN_ON(!irqs_disabled()); | 1230 | WARN_ON(!irqs_disabled()); |
1219 | 1231 | ||
1220 | spin_lock(q->queue_lock); | 1232 | spin_lock(q->queue_lock); |
1221 | 1233 | ||
1222 | if (unlikely(cic->cfqq == cfqd->active_queue)) | 1234 | if (cic->cfqq[ASYNC]) { |
1223 | __cfq_slice_expired(cfqd, cic->cfqq, 0); | 1235 | if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue)) |
1236 | __cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0); | ||
1237 | cfq_put_queue(cic->cfqq[ASYNC]); | ||
1238 | cic->cfqq[ASYNC] = NULL; | ||
1239 | } | ||
1240 | |||
1241 | if (cic->cfqq[SYNC]) { | ||
1242 | if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue)) | ||
1243 | __cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0); | ||
1244 | cfq_put_queue(cic->cfqq[SYNC]); | ||
1245 | cic->cfqq[SYNC] = NULL; | ||
1246 | } | ||
1224 | 1247 | ||
1225 | cfq_put_queue(cic->cfqq); | 1248 | cic->key = NULL; |
1226 | cic->cfqq = NULL; | 1249 | list_del_init(&cic->queue_list); |
1227 | spin_unlock(q->queue_lock); | 1250 | spin_unlock(q->queue_lock); |
1228 | } | 1251 | } |
1229 | 1252 | ||
1230 | /* | ||
1231 | * Another task may update the task cic list, if it is doing a queue lookup | ||
1232 | * on its behalf. cfq_cic_lock excludes such concurrent updates | ||
1233 | */ | ||
1234 | static void cfq_exit_io_context(struct cfq_io_context *cic) | 1253 | static void cfq_exit_io_context(struct cfq_io_context *cic) |
1235 | { | 1254 | { |
1236 | struct cfq_io_context *__cic; | 1255 | struct cfq_io_context *__cic; |
@@ -1242,12 +1261,14 @@ static void cfq_exit_io_context(struct cfq_io_context *cic) | |||
1242 | /* | 1261 | /* |
1243 | * put the reference this task is holding to the various queues | 1262 | * put the reference this task is holding to the various queues |
1244 | */ | 1263 | */ |
1264 | read_lock(&cfq_exit_lock); | ||
1245 | list_for_each(entry, &cic->list) { | 1265 | list_for_each(entry, &cic->list) { |
1246 | __cic = list_entry(entry, struct cfq_io_context, list); | 1266 | __cic = list_entry(entry, struct cfq_io_context, list); |
1247 | cfq_exit_single_io_context(__cic); | 1267 | cfq_exit_single_io_context(__cic); |
1248 | } | 1268 | } |
1249 | 1269 | ||
1250 | cfq_exit_single_io_context(cic); | 1270 | cfq_exit_single_io_context(cic); |
1271 | read_unlock(&cfq_exit_lock); | ||
1251 | local_irq_restore(flags); | 1272 | local_irq_restore(flags); |
1252 | } | 1273 | } |
1253 | 1274 | ||
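Note: the rewritten cfq_exit_io_context() above walks the task's list of cfq_io_context entries with cfq_exit_lock held for reading, leaving the per-queue teardown to cfq_exit_single_io_context(). A generic sketch of that walk-under-rwlock shape, with illustrative names only (not the driver's code):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_RWLOCK(example_exit_lock);   /* stands in for cfq_exit_lock */

    /* Tear down every element of a shared list while holding the lock for
     * reading, so that a writer (in this patch: the ioprio-change and
     * queue-exit paths, which take the lock for writing) cannot run
     * concurrently with the walk. */
    static void example_exit_all(struct list_head *head,
                                 void (*exit_one)(struct list_head *))
    {
            struct list_head *entry;

            read_lock(&example_exit_lock);
            list_for_each(entry, head)
                    exit_one(entry);
            read_unlock(&example_exit_lock);
    }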
@@ -1258,7 +1279,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1258 | 1279 | ||
1259 | if (cic) { | 1280 | if (cic) { |
1260 | INIT_LIST_HEAD(&cic->list); | 1281 | INIT_LIST_HEAD(&cic->list); |
1261 | cic->cfqq = NULL; | 1282 | cic->cfqq[ASYNC] = NULL; |
1283 | cic->cfqq[SYNC] = NULL; | ||
1262 | cic->key = NULL; | 1284 | cic->key = NULL; |
1263 | cic->last_end_request = jiffies; | 1285 | cic->last_end_request = jiffies; |
1264 | cic->ttime_total = 0; | 1286 | cic->ttime_total = 0; |
@@ -1266,6 +1288,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1266 | cic->ttime_mean = 0; | 1288 | cic->ttime_mean = 0; |
1267 | cic->dtor = cfq_free_io_context; | 1289 | cic->dtor = cfq_free_io_context; |
1268 | cic->exit = cfq_exit_io_context; | 1290 | cic->exit = cfq_exit_io_context; |
1291 | INIT_LIST_HEAD(&cic->queue_list); | ||
1292 | atomic_inc(&ioc_count); | ||
1269 | } | 1293 | } |
1270 | 1294 | ||
1271 | return cic; | 1295 | return cic; |
@@ -1318,14 +1342,27 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq) | |||
1318 | cfq_clear_cfqq_prio_changed(cfqq); | 1342 | cfq_clear_cfqq_prio_changed(cfqq); |
1319 | } | 1343 | } |
1320 | 1344 | ||
1321 | static inline void changed_ioprio(struct cfq_queue *cfqq) | 1345 | static inline void changed_ioprio(struct cfq_io_context *cic) |
1322 | { | 1346 | { |
1323 | if (cfqq) { | 1347 | struct cfq_data *cfqd = cic->key; |
1324 | struct cfq_data *cfqd = cfqq->cfqd; | 1348 | struct cfq_queue *cfqq; |
1325 | 1349 | if (cfqd) { | |
1326 | spin_lock(cfqd->queue->queue_lock); | 1350 | spin_lock(cfqd->queue->queue_lock); |
1327 | cfq_mark_cfqq_prio_changed(cfqq); | 1351 | cfqq = cic->cfqq[ASYNC]; |
1328 | cfq_init_prio_data(cfqq); | 1352 | if (cfqq) { |
1353 | struct cfq_queue *new_cfqq; | ||
1354 | new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, | ||
1355 | cic->ioc->task, GFP_ATOMIC); | ||
1356 | if (new_cfqq) { | ||
1357 | cic->cfqq[ASYNC] = new_cfqq; | ||
1358 | cfq_put_queue(cfqq); | ||
1359 | } | ||
1360 | } | ||
1361 | cfqq = cic->cfqq[SYNC]; | ||
1362 | if (cfqq) { | ||
1363 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1364 | cfq_init_prio_data(cfqq); | ||
1365 | } | ||
1329 | spin_unlock(cfqd->queue->queue_lock); | 1366 | spin_unlock(cfqd->queue->queue_lock); |
1330 | } | 1367 | } |
1331 | } | 1368 | } |
@@ -1335,24 +1372,32 @@ static inline void changed_ioprio(struct cfq_queue *cfqq) | |||
1335 | */ | 1372 | */ |
1336 | static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) | 1373 | static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) |
1337 | { | 1374 | { |
1338 | struct cfq_io_context *cic = ioc->cic; | 1375 | struct cfq_io_context *cic; |
1376 | |||
1377 | write_lock(&cfq_exit_lock); | ||
1378 | |||
1379 | cic = ioc->cic; | ||
1339 | 1380 | ||
1340 | changed_ioprio(cic->cfqq); | 1381 | changed_ioprio(cic); |
1341 | 1382 | ||
1342 | list_for_each_entry(cic, &cic->list, list) | 1383 | list_for_each_entry(cic, &cic->list, list) |
1343 | changed_ioprio(cic->cfqq); | 1384 | changed_ioprio(cic); |
1385 | |||
1386 | write_unlock(&cfq_exit_lock); | ||
1344 | 1387 | ||
1345 | return 0; | 1388 | return 0; |
1346 | } | 1389 | } |
1347 | 1390 | ||
1348 | static struct cfq_queue * | 1391 | static struct cfq_queue * |
1349 | cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, | 1392 | cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, |
1350 | gfp_t gfp_mask) | 1393 | gfp_t gfp_mask) |
1351 | { | 1394 | { |
1352 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); | 1395 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); |
1353 | struct cfq_queue *cfqq, *new_cfqq = NULL; | 1396 | struct cfq_queue *cfqq, *new_cfqq = NULL; |
1397 | unsigned short ioprio; | ||
1354 | 1398 | ||
1355 | retry: | 1399 | retry: |
1400 | ioprio = tsk->ioprio; | ||
1356 | cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); | 1401 | cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); |
1357 | 1402 | ||
1358 | if (!cfqq) { | 1403 | if (!cfqq) { |
@@ -1381,7 +1426,6 @@ retry: | |||
1381 | hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); | 1426 | hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); |
1382 | atomic_set(&cfqq->ref, 0); | 1427 | atomic_set(&cfqq->ref, 0); |
1383 | cfqq->cfqd = cfqd; | 1428 | cfqq->cfqd = cfqd; |
1384 | atomic_inc(&cfqd->ref); | ||
1385 | cfqq->service_last = 0; | 1429 | cfqq->service_last = 0; |
1386 | /* | 1430 | /* |
1387 | * set ->slice_left to allow preemption for a new process | 1431 | * set ->slice_left to allow preemption for a new process |
@@ -1419,6 +1463,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1419 | if (!ioc) | 1463 | if (!ioc) |
1420 | return NULL; | 1464 | return NULL; |
1421 | 1465 | ||
1466 | restart: | ||
1422 | if ((cic = ioc->cic) == NULL) { | 1467 | if ((cic = ioc->cic) == NULL) { |
1423 | cic = cfq_alloc_io_context(cfqd, gfp_mask); | 1468 | cic = cfq_alloc_io_context(cfqd, gfp_mask); |
1424 | 1469 | ||
@@ -1429,11 +1474,13 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1429 | * manually increment generic io_context usage count, it | 1474 | * manually increment generic io_context usage count, it |
1430 | * cannot go away since we are already holding one ref to it | 1475 | * cannot go away since we are already holding one ref to it |
1431 | */ | 1476 | */ |
1432 | ioc->cic = cic; | ||
1433 | ioc->set_ioprio = cfq_ioc_set_ioprio; | ||
1434 | cic->ioc = ioc; | 1477 | cic->ioc = ioc; |
1435 | cic->key = cfqd; | 1478 | cic->key = cfqd; |
1436 | atomic_inc(&cfqd->ref); | 1479 | read_lock(&cfq_exit_lock); |
1480 | ioc->set_ioprio = cfq_ioc_set_ioprio; | ||
1481 | ioc->cic = cic; | ||
1482 | list_add(&cic->queue_list, &cfqd->cic_list); | ||
1483 | read_unlock(&cfq_exit_lock); | ||
1437 | } else { | 1484 | } else { |
1438 | struct cfq_io_context *__cic; | 1485 | struct cfq_io_context *__cic; |
1439 | 1486 | ||
@@ -1443,6 +1490,20 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1443 | if (cic->key == cfqd) | 1490 | if (cic->key == cfqd) |
1444 | goto out; | 1491 | goto out; |
1445 | 1492 | ||
1493 | if (unlikely(!cic->key)) { | ||
1494 | read_lock(&cfq_exit_lock); | ||
1495 | if (list_empty(&cic->list)) | ||
1496 | ioc->cic = NULL; | ||
1497 | else | ||
1498 | ioc->cic = list_entry(cic->list.next, | ||
1499 | struct cfq_io_context, | ||
1500 | list); | ||
1501 | read_unlock(&cfq_exit_lock); | ||
1502 | kmem_cache_free(cfq_ioc_pool, cic); | ||
1503 | atomic_dec(&ioc_count); | ||
1504 | goto restart; | ||
1505 | } | ||
1506 | |||
1446 | /* | 1507 | /* |
1447 | * cic exists, check if we already are there. linear search | 1508 | * cic exists, check if we already are there. linear search |
1448 | * should be ok here, the list will usually not be more than | 1509 | * should be ok here, the list will usually not be more than |
@@ -1457,6 +1518,14 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1457 | cic = __cic; | 1518 | cic = __cic; |
1458 | goto out; | 1519 | goto out; |
1459 | } | 1520 | } |
1521 | if (unlikely(!__cic->key)) { | ||
1522 | read_lock(&cfq_exit_lock); | ||
1523 | list_del(&__cic->list); | ||
1524 | read_unlock(&cfq_exit_lock); | ||
1525 | kmem_cache_free(cfq_ioc_pool, __cic); | ||
1526 | atomic_dec(&ioc_count); | ||
1527 | goto restart; | ||
1528 | } | ||
1460 | } | 1529 | } |
1461 | 1530 | ||
1462 | /* | 1531 | /* |
@@ -1469,8 +1538,10 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1469 | 1538 | ||
1470 | __cic->ioc = ioc; | 1539 | __cic->ioc = ioc; |
1471 | __cic->key = cfqd; | 1540 | __cic->key = cfqd; |
1472 | atomic_inc(&cfqd->ref); | 1541 | read_lock(&cfq_exit_lock); |
1473 | list_add(&__cic->list, &cic->list); | 1542 | list_add(&__cic->list, &cic->list); |
1543 | list_add(&__cic->queue_list, &cfqd->cic_list); | ||
1544 | read_unlock(&cfq_exit_lock); | ||
1474 | cic = __cic; | 1545 | cic = __cic; |
1475 | } | 1546 | } |
1476 | 1547 | ||
@@ -1890,6 +1961,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
1890 | struct cfq_queue *cfqq; | 1961 | struct cfq_queue *cfqq; |
1891 | struct cfq_rq *crq; | 1962 | struct cfq_rq *crq; |
1892 | unsigned long flags; | 1963 | unsigned long flags; |
1964 | int is_sync = key != CFQ_KEY_ASYNC; | ||
1893 | 1965 | ||
1894 | might_sleep_if(gfp_mask & __GFP_WAIT); | 1966 | might_sleep_if(gfp_mask & __GFP_WAIT); |
1895 | 1967 | ||
@@ -1900,14 +1972,14 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
1900 | if (!cic) | 1972 | if (!cic) |
1901 | goto queue_fail; | 1973 | goto queue_fail; |
1902 | 1974 | ||
1903 | if (!cic->cfqq) { | 1975 | if (!cic->cfqq[is_sync]) { |
1904 | cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask); | 1976 | cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask); |
1905 | if (!cfqq) | 1977 | if (!cfqq) |
1906 | goto queue_fail; | 1978 | goto queue_fail; |
1907 | 1979 | ||
1908 | cic->cfqq = cfqq; | 1980 | cic->cfqq[is_sync] = cfqq; |
1909 | } else | 1981 | } else |
1910 | cfqq = cic->cfqq; | 1982 | cfqq = cic->cfqq[is_sync]; |
1911 | 1983 | ||
1912 | cfqq->allocated[rw]++; | 1984 | cfqq->allocated[rw]++; |
1913 | cfq_clear_cfqq_must_alloc(cfqq); | 1985 | cfq_clear_cfqq_must_alloc(cfqq); |
@@ -1924,7 +1996,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
1924 | crq->cfq_queue = cfqq; | 1996 | crq->cfq_queue = cfqq; |
1925 | crq->io_context = cic; | 1997 | crq->io_context = cic; |
1926 | 1998 | ||
1927 | if (rw == READ || process_sync(tsk)) | 1999 | if (is_sync) |
1928 | cfq_mark_crq_is_sync(crq); | 2000 | cfq_mark_crq_is_sync(crq); |
1929 | else | 2001 | else |
1930 | cfq_clear_crq_is_sync(crq); | 2002 | cfq_clear_crq_is_sync(crq); |
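Note: after this hunk a cfq_io_context no longer holds a single queue but one per direction, indexed by whether the request is synchronous. A rough illustration of the lookup done in cfq_set_request() above, assuming the ASYNC/SYNC indices and the two-element cfqq[] array that this patch adds to the cfq_io_context definition outside this excerpt (pick_cfqq is a made-up helper name):

    #define ASYNC   0   /* assumed values, consistent with cic->cfqq[is_sync] above */
    #define SYNC    1

    static struct cfq_queue *pick_cfqq(struct cfq_data *cfqd,
                                       struct cfq_io_context *cic,
                                       unsigned int key,
                                       struct task_struct *tsk, gfp_t gfp_mask)
    {
            int is_sync = (key != CFQ_KEY_ASYNC);   /* 0 or 1 */

            if (!cic->cfqq[is_sync])
                    cic->cfqq[is_sync] = cfq_get_queue(cfqd, key, tsk, gfp_mask);

            return cic->cfqq[is_sync];      /* may be NULL if allocation failed */
    }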
@@ -2055,15 +2127,35 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | |||
2055 | blk_sync_queue(cfqd->queue); | 2127 | blk_sync_queue(cfqd->queue); |
2056 | } | 2128 | } |
2057 | 2129 | ||
2058 | static void cfq_put_cfqd(struct cfq_data *cfqd) | 2130 | static void cfq_exit_queue(elevator_t *e) |
2059 | { | 2131 | { |
2132 | struct cfq_data *cfqd = e->elevator_data; | ||
2060 | request_queue_t *q = cfqd->queue; | 2133 | request_queue_t *q = cfqd->queue; |
2061 | 2134 | ||
2062 | if (!atomic_dec_and_test(&cfqd->ref)) | 2135 | cfq_shutdown_timer_wq(cfqd); |
2063 | return; | 2136 | write_lock(&cfq_exit_lock); |
2137 | spin_lock_irq(q->queue_lock); | ||
2138 | if (cfqd->active_queue) | ||
2139 | __cfq_slice_expired(cfqd, cfqd->active_queue, 0); | ||
2140 | while(!list_empty(&cfqd->cic_list)) { | ||
2141 | struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, | ||
2142 | struct cfq_io_context, | ||
2143 | queue_list); | ||
2144 | if (cic->cfqq[ASYNC]) { | ||
2145 | cfq_put_queue(cic->cfqq[ASYNC]); | ||
2146 | cic->cfqq[ASYNC] = NULL; | ||
2147 | } | ||
2148 | if (cic->cfqq[SYNC]) { | ||
2149 | cfq_put_queue(cic->cfqq[SYNC]); | ||
2150 | cic->cfqq[SYNC] = NULL; | ||
2151 | } | ||
2152 | cic->key = NULL; | ||
2153 | list_del_init(&cic->queue_list); | ||
2154 | } | ||
2155 | spin_unlock_irq(q->queue_lock); | ||
2156 | write_unlock(&cfq_exit_lock); | ||
2064 | 2157 | ||
2065 | cfq_shutdown_timer_wq(cfqd); | 2158 | cfq_shutdown_timer_wq(cfqd); |
2066 | blk_put_queue(q); | ||
2067 | 2159 | ||
2068 | mempool_destroy(cfqd->crq_pool); | 2160 | mempool_destroy(cfqd->crq_pool); |
2069 | kfree(cfqd->crq_hash); | 2161 | kfree(cfqd->crq_hash); |
@@ -2071,14 +2163,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd) | |||
2071 | kfree(cfqd); | 2163 | kfree(cfqd); |
2072 | } | 2164 | } |
2073 | 2165 | ||
2074 | static void cfq_exit_queue(elevator_t *e) | ||
2075 | { | ||
2076 | struct cfq_data *cfqd = e->elevator_data; | ||
2077 | |||
2078 | cfq_shutdown_timer_wq(cfqd); | ||
2079 | cfq_put_cfqd(cfqd); | ||
2080 | } | ||
2081 | |||
2082 | static int cfq_init_queue(request_queue_t *q, elevator_t *e) | 2166 | static int cfq_init_queue(request_queue_t *q, elevator_t *e) |
2083 | { | 2167 | { |
2084 | struct cfq_data *cfqd; | 2168 | struct cfq_data *cfqd; |
@@ -2097,6 +2181,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
2097 | INIT_LIST_HEAD(&cfqd->cur_rr); | 2181 | INIT_LIST_HEAD(&cfqd->cur_rr); |
2098 | INIT_LIST_HEAD(&cfqd->idle_rr); | 2182 | INIT_LIST_HEAD(&cfqd->idle_rr); |
2099 | INIT_LIST_HEAD(&cfqd->empty_list); | 2183 | INIT_LIST_HEAD(&cfqd->empty_list); |
2184 | INIT_LIST_HEAD(&cfqd->cic_list); | ||
2100 | 2185 | ||
2101 | cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); | 2186 | cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); |
2102 | if (!cfqd->crq_hash) | 2187 | if (!cfqd->crq_hash) |
@@ -2118,7 +2203,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
2118 | e->elevator_data = cfqd; | 2203 | e->elevator_data = cfqd; |
2119 | 2204 | ||
2120 | cfqd->queue = q; | 2205 | cfqd->queue = q; |
2121 | atomic_inc(&q->refcnt); | ||
2122 | 2206 | ||
2123 | cfqd->max_queued = q->nr_requests / 4; | 2207 | cfqd->max_queued = q->nr_requests / 4; |
2124 | q->nr_batching = cfq_queued; | 2208 | q->nr_batching = cfq_queued; |
@@ -2133,8 +2217,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
2133 | 2217 | ||
2134 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); | 2218 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); |
2135 | 2219 | ||
2136 | atomic_set(&cfqd->ref, 1); | ||
2137 | |||
2138 | cfqd->cfq_queued = cfq_queued; | 2220 | cfqd->cfq_queued = cfq_queued; |
2139 | cfqd->cfq_quantum = cfq_quantum; | 2221 | cfqd->cfq_quantum = cfq_quantum; |
2140 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; | 2222 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; |
@@ -2193,11 +2275,6 @@ fail: | |||
2193 | /* | 2275 | /* |
2194 | * sysfs parts below --> | 2276 | * sysfs parts below --> |
2195 | */ | 2277 | */ |
2196 | struct cfq_fs_entry { | ||
2197 | struct attribute attr; | ||
2198 | ssize_t (*show)(struct cfq_data *, char *); | ||
2199 | ssize_t (*store)(struct cfq_data *, const char *, size_t); | ||
2200 | }; | ||
2201 | 2278 | ||
2202 | static ssize_t | 2279 | static ssize_t |
2203 | cfq_var_show(unsigned int var, char *page) | 2280 | cfq_var_show(unsigned int var, char *page) |
@@ -2215,8 +2292,9 @@ cfq_var_store(unsigned int *var, const char *page, size_t count) | |||
2215 | } | 2292 | } |
2216 | 2293 | ||
2217 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | 2294 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
2218 | static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ | 2295 | static ssize_t __FUNC(elevator_t *e, char *page) \ |
2219 | { \ | 2296 | { \ |
2297 | struct cfq_data *cfqd = e->elevator_data; \ | ||
2220 | unsigned int __data = __VAR; \ | 2298 | unsigned int __data = __VAR; \ |
2221 | if (__CONV) \ | 2299 | if (__CONV) \ |
2222 | __data = jiffies_to_msecs(__data); \ | 2300 | __data = jiffies_to_msecs(__data); \ |
@@ -2226,8 +2304,8 @@ SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); | |||
2226 | SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); | 2304 | SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); |
2227 | SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); | 2305 | SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); |
2228 | SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); | 2306 | SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); |
2229 | SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); | 2307 | SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); |
2230 | SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); | 2308 | SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); |
2231 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); | 2309 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); |
2232 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); | 2310 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); |
2233 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | 2311 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); |
@@ -2236,8 +2314,9 @@ SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); | |||
2236 | #undef SHOW_FUNCTION | 2314 | #undef SHOW_FUNCTION |
2237 | 2315 | ||
2238 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 2316 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
2239 | static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \ | 2317 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ |
2240 | { \ | 2318 | { \ |
2319 | struct cfq_data *cfqd = e->elevator_data; \ | ||
2241 | unsigned int __data; \ | 2320 | unsigned int __data; \ |
2242 | int ret = cfq_var_store(&__data, (page), count); \ | 2321 | int ret = cfq_var_store(&__data, (page), count); \ |
2243 | if (__data < (MIN)) \ | 2322 | if (__data < (MIN)) \ |
@@ -2254,8 +2333,8 @@ STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); | |||
2254 | STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); | 2333 | STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); |
2255 | STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); | 2334 | STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); |
2256 | STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); | 2335 | STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); |
2257 | STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); | 2336 | STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); |
2258 | STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); | 2337 | STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); |
2259 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); | 2338 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); |
2260 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | 2339 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); |
2261 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | 2340 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); |
@@ -2263,112 +2342,22 @@ STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, | |||
2263 | STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); | 2342 | STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); |
2264 | #undef STORE_FUNCTION | 2343 | #undef STORE_FUNCTION |
2265 | 2344 | ||
2266 | static struct cfq_fs_entry cfq_quantum_entry = { | 2345 | #define CFQ_ATTR(name) \ |
2267 | .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR }, | 2346 | __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) |
2268 | .show = cfq_quantum_show, | 2347 | |
2269 | .store = cfq_quantum_store, | 2348 | static struct elv_fs_entry cfq_attrs[] = { |
2270 | }; | 2349 | CFQ_ATTR(quantum), |
2271 | static struct cfq_fs_entry cfq_queued_entry = { | 2350 | CFQ_ATTR(queued), |
2272 | .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR }, | 2351 | CFQ_ATTR(fifo_expire_sync), |
2273 | .show = cfq_queued_show, | 2352 | CFQ_ATTR(fifo_expire_async), |
2274 | .store = cfq_queued_store, | 2353 | CFQ_ATTR(back_seek_max), |
2275 | }; | 2354 | CFQ_ATTR(back_seek_penalty), |
2276 | static struct cfq_fs_entry cfq_fifo_expire_sync_entry = { | 2355 | CFQ_ATTR(slice_sync), |
2277 | .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, | 2356 | CFQ_ATTR(slice_async), |
2278 | .show = cfq_fifo_expire_sync_show, | 2357 | CFQ_ATTR(slice_async_rq), |
2279 | .store = cfq_fifo_expire_sync_store, | 2358 | CFQ_ATTR(slice_idle), |
2280 | }; | 2359 | CFQ_ATTR(max_depth), |
2281 | static struct cfq_fs_entry cfq_fifo_expire_async_entry = { | 2360 | __ATTR_NULL |
2282 | .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR }, | ||
2283 | .show = cfq_fifo_expire_async_show, | ||
2284 | .store = cfq_fifo_expire_async_store, | ||
2285 | }; | ||
2286 | static struct cfq_fs_entry cfq_back_max_entry = { | ||
2287 | .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, | ||
2288 | .show = cfq_back_max_show, | ||
2289 | .store = cfq_back_max_store, | ||
2290 | }; | ||
2291 | static struct cfq_fs_entry cfq_back_penalty_entry = { | ||
2292 | .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR }, | ||
2293 | .show = cfq_back_penalty_show, | ||
2294 | .store = cfq_back_penalty_store, | ||
2295 | }; | ||
2296 | static struct cfq_fs_entry cfq_slice_sync_entry = { | ||
2297 | .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR }, | ||
2298 | .show = cfq_slice_sync_show, | ||
2299 | .store = cfq_slice_sync_store, | ||
2300 | }; | ||
2301 | static struct cfq_fs_entry cfq_slice_async_entry = { | ||
2302 | .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR }, | ||
2303 | .show = cfq_slice_async_show, | ||
2304 | .store = cfq_slice_async_store, | ||
2305 | }; | ||
2306 | static struct cfq_fs_entry cfq_slice_async_rq_entry = { | ||
2307 | .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR }, | ||
2308 | .show = cfq_slice_async_rq_show, | ||
2309 | .store = cfq_slice_async_rq_store, | ||
2310 | }; | ||
2311 | static struct cfq_fs_entry cfq_slice_idle_entry = { | ||
2312 | .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR }, | ||
2313 | .show = cfq_slice_idle_show, | ||
2314 | .store = cfq_slice_idle_store, | ||
2315 | }; | ||
2316 | static struct cfq_fs_entry cfq_max_depth_entry = { | ||
2317 | .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR }, | ||
2318 | .show = cfq_max_depth_show, | ||
2319 | .store = cfq_max_depth_store, | ||
2320 | }; | ||
2321 | |||
2322 | static struct attribute *default_attrs[] = { | ||
2323 | &cfq_quantum_entry.attr, | ||
2324 | &cfq_queued_entry.attr, | ||
2325 | &cfq_fifo_expire_sync_entry.attr, | ||
2326 | &cfq_fifo_expire_async_entry.attr, | ||
2327 | &cfq_back_max_entry.attr, | ||
2328 | &cfq_back_penalty_entry.attr, | ||
2329 | &cfq_slice_sync_entry.attr, | ||
2330 | &cfq_slice_async_entry.attr, | ||
2331 | &cfq_slice_async_rq_entry.attr, | ||
2332 | &cfq_slice_idle_entry.attr, | ||
2333 | &cfq_max_depth_entry.attr, | ||
2334 | NULL, | ||
2335 | }; | ||
2336 | |||
2337 | #define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr) | ||
2338 | |||
2339 | static ssize_t | ||
2340 | cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
2341 | { | ||
2342 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
2343 | struct cfq_fs_entry *entry = to_cfq(attr); | ||
2344 | |||
2345 | if (!entry->show) | ||
2346 | return -EIO; | ||
2347 | |||
2348 | return entry->show(e->elevator_data, page); | ||
2349 | } | ||
2350 | |||
2351 | static ssize_t | ||
2352 | cfq_attr_store(struct kobject *kobj, struct attribute *attr, | ||
2353 | const char *page, size_t length) | ||
2354 | { | ||
2355 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
2356 | struct cfq_fs_entry *entry = to_cfq(attr); | ||
2357 | |||
2358 | if (!entry->store) | ||
2359 | return -EIO; | ||
2360 | |||
2361 | return entry->store(e->elevator_data, page, length); | ||
2362 | } | ||
2363 | |||
2364 | static struct sysfs_ops cfq_sysfs_ops = { | ||
2365 | .show = cfq_attr_show, | ||
2366 | .store = cfq_attr_store, | ||
2367 | }; | ||
2368 | |||
2369 | static struct kobj_type cfq_ktype = { | ||
2370 | .sysfs_ops = &cfq_sysfs_ops, | ||
2371 | .default_attrs = default_attrs, | ||
2372 | }; | 2361 | }; |
2373 | 2362 | ||
2374 | static struct elevator_type iosched_cfq = { | 2363 | static struct elevator_type iosched_cfq = { |
@@ -2389,8 +2378,9 @@ static struct elevator_type iosched_cfq = { | |||
2389 | .elevator_may_queue_fn = cfq_may_queue, | 2378 | .elevator_may_queue_fn = cfq_may_queue, |
2390 | .elevator_init_fn = cfq_init_queue, | 2379 | .elevator_init_fn = cfq_init_queue, |
2391 | .elevator_exit_fn = cfq_exit_queue, | 2380 | .elevator_exit_fn = cfq_exit_queue, |
2381 | .trim = cfq_trim, | ||
2392 | }, | 2382 | }, |
2393 | .elevator_ktype = &cfq_ktype, | 2383 | .elevator_attrs = cfq_attrs, |
2394 | .elevator_name = "cfq", | 2384 | .elevator_name = "cfq", |
2395 | .elevator_owner = THIS_MODULE, | 2385 | .elevator_owner = THIS_MODULE, |
2396 | }; | 2386 | }; |
@@ -2419,7 +2409,13 @@ static int __init cfq_init(void) | |||
2419 | 2409 | ||
2420 | static void __exit cfq_exit(void) | 2410 | static void __exit cfq_exit(void) |
2421 | { | 2411 | { |
2412 | DECLARE_COMPLETION(all_gone); | ||
2422 | elv_unregister(&iosched_cfq); | 2413 | elv_unregister(&iosched_cfq); |
2414 | ioc_gone = &all_gone; | ||
2415 | barrier(); | ||
2416 | if (atomic_read(&ioc_count)) | ||
2417 | complete(ioc_gone); | ||
2418 | synchronize_rcu(); | ||
2423 | cfq_slab_kill(); | 2419 | cfq_slab_kill(); |
2424 | } | 2420 | } |
2425 | 2421 | ||
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 27e494b1bf97..399fa1e60e1f 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -694,11 +694,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
694 | /* | 694 | /* |
695 | * sysfs parts below | 695 | * sysfs parts below |
696 | */ | 696 | */ |
697 | struct deadline_fs_entry { | ||
698 | struct attribute attr; | ||
699 | ssize_t (*show)(struct deadline_data *, char *); | ||
700 | ssize_t (*store)(struct deadline_data *, const char *, size_t); | ||
701 | }; | ||
702 | 697 | ||
703 | static ssize_t | 698 | static ssize_t |
704 | deadline_var_show(int var, char *page) | 699 | deadline_var_show(int var, char *page) |
@@ -716,23 +711,25 @@ deadline_var_store(int *var, const char *page, size_t count) | |||
716 | } | 711 | } |
717 | 712 | ||
718 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | 713 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
719 | static ssize_t __FUNC(struct deadline_data *dd, char *page) \ | 714 | static ssize_t __FUNC(elevator_t *e, char *page) \ |
720 | { \ | 715 | { \ |
721 | int __data = __VAR; \ | 716 | struct deadline_data *dd = e->elevator_data; \ |
717 | int __data = __VAR; \ | ||
722 | if (__CONV) \ | 718 | if (__CONV) \ |
723 | __data = jiffies_to_msecs(__data); \ | 719 | __data = jiffies_to_msecs(__data); \ |
724 | return deadline_var_show(__data, (page)); \ | 720 | return deadline_var_show(__data, (page)); \ |
725 | } | 721 | } |
726 | SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1); | 722 | SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1); |
727 | SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1); | 723 | SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1); |
728 | SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0); | 724 | SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0); |
729 | SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0); | 725 | SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0); |
730 | SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0); | 726 | SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); |
731 | #undef SHOW_FUNCTION | 727 | #undef SHOW_FUNCTION |
732 | 728 | ||
733 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 729 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
734 | static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \ | 730 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ |
735 | { \ | 731 | { \ |
732 | struct deadline_data *dd = e->elevator_data; \ | ||
736 | int __data; \ | 733 | int __data; \ |
737 | int ret = deadline_var_store(&__data, (page), count); \ | 734 | int ret = deadline_var_store(&__data, (page), count); \ |
738 | if (__data < (MIN)) \ | 735 | if (__data < (MIN)) \ |
@@ -745,83 +742,24 @@ static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) | |||
745 | *(__PTR) = __data; \ | 742 | *(__PTR) = __data; \ |
746 | return ret; \ | 743 | return ret; \ |
747 | } | 744 | } |
748 | STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); | 745 | STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); |
749 | STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); | 746 | STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); |
750 | STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); | 747 | STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); |
751 | STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0); | 748 | STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0); |
752 | STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0); | 749 | STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0); |
753 | #undef STORE_FUNCTION | 750 | #undef STORE_FUNCTION |
754 | 751 | ||
755 | static struct deadline_fs_entry deadline_readexpire_entry = { | 752 | #define DD_ATTR(name) \ |
756 | .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR }, | 753 | __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \ |
757 | .show = deadline_readexpire_show, | 754 | deadline_##name##_store) |
758 | .store = deadline_readexpire_store, | 755 | |
759 | }; | 756 | static struct elv_fs_entry deadline_attrs[] = { |
760 | static struct deadline_fs_entry deadline_writeexpire_entry = { | 757 | DD_ATTR(read_expire), |
761 | .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR }, | 758 | DD_ATTR(write_expire), |
762 | .show = deadline_writeexpire_show, | 759 | DD_ATTR(writes_starved), |
763 | .store = deadline_writeexpire_store, | 760 | DD_ATTR(front_merges), |
764 | }; | 761 | DD_ATTR(fifo_batch), |
765 | static struct deadline_fs_entry deadline_writesstarved_entry = { | 762 | __ATTR_NULL |
766 | .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR }, | ||
767 | .show = deadline_writesstarved_show, | ||
768 | .store = deadline_writesstarved_store, | ||
769 | }; | ||
770 | static struct deadline_fs_entry deadline_frontmerges_entry = { | ||
771 | .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR }, | ||
772 | .show = deadline_frontmerges_show, | ||
773 | .store = deadline_frontmerges_store, | ||
774 | }; | ||
775 | static struct deadline_fs_entry deadline_fifobatch_entry = { | ||
776 | .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR }, | ||
777 | .show = deadline_fifobatch_show, | ||
778 | .store = deadline_fifobatch_store, | ||
779 | }; | ||
780 | |||
781 | static struct attribute *default_attrs[] = { | ||
782 | &deadline_readexpire_entry.attr, | ||
783 | &deadline_writeexpire_entry.attr, | ||
784 | &deadline_writesstarved_entry.attr, | ||
785 | &deadline_frontmerges_entry.attr, | ||
786 | &deadline_fifobatch_entry.attr, | ||
787 | NULL, | ||
788 | }; | ||
789 | |||
790 | #define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr) | ||
791 | |||
792 | static ssize_t | ||
793 | deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
794 | { | ||
795 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
796 | struct deadline_fs_entry *entry = to_deadline(attr); | ||
797 | |||
798 | if (!entry->show) | ||
799 | return -EIO; | ||
800 | |||
801 | return entry->show(e->elevator_data, page); | ||
802 | } | ||
803 | |||
804 | static ssize_t | ||
805 | deadline_attr_store(struct kobject *kobj, struct attribute *attr, | ||
806 | const char *page, size_t length) | ||
807 | { | ||
808 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
809 | struct deadline_fs_entry *entry = to_deadline(attr); | ||
810 | |||
811 | if (!entry->store) | ||
812 | return -EIO; | ||
813 | |||
814 | return entry->store(e->elevator_data, page, length); | ||
815 | } | ||
816 | |||
817 | static struct sysfs_ops deadline_sysfs_ops = { | ||
818 | .show = deadline_attr_show, | ||
819 | .store = deadline_attr_store, | ||
820 | }; | ||
821 | |||
822 | static struct kobj_type deadline_ktype = { | ||
823 | .sysfs_ops = &deadline_sysfs_ops, | ||
824 | .default_attrs = default_attrs, | ||
825 | }; | 763 | }; |
826 | 764 | ||
827 | static struct elevator_type iosched_deadline = { | 765 | static struct elevator_type iosched_deadline = { |
@@ -840,7 +778,7 @@ static struct elevator_type iosched_deadline = { | |||
840 | .elevator_exit_fn = deadline_exit_queue, | 778 | .elevator_exit_fn = deadline_exit_queue, |
841 | }, | 779 | }, |
842 | 780 | ||
843 | .elevator_ktype = &deadline_ktype, | 781 | .elevator_attrs = deadline_attrs, |
844 | .elevator_name = "deadline", | 782 | .elevator_name = "deadline", |
845 | .elevator_owner = THIS_MODULE, | 783 | .elevator_owner = THIS_MODULE, |
846 | }; | 784 | }; |
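Note: with the hand-rolled kobject glue deleted from both schedulers, an I/O scheduler now only declares show/store helpers that take the elevator itself plus a flat elv_fs_entry table, and points .elevator_attrs at it; elv_register_queue() (see the elevator.c hunks below) creates one sysfs file per entry. A condensed sketch of the pattern for a hypothetical scheduler ("foo" and its foo_data are made up; __ATTR/__ATTR_NULL are the stock sysfs macros used above):

    struct foo_data {
            unsigned int quantum;           /* one example tunable */
    };

    static ssize_t foo_quantum_show(elevator_t *e, char *page)
    {
            struct foo_data *fd = e->elevator_data;

            return sprintf(page, "%u\n", fd->quantum);
    }

    static ssize_t foo_quantum_store(elevator_t *e, const char *page, size_t count)
    {
            struct foo_data *fd = e->elevator_data;

            fd->quantum = simple_strtoul(page, NULL, 10);
            return count;
    }

    #define FOO_ATTR(name) \
            __ATTR(name, S_IRUGO|S_IWUSR, foo_##name##_show, foo_##name##_store)

    static struct elv_fs_entry foo_attrs[] = {
            FOO_ATTR(quantum),
            __ATTR_NULL
    };

    /* hooked up with  .elevator_attrs = foo_attrs  in struct elevator_type */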
diff --git a/block/elevator.c b/block/elevator.c
index 24b702d649a9..db3d0d8296a0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -120,15 +120,10 @@ static struct elevator_type *elevator_get(const char *name) | |||
120 | return e; | 120 | return e; |
121 | } | 121 | } |
122 | 122 | ||
123 | static int elevator_attach(request_queue_t *q, struct elevator_type *e, | 123 | static int elevator_attach(request_queue_t *q, struct elevator_queue *eq) |
124 | struct elevator_queue *eq) | ||
125 | { | 124 | { |
126 | int ret = 0; | 125 | int ret = 0; |
127 | 126 | ||
128 | memset(eq, 0, sizeof(*eq)); | ||
129 | eq->ops = &e->ops; | ||
130 | eq->elevator_type = e; | ||
131 | |||
132 | q->elevator = eq; | 127 | q->elevator = eq; |
133 | 128 | ||
134 | if (eq->ops->elevator_init_fn) | 129 | if (eq->ops->elevator_init_fn) |
@@ -154,6 +149,32 @@ static int __init elevator_setup(char *str) | |||
154 | 149 | ||
155 | __setup("elevator=", elevator_setup); | 150 | __setup("elevator=", elevator_setup); |
156 | 151 | ||
152 | static struct kobj_type elv_ktype; | ||
153 | |||
154 | static elevator_t *elevator_alloc(struct elevator_type *e) | ||
155 | { | ||
156 | elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL); | ||
157 | if (eq) { | ||
158 | memset(eq, 0, sizeof(*eq)); | ||
159 | eq->ops = &e->ops; | ||
160 | eq->elevator_type = e; | ||
161 | kobject_init(&eq->kobj); | ||
162 | snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched"); | ||
163 | eq->kobj.ktype = &elv_ktype; | ||
164 | mutex_init(&eq->sysfs_lock); | ||
165 | } else { | ||
166 | elevator_put(e); | ||
167 | } | ||
168 | return eq; | ||
169 | } | ||
170 | |||
171 | static void elevator_release(struct kobject *kobj) | ||
172 | { | ||
173 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
174 | elevator_put(e->elevator_type); | ||
175 | kfree(e); | ||
176 | } | ||
177 | |||
157 | int elevator_init(request_queue_t *q, char *name) | 178 | int elevator_init(request_queue_t *q, char *name) |
158 | { | 179 | { |
159 | struct elevator_type *e = NULL; | 180 | struct elevator_type *e = NULL; |
@@ -176,29 +197,26 @@ int elevator_init(request_queue_t *q, char *name) | |||
176 | e = elevator_get("noop"); | 197 | e = elevator_get("noop"); |
177 | } | 198 | } |
178 | 199 | ||
179 | eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL); | 200 | eq = elevator_alloc(e); |
180 | if (!eq) { | 201 | if (!eq) |
181 | elevator_put(e); | ||
182 | return -ENOMEM; | 202 | return -ENOMEM; |
183 | } | ||
184 | 203 | ||
185 | ret = elevator_attach(q, e, eq); | 204 | ret = elevator_attach(q, eq); |
186 | if (ret) { | 205 | if (ret) |
187 | kfree(eq); | 206 | kobject_put(&eq->kobj); |
188 | elevator_put(e); | ||
189 | } | ||
190 | 207 | ||
191 | return ret; | 208 | return ret; |
192 | } | 209 | } |
193 | 210 | ||
194 | void elevator_exit(elevator_t *e) | 211 | void elevator_exit(elevator_t *e) |
195 | { | 212 | { |
213 | mutex_lock(&e->sysfs_lock); | ||
196 | if (e->ops->elevator_exit_fn) | 214 | if (e->ops->elevator_exit_fn) |
197 | e->ops->elevator_exit_fn(e); | 215 | e->ops->elevator_exit_fn(e); |
216 | e->ops = NULL; | ||
217 | mutex_unlock(&e->sysfs_lock); | ||
198 | 218 | ||
199 | elevator_put(e->elevator_type); | 219 | kobject_put(&e->kobj); |
200 | e->elevator_type = NULL; | ||
201 | kfree(e); | ||
202 | } | 220 | } |
203 | 221 | ||
204 | /* | 222 | /* |
@@ -627,26 +645,79 @@ void elv_completed_request(request_queue_t *q, struct request *rq) | |||
627 | } | 645 | } |
628 | } | 646 | } |
629 | 647 | ||
630 | int elv_register_queue(struct request_queue *q) | 648 | #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) |
649 | |||
650 | static ssize_t | ||
651 | elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
631 | { | 652 | { |
632 | elevator_t *e = q->elevator; | 653 | elevator_t *e = container_of(kobj, elevator_t, kobj); |
654 | struct elv_fs_entry *entry = to_elv(attr); | ||
655 | ssize_t error; | ||
633 | 656 | ||
634 | e->kobj.parent = kobject_get(&q->kobj); | 657 | if (!entry->show) |
635 | if (!e->kobj.parent) | 658 | return -EIO; |
636 | return -EBUSY; | ||
637 | 659 | ||
638 | snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched"); | 660 | mutex_lock(&e->sysfs_lock); |
639 | e->kobj.ktype = e->elevator_type->elevator_ktype; | 661 | error = e->ops ? entry->show(e, page) : -ENOENT; |
662 | mutex_unlock(&e->sysfs_lock); | ||
663 | return error; | ||
664 | } | ||
665 | |||
666 | static ssize_t | ||
667 | elv_attr_store(struct kobject *kobj, struct attribute *attr, | ||
668 | const char *page, size_t length) | ||
669 | { | ||
670 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
671 | struct elv_fs_entry *entry = to_elv(attr); | ||
672 | ssize_t error; | ||
673 | |||
674 | if (!entry->store) | ||
675 | return -EIO; | ||
676 | |||
677 | mutex_lock(&e->sysfs_lock); | ||
678 | error = e->ops ? entry->store(e, page, length) : -ENOENT; | ||
679 | mutex_unlock(&e->sysfs_lock); | ||
680 | return error; | ||
681 | } | ||
682 | |||
683 | static struct sysfs_ops elv_sysfs_ops = { | ||
684 | .show = elv_attr_show, | ||
685 | .store = elv_attr_store, | ||
686 | }; | ||
687 | |||
688 | static struct kobj_type elv_ktype = { | ||
689 | .sysfs_ops = &elv_sysfs_ops, | ||
690 | .release = elevator_release, | ||
691 | }; | ||
640 | 692 | ||
641 | return kobject_register(&e->kobj); | 693 | int elv_register_queue(struct request_queue *q) |
694 | { | ||
695 | elevator_t *e = q->elevator; | ||
696 | int error; | ||
697 | |||
698 | e->kobj.parent = &q->kobj; | ||
699 | |||
700 | error = kobject_add(&e->kobj); | ||
701 | if (!error) { | ||
702 | struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; | ||
703 | if (attr) { | ||
704 | while (attr->attr.name) { | ||
705 | if (sysfs_create_file(&e->kobj, &attr->attr)) | ||
706 | break; | ||
707 | attr++; | ||
708 | } | ||
709 | } | ||
710 | kobject_uevent(&e->kobj, KOBJ_ADD); | ||
711 | } | ||
712 | return error; | ||
642 | } | 713 | } |
643 | 714 | ||
644 | void elv_unregister_queue(struct request_queue *q) | 715 | void elv_unregister_queue(struct request_queue *q) |
645 | { | 716 | { |
646 | if (q) { | 717 | if (q) { |
647 | elevator_t *e = q->elevator; | 718 | elevator_t *e = q->elevator; |
648 | kobject_unregister(&e->kobj); | 719 | kobject_uevent(&e->kobj, KOBJ_REMOVE); |
649 | kobject_put(&q->kobj); | 720 | kobject_del(&e->kobj); |
650 | } | 721 | } |
651 | } | 722 | } |
652 | 723 | ||
@@ -675,21 +746,15 @@ void elv_unregister(struct elevator_type *e) | |||
675 | /* | 746 | /* |
676 | * Iterate every thread in the process to remove the io contexts. | 747 | * Iterate every thread in the process to remove the io contexts. |
677 | */ | 748 | */ |
678 | read_lock(&tasklist_lock); | 749 | if (e->ops.trim) { |
679 | do_each_thread(g, p) { | 750 | read_lock(&tasklist_lock); |
680 | struct io_context *ioc = p->io_context; | 751 | do_each_thread(g, p) { |
681 | if (ioc && ioc->cic) { | 752 | task_lock(p); |
682 | ioc->cic->exit(ioc->cic); | 753 | e->ops.trim(p->io_context); |
683 | ioc->cic->dtor(ioc->cic); | 754 | task_unlock(p); |
684 | ioc->cic = NULL; | 755 | } while_each_thread(g, p); |
685 | } | 756 | read_unlock(&tasklist_lock); |
686 | if (ioc && ioc->aic) { | 757 | } |
687 | ioc->aic->exit(ioc->aic); | ||
688 | ioc->aic->dtor(ioc->aic); | ||
689 | ioc->aic = NULL; | ||
690 | } | ||
691 | } while_each_thread(g, p); | ||
692 | read_unlock(&tasklist_lock); | ||
693 | 758 | ||
694 | spin_lock_irq(&elv_list_lock); | 759 | spin_lock_irq(&elv_list_lock); |
695 | list_del_init(&e->list); | 760 | list_del_init(&e->list); |
@@ -703,16 +768,16 @@ EXPORT_SYMBOL_GPL(elv_unregister); | |||
703 | * need for the new one. this way we have a chance of going back to the old | 768 | * need for the new one. this way we have a chance of going back to the old |
704 | * one, if the new one fails init for some reason. | 769 | * one, if the new one fails init for some reason. |
705 | */ | 770 | */ |
706 | static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | 771 | static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) |
707 | { | 772 | { |
708 | elevator_t *old_elevator, *e; | 773 | elevator_t *old_elevator, *e; |
709 | 774 | ||
710 | /* | 775 | /* |
711 | * Allocate new elevator | 776 | * Allocate new elevator |
712 | */ | 777 | */ |
713 | e = kmalloc(sizeof(elevator_t), GFP_KERNEL); | 778 | e = elevator_alloc(new_e); |
714 | if (!e) | 779 | if (!e) |
715 | goto error; | 780 | return 0; |
716 | 781 | ||
717 | /* | 782 | /* |
718 | * Turn on BYPASS and drain all requests w/ elevator private data | 783 | * Turn on BYPASS and drain all requests w/ elevator private data |
@@ -743,7 +808,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
743 | /* | 808 | /* |
744 | * attach and start new elevator | 809 | * attach and start new elevator |
745 | */ | 810 | */ |
746 | if (elevator_attach(q, new_e, e)) | 811 | if (elevator_attach(q, e)) |
747 | goto fail; | 812 | goto fail; |
748 | 813 | ||
749 | if (elv_register_queue(q)) | 814 | if (elv_register_queue(q)) |
@@ -754,7 +819,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
754 | */ | 819 | */ |
755 | elevator_exit(old_elevator); | 820 | elevator_exit(old_elevator); |
756 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 821 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
757 | return; | 822 | return 1; |
758 | 823 | ||
759 | fail_register: | 824 | fail_register: |
760 | /* | 825 | /* |
@@ -767,10 +832,9 @@ fail: | |||
767 | q->elevator = old_elevator; | 832 | q->elevator = old_elevator; |
768 | elv_register_queue(q); | 833 | elv_register_queue(q); |
769 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 834 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
770 | kfree(e); | 835 | if (e) |
771 | error: | 836 | kobject_put(&e->kobj); |
772 | elevator_put(new_e); | 837 | return 0; |
773 | printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name); | ||
774 | } | 838 | } |
775 | 839 | ||
776 | ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) | 840 | ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) |
@@ -797,7 +861,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) | |||
797 | return count; | 861 | return count; |
798 | } | 862 | } |
799 | 863 | ||
800 | elevator_switch(q, e); | 864 | if (!elevator_switch(q, e)) |
865 | printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name); | ||
801 | return count; | 866 | return count; |
802 | } | 867 | } |
803 | 868 | ||
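Note: elv_unregister() above no longer pokes at ioc->cic/ioc->aic directly; it invokes the new optional ops.trim hook on each task's io_context under task_lock(), and a scheduler that caches per-process state supplies that hook (cfq does so with cfq_trim earlier in this diff). The general shape of such a hook, with illustrative names and a defensive NULL check that the excerpt above does not show:

    /* Called for every task when the scheduler module goes away:
     * drop whatever this scheduler cached in the io_context. */
    static void example_trim(struct io_context *ioc)
    {
            if (!ioc)
                    return;                 /* tasks that never issued I/O */

            ioc->set_ioprio = NULL;         /* stop routing ioprio updates here */
            if (ioc->cic) {
                    ioc->cic->dtor(ioc->cic);       /* frees the cached contexts */
                    ioc->cic = NULL;
            }
    }

    /* wired up with  .trim = example_trim  inside struct elevator_ops */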
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0ef2971a9e82..6c793b196aa9 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue); | |||
1740 | * Hopefully the low level driver will have finished any | 1740 | * Hopefully the low level driver will have finished any |
1741 | * outstanding requests first... | 1741 | * outstanding requests first... |
1742 | **/ | 1742 | **/ |
1743 | void blk_cleanup_queue(request_queue_t * q) | 1743 | static void blk_release_queue(struct kobject *kobj) |
1744 | { | 1744 | { |
1745 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); | ||
1745 | struct request_list *rl = &q->rq; | 1746 | struct request_list *rl = &q->rq; |
1746 | 1747 | ||
1747 | if (!atomic_dec_and_test(&q->refcnt)) | ||
1748 | return; | ||
1749 | |||
1750 | if (q->elevator) | ||
1751 | elevator_exit(q->elevator); | ||
1752 | |||
1753 | blk_sync_queue(q); | 1748 | blk_sync_queue(q); |
1754 | 1749 | ||
1755 | if (rl->rq_pool) | 1750 | if (rl->rq_pool) |
@@ -1761,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q) | |||
1761 | kmem_cache_free(requestq_cachep, q); | 1756 | kmem_cache_free(requestq_cachep, q); |
1762 | } | 1757 | } |
1763 | 1758 | ||
1759 | void blk_put_queue(request_queue_t *q) | ||
1760 | { | ||
1761 | kobject_put(&q->kobj); | ||
1762 | } | ||
1763 | EXPORT_SYMBOL(blk_put_queue); | ||
1764 | |||
1765 | void blk_cleanup_queue(request_queue_t * q) | ||
1766 | { | ||
1767 | mutex_lock(&q->sysfs_lock); | ||
1768 | set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); | ||
1769 | mutex_unlock(&q->sysfs_lock); | ||
1770 | |||
1771 | if (q->elevator) | ||
1772 | elevator_exit(q->elevator); | ||
1773 | |||
1774 | blk_put_queue(q); | ||
1775 | } | ||
1776 | |||
1764 | EXPORT_SYMBOL(blk_cleanup_queue); | 1777 | EXPORT_SYMBOL(blk_cleanup_queue); |
1765 | 1778 | ||
1766 | static int blk_init_free_list(request_queue_t *q) | 1779 | static int blk_init_free_list(request_queue_t *q) |
@@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask) | |||
1788 | } | 1801 | } |
1789 | EXPORT_SYMBOL(blk_alloc_queue); | 1802 | EXPORT_SYMBOL(blk_alloc_queue); |
1790 | 1803 | ||
1804 | static struct kobj_type queue_ktype; | ||
1805 | |||
1791 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | 1806 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) |
1792 | { | 1807 | { |
1793 | request_queue_t *q; | 1808 | request_queue_t *q; |
@@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
1798 | 1813 | ||
1799 | memset(q, 0, sizeof(*q)); | 1814 | memset(q, 0, sizeof(*q)); |
1800 | init_timer(&q->unplug_timer); | 1815 | init_timer(&q->unplug_timer); |
1801 | atomic_set(&q->refcnt, 1); | 1816 | |
1817 | snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); | ||
1818 | q->kobj.ktype = &queue_ktype; | ||
1819 | kobject_init(&q->kobj); | ||
1802 | 1820 | ||
1803 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; | 1821 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; |
1804 | q->backing_dev_info.unplug_io_data = q; | 1822 | q->backing_dev_info.unplug_io_data = q; |
1805 | 1823 | ||
1824 | mutex_init(&q->sysfs_lock); | ||
1825 | |||
1806 | return q; | 1826 | return q; |
1807 | } | 1827 | } |
1808 | EXPORT_SYMBOL(blk_alloc_queue_node); | 1828 | EXPORT_SYMBOL(blk_alloc_queue_node); |
@@ -1854,8 +1874,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
1854 | return NULL; | 1874 | return NULL; |
1855 | 1875 | ||
1856 | q->node = node_id; | 1876 | q->node = node_id; |
1857 | if (blk_init_free_list(q)) | 1877 | if (blk_init_free_list(q)) { |
1858 | goto out_init; | 1878 | kmem_cache_free(requestq_cachep, q); |
1879 | return NULL; | ||
1880 | } | ||
1859 | 1881 | ||
1860 | /* | 1882 | /* |
1861 | * if caller didn't supply a lock, they get per-queue locking with | 1883 | * if caller didn't supply a lock, they get per-queue locking with |
@@ -1891,9 +1913,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
1891 | return q; | 1913 | return q; |
1892 | } | 1914 | } |
1893 | 1915 | ||
1894 | blk_cleanup_queue(q); | 1916 | blk_put_queue(q); |
1895 | out_init: | ||
1896 | kmem_cache_free(requestq_cachep, q); | ||
1897 | return NULL; | 1917 | return NULL; |
1898 | } | 1918 | } |
1899 | EXPORT_SYMBOL(blk_init_queue_node); | 1919 | EXPORT_SYMBOL(blk_init_queue_node); |
@@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node); | |||
1901 | int blk_get_queue(request_queue_t *q) | 1921 | int blk_get_queue(request_queue_t *q) |
1902 | { | 1922 | { |
1903 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | 1923 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { |
1904 | atomic_inc(&q->refcnt); | 1924 | kobject_get(&q->kobj); |
1905 | return 0; | 1925 | return 0; |
1906 | } | 1926 | } |
1907 | 1927 | ||
@@ -3477,10 +3497,12 @@ void put_io_context(struct io_context *ioc) | |||
3477 | BUG_ON(atomic_read(&ioc->refcount) == 0); | 3497 | BUG_ON(atomic_read(&ioc->refcount) == 0); |
3478 | 3498 | ||
3479 | if (atomic_dec_and_test(&ioc->refcount)) { | 3499 | if (atomic_dec_and_test(&ioc->refcount)) { |
3500 | rcu_read_lock(); | ||
3480 | if (ioc->aic && ioc->aic->dtor) | 3501 | if (ioc->aic && ioc->aic->dtor) |
3481 | ioc->aic->dtor(ioc->aic); | 3502 | ioc->aic->dtor(ioc->aic); |
3482 | if (ioc->cic && ioc->cic->dtor) | 3503 | if (ioc->cic && ioc->cic->dtor) |
3483 | ioc->cic->dtor(ioc->cic); | 3504 | ioc->cic->dtor(ioc->cic); |
3505 | rcu_read_unlock(); | ||
3484 | 3506 | ||
3485 | kmem_cache_free(iocontext_cachep, ioc); | 3507 | kmem_cache_free(iocontext_cachep, ioc); |
3486 | } | 3508 | } |
@@ -3614,10 +3636,13 @@ static ssize_t | |||
3614 | queue_requests_store(struct request_queue *q, const char *page, size_t count) | 3636 | queue_requests_store(struct request_queue *q, const char *page, size_t count) |
3615 | { | 3637 | { |
3616 | struct request_list *rl = &q->rq; | 3638 | struct request_list *rl = &q->rq; |
3639 | unsigned long nr; | ||
3640 | int ret = queue_var_store(&nr, page, count); | ||
3641 | if (nr < BLKDEV_MIN_RQ) | ||
3642 | nr = BLKDEV_MIN_RQ; | ||
3617 | 3643 | ||
3618 | int ret = queue_var_store(&q->nr_requests, page, count); | 3644 | spin_lock_irq(q->queue_lock); |
3619 | if (q->nr_requests < BLKDEV_MIN_RQ) | 3645 | q->nr_requests = nr; |
3620 | q->nr_requests = BLKDEV_MIN_RQ; | ||
3621 | blk_queue_congestion_threshold(q); | 3646 | blk_queue_congestion_threshold(q); |
3622 | 3647 | ||
3623 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) | 3648 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) |
@@ -3643,6 +3668,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) | |||
3643 | blk_clear_queue_full(q, WRITE); | 3668 | blk_clear_queue_full(q, WRITE); |
3644 | wake_up(&rl->wait[WRITE]); | 3669 | wake_up(&rl->wait[WRITE]); |
3645 | } | 3670 | } |
3671 | spin_unlock_irq(q->queue_lock); | ||
3646 | return ret; | 3672 | return ret; |
3647 | } | 3673 | } |
3648 | 3674 | ||
@@ -3758,13 +3784,19 @@ static ssize_t | |||
3758 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | 3784 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
3759 | { | 3785 | { |
3760 | struct queue_sysfs_entry *entry = to_queue(attr); | 3786 | struct queue_sysfs_entry *entry = to_queue(attr); |
3761 | struct request_queue *q; | 3787 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); |
3788 | ssize_t res; | ||
3762 | 3789 | ||
3763 | q = container_of(kobj, struct request_queue, kobj); | ||
3764 | if (!entry->show) | 3790 | if (!entry->show) |
3765 | return -EIO; | 3791 | return -EIO; |
3766 | 3792 | mutex_lock(&q->sysfs_lock); | |
3767 | return entry->show(q, page); | 3793 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { |
3794 | mutex_unlock(&q->sysfs_lock); | ||
3795 | return -ENOENT; | ||
3796 | } | ||
3797 | res = entry->show(q, page); | ||
3798 | mutex_unlock(&q->sysfs_lock); | ||
3799 | return res; | ||
3768 | } | 3800 | } |
3769 | 3801 | ||
3770 | static ssize_t | 3802 | static ssize_t |
@@ -3772,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, | |||
3772 | const char *page, size_t length) | 3804 | const char *page, size_t length) |
3773 | { | 3805 | { |
3774 | struct queue_sysfs_entry *entry = to_queue(attr); | 3806 | struct queue_sysfs_entry *entry = to_queue(attr); |
3775 | struct request_queue *q; | 3807 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); |
3808 | |||
3809 | ssize_t res; | ||
3776 | 3810 | ||
3777 | q = container_of(kobj, struct request_queue, kobj); | ||
3778 | if (!entry->store) | 3811 | if (!entry->store) |
3779 | return -EIO; | 3812 | return -EIO; |
3780 | 3813 | mutex_lock(&q->sysfs_lock); | |
3781 | return entry->store(q, page, length); | 3814 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { |
3815 | mutex_unlock(&q->sysfs_lock); | ||
3816 | return -ENOENT; | ||
3817 | } | ||
3818 | res = entry->store(q, page, length); | ||
3819 | mutex_unlock(&q->sysfs_lock); | ||
3820 | return res; | ||
3782 | } | 3821 | } |
3783 | 3822 | ||
3784 | static struct sysfs_ops queue_sysfs_ops = { | 3823 | static struct sysfs_ops queue_sysfs_ops = { |
@@ -3789,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = { | |||
3789 | static struct kobj_type queue_ktype = { | 3828 | static struct kobj_type queue_ktype = { |
3790 | .sysfs_ops = &queue_sysfs_ops, | 3829 | .sysfs_ops = &queue_sysfs_ops, |
3791 | .default_attrs = default_attrs, | 3830 | .default_attrs = default_attrs, |
3831 | .release = blk_release_queue, | ||
3792 | }; | 3832 | }; |
3793 | 3833 | ||
3794 | int blk_register_queue(struct gendisk *disk) | 3834 | int blk_register_queue(struct gendisk *disk) |
@@ -3801,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk) | |||
3801 | return -ENXIO; | 3841 | return -ENXIO; |
3802 | 3842 | ||
3803 | q->kobj.parent = kobject_get(&disk->kobj); | 3843 | q->kobj.parent = kobject_get(&disk->kobj); |
3804 | if (!q->kobj.parent) | ||
3805 | return -EBUSY; | ||
3806 | 3844 | ||
3807 | snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); | 3845 | ret = kobject_add(&q->kobj); |
3808 | q->kobj.ktype = &queue_ktype; | ||
3809 | |||
3810 | ret = kobject_register(&q->kobj); | ||
3811 | if (ret < 0) | 3846 | if (ret < 0) |
3812 | return ret; | 3847 | return ret; |
3813 | 3848 | ||
3849 | kobject_uevent(&q->kobj, KOBJ_ADD); | ||
3850 | |||
3814 | ret = elv_register_queue(q); | 3851 | ret = elv_register_queue(q); |
3815 | if (ret) { | 3852 | if (ret) { |
3816 | kobject_unregister(&q->kobj); | 3853 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
3854 | kobject_del(&q->kobj); | ||
3817 | return ret; | 3855 | return ret; |
3818 | } | 3856 | } |
3819 | 3857 | ||
@@ -3827,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk) | |||
3827 | if (q && q->request_fn) { | 3865 | if (q && q->request_fn) { |
3828 | elv_unregister_queue(q); | 3866 | elv_unregister_queue(q); |
3829 | 3867 | ||
3830 | kobject_unregister(&q->kobj); | 3868 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
3869 | kobject_del(&q->kobj); | ||
3831 | kobject_put(&disk->kobj); | 3870 | kobject_put(&disk->kobj); |
3832 | } | 3871 | } |
3833 | } | 3872 | } |
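The queue sysfs rework above takes q->sysfs_lock around every attribute handler and checks QUEUE_FLAG_DEAD first, so a show or store racing with queue teardown returns -ENOENT instead of touching a dying queue. A minimal sketch of that guard pattern follows; struct my_obj, MY_FLAG_DEAD and my_attr_show() are illustrative names, not kernel symbols.

    #include <linux/mutex.h>
    #include <linux/bitops.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    struct my_obj {
        struct mutex  sysfs_lock;
        unsigned long flags;
        ssize_t       (*show)(struct my_obj *obj, char *page);
    };
    #define MY_FLAG_DEAD 0

    static ssize_t my_attr_show(struct my_obj *obj, char *page)
    {
        ssize_t res;

        mutex_lock(&obj->sysfs_lock);
        if (test_bit(MY_FLAG_DEAD, &obj->flags)) {
            /* object already went through cleanup; refuse access */
            mutex_unlock(&obj->sysfs_lock);
            return -ENOENT;
        }
        res = obj->show(obj, page);   /* teardown is excluded while we hold the lock */
        mutex_unlock(&obj->sysfs_lock);
        return res;
    }

The store path is guarded the same way, which is exactly what the queue_attr_show()/queue_attr_store() hunks above do.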
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 5f6d1a5cce11..0010704739e3 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1307,7 +1307,7 @@ static int __init loop_init(void) | |||
1307 | 1307 | ||
1308 | out_mem4: | 1308 | out_mem4: |
1309 | while (i--) | 1309 | while (i--) |
1310 | blk_put_queue(loop_dev[i].lo_queue); | 1310 | blk_cleanup_queue(loop_dev[i].lo_queue); |
1311 | devfs_remove("loop"); | 1311 | devfs_remove("loop"); |
1312 | i = max_loop; | 1312 | i = max_loop; |
1313 | out_mem3: | 1313 | out_mem3: |
@@ -1328,7 +1328,7 @@ static void loop_exit(void) | |||
1328 | 1328 | ||
1329 | for (i = 0; i < max_loop; i++) { | 1329 | for (i = 0; i < max_loop; i++) { |
1330 | del_gendisk(disks[i]); | 1330 | del_gendisk(disks[i]); |
1331 | blk_put_queue(loop_dev[i].lo_queue); | 1331 | blk_cleanup_queue(loop_dev[i].lo_queue); |
1332 | put_disk(disks[i]); | 1332 | put_disk(disks[i]); |
1333 | } | 1333 | } |
1334 | devfs_remove("loop"); | 1334 | devfs_remove("loop"); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index bc9b2bcd7dba..476a5b553f34 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -2514,7 +2514,7 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd) | |||
2514 | return 0; | 2514 | return 0; |
2515 | 2515 | ||
2516 | out_new_dev: | 2516 | out_new_dev: |
2517 | blk_put_queue(disk->queue); | 2517 | blk_cleanup_queue(disk->queue); |
2518 | out_mem2: | 2518 | out_mem2: |
2519 | put_disk(disk); | 2519 | put_disk(disk); |
2520 | out_mem: | 2520 | out_mem: |
@@ -2555,7 +2555,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd) | |||
2555 | DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); | 2555 | DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); |
2556 | 2556 | ||
2557 | del_gendisk(pd->disk); | 2557 | del_gendisk(pd->disk); |
2558 | blk_put_queue(pd->disk->queue); | 2558 | blk_cleanup_queue(pd->disk->queue); |
2559 | put_disk(pd->disk); | 2559 | put_disk(pd->disk); |
2560 | 2560 | ||
2561 | pkt_devs[idx] = NULL; | 2561 | pkt_devs[idx] = NULL; |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 4ada1268b40d..c16e66b9c7a7 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
@@ -1131,7 +1131,7 @@ static void mm_pci_remove(struct pci_dev *dev) | |||
1131 | pci_free_consistent(card->dev, PAGE_SIZE*2, | 1131 | pci_free_consistent(card->dev, PAGE_SIZE*2, |
1132 | card->mm_pages[1].desc, | 1132 | card->mm_pages[1].desc, |
1133 | card->mm_pages[1].page_dma); | 1133 | card->mm_pages[1].page_dma); |
1134 | blk_put_queue(card->queue); | 1134 | blk_cleanup_queue(card->queue); |
1135 | } | 1135 | } |
1136 | 1136 | ||
1137 | static const struct pci_device_id mm_pci_ids[] = { { | 1137 | static const struct pci_device_id mm_pci_ids[] = { { |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 745ca1f67b14..88d60202b9db 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -840,7 +840,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent) | |||
840 | bad3: | 840 | bad3: |
841 | mempool_destroy(md->io_pool); | 841 | mempool_destroy(md->io_pool); |
842 | bad2: | 842 | bad2: |
843 | blk_put_queue(md->queue); | 843 | blk_cleanup_queue(md->queue); |
844 | free_minor(minor); | 844 | free_minor(minor); |
845 | bad1: | 845 | bad1: |
846 | kfree(md); | 846 | kfree(md); |
@@ -860,7 +860,7 @@ static void free_dev(struct mapped_device *md) | |||
860 | del_gendisk(md->disk); | 860 | del_gendisk(md->disk); |
861 | free_minor(minor); | 861 | free_minor(minor); |
862 | put_disk(md->disk); | 862 | put_disk(md->disk); |
863 | blk_put_queue(md->queue); | 863 | blk_cleanup_queue(md->queue); |
864 | kfree(md); | 864 | kfree(md); |
865 | } | 865 | } |
866 | 866 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index d05e3125d298..5ed2228745cb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -213,8 +213,11 @@ static void mddev_put(mddev_t *mddev) | |||
213 | return; | 213 | return; |
214 | if (!mddev->raid_disks && list_empty(&mddev->disks)) { | 214 | if (!mddev->raid_disks && list_empty(&mddev->disks)) { |
215 | list_del(&mddev->all_mddevs); | 215 | list_del(&mddev->all_mddevs); |
216 | blk_put_queue(mddev->queue); | 216 | /* that blocks */ |
217 | blk_cleanup_queue(mddev->queue); | ||
218 | /* that also blocks */ | ||
217 | kobject_unregister(&mddev->kobj); | 219 | kobject_unregister(&mddev->kobj); |
220 | /* result blows... */ | ||
218 | } | 221 | } |
219 | spin_unlock(&all_mddevs_lock); | 222 | spin_unlock(&all_mddevs_lock); |
220 | } | 223 | } |
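The driver conversions above (loop, pktcdvd, umem, dm, md) swap blk_put_queue() for blk_cleanup_queue(): with the queue kobject now released through queue_ktype's new ->release hook, drivers are expected to go through the cleanup path, which marks the queue dead before the remaining references are dropped. A hedged sketch of the resulting teardown order for a simple block driver; struct mydev and mydev_remove() are made-up names.

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/slab.h>

    struct mydev {                          /* hypothetical private data */
        struct gendisk       *disk;
        struct request_queue *queue;
    };

    static void mydev_remove(struct mydev *md)
    {
        del_gendisk(md->disk);              /* no new I/O after this */
        blk_cleanup_queue(md->queue);       /* was blk_put_queue() before this series */
        put_disk(md->disk);
        kfree(md);
    }

The ordering matters: the gendisk goes away first so nothing can submit new requests, then the queue is shut down, then the remaining object references are dropped.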
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 830528dce0ca..dc845f36fe49 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
@@ -100,6 +100,10 @@ static int max_interrupt_work = 10; | |||
100 | static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; | 100 | static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; |
101 | static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n"; | 101 | static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n"; |
102 | 102 | ||
103 | #if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA)) | ||
104 | #define EL3_SUSPEND | ||
105 | #endif | ||
106 | |||
103 | #ifdef EL3_DEBUG | 107 | #ifdef EL3_DEBUG |
104 | static int el3_debug = EL3_DEBUG; | 108 | static int el3_debug = EL3_DEBUG; |
105 | #else | 109 | #else |
@@ -174,9 +178,6 @@ struct el3_private { | |||
174 | /* skb send-queue */ | 178 | /* skb send-queue */ |
175 | int head, size; | 179 | int head, size; |
176 | struct sk_buff *queue[SKB_QUEUE_SIZE]; | 180 | struct sk_buff *queue[SKB_QUEUE_SIZE]; |
177 | #ifdef CONFIG_PM_LEGACY | ||
178 | struct pm_dev *pmdev; | ||
179 | #endif | ||
180 | enum { | 181 | enum { |
181 | EL3_MCA, | 182 | EL3_MCA, |
182 | EL3_PNP, | 183 | EL3_PNP, |
@@ -201,11 +202,15 @@ static void el3_tx_timeout (struct net_device *dev); | |||
201 | static void el3_down(struct net_device *dev); | 202 | static void el3_down(struct net_device *dev); |
202 | static void el3_up(struct net_device *dev); | 203 | static void el3_up(struct net_device *dev); |
203 | static struct ethtool_ops ethtool_ops; | 204 | static struct ethtool_ops ethtool_ops; |
204 | #ifdef CONFIG_PM_LEGACY | 205 | #ifdef EL3_SUSPEND |
205 | static int el3_suspend(struct pm_dev *pdev); | 206 | static int el3_suspend(struct device *, pm_message_t); |
206 | static int el3_resume(struct pm_dev *pdev); | 207 | static int el3_resume(struct device *); |
207 | static int el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data); | 208 | #else |
209 | #define el3_suspend NULL | ||
210 | #define el3_resume NULL | ||
208 | #endif | 211 | #endif |
212 | |||
213 | |||
209 | /* generic device remove for all device types */ | 214 | /* generic device remove for all device types */ |
210 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | 215 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) |
211 | static int el3_device_remove (struct device *device); | 216 | static int el3_device_remove (struct device *device); |
@@ -229,7 +234,9 @@ static struct eisa_driver el3_eisa_driver = { | |||
229 | .driver = { | 234 | .driver = { |
230 | .name = "3c509", | 235 | .name = "3c509", |
231 | .probe = el3_eisa_probe, | 236 | .probe = el3_eisa_probe, |
232 | .remove = __devexit_p (el3_device_remove) | 237 | .remove = __devexit_p (el3_device_remove), |
238 | .suspend = el3_suspend, | ||
239 | .resume = el3_resume, | ||
233 | } | 240 | } |
234 | }; | 241 | }; |
235 | #endif | 242 | #endif |
@@ -262,6 +269,8 @@ static struct mca_driver el3_mca_driver = { | |||
262 | .bus = &mca_bus_type, | 269 | .bus = &mca_bus_type, |
263 | .probe = el3_mca_probe, | 270 | .probe = el3_mca_probe, |
264 | .remove = __devexit_p(el3_device_remove), | 271 | .remove = __devexit_p(el3_device_remove), |
272 | .suspend = el3_suspend, | ||
273 | .resume = el3_resume, | ||
265 | }, | 274 | }, |
266 | }; | 275 | }; |
267 | #endif /* CONFIG_MCA */ | 276 | #endif /* CONFIG_MCA */ |
@@ -362,10 +371,6 @@ static void el3_common_remove (struct net_device *dev) | |||
362 | struct el3_private *lp = netdev_priv(dev); | 371 | struct el3_private *lp = netdev_priv(dev); |
363 | 372 | ||
364 | (void) lp; /* Keep gcc quiet... */ | 373 | (void) lp; /* Keep gcc quiet... */ |
365 | #ifdef CONFIG_PM_LEGACY | ||
366 | if (lp->pmdev) | ||
367 | pm_unregister(lp->pmdev); | ||
368 | #endif | ||
369 | #if defined(__ISAPNP__) | 374 | #if defined(__ISAPNP__) |
370 | if (lp->type == EL3_PNP) | 375 | if (lp->type == EL3_PNP) |
371 | pnp_device_detach(to_pnp_dev(lp->dev)); | 376 | pnp_device_detach(to_pnp_dev(lp->dev)); |
@@ -572,16 +577,6 @@ no_pnp: | |||
572 | if (err) | 577 | if (err) |
573 | goto out1; | 578 | goto out1; |
574 | 579 | ||
575 | #ifdef CONFIG_PM_LEGACY | ||
576 | /* register power management */ | ||
577 | lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback); | ||
578 | if (lp->pmdev) { | ||
579 | struct pm_dev *p; | ||
580 | p = lp->pmdev; | ||
581 | p->data = (struct net_device *)dev; | ||
582 | } | ||
583 | #endif | ||
584 | |||
585 | el3_cards++; | 580 | el3_cards++; |
586 | lp->next_dev = el3_root_dev; | 581 | lp->next_dev = el3_root_dev; |
587 | el3_root_dev = dev; | 582 | el3_root_dev = dev; |
@@ -1480,20 +1475,17 @@ el3_up(struct net_device *dev) | |||
1480 | } | 1475 | } |
1481 | 1476 | ||
1482 | /* Power Management support functions */ | 1477 | /* Power Management support functions */ |
1483 | #ifdef CONFIG_PM_LEGACY | 1478 | #ifdef EL3_SUSPEND |
1484 | 1479 | ||
1485 | static int | 1480 | static int |
1486 | el3_suspend(struct pm_dev *pdev) | 1481 | el3_suspend(struct device *pdev, pm_message_t state) |
1487 | { | 1482 | { |
1488 | unsigned long flags; | 1483 | unsigned long flags; |
1489 | struct net_device *dev; | 1484 | struct net_device *dev; |
1490 | struct el3_private *lp; | 1485 | struct el3_private *lp; |
1491 | int ioaddr; | 1486 | int ioaddr; |
1492 | 1487 | ||
1493 | if (!pdev && !pdev->data) | 1488 | dev = pdev->driver_data; |
1494 | return -EINVAL; | ||
1495 | |||
1496 | dev = (struct net_device *)pdev->data; | ||
1497 | lp = netdev_priv(dev); | 1489 | lp = netdev_priv(dev); |
1498 | ioaddr = dev->base_addr; | 1490 | ioaddr = dev->base_addr; |
1499 | 1491 | ||
@@ -1510,17 +1502,14 @@ el3_suspend(struct pm_dev *pdev) | |||
1510 | } | 1502 | } |
1511 | 1503 | ||
1512 | static int | 1504 | static int |
1513 | el3_resume(struct pm_dev *pdev) | 1505 | el3_resume(struct device *pdev) |
1514 | { | 1506 | { |
1515 | unsigned long flags; | 1507 | unsigned long flags; |
1516 | struct net_device *dev; | 1508 | struct net_device *dev; |
1517 | struct el3_private *lp; | 1509 | struct el3_private *lp; |
1518 | int ioaddr; | 1510 | int ioaddr; |
1519 | 1511 | ||
1520 | if (!pdev && !pdev->data) | 1512 | dev = pdev->driver_data; |
1521 | return -EINVAL; | ||
1522 | |||
1523 | dev = (struct net_device *)pdev->data; | ||
1524 | lp = netdev_priv(dev); | 1513 | lp = netdev_priv(dev); |
1525 | ioaddr = dev->base_addr; | 1514 | ioaddr = dev->base_addr; |
1526 | 1515 | ||
@@ -1536,20 +1525,7 @@ el3_resume(struct pm_dev *pdev) | |||
1536 | return 0; | 1525 | return 0; |
1537 | } | 1526 | } |
1538 | 1527 | ||
1539 | static int | 1528 | #endif /* EL3_SUSPEND */ |
1540 | el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data) | ||
1541 | { | ||
1542 | switch (rqst) { | ||
1543 | case PM_SUSPEND: | ||
1544 | return el3_suspend(pdev); | ||
1545 | |||
1546 | case PM_RESUME: | ||
1547 | return el3_resume(pdev); | ||
1548 | } | ||
1549 | return 0; | ||
1550 | } | ||
1551 | |||
1552 | #endif /* CONFIG_PM_LEGACY */ | ||
1553 | 1529 | ||
1554 | /* Parameters that may be passed into the module. */ | 1530 | /* Parameters that may be passed into the module. */ |
1555 | static int debug = -1; | 1531 | static int debug = -1; |
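The 3c509 changes above drop the legacy pm_register()/struct pm_dev callbacks and hook suspend/resume into the EISA and MCA bus drivers instead, guarded by the new EL3_SUSPEND define. The driver-model hooks receive the struct device plus a pm_message_t and reach the net_device through driver_data, roughly as in this sketch (my_suspend/my_resume and the detach/attach placement are illustrative, not the driver's exact code):

    #include <linux/device.h>
    #include <linux/netdevice.h>
    #include <linux/pm.h>

    static int my_suspend(struct device *pdev, pm_message_t state)
    {
        struct net_device *dev = pdev->driver_data;

        netif_device_detach(dev);       /* quiesce the interface */
        /* ... power the hardware down here ... */
        return 0;
    }

    static int my_resume(struct device *pdev)
    {
        struct net_device *dev = pdev->driver_data;

        /* ... reinitialise the hardware here ... */
        netif_device_attach(dev);
        return 0;
    }

The #else branch that defines el3_suspend and el3_resume as NULL lets the bus driver structures name the hooks unconditionally.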
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index 9e1fe2e0478c..b40885d41680 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c | |||
@@ -105,6 +105,7 @@ | |||
105 | #include <linux/mca-legacy.h> | 105 | #include <linux/mca-legacy.h> |
106 | #include <linux/ethtool.h> | 106 | #include <linux/ethtool.h> |
107 | #include <linux/bitops.h> | 107 | #include <linux/bitops.h> |
108 | #include <linux/jiffies.h> | ||
108 | 109 | ||
109 | #include <asm/uaccess.h> | 110 | #include <asm/uaccess.h> |
110 | #include <asm/processor.h> | 111 | #include <asm/processor.h> |
@@ -658,7 +659,7 @@ static int init586(struct net_device *dev) | |||
658 | 659 | ||
659 | s = jiffies; /* warning: only active with interrupts on !! */ | 660 | s = jiffies; /* warning: only active with interrupts on !! */ |
660 | while (!(cfg_cmd->cmd_status & STAT_COMPL)) { | 661 | while (!(cfg_cmd->cmd_status & STAT_COMPL)) { |
661 | if (jiffies - s > 30*HZ/100) | 662 | if (time_after(jiffies, s + 30*HZ/100)) |
662 | break; | 663 | break; |
663 | } | 664 | } |
664 | 665 | ||
@@ -684,7 +685,7 @@ static int init586(struct net_device *dev) | |||
684 | 685 | ||
685 | s = jiffies; | 686 | s = jiffies; |
686 | while (!(ias_cmd->cmd_status & STAT_COMPL)) { | 687 | while (!(ias_cmd->cmd_status & STAT_COMPL)) { |
687 | if (jiffies - s > 30*HZ/100) | 688 | if (time_after(jiffies, s + 30*HZ/100)) |
688 | break; | 689 | break; |
689 | } | 690 | } |
690 | 691 | ||
@@ -709,7 +710,7 @@ static int init586(struct net_device *dev) | |||
709 | 710 | ||
710 | s = jiffies; | 711 | s = jiffies; |
711 | while (!(tdr_cmd->cmd_status & STAT_COMPL)) { | 712 | while (!(tdr_cmd->cmd_status & STAT_COMPL)) { |
712 | if (jiffies - s > 30*HZ/100) { | 713 | if (time_after(jiffies, s + 30*HZ/100)) { |
713 | printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__); | 714 | printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__); |
714 | result = 1; | 715 | result = 1; |
715 | break; | 716 | break; |
@@ -798,7 +799,7 @@ static int init586(struct net_device *dev) | |||
798 | elmc_id_attn586(); | 799 | elmc_id_attn586(); |
799 | s = jiffies; | 800 | s = jiffies; |
800 | while (!(mc_cmd->cmd_status & STAT_COMPL)) { | 801 | while (!(mc_cmd->cmd_status & STAT_COMPL)) { |
801 | if (jiffies - s > 30*HZ/100) | 802 | if (time_after(jiffies, s + 30*HZ/100)) |
802 | break; | 803 | break; |
803 | } | 804 | } |
804 | if (!(mc_cmd->cmd_status & STAT_COMPL)) { | 805 | if (!(mc_cmd->cmd_status & STAT_COMPL)) { |
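The polling loops in 3c523 above, and in 3c59x, apne and arcnet further down, are converted from open-coded jiffies arithmetic to time_after(), the standard wraparound-safe helper for comparing jiffies values. A small self-contained example of the same idiom; hardware_done() is a hypothetical status poll and the 300 ms budget mirrors the 30*HZ/100 used in the driver.

    #include <linux/jiffies.h>
    #include <linux/errno.h>

    extern int hardware_done(void);     /* hypothetical status check */

    static int wait_for_done(void)
    {
        unsigned long timeout = jiffies + 30 * HZ / 100;    /* ~300 ms */

        while (!hardware_done()) {
            if (time_after(jiffies, timeout))
                return -ETIMEDOUT;      /* signed-difference comparison, wrap-safe */
        }
        return 0;
    }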
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 7f47124f118d..5d11a06ecb2c 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -258,6 +258,7 @@ static int vortex_debug = 1; | |||
258 | #include <linux/highmem.h> | 258 | #include <linux/highmem.h> |
259 | #include <linux/eisa.h> | 259 | #include <linux/eisa.h> |
260 | #include <linux/bitops.h> | 260 | #include <linux/bitops.h> |
261 | #include <linux/jiffies.h> | ||
261 | #include <asm/irq.h> /* For NR_IRQS only. */ | 262 | #include <asm/irq.h> /* For NR_IRQS only. */ |
262 | #include <asm/io.h> | 263 | #include <asm/io.h> |
263 | #include <asm/uaccess.h> | 264 | #include <asm/uaccess.h> |
@@ -841,7 +842,7 @@ enum xcvr_types { | |||
841 | XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, | 842 | XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, |
842 | }; | 843 | }; |
843 | 844 | ||
844 | static struct media_table { | 845 | static const struct media_table { |
845 | char *name; | 846 | char *name; |
846 | unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ | 847 | unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ |
847 | mask:8, /* The transceiver-present bit in Wn3_Config.*/ | 848 | mask:8, /* The transceiver-present bit in Wn3_Config.*/ |
@@ -1445,7 +1446,7 @@ static int __devinit vortex_probe1(struct device *gendev, | |||
1445 | } | 1446 | } |
1446 | 1447 | ||
1447 | { | 1448 | { |
1448 | static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; | 1449 | static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; |
1449 | unsigned int config; | 1450 | unsigned int config; |
1450 | EL3WINDOW(3); | 1451 | EL3WINDOW(3); |
1451 | vp->available_media = ioread16(ioaddr + Wn3_Options); | 1452 | vp->available_media = ioread16(ioaddr + Wn3_Options); |
@@ -2724,7 +2725,7 @@ boomerang_rx(struct net_device *dev) | |||
2724 | skb = dev_alloc_skb(PKT_BUF_SZ); | 2725 | skb = dev_alloc_skb(PKT_BUF_SZ); |
2725 | if (skb == NULL) { | 2726 | if (skb == NULL) { |
2726 | static unsigned long last_jif; | 2727 | static unsigned long last_jif; |
2727 | if ((jiffies - last_jif) > 10 * HZ) { | 2728 | if (time_after(jiffies, last_jif + 10 * HZ)) { |
2728 | printk(KERN_WARNING "%s: memory shortage\n", dev->name); | 2729 | printk(KERN_WARNING "%s: memory shortage\n", dev->name); |
2729 | last_jif = jiffies; | 2730 | last_jif = jiffies; |
2730 | } | 2731 | } |
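Marking media_table and ram_split const above (and mii_2_8139_map in the 8139 drivers below) records that these lookup tables are never written, so the compiler can place them in read-only data. The idiom in isolation, with made-up contents:

    /* const data, and const char * const for arrays of string literals,
     * can live in .rodata and be shared read-only. */
    static const struct {
        const char  *name;
        unsigned int flags;
    } board_tbl[] = {
        { "alpha", 0x01 },
        { "beta",  0x02 },
    };

    static const char * const ram_split_tbl[] = { "5:3", "3:1", "1:1", "3:5" };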
diff --git a/drivers/net/7990.c b/drivers/net/7990.c index 18b027e73f28..86633c5f1a4b 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
32 | #include <linux/irq.h> | 32 | #include <asm/irq.h> |
33 | /* Used for the temporal inet entries and routing */ | 33 | /* Used for the temporal inet entries and routing */ |
34 | #include <linux/socket.h> | 34 | #include <linux/socket.h> |
35 | #include <linux/bitops.h> | 35 | #include <linux/bitops.h> |
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index dd410496aadb..ce99845d8266 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -1276,7 +1276,7 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu) | |||
1276 | } | 1276 | } |
1277 | #endif /* BROKEN */ | 1277 | #endif /* BROKEN */ |
1278 | 1278 | ||
1279 | static char mii_2_8139_map[8] = { | 1279 | static const char mii_2_8139_map[8] = { |
1280 | BasicModeCtrl, | 1280 | BasicModeCtrl, |
1281 | BasicModeStatus, | 1281 | BasicModeStatus, |
1282 | 0, | 1282 | 0, |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 2beac55b57d6..e58d4c50c2e1 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -229,7 +229,7 @@ typedef enum { | |||
229 | 229 | ||
230 | 230 | ||
231 | /* indexed by board_t, above */ | 231 | /* indexed by board_t, above */ |
232 | static struct { | 232 | static const struct { |
233 | const char *name; | 233 | const char *name; |
234 | u32 hw_flags; | 234 | u32 hw_flags; |
235 | } board_info[] __devinitdata = { | 235 | } board_info[] __devinitdata = { |
@@ -1192,7 +1192,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l | |||
1192 | #define mdio_delay() RTL_R8(Config4) | 1192 | #define mdio_delay() RTL_R8(Config4) |
1193 | 1193 | ||
1194 | 1194 | ||
1195 | static char mii_2_8139_map[8] = { | 1195 | static const char mii_2_8139_map[8] = { |
1196 | BasicModeCtrl, | 1196 | BasicModeCtrl, |
1197 | BasicModeStatus, | 1197 | BasicModeStatus, |
1198 | 0, | 1198 | 0, |
diff --git a/drivers/net/82596.c b/drivers/net/82596.c index 13b745b39667..da0c878dcba8 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c | |||
@@ -614,7 +614,7 @@ static void rebuild_rx_bufs(struct net_device *dev) | |||
614 | static int init_i596_mem(struct net_device *dev) | 614 | static int init_i596_mem(struct net_device *dev) |
615 | { | 615 | { |
616 | struct i596_private *lp = dev->priv; | 616 | struct i596_private *lp = dev->priv; |
617 | #if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) | 617 | #if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT) |
618 | short ioaddr = dev->base_addr; | 618 | short ioaddr = dev->base_addr; |
619 | #endif | 619 | #endif |
620 | unsigned long flags; | 620 | unsigned long flags; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index aa633fa95e64..e0b11095b9da 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -66,7 +66,7 @@ config BONDING | |||
66 | 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux. | 66 | 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux. |
67 | 67 | ||
68 | The driver supports multiple bonding modes to allow for both high | 68 | The driver supports multiple bonding modes to allow for both high |
69 | perfomance and high availability operation. | 69 | performance and high availability operation. |
70 | 70 | ||
71 | Refer to <file:Documentation/networking/bonding.txt> for more | 71 | Refer to <file:Documentation/networking/bonding.txt> for more |
72 | information. | 72 | information. |
@@ -698,8 +698,8 @@ config VORTEX | |||
698 | depends on NET_VENDOR_3COM && (PCI || EISA) | 698 | depends on NET_VENDOR_3COM && (PCI || EISA) |
699 | select MII | 699 | select MII |
700 | ---help--- | 700 | ---help--- |
701 | This option enables driver support for a large number of 10mbps and | 701 | This option enables driver support for a large number of 10Mbps and |
702 | 10/100mbps EISA, PCI and PCMCIA 3Com network cards: | 702 | 10/100Mbps EISA, PCI and PCMCIA 3Com network cards: |
703 | 703 | ||
704 | "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI | 704 | "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI |
705 | "Boomerang" (EtherLink XL 3c900 or 3c905) PCI | 705 | "Boomerang" (EtherLink XL 3c900 or 3c905) PCI |
@@ -1021,7 +1021,7 @@ config EEXPRESS_PRO | |||
1021 | depends on NET_ISA | 1021 | depends on NET_ISA |
1022 | ---help--- | 1022 | ---help--- |
1023 | If you have a network (Ethernet) card of this type, say Y. This | 1023 | If you have a network (Ethernet) card of this type, say Y. This |
1024 | driver supports intel i82595{FX,TX} based boards. Note however | 1024 | driver supports Intel i82595{FX,TX} based boards. Note however |
1025 | that the EtherExpress PRO/100 Ethernet card has its own separate | 1025 | that the EtherExpress PRO/100 Ethernet card has its own separate |
1026 | driver. Please read the Ethernet-HOWTO, available from | 1026 | driver. Please read the Ethernet-HOWTO, available from |
1027 | <http://www.tldp.org/docs.html#howto>. | 1027 | <http://www.tldp.org/docs.html#howto>. |
@@ -1208,7 +1208,7 @@ config IBM_EMAC_RX_SKB_HEADROOM | |||
1208 | help | 1208 | help |
1209 | Additional receive skb headroom. Note, that driver | 1209 | Additional receive skb headroom. Note, that driver |
1210 | will always reserve at least 2 bytes to make IP header | 1210 | will always reserve at least 2 bytes to make IP header |
1211 | aligned, so usualy there is no need to add any additional | 1211 | aligned, so usually there is no need to add any additional |
1212 | headroom. | 1212 | headroom. |
1213 | 1213 | ||
1214 | If unsure, set to 0. | 1214 | If unsure, set to 0. |
@@ -1372,8 +1372,8 @@ config B44 | |||
1372 | called b44. | 1372 | called b44. |
1373 | 1373 | ||
1374 | config FORCEDETH | 1374 | config FORCEDETH |
1375 | tristate "Reverse Engineered nForce Ethernet support (EXPERIMENTAL)" | 1375 | tristate "nForce Ethernet support" |
1376 | depends on NET_PCI && PCI && EXPERIMENTAL | 1376 | depends on NET_PCI && PCI |
1377 | help | 1377 | help |
1378 | If you have a network (Ethernet) controller of this type, say Y and | 1378 | If you have a network (Ethernet) controller of this type, say Y and |
1379 | read the Ethernet-HOWTO, available from | 1379 | read the Ethernet-HOWTO, available from |
@@ -1614,11 +1614,7 @@ config SIS900 | |||
1614 | ---help--- | 1614 | ---help--- |
1615 | This is a driver for the Fast Ethernet PCI network cards based on | 1615 | This is a driver for the Fast Ethernet PCI network cards based on |
1616 | the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in | 1616 | the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in |
1617 | SiS 630 and SiS 540 chipsets. If you have one of those, say Y and | 1617 | SiS 630 and SiS 540 chipsets. |
1618 | read the Ethernet-HOWTO, available at | ||
1619 | <http://www.tldp.org/docs.html#howto>. Please read | ||
1620 | <file:Documentation/networking/sis900.txt> and comments at the | ||
1621 | beginning of <file:drivers/net/sis900.c> for more information. | ||
1622 | 1618 | ||
1623 | This driver also supports AMD 79C901 HomePNA so that you can use | 1619 | This driver also supports AMD 79C901 HomePNA so that you can use |
1624 | your phone line as a network cable. | 1620 | your phone line as a network cable. |
@@ -1934,7 +1930,7 @@ config MYRI_SBUS | |||
1934 | will be called myri_sbus. This is recommended. | 1930 | will be called myri_sbus. This is recommended. |
1935 | 1931 | ||
1936 | config NS83820 | 1932 | config NS83820 |
1937 | tristate "National Semiconduct DP83820 support" | 1933 | tristate "National Semiconductor DP83820 support" |
1938 | depends on PCI | 1934 | depends on PCI |
1939 | help | 1935 | help |
1940 | This is a driver for the National Semiconductor DP83820 series | 1936 | This is a driver for the National Semiconductor DP83820 series |
@@ -2195,6 +2191,7 @@ config GFAR_NAPI | |||
2195 | config MV643XX_ETH | 2191 | config MV643XX_ETH |
2196 | tristate "MV-643XX Ethernet support" | 2192 | tristate "MV-643XX Ethernet support" |
2197 | depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM | 2193 | depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM |
2194 | select MII | ||
2198 | help | 2195 | help |
2199 | This driver supports the gigabit Ethernet on the Marvell MV643XX | 2196 | This driver supports the gigabit Ethernet on the Marvell MV643XX |
2200 | chipset which is used in the Momenco Ocelot C and Jaguar ATX and | 2197 | chipset which is used in the Momenco Ocelot C and Jaguar ATX and |
@@ -2514,7 +2511,7 @@ config PPP_FILTER | |||
2514 | Say Y here if you want to be able to filter the packets passing over | 2511 | Say Y here if you want to be able to filter the packets passing over |
2515 | PPP interfaces. This allows you to control which packets count as | 2512 | PPP interfaces. This allows you to control which packets count as |
2516 | activity (i.e. which packets will reset the idle timer or bring up | 2513 | activity (i.e. which packets will reset the idle timer or bring up |
2517 | a demand-dialled link) and which packets are to be dropped entirely. | 2514 | a demand-dialed link) and which packets are to be dropped entirely. |
2518 | You need to say Y here if you wish to use the pass-filter and | 2515 | You need to say Y here if you wish to use the pass-filter and |
2519 | active-filter options to pppd. | 2516 | active-filter options to pppd. |
2520 | 2517 | ||
@@ -2702,8 +2699,8 @@ config SHAPER | |||
2702 | <file:Documentation/networking/shaper.txt> for more information. | 2699 | <file:Documentation/networking/shaper.txt> for more information. |
2703 | 2700 | ||
2704 | An alternative to this traffic shaper is the experimental | 2701 | An alternative to this traffic shaper is the experimental |
2705 | Class-Based Queueing (CBQ) scheduling support which you get if you | 2702 | Class-Based Queuing (CBQ) scheduling support which you get if you |
2706 | say Y to "QoS and/or fair queueing" above. | 2703 | say Y to "QoS and/or fair queuing" above. |
2707 | 2704 | ||
2708 | To compile this driver as a module, choose M here: the module | 2705 | To compile this driver as a module, choose M here: the module |
2709 | will be called shaper. If unsure, say N. | 2706 | will be called shaper. If unsure, say N. |
diff --git a/drivers/net/apne.c b/drivers/net/apne.c index a94216b87184..b9820b86cdcc 100644 --- a/drivers/net/apne.c +++ b/drivers/net/apne.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/netdevice.h> | 37 | #include <linux/netdevice.h> |
38 | #include <linux/etherdevice.h> | 38 | #include <linux/etherdevice.h> |
39 | #include <linux/jiffies.h> | ||
39 | 40 | ||
40 | #include <asm/system.h> | 41 | #include <asm/system.h> |
41 | #include <asm/io.h> | 42 | #include <asm/io.h> |
@@ -216,7 +217,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) | |||
216 | outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); | 217 | outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); |
217 | 218 | ||
218 | while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) | 219 | while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) |
219 | if (jiffies - reset_start_time > 2*HZ/100) { | 220 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
220 | printk(" not found (no reset ack).\n"); | 221 | printk(" not found (no reset ack).\n"); |
221 | return -ENODEV; | 222 | return -ENODEV; |
222 | } | 223 | } |
@@ -382,7 +383,7 @@ apne_reset_8390(struct net_device *dev) | |||
382 | 383 | ||
383 | /* This check _should_not_ be necessary, omit eventually. */ | 384 | /* This check _should_not_ be necessary, omit eventually. */ |
384 | while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) | 385 | while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) |
385 | if (jiffies - reset_start_time > 2*HZ/100) { | 386 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
386 | printk("%s: ne_reset_8390() did not complete.\n", dev->name); | 387 | printk("%s: ne_reset_8390() did not complete.\n", dev->name); |
387 | break; | 388 | break; |
388 | } | 389 | } |
@@ -530,7 +531,7 @@ apne_block_output(struct net_device *dev, int count, | |||
530 | dma_start = jiffies; | 531 | dma_start = jiffies; |
531 | 532 | ||
532 | while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) | 533 | while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) |
533 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 534 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
534 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); | 535 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); |
535 | apne_reset_8390(dev); | 536 | apne_reset_8390(dev); |
536 | NS8390_init(dev,1); | 537 | NS8390_init(dev,1); |
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig index 948de2532a1e..7284ccad0b91 100644 --- a/drivers/net/arcnet/Kconfig +++ b/drivers/net/arcnet/Kconfig | |||
@@ -68,10 +68,10 @@ config ARCNET_CAP | |||
68 | packet is stuffed with an extra 4 byte "cookie" which doesn't | 68 | packet is stuffed with an extra 4 byte "cookie" which doesn't |
69 | actually appear on the network. After transmit the driver will send | 69 | actually appear on the network. After transmit the driver will send |
70 | back a packet with protocol byte 0 containing the status of the | 70 | back a packet with protocol byte 0 containing the status of the |
71 | transmition: | 71 | transmission: |
72 | 0=no hardware acknowledge | 72 | 0=no hardware acknowledge |
73 | 1=excessive nak | 73 | 1=excessive nak |
74 | 2=transmition accepted by the reciever hardware | 74 | 2=transmission accepted by the receiver hardware |
75 | 75 | ||
76 | Received packets are also stuffed with the extra 4 bytes but it will | 76 | Received packets are also stuffed with the extra 4 bytes but it will |
77 | be random data. | 77 | be random data. |
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index e1ea29b0cd14..e7555d4e6ff1 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c | |||
@@ -42,7 +42,7 @@ static int build_header(struct sk_buff *skb, struct net_device *dev, | |||
42 | static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, | 42 | static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, |
43 | int bufnum); | 43 | int bufnum); |
44 | 44 | ||
45 | struct ArcProto rawmode_proto = | 45 | static struct ArcProto rawmode_proto = |
46 | { | 46 | { |
47 | .suffix = 'r', | 47 | .suffix = 'r', |
48 | .mtu = XMTU, | 48 | .mtu = XMTU, |
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 38c3f033f739..8c8d6c453c45 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c | |||
@@ -97,25 +97,44 @@ static int __init arcrimi_probe(struct net_device *dev) | |||
97 | "must specify the shmem and irq!\n"); | 97 | "must specify the shmem and irq!\n"); |
98 | return -ENODEV; | 98 | return -ENODEV; |
99 | } | 99 | } |
100 | if (dev->dev_addr[0] == 0) { | ||
101 | BUGMSG(D_NORMAL, "You need to specify your card's station " | ||
102 | "ID!\n"); | ||
103 | return -ENODEV; | ||
104 | } | ||
100 | /* | 105 | /* |
101 | * Grab the memory region at mem_start for BUFFER_SIZE bytes. | 106 | * Grab the memory region at mem_start for MIRROR_SIZE bytes. |
102 | * Later in arcrimi_found() the real size will be determined | 107 | * Later in arcrimi_found() the real size will be determined |
103 | * and this reserve will be released and the correct size | 108 | * and this reserve will be released and the correct size |
104 | * will be taken. | 109 | * will be taken. |
105 | */ | 110 | */ |
106 | if (!request_mem_region(dev->mem_start, BUFFER_SIZE, "arcnet (90xx)")) { | 111 | if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { |
107 | BUGMSG(D_NORMAL, "Card memory already allocated\n"); | 112 | BUGMSG(D_NORMAL, "Card memory already allocated\n"); |
108 | return -ENODEV; | 113 | return -ENODEV; |
109 | } | 114 | } |
110 | if (dev->dev_addr[0] == 0) { | ||
111 | release_mem_region(dev->mem_start, BUFFER_SIZE); | ||
112 | BUGMSG(D_NORMAL, "You need to specify your card's station " | ||
113 | "ID!\n"); | ||
114 | return -ENODEV; | ||
115 | } | ||
116 | return arcrimi_found(dev); | 115 | return arcrimi_found(dev); |
117 | } | 116 | } |
118 | 117 | ||
118 | static int check_mirror(unsigned long addr, size_t size) | ||
119 | { | ||
120 | void __iomem *p; | ||
121 | int res = -1; | ||
122 | |||
123 | if (!request_mem_region(addr, size, "arcnet (90xx)")) | ||
124 | return -1; | ||
125 | |||
126 | p = ioremap(addr, size); | ||
127 | if (p) { | ||
128 | if (readb(p) == TESTvalue) | ||
129 | res = 1; | ||
130 | else | ||
131 | res = 0; | ||
132 | iounmap(p); | ||
133 | } | ||
134 | |||
135 | release_mem_region(addr, size); | ||
136 | return res; | ||
137 | } | ||
119 | 138 | ||
120 | /* | 139 | /* |
121 | * Set up the struct net_device associated with this card. Called after | 140 | * Set up the struct net_device associated with this card. Called after |
@@ -125,19 +144,28 @@ static int __init arcrimi_found(struct net_device *dev) | |||
125 | { | 144 | { |
126 | struct arcnet_local *lp; | 145 | struct arcnet_local *lp; |
127 | unsigned long first_mirror, last_mirror, shmem; | 146 | unsigned long first_mirror, last_mirror, shmem; |
147 | void __iomem *p; | ||
128 | int mirror_size; | 148 | int mirror_size; |
129 | int err; | 149 | int err; |
130 | 150 | ||
151 | p = ioremap(dev->mem_start, MIRROR_SIZE); | ||
152 | if (!p) { | ||
153 | release_mem_region(dev->mem_start, MIRROR_SIZE); | ||
154 | BUGMSG(D_NORMAL, "Can't ioremap\n"); | ||
155 | return -ENODEV; | ||
156 | } | ||
157 | |||
131 | /* reserve the irq */ | 158 | /* reserve the irq */ |
132 | if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { | 159 | if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { |
133 | release_mem_region(dev->mem_start, BUFFER_SIZE); | 160 | iounmap(p); |
161 | release_mem_region(dev->mem_start, MIRROR_SIZE); | ||
134 | BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); | 162 | BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); |
135 | return -ENODEV; | 163 | return -ENODEV; |
136 | } | 164 | } |
137 | 165 | ||
138 | shmem = dev->mem_start; | 166 | shmem = dev->mem_start; |
139 | isa_writeb(TESTvalue, shmem); | 167 | writeb(TESTvalue, p); |
140 | isa_writeb(dev->dev_addr[0], shmem + 1); /* actually the node ID */ | 168 | writeb(dev->dev_addr[0], p + 1); /* actually the node ID */ |
141 | 169 | ||
142 | /* find the real shared memory start/end points, including mirrors */ | 170 | /* find the real shared memory start/end points, including mirrors */ |
143 | 171 | ||
@@ -146,17 +174,18 @@ static int __init arcrimi_found(struct net_device *dev) | |||
146 | * 2k (or there are no mirrors at all) but on some, it's 4k. | 174 | * 2k (or there are no mirrors at all) but on some, it's 4k. |
147 | */ | 175 | */ |
148 | mirror_size = MIRROR_SIZE; | 176 | mirror_size = MIRROR_SIZE; |
149 | if (isa_readb(shmem) == TESTvalue | 177 | if (readb(p) == TESTvalue |
150 | && isa_readb(shmem - mirror_size) != TESTvalue | 178 | && check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 |
151 | && isa_readb(shmem - 2 * mirror_size) == TESTvalue) | 179 | && check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) |
152 | mirror_size *= 2; | 180 | mirror_size = 2 * MIRROR_SIZE; |
153 | 181 | ||
154 | first_mirror = last_mirror = shmem; | 182 | first_mirror = shmem - mirror_size; |
155 | while (isa_readb(first_mirror) == TESTvalue) | 183 | while (check_mirror(first_mirror, mirror_size) == 1) |
156 | first_mirror -= mirror_size; | 184 | first_mirror -= mirror_size; |
157 | first_mirror += mirror_size; | 185 | first_mirror += mirror_size; |
158 | 186 | ||
159 | while (isa_readb(last_mirror) == TESTvalue) | 187 | last_mirror = shmem + mirror_size; |
188 | while (check_mirror(last_mirror, mirror_size) == 1) | ||
160 | last_mirror += mirror_size; | 189 | last_mirror += mirror_size; |
161 | last_mirror -= mirror_size; | 190 | last_mirror -= mirror_size; |
162 | 191 | ||
@@ -181,7 +210,8 @@ static int __init arcrimi_found(struct net_device *dev) | |||
181 | * with the correct size. There is a VERY slim chance this could | 210 | * with the correct size. There is a VERY slim chance this could |
182 | * fail. | 211 | * fail. |
183 | */ | 212 | */ |
184 | release_mem_region(shmem, BUFFER_SIZE); | 213 | iounmap(p); |
214 | release_mem_region(shmem, MIRROR_SIZE); | ||
185 | if (!request_mem_region(dev->mem_start, | 215 | if (!request_mem_region(dev->mem_start, |
186 | dev->mem_end - dev->mem_start + 1, | 216 | dev->mem_end - dev->mem_start + 1, |
187 | "arcnet (90xx)")) { | 217 | "arcnet (90xx)")) { |
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 12ef52c193a3..64e2caf3083d 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <net/arp.h> | 52 | #include <net/arp.h> |
53 | #include <linux/init.h> | 53 | #include <linux/init.h> |
54 | #include <linux/arcdevice.h> | 54 | #include <linux/arcdevice.h> |
55 | #include <linux/jiffies.h> | ||
55 | 56 | ||
56 | /* "do nothing" functions for protocol drivers */ | 57 | /* "do nothing" functions for protocol drivers */ |
57 | static void null_rx(struct net_device *dev, int bufnum, | 58 | static void null_rx(struct net_device *dev, int bufnum, |
@@ -61,6 +62,7 @@ static int null_build_header(struct sk_buff *skb, struct net_device *dev, | |||
61 | static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, | 62 | static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, |
62 | int length, int bufnum); | 63 | int length, int bufnum); |
63 | 64 | ||
65 | static void arcnet_rx(struct net_device *dev, int bufnum); | ||
64 | 66 | ||
65 | /* | 67 | /* |
66 | * one ArcProto per possible proto ID. None of the elements of | 68 | * one ArcProto per possible proto ID. None of the elements of |
@@ -71,7 +73,7 @@ static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, | |||
71 | struct ArcProto *arc_proto_map[256], *arc_proto_default, | 73 | struct ArcProto *arc_proto_map[256], *arc_proto_default, |
72 | *arc_bcast_proto, *arc_raw_proto; | 74 | *arc_bcast_proto, *arc_raw_proto; |
73 | 75 | ||
74 | struct ArcProto arc_proto_null = | 76 | static struct ArcProto arc_proto_null = |
75 | { | 77 | { |
76 | .suffix = '?', | 78 | .suffix = '?', |
77 | .mtu = XMTU, | 79 | .mtu = XMTU, |
@@ -90,7 +92,6 @@ EXPORT_SYMBOL(arc_proto_map); | |||
90 | EXPORT_SYMBOL(arc_proto_default); | 92 | EXPORT_SYMBOL(arc_proto_default); |
91 | EXPORT_SYMBOL(arc_bcast_proto); | 93 | EXPORT_SYMBOL(arc_bcast_proto); |
92 | EXPORT_SYMBOL(arc_raw_proto); | 94 | EXPORT_SYMBOL(arc_raw_proto); |
93 | EXPORT_SYMBOL(arc_proto_null); | ||
94 | EXPORT_SYMBOL(arcnet_unregister_proto); | 95 | EXPORT_SYMBOL(arcnet_unregister_proto); |
95 | EXPORT_SYMBOL(arcnet_debug); | 96 | EXPORT_SYMBOL(arcnet_debug); |
96 | EXPORT_SYMBOL(alloc_arcdev); | 97 | EXPORT_SYMBOL(alloc_arcdev); |
@@ -118,7 +119,7 @@ static int __init arcnet_init(void) | |||
118 | 119 | ||
119 | arcnet_debug = debug; | 120 | arcnet_debug = debug; |
120 | 121 | ||
121 | printk(VERSION); | 122 | printk("arcnet loaded.\n"); |
122 | 123 | ||
123 | #ifdef ALPHA_WARNING | 124 | #ifdef ALPHA_WARNING |
124 | BUGLVL(D_EXTRA) { | 125 | BUGLVL(D_EXTRA) { |
@@ -178,8 +179,8 @@ EXPORT_SYMBOL(arcnet_dump_skb); | |||
178 | * Dump the contents of an ARCnet buffer | 179 | * Dump the contents of an ARCnet buffer |
179 | */ | 180 | */ |
180 | #if (ARCNET_DEBUG_MAX & (D_RX | D_TX)) | 181 | #if (ARCNET_DEBUG_MAX & (D_RX | D_TX)) |
181 | void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, | 182 | static void arcnet_dump_packet(struct net_device *dev, int bufnum, |
182 | int take_arcnet_lock) | 183 | char *desc, int take_arcnet_lock) |
183 | { | 184 | { |
184 | struct arcnet_local *lp = dev->priv; | 185 | struct arcnet_local *lp = dev->priv; |
185 | int i, length; | 186 | int i, length; |
@@ -208,7 +209,10 @@ void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, | |||
208 | 209 | ||
209 | } | 210 | } |
210 | 211 | ||
211 | EXPORT_SYMBOL(arcnet_dump_packet); | 212 | #else |
213 | |||
214 | #define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) do { } while (0) | ||
215 | |||
212 | #endif | 216 | #endif |
213 | 217 | ||
214 | 218 | ||
@@ -733,7 +737,7 @@ static void arcnet_timeout(struct net_device *dev) | |||
733 | 737 | ||
734 | spin_unlock_irqrestore(&lp->lock, flags); | 738 | spin_unlock_irqrestore(&lp->lock, flags); |
735 | 739 | ||
736 | if (jiffies - lp->last_timeout > 10*HZ) { | 740 | if (time_after(jiffies, lp->last_timeout + 10*HZ)) { |
737 | BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n", | 741 | BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n", |
738 | msg, status, lp->intmask, lp->lasttrans_dest); | 742 | msg, status, lp->intmask, lp->lasttrans_dest); |
739 | lp->last_timeout = jiffies; | 743 | lp->last_timeout = jiffies; |
@@ -996,7 +1000,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
996 | * This is a generic packet receiver that calls arcnet??_rx depending on the | 1000 | * This is a generic packet receiver that calls arcnet??_rx depending on the |
997 | * protocol ID found. | 1001 | * protocol ID found. |
998 | */ | 1002 | */ |
999 | void arcnet_rx(struct net_device *dev, int bufnum) | 1003 | static void arcnet_rx(struct net_device *dev, int bufnum) |
1000 | { | 1004 | { |
1001 | struct arcnet_local *lp = dev->priv; | 1005 | struct arcnet_local *lp = dev->priv; |
1002 | struct archdr pkt; | 1006 | struct archdr pkt; |
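Besides making arcnet_rx() and arcnet_dump_packet() static, the hunk above replaces the exported dumper with a do { } while (0) stub when dump debugging is compiled out, so callers keep one unconditional call site that still parses as a single statement. A generic sketch of that pattern; DEBUG_DUMP and my_dump() are invented names:

    #include <linux/netdevice.h>

    #ifdef DEBUG_DUMP
    static void my_dump(struct net_device *dev, int bufnum)
    {
        /* ... expensive debug output ... */
    }
    #else
    /* do { } while (0) keeps "if (x) my_dump(dev, 0); else ..." legal. */
    #define my_dump(dev, bufnum) do { } while (0)
    #endif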
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index 6c2c9b9ac6db..43150b2bd13f 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c | |||
@@ -53,7 +53,7 @@ | |||
53 | 53 | ||
54 | 54 | ||
55 | /* Internal function declarations */ | 55 | /* Internal function declarations */ |
56 | static int com90xx_found(int ioaddr, int airq, u_long shmem); | 56 | static int com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *); |
57 | static void com90xx_command(struct net_device *dev, int command); | 57 | static void com90xx_command(struct net_device *dev, int command); |
58 | static int com90xx_status(struct net_device *dev); | 58 | static int com90xx_status(struct net_device *dev); |
59 | static void com90xx_setmask(struct net_device *dev, int mask); | 59 | static void com90xx_setmask(struct net_device *dev, int mask); |
@@ -116,14 +116,26 @@ static void __init com90xx_probe(void) | |||
116 | unsigned long airqmask; | 116 | unsigned long airqmask; |
117 | int ports[(0x3f0 - 0x200) / 16 + 1] = | 117 | int ports[(0x3f0 - 0x200) / 16 + 1] = |
118 | {0}; | 118 | {0}; |
119 | u_long shmems[(0xFF800 - 0xA0000) / 2048 + 1] = | 119 | unsigned long *shmems; |
120 | {0}; | 120 | void __iomem **iomem; |
121 | int numports, numshmems, *port; | 121 | int numports, numshmems, *port; |
122 | u_long *p; | 122 | u_long *p; |
123 | int index; | ||
123 | 124 | ||
124 | if (!io && !irq && !shmem && !*device && com90xx_skip_probe) | 125 | if (!io && !irq && !shmem && !*device && com90xx_skip_probe) |
125 | return; | 126 | return; |
126 | 127 | ||
128 | shmems = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(unsigned long), | ||
129 | GFP_KERNEL); | ||
130 | if (!shmems) | ||
131 | return; | ||
132 | iomem = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(void __iomem *), | ||
133 | GFP_KERNEL); | ||
134 | if (!iomem) { | ||
135 | kfree(shmems); | ||
136 | return; | ||
137 | } | ||
138 | |||
127 | BUGLVL(D_NORMAL) printk(VERSION); | 139 | BUGLVL(D_NORMAL) printk(VERSION); |
128 | 140 | ||
129 | /* set up the arrays where we'll store the possible probe addresses */ | 141 | /* set up the arrays where we'll store the possible probe addresses */ |
@@ -179,6 +191,8 @@ static void __init com90xx_probe(void) | |||
179 | 191 | ||
180 | if (!numports) { | 192 | if (!numports) { |
181 | BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n"); | 193 | BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n"); |
194 | kfree(shmems); | ||
195 | kfree(iomem); | ||
182 | return; | 196 | return; |
183 | } | 197 | } |
184 | /* Stage 2: we have now reset any possible ARCnet cards, so we can't | 198 | /* Stage 2: we have now reset any possible ARCnet cards, so we can't |
@@ -202,8 +216,8 @@ static void __init com90xx_probe(void) | |||
202 | * 0xD1 byte in the right place, or are read-only. | 216 | * 0xD1 byte in the right place, or are read-only. |
203 | */ | 217 | */ |
204 | numprint = -1; | 218 | numprint = -1; |
205 | for (p = &shmems[0]; p < shmems + numshmems; p++) { | 219 | for (index = 0, p = &shmems[0]; index < numshmems; p++, index++) { |
206 | u_long ptr = *p; | 220 | void __iomem *base; |
207 | 221 | ||
208 | numprint++; | 222 | numprint++; |
209 | numprint %= 8; | 223 | numprint %= 8; |
@@ -213,38 +227,49 @@ static void __init com90xx_probe(void) | |||
213 | } | 227 | } |
214 | BUGMSG2(D_INIT, "%lXh ", *p); | 228 | BUGMSG2(D_INIT, "%lXh ", *p); |
215 | 229 | ||
216 | if (!request_mem_region(*p, BUFFER_SIZE, "arcnet (90xx)")) { | 230 | if (!request_mem_region(*p, MIRROR_SIZE, "arcnet (90xx)")) { |
217 | BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n"); | 231 | BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n"); |
218 | BUGMSG2(D_INIT_REASONS, "Stage 3: "); | 232 | BUGMSG2(D_INIT_REASONS, "Stage 3: "); |
219 | BUGLVL(D_INIT_REASONS) numprint = 0; | 233 | BUGLVL(D_INIT_REASONS) numprint = 0; |
220 | *p-- = shmems[--numshmems]; | 234 | goto out; |
221 | continue; | 235 | } |
236 | base = ioremap(*p, MIRROR_SIZE); | ||
237 | if (!base) { | ||
238 | BUGMSG2(D_INIT_REASONS, "(ioremap)\n"); | ||
239 | BUGMSG2(D_INIT_REASONS, "Stage 3: "); | ||
240 | BUGLVL(D_INIT_REASONS) numprint = 0; | ||
241 | goto out1; | ||
222 | } | 242 | } |
223 | if (isa_readb(ptr) != TESTvalue) { | 243 | if (readb(base) != TESTvalue) { |
224 | BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n", | 244 | BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n", |
225 | isa_readb(ptr), TESTvalue); | 245 | readb(base), TESTvalue); |
226 | BUGMSG2(D_INIT_REASONS, "S3: "); | 246 | BUGMSG2(D_INIT_REASONS, "S3: "); |
227 | BUGLVL(D_INIT_REASONS) numprint = 0; | 247 | BUGLVL(D_INIT_REASONS) numprint = 0; |
228 | release_mem_region(*p, BUFFER_SIZE); | 248 | goto out2; |
229 | *p-- = shmems[--numshmems]; | ||
230 | continue; | ||
231 | } | 249 | } |
232 | /* By writing 0x42 to the TESTvalue location, we also make | 250 | /* By writing 0x42 to the TESTvalue location, we also make |
233 | * sure no "mirror" shmem areas show up - if they occur | 251 | * sure no "mirror" shmem areas show up - if they occur |
234 | * in another pass through this loop, they will be discarded | 252 | * in another pass through this loop, they will be discarded |
235 | * because *cptr != TESTvalue. | 253 | * because *cptr != TESTvalue. |
236 | */ | 254 | */ |
237 | isa_writeb(0x42, ptr); | 255 | writeb(0x42, base); |
238 | if (isa_readb(ptr) != 0x42) { | 256 | if (readb(base) != 0x42) { |
239 | BUGMSG2(D_INIT_REASONS, "(read only)\n"); | 257 | BUGMSG2(D_INIT_REASONS, "(read only)\n"); |
240 | BUGMSG2(D_INIT_REASONS, "S3: "); | 258 | BUGMSG2(D_INIT_REASONS, "S3: "); |
241 | release_mem_region(*p, BUFFER_SIZE); | 259 | goto out2; |
242 | *p-- = shmems[--numshmems]; | ||
243 | continue; | ||
244 | } | 260 | } |
245 | BUGMSG2(D_INIT_REASONS, "\n"); | 261 | BUGMSG2(D_INIT_REASONS, "\n"); |
246 | BUGMSG2(D_INIT_REASONS, "S3: "); | 262 | BUGMSG2(D_INIT_REASONS, "S3: "); |
247 | BUGLVL(D_INIT_REASONS) numprint = 0; | 263 | BUGLVL(D_INIT_REASONS) numprint = 0; |
264 | iomem[index] = base; | ||
265 | continue; | ||
266 | out2: | ||
267 | iounmap(base); | ||
268 | out1: | ||
269 | release_mem_region(*p, MIRROR_SIZE); | ||
270 | out: | ||
271 | *p-- = shmems[--numshmems]; | ||
272 | index--; | ||
248 | } | 273 | } |
249 | BUGMSG2(D_INIT, "\n"); | 274 | BUGMSG2(D_INIT, "\n"); |
250 | 275 | ||
@@ -252,6 +277,8 @@ static void __init com90xx_probe(void) | |||
252 | BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n"); | 277 | BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n"); |
253 | for (port = &ports[0]; port < ports + numports; port++) | 278 | for (port = &ports[0]; port < ports + numports; port++) |
254 | release_region(*port, ARCNET_TOTAL_SIZE); | 279 | release_region(*port, ARCNET_TOTAL_SIZE); |
280 | kfree(shmems); | ||
281 | kfree(iomem); | ||
255 | return; | 282 | return; |
256 | } | 283 | } |
257 | /* Stage 4: something of a dummy, to report the shmems that are | 284 | /* Stage 4: something of a dummy, to report the shmems that are |
@@ -351,30 +378,32 @@ static void __init com90xx_probe(void) | |||
351 | mdelay(RESETtime); | 378 | mdelay(RESETtime); |
352 | } else { | 379 | } else { |
353 | /* just one shmem and port, assume they match */ | 380 | /* just one shmem and port, assume they match */ |
354 | isa_writeb(TESTvalue, shmems[0]); | 381 | writeb(TESTvalue, iomem[0]); |
355 | } | 382 | } |
356 | #else | 383 | #else |
357 | inb(_RESET); | 384 | inb(_RESET); |
358 | mdelay(RESETtime); | 385 | mdelay(RESETtime); |
359 | #endif | 386 | #endif |
360 | 387 | ||
361 | for (p = &shmems[0]; p < shmems + numshmems; p++) { | 388 | for (index = 0; index < numshmems; index++) { |
362 | u_long ptr = *p; | 389 | u_long ptr = shmems[index]; |
390 | void __iomem *base = iomem[index]; | ||
363 | 391 | ||
364 | if (isa_readb(ptr) == TESTvalue) { /* found one */ | 392 | if (readb(base) == TESTvalue) { /* found one */ |
365 | BUGMSG2(D_INIT, "%lXh)\n", *p); | 393 | BUGMSG2(D_INIT, "%lXh)\n", *p); |
366 | openparen = 0; | 394 | openparen = 0; |
367 | 395 | ||
368 | /* register the card */ | 396 | /* register the card */ |
369 | if (com90xx_found(*port, airq, *p) == 0) | 397 | if (com90xx_found(*port, airq, ptr, base) == 0) |
370 | found = 1; | 398 | found = 1; |
371 | numprint = -1; | 399 | numprint = -1; |
372 | 400 | ||
373 | /* remove shmem from the list */ | 401 | /* remove shmem from the list */ |
374 | *p = shmems[--numshmems]; | 402 | shmems[index] = shmems[--numshmems]; |
403 | iomem[index] = iomem[numshmems]; | ||
375 | break; /* go to the next I/O port */ | 404 | break; /* go to the next I/O port */ |
376 | } else { | 405 | } else { |
377 | BUGMSG2(D_INIT_REASONS, "%Xh-", isa_readb(ptr)); | 406 | BUGMSG2(D_INIT_REASONS, "%Xh-", readb(base)); |
378 | } | 407 | } |
379 | } | 408 | } |
380 | 409 | ||
@@ -391,17 +420,40 @@ static void __init com90xx_probe(void) | |||
391 | BUGLVL(D_INIT_REASONS) printk("\n"); | 420 | BUGLVL(D_INIT_REASONS) printk("\n"); |
392 | 421 | ||
393 | /* Now put back TESTvalue on all leftover shmems. */ | 422 | /* Now put back TESTvalue on all leftover shmems. */ |
394 | for (p = &shmems[0]; p < shmems + numshmems; p++) { | 423 | for (index = 0; index < numshmems; index++) { |
395 | isa_writeb(TESTvalue, *p); | 424 | writeb(TESTvalue, iomem[index]); |
396 | release_mem_region(*p, BUFFER_SIZE); | 425 | iounmap(iomem[index]); |
426 | release_mem_region(shmems[index], MIRROR_SIZE); | ||
397 | } | 427 | } |
428 | kfree(shmems); | ||
429 | kfree(iomem); | ||
398 | } | 430 | } |
399 | 431 | ||
432 | static int check_mirror(unsigned long addr, size_t size) | ||
433 | { | ||
434 | void __iomem *p; | ||
435 | int res = -1; | ||
436 | |||
437 | if (!request_mem_region(addr, size, "arcnet (90xx)")) | ||
438 | return -1; | ||
439 | |||
440 | p = ioremap(addr, size); | ||
441 | if (p) { | ||
442 | if (readb(p) == TESTvalue) | ||
443 | res = 1; | ||
444 | else | ||
445 | res = 0; | ||
446 | iounmap(p); | ||
447 | } | ||
448 | |||
449 | release_mem_region(addr, size); | ||
450 | return res; | ||
451 | } | ||
400 | 452 | ||
401 | /* Set up the struct net_device associated with this card. Called after | 453 | /* Set up the struct net_device associated with this card. Called after |
402 | * probing succeeds. | 454 | * probing succeeds. |
403 | */ | 455 | */ |
404 | static int __init com90xx_found(int ioaddr, int airq, u_long shmem) | 456 | static int __init com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *p) |
405 | { | 457 | { |
406 | struct net_device *dev = NULL; | 458 | struct net_device *dev = NULL; |
407 | struct arcnet_local *lp; | 459 | struct arcnet_local *lp; |
@@ -412,7 +464,8 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem) | |||
412 | dev = alloc_arcdev(device); | 464 | dev = alloc_arcdev(device); |
413 | if (!dev) { | 465 | if (!dev) { |
414 | BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n"); | 466 | BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n"); |
415 | release_mem_region(shmem, BUFFER_SIZE); | 467 | iounmap(p); |
468 | release_mem_region(shmem, MIRROR_SIZE); | ||
416 | return -ENOMEM; | 469 | return -ENOMEM; |
417 | } | 470 | } |
418 | lp = dev->priv; | 471 | lp = dev->priv; |
@@ -423,24 +476,27 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem) | |||
423 | * 2k (or there are no mirrors at all) but on some, it's 4k. | 476 | * 2k (or there are no mirrors at all) but on some, it's 4k. |
424 | */ | 477 | */ |
425 | mirror_size = MIRROR_SIZE; | 478 | mirror_size = MIRROR_SIZE; |
426 | if (isa_readb(shmem) == TESTvalue | 479 | if (readb(p) == TESTvalue && |
427 | && isa_readb(shmem - mirror_size) != TESTvalue | 480 | check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 && |
428 | && isa_readb(shmem - 2 * mirror_size) == TESTvalue) | 481 | check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) |
429 | mirror_size *= 2; | 482 | mirror_size = 2 * MIRROR_SIZE; |
430 | 483 | ||
431 | first_mirror = last_mirror = shmem; | 484 | first_mirror = shmem - mirror_size; |
432 | while (isa_readb(first_mirror) == TESTvalue) | 485 | while (check_mirror(first_mirror, mirror_size) == 1) |
433 | first_mirror -= mirror_size; | 486 | first_mirror -= mirror_size; |
434 | first_mirror += mirror_size; | 487 | first_mirror += mirror_size; |
435 | 488 | ||
436 | while (isa_readb(last_mirror) == TESTvalue) | 489 | last_mirror = shmem + mirror_size; |
490 | while (check_mirror(last_mirror, mirror_size) == 1) | ||
437 | last_mirror += mirror_size; | 491 | last_mirror += mirror_size; |
438 | last_mirror -= mirror_size; | 492 | last_mirror -= mirror_size; |
439 | 493 | ||
440 | dev->mem_start = first_mirror; | 494 | dev->mem_start = first_mirror; |
441 | dev->mem_end = last_mirror + MIRROR_SIZE - 1; | 495 | dev->mem_end = last_mirror + MIRROR_SIZE - 1; |
442 | 496 | ||
443 | release_mem_region(shmem, BUFFER_SIZE); | 497 | iounmap(p); |
498 | release_mem_region(shmem, MIRROR_SIZE); | ||
499 | |||
444 | if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)")) | 500 | if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)")) |
445 | goto err_free_dev; | 501 | goto err_free_dev; |
446 | 502 | ||
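The com90xx hunks above replace the deprecated isa_readb()/isa_writeb() accessors with an explicit reserve/map/access/unmap sequence. A minimal sketch of that pattern, modelled on the check_mirror() helper the patch adds; probe_byte() and the "example" resource name are illustrative only:

static int probe_byte(unsigned long phys, size_t len, u8 expect)
{
	void __iomem *va;
	int hit = -1;

	if (!request_mem_region(phys, len, "example"))
		return -1;			/* region already claimed */

	va = ioremap(phys, len);
	if (va) {
		hit = (readb(va) == expect);	/* 1 on match, 0 on mismatch */
		iounmap(va);
	}
	release_mem_region(phys, len);
	return hit;				/* -1 means the probe never ran */
}

The same bookkeeping explains the new iomem[] array in the probe loop: each shared-memory candidate now carries both its physical address and its ioremap() cookie, and both must be released together.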
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 6d7913704fb5..6d6c69f036ef 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c | |||
@@ -43,7 +43,7 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, | |||
43 | int bufnum); | 43 | int bufnum); |
44 | 44 | ||
45 | 45 | ||
46 | struct ArcProto rfc1051_proto = | 46 | static struct ArcProto rfc1051_proto = |
47 | { | 47 | { |
48 | .suffix = 's', | 48 | .suffix = 's', |
49 | .mtu = XMTU - RFC1051_HDR_SIZE, | 49 | .mtu = XMTU - RFC1051_HDR_SIZE, |
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index 6b6ae4bf3d39..bee34226abfa 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c | |||
@@ -43,7 +43,7 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, | |||
43 | int bufnum); | 43 | int bufnum); |
44 | static int continue_tx(struct net_device *dev, int bufnum); | 44 | static int continue_tx(struct net_device *dev, int bufnum); |
45 | 45 | ||
46 | struct ArcProto rfc1201_proto = | 46 | static struct ArcProto rfc1201_proto = |
47 | { | 47 | { |
48 | .suffix = 'a', | 48 | .suffix = 'a', |
49 | .mtu = 1500, /* could be more, but some receivers can't handle it... */ | 49 | .mtu = 1500, /* could be more, but some receivers can't handle it... */ |
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 6a93b666eb72..d52deb8d2075 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <linux/init.h> | 47 | #include <linux/init.h> |
48 | #include <linux/bitops.h> | 48 | #include <linux/bitops.h> |
49 | #include <linux/jiffies.h> | ||
49 | 50 | ||
50 | #include <asm/system.h> | 51 | #include <asm/system.h> |
51 | #include <asm/ecard.h> | 52 | #include <asm/ecard.h> |
@@ -355,7 +356,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf | |||
355 | dma_start = jiffies; | 356 | dma_start = jiffies; |
356 | 357 | ||
357 | while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0) | 358 | while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0) |
358 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 359 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
359 | printk(KERN_ERR "%s: timeout waiting for TX RDC\n", | 360 | printk(KERN_ERR "%s: timeout waiting for TX RDC\n", |
360 | dev->name); | 361 | dev->name); |
361 | etherh_reset (dev); | 362 | etherh_reset (dev); |
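The etherh change above swaps the open-coded "jiffies - dma_start > 2*HZ/100" test for time_after(), which keeps the 20 ms timeout correct when the jiffies counter wraps; that is also why <linux/jiffies.h> is now included. A minimal sketch of the wrap-safe idiom, with poll_done() standing in for the real status read:

static int wait_ready(struct net_device *dev)
{
	unsigned long deadline = jiffies + 2 * HZ / 100;	/* 20 ms from now */

	while (!poll_done(dev)) {		/* poll_done(): illustrative stand-in */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* deadline passed, even across a wrap */
		cpu_relax();
	}
	return 0;
}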
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index a24200d0a616..b787b6582e50 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -46,7 +46,7 @@ typedef enum { | |||
46 | } board_t; | 46 | } board_t; |
47 | 47 | ||
48 | /* indexed by board_t, above */ | 48 | /* indexed by board_t, above */ |
49 | static struct { | 49 | static const struct { |
50 | char *name; | 50 | char *name; |
51 | } board_info[] __devinitdata = { | 51 | } board_info[] __devinitdata = { |
52 | { "Broadcom NetXtreme II BCM5706 1000Base-T" }, | 52 | { "Broadcom NetXtreme II BCM5706 1000Base-T" }, |
@@ -3476,7 +3476,7 @@ bnx2_test_registers(struct bnx2 *bp) | |||
3476 | { | 3476 | { |
3477 | int ret; | 3477 | int ret; |
3478 | int i; | 3478 | int i; |
3479 | static struct { | 3479 | static const struct { |
3480 | u16 offset; | 3480 | u16 offset; |
3481 | u16 flags; | 3481 | u16 flags; |
3482 | u32 rw_mask; | 3482 | u32 rw_mask; |
@@ -3891,7 +3891,7 @@ reg_test_err: | |||
3891 | static int | 3891 | static int |
3892 | bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) | 3892 | bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) |
3893 | { | 3893 | { |
3894 | static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, | 3894 | static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, |
3895 | 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; | 3895 | 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; |
3896 | int i; | 3896 | int i; |
3897 | 3897 | ||
@@ -3916,7 +3916,7 @@ bnx2_test_memory(struct bnx2 *bp) | |||
3916 | { | 3916 | { |
3917 | int ret = 0; | 3917 | int ret = 0; |
3918 | int i; | 3918 | int i; |
3919 | static struct { | 3919 | static const struct { |
3920 | u32 offset; | 3920 | u32 offset; |
3921 | u32 len; | 3921 | u32 len; |
3922 | } mem_tbl[] = { | 3922 | } mem_tbl[] = { |
@@ -5122,7 +5122,7 @@ static struct { | |||
5122 | 5122 | ||
5123 | #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) | 5123 | #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) |
5124 | 5124 | ||
5125 | static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { | 5125 | static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { |
5126 | STATS_OFFSET32(stat_IfHCInOctets_hi), | 5126 | STATS_OFFSET32(stat_IfHCInOctets_hi), |
5127 | STATS_OFFSET32(stat_IfHCInBadOctets_hi), | 5127 | STATS_OFFSET32(stat_IfHCInBadOctets_hi), |
5128 | STATS_OFFSET32(stat_IfHCOutOctets_hi), | 5128 | STATS_OFFSET32(stat_IfHCOutOctets_hi), |
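The bnx2 hunks above (like the later dgrs, dl2k and chelsio ones) constify tables and scalars that are only ever read, so they land in .rodata and any accidental store becomes a compile error. A minimal sketch of the pattern; the table name and entries are placeholders, not taken from the driver:

static const struct {
	u16 offset;
	u16 flags;
} reg_tbl[] = {
	{ 0x0068, 0 },		/* placeholder entries */
	{ 0x006c, 1 },
};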
diff --git a/drivers/net/bnx2_fw.h b/drivers/net/bnx2_fw.h index 0c21bd849814..8158974c35a8 100644 --- a/drivers/net/bnx2_fw.h +++ b/drivers/net/bnx2_fw.h | |||
@@ -14,20 +14,20 @@ | |||
14 | * accompanying it. | 14 | * accompanying it. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | static int bnx2_COM_b06FwReleaseMajor = 0x1; | 17 | static const int bnx2_COM_b06FwReleaseMajor = 0x1; |
18 | static int bnx2_COM_b06FwReleaseMinor = 0x0; | 18 | static const int bnx2_COM_b06FwReleaseMinor = 0x0; |
19 | static int bnx2_COM_b06FwReleaseFix = 0x0; | 19 | static const int bnx2_COM_b06FwReleaseFix = 0x0; |
20 | static u32 bnx2_COM_b06FwStartAddr = 0x080008b4; | 20 | static const u32 bnx2_COM_b06FwStartAddr = 0x080008b4; |
21 | static u32 bnx2_COM_b06FwTextAddr = 0x08000000; | 21 | static const u32 bnx2_COM_b06FwTextAddr = 0x08000000; |
22 | static int bnx2_COM_b06FwTextLen = 0x57bc; | 22 | static const int bnx2_COM_b06FwTextLen = 0x57bc; |
23 | static u32 bnx2_COM_b06FwDataAddr = 0x08005840; | 23 | static const u32 bnx2_COM_b06FwDataAddr = 0x08005840; |
24 | static int bnx2_COM_b06FwDataLen = 0x0; | 24 | static const int bnx2_COM_b06FwDataLen = 0x0; |
25 | static u32 bnx2_COM_b06FwRodataAddr = 0x080057c0; | 25 | static const u32 bnx2_COM_b06FwRodataAddr = 0x080057c0; |
26 | static int bnx2_COM_b06FwRodataLen = 0x58; | 26 | static const int bnx2_COM_b06FwRodataLen = 0x58; |
27 | static u32 bnx2_COM_b06FwBssAddr = 0x08005860; | 27 | static const u32 bnx2_COM_b06FwBssAddr = 0x08005860; |
28 | static int bnx2_COM_b06FwBssLen = 0x88; | 28 | static const int bnx2_COM_b06FwBssLen = 0x88; |
29 | static u32 bnx2_COM_b06FwSbssAddr = 0x08005840; | 29 | static const u32 bnx2_COM_b06FwSbssAddr = 0x08005840; |
30 | static int bnx2_COM_b06FwSbssLen = 0x1c; | 30 | static const int bnx2_COM_b06FwSbssLen = 0x1c; |
31 | static u32 bnx2_COM_b06FwText[(0x57bc/4) + 1] = { | 31 | static u32 bnx2_COM_b06FwText[(0x57bc/4) + 1] = { |
32 | 0x0a00022d, 0x00000000, 0x00000000, 0x0000000d, 0x636f6d20, 0x322e352e, | 32 | 0x0a00022d, 0x00000000, 0x00000000, 0x0000000d, 0x636f6d20, 0x322e352e, |
33 | 0x38000000, 0x02050802, 0x00000000, 0x00000003, 0x00000014, 0x00000032, | 33 | 0x38000000, 0x02050802, 0x00000000, 0x00000003, 0x00000014, 0x00000032, |
@@ -2325,20 +2325,20 @@ static u32 bnx2_rv2p_proc2[] = { | |||
2325 | 0x0000000c, 0x29520000, 0x00000018, 0x80000002, 0x0000000c, 0x29800000, | 2325 | 0x0000000c, 0x29520000, 0x00000018, 0x80000002, 0x0000000c, 0x29800000, |
2326 | 0x00000018, 0x00570000 }; | 2326 | 0x00000018, 0x00570000 }; |
2327 | 2327 | ||
2328 | static int bnx2_TPAT_b06FwReleaseMajor = 0x1; | 2328 | static const int bnx2_TPAT_b06FwReleaseMajor = 0x1; |
2329 | static int bnx2_TPAT_b06FwReleaseMinor = 0x0; | 2329 | static const int bnx2_TPAT_b06FwReleaseMinor = 0x0; |
2330 | static int bnx2_TPAT_b06FwReleaseFix = 0x0; | 2330 | static const int bnx2_TPAT_b06FwReleaseFix = 0x0; |
2331 | static u32 bnx2_TPAT_b06FwStartAddr = 0x08000860; | 2331 | static const u32 bnx2_TPAT_b06FwStartAddr = 0x08000860; |
2332 | static u32 bnx2_TPAT_b06FwTextAddr = 0x08000800; | 2332 | static const u32 bnx2_TPAT_b06FwTextAddr = 0x08000800; |
2333 | static int bnx2_TPAT_b06FwTextLen = 0x122c; | 2333 | static const int bnx2_TPAT_b06FwTextLen = 0x122c; |
2334 | static u32 bnx2_TPAT_b06FwDataAddr = 0x08001a60; | 2334 | static const u32 bnx2_TPAT_b06FwDataAddr = 0x08001a60; |
2335 | static int bnx2_TPAT_b06FwDataLen = 0x0; | 2335 | static const int bnx2_TPAT_b06FwDataLen = 0x0; |
2336 | static u32 bnx2_TPAT_b06FwRodataAddr = 0x00000000; | 2336 | static const u32 bnx2_TPAT_b06FwRodataAddr = 0x00000000; |
2337 | static int bnx2_TPAT_b06FwRodataLen = 0x0; | 2337 | static const int bnx2_TPAT_b06FwRodataLen = 0x0; |
2338 | static u32 bnx2_TPAT_b06FwBssAddr = 0x08001aa0; | 2338 | static const u32 bnx2_TPAT_b06FwBssAddr = 0x08001aa0; |
2339 | static int bnx2_TPAT_b06FwBssLen = 0x250; | 2339 | static const int bnx2_TPAT_b06FwBssLen = 0x250; |
2340 | static u32 bnx2_TPAT_b06FwSbssAddr = 0x08001a60; | 2340 | static const u32 bnx2_TPAT_b06FwSbssAddr = 0x08001a60; |
2341 | static int bnx2_TPAT_b06FwSbssLen = 0x34; | 2341 | static const int bnx2_TPAT_b06FwSbssLen = 0x34; |
2342 | static u32 bnx2_TPAT_b06FwText[(0x122c/4) + 1] = { | 2342 | static u32 bnx2_TPAT_b06FwText[(0x122c/4) + 1] = { |
2343 | 0x0a000218, 0x00000000, 0x00000000, 0x0000000d, 0x74706174, 0x20322e35, | 2343 | 0x0a000218, 0x00000000, 0x00000000, 0x0000000d, 0x74706174, 0x20322e35, |
2344 | 0x2e313100, 0x02050b01, 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 2344 | 0x2e313100, 0x02050b01, 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
@@ -2540,20 +2540,20 @@ static u32 bnx2_TPAT_b06FwRodata[(0x0/4) + 1] = { 0x0 }; | |||
2540 | static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 }; | 2540 | static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 }; |
2541 | static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 }; | 2541 | static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 }; |
2542 | 2542 | ||
2543 | static int bnx2_TXP_b06FwReleaseMajor = 0x1; | 2543 | static const int bnx2_TXP_b06FwReleaseMajor = 0x1; |
2544 | static int bnx2_TXP_b06FwReleaseMinor = 0x0; | 2544 | static const int bnx2_TXP_b06FwReleaseMinor = 0x0; |
2545 | static int bnx2_TXP_b06FwReleaseFix = 0x0; | 2545 | static const int bnx2_TXP_b06FwReleaseFix = 0x0; |
2546 | static u32 bnx2_TXP_b06FwStartAddr = 0x080034b0; | 2546 | static const u32 bnx2_TXP_b06FwStartAddr = 0x080034b0; |
2547 | static u32 bnx2_TXP_b06FwTextAddr = 0x08000000; | 2547 | static const u32 bnx2_TXP_b06FwTextAddr = 0x08000000; |
2548 | static int bnx2_TXP_b06FwTextLen = 0x5748; | 2548 | static const int bnx2_TXP_b06FwTextLen = 0x5748; |
2549 | static u32 bnx2_TXP_b06FwDataAddr = 0x08005760; | 2549 | static const u32 bnx2_TXP_b06FwDataAddr = 0x08005760; |
2550 | static int bnx2_TXP_b06FwDataLen = 0x0; | 2550 | static const int bnx2_TXP_b06FwDataLen = 0x0; |
2551 | static u32 bnx2_TXP_b06FwRodataAddr = 0x00000000; | 2551 | static const u32 bnx2_TXP_b06FwRodataAddr = 0x00000000; |
2552 | static int bnx2_TXP_b06FwRodataLen = 0x0; | 2552 | static const int bnx2_TXP_b06FwRodataLen = 0x0; |
2553 | static u32 bnx2_TXP_b06FwBssAddr = 0x080057a0; | 2553 | static const u32 bnx2_TXP_b06FwBssAddr = 0x080057a0; |
2554 | static int bnx2_TXP_b06FwBssLen = 0x1c4; | 2554 | static const int bnx2_TXP_b06FwBssLen = 0x1c4; |
2555 | static u32 bnx2_TXP_b06FwSbssAddr = 0x08005760; | 2555 | static const u32 bnx2_TXP_b06FwSbssAddr = 0x08005760; |
2556 | static int bnx2_TXP_b06FwSbssLen = 0x38; | 2556 | static const int bnx2_TXP_b06FwSbssLen = 0x38; |
2557 | static u32 bnx2_TXP_b06FwText[(0x5748/4) + 1] = { | 2557 | static u32 bnx2_TXP_b06FwText[(0x5748/4) + 1] = { |
2558 | 0x0a000d2c, 0x00000000, 0x00000000, 0x0000000d, 0x74787020, 0x322e352e, | 2558 | 0x0a000d2c, 0x00000000, 0x00000000, 0x0000000d, 0x74787020, 0x322e352e, |
2559 | 0x38000000, 0x02050800, 0x0000000a, 0x000003e8, 0x0000ea60, 0x00000000, | 2559 | 0x38000000, 0x02050800, 0x0000000a, 0x000003e8, 0x0000ea60, 0x00000000, |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index f2a63186ae05..e83bc825f6af 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1261,7 +1261,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1261 | struct ethhdr *eth_data; | 1261 | struct ethhdr *eth_data; |
1262 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | 1262 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); |
1263 | struct slave *tx_slave = NULL; | 1263 | struct slave *tx_slave = NULL; |
1264 | static u32 ip_bcast = 0xffffffff; | 1264 | static const u32 ip_bcast = 0xffffffff; |
1265 | int hash_size = 0; | 1265 | int hash_size = 0; |
1266 | int do_tx_balance = 1; | 1266 | int do_tx_balance = 1; |
1267 | u32 hash_index = 0; | 1267 | u32 hash_index = 0; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bcf9f17daf0d..2d0ac169a86c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -131,7 +131,7 @@ MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); | |||
131 | 131 | ||
132 | /*----------------------------- Global variables ----------------------------*/ | 132 | /*----------------------------- Global variables ----------------------------*/ |
133 | 133 | ||
134 | static const char *version = | 134 | static const char * const version = |
135 | DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; | 135 | DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; |
136 | 136 | ||
137 | LIST_HEAD(bond_dev_list); | 137 | LIST_HEAD(bond_dev_list); |
@@ -1040,6 +1040,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
1040 | if ((bond->params.mode == BOND_MODE_TLB) || | 1040 | if ((bond->params.mode == BOND_MODE_TLB) || |
1041 | (bond->params.mode == BOND_MODE_ALB)) { | 1041 | (bond->params.mode == BOND_MODE_ALB)) { |
1042 | bond_alb_handle_active_change(bond, new_active); | 1042 | bond_alb_handle_active_change(bond, new_active); |
1043 | if (old_active) | ||
1044 | bond_set_slave_inactive_flags(old_active); | ||
1045 | if (new_active) | ||
1046 | bond_set_slave_active_flags(new_active); | ||
1043 | } else { | 1047 | } else { |
1044 | bond->curr_active_slave = new_active; | 1048 | bond->curr_active_slave = new_active; |
1045 | } | 1049 | } |
@@ -1443,15 +1447,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1443 | 1447 | ||
1444 | switch (bond->params.mode) { | 1448 | switch (bond->params.mode) { |
1445 | case BOND_MODE_ACTIVEBACKUP: | 1449 | case BOND_MODE_ACTIVEBACKUP: |
1446 | /* if we're in active-backup mode, we need one and only one active | 1450 | /* if we're in active-backup mode, we need one and |
1447 | * interface. The backup interfaces will have their NOARP flag set | 1451 | * only one active interface. The backup interfaces |
1448 | * because we need them to be completely deaf and not to respond to | 1452 | * will have their SLAVE_INACTIVE flag set because we |
1449 | * any ARP request on the network to avoid fooling a switch. Thus, | 1453 | * need them to drop all packets. Thus, since we |
1450 | * since we guarantee that curr_active_slave always point to the last | 1454 | * guarantee that curr_active_slave always points to |
1451 | * usable interface, we just have to verify this interface's flag. | 1455 | * the last usable interface, we just have to verify |
1456 | * this interface's flag. | ||
1452 | */ | 1457 | */ |
1453 | if (((!bond->curr_active_slave) || | 1458 | if (((!bond->curr_active_slave) || |
1454 | (bond->curr_active_slave->dev->flags & IFF_NOARP)) && | 1459 | (bond->curr_active_slave->dev->priv_flags & IFF_SLAVE_INACTIVE)) && |
1455 | (new_slave->link != BOND_LINK_DOWN)) { | 1460 | (new_slave->link != BOND_LINK_DOWN)) { |
1456 | dprintk("This is the first active slave\n"); | 1461 | dprintk("This is the first active slave\n"); |
1457 | /* first slave or no active slave yet, and this link | 1462 | /* first slave or no active slave yet, and this link |
@@ -1492,6 +1497,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1492 | * is OK, so make this interface the active one | 1497 | * is OK, so make this interface the active one |
1493 | */ | 1498 | */ |
1494 | bond_change_active_slave(bond, new_slave); | 1499 | bond_change_active_slave(bond, new_slave); |
1500 | } else { | ||
1501 | bond_set_slave_inactive_flags(new_slave); | ||
1495 | } | 1502 | } |
1496 | break; | 1503 | break; |
1497 | default: | 1504 | default: |
@@ -1724,13 +1731,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1724 | addr.sa_family = slave_dev->type; | 1731 | addr.sa_family = slave_dev->type; |
1725 | dev_set_mac_address(slave_dev, &addr); | 1732 | dev_set_mac_address(slave_dev, &addr); |
1726 | 1733 | ||
1727 | /* restore the original state of the | 1734 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | |
1728 | * IFF_NOARP flag that might have been | 1735 | IFF_SLAVE_INACTIVE); |
1729 | * set by bond_set_slave_inactive_flags() | ||
1730 | */ | ||
1731 | if ((slave->original_flags & IFF_NOARP) == 0) { | ||
1732 | slave_dev->flags &= ~IFF_NOARP; | ||
1733 | } | ||
1734 | 1736 | ||
1735 | kfree(slave); | 1737 | kfree(slave); |
1736 | 1738 | ||
@@ -1816,12 +1818,8 @@ static int bond_release_all(struct net_device *bond_dev) | |||
1816 | addr.sa_family = slave_dev->type; | 1818 | addr.sa_family = slave_dev->type; |
1817 | dev_set_mac_address(slave_dev, &addr); | 1819 | dev_set_mac_address(slave_dev, &addr); |
1818 | 1820 | ||
1819 | /* restore the original state of the IFF_NOARP flag that might have | 1821 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | |
1820 | * been set by bond_set_slave_inactive_flags() | 1822 | IFF_SLAVE_INACTIVE); |
1821 | */ | ||
1822 | if ((slave->original_flags & IFF_NOARP) == 0) { | ||
1823 | slave_dev->flags &= ~IFF_NOARP; | ||
1824 | } | ||
1825 | 1823 | ||
1826 | kfree(slave); | 1824 | kfree(slave); |
1827 | 1825 | ||
@@ -4061,14 +4059,17 @@ void bond_set_mode_ops(struct bonding *bond, int mode) | |||
4061 | bond_dev->hard_start_xmit = bond_xmit_broadcast; | 4059 | bond_dev->hard_start_xmit = bond_xmit_broadcast; |
4062 | break; | 4060 | break; |
4063 | case BOND_MODE_8023AD: | 4061 | case BOND_MODE_8023AD: |
4062 | bond_set_master_3ad_flags(bond); | ||
4064 | bond_dev->hard_start_xmit = bond_3ad_xmit_xor; | 4063 | bond_dev->hard_start_xmit = bond_3ad_xmit_xor; |
4065 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) | 4064 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) |
4066 | bond->xmit_hash_policy = bond_xmit_hash_policy_l34; | 4065 | bond->xmit_hash_policy = bond_xmit_hash_policy_l34; |
4067 | else | 4066 | else |
4068 | bond->xmit_hash_policy = bond_xmit_hash_policy_l2; | 4067 | bond->xmit_hash_policy = bond_xmit_hash_policy_l2; |
4069 | break; | 4068 | break; |
4070 | case BOND_MODE_TLB: | ||
4071 | case BOND_MODE_ALB: | 4069 | case BOND_MODE_ALB: |
4070 | bond_set_master_alb_flags(bond); | ||
4071 | /* FALLTHRU */ | ||
4072 | case BOND_MODE_TLB: | ||
4072 | bond_dev->hard_start_xmit = bond_alb_xmit; | 4073 | bond_dev->hard_start_xmit = bond_alb_xmit; |
4073 | bond_dev->set_mac_address = bond_alb_set_mac_address; | 4074 | bond_dev->set_mac_address = bond_alb_set_mac_address; |
4074 | break; | 4075 | break; |
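A small detail in the bond_main hunk above: the version banner goes from "const char *" to "const char * const", so the pointer itself is read-only as well as the string it points to. Illustration:

static const char *banner_a        = "v1.0\n";	/* string read-only, pointer still writable */
static const char * const banner_b = "v1.0\n";	/* string and pointer both read-only */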
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 041bcc583557..5a9bd95884be 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -424,6 +424,12 @@ static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size | |||
424 | ret = -EINVAL; | 424 | ret = -EINVAL; |
425 | goto out; | 425 | goto out; |
426 | } else { | 426 | } else { |
427 | if (bond->params.mode == BOND_MODE_8023AD) | ||
428 | bond_unset_master_3ad_flags(bond); | ||
429 | |||
430 | if (bond->params.mode == BOND_MODE_ALB) | ||
431 | bond_unset_master_alb_flags(bond); | ||
432 | |||
427 | bond->params.mode = new_value; | 433 | bond->params.mode = new_value; |
428 | bond_set_mode_ops(bond, bond->params.mode); | 434 | bond_set_mode_ops(bond, bond->params.mode); |
429 | printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n", | 435 | printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n", |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 3dd78d048c3e..ce9dc9b4e2dc 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -22,8 +22,8 @@ | |||
22 | #include "bond_3ad.h" | 22 | #include "bond_3ad.h" |
23 | #include "bond_alb.h" | 23 | #include "bond_alb.h" |
24 | 24 | ||
25 | #define DRV_VERSION "3.0.1" | 25 | #define DRV_VERSION "3.0.2" |
26 | #define DRV_RELDATE "January 9, 2006" | 26 | #define DRV_RELDATE "February 21, 2006" |
27 | #define DRV_NAME "bonding" | 27 | #define DRV_NAME "bonding" |
28 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | 28 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" |
29 | 29 | ||
@@ -230,14 +230,37 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) | |||
230 | 230 | ||
231 | static inline void bond_set_slave_inactive_flags(struct slave *slave) | 231 | static inline void bond_set_slave_inactive_flags(struct slave *slave) |
232 | { | 232 | { |
233 | slave->state = BOND_STATE_BACKUP; | 233 | struct bonding *bond = slave->dev->master->priv; |
234 | slave->dev->flags |= IFF_NOARP; | 234 | if (bond->params.mode != BOND_MODE_TLB && |
235 | bond->params.mode != BOND_MODE_ALB) | ||
236 | slave->state = BOND_STATE_BACKUP; | ||
237 | slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; | ||
235 | } | 238 | } |
236 | 239 | ||
237 | static inline void bond_set_slave_active_flags(struct slave *slave) | 240 | static inline void bond_set_slave_active_flags(struct slave *slave) |
238 | { | 241 | { |
239 | slave->state = BOND_STATE_ACTIVE; | 242 | slave->state = BOND_STATE_ACTIVE; |
240 | slave->dev->flags &= ~IFF_NOARP; | 243 | slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE; |
244 | } | ||
245 | |||
246 | static inline void bond_set_master_3ad_flags(struct bonding *bond) | ||
247 | { | ||
248 | bond->dev->priv_flags |= IFF_MASTER_8023AD; | ||
249 | } | ||
250 | |||
251 | static inline void bond_unset_master_3ad_flags(struct bonding *bond) | ||
252 | { | ||
253 | bond->dev->priv_flags &= ~IFF_MASTER_8023AD; | ||
254 | } | ||
255 | |||
256 | static inline void bond_set_master_alb_flags(struct bonding *bond) | ||
257 | { | ||
258 | bond->dev->priv_flags |= IFF_MASTER_ALB; | ||
259 | } | ||
260 | |||
261 | static inline void bond_unset_master_alb_flags(struct bonding *bond) | ||
262 | { | ||
263 | bond->dev->priv_flags &= ~IFF_MASTER_ALB; | ||
241 | } | 264 | } |
242 | 265 | ||
243 | struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); | 266 | struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); |
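Taken together, the bonding changes stop muting backup slaves with the public IFF_NOARP flag and instead record the state in dev->priv_flags via IFF_SLAVE_INACTIVE, with IFF_MASTER_8023AD and IFF_MASTER_ALB marking the master's mode. A minimal sketch of how a backup port is now recognised; the helper name is illustrative, the flag is the one this series relies on:

static inline int bond_slave_is_inactive(const struct slave *slave)
{
	return (slave->dev->priv_flags & IFF_SLAVE_INACTIVE) != 0;
}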
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c index e824acaf188a..542e5e065c6f 100644 --- a/drivers/net/chelsio/espi.c +++ b/drivers/net/chelsio/espi.c | |||
@@ -87,15 +87,9 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr, | |||
87 | static int tricn_init(adapter_t *adapter) | 87 | static int tricn_init(adapter_t *adapter) |
88 | { | 88 | { |
89 | int i = 0; | 89 | int i = 0; |
90 | int sme = 1; | ||
91 | int stat = 0; | 90 | int stat = 0; |
92 | int timeout = 0; | 91 | int timeout = 0; |
93 | int is_ready = 0; | 92 | int is_ready = 0; |
94 | int dynamic_deskew = 0; | ||
95 | |||
96 | if (dynamic_deskew) | ||
97 | sme = 0; | ||
98 | |||
99 | 93 | ||
100 | /* 1 */ | 94 | /* 1 */ |
101 | timeout=1000; | 95 | timeout=1000; |
@@ -113,11 +107,9 @@ static int tricn_init(adapter_t *adapter) | |||
113 | } | 107 | } |
114 | 108 | ||
115 | /* 2 */ | 109 | /* 2 */ |
116 | if (sme) { | 110 | tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); |
117 | tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); | 111 | tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); |
118 | tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); | 112 | tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); |
119 | tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); | ||
120 | } | ||
121 | for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); | 113 | for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); |
122 | for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); | 114 | for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); |
123 | for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); | 115 | for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); |
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 1ebb5d149aef..12e4e96dba2d 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c | |||
@@ -686,7 +686,7 @@ int t1_init_hw_modules(adapter_t *adapter) | |||
686 | */ | 686 | */ |
687 | static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) | 687 | static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) |
688 | { | 688 | { |
689 | static unsigned short speed_map[] = { 33, 66, 100, 133 }; | 689 | static const unsigned short speed_map[] = { 33, 66, 100, 133 }; |
690 | u32 pci_mode; | 690 | u32 pci_mode; |
691 | 691 | ||
692 | pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); | 692 | pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); |
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c index 70b47e4c4e9c..32d13166c6e8 100644 --- a/drivers/net/dgrs.c +++ b/drivers/net/dgrs.c | |||
@@ -993,7 +993,7 @@ dgrs_download(struct net_device *dev0) | |||
993 | int is; | 993 | int is; |
994 | unsigned long i; | 994 | unsigned long i; |
995 | 995 | ||
996 | static int iv2is[16] = { | 996 | static const int iv2is[16] = { |
997 | 0, 0, 0, ES4H_IS_INT3, | 997 | 0, 0, 0, ES4H_IS_INT3, |
998 | 0, ES4H_IS_INT5, 0, ES4H_IS_INT7, | 998 | 0, ES4H_IS_INT5, 0, ES4H_IS_INT7, |
999 | 0, 0, ES4H_IS_INT10, ES4H_IS_INT11, | 999 | 0, 0, ES4H_IS_INT10, ES4H_IS_INT11, |
diff --git a/drivers/net/dgrs_firmware.c b/drivers/net/dgrs_firmware.c index 1e49e1e1f201..8c20d4c99937 100644 --- a/drivers/net/dgrs_firmware.c +++ b/drivers/net/dgrs_firmware.c | |||
@@ -1,4 +1,4 @@ | |||
1 | static int dgrs_firmnum = 550; | 1 | static const int dgrs_firmnum = 550; |
2 | static char dgrs_firmver[] = "$Version$"; | 2 | static char dgrs_firmver[] = "$Version$"; |
3 | static char dgrs_firmdate[] = "11/16/96 03:45:15"; | 3 | static char dgrs_firmdate[] = "11/16/96 03:45:15"; |
4 | static unsigned char dgrs_code[] __initdata = { | 4 | static unsigned char dgrs_code[] __initdata = { |
@@ -9963,4 +9963,4 @@ static unsigned char dgrs_code[] __initdata = { | |||
9963 | 109,46,99,0,114,99,0,0,48,120,0,0, | 9963 | 109,46,99,0,114,99,0,0,48,120,0,0, |
9964 | 0,0,0,0,0,0,0,0,0,0,0,0 | 9964 | 0,0,0,0,0,0,0,0,0,0,0,0 |
9965 | } ; | 9965 | } ; |
9966 | static int dgrs_ncode = 119520 ; | 9966 | static const int dgrs_ncode = 119520 ; |
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index fb9dae302dcc..1f3627470c95 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
@@ -90,8 +90,8 @@ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ | |||
90 | #define EnableInt() \ | 90 | #define EnableInt() \ |
91 | writew(DEFAULT_INTR, ioaddr + IntEnable) | 91 | writew(DEFAULT_INTR, ioaddr + IntEnable) |
92 | 92 | ||
93 | static int max_intrloop = 50; | 93 | static const int max_intrloop = 50; |
94 | static int multicast_filter_limit = 0x40; | 94 | static const int multicast_filter_limit = 0x40; |
95 | 95 | ||
96 | static int rio_open (struct net_device *dev); | 96 | static int rio_open (struct net_device *dev); |
97 | static void rio_timer (unsigned long data); | 97 | static void rio_timer (unsigned long data); |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index f57a85feda3d..31ac001f5517 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -598,8 +598,8 @@ static void e100_enable_irq(struct nic *nic) | |||
598 | 598 | ||
599 | spin_lock_irqsave(&nic->cmd_lock, flags); | 599 | spin_lock_irqsave(&nic->cmd_lock, flags); |
600 | writeb(irq_mask_none, &nic->csr->scb.cmd_hi); | 600 | writeb(irq_mask_none, &nic->csr->scb.cmd_hi); |
601 | spin_unlock_irqrestore(&nic->cmd_lock, flags); | ||
602 | e100_write_flush(nic); | 601 | e100_write_flush(nic); |
602 | spin_unlock_irqrestore(&nic->cmd_lock, flags); | ||
603 | } | 603 | } |
604 | 604 | ||
605 | static void e100_disable_irq(struct nic *nic) | 605 | static void e100_disable_irq(struct nic *nic) |
@@ -608,8 +608,8 @@ static void e100_disable_irq(struct nic *nic) | |||
608 | 608 | ||
609 | spin_lock_irqsave(&nic->cmd_lock, flags); | 609 | spin_lock_irqsave(&nic->cmd_lock, flags); |
610 | writeb(irq_mask_all, &nic->csr->scb.cmd_hi); | 610 | writeb(irq_mask_all, &nic->csr->scb.cmd_hi); |
611 | spin_unlock_irqrestore(&nic->cmd_lock, flags); | ||
612 | e100_write_flush(nic); | 611 | e100_write_flush(nic); |
612 | spin_unlock_irqrestore(&nic->cmd_lock, flags); | ||
613 | } | 613 | } |
614 | 614 | ||
615 | static void e100_hw_reset(struct nic *nic) | 615 | static void e100_hw_reset(struct nic *nic) |
@@ -1582,8 +1582,8 @@ static void e100_watchdog(unsigned long data) | |||
1582 | * interrupt mask bit and the SW Interrupt generation bit */ | 1582 | * interrupt mask bit and the SW Interrupt generation bit */ |
1583 | spin_lock_irq(&nic->cmd_lock); | 1583 | spin_lock_irq(&nic->cmd_lock); |
1584 | writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); | 1584 | writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); |
1585 | spin_unlock_irq(&nic->cmd_lock); | ||
1586 | e100_write_flush(nic); | 1585 | e100_write_flush(nic); |
1586 | spin_unlock_irq(&nic->cmd_lock); | ||
1587 | 1587 | ||
1588 | e100_update_stats(nic); | 1588 | e100_update_stats(nic); |
1589 | e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex); | 1589 | e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex); |
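The e100 hunks move e100_write_flush() back inside the spinlock, so the read-back that flushes the posted MMIO write completes before another CPU can take the lock and touch the same register. A generic sketch of the idiom; the structure, register offsets and lock name are illustrative:

static void set_irq_mask(struct example_nic *nic, u8 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->reg_lock, flags);
	writeb(mask, nic->base + REG_IRQ_MASK);
	(void)readb(nic->base + REG_STATUS);	/* flush the posted write while still locked */
	spin_unlock_irqrestore(&nic->reg_lock, flags);
}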
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 99baf0e099fc..281de41d030a 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -83,10 +83,6 @@ | |||
83 | struct e1000_adapter; | 83 | struct e1000_adapter; |
84 | 84 | ||
85 | #include "e1000_hw.h" | 85 | #include "e1000_hw.h" |
86 | #ifdef CONFIG_E1000_MQ | ||
87 | #include <linux/cpu.h> | ||
88 | #include <linux/smp.h> | ||
89 | #endif | ||
90 | 86 | ||
91 | #ifdef DBG | 87 | #ifdef DBG |
92 | #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) | 88 | #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) |
@@ -169,12 +165,6 @@ struct e1000_buffer { | |||
169 | uint16_t next_to_watch; | 165 | uint16_t next_to_watch; |
170 | }; | 166 | }; |
171 | 167 | ||
172 | #ifdef CONFIG_E1000_MQ | ||
173 | struct e1000_queue_stats { | ||
174 | uint64_t packets; | ||
175 | uint64_t bytes; | ||
176 | }; | ||
177 | #endif | ||
178 | 168 | ||
179 | struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; | 169 | struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; |
180 | struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; | 170 | struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; |
@@ -198,12 +188,7 @@ struct e1000_tx_ring { | |||
198 | spinlock_t tx_lock; | 188 | spinlock_t tx_lock; |
199 | uint16_t tdh; | 189 | uint16_t tdh; |
200 | uint16_t tdt; | 190 | uint16_t tdt; |
201 | |||
202 | boolean_t last_tx_tso; | 191 | boolean_t last_tx_tso; |
203 | |||
204 | #ifdef CONFIG_E1000_MQ | ||
205 | struct e1000_queue_stats tx_stats; | ||
206 | #endif | ||
207 | }; | 192 | }; |
208 | 193 | ||
209 | struct e1000_rx_ring { | 194 | struct e1000_rx_ring { |
@@ -230,9 +215,6 @@ struct e1000_rx_ring { | |||
230 | 215 | ||
231 | uint16_t rdh; | 216 | uint16_t rdh; |
232 | uint16_t rdt; | 217 | uint16_t rdt; |
233 | #ifdef CONFIG_E1000_MQ | ||
234 | struct e1000_queue_stats rx_stats; | ||
235 | #endif | ||
236 | }; | 218 | }; |
237 | 219 | ||
238 | #define E1000_DESC_UNUSED(R) \ | 220 | #define E1000_DESC_UNUSED(R) \ |
@@ -260,6 +242,7 @@ struct e1000_adapter { | |||
260 | uint32_t rx_buffer_len; | 242 | uint32_t rx_buffer_len; |
261 | uint32_t part_num; | 243 | uint32_t part_num; |
262 | uint32_t wol; | 244 | uint32_t wol; |
245 | uint32_t ksp3_port_a; | ||
263 | uint32_t smartspeed; | 246 | uint32_t smartspeed; |
264 | uint32_t en_mng_pt; | 247 | uint32_t en_mng_pt; |
265 | uint16_t link_speed; | 248 | uint16_t link_speed; |
@@ -269,8 +252,8 @@ struct e1000_adapter { | |||
269 | spinlock_t tx_queue_lock; | 252 | spinlock_t tx_queue_lock; |
270 | #endif | 253 | #endif |
271 | atomic_t irq_sem; | 254 | atomic_t irq_sem; |
272 | struct work_struct tx_timeout_task; | ||
273 | struct work_struct watchdog_task; | 255 | struct work_struct watchdog_task; |
256 | struct work_struct reset_task; | ||
274 | uint8_t fc_autoneg; | 257 | uint8_t fc_autoneg; |
275 | 258 | ||
276 | struct timer_list blink_timer; | 259 | struct timer_list blink_timer; |
@@ -278,9 +261,6 @@ struct e1000_adapter { | |||
278 | 261 | ||
279 | /* TX */ | 262 | /* TX */ |
280 | struct e1000_tx_ring *tx_ring; /* One per active queue */ | 263 | struct e1000_tx_ring *tx_ring; /* One per active queue */ |
281 | #ifdef CONFIG_E1000_MQ | ||
282 | struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */ | ||
283 | #endif | ||
284 | unsigned long tx_queue_len; | 264 | unsigned long tx_queue_len; |
285 | uint32_t txd_cmd; | 265 | uint32_t txd_cmd; |
286 | uint32_t tx_int_delay; | 266 | uint32_t tx_int_delay; |
@@ -301,24 +281,19 @@ struct e1000_adapter { | |||
301 | /* RX */ | 281 | /* RX */ |
302 | #ifdef CONFIG_E1000_NAPI | 282 | #ifdef CONFIG_E1000_NAPI |
303 | boolean_t (*clean_rx) (struct e1000_adapter *adapter, | 283 | boolean_t (*clean_rx) (struct e1000_adapter *adapter, |
304 | struct e1000_rx_ring *rx_ring, | 284 | struct e1000_rx_ring *rx_ring, |
305 | int *work_done, int work_to_do); | 285 | int *work_done, int work_to_do); |
306 | #else | 286 | #else |
307 | boolean_t (*clean_rx) (struct e1000_adapter *adapter, | 287 | boolean_t (*clean_rx) (struct e1000_adapter *adapter, |
308 | struct e1000_rx_ring *rx_ring); | 288 | struct e1000_rx_ring *rx_ring); |
309 | #endif | 289 | #endif |
310 | void (*alloc_rx_buf) (struct e1000_adapter *adapter, | 290 | void (*alloc_rx_buf) (struct e1000_adapter *adapter, |
311 | struct e1000_rx_ring *rx_ring, | 291 | struct e1000_rx_ring *rx_ring, |
312 | int cleaned_count); | 292 | int cleaned_count); |
313 | struct e1000_rx_ring *rx_ring; /* One per active queue */ | 293 | struct e1000_rx_ring *rx_ring; /* One per active queue */ |
314 | #ifdef CONFIG_E1000_NAPI | 294 | #ifdef CONFIG_E1000_NAPI |
315 | struct net_device *polling_netdev; /* One per active queue */ | 295 | struct net_device *polling_netdev; /* One per active queue */ |
316 | #endif | 296 | #endif |
317 | #ifdef CONFIG_E1000_MQ | ||
318 | struct net_device **cpu_netdev; /* per-cpu */ | ||
319 | struct call_async_data_struct rx_sched_call_data; | ||
320 | cpumask_t cpumask; | ||
321 | #endif | ||
322 | int num_tx_queues; | 297 | int num_tx_queues; |
323 | int num_rx_queues; | 298 | int num_rx_queues; |
324 | 299 | ||
@@ -353,10 +328,37 @@ struct e1000_adapter { | |||
353 | struct e1000_rx_ring test_rx_ring; | 328 | struct e1000_rx_ring test_rx_ring; |
354 | 329 | ||
355 | 330 | ||
356 | u32 *config_space; | 331 | uint32_t *config_space; |
357 | int msg_enable; | 332 | int msg_enable; |
358 | #ifdef CONFIG_PCI_MSI | 333 | #ifdef CONFIG_PCI_MSI |
359 | boolean_t have_msi; | 334 | boolean_t have_msi; |
360 | #endif | 335 | #endif |
336 | /* to not mess up cache alignment, always add to the bottom */ | ||
337 | boolean_t txb2b; | ||
338 | #ifdef NETIF_F_TSO | ||
339 | boolean_t tso_force; | ||
340 | #endif | ||
361 | }; | 341 | }; |
342 | |||
343 | |||
344 | /* e1000_main.c */ | ||
345 | extern char e1000_driver_name[]; | ||
346 | extern char e1000_driver_version[]; | ||
347 | int e1000_up(struct e1000_adapter *adapter); | ||
348 | void e1000_down(struct e1000_adapter *adapter); | ||
349 | void e1000_reset(struct e1000_adapter *adapter); | ||
350 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | ||
351 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | ||
352 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | ||
353 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | ||
354 | void e1000_update_stats(struct e1000_adapter *adapter); | ||
355 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); | ||
356 | |||
357 | /* e1000_ethtool.c */ | ||
358 | void e1000_set_ethtool_ops(struct net_device *netdev); | ||
359 | |||
360 | /* e1000_param.c */ | ||
361 | void e1000_check_options(struct e1000_adapter *adapter); | ||
362 | |||
363 | |||
362 | #endif /* _E1000_H_ */ | 364 | #endif /* _E1000_H_ */ |
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index 5cedc81786e3..ecccca35c6f4 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
@@ -32,19 +32,6 @@ | |||
32 | 32 | ||
33 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
34 | 34 | ||
35 | extern char e1000_driver_name[]; | ||
36 | extern char e1000_driver_version[]; | ||
37 | |||
38 | extern int e1000_up(struct e1000_adapter *adapter); | ||
39 | extern void e1000_down(struct e1000_adapter *adapter); | ||
40 | extern void e1000_reset(struct e1000_adapter *adapter); | ||
41 | extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); | ||
42 | extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | ||
43 | extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | ||
44 | extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | ||
45 | extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | ||
46 | extern void e1000_update_stats(struct e1000_adapter *adapter); | ||
47 | |||
48 | struct e1000_stats { | 35 | struct e1000_stats { |
49 | char stat_string[ETH_GSTRING_LEN]; | 36 | char stat_string[ETH_GSTRING_LEN]; |
50 | int sizeof_stat; | 37 | int sizeof_stat; |
@@ -60,7 +47,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
60 | { "tx_bytes", E1000_STAT(net_stats.tx_bytes) }, | 47 | { "tx_bytes", E1000_STAT(net_stats.tx_bytes) }, |
61 | { "rx_errors", E1000_STAT(net_stats.rx_errors) }, | 48 | { "rx_errors", E1000_STAT(net_stats.rx_errors) }, |
62 | { "tx_errors", E1000_STAT(net_stats.tx_errors) }, | 49 | { "tx_errors", E1000_STAT(net_stats.tx_errors) }, |
63 | { "rx_dropped", E1000_STAT(net_stats.rx_dropped) }, | ||
64 | { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, | 50 | { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, |
65 | { "multicast", E1000_STAT(net_stats.multicast) }, | 51 | { "multicast", E1000_STAT(net_stats.multicast) }, |
66 | { "collisions", E1000_STAT(net_stats.collisions) }, | 52 | { "collisions", E1000_STAT(net_stats.collisions) }, |
@@ -68,7 +54,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
68 | { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, | 54 | { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, |
69 | { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) }, | 55 | { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) }, |
70 | { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, | 56 | { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, |
71 | { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) }, | ||
72 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, | 57 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, |
73 | { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) }, | 58 | { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) }, |
74 | { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) }, | 59 | { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) }, |
@@ -97,14 +82,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
97 | { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, | 82 | { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, |
98 | }; | 83 | }; |
99 | 84 | ||
100 | #ifdef CONFIG_E1000_MQ | ||
101 | #define E1000_QUEUE_STATS_LEN \ | ||
102 | (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \ | ||
103 | ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \ | ||
104 | * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t)) | ||
105 | #else | ||
106 | #define E1000_QUEUE_STATS_LEN 0 | 85 | #define E1000_QUEUE_STATS_LEN 0 |
107 | #endif | ||
108 | #define E1000_GLOBAL_STATS_LEN \ | 86 | #define E1000_GLOBAL_STATS_LEN \ |
109 | sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) | 87 | sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) |
110 | #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) | 88 | #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) |
@@ -346,6 +324,9 @@ e1000_set_tso(struct net_device *netdev, uint32_t data) | |||
346 | netdev->features |= NETIF_F_TSO; | 324 | netdev->features |= NETIF_F_TSO; |
347 | else | 325 | else |
348 | netdev->features &= ~NETIF_F_TSO; | 326 | netdev->features &= ~NETIF_F_TSO; |
327 | |||
328 | DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); | ||
329 | adapter->tso_force = TRUE; | ||
349 | return 0; | 330 | return 0; |
350 | } | 331 | } |
351 | #endif /* NETIF_F_TSO */ | 332 | #endif /* NETIF_F_TSO */ |
@@ -594,6 +575,7 @@ e1000_get_drvinfo(struct net_device *netdev, | |||
594 | case e1000_82571: | 575 | case e1000_82571: |
595 | case e1000_82572: | 576 | case e1000_82572: |
596 | case e1000_82573: | 577 | case e1000_82573: |
578 | case e1000_80003es2lan: | ||
597 | sprintf(firmware_version, "%d.%d-%d", | 579 | sprintf(firmware_version, "%d.%d-%d", |
598 | (eeprom_data & 0xF000) >> 12, | 580 | (eeprom_data & 0xF000) >> 12, |
599 | (eeprom_data & 0x0FF0) >> 4, | 581 | (eeprom_data & 0x0FF0) >> 4, |
@@ -642,6 +624,9 @@ e1000_set_ringparam(struct net_device *netdev, | |||
642 | struct e1000_rx_ring *rxdr, *rx_old, *rx_new; | 624 | struct e1000_rx_ring *rxdr, *rx_old, *rx_new; |
643 | int i, err, tx_ring_size, rx_ring_size; | 625 | int i, err, tx_ring_size, rx_ring_size; |
644 | 626 | ||
627 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | ||
628 | return -EINVAL; | ||
629 | |||
645 | tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; | 630 | tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; |
646 | rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; | 631 | rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; |
647 | 632 | ||
@@ -669,9 +654,6 @@ e1000_set_ringparam(struct net_device *netdev, | |||
669 | txdr = adapter->tx_ring; | 654 | txdr = adapter->tx_ring; |
670 | rxdr = adapter->rx_ring; | 655 | rxdr = adapter->rx_ring; |
671 | 656 | ||
672 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | ||
673 | return -EINVAL; | ||
674 | |||
675 | rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); | 657 | rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); |
676 | rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? | 658 | rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? |
677 | E1000_MAX_RXD : E1000_MAX_82544_RXD)); | 659 | E1000_MAX_RXD : E1000_MAX_82544_RXD)); |
@@ -767,6 +749,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
767 | /* there are several bits on newer hardware that are r/w */ | 749 | /* there are several bits on newer hardware that are r/w */ |
768 | case e1000_82571: | 750 | case e1000_82571: |
769 | case e1000_82572: | 751 | case e1000_82572: |
752 | case e1000_80003es2lan: | ||
770 | toggle = 0x7FFFF3FF; | 753 | toggle = 0x7FFFF3FF; |
771 | break; | 754 | break; |
772 | case e1000_82573: | 755 | case e1000_82573: |
@@ -1256,6 +1239,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1256 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); | 1239 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); |
1257 | /* autoneg off */ | 1240 | /* autoneg off */ |
1258 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); | 1241 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); |
1242 | } else if (adapter->hw.phy_type == e1000_phy_gg82563) { | ||
1243 | e1000_write_phy_reg(&adapter->hw, | ||
1244 | GG82563_PHY_KMRN_MODE_CTRL, | ||
1245 | 0x1CE); | ||
1259 | } | 1246 | } |
1260 | /* force 1000, set loopback */ | 1247 | /* force 1000, set loopback */ |
1261 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); | 1248 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); |
@@ -1325,6 +1312,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) | |||
1325 | case e1000_82571: | 1312 | case e1000_82571: |
1326 | case e1000_82572: | 1313 | case e1000_82572: |
1327 | case e1000_82573: | 1314 | case e1000_82573: |
1315 | case e1000_80003es2lan: | ||
1328 | return e1000_integrated_phy_loopback(adapter); | 1316 | return e1000_integrated_phy_loopback(adapter); |
1329 | break; | 1317 | break; |
1330 | 1318 | ||
@@ -1405,6 +1393,11 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter) | |||
1405 | case e1000_82546_rev_3: | 1393 | case e1000_82546_rev_3: |
1406 | default: | 1394 | default: |
1407 | hw->autoneg = TRUE; | 1395 | hw->autoneg = TRUE; |
1396 | if (hw->phy_type == e1000_phy_gg82563) { | ||
1397 | e1000_write_phy_reg(hw, | ||
1398 | GG82563_PHY_KMRN_MODE_CTRL, | ||
1399 | 0x180); | ||
1400 | } | ||
1408 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); | 1401 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); |
1409 | if (phy_reg & MII_CR_LOOPBACK) { | 1402 | if (phy_reg & MII_CR_LOOPBACK) { |
1410 | phy_reg &= ~MII_CR_LOOPBACK; | 1403 | phy_reg &= ~MII_CR_LOOPBACK; |
@@ -1640,10 +1633,26 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1640 | case E1000_DEV_ID_82546EB_QUAD_COPPER: | 1633 | case E1000_DEV_ID_82546EB_QUAD_COPPER: |
1641 | case E1000_DEV_ID_82545EM_FIBER: | 1634 | case E1000_DEV_ID_82545EM_FIBER: |
1642 | case E1000_DEV_ID_82545EM_COPPER: | 1635 | case E1000_DEV_ID_82545EM_COPPER: |
1636 | case E1000_DEV_ID_82546GB_QUAD_COPPER: | ||
1643 | wol->supported = 0; | 1637 | wol->supported = 0; |
1644 | wol->wolopts = 0; | 1638 | wol->wolopts = 0; |
1645 | return; | 1639 | return; |
1646 | 1640 | ||
1641 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | ||
1642 | /* device id 10B5 port-A supports wol */ | ||
1643 | if (!adapter->ksp3_port_a) { | ||
1644 | wol->supported = 0; | ||
1645 | return; | ||
1646 | } | ||
1647 | /* KSP3 does not support UCAST wake-ups for any interface */ | ||
1648 | wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; | ||
1649 | |||
1650 | if (adapter->wol & E1000_WUFC_EX) | ||
1651 | DPRINTK(DRV, ERR, "Interface does not support " | ||
1652 | "directed (unicast) frame wake-up packets\n"); | ||
1653 | wol->wolopts = 0; | ||
1654 | goto do_defaults; | ||
1655 | |||
1647 | case E1000_DEV_ID_82546EB_FIBER: | 1656 | case E1000_DEV_ID_82546EB_FIBER: |
1648 | case E1000_DEV_ID_82546GB_FIBER: | 1657 | case E1000_DEV_ID_82546GB_FIBER: |
1649 | case E1000_DEV_ID_82571EB_FIBER: | 1658 | case E1000_DEV_ID_82571EB_FIBER: |
@@ -1658,8 +1667,9 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1658 | default: | 1667 | default: |
1659 | wol->supported = WAKE_UCAST | WAKE_MCAST | | 1668 | wol->supported = WAKE_UCAST | WAKE_MCAST | |
1660 | WAKE_BCAST | WAKE_MAGIC; | 1669 | WAKE_BCAST | WAKE_MAGIC; |
1661 | |||
1662 | wol->wolopts = 0; | 1670 | wol->wolopts = 0; |
1671 | |||
1672 | do_defaults: | ||
1663 | if (adapter->wol & E1000_WUFC_EX) | 1673 | if (adapter->wol & E1000_WUFC_EX) |
1664 | wol->wolopts |= WAKE_UCAST; | 1674 | wol->wolopts |= WAKE_UCAST; |
1665 | if (adapter->wol & E1000_WUFC_MC) | 1675 | if (adapter->wol & E1000_WUFC_MC) |
@@ -1684,10 +1694,22 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1684 | case E1000_DEV_ID_82543GC_COPPER: | 1694 | case E1000_DEV_ID_82543GC_COPPER: |
1685 | case E1000_DEV_ID_82544EI_FIBER: | 1695 | case E1000_DEV_ID_82544EI_FIBER: |
1686 | case E1000_DEV_ID_82546EB_QUAD_COPPER: | 1696 | case E1000_DEV_ID_82546EB_QUAD_COPPER: |
1697 | case E1000_DEV_ID_82546GB_QUAD_COPPER: | ||
1687 | case E1000_DEV_ID_82545EM_FIBER: | 1698 | case E1000_DEV_ID_82545EM_FIBER: |
1688 | case E1000_DEV_ID_82545EM_COPPER: | 1699 | case E1000_DEV_ID_82545EM_COPPER: |
1689 | return wol->wolopts ? -EOPNOTSUPP : 0; | 1700 | return wol->wolopts ? -EOPNOTSUPP : 0; |
1690 | 1701 | ||
1702 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | ||
1703 | /* device id 10B5 port-A supports wol */ | ||
1704 | if (!adapter->ksp3_port_a) | ||
1705 | return wol->wolopts ? -EOPNOTSUPP : 0; | ||
1706 | |||
1707 | if (wol->wolopts & WAKE_UCAST) { | ||
1708 | DPRINTK(DRV, ERR, "Interface does not support " | ||
1709 | "directed (unicast) frame wake-up packets\n"); | ||
1710 | return -EOPNOTSUPP; | ||
1711 | } | ||
1712 | |||
1691 | case E1000_DEV_ID_82546EB_FIBER: | 1713 | case E1000_DEV_ID_82546EB_FIBER: |
1692 | case E1000_DEV_ID_82546GB_FIBER: | 1714 | case E1000_DEV_ID_82546GB_FIBER: |
1693 | case E1000_DEV_ID_82571EB_FIBER: | 1715 | case E1000_DEV_ID_82571EB_FIBER: |
@@ -1799,11 +1821,6 @@ e1000_get_ethtool_stats(struct net_device *netdev, | |||
1799 | struct ethtool_stats *stats, uint64_t *data) | 1821 | struct ethtool_stats *stats, uint64_t *data) |
1800 | { | 1822 | { |
1801 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1823 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1802 | #ifdef CONFIG_E1000_MQ | ||
1803 | uint64_t *queue_stat; | ||
1804 | int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t); | ||
1805 | int j, k; | ||
1806 | #endif | ||
1807 | int i; | 1824 | int i; |
1808 | 1825 | ||
1809 | e1000_update_stats(adapter); | 1826 | e1000_update_stats(adapter); |
@@ -1812,29 +1829,12 @@ e1000_get_ethtool_stats(struct net_device *netdev, | |||
1812 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == | 1829 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == |
1813 | sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; | 1830 | sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; |
1814 | } | 1831 | } |
1815 | #ifdef CONFIG_E1000_MQ | ||
1816 | for (j = 0; j < adapter->num_tx_queues; j++) { | ||
1817 | queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats; | ||
1818 | for (k = 0; k < stat_count; k++) | ||
1819 | data[i + k] = queue_stat[k]; | ||
1820 | i += k; | ||
1821 | } | ||
1822 | for (j = 0; j < adapter->num_rx_queues; j++) { | ||
1823 | queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats; | ||
1824 | for (k = 0; k < stat_count; k++) | ||
1825 | data[i + k] = queue_stat[k]; | ||
1826 | i += k; | ||
1827 | } | ||
1828 | #endif | ||
1829 | /* BUG_ON(i != E1000_STATS_LEN); */ | 1832 | /* BUG_ON(i != E1000_STATS_LEN); */ |
1830 | } | 1833 | } |
1831 | 1834 | ||
1832 | static void | 1835 | static void |
1833 | e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | 1836 | e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) |
1834 | { | 1837 | { |
1835 | #ifdef CONFIG_E1000_MQ | ||
1836 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1837 | #endif | ||
1838 | uint8_t *p = data; | 1838 | uint8_t *p = data; |
1839 | int i; | 1839 | int i; |
1840 | 1840 | ||
@@ -1849,20 +1849,6 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | |||
1849 | ETH_GSTRING_LEN); | 1849 | ETH_GSTRING_LEN); |
1850 | p += ETH_GSTRING_LEN; | 1850 | p += ETH_GSTRING_LEN; |
1851 | } | 1851 | } |
1852 | #ifdef CONFIG_E1000_MQ | ||
1853 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
1854 | sprintf(p, "tx_queue_%u_packets", i); | ||
1855 | p += ETH_GSTRING_LEN; | ||
1856 | sprintf(p, "tx_queue_%u_bytes", i); | ||
1857 | p += ETH_GSTRING_LEN; | ||
1858 | } | ||
1859 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1860 | sprintf(p, "rx_queue_%u_packets", i); | ||
1861 | p += ETH_GSTRING_LEN; | ||
1862 | sprintf(p, "rx_queue_%u_bytes", i); | ||
1863 | p += ETH_GSTRING_LEN; | ||
1864 | } | ||
1865 | #endif | ||
1866 | /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ | 1852 | /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ |
1867 | break; | 1853 | break; |
1868 | } | 1854 | } |
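Two behavioural fixes sit among the e1000 ethtool cleanups above: unsupported mini/jumbo ring requests are now rejected at the top of e1000_set_ringparam(), before the function starts modifying any state, and the KSP3 quad-port device reports WoL only on port A and never for unicast frames. A minimal sketch of the early-validation part; the function name is illustrative:

static int example_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;		/* hardware has no mini/jumbo rings */

	/* ... only now free the old descriptor rings and allocate new ones ... */
	return 0;
}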
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index beeec0fbbeac..523c2c9fc0ac 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -100,6 +100,8 @@ static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, | |||
100 | 100 | ||
101 | #define E1000_WRITE_REG_IO(a, reg, val) \ | 101 | #define E1000_WRITE_REG_IO(a, reg, val) \ |
102 | e1000_write_reg_io((a), E1000_##reg, val) | 102 | e1000_write_reg_io((a), E1000_##reg, val) |
103 | static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw); | ||
104 | static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); | ||
103 | 105 | ||
104 | /* IGP cable length table */ | 106 | /* IGP cable length table */ |
105 | static const | 107 | static const |
@@ -153,6 +155,11 @@ e1000_set_phy_type(struct e1000_hw *hw) | |||
153 | hw->phy_type = e1000_phy_igp; | 155 | hw->phy_type = e1000_phy_igp; |
154 | break; | 156 | break; |
155 | } | 157 | } |
158 | case GG82563_E_PHY_ID: | ||
159 | if (hw->mac_type == e1000_80003es2lan) { | ||
160 | hw->phy_type = e1000_phy_gg82563; | ||
161 | break; | ||
162 | } | ||
156 | /* Fall Through */ | 163 | /* Fall Through */ |
157 | default: | 164 | default: |
158 | /* Should never have loaded on this device */ | 165 | /* Should never have loaded on this device */ |
@@ -353,12 +360,19 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
353 | case E1000_DEV_ID_82573L: | 360 | case E1000_DEV_ID_82573L: |
354 | hw->mac_type = e1000_82573; | 361 | hw->mac_type = e1000_82573; |
355 | break; | 362 | break; |
363 | case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: | ||
364 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: | ||
365 | hw->mac_type = e1000_80003es2lan; | ||
366 | break; | ||
356 | default: | 367 | default: |
357 | /* Should never have loaded on this device */ | 368 | /* Should never have loaded on this device */ |
358 | return -E1000_ERR_MAC_TYPE; | 369 | return -E1000_ERR_MAC_TYPE; |
359 | } | 370 | } |
360 | 371 | ||
361 | switch(hw->mac_type) { | 372 | switch(hw->mac_type) { |
373 | case e1000_80003es2lan: | ||
374 | hw->swfw_sync_present = TRUE; | ||
375 | /* fall through */ | ||
362 | case e1000_82571: | 376 | case e1000_82571: |
363 | case e1000_82572: | 377 | case e1000_82572: |
364 | case e1000_82573: | 378 | case e1000_82573: |
@@ -399,6 +413,7 @@ e1000_set_media_type(struct e1000_hw *hw) | |||
399 | case E1000_DEV_ID_82546GB_SERDES: | 413 | case E1000_DEV_ID_82546GB_SERDES: |
400 | case E1000_DEV_ID_82571EB_SERDES: | 414 | case E1000_DEV_ID_82571EB_SERDES: |
401 | case E1000_DEV_ID_82572EI_SERDES: | 415 | case E1000_DEV_ID_82572EI_SERDES: |
416 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: | ||
402 | hw->media_type = e1000_media_type_internal_serdes; | 417 | hw->media_type = e1000_media_type_internal_serdes; |
403 | break; | 418 | break; |
404 | default: | 419 | default: |
@@ -575,6 +590,7 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
575 | /* fall through */ | 590 | /* fall through */ |
576 | case e1000_82571: | 591 | case e1000_82571: |
577 | case e1000_82572: | 592 | case e1000_82572: |
593 | case e1000_80003es2lan: | ||
578 | ret_val = e1000_get_auto_rd_done(hw); | 594 | ret_val = e1000_get_auto_rd_done(hw); |
579 | if(ret_val) | 595 | if(ret_val) |
580 | /* We don't want to continue accessing MAC registers. */ | 596 | /* We don't want to continue accessing MAC registers. */ |
@@ -641,6 +657,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
641 | uint16_t cmd_mmrbc; | 657 | uint16_t cmd_mmrbc; |
642 | uint16_t stat_mmrbc; | 658 | uint16_t stat_mmrbc; |
643 | uint32_t mta_size; | 659 | uint32_t mta_size; |
660 | uint32_t reg_data; | ||
644 | uint32_t ctrl_ext; | 661 | uint32_t ctrl_ext; |
645 | 662 | ||
646 | DEBUGFUNC("e1000_init_hw"); | 663 | DEBUGFUNC("e1000_init_hw"); |
@@ -739,6 +756,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
739 | case e1000_82571: | 756 | case e1000_82571: |
740 | case e1000_82572: | 757 | case e1000_82572: |
741 | case e1000_82573: | 758 | case e1000_82573: |
759 | case e1000_80003es2lan: | ||
742 | ctrl |= E1000_TXDCTL_COUNT_DESC; | 760 | ctrl |= E1000_TXDCTL_COUNT_DESC; |
743 | break; | 761 | break; |
744 | } | 762 | } |
@@ -752,12 +770,34 @@ e1000_init_hw(struct e1000_hw *hw) | |||
752 | switch (hw->mac_type) { | 770 | switch (hw->mac_type) { |
753 | default: | 771 | default: |
754 | break; | 772 | break; |
773 | case e1000_80003es2lan: | ||
774 | /* Enable retransmit on late collisions */ | ||
775 | reg_data = E1000_READ_REG(hw, TCTL); | ||
776 | reg_data |= E1000_TCTL_RTLC; | ||
777 | E1000_WRITE_REG(hw, TCTL, reg_data); | ||
778 | |||
779 | /* Configure Gigabit Carry Extend Padding */ | ||
780 | reg_data = E1000_READ_REG(hw, TCTL_EXT); | ||
781 | reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; | ||
782 | reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; | ||
783 | E1000_WRITE_REG(hw, TCTL_EXT, reg_data); | ||
784 | |||
785 | /* Configure Transmit Inter-Packet Gap */ | ||
786 | reg_data = E1000_READ_REG(hw, TIPG); | ||
787 | reg_data &= ~E1000_TIPG_IPGT_MASK; | ||
788 | reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | ||
789 | E1000_WRITE_REG(hw, TIPG, reg_data); | ||
790 | |||
791 | reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001); | ||
792 | reg_data &= ~0x00100000; | ||
793 | E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data); | ||
794 | /* Fall through */ | ||
755 | case e1000_82571: | 795 | case e1000_82571: |
756 | case e1000_82572: | 796 | case e1000_82572: |
757 | ctrl = E1000_READ_REG(hw, TXDCTL1); | 797 | ctrl = E1000_READ_REG(hw, TXDCTL1); |
758 | ctrl &= ~E1000_TXDCTL_WTHRESH; | 798 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; |
759 | ctrl |= E1000_TXDCTL_COUNT_DESC | E1000_TXDCTL_FULL_TX_DESC_WB; | 799 | if(hw->mac_type >= e1000_82571) |
760 | ctrl |= (1 << 22); | 800 | ctrl |= E1000_TXDCTL_COUNT_DESC; |
761 | E1000_WRITE_REG(hw, TXDCTL1, ctrl); | 801 | E1000_WRITE_REG(hw, TXDCTL1, ctrl); |
762 | break; | 802 | break; |
763 | } | 803 | } |
@@ -906,7 +946,13 @@ e1000_setup_link(struct e1000_hw *hw) | |||
906 | * signal detection. So this should be done before e1000_setup_pcs_link() | 946 | * signal detection. So this should be done before e1000_setup_pcs_link() |
907 | * or e1000_phy_setup() is called. | 947 | * or e1000_phy_setup() is called. |
908 | */ | 948 | */ |
909 | if(hw->mac_type == e1000_82543) { | 949 | if (hw->mac_type == e1000_82543) { |
950 | ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, | ||
951 | 1, &eeprom_data); | ||
952 | if (ret_val) { | ||
953 | DEBUGOUT("EEPROM Read Error\n"); | ||
954 | return -E1000_ERR_EEPROM; | ||
955 | } | ||
910 | ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << | 956 | ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << |
911 | SWDPIO__EXT_SHIFT); | 957 | SWDPIO__EXT_SHIFT); |
912 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 958 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); |
@@ -1308,6 +1354,154 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) | |||
1308 | return E1000_SUCCESS; | 1354 | return E1000_SUCCESS; |
1309 | } | 1355 | } |
1310 | 1356 | ||
1357 | /******************************************************************** | ||
1358 | * Copper link setup for e1000_phy_gg82563 series. | ||
1359 | * | ||
1360 | * hw - Struct containing variables accessed by shared code | ||
1361 | *********************************************************************/ | ||
1362 | static int32_t | ||
1363 | e1000_copper_link_ggp_setup(struct e1000_hw *hw) | ||
1364 | { | ||
1365 | int32_t ret_val; | ||
1366 | uint16_t phy_data; | ||
1367 | uint32_t reg_data; | ||
1368 | |||
1369 | DEBUGFUNC("e1000_copper_link_ggp_setup"); | ||
1370 | |||
1371 | if(!hw->phy_reset_disable) { | ||
1372 | |||
1373 | /* Enable CRS on TX for half-duplex operation. */ | ||
1374 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, | ||
1375 | &phy_data); | ||
1376 | if(ret_val) | ||
1377 | return ret_val; | ||
1378 | |||
1379 | phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; | ||
1380 | /* Use 25MHz for both link down and 1000BASE-T for Tx clock */ | ||
1381 | phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ; | ||
1382 | |||
1383 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, | ||
1384 | phy_data); | ||
1385 | if(ret_val) | ||
1386 | return ret_val; | ||
1387 | |||
1388 | /* Options: | ||
1389 | * MDI/MDI-X = 0 (default) | ||
1390 | * 0 - Auto for all speeds | ||
1391 | * 1 - MDI mode | ||
1392 | * 2 - MDI-X mode | ||
1393 | * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) | ||
1394 | */ | ||
1395 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data); | ||
1396 | if(ret_val) | ||
1397 | return ret_val; | ||
1398 | |||
1399 | phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; | ||
1400 | |||
1401 | switch (hw->mdix) { | ||
1402 | case 1: | ||
1403 | phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI; | ||
1404 | break; | ||
1405 | case 2: | ||
1406 | phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; | ||
1407 | break; | ||
1408 | case 0: | ||
1409 | default: | ||
1410 | phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; | ||
1411 | break; | ||
1412 | } | ||
1413 | |||
1414 | /* Options: | ||
1415 | * disable_polarity_correction = 0 (default) | ||
1416 | * Automatic Correction for Reversed Cable Polarity | ||
1417 | * 0 - Disabled | ||
1418 | * 1 - Enabled | ||
1419 | */ | ||
1420 | phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; | ||
1421 | if(hw->disable_polarity_correction == 1) | ||
1422 | phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; | ||
1423 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); | ||
1424 | |||
1425 | if(ret_val) | ||
1426 | return ret_val; | ||
1427 | |||
1428 | /* SW Reset the PHY so all changes take effect */ | ||
1429 | ret_val = e1000_phy_reset(hw); | ||
1430 | if (ret_val) { | ||
1431 | DEBUGOUT("Error Resetting the PHY\n"); | ||
1432 | return ret_val; | ||
1433 | } | ||
1434 | } /* phy_reset_disable */ | ||
1435 | |||
1436 | if (hw->mac_type == e1000_80003es2lan) { | ||
1437 | /* Bypass RX and TX FIFO's */ | ||
1438 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL, | ||
1439 | E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | | ||
1440 | E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); | ||
1441 | if (ret_val) | ||
1442 | return ret_val; | ||
1443 | |||
1444 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data); | ||
1445 | if (ret_val) | ||
1446 | return ret_val; | ||
1447 | |||
1448 | phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; | ||
1449 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data); | ||
1450 | |||
1451 | if (ret_val) | ||
1452 | return ret_val; | ||
1453 | |||
1454 | reg_data = E1000_READ_REG(hw, CTRL_EXT); | ||
1455 | reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); | ||
1456 | E1000_WRITE_REG(hw, CTRL_EXT, reg_data); | ||
1457 | |||
1458 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, | ||
1459 | &phy_data); | ||
1460 | if (ret_val) | ||
1461 | return ret_val; | ||
1462 | |||
1463 | /* Do not init these registers when the HW is in IAMT mode, since the | ||
1464 | * firmware will have already initialized them. We only initialize | ||
1465 | * them if the HW is not in IAMT mode. | ||
1466 | */ | ||
1467 | if (e1000_check_mng_mode(hw) == FALSE) { | ||
1468 | /* Enable Electrical Idle on the PHY */ | ||
1469 | phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; | ||
1470 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, | ||
1471 | phy_data); | ||
1472 | if (ret_val) | ||
1473 | return ret_val; | ||
1474 | |||
1475 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, | ||
1476 | &phy_data); | ||
1477 | if (ret_val) | ||
1478 | return ret_val; | ||
1479 | |||
1480 | /* Enable Pass False Carrier on the PHY */ | ||
1481 | phy_data |= GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1482 | |||
1483 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, | ||
1484 | phy_data); | ||
1485 | if (ret_val) | ||
1486 | return ret_val; | ||
1487 | } | ||
1488 | |||
1489 | /* Workaround: Disable padding in Kumeran interface in the MAC | ||
1490 | * and in the PHY to avoid CRC errors. | ||
1491 | */ | ||
1492 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL, | ||
1493 | &phy_data); | ||
1494 | if (ret_val) | ||
1495 | return ret_val; | ||
1496 | phy_data |= GG82563_ICR_DIS_PADDING; | ||
1497 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL, | ||
1498 | phy_data); | ||
1499 | if (ret_val) | ||
1500 | return ret_val; | ||
1501 | } | ||
1502 | |||
1503 | return E1000_SUCCESS; | ||
1504 | } | ||
1311 | 1505 | ||
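e1000_copper_link_ggp_setup repeats the same read-modify-write pattern on GG82563 PHY registers throughout. A hedged helper capturing that pattern (not part of the driver; it only assumes the e1000_read_phy_reg/e1000_write_phy_reg accessors declared in e1000_hw.h):

#include "e1000_hw.h"

/* Illustrative: clear 'clear_bits' and set 'set_bits' in one PHY register,
 * using the shared-code accessors shown elsewhere in this patch. */
static int32_t example_phy_update_bits(struct e1000_hw *hw, uint32_t reg,
                                       uint16_t clear_bits, uint16_t set_bits)
{
	uint16_t phy_data;
	int32_t ret_val;

	ret_val = e1000_read_phy_reg(hw, reg, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~clear_bits;
	phy_data |= set_bits;

	return e1000_write_phy_reg(hw, reg, phy_data);
}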
1312 | /******************************************************************** | 1506 | /******************************************************************** |
1313 | * Copper link setup for e1000_phy_m88 series. | 1507 | * Copper link setup for e1000_phy_m88 series. |
@@ -1518,6 +1712,7 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1518 | int32_t ret_val; | 1712 | int32_t ret_val; |
1519 | uint16_t i; | 1713 | uint16_t i; |
1520 | uint16_t phy_data; | 1714 | uint16_t phy_data; |
1715 | uint16_t reg_data; | ||
1521 | 1716 | ||
1522 | DEBUGFUNC("e1000_setup_copper_link"); | 1717 | DEBUGFUNC("e1000_setup_copper_link"); |
1523 | 1718 | ||
@@ -1526,6 +1721,22 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1526 | if(ret_val) | 1721 | if(ret_val) |
1527 | return ret_val; | 1722 | return ret_val; |
1528 | 1723 | ||
1724 | switch (hw->mac_type) { | ||
1725 | case e1000_80003es2lan: | ||
1726 | ret_val = e1000_read_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, | ||
1727 | ®_data); | ||
1728 | if (ret_val) | ||
1729 | return ret_val; | ||
1730 | reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; | ||
1731 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, | ||
1732 | reg_data); | ||
1733 | if (ret_val) | ||
1734 | return ret_val; | ||
1735 | break; | ||
1736 | default: | ||
1737 | break; | ||
1738 | } | ||
1739 | |||
1529 | if (hw->phy_type == e1000_phy_igp || | 1740 | if (hw->phy_type == e1000_phy_igp || |
1530 | hw->phy_type == e1000_phy_igp_2) { | 1741 | hw->phy_type == e1000_phy_igp_2) { |
1531 | ret_val = e1000_copper_link_igp_setup(hw); | 1742 | ret_val = e1000_copper_link_igp_setup(hw); |
@@ -1535,6 +1746,10 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1535 | ret_val = e1000_copper_link_mgp_setup(hw); | 1746 | ret_val = e1000_copper_link_mgp_setup(hw); |
1536 | if(ret_val) | 1747 | if(ret_val) |
1537 | return ret_val; | 1748 | return ret_val; |
1749 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
1750 | ret_val = e1000_copper_link_ggp_setup(hw); | ||
1751 | if(ret_val) | ||
1752 | return ret_val; | ||
1538 | } | 1753 | } |
1539 | 1754 | ||
1540 | if(hw->autoneg) { | 1755 | if(hw->autoneg) { |
@@ -1582,6 +1797,59 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1582 | } | 1797 | } |
1583 | 1798 | ||
1584 | /****************************************************************************** | 1799 | /****************************************************************************** |
1800 | * Configure the MAC-to-PHY interface for 10/100Mbps | ||
1801 | * | ||
1802 | * hw - Struct containing variables accessed by shared code | ||
1803 | ******************************************************************************/ | ||
1804 | static int32_t | ||
1805 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw) | ||
1806 | { | ||
1807 | int32_t ret_val = E1000_SUCCESS; | ||
1808 | uint32_t tipg; | ||
1809 | uint16_t reg_data; | ||
1810 | |||
1811 | DEBUGFUNC("e1000_configure_kmrn_for_10_100"); | ||
1812 | |||
1813 | reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT; | ||
1814 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, | ||
1815 | reg_data); | ||
1816 | if (ret_val) | ||
1817 | return ret_val; | ||
1818 | |||
1819 | /* Configure Transmit Inter-Packet Gap */ | ||
1820 | tipg = E1000_READ_REG(hw, TIPG); | ||
1821 | tipg &= ~E1000_TIPG_IPGT_MASK; | ||
1822 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; | ||
1823 | E1000_WRITE_REG(hw, TIPG, tipg); | ||
1824 | |||
1825 | return ret_val; | ||
1826 | } | ||
1827 | |||
1828 | static int32_t | ||
1829 | e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | ||
1830 | { | ||
1831 | int32_t ret_val = E1000_SUCCESS; | ||
1832 | uint16_t reg_data; | ||
1833 | uint32_t tipg; | ||
1834 | |||
1835 | DEBUGFUNC("e1000_configure_kmrn_for_1000"); | ||
1836 | |||
1837 | reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT; | ||
1838 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, | ||
1839 | reg_data); | ||
1840 | if (ret_val) | ||
1841 | return ret_val; | ||
1842 | |||
1843 | /* Configure Transmit Inter-Packet Gap */ | ||
1844 | tipg = E1000_READ_REG(hw, TIPG); | ||
1845 | tipg &= ~E1000_TIPG_IPGT_MASK; | ||
1846 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | ||
1847 | E1000_WRITE_REG(hw, TIPG, tipg); | ||
1848 | |||
1849 | return ret_val; | ||
1850 | } | ||
1851 | |||
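The two helpers above differ only in the Kumeran half-duplex control default and the TIPG IPGT value programmed for the given speed; later in this patch e1000_get_speed_and_duplex picks one based on the resolved link speed. A hedged sketch of that dispatch:

/* Illustrative: reconfigure the Kumeran interface for the resolved speed. */
static int32_t example_kmrn_reconfigure(struct e1000_hw *hw, uint16_t speed)
{
	if (speed == SPEED_1000)
		return e1000_configure_kmrn_for_1000(hw);

	return e1000_configure_kmrn_for_10_100(hw);
}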
1852 | /****************************************************************************** | ||
1585 | * Configures PHY autoneg and flow control advertisement settings | 1853 | * Configures PHY autoneg and flow control advertisement settings |
1586 | * | 1854 | * |
1587 | * hw - Struct containing variables accessed by shared code | 1855 | * hw - Struct containing variables accessed by shared code |
@@ -1802,7 +2070,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1802 | /* Write the configured values back to the Device Control Reg. */ | 2070 | /* Write the configured values back to the Device Control Reg. */ |
1803 | E1000_WRITE_REG(hw, CTRL, ctrl); | 2071 | E1000_WRITE_REG(hw, CTRL, ctrl); |
1804 | 2072 | ||
1805 | if (hw->phy_type == e1000_phy_m88) { | 2073 | if ((hw->phy_type == e1000_phy_m88) || |
2074 | (hw->phy_type == e1000_phy_gg82563)) { | ||
1806 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 2075 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
1807 | if(ret_val) | 2076 | if(ret_val) |
1808 | return ret_val; | 2077 | return ret_val; |
@@ -1871,7 +2140,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1871 | msec_delay(100); | 2140 | msec_delay(100); |
1872 | } | 2141 | } |
1873 | if((i == 0) && | 2142 | if((i == 0) && |
1874 | (hw->phy_type == e1000_phy_m88)) { | 2143 | ((hw->phy_type == e1000_phy_m88) || |
2144 | (hw->phy_type == e1000_phy_gg82563))) { | ||
1875 | /* We didn't get link. Reset the DSP and wait again for link. */ | 2145 | /* We didn't get link. Reset the DSP and wait again for link. */ |
1876 | ret_val = e1000_phy_reset_dsp(hw); | 2146 | ret_val = e1000_phy_reset_dsp(hw); |
1877 | if(ret_val) { | 2147 | if(ret_val) { |
@@ -1930,6 +2200,27 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1930 | if(ret_val) | 2200 | if(ret_val) |
1931 | return ret_val; | 2201 | return ret_val; |
1932 | } | 2202 | } |
2203 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
2204 | /* The TX_CLK of the Extended PHY Specific Control Register defaults | ||
2205 | * to 2.5MHz on a reset. We need to re-force it back to 25MHz, if | ||
2206 | * we're not in a forced 10/duplex configuration. */ | ||
2207 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); | ||
2208 | if (ret_val) | ||
2209 | return ret_val; | ||
2210 | |||
2211 | phy_data &= ~GG82563_MSCR_TX_CLK_MASK; | ||
2212 | if ((hw->forced_speed_duplex == e1000_10_full) || | ||
2213 | (hw->forced_speed_duplex == e1000_10_half)) | ||
2214 | phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ; | ||
2215 | else | ||
2216 | phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ; | ||
2217 | |||
2218 | /* Also due to the reset, we need to enable CRS on Tx. */ | ||
2219 | phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; | ||
2220 | |||
2221 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); | ||
2222 | if (ret_val) | ||
2223 | return ret_val; | ||
1933 | } | 2224 | } |
1934 | return E1000_SUCCESS; | 2225 | return E1000_SUCCESS; |
1935 | } | 2226 | } |
@@ -2592,6 +2883,16 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, | |||
2592 | } | 2883 | } |
2593 | } | 2884 | } |
2594 | 2885 | ||
2886 | if ((hw->mac_type == e1000_80003es2lan) && | ||
2887 | (hw->media_type == e1000_media_type_copper)) { | ||
2888 | if (*speed == SPEED_1000) | ||
2889 | ret_val = e1000_configure_kmrn_for_1000(hw); | ||
2890 | else | ||
2891 | ret_val = e1000_configure_kmrn_for_10_100(hw); | ||
2892 | if (ret_val) | ||
2893 | return ret_val; | ||
2894 | } | ||
2895 | |||
2595 | return E1000_SUCCESS; | 2896 | return E1000_SUCCESS; |
2596 | } | 2897 | } |
2597 | 2898 | ||
@@ -2767,6 +3068,72 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
2767 | return data; | 3068 | return data; |
2768 | } | 3069 | } |
2769 | 3070 | ||
3071 | int32_t | ||
3072 | e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) | ||
3073 | { | ||
3074 | uint32_t swfw_sync = 0; | ||
3075 | uint32_t swmask = mask; | ||
3076 | uint32_t fwmask = mask << 16; | ||
3077 | int32_t timeout = 200; | ||
3078 | |||
3079 | DEBUGFUNC("e1000_swfw_sync_acquire"); | ||
3080 | |||
3081 | if (!hw->swfw_sync_present) | ||
3082 | return e1000_get_hw_eeprom_semaphore(hw); | ||
3083 | |||
3084 | while(timeout) { | ||
3085 | if (e1000_get_hw_eeprom_semaphore(hw)) | ||
3086 | return -E1000_ERR_SWFW_SYNC; | ||
3087 | |||
3088 | swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); | ||
3089 | if (!(swfw_sync & (fwmask | swmask))) { | ||
3090 | break; | ||
3091 | } | ||
3092 | |||
3093 | /* firmware currently using resource (fwmask) */ | ||
3094 | /* or other software thread currently using resource (swmask) */ | ||
3095 | e1000_put_hw_eeprom_semaphore(hw); | ||
3096 | msec_delay_irq(5); | ||
3097 | timeout--; | ||
3098 | } | ||
3099 | |||
3100 | if (!timeout) { | ||
3101 | DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); | ||
3102 | return -E1000_ERR_SWFW_SYNC; | ||
3103 | } | ||
3104 | |||
3105 | swfw_sync |= swmask; | ||
3106 | E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); | ||
3107 | |||
3108 | e1000_put_hw_eeprom_semaphore(hw); | ||
3109 | return E1000_SUCCESS; | ||
3110 | } | ||
3111 | |||
3112 | void | ||
3113 | e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) | ||
3114 | { | ||
3115 | uint32_t swfw_sync; | ||
3116 | uint32_t swmask = mask; | ||
3117 | |||
3118 | DEBUGFUNC("e1000_swfw_sync_release"); | ||
3119 | |||
3120 | if (!hw->swfw_sync_present) { | ||
3121 | e1000_put_hw_eeprom_semaphore(hw); | ||
3122 | return; | ||
3123 | } | ||
3124 | |||
3125 | /* if (e1000_get_hw_eeprom_semaphore(hw)) | ||
3126 | * return -E1000_ERR_SWFW_SYNC; */ | ||
3127 | while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); | ||
3128 | /* empty */ | ||
3129 | |||
3130 | swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); | ||
3131 | swfw_sync &= ~swmask; | ||
3132 | E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); | ||
3133 | |||
3134 | e1000_put_hw_eeprom_semaphore(hw); | ||
3135 | } | ||
3136 | |||
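e1000_swfw_sync_acquire/e1000_swfw_sync_release bracket every access to a resource shared with firmware or the other LAN function. A hedged usage sketch, assumed to live alongside the shared code in e1000_hw.c: the mask and the work done under the lock are illustrative, and the low-level _ex accessor is used because e1000_read_phy_reg takes this lock itself.

/* Illustrative: own the PHY0 software/firmware sync bit around a raw access. */
static int32_t example_locked_phy_read(struct e1000_hw *hw, uint16_t *phy_data)
{
	int32_t ret_val;

	if (e1000_swfw_sync_acquire(hw, E1000_SWFW_PHY0_SM))
		return -E1000_ERR_SWFW_SYNC;

	/* The resource is owned here; firmware will stay off it. */
	ret_val = e1000_read_phy_reg_ex(hw, PHY_CTRL, phy_data);

	e1000_swfw_sync_release(hw, E1000_SWFW_PHY0_SM);
	return ret_val;
}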
2770 | /***************************************************************************** | 3137 | /***************************************************************************** |
2771 | * Reads the value from a PHY register, if the value is on a specific non zero | 3138 | * Reads the value from a PHY register, if the value is on a specific non zero |
2772 | * page, sets the page first. | 3139 | * page, sets the page first. |
@@ -2779,22 +3146,55 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
2779 | uint16_t *phy_data) | 3146 | uint16_t *phy_data) |
2780 | { | 3147 | { |
2781 | uint32_t ret_val; | 3148 | uint32_t ret_val; |
3149 | uint16_t swfw; | ||
2782 | 3150 | ||
2783 | DEBUGFUNC("e1000_read_phy_reg"); | 3151 | DEBUGFUNC("e1000_read_phy_reg"); |
2784 | 3152 | ||
3153 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3154 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3155 | swfw = E1000_SWFW_PHY1_SM; | ||
3156 | } else { | ||
3157 | swfw = E1000_SWFW_PHY0_SM; | ||
3158 | } | ||
3159 | if (e1000_swfw_sync_acquire(hw, swfw)) | ||
3160 | return -E1000_ERR_SWFW_SYNC; | ||
3161 | |||
2785 | if((hw->phy_type == e1000_phy_igp || | 3162 | if((hw->phy_type == e1000_phy_igp || |
2786 | hw->phy_type == e1000_phy_igp_2) && | 3163 | hw->phy_type == e1000_phy_igp_2) && |
2787 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3164 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
2788 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3165 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
2789 | (uint16_t)reg_addr); | 3166 | (uint16_t)reg_addr); |
2790 | if(ret_val) { | 3167 | if(ret_val) { |
3168 | e1000_swfw_sync_release(hw, swfw); | ||
2791 | return ret_val; | 3169 | return ret_val; |
2792 | } | 3170 | } |
3171 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
3172 | if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) || | ||
3173 | (hw->mac_type == e1000_80003es2lan)) { | ||
3174 | /* Select Configuration Page */ | ||
3175 | if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | ||
3176 | ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, | ||
3177 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | ||
3178 | } else { | ||
3179 | /* Use Alternative Page Select register to access | ||
3180 | * registers 30 and 31 | ||
3181 | */ | ||
3182 | ret_val = e1000_write_phy_reg_ex(hw, | ||
3183 | GG82563_PHY_PAGE_SELECT_ALT, | ||
3184 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | ||
3185 | } | ||
3186 | |||
3187 | if (ret_val) { | ||
3188 | e1000_swfw_sync_release(hw, swfw); | ||
3189 | return ret_val; | ||
3190 | } | ||
3191 | } | ||
2793 | } | 3192 | } |
2794 | 3193 | ||
2795 | ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, | 3194 | ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, |
2796 | phy_data); | 3195 | phy_data); |
2797 | 3196 | ||
3197 | e1000_swfw_sync_release(hw, swfw); | ||
2798 | return ret_val; | 3198 | return ret_val; |
2799 | } | 3199 | } |
2800 | 3200 | ||
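For the GG82563, the low five bits of reg_addr (masked by MAX_PHY_REG_ADDRESS) select the register within a page and the remaining bits select the page, which must be written to the (alternative) page-select register first. A hedged sketch of just that address split, using the constants this patch relies on:

/* Illustrative: split a paged GG82563 register address into page and offset. */
static void example_split_gg82563_addr(uint32_t reg_addr,
                                       uint16_t *page, uint16_t *offset)
{
	*offset = (uint16_t)(reg_addr & MAX_PHY_REG_ADDRESS);           /* low 5 bits */
	*page   = (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT); /* remaining bits */
}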
@@ -2885,22 +3285,55 @@ e1000_write_phy_reg(struct e1000_hw *hw, | |||
2885 | uint16_t phy_data) | 3285 | uint16_t phy_data) |
2886 | { | 3286 | { |
2887 | uint32_t ret_val; | 3287 | uint32_t ret_val; |
3288 | uint16_t swfw; | ||
2888 | 3289 | ||
2889 | DEBUGFUNC("e1000_write_phy_reg"); | 3290 | DEBUGFUNC("e1000_write_phy_reg"); |
2890 | 3291 | ||
3292 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3293 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3294 | swfw = E1000_SWFW_PHY1_SM; | ||
3295 | } else { | ||
3296 | swfw = E1000_SWFW_PHY0_SM; | ||
3297 | } | ||
3298 | if (e1000_swfw_sync_acquire(hw, swfw)) | ||
3299 | return -E1000_ERR_SWFW_SYNC; | ||
3300 | |||
2891 | if((hw->phy_type == e1000_phy_igp || | 3301 | if((hw->phy_type == e1000_phy_igp || |
2892 | hw->phy_type == e1000_phy_igp_2) && | 3302 | hw->phy_type == e1000_phy_igp_2) && |
2893 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3303 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
2894 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3304 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
2895 | (uint16_t)reg_addr); | 3305 | (uint16_t)reg_addr); |
2896 | if(ret_val) { | 3306 | if(ret_val) { |
3307 | e1000_swfw_sync_release(hw, swfw); | ||
2897 | return ret_val; | 3308 | return ret_val; |
2898 | } | 3309 | } |
3310 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
3311 | if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) || | ||
3312 | (hw->mac_type == e1000_80003es2lan)) { | ||
3313 | /* Select Configuration Page */ | ||
3314 | if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | ||
3315 | ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, | ||
3316 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | ||
3317 | } else { | ||
3318 | /* Use Alternative Page Select register to access | ||
3319 | * registers 30 and 31 | ||
3320 | */ | ||
3321 | ret_val = e1000_write_phy_reg_ex(hw, | ||
3322 | GG82563_PHY_PAGE_SELECT_ALT, | ||
3323 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | ||
3324 | } | ||
3325 | |||
3326 | if (ret_val) { | ||
3327 | e1000_swfw_sync_release(hw, swfw); | ||
3328 | return ret_val; | ||
3329 | } | ||
3330 | } | ||
2899 | } | 3331 | } |
2900 | 3332 | ||
2901 | ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, | 3333 | ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, |
2902 | phy_data); | 3334 | phy_data); |
2903 | 3335 | ||
3336 | e1000_swfw_sync_release(hw, swfw); | ||
2904 | return ret_val; | 3337 | return ret_val; |
2905 | } | 3338 | } |
2906 | 3339 | ||
@@ -2967,6 +3400,65 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, | |||
2967 | return E1000_SUCCESS; | 3400 | return E1000_SUCCESS; |
2968 | } | 3401 | } |
2969 | 3402 | ||
3403 | int32_t | ||
3404 | e1000_read_kmrn_reg(struct e1000_hw *hw, | ||
3405 | uint32_t reg_addr, | ||
3406 | uint16_t *data) | ||
3407 | { | ||
3408 | uint32_t reg_val; | ||
3409 | uint16_t swfw; | ||
3410 | DEBUGFUNC("e1000_read_kmrn_reg"); | ||
3411 | |||
3412 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3413 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3414 | swfw = E1000_SWFW_PHY1_SM; | ||
3415 | } else { | ||
3416 | swfw = E1000_SWFW_PHY0_SM; | ||
3417 | } | ||
3418 | if (e1000_swfw_sync_acquire(hw, swfw)) | ||
3419 | return -E1000_ERR_SWFW_SYNC; | ||
3420 | |||
3421 | /* Write register address */ | ||
3422 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & | ||
3423 | E1000_KUMCTRLSTA_OFFSET) | | ||
3424 | E1000_KUMCTRLSTA_REN; | ||
3425 | E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); | ||
3426 | udelay(2); | ||
3427 | |||
3428 | /* Read the data returned */ | ||
3429 | reg_val = E1000_READ_REG(hw, KUMCTRLSTA); | ||
3430 | *data = (uint16_t)reg_val; | ||
3431 | |||
3432 | e1000_swfw_sync_release(hw, swfw); | ||
3433 | return E1000_SUCCESS; | ||
3434 | } | ||
3435 | |||
3436 | int32_t | ||
3437 | e1000_write_kmrn_reg(struct e1000_hw *hw, | ||
3438 | uint32_t reg_addr, | ||
3439 | uint16_t data) | ||
3440 | { | ||
3441 | uint32_t reg_val; | ||
3442 | uint16_t swfw; | ||
3443 | DEBUGFUNC("e1000_write_kmrn_reg"); | ||
3444 | |||
3445 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3446 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3447 | swfw = E1000_SWFW_PHY1_SM; | ||
3448 | } else { | ||
3449 | swfw = E1000_SWFW_PHY0_SM; | ||
3450 | } | ||
3451 | if (e1000_swfw_sync_acquire(hw, swfw)) | ||
3452 | return -E1000_ERR_SWFW_SYNC; | ||
3453 | |||
3454 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & | ||
3455 | E1000_KUMCTRLSTA_OFFSET) | data; | ||
3456 | E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); | ||
3457 | udelay(2); | ||
3458 | |||
3459 | e1000_swfw_sync_release(hw, swfw); | ||
3460 | return E1000_SUCCESS; | ||
3461 | } | ||
2970 | 3462 | ||
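e1000_read_kmrn_reg/e1000_write_kmrn_reg give register-style access to the Kumeran MAC-to-PHY interface through the single KUMCTRLSTA window. A hedged read-modify-write sketch in the style of the INB_CTRL padding-disable sequence added to e1000_setup_copper_link:

/* Illustrative: set bits in a Kumeran register via read-modify-write. */
static int32_t example_kmrn_set_bits(struct e1000_hw *hw,
                                     uint32_t offset, uint16_t bits)
{
	uint16_t reg_data;
	int32_t ret_val;

	ret_val = e1000_read_kmrn_reg(hw, offset, &reg_data);
	if (ret_val)
		return ret_val;

	reg_data |= bits;
	return e1000_write_kmrn_reg(hw, offset, reg_data);
}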
2971 | /****************************************************************************** | 3463 | /****************************************************************************** |
2972 | * Returns the PHY to the power-on reset state | 3464 | * Returns the PHY to the power-on reset state |
@@ -2979,6 +3471,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
2979 | uint32_t ctrl, ctrl_ext; | 3471 | uint32_t ctrl, ctrl_ext; |
2980 | uint32_t led_ctrl; | 3472 | uint32_t led_ctrl; |
2981 | int32_t ret_val; | 3473 | int32_t ret_val; |
3474 | uint16_t swfw; | ||
2982 | 3475 | ||
2983 | DEBUGFUNC("e1000_phy_hw_reset"); | 3476 | DEBUGFUNC("e1000_phy_hw_reset"); |
2984 | 3477 | ||
@@ -2991,11 +3484,21 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
2991 | DEBUGOUT("Resetting Phy...\n"); | 3484 | DEBUGOUT("Resetting Phy...\n"); |
2992 | 3485 | ||
2993 | if(hw->mac_type > e1000_82543) { | 3486 | if(hw->mac_type > e1000_82543) { |
3487 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3488 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3489 | swfw = E1000_SWFW_PHY1_SM; | ||
3490 | } else { | ||
3491 | swfw = E1000_SWFW_PHY0_SM; | ||
3492 | } | ||
3493 | if (e1000_swfw_sync_acquire(hw, swfw)) { | ||
3494 | e1000_release_software_semaphore(hw); | ||
3495 | return -E1000_ERR_SWFW_SYNC; | ||
3496 | } | ||
2994 | /* Read the device control register and assert the E1000_CTRL_PHY_RST | 3497 | /* Read the device control register and assert the E1000_CTRL_PHY_RST |
2995 | * bit. Then, take it out of reset. | 3498 | * bit. Then, take it out of reset. |
2996 | * For pre-e1000_82571 hardware, we delay for 10ms between the assert | 3499 | * For pre-e1000_82571 hardware, we delay for 10ms between the assert |
2997 | * and deassert. For e1000_82571 hardware and later, we instead delay | 3500 | * and deassert. For e1000_82571 hardware and later, we instead delay |
2998 | * for 10ms after the deassertion. | 3501 | * for 50us between and 10ms after the deassertion. |
2999 | */ | 3502 | */ |
3000 | ctrl = E1000_READ_REG(hw, CTRL); | 3503 | ctrl = E1000_READ_REG(hw, CTRL); |
3001 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); | 3504 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); |
@@ -3011,6 +3514,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3011 | 3514 | ||
3012 | if (hw->mac_type >= e1000_82571) | 3515 | if (hw->mac_type >= e1000_82571) |
3013 | msec_delay(10); | 3516 | msec_delay(10); |
3517 | e1000_swfw_sync_release(hw, swfw); | ||
3014 | } else { | 3518 | } else { |
3015 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR | 3519 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR |
3016 | * bit to put the PHY into reset. Then, take it out of reset. | 3520 | * bit to put the PHY into reset. Then, take it out of reset. |
@@ -3037,6 +3541,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3037 | 3541 | ||
3038 | /* Wait for FW to finish PHY configuration. */ | 3542 | /* Wait for FW to finish PHY configuration. */ |
3039 | ret_val = e1000_get_phy_cfg_done(hw); | 3543 | ret_val = e1000_get_phy_cfg_done(hw); |
3544 | e1000_release_software_semaphore(hw); | ||
3040 | 3545 | ||
3041 | return ret_val; | 3546 | return ret_val; |
3042 | } | 3547 | } |
@@ -3114,6 +3619,15 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3114 | return E1000_SUCCESS; | 3619 | return E1000_SUCCESS; |
3115 | } | 3620 | } |
3116 | 3621 | ||
3622 | /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work- | ||
3623 | * around that forces PHY page 0 to be set or the reads fail. The rest of | ||
3624 | * the code in this routine uses e1000_read_phy_reg to read the PHY ID. | ||
3625 | * So for ESB-2 we need to have this set so our reads won't fail. If the | ||
3626 | * attached PHY is not an e1000_phy_gg82563, the routines below will figure | ||
3627 | * this out as well. */ | ||
3628 | if (hw->mac_type == e1000_80003es2lan) | ||
3629 | hw->phy_type = e1000_phy_gg82563; | ||
3630 | |||
3117 | /* Read the PHY ID Registers to identify which PHY is onboard. */ | 3631 | /* Read the PHY ID Registers to identify which PHY is onboard. */ |
3118 | ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); | 3632 | ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); |
3119 | if(ret_val) | 3633 | if(ret_val) |
@@ -3151,6 +3665,9 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3151 | case e1000_82573: | 3665 | case e1000_82573: |
3152 | if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; | 3666 | if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; |
3153 | break; | 3667 | break; |
3668 | case e1000_80003es2lan: | ||
3669 | if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; | ||
3670 | break; | ||
3154 | default: | 3671 | default: |
3155 | DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); | 3672 | DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); |
3156 | return -E1000_ERR_CONFIG; | 3673 | return -E1000_ERR_CONFIG; |
@@ -3177,8 +3694,10 @@ e1000_phy_reset_dsp(struct e1000_hw *hw) | |||
3177 | DEBUGFUNC("e1000_phy_reset_dsp"); | 3694 | DEBUGFUNC("e1000_phy_reset_dsp"); |
3178 | 3695 | ||
3179 | do { | 3696 | do { |
3180 | ret_val = e1000_write_phy_reg(hw, 29, 0x001d); | 3697 | if (hw->phy_type != e1000_phy_gg82563) { |
3181 | if(ret_val) break; | 3698 | ret_val = e1000_write_phy_reg(hw, 29, 0x001d); |
3699 | if(ret_val) break; | ||
3700 | } | ||
3182 | ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); | 3701 | ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); |
3183 | if(ret_val) break; | 3702 | if(ret_val) break; |
3184 | ret_val = e1000_write_phy_reg(hw, 30, 0x0000); | 3703 | ret_val = e1000_write_phy_reg(hw, 30, 0x0000); |
@@ -3310,8 +3829,17 @@ e1000_phy_m88_get_info(struct e1000_hw *hw, | |||
3310 | /* Cable Length Estimation and Local/Remote Receiver Information | 3829 | /* Cable Length Estimation and Local/Remote Receiver Information |
3311 | * are only valid at 1000 Mbps. | 3830 | * are only valid at 1000 Mbps. |
3312 | */ | 3831 | */ |
3313 | phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> | 3832 | if (hw->phy_type != e1000_phy_gg82563) { |
3314 | M88E1000_PSSR_CABLE_LENGTH_SHIFT); | 3833 | phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> |
3834 | M88E1000_PSSR_CABLE_LENGTH_SHIFT); | ||
3835 | } else { | ||
3836 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, | ||
3837 | &phy_data); | ||
3838 | if (ret_val) | ||
3839 | return ret_val; | ||
3840 | |||
3841 | phy_info->cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH; | ||
3842 | } | ||
3315 | 3843 | ||
3316 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); | 3844 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); |
3317 | if(ret_val) | 3845 | if(ret_val) |
@@ -3392,7 +3920,8 @@ e1000_validate_mdi_setting(struct e1000_hw *hw) | |||
3392 | 3920 | ||
3393 | /****************************************************************************** | 3921 | /****************************************************************************** |
3394 | * Sets up eeprom variables in the hw struct. Must be called after mac_type | 3922 | * Sets up eeprom variables in the hw struct. Must be called after mac_type |
3395 | * is configured. | 3923 | * is configured. Additionally, if this is ICH8, the flash controller GbE |
3924 | * registers must be mapped, or this will crash. | ||
3396 | * | 3925 | * |
3397 | * hw - Struct containing variables accessed by shared code | 3926 | * hw - Struct containing variables accessed by shared code |
3398 | *****************************************************************************/ | 3927 | *****************************************************************************/ |
@@ -3505,6 +4034,20 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
3505 | E1000_WRITE_REG(hw, EECD, eecd); | 4034 | E1000_WRITE_REG(hw, EECD, eecd); |
3506 | } | 4035 | } |
3507 | break; | 4036 | break; |
4037 | case e1000_80003es2lan: | ||
4038 | eeprom->type = e1000_eeprom_spi; | ||
4039 | eeprom->opcode_bits = 8; | ||
4040 | eeprom->delay_usec = 1; | ||
4041 | if (eecd & E1000_EECD_ADDR_BITS) { | ||
4042 | eeprom->page_size = 32; | ||
4043 | eeprom->address_bits = 16; | ||
4044 | } else { | ||
4045 | eeprom->page_size = 8; | ||
4046 | eeprom->address_bits = 8; | ||
4047 | } | ||
4048 | eeprom->use_eerd = TRUE; | ||
4049 | eeprom->use_eewr = FALSE; | ||
4050 | break; | ||
3508 | default: | 4051 | default: |
3509 | break; | 4052 | break; |
3510 | } | 4053 | } |
@@ -3685,9 +4228,8 @@ e1000_acquire_eeprom(struct e1000_hw *hw) | |||
3685 | 4228 | ||
3686 | DEBUGFUNC("e1000_acquire_eeprom"); | 4229 | DEBUGFUNC("e1000_acquire_eeprom"); |
3687 | 4230 | ||
3688 | if(e1000_get_hw_eeprom_semaphore(hw)) | 4231 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) |
3689 | return -E1000_ERR_EEPROM; | 4232 | return -E1000_ERR_SWFW_SYNC; |
3690 | |||
3691 | eecd = E1000_READ_REG(hw, EECD); | 4233 | eecd = E1000_READ_REG(hw, EECD); |
3692 | 4234 | ||
3693 | if (hw->mac_type != e1000_82573) { | 4235 | if (hw->mac_type != e1000_82573) { |
@@ -3706,7 +4248,7 @@ e1000_acquire_eeprom(struct e1000_hw *hw) | |||
3706 | eecd &= ~E1000_EECD_REQ; | 4248 | eecd &= ~E1000_EECD_REQ; |
3707 | E1000_WRITE_REG(hw, EECD, eecd); | 4249 | E1000_WRITE_REG(hw, EECD, eecd); |
3708 | DEBUGOUT("Could not acquire EEPROM grant\n"); | 4250 | DEBUGOUT("Could not acquire EEPROM grant\n"); |
3709 | e1000_put_hw_eeprom_semaphore(hw); | 4251 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
3710 | return -E1000_ERR_EEPROM; | 4252 | return -E1000_ERR_EEPROM; |
3711 | } | 4253 | } |
3712 | } | 4254 | } |
@@ -3829,7 +4371,7 @@ e1000_release_eeprom(struct e1000_hw *hw) | |||
3829 | E1000_WRITE_REG(hw, EECD, eecd); | 4371 | E1000_WRITE_REG(hw, EECD, eecd); |
3830 | } | 4372 | } |
3831 | 4373 | ||
3832 | e1000_put_hw_eeprom_semaphore(hw); | 4374 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
3833 | } | 4375 | } |
3834 | 4376 | ||
3835 | /****************************************************************************** | 4377 | /****************************************************************************** |
@@ -3908,6 +4450,8 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
3908 | if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && | 4450 | if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && |
3909 | hw->eeprom.use_eerd == FALSE) { | 4451 | hw->eeprom.use_eerd == FALSE) { |
3910 | switch (hw->mac_type) { | 4452 | switch (hw->mac_type) { |
4453 | case e1000_80003es2lan: | ||
4454 | break; | ||
3911 | default: | 4455 | default: |
3912 | /* Prepare the EEPROM for reading */ | 4456 | /* Prepare the EEPROM for reading */ |
3913 | if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) | 4457 | if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) |
@@ -4025,6 +4569,9 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, | |||
4025 | uint32_t i = 0; | 4569 | uint32_t i = 0; |
4026 | int32_t error = 0; | 4570 | int32_t error = 0; |
4027 | 4571 | ||
4572 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) | ||
4573 | return -E1000_ERR_SWFW_SYNC; | ||
4574 | |||
4028 | for (i = 0; i < words; i++) { | 4575 | for (i = 0; i < words; i++) { |
4029 | register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | | 4576 | register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | |
4030 | ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | | 4577 | ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | |
@@ -4044,6 +4591,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, | |||
4044 | } | 4591 | } |
4045 | } | 4592 | } |
4046 | 4593 | ||
4594 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); | ||
4047 | return error; | 4595 | return error; |
4048 | } | 4596 | } |
4049 | 4597 | ||
@@ -4085,6 +4633,8 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | |||
4085 | { | 4633 | { |
4086 | uint32_t eecd = 0; | 4634 | uint32_t eecd = 0; |
4087 | 4635 | ||
4636 | DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); | ||
4637 | |||
4088 | if(hw->mac_type == e1000_82573) { | 4638 | if(hw->mac_type == e1000_82573) { |
4089 | eecd = E1000_READ_REG(hw, EECD); | 4639 | eecd = E1000_READ_REG(hw, EECD); |
4090 | 4640 | ||
@@ -4511,6 +5061,7 @@ e1000_read_mac_addr(struct e1000_hw * hw) | |||
4511 | case e1000_82546: | 5061 | case e1000_82546: |
4512 | case e1000_82546_rev_3: | 5062 | case e1000_82546_rev_3: |
4513 | case e1000_82571: | 5063 | case e1000_82571: |
5064 | case e1000_80003es2lan: | ||
4514 | if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | 5065 | if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) |
4515 | hw->perm_mac_addr[5] ^= 0x01; | 5066 | hw->perm_mac_addr[5] ^= 0x01; |
4516 | break; | 5067 | break; |
@@ -4749,8 +5300,37 @@ e1000_rar_set(struct e1000_hw *hw, | |||
4749 | rar_low = ((uint32_t) addr[0] | | 5300 | rar_low = ((uint32_t) addr[0] | |
4750 | ((uint32_t) addr[1] << 8) | | 5301 | ((uint32_t) addr[1] << 8) | |
4751 | ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24)); | 5302 | ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24)); |
5303 | rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8)); | ||
4752 | 5304 | ||
4753 | rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8) | E1000_RAH_AV); | 5305 | /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx |
5306 | * unit hang. | ||
5307 | * | ||
5308 | * Description: | ||
5309 | * If there are any Rx frames queued up or otherwise present in the HW | ||
5310 | * before RSS is enabled, and then we enable RSS, the HW Rx unit will | ||
5311 | * hang. To work around this issue, we have to disable receives and | ||
5312 | * flush out all Rx frames before we enable RSS. To do so, we redirect | ||
5313 | * all Rx traffic to manageability and then reset the HW. | ||
5314 | * This flushes away Rx frames, and (since the redirections to | ||
5315 | * manageability persist across resets) keeps new ones from coming in | ||
5316 | * while we work. Then, we clear the Address Valid AV bit for all MAC | ||
5317 | * addresses and undo the re-direction to manageability. | ||
5318 | * Now, frames are coming in again, but the MAC won't accept them, so | ||
5319 | * far so good. We now proceed to initialize RSS (if necessary) and | ||
5320 | * configure the Rx unit. Last, we re-enable the AV bits and continue | ||
5321 | * on our merry way. | ||
5322 | */ | ||
5323 | switch (hw->mac_type) { | ||
5324 | case e1000_82571: | ||
5325 | case e1000_82572: | ||
5326 | case e1000_80003es2lan: | ||
5327 | if (hw->leave_av_bit_off == TRUE) | ||
5328 | break; | ||
5329 | default: | ||
5330 | /* Indicate to hardware the Address is Valid. */ | ||
5331 | rar_high |= E1000_RAH_AV; | ||
5332 | break; | ||
5333 | } | ||
4754 | 5334 | ||
4755 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); | 5335 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); |
4756 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); | 5336 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); |
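A receive address entry is two 32-bit registers: bytes 0-3 of the MAC address go into the low word, and bytes 4-5 plus the Address Valid flag go into the high word, which this hunk now sets conditionally. A hedged helper showing only the packing:

/* Illustrative: pack a 6-byte MAC address into a RAL/RAH register pair. */
static void example_pack_rar(const uint8_t *addr, uint32_t *rar_low,
                             uint32_t *rar_high, boolean_t set_av)
{
	*rar_low = (uint32_t)addr[0] |
	           ((uint32_t)addr[1] << 8) |
	           ((uint32_t)addr[2] << 16) |
	           ((uint32_t)addr[3] << 24);
	*rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	if (set_av)
		*rar_high |= E1000_RAH_AV;  /* mark the entry valid */
}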
@@ -5330,6 +5910,7 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
5330 | hw->bus_width = e1000_bus_width_pciex_1; | 5910 | hw->bus_width = e1000_bus_width_pciex_1; |
5331 | break; | 5911 | break; |
5332 | case e1000_82571: | 5912 | case e1000_82571: |
5913 | case e1000_80003es2lan: | ||
5333 | hw->bus_type = e1000_bus_type_pci_express; | 5914 | hw->bus_type = e1000_bus_type_pci_express; |
5334 | hw->bus_speed = e1000_bus_speed_2500; | 5915 | hw->bus_speed = e1000_bus_speed_2500; |
5335 | hw->bus_width = e1000_bus_width_pciex_4; | 5916 | hw->bus_width = e1000_bus_width_pciex_4; |
@@ -5475,6 +6056,34 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
5475 | return -E1000_ERR_PHY; | 6056 | return -E1000_ERR_PHY; |
5476 | break; | 6057 | break; |
5477 | } | 6058 | } |
6059 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
6060 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, | ||
6061 | &phy_data); | ||
6062 | if (ret_val) | ||
6063 | return ret_val; | ||
6064 | cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH; | ||
6065 | |||
6066 | switch (cable_length) { | ||
6067 | case e1000_gg_cable_length_60: | ||
6068 | *min_length = 0; | ||
6069 | *max_length = e1000_igp_cable_length_60; | ||
6070 | break; | ||
6071 | case e1000_gg_cable_length_60_115: | ||
6072 | *min_length = e1000_igp_cable_length_60; | ||
6073 | *max_length = e1000_igp_cable_length_115; | ||
6074 | break; | ||
6075 | case e1000_gg_cable_length_115_150: | ||
6076 | *min_length = e1000_igp_cable_length_115; | ||
6077 | *max_length = e1000_igp_cable_length_150; | ||
6078 | break; | ||
6079 | case e1000_gg_cable_length_150: | ||
6080 | *min_length = e1000_igp_cable_length_150; | ||
6081 | *max_length = e1000_igp_cable_length_180; | ||
6082 | break; | ||
6083 | default: | ||
6084 | return -E1000_ERR_PHY; | ||
6085 | break; | ||
6086 | } | ||
5478 | } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ | 6087 | } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ |
5479 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = | 6088 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = |
5480 | {IGP01E1000_PHY_AGC_A, | 6089 | {IGP01E1000_PHY_AGC_A, |
@@ -5584,7 +6193,8 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
5584 | 6193 | ||
5585 | DEBUGFUNC("e1000_check_polarity"); | 6194 | DEBUGFUNC("e1000_check_polarity"); |
5586 | 6195 | ||
5587 | if(hw->phy_type == e1000_phy_m88) { | 6196 | if ((hw->phy_type == e1000_phy_m88) || |
6197 | (hw->phy_type == e1000_phy_gg82563)) { | ||
5588 | /* return the Polarity bit in the Status register. */ | 6198 | /* return the Polarity bit in the Status register. */ |
5589 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, | 6199 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, |
5590 | &phy_data); | 6200 | &phy_data); |
@@ -5653,7 +6263,8 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
5653 | return ret_val; | 6263 | return ret_val; |
5654 | 6264 | ||
5655 | hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0; | 6265 | hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0; |
5656 | } else if(hw->phy_type == e1000_phy_m88) { | 6266 | } else if ((hw->phy_type == e1000_phy_m88) || |
6267 | (hw->phy_type == e1000_phy_gg82563)) { | ||
5657 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, | 6268 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, |
5658 | &phy_data); | 6269 | &phy_data); |
5659 | if(ret_val) | 6270 | if(ret_val) |
@@ -6686,6 +7297,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) | |||
6686 | case e1000_82571: | 7297 | case e1000_82571: |
6687 | case e1000_82572: | 7298 | case e1000_82572: |
6688 | case e1000_82573: | 7299 | case e1000_82573: |
7300 | case e1000_80003es2lan: | ||
6689 | while(timeout) { | 7301 | while(timeout) { |
6690 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; | 7302 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; |
6691 | else msec_delay(1); | 7303 | else msec_delay(1); |
@@ -6729,6 +7341,11 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
6729 | default: | 7341 | default: |
6730 | msec_delay(10); | 7342 | msec_delay(10); |
6731 | break; | 7343 | break; |
7344 | case e1000_80003es2lan: | ||
7345 | /* Separate *_CFG_DONE_* bit for each port */ | ||
7346 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | ||
7347 | cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1; | ||
7348 | /* Fall Through */ | ||
6732 | case e1000_82571: | 7349 | case e1000_82571: |
6733 | case e1000_82572: | 7350 | case e1000_82572: |
6734 | while (timeout) { | 7351 | while (timeout) { |
@@ -6746,12 +7363,6 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
6746 | break; | 7363 | break; |
6747 | } | 7364 | } |
6748 | 7365 | ||
6749 | /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high. | ||
6750 | * Need to wait for PHY configuration completion before accessing NVM | ||
6751 | * and PHY. */ | ||
6752 | if (hw->mac_type == e1000_82573) | ||
6753 | msec_delay(25); | ||
6754 | |||
6755 | return E1000_SUCCESS; | 7366 | return E1000_SUCCESS; |
6756 | } | 7367 | } |
6757 | 7368 | ||
@@ -6777,6 +7388,11 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
6777 | if(!hw->eeprom_semaphore_present) | 7388 | if(!hw->eeprom_semaphore_present) |
6778 | return E1000_SUCCESS; | 7389 | return E1000_SUCCESS; |
6779 | 7390 | ||
7391 | if (hw->mac_type == e1000_80003es2lan) { | ||
7392 | /* Get the SW semaphore. */ | ||
7393 | if (e1000_get_software_semaphore(hw) != E1000_SUCCESS) | ||
7394 | return -E1000_ERR_EEPROM; | ||
7395 | } | ||
6780 | 7396 | ||
6781 | /* Get the FW semaphore. */ | 7397 | /* Get the FW semaphore. */ |
6782 | timeout = hw->eeprom.word_size + 1; | 7398 | timeout = hw->eeprom.word_size + 1; |
@@ -6822,10 +7438,75 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
6822 | return; | 7438 | return; |
6823 | 7439 | ||
6824 | swsm = E1000_READ_REG(hw, SWSM); | 7440 | swsm = E1000_READ_REG(hw, SWSM); |
7441 | if (hw->mac_type == e1000_80003es2lan) { | ||
7442 | /* Release both semaphores. */ | ||
7443 | swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); | ||
7444 | } else | ||
6825 | swsm &= ~(E1000_SWSM_SWESMBI); | 7445 | swsm &= ~(E1000_SWSM_SWESMBI); |
6826 | E1000_WRITE_REG(hw, SWSM, swsm); | 7446 | E1000_WRITE_REG(hw, SWSM, swsm); |
6827 | } | 7447 | } |
6828 | 7448 | ||
7449 | /*************************************************************************** | ||
7450 | * | ||
7451 | * Obtains the software semaphore bit (SMBI) before resetting the PHY. | ||
7452 | * | ||
7453 | * hw: Struct containing variables accessed by shared code | ||
7454 | * | ||
7455 | * returns: - E1000_ERR_RESET if it fails to obtain the semaphore. | ||
7456 | * E1000_SUCCESS in any other case. | ||
7457 | * | ||
7458 | ***************************************************************************/ | ||
7459 | int32_t | ||
7460 | e1000_get_software_semaphore(struct e1000_hw *hw) | ||
7461 | { | ||
7462 | int32_t timeout = hw->eeprom.word_size + 1; | ||
7463 | uint32_t swsm; | ||
7464 | |||
7465 | DEBUGFUNC("e1000_get_software_semaphore"); | ||
7466 | |||
7467 | if (hw->mac_type != e1000_80003es2lan) | ||
7468 | return E1000_SUCCESS; | ||
7469 | |||
7470 | while(timeout) { | ||
7471 | swsm = E1000_READ_REG(hw, SWSM); | ||
7472 | /* If SMBI bit cleared, it is now set and we hold the semaphore */ | ||
7473 | if(!(swsm & E1000_SWSM_SMBI)) | ||
7474 | break; | ||
7475 | msec_delay_irq(1); | ||
7476 | timeout--; | ||
7477 | } | ||
7478 | |||
7479 | if(!timeout) { | ||
7480 | DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); | ||
7481 | return -E1000_ERR_RESET; | ||
7482 | } | ||
7483 | |||
7484 | return E1000_SUCCESS; | ||
7485 | } | ||
7486 | |||
7487 | /*************************************************************************** | ||
7488 | * | ||
7489 | * Releases the software semaphore bit (SMBI). | ||
7490 | * | ||
7491 | * hw: Struct containing variables accessed by shared code | ||
7492 | * | ||
7493 | ***************************************************************************/ | ||
7494 | void | ||
7495 | e1000_release_software_semaphore(struct e1000_hw *hw) | ||
7496 | { | ||
7497 | uint32_t swsm; | ||
7498 | |||
7499 | DEBUGFUNC("e1000_release_software_semaphore"); | ||
7500 | |||
7501 | if (hw->mac_type != e1000_80003es2lan) | ||
7502 | return; | ||
7503 | |||
7504 | swsm = E1000_READ_REG(hw, SWSM); | ||
7505 | /* Release the SW semaphore. */ | ||
7506 | swsm &= ~E1000_SWSM_SMBI; | ||
7507 | E1000_WRITE_REG(hw, SWSM, swsm); | ||
7508 | } | ||
7509 | |||
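e1000_get_software_semaphore/e1000_release_software_semaphore form a take/drop pair around any section that must exclude the other driver-owned context on the 80003ES2LAN; on other MACs they are no-ops. A hedged usage sketch, where the work done while holding SMBI is illustrative:

/* Illustrative: hold the SMBI bit across a critical section. */
static int32_t example_with_smbi(struct e1000_hw *hw)
{
	if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
		return -E1000_ERR_RESET;

	/* ... touch state shared between the two LAN functions ... */

	e1000_release_software_semaphore(hw);
	return E1000_SUCCESS;
}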
6829 | /****************************************************************************** | 7510 | /****************************************************************************** |
6830 | * Checks if PHY reset is blocked due to SOL/IDER session, for example. | 7511 | * Checks if PHY reset is blocked due to SOL/IDER session, for example. |
6831 | * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to | 7512 | * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to |
@@ -6862,6 +7543,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
6862 | case e1000_82571: | 7543 | case e1000_82571: |
6863 | case e1000_82572: | 7544 | case e1000_82572: |
6864 | case e1000_82573: | 7545 | case e1000_82573: |
7546 | case e1000_80003es2lan: | ||
6865 | fwsm = E1000_READ_REG(hw, FWSM); | 7547 | fwsm = E1000_READ_REG(hw, FWSM); |
6866 | if((fwsm & E1000_FWSM_MODE_MASK) != 0) | 7548 | if((fwsm & E1000_FWSM_MODE_MASK) != 0) |
6867 | return TRUE; | 7549 | return TRUE; |
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index f1219dd9dbac..150e45e30f87 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -60,6 +60,7 @@ typedef enum { | |||
60 | e1000_82571, | 60 | e1000_82571, |
61 | e1000_82572, | 61 | e1000_82572, |
62 | e1000_82573, | 62 | e1000_82573, |
63 | e1000_80003es2lan, | ||
63 | e1000_num_macs | 64 | e1000_num_macs |
64 | } e1000_mac_type; | 65 | } e1000_mac_type; |
65 | 66 | ||
@@ -139,6 +140,13 @@ typedef enum { | |||
139 | } e1000_cable_length; | 140 | } e1000_cable_length; |
140 | 141 | ||
141 | typedef enum { | 142 | typedef enum { |
143 | e1000_gg_cable_length_60 = 0, | ||
144 | e1000_gg_cable_length_60_115 = 1, | ||
145 | e1000_gg_cable_length_115_150 = 2, | ||
146 | e1000_gg_cable_length_150 = 4 | ||
147 | } e1000_gg_cable_length; | ||
148 | |||
149 | typedef enum { | ||
142 | e1000_igp_cable_length_10 = 10, | 150 | e1000_igp_cable_length_10 = 10, |
143 | e1000_igp_cable_length_20 = 20, | 151 | e1000_igp_cable_length_20 = 20, |
144 | e1000_igp_cable_length_30 = 30, | 152 | e1000_igp_cable_length_30 = 30, |
@@ -208,6 +216,7 @@ typedef enum { | |||
208 | e1000_phy_m88 = 0, | 216 | e1000_phy_m88 = 0, |
209 | e1000_phy_igp, | 217 | e1000_phy_igp, |
210 | e1000_phy_igp_2, | 218 | e1000_phy_igp_2, |
219 | e1000_phy_gg82563, | ||
211 | e1000_phy_undefined = 0xFF | 220 | e1000_phy_undefined = 0xFF |
212 | } e1000_phy_type; | 221 | } e1000_phy_type; |
213 | 222 | ||
@@ -281,6 +290,7 @@ typedef enum { | |||
281 | #define E1000_ERR_MASTER_REQUESTS_PENDING 10 | 290 | #define E1000_ERR_MASTER_REQUESTS_PENDING 10 |
282 | #define E1000_ERR_HOST_INTERFACE_COMMAND 11 | 291 | #define E1000_ERR_HOST_INTERFACE_COMMAND 11 |
283 | #define E1000_BLK_PHY_RESET 12 | 292 | #define E1000_BLK_PHY_RESET 12 |
293 | #define E1000_ERR_SWFW_SYNC 13 | ||
284 | 294 | ||
285 | /* Function prototypes */ | 295 | /* Function prototypes */ |
286 | /* Initialization */ | 296 | /* Initialization */ |
@@ -304,6 +314,8 @@ int32_t e1000_phy_hw_reset(struct e1000_hw *hw); | |||
304 | int32_t e1000_phy_reset(struct e1000_hw *hw); | 314 | int32_t e1000_phy_reset(struct e1000_hw *hw); |
305 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 315 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); |
306 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); | 316 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); |
317 | int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); | ||
318 | int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); | ||
307 | 319 | ||
308 | /* EEPROM Functions */ | 320 | /* EEPROM Functions */ |
309 | int32_t e1000_init_eeprom_params(struct e1000_hw *hw); | 321 | int32_t e1000_init_eeprom_params(struct e1000_hw *hw); |
@@ -454,6 +466,8 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
454 | #define E1000_DEV_ID_82573E_IAMT 0x108C | 466 | #define E1000_DEV_ID_82573E_IAMT 0x108C |
455 | #define E1000_DEV_ID_82573L 0x109A | 467 | #define E1000_DEV_ID_82573L 0x109A |
456 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 | 468 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 |
469 | #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 | ||
470 | #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 | ||
457 | 471 | ||
458 | 472 | ||
459 | #define NODE_ADDRESS_SIZE 6 | 473 | #define NODE_ADDRESS_SIZE 6 |
@@ -850,6 +864,7 @@ struct e1000_ffvt_entry { | |||
850 | #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ | 864 | #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ |
851 | #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ | 865 | #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ |
852 | #define E1000_TCTL 0x00400 /* TX Control - RW */ | 866 | #define E1000_TCTL 0x00400 /* TX Control - RW */ |
867 | #define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ | ||
853 | #define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ | 868 | #define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ |
854 | #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ | 869 | #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ |
855 | #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ | 870 | #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ |
@@ -996,6 +1011,11 @@ struct e1000_ffvt_entry { | |||
996 | #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ | 1011 | #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ |
997 | #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ | 1012 | #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ |
998 | 1013 | ||
1014 | #define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ | ||
1015 | #define E1000_MDPHYA 0x0003C /* PHY address - RW */ | ||
1016 | #define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ | ||
1017 | #define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ | ||
1018 | |||
999 | #define E1000_GCR 0x05B00 /* PCI-Ex Control */ | 1019 | #define E1000_GCR 0x05B00 /* PCI-Ex Control */ |
1000 | #define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ | 1020 | #define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ |
1001 | #define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ | 1021 | #define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ |
@@ -1065,6 +1085,7 @@ struct e1000_ffvt_entry { | |||
1065 | #define E1000_82542_RXCW E1000_RXCW | 1085 | #define E1000_82542_RXCW E1000_RXCW |
1066 | #define E1000_82542_MTA 0x00200 | 1086 | #define E1000_82542_MTA 0x00200 |
1067 | #define E1000_82542_TCTL E1000_TCTL | 1087 | #define E1000_82542_TCTL E1000_TCTL |
1088 | #define E1000_82542_TCTL_EXT E1000_TCTL_EXT | ||
1068 | #define E1000_82542_TIPG E1000_TIPG | 1089 | #define E1000_82542_TIPG E1000_TIPG |
1069 | #define E1000_82542_TDBAL 0x00420 | 1090 | #define E1000_82542_TDBAL 0x00420 |
1070 | #define E1000_82542_TDBAH 0x00424 | 1091 | #define E1000_82542_TDBAH 0x00424 |
@@ -1212,6 +1233,8 @@ struct e1000_ffvt_entry { | |||
1212 | #define E1000_82542_RSSRK E1000_RSSRK | 1233 | #define E1000_82542_RSSRK E1000_RSSRK |
1213 | #define E1000_82542_RSSIM E1000_RSSIM | 1234 | #define E1000_82542_RSSIM E1000_RSSIM |
1214 | #define E1000_82542_RSSIR E1000_RSSIR | 1235 | #define E1000_82542_RSSIR E1000_RSSIR |
1236 | #define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA | ||
1237 | #define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC | ||
1215 | 1238 | ||
1216 | /* Statistics counters collected by the MAC */ | 1239 | /* Statistics counters collected by the MAC */ |
1217 | struct e1000_hw_stats { | 1240 | struct e1000_hw_stats { |
@@ -1303,6 +1326,7 @@ struct e1000_hw { | |||
1303 | e1000_ffe_config ffe_config_state; | 1326 | e1000_ffe_config ffe_config_state; |
1304 | uint32_t asf_firmware_present; | 1327 | uint32_t asf_firmware_present; |
1305 | uint32_t eeprom_semaphore_present; | 1328 | uint32_t eeprom_semaphore_present; |
1329 | uint32_t swfw_sync_present; | ||
1306 | unsigned long io_base; | 1330 | unsigned long io_base; |
1307 | uint32_t phy_id; | 1331 | uint32_t phy_id; |
1308 | uint32_t phy_revision; | 1332 | uint32_t phy_revision; |
@@ -1361,6 +1385,7 @@ struct e1000_hw { | |||
1361 | boolean_t ifs_params_forced; | 1385 | boolean_t ifs_params_forced; |
1362 | boolean_t in_ifs_mode; | 1386 | boolean_t in_ifs_mode; |
1363 | boolean_t mng_reg_access_disabled; | 1387 | boolean_t mng_reg_access_disabled; |
1388 | boolean_t leave_av_bit_off; | ||
1364 | }; | 1389 | }; |
1365 | 1390 | ||
1366 | 1391 | ||
@@ -1393,6 +1418,8 @@ struct e1000_hw { | |||
1393 | #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ | 1418 | #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ |
1394 | #define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ | 1419 | #define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ |
1395 | #define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ | 1420 | #define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ |
1421 | #define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ | ||
1422 | #define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ | ||
1396 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ | 1423 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ |
1397 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ | 1424 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ |
1398 | #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ | 1425 | #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ |
@@ -1429,6 +1456,16 @@ struct e1000_hw { | |||
1429 | #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ | 1456 | #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ |
1430 | #define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ | 1457 | #define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ |
1431 | #define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ | 1458 | #define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ |
1459 | #define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ | ||
1460 | #define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ | ||
1461 | #define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ | ||
1462 | #define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ | ||
1463 | #define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ | ||
1464 | #define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ | ||
1465 | #define E1000_STATUS_FUSE_8 0x04000000 | ||
1466 | #define E1000_STATUS_FUSE_9 0x08000000 | ||
1467 | #define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ | ||
1468 | #define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ | ||
1432 | 1469 | ||
1433 | /* Constants used to interpret the masked PCI-X bus speed. */ | 1470 | /* Constants used to interpret the masked PCI-X bus speed. */ |
1434 | #define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ | 1471 | #define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ |
@@ -1506,6 +1543,8 @@ struct e1000_hw { | |||
1506 | #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 | 1543 | #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 |
1507 | #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 | 1544 | #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 |
1508 | #define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 | 1545 | #define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 |
1546 | #define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 | ||
1547 | #define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 | ||
1509 | #define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 | 1548 | #define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 |
1510 | #define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 | 1549 | #define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 |
1511 | #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 | 1550 | #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 |
@@ -1515,6 +1554,9 @@ struct e1000_hw { | |||
1515 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ | 1554 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ |
1516 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ | 1555 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ |
1517 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ | 1556 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ |
1557 | #define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ | ||
1558 | #define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ | ||
1559 | #define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 | ||
1518 | 1560 | ||
1519 | /* MDI Control */ | 1561 | /* MDI Control */ |
1520 | #define E1000_MDIC_DATA_MASK 0x0000FFFF | 1562 | #define E1000_MDIC_DATA_MASK 0x0000FFFF |
@@ -1528,6 +1570,32 @@ struct e1000_hw { | |||
1528 | #define E1000_MDIC_INT_EN 0x20000000 | 1570 | #define E1000_MDIC_INT_EN 0x20000000 |
1529 | #define E1000_MDIC_ERROR 0x40000000 | 1571 | #define E1000_MDIC_ERROR 0x40000000 |
1530 | 1572 | ||
1573 | #define E1000_KUMCTRLSTA_MASK 0x0000FFFF | ||
1574 | #define E1000_KUMCTRLSTA_OFFSET 0x001F0000 | ||
1575 | #define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 | ||
1576 | #define E1000_KUMCTRLSTA_REN 0x00200000 | ||
1577 | |||
1578 | #define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 | ||
1579 | #define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 | ||
1580 | #define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 | ||
1581 | #define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 | ||
1582 | #define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004 | ||
1583 | #define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 | ||
1584 | #define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 | ||
1585 | #define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E | ||
1586 | #define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F | ||
1587 | |||
1588 | /* FIFO Control */ | ||
1589 | #define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 | ||
1590 | #define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 | ||
1591 | |||
1592 | /* In-Band Control */ | ||
1593 | #define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 | ||
1594 | |||
1595 | /* Half-Duplex Control */ | ||
1596 | #define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 | ||
1597 | #define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 | ||
1598 | |||
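
Editor's aside (not part of the patch): the KUMCTRLSTA masks above suggest an indirect access scheme in which a Kumeran register offset is placed in bits 20:16 and the read-enable bit triggers the transaction, with the result returned in the low word. The sketch below is a minimal, hypothetical illustration of such a read, modelled on the e1000_read_kmrn_reg() prototype added earlier in this patch; E1000_READ_REG/E1000_WRITE_REG, udelay() and E1000_SUCCESS are assumed from the surrounding driver.

    /* Hypothetical helper: read a Kumeran register through KUMCTRLSTA. */
    static int32_t kmrn_read_sketch(struct e1000_hw *hw,
                                    uint32_t reg_addr, uint16_t *data)
    {
        uint32_t reg_val;

        /* Put the register offset in bits 20:16 and request a read */
        reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
                   E1000_KUMCTRLSTA_OFFSET) | E1000_KUMCTRLSTA_REN;
        E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
        udelay(2);                      /* allow the access to complete */

        /* The low 16 bits now hold the returned data */
        *data = (uint16_t)(E1000_READ_REG(hw, KUMCTRLSTA) &
                           E1000_KUMCTRLSTA_MASK);
        return E1000_SUCCESS;
    }
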
1531 | /* LED Control */ | 1599 | /* LED Control */ |
1532 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F | 1600 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F |
1533 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 | 1601 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 |
@@ -1590,6 +1658,13 @@ struct e1000_hw { | |||
1590 | #define E1000_ICR_MNG 0x00040000 /* Manageability event */ | 1658 | #define E1000_ICR_MNG 0x00040000 /* Manageability event */ |
1591 | #define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ | 1659 | #define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ |
1592 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ | 1660 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ |
1661 | #define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ | ||
1662 | #define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ | ||
1663 | #define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ | ||
1664 | #define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ | ||
1665 | #define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ | ||
1666 | #define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ | ||
1667 | #define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ | ||
1593 | 1668 | ||
1594 | /* Interrupt Cause Set */ | 1669 | /* Interrupt Cause Set */ |
1595 | #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1670 | #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1610,6 +1685,12 @@ struct e1000_hw { | |||
1610 | #define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ | 1685 | #define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ |
1611 | #define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ | 1686 | #define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ |
1612 | #define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ | 1687 | #define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ |
1688 | #define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ | ||
1689 | #define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ | ||
1690 | #define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ | ||
1691 | #define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | ||
1692 | #define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | ||
1693 | #define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | ||
1613 | 1694 | ||
1614 | /* Interrupt Mask Set */ | 1695 | /* Interrupt Mask Set */ |
1615 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1696 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1630,6 +1711,12 @@ struct e1000_hw { | |||
1630 | #define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ | 1711 | #define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ |
1631 | #define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ | 1712 | #define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ |
1632 | #define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ | 1713 | #define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ |
1714 | #define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ | ||
1715 | #define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ | ||
1716 | #define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ | ||
1717 | #define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | ||
1718 | #define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | ||
1719 | #define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | ||
1633 | 1720 | ||
1634 | /* Interrupt Mask Clear */ | 1721 | /* Interrupt Mask Clear */ |
1635 | #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1722 | #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1650,6 +1737,12 @@ struct e1000_hw { | |||
1650 | #define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ | 1737 | #define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ |
1651 | #define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ | 1738 | #define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ |
1652 | #define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ | 1739 | #define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ |
1740 | #define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ | ||
1741 | #define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ | ||
1742 | #define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ | ||
1743 | #define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | ||
1744 | #define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | ||
1745 | #define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | ||
1653 | 1746 | ||
1654 | /* Receive Control */ | 1747 | /* Receive Control */ |
1655 | #define E1000_RCTL_RST 0x00000001 /* Software reset */ | 1748 | #define E1000_RCTL_RST 0x00000001 /* Software reset */ |
@@ -1719,6 +1812,12 @@ struct e1000_hw { | |||
1719 | #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ | 1812 | #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ |
1720 | #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ | 1813 | #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ |
1721 | 1814 | ||
1815 | /* SW_FW_SYNC definitions */ | ||
1816 | #define E1000_SWFW_EEP_SM 0x0001 | ||
1817 | #define E1000_SWFW_PHY0_SM 0x0002 | ||
1818 | #define E1000_SWFW_PHY1_SM 0x0004 | ||
1819 | #define E1000_SWFW_MAC_CSR_SM 0x0008 | ||
1820 | |||
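
Editor's aside (not part of the patch): the E1000_SWFW_*_SM bits pair with the new E1000_SW_FW_SYNC register and E1000_ERR_SWFW_SYNC error code to arbitrate resources between software and firmware. The following is an illustrative sketch only, assuming the software-owned bits occupy the low word and the corresponding firmware bits sit 16 bits higher; msleep() and the register accessors are assumed from the surrounding driver.

    /* Hypothetical sketch: claim one of the software/firmware semaphore bits. */
    static int32_t swfw_sync_acquire_sketch(struct e1000_hw *hw, uint16_t mask)
    {
        uint32_t swfw_sync;
        uint32_t swmask = mask;
        uint32_t fwmask = (uint32_t)mask << 16;
        int32_t timeout = 200;

        while (timeout--) {
            swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
            if (!(swfw_sync & (fwmask | swmask))) {
                /* Resource is free: mark it as owned by software */
                E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync | swmask);
                return E1000_SUCCESS;
            }
            msleep(1);  /* firmware or the other port still holds it */
        }
        return -E1000_ERR_SWFW_SYNC;
    }
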
1722 | /* Receive Descriptor */ | 1821 | /* Receive Descriptor */ |
1723 | #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ | 1822 | #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ |
1724 | #define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ | 1823 | #define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ |
@@ -1797,6 +1896,11 @@ struct e1000_hw { | |||
1797 | #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ | 1896 | #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ |
1798 | #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ | 1897 | #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ |
1799 | #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ | 1898 | #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ |
1899 | /* Extended Transmit Control */ | ||
1900 | #define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ | ||
1901 | #define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ | ||
1902 | |||
1903 | #define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000 | ||
1800 | 1904 | ||
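
Editor's aside (not part of the patch): a minimal sketch of how the new extended transmit control field might be programmed to the ES2LAN default with a read-modify-write, leaving the backoff slot time bits untouched; the accessor macros are assumed from the surrounding driver.

    static void tctl_ext_gcex_sketch(struct e1000_hw *hw)
    {
        uint32_t reg_data;

        reg_data = E1000_READ_REG(hw, TCTL_EXT);
        reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;        /* clear carry-extend field */
        reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
        E1000_WRITE_REG(hw, TCTL_EXT, reg_data);
    }
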
1801 | /* Receive Checksum Control */ | 1905 | /* Receive Checksum Control */ |
1802 | #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ | 1906 | #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ |
@@ -1874,6 +1978,7 @@ struct e1000_hw { | |||
1874 | #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ | 1978 | #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ |
1875 | #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ | 1979 | #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ |
1876 | #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ | 1980 | #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ |
1981 | #define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ | ||
1877 | #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ | 1982 | #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ |
1878 | #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address | 1983 | #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address |
1879 | * filtering */ | 1984 | * filtering */ |
@@ -1962,19 +2067,19 @@ struct e1000_host_command_info { | |||
1962 | /* PCI-Ex registers */ | 2067 | /* PCI-Ex registers */ |
1963 | 2068 | ||
1964 | /* PCI-Ex Control Register */ | 2069 | /* PCI-Ex Control Register */ |
1965 | #define E1000_GCR_RXD_NO_SNOOP 0x00000001 | 2070 | #define E1000_GCR_RXD_NO_SNOOP 0x00000001 |
1966 | #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 | 2071 | #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 |
1967 | #define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 | 2072 | #define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 |
1968 | #define E1000_GCR_TXD_NO_SNOOP 0x00000008 | 2073 | #define E1000_GCR_TXD_NO_SNOOP 0x00000008 |
1969 | #define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 | 2074 | #define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 |
1970 | #define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 | 2075 | #define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 |
1971 | 2076 | ||
1972 | #define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ | 2077 | #define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ |
1973 | E1000_GCR_RXDSCW_NO_SNOOP | \ | 2078 | E1000_GCR_RXDSCW_NO_SNOOP | \ |
1974 | E1000_GCR_RXDSCR_NO_SNOOP | \ | 2079 | E1000_GCR_RXDSCR_NO_SNOOP | \ |
1975 | E1000_GCR TXD_NO_SNOOP | \ | 2080 | E1000_GCR_TXD_NO_SNOOP | \ |
1976 | E1000_GCR_TXDSCW_NO_SNOOP | \ | 2081 | E1000_GCR_TXDSCW_NO_SNOOP | \ |
1977 | E1000_GCR_TXDSCR_NO_SNOOP) | 2082 | E1000_GCR_TXDSCR_NO_SNOOP) |
1978 | 2083 | ||
1979 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 | 2084 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 |
1980 | /* Function Active and Power State to MNG */ | 2085 | /* Function Active and Power State to MNG */ |
@@ -2035,12 +2140,14 @@ struct e1000_host_command_info { | |||
2035 | #define EEPROM_INIT_CONTROL1_REG 0x000A | 2140 | #define EEPROM_INIT_CONTROL1_REG 0x000A |
2036 | #define EEPROM_INIT_CONTROL2_REG 0x000F | 2141 | #define EEPROM_INIT_CONTROL2_REG 0x000F |
2037 | #define EEPROM_INIT_CONTROL3_PORT_B 0x0014 | 2142 | #define EEPROM_INIT_CONTROL3_PORT_B 0x0014 |
2143 | #define EEPROM_INIT_3GIO_3 0x001A | ||
2038 | #define EEPROM_INIT_CONTROL3_PORT_A 0x0024 | 2144 | #define EEPROM_INIT_CONTROL3_PORT_A 0x0024 |
2039 | #define EEPROM_CFG 0x0012 | 2145 | #define EEPROM_CFG 0x0012 |
2040 | #define EEPROM_FLASH_VERSION 0x0032 | 2146 | #define EEPROM_FLASH_VERSION 0x0032 |
2041 | #define EEPROM_CHECKSUM_REG 0x003F | 2147 | #define EEPROM_CHECKSUM_REG 0x003F |
2042 | 2148 | ||
2043 | #define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ | 2149 | #define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ |
2150 | #define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ | ||
2044 | 2151 | ||
2045 | /* Word definitions for ID LED Settings */ | 2152 | /* Word definitions for ID LED Settings */ |
2046 | #define ID_LED_RESERVED_0000 0x0000 | 2153 | #define ID_LED_RESERVED_0000 0x0000 |
@@ -2084,6 +2191,9 @@ struct e1000_host_command_info { | |||
2084 | #define EEPROM_WORD0F_ANE 0x0800 | 2191 | #define EEPROM_WORD0F_ANE 0x0800 |
2085 | #define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 | 2192 | #define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 |
2086 | 2193 | ||
2194 | /* Mask bits for fields in Word 0x1a of the EEPROM */ | ||
2195 | #define EEPROM_WORD1A_ASPM_MASK 0x000C | ||
2196 | |||
2087 | /* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ | 2197 | /* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ |
2088 | #define EEPROM_SUM 0xBABA | 2198 | #define EEPROM_SUM 0xBABA |
2089 | 2199 | ||
@@ -2126,8 +2236,11 @@ struct e1000_host_command_info { | |||
2126 | 2236 | ||
2127 | #define DEFAULT_82542_TIPG_IPGR2 10 | 2237 | #define DEFAULT_82542_TIPG_IPGR2 10 |
2128 | #define DEFAULT_82543_TIPG_IPGR2 6 | 2238 | #define DEFAULT_82543_TIPG_IPGR2 6 |
2239 | #define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 | ||
2129 | #define E1000_TIPG_IPGR2_SHIFT 20 | 2240 | #define E1000_TIPG_IPGR2_SHIFT 20 |
2130 | 2241 | ||
2242 | #define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009 | ||
2243 | #define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008 | ||
2131 | #define E1000_TXDMAC_DPP 0x00000001 | 2244 | #define E1000_TXDMAC_DPP 0x00000001 |
2132 | 2245 | ||
2133 | /* Adaptive IFS defines */ | 2246 | /* Adaptive IFS defines */ |
@@ -2368,6 +2481,78 @@ struct e1000_host_command_info { | |||
2368 | 2481 | ||
2369 | #define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 | 2482 | #define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 |
2370 | 2483 | ||
2484 | /* Bits... | ||
2485 | * 15-5: page | ||
2486 | * 4-0: register offset | ||
2487 | */ | ||
2488 | #define GG82563_PAGE_SHIFT 5 | ||
2489 | #define GG82563_REG(page, reg) \ | ||
2490 | (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) | ||
2491 | #define GG82563_MIN_ALT_REG 30 | ||
2492 | |||
2493 | /* GG82563 Specific Registers */ | ||
2494 | #define GG82563_PHY_SPEC_CTRL \ | ||
2495 | GG82563_REG(0, 16) /* PHY Specific Control */ | ||
2496 | #define GG82563_PHY_SPEC_STATUS \ | ||
2497 | GG82563_REG(0, 17) /* PHY Specific Status */ | ||
2498 | #define GG82563_PHY_INT_ENABLE \ | ||
2499 | GG82563_REG(0, 18) /* Interrupt Enable */ | ||
2500 | #define GG82563_PHY_SPEC_STATUS_2 \ | ||
2501 | GG82563_REG(0, 19) /* PHY Specific Status 2 */ | ||
2502 | #define GG82563_PHY_RX_ERR_CNTR \ | ||
2503 | GG82563_REG(0, 21) /* Receive Error Counter */ | ||
2504 | #define GG82563_PHY_PAGE_SELECT \ | ||
2505 | GG82563_REG(0, 22) /* Page Select */ | ||
2506 | #define GG82563_PHY_SPEC_CTRL_2 \ | ||
2507 | GG82563_REG(0, 26) /* PHY Specific Control 2 */ | ||
2508 | #define GG82563_PHY_PAGE_SELECT_ALT \ | ||
2509 | GG82563_REG(0, 29) /* Alternate Page Select */ | ||
2510 | #define GG82563_PHY_TEST_CLK_CTRL \ | ||
2511 | GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */ | ||
2512 | |||
2513 | #define GG82563_PHY_MAC_SPEC_CTRL \ | ||
2514 | GG82563_REG(2, 21) /* MAC Specific Control Register */ | ||
2515 | #define GG82563_PHY_MAC_SPEC_CTRL_2 \ | ||
2516 | GG82563_REG(2, 26) /* MAC Specific Control 2 */ | ||
2517 | |||
2518 | #define GG82563_PHY_DSP_DISTANCE \ | ||
2519 | GG82563_REG(5, 26) /* DSP Distance */ | ||
2520 | |||
2521 | /* Page 193 - Port Control Registers */ | ||
2522 | #define GG82563_PHY_KMRN_MODE_CTRL \ | ||
2523 | GG82563_REG(193, 16) /* Kumeran Mode Control */ | ||
2524 | #define GG82563_PHY_PORT_RESET \ | ||
2525 | GG82563_REG(193, 17) /* Port Reset */ | ||
2526 | #define GG82563_PHY_REVISION_ID \ | ||
2527 | GG82563_REG(193, 18) /* Revision ID */ | ||
2528 | #define GG82563_PHY_DEVICE_ID \ | ||
2529 | GG82563_REG(193, 19) /* Device ID */ | ||
2530 | #define GG82563_PHY_PWR_MGMT_CTRL \ | ||
2531 | GG82563_REG(193, 20) /* Power Management Control */ | ||
2532 | #define GG82563_PHY_RATE_ADAPT_CTRL \ | ||
2533 | GG82563_REG(193, 25) /* Rate Adaptation Control */ | ||
2534 | |||
2535 | /* Page 194 - KMRN Registers */ | ||
2536 | #define GG82563_PHY_KMRN_FIFO_CTRL_STAT \ | ||
2537 | GG82563_REG(194, 16) /* FIFO's Control/Status */ | ||
2538 | #define GG82563_PHY_KMRN_CTRL \ | ||
2539 | GG82563_REG(194, 17) /* Control */ | ||
2540 | #define GG82563_PHY_INBAND_CTRL \ | ||
2541 | GG82563_REG(194, 18) /* Inband Control */ | ||
2542 | #define GG82563_PHY_KMRN_DIAGNOSTIC \ | ||
2543 | GG82563_REG(194, 19) /* Diagnostic */ | ||
2544 | #define GG82563_PHY_ACK_TIMEOUTS \ | ||
2545 | GG82563_REG(194, 20) /* Acknowledge Timeouts */ | ||
2546 | #define GG82563_PHY_ADV_ABILITY \ | ||
2547 | GG82563_REG(194, 21) /* Advertised Ability */ | ||
2548 | #define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \ | ||
2549 | GG82563_REG(194, 23) /* Link Partner Advertised Ability */ | ||
2550 | #define GG82563_PHY_ADV_NEXT_PAGE \ | ||
2551 | GG82563_REG(194, 24) /* Advertised Next Page */ | ||
2552 | #define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \ | ||
2553 | GG82563_REG(194, 25) /* Link Partner Advertised Next page */ | ||
2554 | #define GG82563_PHY_KMRN_MISC \ | ||
2555 | GG82563_REG(194, 26) /* Misc. */ | ||
2371 | 2556 | ||
2372 | /* PHY Control Register */ | 2557 | /* PHY Control Register */ |
2373 | #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ | 2558 | #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ |
@@ -2681,6 +2866,113 @@ struct e1000_host_command_info { | |||
2681 | #define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 | 2866 | #define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 |
2682 | #define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 | 2867 | #define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 |
2683 | 2868 | ||
2869 | /* GG82563 PHY Specific Control Register (Page 0, Register 16) */ | ||
2870 | #define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */ | ||
2871 | #define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */ | ||
2872 | #define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */ | ||
2873 | #define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */ | ||
2874 | #define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 | ||
2875 | #define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */ | ||
2876 | #define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */ | ||
2877 | #define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */ | ||
2878 | #define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */ | ||
2879 | #define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300 | ||
2880 | #define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */ | ||
2881 | #define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */ | ||
2882 | #define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */ | ||
2883 | #define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */ | ||
2884 | #define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */ | ||
2885 | #define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000 | ||
2886 | #define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12 | ||
2887 | |||
2888 | /* PHY Specific Status Register (Page 0, Register 17) */ | ||
2889 | #define GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */ | ||
2890 | #define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */ | ||
2891 | #define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */ | ||
2892 | #define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */ | ||
2893 | #define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */ | ||
2894 | #define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */ | ||
2895 | #define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */ | ||
2896 | #define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */ | ||
2897 | #define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */ | ||
2898 | #define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */ | ||
2899 | #define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */ | ||
2900 | #define GG82563_PSSR_DUPLEX 0x2000 /* 1=Full-Duplex */ | ||
2901 | #define GG82563_PSSR_SPEED_MASK 0xC000 | ||
2902 | #define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */ | ||
2903 | #define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */ | ||
2904 | #define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */ | ||
2905 | |||
2906 | /* PHY Specific Status Register 2 (Page 0, Register 19) */ | ||
2907 | #define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */ | ||
2908 | #define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */ | ||
2909 | #define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */ | ||
2910 | #define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */ | ||
2911 | #define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */ | ||
2912 | #define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */ | ||
2913 | #define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */ | ||
2914 | #define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */ | ||
2915 | #define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */ | ||
2916 | #define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */ | ||
2917 | #define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */ | ||
2918 | #define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */ | ||
2919 | #define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */ | ||
2920 | |||
2921 | /* PHY Specific Control Register 2 (Page 0, Register 26) */ | ||
2922 | #define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */ | ||
2923 | #define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C | ||
2924 | #define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */ | ||
2925 | #define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */ | ||
2926 | #define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */ | ||
2927 | #define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */ | ||
2928 | #define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */ | ||
2929 | #define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000 | ||
2930 | #define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */ | ||
2931 | #define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */ | ||
2932 | |||
2933 | /* MAC Specific Control Register (Page 2, Register 21) */ | ||
2934 | /* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ | ||
2935 | #define GG82563_MSCR_TX_CLK_MASK 0x0007 | ||
2936 | #define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004 | ||
2937 | #define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005 | ||
2938 | #define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006 | ||
2939 | #define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007 | ||
2940 | |||
2941 | #define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ | ||
2942 | |||
2943 | /* DSP Distance Register (Page 5, Register 26) */ | ||
2944 | #define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M; | ||
2945 | 1 = 50-80M; | ||
2946 | 2 = 80-110M; | ||
2947 | 3 = 110-140M; | ||
2948 | 4 = >140M */ | ||
2949 | |||
2950 | /* Kumeran Mode Control Register (Page 193, Register 16) */ | ||
2951 | #define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */ | ||
2952 | #define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up */ | ||
2953 | #define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080 | ||
2954 | #define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400 | ||
2955 | #define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */ | ||
2956 | #define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 | ||
2957 | |||
2958 | /* Power Management Control Register (Page 193, Register 20) */ | ||
2959 | #define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enable SERDES Electrical Idle */ | ||
2960 | #define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */ | ||
2961 | #define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */ | ||
2962 | #define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */ | ||
2963 | #define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */ | ||
2964 | #define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */ | ||
2965 | #define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */ | ||
2966 | #define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */ | ||
2967 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300 | ||
2968 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */ | ||
2969 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */ | ||
2970 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */ | ||
2971 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */ | ||
2972 | |||
2973 | /* In-Band Control Register (Page 194, Register 18) */ | ||
2974 | #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */ | ||
2975 | |||
2684 | 2976 | ||
2685 | /* Bit definitions for valid PHY IDs. */ | 2977 | /* Bit definitions for valid PHY IDs. */ |
2686 | /* I = Integrated | 2978 | /* I = Integrated |
@@ -2695,6 +2987,7 @@ struct e1000_host_command_info { | |||
2695 | #define M88E1011_I_REV_4 0x04 | 2987 | #define M88E1011_I_REV_4 0x04 |
2696 | #define M88E1111_I_PHY_ID 0x01410CC0 | 2988 | #define M88E1111_I_PHY_ID 0x01410CC0 |
2697 | #define L1LXT971A_PHY_ID 0x001378E0 | 2989 | #define L1LXT971A_PHY_ID 0x001378E0 |
2990 | #define GG82563_E_PHY_ID 0x01410CA0 | ||
2698 | 2991 | ||
2699 | /* Miscellaneous PHY bit definitions. */ | 2992 | /* Miscellaneous PHY bit definitions. */ |
2700 | #define PHY_PREAMBLE 0xFFFFFFFF | 2993 | #define PHY_PREAMBLE 0xFFFFFFFF |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 84dcca3776ee..f39de16e6b97 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -29,6 +29,23 @@ | |||
29 | #include "e1000.h" | 29 | #include "e1000.h" |
30 | 30 | ||
31 | /* Change Log | 31 | /* Change Log |
32 | * 7.0.33 3-Feb-2006 | ||
33 | * o Added another fix for the pass false carrier bit | ||
34 | * 7.0.32 24-Jan-2006 | ||
35 | * o Need to rebuild with new version number for the pass false carrier | ||
36 | * fix in e1000_hw.c | ||
37 | * 7.0.30 18-Jan-2006 | ||
38 | * o fixup for tso workaround to disable it for pci-x | ||
39 | * o fix mem leak on 82542 | ||
40 | * o fixes for 10 Mb/s connections and incorrect stats | ||
41 | * 7.0.28 01/06/2006 | ||
42 | * o hardware workaround to only set "speed mode" bit for 1G link. | ||
43 | * 7.0.26 12/23/2005 | ||
44 | * o wake on lan support modified for device ID 10B5 | ||
45 | * o fix dhcp + vlan issue not making it to the iAMT firmware | ||
46 | * 7.0.24 12/9/2005 | ||
47 | * o New hardware support for the Gigabit NIC embedded in the south bridge | ||
48 | * o Fixes to the recycling logic (skb->tail) from IBM LTC | ||
32 | * 6.3.9 12/16/2005 | 49 | * 6.3.9 12/16/2005 |
33 | * o incorporate fix for recycled skbs from IBM LTC | 50 | * o incorporate fix for recycled skbs from IBM LTC |
34 | * 6.3.7 11/18/2005 | 51 | * 6.3.7 11/18/2005 |
@@ -46,54 +63,8 @@ | |||
46 | * rx_buffer_len | 63 | * rx_buffer_len |
47 | * 6.3.1 9/19/05 | 64 | * 6.3.1 9/19/05 |
48 | * o Use adapter->tx_timeout_factor in Tx Hung Detect logic | 65 | * o Use adapter->tx_timeout_factor in Tx Hung Detect logic |
49 | (e1000_clean_tx_irq) | 66 | * (e1000_clean_tx_irq) |
50 | * o Support for 8086:10B5 device (Quad Port) | 67 | * o Support for 8086:10B5 device (Quad Port) |
51 | * 6.2.14 9/15/05 | ||
52 | * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface | ||
53 | * open/close | ||
54 | * 6.2.13 9/14/05 | ||
55 | * o Invoke e1000_check_mng_mode only for 8257x controllers since it | ||
56 | * accesses the FWSM that is not supported in other controllers | ||
57 | * 6.2.12 9/9/05 | ||
58 | * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER | ||
59 | * o set RCTL:SECRC only for controllers newer than 82543. | ||
60 | * o When the n/w interface comes down reset DRV_LOAD bit to notify f/w. | ||
61 | * This code was moved from e1000_remove to e1000_close | ||
62 | * 6.2.10 9/6/05 | ||
63 | * o Fix error in updating RDT in el1000_alloc_rx_buffers[_ps] -- one off. | ||
64 | * o Enable fc by default on 82573 controllers (do not read eeprom) | ||
65 | * o Fix rx_errors statistic not to include missed_packet_count | ||
66 | * o Fix rx_dropped statistic not to include missed_packet_count | ||
67 | (Padraig Brady) | ||
68 | * 6.2.9 8/30/05 | ||
69 | * o Remove call to update statistics from the controller ib e1000_get_stats | ||
70 | * 6.2.8 8/30/05 | ||
71 | * o Improved algorithm for rx buffer allocation/rdt update | ||
72 | * o Flow control watermarks relative to rx PBA size | ||
73 | * o Simplified 'Tx Hung' detect logic | ||
74 | * 6.2.7 8/17/05 | ||
75 | * o Report rx buffer allocation failures and tx timeout counts in stats | ||
76 | * 6.2.6 8/16/05 | ||
77 | * o Implement workaround for controller erratum -- linear non-tso packet | ||
78 | * following a TSO gets written back prematurely | ||
79 | * 6.2.5 8/15/05 | ||
80 | * o Set netdev->tx_queue_len based on link speed/duplex settings. | ||
81 | * o Fix net_stats.rx_fifo_errors <p@draigBrady.com> | ||
82 | * o Do not power off PHY if SoL/IDER session is active | ||
83 | * 6.2.4 8/10/05 | ||
84 | * o Fix loopback test setup/cleanup for 82571/3 controllers | ||
85 | * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat | ||
86 | * all packets as raw | ||
87 | * o Prevent operations that will cause the PHY to be reset if SoL/IDER | ||
88 | * sessions are active and log a message | ||
89 | * 6.2.2 7/21/05 | ||
90 | * o used fixed size descriptors for all MTU sizes, reduces memory load | ||
91 | * 6.1.2 4/13/05 | ||
92 | * o Fixed ethtool diagnostics | ||
93 | * o Enabled flow control to take default eeprom settings | ||
94 | * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent | ||
95 | * calls, one from mii_ioctl and other from within update_stats while | ||
96 | * processing MIIREG ioctl. | ||
97 | */ | 68 | */ |
98 | 69 | ||
99 | char e1000_driver_name[] = "e1000"; | 70 | char e1000_driver_name[] = "e1000"; |
@@ -103,7 +74,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |||
103 | #else | 74 | #else |
104 | #define DRIVERNAPI "-NAPI" | 75 | #define DRIVERNAPI "-NAPI" |
105 | #endif | 76 | #endif |
106 | #define DRV_VERSION "6.3.9-k4"DRIVERNAPI | 77 | #define DRV_VERSION "7.0.33-k2"DRIVERNAPI |
107 | char e1000_driver_version[] = DRV_VERSION; | 78 | char e1000_driver_version[] = DRV_VERSION; |
108 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; | 79 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; |
109 | 80 | ||
@@ -157,32 +128,26 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
157 | INTEL_E1000_ETHERNET_DEVICE(0x108A), | 128 | INTEL_E1000_ETHERNET_DEVICE(0x108A), |
158 | INTEL_E1000_ETHERNET_DEVICE(0x108B), | 129 | INTEL_E1000_ETHERNET_DEVICE(0x108B), |
159 | INTEL_E1000_ETHERNET_DEVICE(0x108C), | 130 | INTEL_E1000_ETHERNET_DEVICE(0x108C), |
131 | INTEL_E1000_ETHERNET_DEVICE(0x1096), | ||
132 | INTEL_E1000_ETHERNET_DEVICE(0x1098), | ||
160 | INTEL_E1000_ETHERNET_DEVICE(0x1099), | 133 | INTEL_E1000_ETHERNET_DEVICE(0x1099), |
161 | INTEL_E1000_ETHERNET_DEVICE(0x109A), | 134 | INTEL_E1000_ETHERNET_DEVICE(0x109A), |
162 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), | 135 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), |
136 | INTEL_E1000_ETHERNET_DEVICE(0x10B9), | ||
163 | /* required last entry */ | 137 | /* required last entry */ |
164 | {0,} | 138 | {0,} |
165 | }; | 139 | }; |
166 | 140 | ||
167 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); | 141 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); |
168 | 142 | ||
169 | int e1000_up(struct e1000_adapter *adapter); | ||
170 | void e1000_down(struct e1000_adapter *adapter); | ||
171 | void e1000_reset(struct e1000_adapter *adapter); | ||
172 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); | ||
173 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | ||
174 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | ||
175 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | ||
176 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | ||
177 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, | 143 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, |
178 | struct e1000_tx_ring *txdr); | 144 | struct e1000_tx_ring *txdr); |
179 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | 145 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, |
180 | struct e1000_rx_ring *rxdr); | 146 | struct e1000_rx_ring *rxdr); |
181 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, | 147 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, |
182 | struct e1000_tx_ring *tx_ring); | 148 | struct e1000_tx_ring *tx_ring); |
183 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, | 149 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, |
184 | struct e1000_rx_ring *rx_ring); | 150 | struct e1000_rx_ring *rx_ring); |
185 | void e1000_update_stats(struct e1000_adapter *adapter); | ||
186 | 151 | ||
187 | /* Local Function Prototypes */ | 152 | /* Local Function Prototypes */ |
188 | 153 | ||
@@ -191,9 +156,6 @@ static void e1000_exit_module(void); | |||
191 | static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); | 156 | static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); |
192 | static void __devexit e1000_remove(struct pci_dev *pdev); | 157 | static void __devexit e1000_remove(struct pci_dev *pdev); |
193 | static int e1000_alloc_queues(struct e1000_adapter *adapter); | 158 | static int e1000_alloc_queues(struct e1000_adapter *adapter); |
194 | #ifdef CONFIG_E1000_MQ | ||
195 | static void e1000_setup_queue_mapping(struct e1000_adapter *adapter); | ||
196 | #endif | ||
197 | static int e1000_sw_init(struct e1000_adapter *adapter); | 159 | static int e1000_sw_init(struct e1000_adapter *adapter); |
198 | static int e1000_open(struct net_device *netdev); | 160 | static int e1000_open(struct net_device *netdev); |
199 | static int e1000_close(struct net_device *netdev); | 161 | static int e1000_close(struct net_device *netdev); |
@@ -241,11 +203,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
241 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); | 203 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); |
242 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | 204 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, |
243 | int cmd); | 205 | int cmd); |
244 | void e1000_set_ethtool_ops(struct net_device *netdev); | ||
245 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); | 206 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); |
246 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); | 207 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); |
247 | static void e1000_tx_timeout(struct net_device *dev); | 208 | static void e1000_tx_timeout(struct net_device *dev); |
248 | static void e1000_tx_timeout_task(struct net_device *dev); | 209 | static void e1000_reset_task(struct net_device *dev); |
249 | static void e1000_smartspeed(struct e1000_adapter *adapter); | 210 | static void e1000_smartspeed(struct e1000_adapter *adapter); |
250 | static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, | 211 | static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, |
251 | struct sk_buff *skb); | 212 | struct sk_buff *skb); |
@@ -265,14 +226,6 @@ static int e1000_resume(struct pci_dev *pdev); | |||
265 | static void e1000_netpoll (struct net_device *netdev); | 226 | static void e1000_netpoll (struct net_device *netdev); |
266 | #endif | 227 | #endif |
267 | 228 | ||
268 | #ifdef CONFIG_E1000_MQ | ||
269 | /* for multiple Rx queues */ | ||
270 | void e1000_rx_schedule(void *data); | ||
271 | #endif | ||
272 | |||
273 | /* Exported from other modules */ | ||
274 | |||
275 | extern void e1000_check_options(struct e1000_adapter *adapter); | ||
276 | 229 | ||
277 | static struct pci_driver e1000_driver = { | 230 | static struct pci_driver e1000_driver = { |
278 | .name = e1000_driver_name, | 231 | .name = e1000_driver_name, |
@@ -380,7 +333,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
380 | (vid != old_vid) && | 333 | (vid != old_vid) && |
381 | !adapter->vlgrp->vlan_devices[old_vid]) | 334 | !adapter->vlgrp->vlan_devices[old_vid]) |
382 | e1000_vlan_rx_kill_vid(netdev, old_vid); | 335 | e1000_vlan_rx_kill_vid(netdev, old_vid); |
383 | } | 336 | } else |
337 | adapter->mng_vlan_id = vid; | ||
384 | } | 338 | } |
385 | } | 339 | } |
386 | 340 | ||
@@ -502,10 +456,6 @@ e1000_up(struct e1000_adapter *adapter) | |||
502 | return err; | 456 | return err; |
503 | } | 457 | } |
504 | 458 | ||
505 | #ifdef CONFIG_E1000_MQ | ||
506 | e1000_setup_queue_mapping(adapter); | ||
507 | #endif | ||
508 | |||
509 | adapter->tx_queue_len = netdev->tx_queue_len; | 459 | adapter->tx_queue_len = netdev->tx_queue_len; |
510 | 460 | ||
511 | mod_timer(&adapter->watchdog_timer, jiffies); | 461 | mod_timer(&adapter->watchdog_timer, jiffies); |
@@ -526,9 +476,7 @@ e1000_down(struct e1000_adapter *adapter) | |||
526 | e1000_check_mng_mode(&adapter->hw); | 476 | e1000_check_mng_mode(&adapter->hw); |
527 | 477 | ||
528 | e1000_irq_disable(adapter); | 478 | e1000_irq_disable(adapter); |
529 | #ifdef CONFIG_E1000_MQ | 479 | |
530 | while (atomic_read(&adapter->rx_sched_call_data.count) != 0); | ||
531 | #endif | ||
532 | free_irq(adapter->pdev->irq, netdev); | 480 | free_irq(adapter->pdev->irq, netdev); |
533 | #ifdef CONFIG_PCI_MSI | 481 | #ifdef CONFIG_PCI_MSI |
534 | if (adapter->hw.mac_type > e1000_82547_rev_2 && | 482 | if (adapter->hw.mac_type > e1000_82547_rev_2 && |
@@ -587,6 +535,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
587 | break; | 535 | break; |
588 | case e1000_82571: | 536 | case e1000_82571: |
589 | case e1000_82572: | 537 | case e1000_82572: |
538 | case e1000_80003es2lan: | ||
590 | pba = E1000_PBA_38K; | 539 | pba = E1000_PBA_38K; |
591 | break; | 540 | break; |
592 | case e1000_82573: | 541 | case e1000_82573: |
@@ -619,7 +568,10 @@ e1000_reset(struct e1000_adapter *adapter) | |||
619 | 568 | ||
620 | adapter->hw.fc_high_water = fc_high_water_mark; | 569 | adapter->hw.fc_high_water = fc_high_water_mark; |
621 | adapter->hw.fc_low_water = fc_high_water_mark - 8; | 570 | adapter->hw.fc_low_water = fc_high_water_mark - 8; |
622 | adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; | 571 | if (adapter->hw.mac_type == e1000_80003es2lan) |
572 | adapter->hw.fc_pause_time = 0xFFFF; | ||
573 | else | ||
574 | adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; | ||
623 | adapter->hw.fc_send_xon = 1; | 575 | adapter->hw.fc_send_xon = 1; |
624 | adapter->hw.fc = adapter->hw.original_fc; | 576 | adapter->hw.fc = adapter->hw.original_fc; |
625 | 577 | ||
@@ -663,6 +615,7 @@ e1000_probe(struct pci_dev *pdev, | |||
663 | unsigned long mmio_start, mmio_len; | 615 | unsigned long mmio_start, mmio_len; |
664 | 616 | ||
665 | static int cards_found = 0; | 617 | static int cards_found = 0; |
618 | static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ | ||
666 | int i, err, pci_using_dac; | 619 | int i, err, pci_using_dac; |
667 | uint16_t eeprom_data; | 620 | uint16_t eeprom_data; |
668 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; | 621 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; |
@@ -755,6 +708,15 @@ e1000_probe(struct pci_dev *pdev, | |||
755 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) | 708 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) |
756 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 709 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
757 | 710 | ||
711 | /* if ksp3, indicate if it's port a being setup */ | ||
712 | if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 && | ||
713 | e1000_ksp3_port_a == 0) | ||
714 | adapter->ksp3_port_a = 1; | ||
715 | e1000_ksp3_port_a++; | ||
716 | /* Reset for multiple KP3 adapters */ | ||
717 | if (e1000_ksp3_port_a == 4) | ||
718 | e1000_ksp3_port_a = 0; | ||
719 | |||
758 | if (adapter->hw.mac_type >= e1000_82543) { | 720 | if (adapter->hw.mac_type >= e1000_82543) { |
759 | netdev->features = NETIF_F_SG | | 721 | netdev->features = NETIF_F_SG | |
760 | NETIF_F_HW_CSUM | | 722 | NETIF_F_HW_CSUM | |
@@ -826,8 +788,8 @@ e1000_probe(struct pci_dev *pdev, | |||
826 | adapter->phy_info_timer.function = &e1000_update_phy_info; | 788 | adapter->phy_info_timer.function = &e1000_update_phy_info; |
827 | adapter->phy_info_timer.data = (unsigned long) adapter; | 789 | adapter->phy_info_timer.data = (unsigned long) adapter; |
828 | 790 | ||
829 | INIT_WORK(&adapter->tx_timeout_task, | 791 | INIT_WORK(&adapter->reset_task, |
830 | (void (*)(void *))e1000_tx_timeout_task, netdev); | 792 | (void (*)(void *))e1000_reset_task, netdev); |
831 | 793 | ||
832 | /* we're going to reset, so assume we have no link for now */ | 794 | /* we're going to reset, so assume we have no link for now */ |
833 | 795 | ||
@@ -854,6 +816,7 @@ e1000_probe(struct pci_dev *pdev, | |||
854 | case e1000_82546: | 816 | case e1000_82546: |
855 | case e1000_82546_rev_3: | 817 | case e1000_82546_rev_3: |
856 | case e1000_82571: | 818 | case e1000_82571: |
819 | case e1000_80003es2lan: | ||
857 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ | 820 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ |
858 | e1000_read_eeprom(&adapter->hw, | 821 | e1000_read_eeprom(&adapter->hw, |
859 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | 822 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); |
@@ -972,10 +935,6 @@ e1000_remove(struct pci_dev *pdev) | |||
972 | iounmap(adapter->hw.hw_addr); | 935 | iounmap(adapter->hw.hw_addr); |
973 | pci_release_regions(pdev); | 936 | pci_release_regions(pdev); |
974 | 937 | ||
975 | #ifdef CONFIG_E1000_MQ | ||
976 | free_percpu(adapter->cpu_netdev); | ||
977 | free_percpu(adapter->cpu_tx_ring); | ||
978 | #endif | ||
979 | free_netdev(netdev); | 938 | free_netdev(netdev); |
980 | 939 | ||
981 | pci_disable_device(pdev); | 940 | pci_disable_device(pdev); |
@@ -1056,40 +1015,8 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
1056 | hw->master_slave = E1000_MASTER_SLAVE; | 1015 | hw->master_slave = E1000_MASTER_SLAVE; |
1057 | } | 1016 | } |
1058 | 1017 | ||
1059 | #ifdef CONFIG_E1000_MQ | ||
1060 | /* Number of supported queues */ | ||
1061 | switch (hw->mac_type) { | ||
1062 | case e1000_82571: | ||
1063 | case e1000_82572: | ||
1064 | /* These controllers support 2 tx queues, but with a single | ||
1065 | * qdisc implementation, multiple tx queues aren't quite as | ||
1066 | * interesting. If we can find a logical way of mapping | ||
1067 | * flows to a queue, then perhaps we can up the num_tx_queue | ||
1068 | * count back to its default. Until then, we run the risk of | ||
1069 | * terrible performance due to SACK overload. */ | ||
1070 | adapter->num_tx_queues = 1; | ||
1071 | adapter->num_rx_queues = 2; | ||
1072 | break; | ||
1073 | default: | ||
1074 | adapter->num_tx_queues = 1; | ||
1075 | adapter->num_rx_queues = 1; | ||
1076 | break; | ||
1077 | } | ||
1078 | adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus()); | ||
1079 | adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus()); | ||
1080 | DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n", | ||
1081 | adapter->num_rx_queues, | ||
1082 | ((adapter->num_rx_queues == 1) | ||
1083 | ? ((num_online_cpus() > 1) | ||
1084 | ? "(due to unsupported feature in current adapter)" | ||
1085 | : "(due to unsupported system configuration)") | ||
1086 | : "")); | ||
1087 | DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n", | ||
1088 | adapter->num_tx_queues); | ||
1089 | #else | ||
1090 | adapter->num_tx_queues = 1; | 1018 | adapter->num_tx_queues = 1; |
1091 | adapter->num_rx_queues = 1; | 1019 | adapter->num_rx_queues = 1; |
1092 | #endif | ||
1093 | 1020 | ||
1094 | if (e1000_alloc_queues(adapter)) { | 1021 | if (e1000_alloc_queues(adapter)) { |
1095 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | 1022 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); |
@@ -1152,51 +1079,9 @@ e1000_alloc_queues(struct e1000_adapter *adapter) | |||
1152 | memset(adapter->polling_netdev, 0, size); | 1079 | memset(adapter->polling_netdev, 0, size); |
1153 | #endif | 1080 | #endif |
1154 | 1081 | ||
1155 | #ifdef CONFIG_E1000_MQ | ||
1156 | adapter->rx_sched_call_data.func = e1000_rx_schedule; | ||
1157 | adapter->rx_sched_call_data.info = adapter->netdev; | ||
1158 | |||
1159 | adapter->cpu_netdev = alloc_percpu(struct net_device *); | ||
1160 | adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *); | ||
1161 | #endif | ||
1162 | |||
1163 | return E1000_SUCCESS; | 1082 | return E1000_SUCCESS; |
1164 | } | 1083 | } |
1165 | 1084 | ||
1166 | #ifdef CONFIG_E1000_MQ | ||
1167 | static void __devinit | ||
1168 | e1000_setup_queue_mapping(struct e1000_adapter *adapter) | ||
1169 | { | ||
1170 | int i, cpu; | ||
1171 | |||
1172 | adapter->rx_sched_call_data.func = e1000_rx_schedule; | ||
1173 | adapter->rx_sched_call_data.info = adapter->netdev; | ||
1174 | cpus_clear(adapter->rx_sched_call_data.cpumask); | ||
1175 | |||
1176 | adapter->cpu_netdev = alloc_percpu(struct net_device *); | ||
1177 | adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *); | ||
1178 | |||
1179 | lock_cpu_hotplug(); | ||
1180 | i = 0; | ||
1181 | for_each_online_cpu(cpu) { | ||
1182 | *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues]; | ||
1183 | /* This is incomplete because we'd like to assign separate | ||
1184 | * physical cpus to these netdev polling structures and | ||
1185 | * avoid saturating a subset of cpus. | ||
1186 | */ | ||
1187 | if (i < adapter->num_rx_queues) { | ||
1188 | *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; | ||
1189 | adapter->rx_ring[i].cpu = cpu; | ||
1190 | cpu_set(cpu, adapter->cpumask); | ||
1191 | } else | ||
1192 | *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; | ||
1193 | |||
1194 | i++; | ||
1195 | } | ||
1196 | unlock_cpu_hotplug(); | ||
1197 | } | ||
1198 | #endif | ||
1199 | |||
1200 | /** | 1085 | /** |
1201 | * e1000_open - Called when a network interface is made active | 1086 | * e1000_open - Called when a network interface is made active |
1202 | * @netdev: network interface device structure | 1087 | * @netdev: network interface device structure |
@@ -1435,18 +1320,6 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1435 | /* Setup the HW Tx Head and Tail descriptor pointers */ | 1320 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
1436 | 1321 | ||
1437 | switch (adapter->num_tx_queues) { | 1322 | switch (adapter->num_tx_queues) { |
1438 | case 2: | ||
1439 | tdba = adapter->tx_ring[1].dma; | ||
1440 | tdlen = adapter->tx_ring[1].count * | ||
1441 | sizeof(struct e1000_tx_desc); | ||
1442 | E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL)); | ||
1443 | E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32)); | ||
1444 | E1000_WRITE_REG(hw, TDLEN1, tdlen); | ||
1445 | E1000_WRITE_REG(hw, TDH1, 0); | ||
1446 | E1000_WRITE_REG(hw, TDT1, 0); | ||
1447 | adapter->tx_ring[1].tdh = E1000_TDH1; | ||
1448 | adapter->tx_ring[1].tdt = E1000_TDT1; | ||
1449 | /* Fall Through */ | ||
1450 | case 1: | 1323 | case 1: |
1451 | default: | 1324 | default: |
1452 | tdba = adapter->tx_ring[0].dma; | 1325 | tdba = adapter->tx_ring[0].dma; |
@@ -1477,6 +1350,10 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1477 | ipgr1 = DEFAULT_82542_TIPG_IPGR1; | 1350 | ipgr1 = DEFAULT_82542_TIPG_IPGR1; |
1478 | ipgr2 = DEFAULT_82542_TIPG_IPGR2; | 1351 | ipgr2 = DEFAULT_82542_TIPG_IPGR2; |
1479 | break; | 1352 | break; |
1353 | case e1000_80003es2lan: | ||
1354 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; | ||
1355 | ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; | ||
1356 | break; | ||
1480 | default: | 1357 | default: |
1481 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; | 1358 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; |
1482 | ipgr2 = DEFAULT_82543_TIPG_IPGR2; | 1359 | ipgr2 = DEFAULT_82543_TIPG_IPGR2; |
@@ -1497,10 +1374,13 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1497 | tctl = E1000_READ_REG(hw, TCTL); | 1374 | tctl = E1000_READ_REG(hw, TCTL); |
1498 | 1375 | ||
1499 | tctl &= ~E1000_TCTL_CT; | 1376 | tctl &= ~E1000_TCTL_CT; |
1500 | tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC | | 1377 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | |
1501 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | 1378 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); |
1502 | 1379 | ||
1503 | E1000_WRITE_REG(hw, TCTL, tctl); | 1380 | #ifdef DISABLE_MULR |
1381 | /* disable Multiple Reads for debugging */ | ||
1382 | tctl &= ~E1000_TCTL_MULR; | ||
1383 | #endif | ||
1504 | 1384 | ||
1505 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { | 1385 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { |
1506 | tarc = E1000_READ_REG(hw, TARC0); | 1386 | tarc = E1000_READ_REG(hw, TARC0); |
@@ -1513,6 +1393,15 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1513 | else | 1393 | else |
1514 | tarc |= (1 << 28); | 1394 | tarc |= (1 << 28); |
1515 | E1000_WRITE_REG(hw, TARC1, tarc); | 1395 | E1000_WRITE_REG(hw, TARC1, tarc); |
1396 | } else if (hw->mac_type == e1000_80003es2lan) { | ||
1397 | tarc = E1000_READ_REG(hw, TARC0); | ||
1398 | tarc |= 1; | ||
1399 | if (hw->media_type == e1000_media_type_internal_serdes) | ||
1400 | tarc |= (1 << 20); | ||
1401 | E1000_WRITE_REG(hw, TARC0, tarc); | ||
1402 | tarc = E1000_READ_REG(hw, TARC1); | ||
1403 | tarc |= 1; | ||
1404 | E1000_WRITE_REG(hw, TARC1, tarc); | ||
1516 | } | 1405 | } |
1517 | 1406 | ||
1518 | e1000_config_collision_dist(hw); | 1407 | e1000_config_collision_dist(hw); |
@@ -1531,6 +1420,9 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1531 | if (hw->mac_type == e1000_82544 && | 1420 | if (hw->mac_type == e1000_82544 && |
1532 | hw->bus_type == e1000_bus_type_pcix) | 1421 | hw->bus_type == e1000_bus_type_pcix) |
1533 | adapter->pcix_82544 = 1; | 1422 | adapter->pcix_82544 = 1; |
1423 | |||
1424 | E1000_WRITE_REG(hw, TCTL, tctl); | ||
1425 | |||
1534 | } | 1426 | } |
1535 | 1427 | ||
1536 | /** | 1428 | /** |
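Note on the ordering change in e1000_configure_tx() above: E1000_TCTL_EN is no longer set here, the TCTL write moves to the very end of the function, and the enable bit is applied by the watchdog only after link-up and after TARC0 has been tuned. A hedged sketch of the resulting ordering; the two helper names are hypothetical, the register names come from the diff.

/* 1) ring setup: program TCTL last, with the enable bit still clear */
static void example_configure_tx_tail(struct e1000_hw *hw, uint32_t tctl)
{
        tctl &= ~E1000_TCTL_CT;
        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
        /* ... TARC0/TARC1 adjustments happen before this write ... */
        E1000_WRITE_REG(hw, TCTL, tctl);
}

/* 2) watchdog, once the link is reported up: turn the transmitter on */
static void example_enable_transmitter(struct e1000_hw *hw)
{
        uint32_t tctl = E1000_READ_REG(hw, TCTL);

        tctl |= E1000_TCTL_EN;
        E1000_WRITE_REG(hw, TCTL, tctl);
}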
@@ -1790,12 +1682,9 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1790 | uint64_t rdba; | 1682 | uint64_t rdba; |
1791 | struct e1000_hw *hw = &adapter->hw; | 1683 | struct e1000_hw *hw = &adapter->hw; |
1792 | uint32_t rdlen, rctl, rxcsum, ctrl_ext; | 1684 | uint32_t rdlen, rctl, rxcsum, ctrl_ext; |
1793 | #ifdef CONFIG_E1000_MQ | ||
1794 | uint32_t reta, mrqc; | ||
1795 | int i; | ||
1796 | #endif | ||
1797 | 1685 | ||
1798 | if (adapter->rx_ps_pages) { | 1686 | if (adapter->rx_ps_pages) { |
1687 | /* this is a 32 byte descriptor */ | ||
1799 | rdlen = adapter->rx_ring[0].count * | 1688 | rdlen = adapter->rx_ring[0].count * |
1800 | sizeof(union e1000_rx_desc_packet_split); | 1689 | sizeof(union e1000_rx_desc_packet_split); |
1801 | adapter->clean_rx = e1000_clean_rx_irq_ps; | 1690 | adapter->clean_rx = e1000_clean_rx_irq_ps; |
@@ -1837,18 +1726,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1837 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 1726 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
1838 | * the Base and Length of the Rx Descriptor Ring */ | 1727 | * the Base and Length of the Rx Descriptor Ring */ |
1839 | switch (adapter->num_rx_queues) { | 1728 | switch (adapter->num_rx_queues) { |
1840 | #ifdef CONFIG_E1000_MQ | ||
1841 | case 2: | ||
1842 | rdba = adapter->rx_ring[1].dma; | ||
1843 | E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL)); | ||
1844 | E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32)); | ||
1845 | E1000_WRITE_REG(hw, RDLEN1, rdlen); | ||
1846 | E1000_WRITE_REG(hw, RDH1, 0); | ||
1847 | E1000_WRITE_REG(hw, RDT1, 0); | ||
1848 | adapter->rx_ring[1].rdh = E1000_RDH1; | ||
1849 | adapter->rx_ring[1].rdt = E1000_RDT1; | ||
1850 | /* Fall Through */ | ||
1851 | #endif | ||
1852 | case 1: | 1729 | case 1: |
1853 | default: | 1730 | default: |
1854 | rdba = adapter->rx_ring[0].dma; | 1731 | rdba = adapter->rx_ring[0].dma; |
@@ -1862,46 +1739,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1862 | break; | 1739 | break; |
1863 | } | 1740 | } |
1864 | 1741 | ||
1865 | #ifdef CONFIG_E1000_MQ | ||
1866 | if (adapter->num_rx_queues > 1) { | ||
1867 | uint32_t random[10]; | ||
1868 | |||
1869 | get_random_bytes(&random[0], 40); | ||
1870 | |||
1871 | if (hw->mac_type <= e1000_82572) { | ||
1872 | E1000_WRITE_REG(hw, RSSIR, 0); | ||
1873 | E1000_WRITE_REG(hw, RSSIM, 0); | ||
1874 | } | ||
1875 | |||
1876 | switch (adapter->num_rx_queues) { | ||
1877 | case 2: | ||
1878 | default: | ||
1879 | reta = 0x00800080; | ||
1880 | mrqc = E1000_MRQC_ENABLE_RSS_2Q; | ||
1881 | break; | ||
1882 | } | ||
1883 | |||
1884 | /* Fill out redirection table */ | ||
1885 | for (i = 0; i < 32; i++) | ||
1886 | E1000_WRITE_REG_ARRAY(hw, RETA, i, reta); | ||
1887 | /* Fill out hash function seeds */ | ||
1888 | for (i = 0; i < 10; i++) | ||
1889 | E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]); | ||
1890 | |||
1891 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | | ||
1892 | E1000_MRQC_RSS_FIELD_IPV4_TCP); | ||
1893 | E1000_WRITE_REG(hw, MRQC, mrqc); | ||
1894 | } | ||
1895 | |||
1896 | /* Multiqueue and packet checksumming are mutually exclusive. */ | ||
1897 | if (hw->mac_type >= e1000_82571) { | ||
1898 | rxcsum = E1000_READ_REG(hw, RXCSUM); | ||
1899 | rxcsum |= E1000_RXCSUM_PCSD; | ||
1900 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | ||
1901 | } | ||
1902 | |||
1903 | #else | ||
1904 | |||
1905 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ | 1742 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ |
1906 | if (hw->mac_type >= e1000_82543) { | 1743 | if (hw->mac_type >= e1000_82543) { |
1907 | rxcsum = E1000_READ_REG(hw, RXCSUM); | 1744 | rxcsum = E1000_READ_REG(hw, RXCSUM); |
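For context on the RSS block deleted above: the CONFIG_E1000_MQ path seeded a 40-byte hash key and filled the redirection table before enabling 2-queue RSS in MRQC. A condensed sketch of that removed programming sequence, using the register names and constants from the diff (the function name is illustrative):

#include <linux/random.h>

/* Sketch of the removed two-queue RSS setup; not current driver code. */
static void example_program_rss(struct e1000_hw *hw)
{
        uint32_t random[10];                    /* 40-byte hash key */
        uint32_t reta = 0x00800080;             /* alternate queues 0/1 */
        uint32_t mrqc = E1000_MRQC_ENABLE_RSS_2Q;
        int i;

        get_random_bytes(random, sizeof(random));

        for (i = 0; i < 32; i++)                /* redirection table */
                E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
        for (i = 0; i < 10; i++)                /* hash function seeds */
                E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);

        mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV4_TCP;
        E1000_WRITE_REG(hw, MRQC, mrqc);
}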
@@ -1920,7 +1757,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1920 | } | 1757 | } |
1921 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | 1758 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); |
1922 | } | 1759 | } |
1923 | #endif /* CONFIG_E1000_MQ */ | ||
1924 | 1760 | ||
1925 | if (hw->mac_type == e1000_82573) | 1761 | if (hw->mac_type == e1000_82573) |
1926 | E1000_WRITE_REG(hw, ERT, 0x0100); | 1762 | E1000_WRITE_REG(hw, ERT, 0x0100); |
@@ -2392,7 +2228,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2392 | { | 2228 | { |
2393 | struct net_device *netdev = adapter->netdev; | 2229 | struct net_device *netdev = adapter->netdev; |
2394 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2230 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2395 | uint32_t link; | 2231 | uint32_t link, tctl; |
2396 | 2232 | ||
2397 | e1000_check_for_link(&adapter->hw); | 2233 | e1000_check_for_link(&adapter->hw); |
2398 | if (adapter->hw.mac_type == e1000_82573) { | 2234 | if (adapter->hw.mac_type == e1000_82573) { |
@@ -2418,20 +2254,61 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2418 | adapter->link_duplex == FULL_DUPLEX ? | 2254 | adapter->link_duplex == FULL_DUPLEX ? |
2419 | "Full Duplex" : "Half Duplex"); | 2255 | "Full Duplex" : "Half Duplex"); |
2420 | 2256 | ||
2421 | /* tweak tx_queue_len according to speed/duplex */ | 2257 | /* tweak tx_queue_len according to speed/duplex |
2258 | * and adjust the timeout factor */ | ||
2422 | netdev->tx_queue_len = adapter->tx_queue_len; | 2259 | netdev->tx_queue_len = adapter->tx_queue_len; |
2423 | adapter->tx_timeout_factor = 1; | 2260 | adapter->tx_timeout_factor = 1; |
2424 | if (adapter->link_duplex == HALF_DUPLEX) { | 2261 | adapter->txb2b = 1; |
2262 | switch (adapter->link_speed) { | ||
2263 | case SPEED_10: | ||
2264 | adapter->txb2b = 0; | ||
2265 | netdev->tx_queue_len = 10; | ||
2266 | adapter->tx_timeout_factor = 8; | ||
2267 | break; | ||
2268 | case SPEED_100: | ||
2269 | adapter->txb2b = 0; | ||
2270 | netdev->tx_queue_len = 100; | ||
2271 | /* maybe add some timeout factor ? */ | ||
2272 | break; | ||
2273 | } | ||
2274 | |||
2275 | if ((adapter->hw.mac_type == e1000_82571 || | ||
2276 | adapter->hw.mac_type == e1000_82572) && | ||
2277 | adapter->txb2b == 0) { | ||
2278 | #define SPEED_MODE_BIT (1 << 21) | ||
2279 | uint32_t tarc0; | ||
2280 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); | ||
2281 | tarc0 &= ~SPEED_MODE_BIT; | ||
2282 | E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); | ||
2283 | } | ||
2284 | |||
2285 | #ifdef NETIF_F_TSO | ||
2286 | /* disable TSO for pcie and 10/100 speeds, to avoid | ||
2287 | * some hardware issues */ | ||
2288 | if (!adapter->tso_force && | ||
2289 | adapter->hw.bus_type == e1000_bus_type_pci_express){ | ||
2425 | switch (adapter->link_speed) { | 2290 | switch (adapter->link_speed) { |
2426 | case SPEED_10: | 2291 | case SPEED_10: |
2427 | netdev->tx_queue_len = 10; | ||
2428 | adapter->tx_timeout_factor = 8; | ||
2429 | break; | ||
2430 | case SPEED_100: | 2292 | case SPEED_100: |
2431 | netdev->tx_queue_len = 100; | 2293 | DPRINTK(PROBE,INFO, |
2294 | "10/100 speed: disabling TSO\n"); | ||
2295 | netdev->features &= ~NETIF_F_TSO; | ||
2296 | break; | ||
2297 | case SPEED_1000: | ||
2298 | netdev->features |= NETIF_F_TSO; | ||
2299 | break; | ||
2300 | default: | ||
2301 | /* oops */ | ||
2432 | break; | 2302 | break; |
2433 | } | 2303 | } |
2434 | } | 2304 | } |
2305 | #endif | ||
2306 | |||
2307 | /* enable transmits in the hardware, need to do this | ||
2308 | * after setting TARC0 */ | ||
2309 | tctl = E1000_READ_REG(&adapter->hw, TCTL); | ||
2310 | tctl |= E1000_TCTL_EN; | ||
2311 | E1000_WRITE_REG(&adapter->hw, TCTL, tctl); | ||
2435 | 2312 | ||
2436 | netif_carrier_on(netdev); | 2313 | netif_carrier_on(netdev); |
2437 | netif_wake_queue(netdev); | 2314 | netif_wake_queue(netdev); |
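The watchdog changes above size the qdisc length and the Tx-hang timeout to the negotiated speed, clear the 82571/82572 SPEED_MODE bit when not running back-to-back at gigabit, and drop TSO at 10/100 on PCI Express parts. The speed-dependent part reduces to a switch like the sketch below (values taken from the diff, the helper name is illustrative):

/* Sketch: tune queue length and hang-timeout factor by link speed. */
static void example_tune_for_speed(struct net_device *netdev,
                                   struct e1000_adapter *adapter)
{
        netdev->tx_queue_len = adapter->tx_queue_len;   /* gigabit default */
        adapter->tx_timeout_factor = 1;
        adapter->txb2b = 1;

        switch (adapter->link_speed) {
        case SPEED_10:
                adapter->txb2b = 0;
                netdev->tx_queue_len = 10;
                adapter->tx_timeout_factor = 8; /* frames drain far slower */
                break;
        case SPEED_100:
                adapter->txb2b = 0;
                netdev->tx_queue_len = 100;
                break;
        }
}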
@@ -2446,6 +2323,16 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2446 | netif_carrier_off(netdev); | 2323 | netif_carrier_off(netdev); |
2447 | netif_stop_queue(netdev); | 2324 | netif_stop_queue(netdev); |
2448 | mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); | 2325 | mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); |
2326 | |||
2327 | /* 80003ES2LAN workaround-- | ||
2328 | * For packet buffer work-around on link down event; | ||
2329 | * disable receives in the ISR and | ||
2330 | * reset device here in the watchdog | ||
2331 | */ | ||
2332 | if (adapter->hw.mac_type == e1000_80003es2lan) { | ||
2333 | /* reset device */ | ||
2334 | schedule_work(&adapter->reset_task); | ||
2335 | } | ||
2449 | } | 2336 | } |
2450 | 2337 | ||
2451 | e1000_smartspeed(adapter); | 2338 | e1000_smartspeed(adapter); |
@@ -2465,16 +2352,14 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2465 | 2352 | ||
2466 | e1000_update_adaptive(&adapter->hw); | 2353 | e1000_update_adaptive(&adapter->hw); |
2467 | 2354 | ||
2468 | #ifdef CONFIG_E1000_MQ | ||
2469 | txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id()); | ||
2470 | #endif | ||
2471 | if (!netif_carrier_ok(netdev)) { | 2355 | if (!netif_carrier_ok(netdev)) { |
2472 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { | 2356 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { |
2473 | /* We've lost link, so the controller stops DMA, | 2357 | /* We've lost link, so the controller stops DMA, |
2474 | * but we've got queued Tx work that's never going | 2358 | * but we've got queued Tx work that's never going |
2475 | * to get done, so reset controller to flush Tx. | 2359 | * to get done, so reset controller to flush Tx. |
2476 | * (Do the reset outside of interrupt context). */ | 2360 | * (Do the reset outside of interrupt context). */ |
2477 | schedule_work(&adapter->tx_timeout_task); | 2361 | adapter->tx_timeout_count++; |
2362 | schedule_work(&adapter->reset_task); | ||
2478 | } | 2363 | } |
2479 | } | 2364 | } |
2480 | 2365 | ||
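Both the Tx-timeout handler and the lost-carrier check in the watchdog now funnel into one reset_task, because the down/up sequence sleeps and therefore cannot run in the timer or interrupt context that detects the fault. A minimal sketch of the deferral pattern; the work handler matches the diff, the requesting helper is illustrative:

#include <linux/workqueue.h>

/* process context: safe to sleep while reinitializing the hardware */
static void e1000_reset_task(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev_priv(netdev);

        e1000_down(adapter);
        e1000_up(adapter);
}

/* timer/ISR context: record the event and defer the heavy work */
static void example_request_reset(struct e1000_adapter *adapter)
{
        adapter->tx_timeout_count++;
        schedule_work(&adapter->reset_task);
}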
@@ -2649,9 +2534,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2649 | /* Workaround for Controller erratum -- | 2534 | /* Workaround for Controller erratum -- |
2650 | * descriptor for non-tso packet in a linear SKB that follows a | 2535 | * descriptor for non-tso packet in a linear SKB that follows a |
2651 | * tso gets written back prematurely before the data is fully | 2536 | * tso gets written back prematurely before the data is fully |
2652 | * DMAd to the controller */ | 2537 | * DMA'd to the controller */ |
2653 | if (!skb->data_len && tx_ring->last_tx_tso && | 2538 | if (!skb->data_len && tx_ring->last_tx_tso && |
2654 | !skb_shinfo(skb)->tso_size) { | 2539 | !skb_shinfo(skb)->tso_size) { |
2655 | tx_ring->last_tx_tso = 0; | 2540 | tx_ring->last_tx_tso = 0; |
2656 | size -= 4; | 2541 | size -= 4; |
2657 | } | 2542 | } |
@@ -2840,7 +2725,7 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) | |||
2840 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) | 2725 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) |
2841 | return 0; | 2726 | return 0; |
2842 | } | 2727 | } |
2843 | if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { | 2728 | if (skb->len > MINIMUM_DHCP_PACKET_SIZE) { |
2844 | struct ethhdr *eth = (struct ethhdr *) skb->data; | 2729 | struct ethhdr *eth = (struct ethhdr *) skb->data; |
2845 | if ((htons(ETH_P_IP) == eth->h_proto)) { | 2730 | if ((htons(ETH_P_IP) == eth->h_proto)) { |
2846 | const struct iphdr *ip = | 2731 | const struct iphdr *ip = |
@@ -2881,11 +2766,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2881 | unsigned int f; | 2766 | unsigned int f; |
2882 | len -= skb->data_len; | 2767 | len -= skb->data_len; |
2883 | 2768 | ||
2884 | #ifdef CONFIG_E1000_MQ | ||
2885 | tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id()); | ||
2886 | #else | ||
2887 | tx_ring = adapter->tx_ring; | 2769 | tx_ring = adapter->tx_ring; |
2888 | #endif | ||
2889 | 2770 | ||
2890 | if (unlikely(skb->len <= 0)) { | 2771 | if (unlikely(skb->len <= 0)) { |
2891 | dev_kfree_skb_any(skb); | 2772 | dev_kfree_skb_any(skb); |
@@ -2905,21 +2786,29 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2905 | max_per_txd = min(mss << 2, max_per_txd); | 2786 | max_per_txd = min(mss << 2, max_per_txd); |
2906 | max_txd_pwr = fls(max_per_txd) - 1; | 2787 | max_txd_pwr = fls(max_per_txd) - 1; |
2907 | 2788 | ||
2908 | /* TSO Workaround for 82571/2 Controllers -- if skb->data | 2789 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data |
2909 | * points to just header, pull a few bytes of payload from | 2790 | * points to just header, pull a few bytes of payload from |
2910 | * frags into skb->data */ | 2791 | * frags into skb->data */ |
2911 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2792 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); |
2912 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) && | 2793 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { |
2913 | (adapter->hw.mac_type == e1000_82571 || | 2794 | switch (adapter->hw.mac_type) { |
2914 | adapter->hw.mac_type == e1000_82572)) { | 2795 | unsigned int pull_size; |
2915 | unsigned int pull_size; | 2796 | case e1000_82571: |
2916 | pull_size = min((unsigned int)4, skb->data_len); | 2797 | case e1000_82572: |
2917 | if (!__pskb_pull_tail(skb, pull_size)) { | 2798 | case e1000_82573: |
2918 | printk(KERN_ERR "__pskb_pull_tail failed.\n"); | 2799 | pull_size = min((unsigned int)4, skb->data_len); |
2919 | dev_kfree_skb_any(skb); | 2800 | if (!__pskb_pull_tail(skb, pull_size)) { |
2920 | return NETDEV_TX_OK; | 2801 | printk(KERN_ERR |
2802 | "__pskb_pull_tail failed.\n"); | ||
2803 | dev_kfree_skb_any(skb); | ||
2804 | return NETDEV_TX_OK; | ||
2805 | } | ||
2806 | len = skb->len - skb->data_len; | ||
2807 | break; | ||
2808 | default: | ||
2809 | /* do nothing */ | ||
2810 | break; | ||
2921 | } | 2811 | } |
2922 | len = skb->len - skb->data_len; | ||
2923 | } | 2812 | } |
2924 | } | 2813 | } |
2925 | 2814 | ||
@@ -2935,7 +2824,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2935 | #ifdef NETIF_F_TSO | 2824 | #ifdef NETIF_F_TSO |
2936 | /* Controller Erratum workaround */ | 2825 | /* Controller Erratum workaround */ |
2937 | if (!skb->data_len && tx_ring->last_tx_tso && | 2826 | if (!skb->data_len && tx_ring->last_tx_tso && |
2938 | !skb_shinfo(skb)->tso_size) | 2827 | !skb_shinfo(skb)->tso_size) |
2939 | count++; | 2828 | count++; |
2940 | #endif | 2829 | #endif |
2941 | 2830 | ||
@@ -2958,7 +2847,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2958 | if (adapter->pcix_82544) | 2847 | if (adapter->pcix_82544) |
2959 | count += nr_frags; | 2848 | count += nr_frags; |
2960 | 2849 | ||
2961 | if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) | 2850 | |
2851 | if (adapter->hw.tx_pkt_filtering && | ||
2852 | (adapter->hw.mac_type == e1000_82573)) | ||
2962 | e1000_transfer_dhcp_info(adapter, skb); | 2853 | e1000_transfer_dhcp_info(adapter, skb); |
2963 | 2854 | ||
2964 | local_irq_save(flags); | 2855 | local_irq_save(flags); |
@@ -3036,15 +2927,15 @@ e1000_tx_timeout(struct net_device *netdev) | |||
3036 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2927 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3037 | 2928 | ||
3038 | /* Do the reset outside of interrupt context */ | 2929 | /* Do the reset outside of interrupt context */ |
3039 | schedule_work(&adapter->tx_timeout_task); | 2930 | adapter->tx_timeout_count++; |
2931 | schedule_work(&adapter->reset_task); | ||
3040 | } | 2932 | } |
3041 | 2933 | ||
3042 | static void | 2934 | static void |
3043 | e1000_tx_timeout_task(struct net_device *netdev) | 2935 | e1000_reset_task(struct net_device *netdev) |
3044 | { | 2936 | { |
3045 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2937 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3046 | 2938 | ||
3047 | adapter->tx_timeout_count++; | ||
3048 | e1000_down(adapter); | 2939 | e1000_down(adapter); |
3049 | e1000_up(adapter); | 2940 | e1000_up(adapter); |
3050 | } | 2941 | } |
@@ -3079,6 +2970,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3079 | { | 2970 | { |
3080 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2971 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3081 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 2972 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
2973 | uint16_t eeprom_data = 0; | ||
3082 | 2974 | ||
3083 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | 2975 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || |
3084 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 2976 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
@@ -3090,14 +2982,28 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3090 | switch (adapter->hw.mac_type) { | 2982 | switch (adapter->hw.mac_type) { |
3091 | case e1000_82542_rev2_0: | 2983 | case e1000_82542_rev2_0: |
3092 | case e1000_82542_rev2_1: | 2984 | case e1000_82542_rev2_1: |
3093 | case e1000_82573: | ||
3094 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 2985 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
3095 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); | 2986 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); |
3096 | return -EINVAL; | 2987 | return -EINVAL; |
3097 | } | 2988 | } |
3098 | break; | 2989 | break; |
2990 | case e1000_82573: | ||
2991 | /* only enable jumbo frames if ASPM is disabled completely | ||
2992 | * this means both bits must be zero in 0x1A bits 3:2 */ | ||
2993 | e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1, | ||
2994 | &eeprom_data); | ||
2995 | if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) { | ||
2996 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | ||
2997 | DPRINTK(PROBE, ERR, | ||
2998 | "Jumbo Frames not supported.\n"); | ||
2999 | return -EINVAL; | ||
3000 | } | ||
3001 | break; | ||
3002 | } | ||
3003 | /* fall through to get support */ | ||
3099 | case e1000_82571: | 3004 | case e1000_82571: |
3100 | case e1000_82572: | 3005 | case e1000_82572: |
3006 | case e1000_80003es2lan: | ||
3101 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 | 3007 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 |
3102 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { | 3008 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { |
3103 | DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n"); | 3009 | DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n"); |
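The change_mtu hunk above allows jumbo frames on 82573 only when ASPM is completely disabled in the EEPROM (bits 3:2 of word 0x1A must both be zero); otherwise the old "Jumbo Frames not supported" path is kept. The gate amounts to the sketch below, reusing the EEPROM constants from the diff (the helper name is illustrative):

/* Sketch: 82573 jumbo frames are allowed only with ASPM fully disabled. */
static int example_82573_jumbo_allowed(struct e1000_hw *hw)
{
        uint16_t eeprom_data = 0;

        e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1, &eeprom_data);
        return (eeprom_data & EEPROM_WORD1A_ASPM_MASK) == 0;
}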
@@ -3251,11 +3157,15 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3251 | 3157 | ||
3252 | /* Rx Errors */ | 3158 | /* Rx Errors */ |
3253 | 3159 | ||
3160 | /* RLEC on some newer hardware can be incorrect so build | ||
3161 | * our own version based on RUC and ROC */ | ||
3254 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + | 3162 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + |
3255 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 3163 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3256 | adapter->stats.rlec + adapter->stats.cexterr; | 3164 | adapter->stats.ruc + adapter->stats.roc + |
3165 | adapter->stats.cexterr; | ||
3257 | adapter->net_stats.rx_dropped = 0; | 3166 | adapter->net_stats.rx_dropped = 0; |
3258 | adapter->net_stats.rx_length_errors = adapter->stats.rlec; | 3167 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + |
3168 | adapter->stats.roc; | ||
3259 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | 3169 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; |
3260 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; | 3170 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; |
3261 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; | 3171 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; |
@@ -3288,29 +3198,6 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3288 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 3198 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
3289 | } | 3199 | } |
3290 | 3200 | ||
3291 | #ifdef CONFIG_E1000_MQ | ||
3292 | void | ||
3293 | e1000_rx_schedule(void *data) | ||
3294 | { | ||
3295 | struct net_device *poll_dev, *netdev = data; | ||
3296 | struct e1000_adapter *adapter = netdev->priv; | ||
3297 | int this_cpu = get_cpu(); | ||
3298 | |||
3299 | poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu); | ||
3300 | if (poll_dev == NULL) { | ||
3301 | put_cpu(); | ||
3302 | return; | ||
3303 | } | ||
3304 | |||
3305 | if (likely(netif_rx_schedule_prep(poll_dev))) | ||
3306 | __netif_rx_schedule(poll_dev); | ||
3307 | else | ||
3308 | e1000_irq_enable(adapter); | ||
3309 | |||
3310 | put_cpu(); | ||
3311 | } | ||
3312 | #endif | ||
3313 | |||
3314 | /** | 3201 | /** |
3315 | * e1000_intr - Interrupt Handler | 3202 | * e1000_intr - Interrupt Handler |
3316 | * @irq: interrupt number | 3203 | * @irq: interrupt number |
@@ -3324,7 +3211,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3324 | struct net_device *netdev = data; | 3211 | struct net_device *netdev = data; |
3325 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3212 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3326 | struct e1000_hw *hw = &adapter->hw; | 3213 | struct e1000_hw *hw = &adapter->hw; |
3327 | uint32_t icr = E1000_READ_REG(hw, ICR); | 3214 | uint32_t rctl, icr = E1000_READ_REG(hw, ICR); |
3328 | #ifndef CONFIG_E1000_NAPI | 3215 | #ifndef CONFIG_E1000_NAPI |
3329 | int i; | 3216 | int i; |
3330 | #else | 3217 | #else |
@@ -3346,6 +3233,17 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3346 | 3233 | ||
3347 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3234 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3348 | hw->get_link_status = 1; | 3235 | hw->get_link_status = 1; |
3236 | /* 80003ES2LAN workaround-- | ||
3237 | * For packet buffer work-around on link down event; | ||
3238 | * disable receives here in the ISR and | ||
3239 | * reset adapter in watchdog | ||
3240 | */ | ||
3241 | if (netif_carrier_ok(netdev) && | ||
3242 | (adapter->hw.mac_type == e1000_80003es2lan)) { | ||
3243 | /* disable receives */ | ||
3244 | rctl = E1000_READ_REG(hw, RCTL); | ||
3245 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | ||
3246 | } | ||
3349 | mod_timer(&adapter->watchdog_timer, jiffies); | 3247 | mod_timer(&adapter->watchdog_timer, jiffies); |
3350 | } | 3248 | } |
3351 | 3249 | ||
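This interrupt-handler hunk is the other half of the 80003ES2LAN packet-buffer workaround seen in the watchdog: on a link-status interrupt while the carrier was up, receives are shut off immediately in the ISR, and the watchdog later schedules the full reset. The ISR side is a plain read-modify-write of RCTL, as in this sketch (names from the diff, helper name illustrative):

/* Sketch: 80003ES2LAN link-change handling, ISR side only. */
static void example_disable_rx_on_link_change(struct e1000_hw *hw,
                                              struct net_device *netdev)
{
        uint32_t rctl;

        if (netif_carrier_ok(netdev) && hw->mac_type == e1000_80003es2lan) {
                rctl = E1000_READ_REG(hw, RCTL);
                E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
        }
}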
@@ -3355,26 +3253,11 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3355 | E1000_WRITE_REG(hw, IMC, ~0); | 3253 | E1000_WRITE_REG(hw, IMC, ~0); |
3356 | E1000_WRITE_FLUSH(hw); | 3254 | E1000_WRITE_FLUSH(hw); |
3357 | } | 3255 | } |
3358 | #ifdef CONFIG_E1000_MQ | ||
3359 | if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { | ||
3360 | /* We must setup the cpumask once count == 0 since | ||
3361 | * each cpu bit is cleared when the work is done. */ | ||
3362 | adapter->rx_sched_call_data.cpumask = adapter->cpumask; | ||
3363 | atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem); | ||
3364 | atomic_set(&adapter->rx_sched_call_data.count, | ||
3365 | adapter->num_rx_queues); | ||
3366 | smp_call_async_mask(&adapter->rx_sched_call_data); | ||
3367 | } else { | ||
3368 | printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); | ||
3369 | } | ||
3370 | #else /* if !CONFIG_E1000_MQ */ | ||
3371 | if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) | 3256 | if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) |
3372 | __netif_rx_schedule(&adapter->polling_netdev[0]); | 3257 | __netif_rx_schedule(&adapter->polling_netdev[0]); |
3373 | else | 3258 | else |
3374 | e1000_irq_enable(adapter); | 3259 | e1000_irq_enable(adapter); |
3375 | #endif /* CONFIG_E1000_MQ */ | 3260 | #else |
3376 | |||
3377 | #else /* if !CONFIG_E1000_NAPI */ | ||
3378 | /* Writing IMC and IMS is needed for 82547. | 3261 | /* Writing IMC and IMS is needed for 82547. |
3379 | * Due to Hub Link bus being occupied, an interrupt | 3262 | * Due to Hub Link bus being occupied, an interrupt |
3380 | * de-assertion message is not able to be sent. | 3263 | * de-assertion message is not able to be sent. |
@@ -3398,7 +3281,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3398 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | 3281 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) |
3399 | e1000_irq_enable(adapter); | 3282 | e1000_irq_enable(adapter); |
3400 | 3283 | ||
3401 | #endif /* CONFIG_E1000_NAPI */ | 3284 | #endif |
3402 | 3285 | ||
3403 | return IRQ_HANDLED; | 3286 | return IRQ_HANDLED; |
3404 | } | 3287 | } |
@@ -3474,6 +3357,9 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3474 | struct e1000_tx_desc *tx_desc, *eop_desc; | 3357 | struct e1000_tx_desc *tx_desc, *eop_desc; |
3475 | struct e1000_buffer *buffer_info; | 3358 | struct e1000_buffer *buffer_info; |
3476 | unsigned int i, eop; | 3359 | unsigned int i, eop; |
3360 | #ifdef CONFIG_E1000_NAPI | ||
3361 | unsigned int count = 0; | ||
3362 | #endif | ||
3477 | boolean_t cleaned = FALSE; | 3363 | boolean_t cleaned = FALSE; |
3478 | 3364 | ||
3479 | i = tx_ring->next_to_clean; | 3365 | i = tx_ring->next_to_clean; |
@@ -3486,21 +3372,20 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3486 | buffer_info = &tx_ring->buffer_info[i]; | 3372 | buffer_info = &tx_ring->buffer_info[i]; |
3487 | cleaned = (i == eop); | 3373 | cleaned = (i == eop); |
3488 | 3374 | ||
3489 | #ifdef CONFIG_E1000_MQ | ||
3490 | tx_ring->tx_stats.bytes += buffer_info->length; | ||
3491 | #endif | ||
3492 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 3375 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
3493 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); | 3376 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); |
3494 | 3377 | ||
3495 | if (unlikely(++i == tx_ring->count)) i = 0; | 3378 | if (unlikely(++i == tx_ring->count)) i = 0; |
3496 | } | 3379 | } |
3497 | 3380 | ||
3498 | #ifdef CONFIG_E1000_MQ | ||
3499 | tx_ring->tx_stats.packets++; | ||
3500 | #endif | ||
3501 | 3381 | ||
3502 | eop = tx_ring->buffer_info[i].next_to_watch; | 3382 | eop = tx_ring->buffer_info[i].next_to_watch; |
3503 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 3383 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
3384 | #ifdef CONFIG_E1000_NAPI | ||
3385 | #define E1000_TX_WEIGHT 64 | ||
3386 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | ||
3387 | if (count++ == E1000_TX_WEIGHT) break; | ||
3388 | #endif | ||
3504 | } | 3389 | } |
3505 | 3390 | ||
3506 | tx_ring->next_to_clean = i; | 3391 | tx_ring->next_to_clean = i; |
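E1000_TX_WEIGHT above caps how many completed descriptors a single NAPI pass will reap, so a saturated transmitter cannot keep e1000_clean_tx_irq() looping indefinitely. The idea in isolation, as a trivial sketch (names and the constant are illustrative; only the value 64 comes from the diff):

#define EXAMPLE_TX_WEIGHT 64

/* Sketch: clean at most EXAMPLE_TX_WEIGHT entries per poll. */
static unsigned int example_bounded_clean(unsigned int ready)
{
        unsigned int count = 0;

        while (count < ready && count < EXAMPLE_TX_WEIGHT) {
                /* unmap and free one completed Tx buffer here */
                count++;
        }
        return count;   /* caller polls again if the budget was exhausted */
}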
@@ -3519,7 +3404,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3519 | adapter->detect_tx_hung = FALSE; | 3404 | adapter->detect_tx_hung = FALSE; |
3520 | if (tx_ring->buffer_info[eop].dma && | 3405 | if (tx_ring->buffer_info[eop].dma && |
3521 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + | 3406 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + |
3522 | adapter->tx_timeout_factor * HZ) | 3407 | (adapter->tx_timeout_factor * HZ)) |
3523 | && !(E1000_READ_REG(&adapter->hw, STATUS) & | 3408 | && !(E1000_READ_REG(&adapter->hw, STATUS) & |
3524 | E1000_STATUS_TXOFF)) { | 3409 | E1000_STATUS_TXOFF)) { |
3525 | 3410 | ||
@@ -3644,10 +3529,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3644 | skb = buffer_info->skb; | 3529 | skb = buffer_info->skb; |
3645 | buffer_info->skb = NULL; | 3530 | buffer_info->skb = NULL; |
3646 | 3531 | ||
3532 | prefetch(skb->data - NET_IP_ALIGN); | ||
3533 | |||
3647 | if (++i == rx_ring->count) i = 0; | 3534 | if (++i == rx_ring->count) i = 0; |
3648 | next_rxd = E1000_RX_DESC(*rx_ring, i); | 3535 | next_rxd = E1000_RX_DESC(*rx_ring, i); |
3536 | prefetch(next_rxd); | ||
3537 | |||
3649 | next_buffer = &rx_ring->buffer_info[i]; | 3538 | next_buffer = &rx_ring->buffer_info[i]; |
3650 | next_skb = next_buffer->skb; | 3539 | next_skb = next_buffer->skb; |
3540 | prefetch(next_skb->data - NET_IP_ALIGN); | ||
3651 | 3541 | ||
3652 | cleaned = TRUE; | 3542 | cleaned = TRUE; |
3653 | cleaned_count++; | 3543 | cleaned_count++; |
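The receive hunks add prefetch() hints so the cache misses for the next descriptor and the next buffer overlap with the work on the current packet. The shape of the added code, reduced to the prefetch logic (helper name illustrative; macros and fields from the driver):

#include <linux/prefetch.h>

/* Sketch: warm the caches one RX entry ahead of the one being processed. */
static void example_rx_prefetch(struct e1000_rx_ring *rx_ring,
                                unsigned int i, struct sk_buff *skb)
{
        struct e1000_rx_desc *next_rxd;
        struct sk_buff *next_skb;

        prefetch(skb->data - NET_IP_ALIGN);             /* current payload */

        if (++i == rx_ring->count)
                i = 0;
        next_rxd = E1000_RX_DESC(*rx_ring, i);
        prefetch(next_rxd);                             /* next descriptor */

        next_skb = rx_ring->buffer_info[i].skb;
        prefetch(next_skb->data - NET_IP_ALIGN);        /* next payload */
}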
@@ -3733,10 +3623,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3733 | } | 3623 | } |
3734 | #endif /* CONFIG_E1000_NAPI */ | 3624 | #endif /* CONFIG_E1000_NAPI */ |
3735 | netdev->last_rx = jiffies; | 3625 | netdev->last_rx = jiffies; |
3736 | #ifdef CONFIG_E1000_MQ | ||
3737 | rx_ring->rx_stats.packets++; | ||
3738 | rx_ring->rx_stats.bytes += length; | ||
3739 | #endif | ||
3740 | 3626 | ||
3741 | next_desc: | 3627 | next_desc: |
3742 | rx_desc->status = 0; | 3628 | rx_desc->status = 0; |
@@ -3747,6 +3633,7 @@ next_desc: | |||
3747 | cleaned_count = 0; | 3633 | cleaned_count = 0; |
3748 | } | 3634 | } |
3749 | 3635 | ||
3636 | /* use prefetched values */ | ||
3750 | rx_desc = next_rxd; | 3637 | rx_desc = next_rxd; |
3751 | buffer_info = next_buffer; | 3638 | buffer_info = next_buffer; |
3752 | } | 3639 | } |
@@ -3789,9 +3676,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3789 | i = rx_ring->next_to_clean; | 3676 | i = rx_ring->next_to_clean; |
3790 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3677 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
3791 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | 3678 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
3792 | buffer_info = &rx_ring->buffer_info[i]; | ||
3793 | 3679 | ||
3794 | while (staterr & E1000_RXD_STAT_DD) { | 3680 | while (staterr & E1000_RXD_STAT_DD) { |
3681 | buffer_info = &rx_ring->buffer_info[i]; | ||
3795 | ps_page = &rx_ring->ps_page[i]; | 3682 | ps_page = &rx_ring->ps_page[i]; |
3796 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 3683 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
3797 | #ifdef CONFIG_E1000_NAPI | 3684 | #ifdef CONFIG_E1000_NAPI |
@@ -3801,10 +3688,16 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3801 | #endif | 3688 | #endif |
3802 | skb = buffer_info->skb; | 3689 | skb = buffer_info->skb; |
3803 | 3690 | ||
3691 | /* in the packet split case this is header only */ | ||
3692 | prefetch(skb->data - NET_IP_ALIGN); | ||
3693 | |||
3804 | if (++i == rx_ring->count) i = 0; | 3694 | if (++i == rx_ring->count) i = 0; |
3805 | next_rxd = E1000_RX_DESC_PS(*rx_ring, i); | 3695 | next_rxd = E1000_RX_DESC_PS(*rx_ring, i); |
3696 | prefetch(next_rxd); | ||
3697 | |||
3806 | next_buffer = &rx_ring->buffer_info[i]; | 3698 | next_buffer = &rx_ring->buffer_info[i]; |
3807 | next_skb = next_buffer->skb; | 3699 | next_skb = next_buffer->skb; |
3700 | prefetch(next_skb->data - NET_IP_ALIGN); | ||
3808 | 3701 | ||
3809 | cleaned = TRUE; | 3702 | cleaned = TRUE; |
3810 | cleaned_count++; | 3703 | cleaned_count++; |
@@ -3836,23 +3729,49 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3836 | /* Good Receive */ | 3729 | /* Good Receive */ |
3837 | skb_put(skb, length); | 3730 | skb_put(skb, length); |
3838 | 3731 | ||
3732 | { | ||
3733 | /* this looks ugly, but it seems compiler issues make it | ||
3734 | more efficient than reusing j */ | ||
3735 | int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); | ||
3736 | |||
3737 | /* page alloc/put takes too long and affects small packet | ||
3738 | * throughput, so unsplit small packets and save the alloc/put */ | ||
3739 | if (l1 && ((length + l1) < E1000_CB_LENGTH)) { | ||
3740 | u8 *vaddr; | ||
3741 | /* there is no documentation about how to call | ||
3742 | * kmap_atomic, so we can't hold the mapping | ||
3743 | * very long */ | ||
3744 | pci_dma_sync_single_for_cpu(pdev, | ||
3745 | ps_page_dma->ps_page_dma[0], | ||
3746 | PAGE_SIZE, | ||
3747 | PCI_DMA_FROMDEVICE); | ||
3748 | vaddr = kmap_atomic(ps_page->ps_page[0], | ||
3749 | KM_SKB_DATA_SOFTIRQ); | ||
3750 | memcpy(skb->tail, vaddr, l1); | ||
3751 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | ||
3752 | pci_dma_sync_single_for_device(pdev, | ||
3753 | ps_page_dma->ps_page_dma[0], | ||
3754 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
3755 | skb_put(skb, l1); | ||
3756 | length += l1; | ||
3757 | goto copydone; | ||
3758 | } /* if */ | ||
3759 | } | ||
3760 | |||
3839 | for (j = 0; j < adapter->rx_ps_pages; j++) { | 3761 | for (j = 0; j < adapter->rx_ps_pages; j++) { |
3840 | if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) | 3762 | if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j]))) |
3841 | break; | 3763 | break; |
3842 | |||
3843 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], | 3764 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], |
3844 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 3765 | PAGE_SIZE, PCI_DMA_FROMDEVICE); |
3845 | ps_page_dma->ps_page_dma[j] = 0; | 3766 | ps_page_dma->ps_page_dma[j] = 0; |
3846 | skb_shinfo(skb)->frags[j].page = | 3767 | skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0, |
3847 | ps_page->ps_page[j]; | 3768 | length); |
3848 | ps_page->ps_page[j] = NULL; | 3769 | ps_page->ps_page[j] = NULL; |
3849 | skb_shinfo(skb)->frags[j].page_offset = 0; | ||
3850 | skb_shinfo(skb)->frags[j].size = length; | ||
3851 | skb_shinfo(skb)->nr_frags++; | ||
3852 | skb->len += length; | 3770 | skb->len += length; |
3853 | skb->data_len += length; | 3771 | skb->data_len += length; |
3854 | } | 3772 | } |
3855 | 3773 | ||
3774 | copydone: | ||
3856 | e1000_rx_checksum(adapter, staterr, | 3775 | e1000_rx_checksum(adapter, staterr, |
3857 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | 3776 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); |
3858 | skb->protocol = eth_type_trans(skb, netdev); | 3777 | skb->protocol = eth_type_trans(skb, netdev); |
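The packet-split hunk above adds a copybreak: when the first page fragment is small enough that header plus payload fit under E1000_CB_LENGTH, the payload is copied out of the DMA page under kmap_atomic() instead of attaching the page to the skb, saving a page get/put per small packet. A trimmed sketch of that path under the same assumptions (era-specific kmap_atomic/KM_SKB_DATA_SOFTIRQ API; helper name and return convention illustrative):

/* Sketch: unsplit a small packet-split receive to avoid page overhead. */
static int example_ps_copybreak(struct pci_dev *pdev, struct sk_buff *skb,
                                struct page *page, dma_addr_t dma, int l1)
{
        u8 *vaddr;

        if (!l1 || skb->len + l1 >= E1000_CB_LENGTH)
                return 0;               /* keep the normal page-based path */

        pci_dma_sync_single_for_cpu(pdev, dma, PAGE_SIZE, PCI_DMA_FROMDEVICE);
        vaddr = kmap_atomic(page, KM_SKB_DATA_SOFTIRQ);
        memcpy(skb->tail, vaddr, l1);   /* append payload to the header skb */
        kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
        pci_dma_sync_single_for_device(pdev, dma, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
        skb_put(skb, l1);

        return 1;                       /* copied; skip the fragment loop */
}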
@@ -3878,10 +3797,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3878 | } | 3797 | } |
3879 | #endif /* CONFIG_E1000_NAPI */ | 3798 | #endif /* CONFIG_E1000_NAPI */ |
3880 | netdev->last_rx = jiffies; | 3799 | netdev->last_rx = jiffies; |
3881 | #ifdef CONFIG_E1000_MQ | ||
3882 | rx_ring->rx_stats.packets++; | ||
3883 | rx_ring->rx_stats.bytes += length; | ||
3884 | #endif | ||
3885 | 3800 | ||
3886 | next_desc: | 3801 | next_desc: |
3887 | rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); | 3802 | rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); |
@@ -3893,6 +3808,7 @@ next_desc: | |||
3893 | cleaned_count = 0; | 3808 | cleaned_count = 0; |
3894 | } | 3809 | } |
3895 | 3810 | ||
3811 | /* use prefetched values */ | ||
3896 | rx_desc = next_rxd; | 3812 | rx_desc = next_rxd; |
3897 | buffer_info = next_buffer; | 3813 | buffer_info = next_buffer; |
3898 | 3814 | ||
@@ -3936,7 +3852,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3936 | goto map_skb; | 3852 | goto map_skb; |
3937 | } | 3853 | } |
3938 | 3854 | ||
3939 | |||
3940 | if (unlikely(!skb)) { | 3855 | if (unlikely(!skb)) { |
3941 | /* Better luck next round */ | 3856 | /* Better luck next round */ |
3942 | adapter->alloc_rx_buff_failed++; | 3857 | adapter->alloc_rx_buff_failed++; |
@@ -4242,7 +4157,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4242 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4157 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4243 | return -EIO; | 4158 | return -EIO; |
4244 | } | 4159 | } |
4245 | if (adapter->hw.phy_type == e1000_phy_m88) { | 4160 | if (adapter->hw.phy_type == e1000_media_type_copper) { |
4246 | switch (data->reg_num) { | 4161 | switch (data->reg_num) { |
4247 | case PHY_CTRL: | 4162 | case PHY_CTRL: |
4248 | if (mii_reg & MII_CR_POWER_DOWN) | 4163 | if (mii_reg & MII_CR_POWER_DOWN) |
@@ -4258,8 +4173,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4258 | else | 4173 | else |
4259 | spddplx = SPEED_10; | 4174 | spddplx = SPEED_10; |
4260 | spddplx += (mii_reg & 0x100) | 4175 | spddplx += (mii_reg & 0x100) |
4261 | ? FULL_DUPLEX : | 4176 | ? DUPLEX_FULL : |
4262 | HALF_DUPLEX; | 4177 | DUPLEX_HALF; |
4263 | retval = e1000_set_spd_dplx(adapter, | 4178 | retval = e1000_set_spd_dplx(adapter, |
4264 | spddplx); | 4179 | spddplx); |
4265 | if (retval) { | 4180 | if (retval) { |
@@ -4489,8 +4404,8 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) | |||
4489 | } | 4404 | } |
4490 | 4405 | ||
4491 | #ifdef CONFIG_PM | 4406 | #ifdef CONFIG_PM |
4492 | /* these functions save and restore 16 or 64 dwords (64-256 bytes) of config | 4407 | /* Save/restore 16 or 64 dwords of PCI config space depending on which |
4493 | * space versus the 64 bytes that pci_[save|restore]_state handle | 4408 | * bus we're on (PCI(X) vs. PCI-E) |
4494 | */ | 4409 | */ |
4495 | #define PCIE_CONFIG_SPACE_LEN 256 | 4410 | #define PCIE_CONFIG_SPACE_LEN 256 |
4496 | #define PCI_CONFIG_SPACE_LEN 64 | 4411 | #define PCI_CONFIG_SPACE_LEN 64 |
@@ -4500,6 +4415,7 @@ e1000_pci_save_state(struct e1000_adapter *adapter) | |||
4500 | struct pci_dev *dev = adapter->pdev; | 4415 | struct pci_dev *dev = adapter->pdev; |
4501 | int size; | 4416 | int size; |
4502 | int i; | 4417 | int i; |
4418 | |||
4503 | if (adapter->hw.mac_type >= e1000_82571) | 4419 | if (adapter->hw.mac_type >= e1000_82571) |
4504 | size = PCIE_CONFIG_SPACE_LEN; | 4420 | size = PCIE_CONFIG_SPACE_LEN; |
4505 | else | 4421 | else |
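The PM hunks keep the driver's private save/restore because PCI Express functions expose 256 bytes of config space while pci_save_state() of this era only covered the first 64. The save side is just a dword loop over the chosen length; a self-contained sketch under those assumptions (names and allocation strategy illustrative):

#include <linux/pci.h>
#include <linux/slab.h>

#define EXAMPLE_PCIE_CFG_LEN    256
#define EXAMPLE_PCI_CFG_LEN      64

/* Sketch: snapshot the whole config space, sized by bus type. */
static u32 *example_pci_save_config(struct pci_dev *dev, int is_pcie)
{
        int size = is_pcie ? EXAMPLE_PCIE_CFG_LEN : EXAMPLE_PCI_CFG_LEN;
        u32 *space;
        int i;

        space = kmalloc(size, GFP_KERNEL);
        if (!space)
                return NULL;
        for (i = 0; i < size / 4; i++)
                pci_read_config_dword(dev, i * 4, &space[i]);
        return space;
}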
@@ -4523,8 +4439,10 @@ e1000_pci_restore_state(struct e1000_adapter *adapter) | |||
4523 | struct pci_dev *dev = adapter->pdev; | 4439 | struct pci_dev *dev = adapter->pdev; |
4524 | int size; | 4440 | int size; |
4525 | int i; | 4441 | int i; |
4442 | |||
4526 | if (adapter->config_space == NULL) | 4443 | if (adapter->config_space == NULL) |
4527 | return; | 4444 | return; |
4445 | |||
4528 | if (adapter->hw.mac_type >= e1000_82571) | 4446 | if (adapter->hw.mac_type >= e1000_82571) |
4529 | size = PCIE_CONFIG_SPACE_LEN; | 4447 | size = PCIE_CONFIG_SPACE_LEN; |
4530 | else | 4448 | else |
@@ -4552,8 +4470,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4552 | e1000_down(adapter); | 4470 | e1000_down(adapter); |
4553 | 4471 | ||
4554 | #ifdef CONFIG_PM | 4472 | #ifdef CONFIG_PM |
4555 | /* implement our own version of pci_save_state(pdev) because pci | 4473 | /* Implement our own version of pci_save_state(pdev) because pci- |
4556 | * express adapters have larger 256 byte config spaces */ | 4474 | * express adapters have 256-byte config spaces. */ |
4557 | retval = e1000_pci_save_state(adapter); | 4475 | retval = e1000_pci_save_state(adapter); |
4558 | if (retval) | 4476 | if (retval) |
4559 | return retval; | 4477 | return retval; |
@@ -4610,7 +4528,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4610 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); | 4528 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); |
4611 | if (retval) | 4529 | if (retval) |
4612 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | 4530 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); |
4613 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ | 4531 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); |
4614 | if (retval) | 4532 | if (retval) |
4615 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | 4533 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); |
4616 | } | 4534 | } |
@@ -4626,7 +4544,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4626 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | 4544 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); |
4627 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); | 4545 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); |
4628 | if (retval) | 4546 | if (retval) |
4629 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | 4547 | DPRINTK(PROBE, ERR, |
4548 | "Error enabling D3 cold wake\n"); | ||
4630 | } | 4549 | } |
4631 | } | 4550 | } |
4632 | 4551 | ||
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index 3768d83cd577..e0a4d37d1b85 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -268,7 +268,7 @@ e1000_validate_option(int *value, struct e1000_option *opt, | |||
268 | BUG(); | 268 | BUG(); |
269 | } | 269 | } |
270 | 270 | ||
271 | DPRINTK(PROBE, INFO, "Invalid %s specified (%i) %s\n", | 271 | DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n", |
272 | opt->name, *value, opt->err); | 272 | opt->name, *value, opt->err); |
273 | *value = opt->def; | 273 | *value = opt->def; |
274 | return -1; | 274 | return -1; |
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index 8c62ced2c9b2..467fc861360d 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c | |||
@@ -27,7 +27,7 @@ | |||
27 | rx_align support: enables rx DMA without causing unaligned accesses. | 27 | rx_align support: enables rx DMA without causing unaligned accesses. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | static const char *version = | 30 | static const char * const version = |
31 | "eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n" | 31 | "eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n" |
32 | "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"; | 32 | "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"; |
33 | 33 | ||
@@ -469,7 +469,7 @@ static const char i82558_config_cmd[CONFIG_DATA_SIZE] = { | |||
469 | 0x31, 0x05, }; | 469 | 0x31, 0x05, }; |
470 | 470 | ||
471 | /* PHY media interface chips. */ | 471 | /* PHY media interface chips. */ |
472 | static const char *phys[] = { | 472 | static const char * const phys[] = { |
473 | "None", "i82553-A/B", "i82553-C", "i82503", | 473 | "None", "i82553-A/B", "i82553-C", "i82503", |
474 | "DP83840", "80c240", "80c24", "i82555", | 474 | "DP83840", "80c240", "80c24", "i82555", |
475 | "unknown-8", "unknown-9", "DP83840A", "unknown-11", | 475 | "unknown-8", "unknown-9", "DP83840A", "unknown-11", |
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index f119ec4e89ea..2f7b86837fe8 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c | |||
@@ -225,7 +225,7 @@ struct epic_chip_info { | |||
225 | 225 | ||
226 | 226 | ||
227 | /* indexed by chip_t */ | 227 | /* indexed by chip_t */ |
228 | static struct epic_chip_info pci_id_tbl[] = { | 228 | static const struct epic_chip_info pci_id_tbl[] = { |
229 | { "SMSC EPIC/100 83c170", | 229 | { "SMSC EPIC/100 83c170", |
230 | EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN }, | 230 | EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN }, |
231 | { "SMSC EPIC/100 83c170", | 231 | { "SMSC EPIC/100 83c170", |
@@ -291,7 +291,7 @@ enum CommandBits { | |||
291 | RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) | 291 | RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) |
292 | #define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) | 292 | #define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) |
293 | 293 | ||
294 | static u16 media2miictl[16] = { | 294 | static const u16 media2miictl[16] = { |
295 | 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, | 295 | 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, |
296 | 0, 0, 0, 0, 0, 0, 0, 0 }; | 296 | 0, 0, 0, 0, 0, 0, 0, 0 }; |
297 | 297 | ||
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index f32a6b3acb2a..b67545be2caa 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c | |||
@@ -161,6 +161,7 @@ static char *version = | |||
161 | #include <linux/etherdevice.h> | 161 | #include <linux/etherdevice.h> |
162 | #include <linux/skbuff.h> | 162 | #include <linux/skbuff.h> |
163 | #include <linux/bitops.h> | 163 | #include <linux/bitops.h> |
164 | #include <linux/jiffies.h> | ||
164 | 165 | ||
165 | #include <asm/system.h> | 166 | #include <asm/system.h> |
166 | #include <asm/io.h> | 167 | #include <asm/io.h> |
@@ -754,7 +755,7 @@ static void eth16i_set_port(int ioaddr, int porttype) | |||
754 | 755 | ||
755 | static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) | 756 | static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) |
756 | { | 757 | { |
757 | int starttime; | 758 | unsigned long starttime; |
758 | 759 | ||
759 | outb(0xff, ioaddr + TX_STATUS_REG); | 760 | outb(0xff, ioaddr + TX_STATUS_REG); |
760 | 761 | ||
@@ -765,7 +766,7 @@ static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) | |||
765 | outb(TX_START | 1, ioaddr + TRANSMIT_START_REG); | 766 | outb(TX_START | 1, ioaddr + TRANSMIT_START_REG); |
766 | 767 | ||
767 | while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) { | 768 | while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) { |
768 | if( (jiffies - starttime) > TX_TIMEOUT) { | 769 | if( time_after(jiffies, starttime + TX_TIMEOUT)) { |
769 | return -1; | 770 | return -1; |
770 | } | 771 | } |
771 | } | 772 | } |
@@ -775,18 +776,18 @@ static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) | |||
775 | 776 | ||
776 | static int eth16i_receive_probe_packet(int ioaddr) | 777 | static int eth16i_receive_probe_packet(int ioaddr) |
777 | { | 778 | { |
778 | int starttime; | 779 | unsigned long starttime; |
779 | 780 | ||
780 | starttime = jiffies; | 781 | starttime = jiffies; |
781 | 782 | ||
782 | while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) { | 783 | while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) { |
783 | if( (jiffies - starttime) > TX_TIMEOUT) { | 784 | if( time_after(jiffies, starttime + TX_TIMEOUT)) { |
784 | 785 | ||
785 | if(eth16i_debug > 1) | 786 | if(eth16i_debug > 1) |
786 | printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n"); | 787 | printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n"); |
787 | starttime = jiffies; | 788 | starttime = jiffies; |
788 | while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) { | 789 | while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) { |
789 | if( (jiffies - starttime) > TX_TIMEOUT) { | 790 | if( time_after(jiffies, starttime + TX_TIMEOUT)) { |
790 | if(eth16i_debug > 1) | 791 | if(eth16i_debug > 1) |
791 | printk(KERN_DEBUG "Timeout occurred waiting receive packet\n"); | 792 | printk(KERN_DEBUG "Timeout occurred waiting receive packet\n"); |
792 | return -1; | 793 | return -1; |
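The eth16i changes switch the probe-packet timeouts to unsigned long starttime plus time_after(), which compares jiffies values correctly even across wraparound, unlike the signed subtraction it replaces. The general shape of such a bounded busy-wait, in the style of the changed loops (TX_STATUS_REG and the 0x80 done bit come from the driver; the function name is illustrative):

#include <linux/jiffies.h>
#include <asm/io.h>

/* Sketch: wraparound-safe polling loop with a jiffies-based timeout. */
static int example_wait_tx_done(int ioaddr, unsigned long timeout)
{
        unsigned long starttime = jiffies;

        while ((inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
                if (time_after(jiffies, starttime + timeout))
                        return -1;      /* timed out */
        }
        return 0;
}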
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 55dbe9a3fd56..a8449265e5fd 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c | |||
@@ -160,7 +160,7 @@ struct chip_info { | |||
160 | int flags; | 160 | int flags; |
161 | }; | 161 | }; |
162 | 162 | ||
163 | static struct chip_info skel_netdrv_tbl[] = { | 163 | static const struct chip_info skel_netdrv_tbl[] = { |
164 | {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, | 164 | {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, |
165 | {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR}, | 165 | {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR}, |
166 | {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, | 166 | {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 3682ec61e8a8..e7fc28b07e5a 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -102,6 +102,9 @@ | |||
102 | * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. | 102 | * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. |
103 | * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single | 103 | * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single |
104 | * 0.49: 10 Dec 2005: Fix tso for large buffers. | 104 | * 0.49: 10 Dec 2005: Fix tso for large buffers. |
105 | * 0.50: 20 Jan 2006: Add 8021pq tagging support. | ||
106 | * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. | ||
107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. | ||
105 | * | 108 | * |
106 | * Known bugs: | 109 | * Known bugs: |
107 | * We suspect that on some hardware no TX done interrupts are generated. | 110 | * We suspect that on some hardware no TX done interrupts are generated. |
@@ -113,7 +116,7 @@ | |||
113 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | 116 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few |
114 | * superfluous timer interrupts from the nic. | 117 | * superfluous timer interrupts from the nic. |
115 | */ | 118 | */ |
116 | #define FORCEDETH_VERSION "0.49" | 119 | #define FORCEDETH_VERSION "0.52" |
117 | #define DRV_NAME "forcedeth" | 120 | #define DRV_NAME "forcedeth" |
118 | 121 | ||
119 | #include <linux/module.h> | 122 | #include <linux/module.h> |
@@ -153,6 +156,9 @@ | |||
153 | #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ | 156 | #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ |
154 | #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ | 157 | #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ |
155 | #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ | 158 | #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ |
159 | #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */ | ||
160 | #define DEV_HAS_MSI 0x0040 /* device supports MSI */ | ||
161 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ | ||
156 | 162 | ||
157 | enum { | 163 | enum { |
158 | NvRegIrqStatus = 0x000, | 164 | NvRegIrqStatus = 0x000, |
@@ -166,14 +172,17 @@ enum { | |||
166 | #define NVREG_IRQ_TX_OK 0x0010 | 172 | #define NVREG_IRQ_TX_OK 0x0010 |
167 | #define NVREG_IRQ_TIMER 0x0020 | 173 | #define NVREG_IRQ_TIMER 0x0020 |
168 | #define NVREG_IRQ_LINK 0x0040 | 174 | #define NVREG_IRQ_LINK 0x0040 |
169 | #define NVREG_IRQ_TX_ERROR 0x0080 | 175 | #define NVREG_IRQ_RX_FORCED 0x0080 |
170 | #define NVREG_IRQ_TX1 0x0100 | 176 | #define NVREG_IRQ_TX_FORCED 0x0100 |
171 | #define NVREG_IRQMASK_THROUGHPUT 0x00df | 177 | #define NVREG_IRQMASK_THROUGHPUT 0x00df |
172 | #define NVREG_IRQMASK_CPU 0x0040 | 178 | #define NVREG_IRQMASK_CPU 0x0040 |
179 | #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) | ||
180 | #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) | ||
181 | #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK) | ||
173 | 182 | ||
174 | #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ | 183 | #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ |
175 | NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \ | 184 | NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ |
176 | NVREG_IRQ_TX1)) | 185 | NVREG_IRQ_TX_FORCED)) |
177 | 186 | ||
178 | NvRegUnknownSetupReg6 = 0x008, | 187 | NvRegUnknownSetupReg6 = 0x008, |
179 | #define NVREG_UNKSETUP6_VAL 3 | 188 | #define NVREG_UNKSETUP6_VAL 3 |
@@ -185,6 +194,10 @@ enum { | |||
185 | NvRegPollingInterval = 0x00c, | 194 | NvRegPollingInterval = 0x00c, |
186 | #define NVREG_POLL_DEFAULT_THROUGHPUT 970 | 195 | #define NVREG_POLL_DEFAULT_THROUGHPUT 970 |
187 | #define NVREG_POLL_DEFAULT_CPU 13 | 196 | #define NVREG_POLL_DEFAULT_CPU 13 |
197 | NvRegMSIMap0 = 0x020, | ||
198 | NvRegMSIMap1 = 0x024, | ||
199 | NvRegMSIIrqMask = 0x030, | ||
200 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 | ||
188 | NvRegMisc1 = 0x080, | 201 | NvRegMisc1 = 0x080, |
189 | #define NVREG_MISC1_HD 0x02 | 202 | #define NVREG_MISC1_HD 0x02 |
190 | #define NVREG_MISC1_FORCE 0x3b0f3c | 203 | #define NVREG_MISC1_FORCE 0x3b0f3c |
@@ -254,6 +267,10 @@ enum { | |||
254 | #define NVREG_TXRXCTL_DESC_1 0 | 267 | #define NVREG_TXRXCTL_DESC_1 0 |
255 | #define NVREG_TXRXCTL_DESC_2 0x02100 | 268 | #define NVREG_TXRXCTL_DESC_2 0x02100 |
256 | #define NVREG_TXRXCTL_DESC_3 0x02200 | 269 | #define NVREG_TXRXCTL_DESC_3 0x02200 |
270 | #define NVREG_TXRXCTL_VLANSTRIP 0x00040 | ||
271 | #define NVREG_TXRXCTL_VLANINS 0x00080 | ||
272 | NvRegTxRingPhysAddrHigh = 0x148, | ||
273 | NvRegRxRingPhysAddrHigh = 0x14C, | ||
257 | NvRegMIIStatus = 0x180, | 274 | NvRegMIIStatus = 0x180, |
258 | #define NVREG_MIISTAT_ERROR 0x0001 | 275 | #define NVREG_MIISTAT_ERROR 0x0001 |
259 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | 276 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 |
@@ -303,6 +320,11 @@ enum { | |||
303 | #define NVREG_POWERSTATE_D1 0x0001 | 320 | #define NVREG_POWERSTATE_D1 0x0001 |
304 | #define NVREG_POWERSTATE_D2 0x0002 | 321 | #define NVREG_POWERSTATE_D2 0x0002 |
305 | #define NVREG_POWERSTATE_D3 0x0003 | 322 | #define NVREG_POWERSTATE_D3 0x0003 |
323 | NvRegVlanControl = 0x300, | ||
324 | #define NVREG_VLANCONTROL_ENABLE 0x2000 | ||
325 | NvRegMSIXMap0 = 0x3e0, | ||
326 | NvRegMSIXMap1 = 0x3e4, | ||
327 | NvRegMSIXIrqStatus = 0x3f0, | ||
306 | }; | 328 | }; |
307 | 329 | ||
308 | /* Big endian: should work, but is untested */ | 330 | /* Big endian: should work, but is untested */ |
@@ -314,7 +336,7 @@ struct ring_desc { | |||
314 | struct ring_desc_ex { | 336 | struct ring_desc_ex { |
315 | u32 PacketBufferHigh; | 337 | u32 PacketBufferHigh; |
316 | u32 PacketBufferLow; | 338 | u32 PacketBufferLow; |
317 | u32 Reserved; | 339 | u32 TxVlan; |
318 | u32 FlagLen; | 340 | u32 FlagLen; |
319 | }; | 341 | }; |
320 | 342 | ||
@@ -355,6 +377,8 @@ typedef union _ring_type { | |||
355 | #define NV_TX2_CHECKSUM_L3 (1<<27) | 377 | #define NV_TX2_CHECKSUM_L3 (1<<27) |
356 | #define NV_TX2_CHECKSUM_L4 (1<<26) | 378 | #define NV_TX2_CHECKSUM_L4 (1<<26) |
357 | 379 | ||
380 | #define NV_TX3_VLAN_TAG_PRESENT (1<<18) | ||
381 | |||
358 | #define NV_RX_DESCRIPTORVALID (1<<16) | 382 | #define NV_RX_DESCRIPTORVALID (1<<16) |
359 | #define NV_RX_MISSEDFRAME (1<<17) | 383 | #define NV_RX_MISSEDFRAME (1<<17) |
360 | #define NV_RX_SUBSTRACT1 (1<<18) | 384 | #define NV_RX_SUBSTRACT1 (1<<18) |
@@ -385,6 +409,9 @@ typedef union _ring_type { | |||
385 | #define NV_RX2_ERROR (1<<30) | 409 | #define NV_RX2_ERROR (1<<30) |
386 | #define NV_RX2_AVAIL (1<<31) | 410 | #define NV_RX2_AVAIL (1<<31) |
387 | 411 | ||
412 | #define NV_RX3_VLAN_TAG_PRESENT (1<<16) | ||
413 | #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) | ||
414 | |||
388 | /* Miscellaneous hardware related defines: */ | 415 |
389 | #define NV_PCI_REGSZ 0x270 | 416 | #define NV_PCI_REGSZ 0x270 |
390 | 417 | ||
@@ -475,6 +502,18 @@ typedef union _ring_type { | |||
475 | #define LPA_1000FULL 0x0800 | 502 | #define LPA_1000FULL 0x0800 |
476 | #define LPA_1000HALF 0x0400 | 503 | #define LPA_1000HALF 0x0400 |
477 | 504 | ||
505 | /* MSI/MSI-X defines */ | ||
506 | #define NV_MSI_X_MAX_VECTORS 8 | ||
507 | #define NV_MSI_X_VECTORS_MASK 0x000f | ||
508 | #define NV_MSI_CAPABLE 0x0010 | ||
509 | #define NV_MSI_X_CAPABLE 0x0020 | ||
510 | #define NV_MSI_ENABLED 0x0040 | ||
511 | #define NV_MSI_X_ENABLED 0x0080 | ||
512 | |||
513 | #define NV_MSI_X_VECTOR_ALL 0x0 | ||
514 | #define NV_MSI_X_VECTOR_RX 0x0 | ||
515 | #define NV_MSI_X_VECTOR_TX 0x1 | ||
516 | #define NV_MSI_X_VECTOR_OTHER 0x2 | ||
478 | 517 | ||
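
The MSI/MSI-X defines above fold two things into np->msi_flags: the low nibble (NV_MSI_X_VECTORS_MASK) carries the number of vectors the driver will request, while the higher bits record capability and enabled state. The following standalone userspace sketch of that layout is an illustration only, not driver code; the example flag value mirrors the three-vector throughput configuration set up later in nv_probe.

#include <stdio.h>
#include <stdint.h>

#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

int main(void)
{
	/* e.g. throughput mode: MSI-X capable with three vectors (rx, tx, other) */
	uint32_t msi_flags = NV_MSI_X_CAPABLE | 0x0003;

	printf("vectors requested: %u\n", (unsigned)(msi_flags & NV_MSI_X_VECTORS_MASK));
	printf("msi-x capable:     %s\n", (msi_flags & NV_MSI_X_CAPABLE) ? "yes" : "no");
	printf("msi-x enabled:     %s\n", (msi_flags & NV_MSI_X_ENABLED) ? "yes" : "no");
	return 0;
}
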
479 | /* | 518 | /* |
480 | * SMP locking: | 519 | * SMP locking: |
@@ -511,6 +550,7 @@ struct fe_priv { | |||
511 | u32 irqmask; | 550 | u32 irqmask; |
512 | u32 desc_ver; | 551 | u32 desc_ver; |
513 | u32 txrxctl_bits; | 552 | u32 txrxctl_bits; |
553 | u32 vlanctl_bits; | ||
514 | 554 | ||
515 | void __iomem *base; | 555 | void __iomem *base; |
516 | 556 | ||
@@ -525,6 +565,7 @@ struct fe_priv { | |||
525 | unsigned int pkt_limit; | 565 | unsigned int pkt_limit; |
526 | struct timer_list oom_kick; | 566 | struct timer_list oom_kick; |
527 | struct timer_list nic_poll; | 567 | struct timer_list nic_poll; |
568 | u32 nic_poll_irq; | ||
528 | 569 | ||
529 | /* media detection workaround. | 570 | /* media detection workaround. |
530 | * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); | 571 |
@@ -540,6 +581,13 @@ struct fe_priv { | |||
540 | dma_addr_t tx_dma[TX_RING]; | 581 | dma_addr_t tx_dma[TX_RING]; |
541 | unsigned int tx_dma_len[TX_RING]; | 582 | unsigned int tx_dma_len[TX_RING]; |
542 | u32 tx_flags; | 583 | u32 tx_flags; |
584 | |||
585 | /* vlan fields */ | ||
586 | struct vlan_group *vlangrp; | ||
587 | |||
588 | /* msi/msi-x fields */ | ||
589 | u32 msi_flags; | ||
590 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; | ||
543 | }; | 591 | }; |
544 | 592 | ||
545 | /* | 593 | /* |
@@ -567,6 +615,16 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; | |||
567 | */ | 615 | */ |
568 | static int poll_interval = -1; | 616 | static int poll_interval = -1; |
569 | 617 | ||
618 | /* | ||
619 | * Disable MSI interrupts | ||
620 | */ | ||
621 | static int disable_msi = 0; | ||
622 | |||
623 | /* | ||
624 | * Disable MSIX interrupts | ||
625 | */ | ||
626 | static int disable_msix = 0; | ||
627 | |||
570 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) | 628 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) |
571 | { | 629 | { |
572 | return netdev_priv(dev); | 630 | return netdev_priv(dev); |
@@ -612,6 +670,33 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, | |||
612 | return 0; | 670 | return 0; |
613 | } | 671 | } |
614 | 672 | ||
673 | #define NV_SETUP_RX_RING 0x01 | ||
674 | #define NV_SETUP_TX_RING 0x02 | ||
675 | |||
676 | static void setup_hw_rings(struct net_device *dev, int rxtx_flags) | ||
677 | { | ||
678 | struct fe_priv *np = get_nvpriv(dev); | ||
679 | u8 __iomem *base = get_hwbase(dev); | ||
680 | |||
681 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
682 | if (rxtx_flags & NV_SETUP_RX_RING) { | ||
683 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | ||
684 | } | ||
685 | if (rxtx_flags & NV_SETUP_TX_RING) { | ||
686 | writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | ||
687 | } | ||
688 | } else { | ||
689 | if (rxtx_flags & NV_SETUP_RX_RING) { | ||
690 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | ||
691 | writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh); | ||
692 | } | ||
693 | if (rxtx_flags & NV_SETUP_TX_RING) { | ||
694 | writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | ||
695 | writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh); | ||
696 | } | ||
697 | } | ||
698 | } | ||
699 | |||
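
The new setup_hw_rings() above hides the descriptor-format split: the legacy formats take a single 32-bit ring address, while the extended format writes the low word to NvRegRxRingPhysAddr/NvRegTxRingPhysAddr and the high word to the new *PhysAddrHigh registers. The sketch below is a plain userspace illustration of that low/high split with an invented address; it assumes a little-endian host, where the driver's cpu_to_le64() is a no-op.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ring_addr = 0x0000001234567890ULL;	/* example DMA address, not a real one */
	uint32_t lo = (uint32_t)ring_addr;		/* goes to NvReg*RingPhysAddr */
	uint32_t hi = (uint32_t)(ring_addr >> 32);	/* goes to NvReg*RingPhysAddrHigh */

	printf("low  word: 0x%08x\n", (unsigned)lo);
	printf("high word: 0x%08x\n", (unsigned)hi);
	return 0;
}
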
615 | #define MII_READ (-1) | 700 | #define MII_READ (-1) |
616 | /* mii_rw: read/write a register on the PHY. | 701 | /* mii_rw: read/write a register on the PHY. |
617 | * | 702 | * |
@@ -903,14 +988,27 @@ static void nv_do_rx_refill(unsigned long data) | |||
903 | struct net_device *dev = (struct net_device *) data; | 988 | struct net_device *dev = (struct net_device *) data; |
904 | struct fe_priv *np = netdev_priv(dev); | 989 | struct fe_priv *np = netdev_priv(dev); |
905 | 990 | ||
906 | disable_irq(dev->irq); | 991 | |
992 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | ||
993 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
994 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
995 | disable_irq(dev->irq); | ||
996 | } else { | ||
997 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
998 | } | ||
907 | if (nv_alloc_rx(dev)) { | 999 | if (nv_alloc_rx(dev)) { |
908 | spin_lock(&np->lock); | 1000 | spin_lock(&np->lock); |
909 | if (!np->in_shutdown) | 1001 | if (!np->in_shutdown) |
910 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 1002 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
911 | spin_unlock(&np->lock); | 1003 | spin_unlock(&np->lock); |
912 | } | 1004 | } |
913 | enable_irq(dev->irq); | 1005 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
1006 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
1007 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
1008 | enable_irq(dev->irq); | ||
1009 | } else { | ||
1010 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
1011 | } | ||
914 | } | 1012 | } |
915 | 1013 | ||
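
nv_do_rx_refill() above, like the nv_change_mtu and nv_do_nic_poll hunks further down, keeps testing the same condition: either all interrupts still arrive on one line (no MSI-X, or MSI-X with a single vector), in which case dev->irq is disabled, or rx/tx/other each have their own MSI-X vector. The standalone sketch below restates that test; the helper name is invented for illustration and is not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_X_ENABLED      0x0080

/* hypothetical helper mirroring the driver's inline condition */
static int uses_single_vector(uint32_t msi_flags)
{
	return !(msi_flags & NV_MSI_X_ENABLED) ||
	       ((msi_flags & NV_MSI_X_ENABLED) &&
		((msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1));
}

int main(void)
{
	printf("%d\n", uses_single_vector(0x0000));			/* legacy/MSI irq -> 1 */
	printf("%d\n", uses_single_vector(NV_MSI_X_ENABLED | 3));	/* rx/tx/other vectors -> 0 */
	printf("%d\n", uses_single_vector(NV_MSI_X_ENABLED | 1));	/* single MSI-X vector -> 1 */
	return 0;
}
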
916 | static void nv_init_rx(struct net_device *dev) | 1014 | static void nv_init_rx(struct net_device *dev) |
@@ -965,7 +1063,7 @@ static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) | |||
965 | } | 1063 | } |
966 | 1064 | ||
967 | if (np->tx_skbuff[skbnr]) { | 1065 | if (np->tx_skbuff[skbnr]) { |
968 | dev_kfree_skb_irq(np->tx_skbuff[skbnr]); | 1066 | dev_kfree_skb_any(np->tx_skbuff[skbnr]); |
969 | np->tx_skbuff[skbnr] = NULL; | 1067 | np->tx_skbuff[skbnr] = NULL; |
970 | return 1; | 1068 | return 1; |
971 | } else { | 1069 | } else { |
@@ -1031,6 +1129,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1031 | u32 bcnt; | 1129 | u32 bcnt; |
1032 | u32 size = skb->len-skb->data_len; | 1130 | u32 size = skb->len-skb->data_len; |
1033 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | 1131 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
1132 | u32 tx_flags_vlan = 0; | ||
1034 | 1133 | ||
1035 | /* add fragments to entries count */ | 1134 | /* add fragments to entries count */ |
1036 | for (i = 0; i < fragments; i++) { | 1135 | for (i = 0; i < fragments; i++) { |
@@ -1111,10 +1210,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1111 | #endif | 1210 | #endif |
1112 | tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); | 1211 | tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); |
1113 | 1212 | ||
1213 | /* vlan tag */ | ||
1214 | if (np->vlangrp && vlan_tx_tag_present(skb)) { | ||
1215 | tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); | ||
1216 | } | ||
1217 | |||
1114 | /* set tx flags */ | 1218 | /* set tx flags */ |
1115 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1219 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1116 | np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1220 | np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1117 | } else { | 1221 | } else { |
1222 | np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); | ||
1118 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1223 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1119 | } | 1224 | } |
1120 | 1225 | ||
@@ -1209,9 +1314,14 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1209 | { | 1314 | { |
1210 | struct fe_priv *np = netdev_priv(dev); | 1315 | struct fe_priv *np = netdev_priv(dev); |
1211 | u8 __iomem *base = get_hwbase(dev); | 1316 | u8 __iomem *base = get_hwbase(dev); |
1317 | u32 status; | ||
1318 | |||
1319 | if (np->msi_flags & NV_MSI_X_ENABLED) | ||
1320 | status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | ||
1321 | else | ||
1322 | status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | ||
1212 | 1323 | ||
1213 | printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, | 1324 | printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); |
1214 | readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK); | ||
1215 | 1325 | ||
1216 | { | 1326 | { |
1217 | int i; | 1327 | int i; |
@@ -1273,10 +1383,7 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1273 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); | 1383 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); |
1274 | nv_drain_tx(dev); | 1384 | nv_drain_tx(dev); |
1275 | np->next_tx = np->nic_tx = 0; | 1385 | np->next_tx = np->nic_tx = 0; |
1276 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1386 | setup_hw_rings(dev, NV_SETUP_TX_RING); |
1277 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | ||
1278 | else | ||
1279 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | ||
1280 | netif_wake_queue(dev); | 1387 | netif_wake_queue(dev); |
1281 | } | 1388 | } |
1282 | 1389 | ||
@@ -1342,6 +1449,8 @@ static void nv_rx_process(struct net_device *dev) | |||
1342 | { | 1449 | { |
1343 | struct fe_priv *np = netdev_priv(dev); | 1450 | struct fe_priv *np = netdev_priv(dev); |
1344 | u32 Flags; | 1451 | u32 Flags; |
1452 | u32 vlanflags = 0; | ||
1453 | |||
1345 | 1454 | ||
1346 | for (;;) { | 1455 | for (;;) { |
1347 | struct sk_buff *skb; | 1456 | struct sk_buff *skb; |
@@ -1357,6 +1466,7 @@ static void nv_rx_process(struct net_device *dev) | |||
1357 | } else { | 1466 | } else { |
1358 | Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); | 1467 | Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); |
1359 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); | 1468 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); |
1469 | vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow); | ||
1360 | } | 1470 | } |
1361 | 1471 | ||
1362 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", | 1472 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", |
@@ -1474,7 +1584,11 @@ static void nv_rx_process(struct net_device *dev) | |||
1474 | skb->protocol = eth_type_trans(skb, dev); | 1584 | skb->protocol = eth_type_trans(skb, dev); |
1475 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", | 1585 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", |
1476 | dev->name, np->cur_rx, len, skb->protocol); | 1586 | dev->name, np->cur_rx, len, skb->protocol); |
1477 | netif_rx(skb); | 1587 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) { |
1588 | vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); | ||
1589 | } else { | ||
1590 | netif_rx(skb); | ||
1591 | } | ||
1478 | dev->last_rx = jiffies; | 1592 | dev->last_rx = jiffies; |
1479 | np->stats.rx_packets++; | 1593 | np->stats.rx_packets++; |
1480 | np->stats.rx_bytes += len; | 1594 | np->stats.rx_bytes += len; |
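
The receive path above now pulls the stripped VLAN tag out of the extended descriptor's PacketBufferLow word and, when a vlan_group is registered, hands the frame to vlan_hwaccel_rx() instead of netif_rx(). Below is a standalone sketch of the tag extraction using the NV_RX3_* bits defined earlier; the flag value is an invented example, not captured hardware state.

#include <stdio.h>
#include <stdint.h>

#define NV_RX3_VLAN_TAG_PRESENT (1 << 16)
#define NV_RX3_VLAN_TAG_MASK    0x0000FFFF

int main(void)
{
	uint32_t vlanflags = NV_RX3_VLAN_TAG_PRESENT | 0x0064;	/* example: VID 100 */

	if (vlanflags & NV_RX3_VLAN_TAG_PRESENT)
		printf("vlan tag: %u\n", (unsigned)(vlanflags & NV_RX3_VLAN_TAG_MASK));
	else
		printf("untagged frame\n");
	return 0;
}
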
@@ -1523,7 +1637,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1523 | * guessed, there is probably a simpler approach. | 1637 | * guessed, there is probably a simpler approach. |
1524 | * Changing the MTU is a rare event, it shouldn't matter. | 1638 | * Changing the MTU is a rare event, it shouldn't matter. |
1525 | */ | 1639 | */ |
1526 | disable_irq(dev->irq); | 1640 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
1641 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
1642 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
1643 | disable_irq(dev->irq); | ||
1644 | } else { | ||
1645 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
1646 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
1647 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
1648 | } | ||
1527 | spin_lock_bh(&dev->xmit_lock); | 1649 | spin_lock_bh(&dev->xmit_lock); |
1528 | spin_lock(&np->lock); | 1650 | spin_lock(&np->lock); |
1529 | /* stop engines */ | 1651 | /* stop engines */ |
@@ -1544,11 +1666,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1544 | } | 1666 | } |
1545 | /* reinit nic view of the rx queue */ | 1667 | /* reinit nic view of the rx queue */ |
1546 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | 1668 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); |
1547 | writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); | 1669 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
1548 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | ||
1549 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | ||
1550 | else | ||
1551 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | ||
1552 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), | 1670 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), |
1553 | base + NvRegRingSizes); | 1671 | base + NvRegRingSizes); |
1554 | pci_push(base); | 1672 | pci_push(base); |
@@ -1560,7 +1678,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1560 | nv_start_tx(dev); | 1678 | nv_start_tx(dev); |
1561 | spin_unlock(&np->lock); | 1679 | spin_unlock(&np->lock); |
1562 | spin_unlock_bh(&dev->xmit_lock); | 1680 | spin_unlock_bh(&dev->xmit_lock); |
1563 | enable_irq(dev->irq); | 1681 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
1682 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
1683 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
1684 | enable_irq(dev->irq); | ||
1685 | } else { | ||
1686 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
1687 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
1688 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
1689 | } | ||
1564 | } | 1690 | } |
1565 | return 0; | 1691 | return 0; |
1566 | } | 1692 | } |
@@ -1866,8 +1992,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
1866 | dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); | 1992 | dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); |
1867 | 1993 | ||
1868 | for (i=0; ; i++) { | 1994 | for (i=0; ; i++) { |
1869 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | 1995 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
1870 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | 1996 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; |
1997 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | ||
1998 | } else { | ||
1999 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | ||
2000 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | ||
2001 | } | ||
1871 | pci_push(base); | 2002 | pci_push(base); |
1872 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | 2003 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
1873 | if (!(events & np->irqmask)) | 2004 | if (!(events & np->irqmask)) |
@@ -1907,11 +2038,16 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
1907 | if (i > max_interrupt_work) { | 2038 | if (i > max_interrupt_work) { |
1908 | spin_lock(&np->lock); | 2039 | spin_lock(&np->lock); |
1909 | /* disable interrupts on the nic */ | 2040 | /* disable interrupts on the nic */ |
1910 | writel(0, base + NvRegIrqMask); | 2041 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) |
2042 | writel(0, base + NvRegIrqMask); | ||
2043 | else | ||
2044 | writel(np->irqmask, base + NvRegIrqMask); | ||
1911 | pci_push(base); | 2045 | pci_push(base); |
1912 | 2046 | ||
1913 | if (!np->in_shutdown) | 2047 | if (!np->in_shutdown) { |
2048 | np->nic_poll_irq = np->irqmask; | ||
1914 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | 2049 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
2050 | } | ||
1915 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); | 2051 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); |
1916 | spin_unlock(&np->lock); | 2052 | spin_unlock(&np->lock); |
1917 | break; | 2053 | break; |
@@ -1923,22 +2059,212 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
1923 | return IRQ_RETVAL(i); | 2059 | return IRQ_RETVAL(i); |
1924 | } | 2060 | } |
1925 | 2061 | ||
2062 | static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) | ||
2063 | { | ||
2064 | struct net_device *dev = (struct net_device *) data; | ||
2065 | struct fe_priv *np = netdev_priv(dev); | ||
2066 | u8 __iomem *base = get_hwbase(dev); | ||
2067 | u32 events; | ||
2068 | int i; | ||
2069 | |||
2070 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); | ||
2071 | |||
2072 | for (i=0; ; i++) { | ||
2073 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; | ||
2074 | writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); | ||
2075 | pci_push(base); | ||
2076 | dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); | ||
2077 | if (!(events & np->irqmask)) | ||
2078 | break; | ||
2079 | |||
2080 | spin_lock(&np->lock); | ||
2081 | nv_tx_done(dev); | ||
2082 | spin_unlock(&np->lock); | ||
2083 | |||
2084 | if (events & (NVREG_IRQ_TX_ERR)) { | ||
2085 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | ||
2086 | dev->name, events); | ||
2087 | } | ||
2088 | if (i > max_interrupt_work) { | ||
2089 | spin_lock(&np->lock); | ||
2090 | /* disable interrupts on the nic */ | ||
2091 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); | ||
2092 | pci_push(base); | ||
2093 | |||
2094 | if (!np->in_shutdown) { | ||
2095 | np->nic_poll_irq |= NVREG_IRQ_TX_ALL; | ||
2096 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2097 | } | ||
2098 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); | ||
2099 | spin_unlock(&np->lock); | ||
2100 | break; | ||
2101 | } | ||
2102 | |||
2103 | } | ||
2104 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); | ||
2105 | |||
2106 | return IRQ_RETVAL(i); | ||
2107 | } | ||
2108 | |||
2109 | static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | ||
2110 | { | ||
2111 | struct net_device *dev = (struct net_device *) data; | ||
2112 | struct fe_priv *np = netdev_priv(dev); | ||
2113 | u8 __iomem *base = get_hwbase(dev); | ||
2114 | u32 events; | ||
2115 | int i; | ||
2116 | |||
2117 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); | ||
2118 | |||
2119 | for (i=0; ; i++) { | ||
2120 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | ||
2121 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | ||
2122 | pci_push(base); | ||
2123 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); | ||
2124 | if (!(events & np->irqmask)) | ||
2125 | break; | ||
2126 | |||
2127 | nv_rx_process(dev); | ||
2128 | if (nv_alloc_rx(dev)) { | ||
2129 | spin_lock(&np->lock); | ||
2130 | if (!np->in_shutdown) | ||
2131 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2132 | spin_unlock(&np->lock); | ||
2133 | } | ||
2134 | |||
2135 | if (i > max_interrupt_work) { | ||
2136 | spin_lock(&np->lock); | ||
2137 | /* disable interrupts on the nic */ | ||
2138 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | ||
2139 | pci_push(base); | ||
2140 | |||
2141 | if (!np->in_shutdown) { | ||
2142 | np->nic_poll_irq |= NVREG_IRQ_RX_ALL; | ||
2143 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2144 | } | ||
2145 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); | ||
2146 | spin_unlock(&np->lock); | ||
2147 | break; | ||
2148 | } | ||
2149 | |||
2150 | } | ||
2151 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); | ||
2152 | |||
2153 | return IRQ_RETVAL(i); | ||
2154 | } | ||
2155 | |||
2156 | static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | ||
2157 | { | ||
2158 | struct net_device *dev = (struct net_device *) data; | ||
2159 | struct fe_priv *np = netdev_priv(dev); | ||
2160 | u8 __iomem *base = get_hwbase(dev); | ||
2161 | u32 events; | ||
2162 | int i; | ||
2163 | |||
2164 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); | ||
2165 | |||
2166 | for (i=0; ; i++) { | ||
2167 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; | ||
2168 | writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); | ||
2169 | pci_push(base); | ||
2170 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | ||
2171 | if (!(events & np->irqmask)) | ||
2172 | break; | ||
2173 | |||
2174 | if (events & NVREG_IRQ_LINK) { | ||
2175 | spin_lock(&np->lock); | ||
2176 | nv_link_irq(dev); | ||
2177 | spin_unlock(&np->lock); | ||
2178 | } | ||
2179 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | ||
2180 | spin_lock(&np->lock); | ||
2181 | nv_linkchange(dev); | ||
2182 | spin_unlock(&np->lock); | ||
2183 | np->link_timeout = jiffies + LINK_TIMEOUT; | ||
2184 | } | ||
2185 | if (events & (NVREG_IRQ_UNKNOWN)) { | ||
2186 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | ||
2187 | dev->name, events); | ||
2188 | } | ||
2189 | if (i > max_interrupt_work) { | ||
2190 | spin_lock(&np->lock); | ||
2191 | /* disable interrupts on the nic */ | ||
2192 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | ||
2193 | pci_push(base); | ||
2194 | |||
2195 | if (!np->in_shutdown) { | ||
2196 | np->nic_poll_irq |= NVREG_IRQ_OTHER; | ||
2197 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2198 | } | ||
2199 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); | ||
2200 | spin_unlock(&np->lock); | ||
2201 | break; | ||
2202 | } | ||
2203 | |||
2204 | } | ||
2205 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); | ||
2206 | |||
2207 | return IRQ_RETVAL(i); | ||
2208 | } | ||
2209 | |||
1926 | static void nv_do_nic_poll(unsigned long data) | 2210 | static void nv_do_nic_poll(unsigned long data) |
1927 | { | 2211 | { |
1928 | struct net_device *dev = (struct net_device *) data; | 2212 | struct net_device *dev = (struct net_device *) data; |
1929 | struct fe_priv *np = netdev_priv(dev); | 2213 | struct fe_priv *np = netdev_priv(dev); |
1930 | u8 __iomem *base = get_hwbase(dev); | 2214 | u8 __iomem *base = get_hwbase(dev); |
2215 | u32 mask = 0; | ||
1931 | 2216 | ||
1932 | disable_irq(dev->irq); | ||
1933 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | ||
1934 | /* | 2217 | /* |
2218 | * First disable irq(s) and then | ||
1935 | * reenable interrupts on the nic, we have to do this before calling | 2219 | * reenable interrupts on the nic, we have to do this before calling |
1936 | * nv_nic_irq because that may decide to do otherwise | 2220 | * nv_nic_irq because that may decide to do otherwise |
1937 | */ | 2221 | */ |
1938 | writel(np->irqmask, base + NvRegIrqMask); | 2222 | |
2223 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | ||
2224 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
2225 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
2226 | disable_irq(dev->irq); | ||
2227 | mask = np->irqmask; | ||
2228 | } else { | ||
2229 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | ||
2230 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
2231 | mask |= NVREG_IRQ_RX_ALL; | ||
2232 | } | ||
2233 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | ||
2234 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
2235 | mask |= NVREG_IRQ_TX_ALL; | ||
2236 | } | ||
2237 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | ||
2238 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
2239 | mask |= NVREG_IRQ_OTHER; | ||
2240 | } | ||
2241 | } | ||
2242 | np->nic_poll_irq = 0; | ||
2243 | |||
2244 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | ||
2245 | |||
2246 | writel(mask, base + NvRegIrqMask); | ||
1939 | pci_push(base); | 2247 | pci_push(base); |
1940 | nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); | 2248 | |
1941 | enable_irq(dev->irq); | 2249 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
2250 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
2251 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
2252 | nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); | ||
2253 | enable_irq(dev->irq); | ||
2254 | } else { | ||
2255 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | ||
2256 | nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); | ||
2257 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
2258 | } | ||
2259 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | ||
2260 | nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL); | ||
2261 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
2262 | } | ||
2263 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | ||
2264 | nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL); | ||
2265 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
2266 | } | ||
2267 | } | ||
1942 | } | 2268 | } |
1943 | 2269 | ||
1944 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2270 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -2217,11 +2543,66 @@ static struct ethtool_ops ops = { | |||
2217 | .get_perm_addr = ethtool_op_get_perm_addr, | 2543 | .get_perm_addr = ethtool_op_get_perm_addr, |
2218 | }; | 2544 | }; |
2219 | 2545 | ||
2546 | static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
2547 | { | ||
2548 | struct fe_priv *np = get_nvpriv(dev); | ||
2549 | |||
2550 | spin_lock_irq(&np->lock); | ||
2551 | |||
2552 | /* save vlan group */ | ||
2553 | np->vlangrp = grp; | ||
2554 | |||
2555 | if (grp) { | ||
2556 | /* enable vlan on MAC */ | ||
2557 | np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; | ||
2558 | } else { | ||
2559 | /* disable vlan on MAC */ | ||
2560 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; | ||
2561 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; | ||
2562 | } | ||
2563 | |||
2564 | writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | ||
2565 | |||
2566 | spin_unlock_irq(&np->lock); | ||
2567 | }; | ||
2568 | |||
2569 | static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
2570 | { | ||
2571 | /* nothing to do */ | ||
2572 | }; | ||
2573 | |||
2574 | static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) | ||
2575 | { | ||
2576 | u8 __iomem *base = get_hwbase(dev); | ||
2577 | int i; | ||
2578 | u32 msixmap = 0; | ||
2579 | |||
2580 | /* Each interrupt bit can be mapped to a MSIX vector (4 bits). | ||
2581 | * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents | ||
2582 | * the remaining 8 interrupts. | ||
2583 | */ | ||
2584 | for (i = 0; i < 8; i++) { | ||
2585 | if ((irqmask >> i) & 0x1) { | ||
2586 | msixmap |= vector << (i << 2); | ||
2587 | } | ||
2588 | } | ||
2589 | writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); | ||
2590 | |||
2591 | msixmap = 0; | ||
2592 | for (i = 0; i < 8; i++) { | ||
2593 | if ((irqmask >> (i + 8)) & 0x1) { | ||
2594 | msixmap |= vector << (i << 2); | ||
2595 | } | ||
2596 | } | ||
2597 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); | ||
2598 | } | ||
2599 | |||
2220 | static int nv_open(struct net_device *dev) | 2600 | static int nv_open(struct net_device *dev) |
2221 | { | 2601 | { |
2222 | struct fe_priv *np = netdev_priv(dev); | 2602 | struct fe_priv *np = netdev_priv(dev); |
2223 | u8 __iomem *base = get_hwbase(dev); | 2603 | u8 __iomem *base = get_hwbase(dev); |
2224 | int ret, oom, i; | 2604 | int ret = 1; |
2605 | int oom, i; | ||
2225 | 2606 | ||
2226 | dprintk(KERN_DEBUG "nv_open: begin\n"); | 2607 | dprintk(KERN_DEBUG "nv_open: begin\n"); |
2227 | 2608 | ||
@@ -2253,11 +2634,7 @@ static int nv_open(struct net_device *dev) | |||
2253 | nv_copy_mac_to_hw(dev); | 2634 | nv_copy_mac_to_hw(dev); |
2254 | 2635 | ||
2255 | /* 4) give hw rings */ | 2636 | /* 4) give hw rings */ |
2256 | writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); | 2637 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
2257 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | ||
2258 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | ||
2259 | else | ||
2260 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | ||
2261 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), | 2638 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), |
2262 | base + NvRegRingSizes); | 2639 | base + NvRegRingSizes); |
2263 | 2640 | ||
@@ -2265,6 +2642,7 @@ static int nv_open(struct net_device *dev) | |||
2265 | writel(np->linkspeed, base + NvRegLinkSpeed); | 2642 | writel(np->linkspeed, base + NvRegLinkSpeed); |
2266 | writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); | 2643 | writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); |
2267 | writel(np->txrxctl_bits, base + NvRegTxRxControl); | 2644 | writel(np->txrxctl_bits, base + NvRegTxRxControl); |
2645 | writel(np->vlanctl_bits, base + NvRegVlanControl); | ||
2268 | pci_push(base); | 2646 | pci_push(base); |
2269 | writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); | 2647 | writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); |
2270 | reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, | 2648 | reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, |
@@ -2315,9 +2693,77 @@ static int nv_open(struct net_device *dev) | |||
2315 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | 2693 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); |
2316 | pci_push(base); | 2694 | pci_push(base); |
2317 | 2695 | ||
2318 | ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev); | 2696 | if (np->msi_flags & NV_MSI_X_CAPABLE) { |
2319 | if (ret) | 2697 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { |
2320 | goto out_drain; | 2698 | np->msi_x_entry[i].entry = i; |
2699 | } | ||
2700 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | ||
2701 | np->msi_flags |= NV_MSI_X_ENABLED; | ||
2702 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { | ||
2703 | /* Request irq for rx handling */ | ||
2704 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2705 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); | ||
2706 | pci_disable_msix(np->pci_dev); | ||
2707 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2708 | goto out_drain; | ||
2709 | } | ||
2710 | /* Request irq for tx handling */ | ||
2711 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2712 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); | ||
2713 | pci_disable_msix(np->pci_dev); | ||
2714 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2715 | goto out_drain; | ||
2716 | } | ||
2717 | /* Request irq for link and timer handling */ | ||
2718 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { | ||
2719 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); | ||
2720 | pci_disable_msix(np->pci_dev); | ||
2721 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2722 | goto out_drain; | ||
2723 | } | ||
2724 | |||
2725 | /* map interrupts to their respective vector */ | ||
2726 | writel(0, base + NvRegMSIXMap0); | ||
2727 | writel(0, base + NvRegMSIXMap1); | ||
2728 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | ||
2729 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | ||
2730 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | ||
2731 | } else { | ||
2732 | /* Request irq for all interrupts */ | ||
2733 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2734 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2735 | pci_disable_msix(np->pci_dev); | ||
2736 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2737 | goto out_drain; | ||
2738 | } | ||
2739 | |||
2740 | /* map interrupts to vector 0 */ | ||
2741 | writel(0, base + NvRegMSIXMap0); | ||
2742 | writel(0, base + NvRegMSIXMap1); | ||
2743 | } | ||
2744 | } | ||
2745 | } | ||
2746 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | ||
2747 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | ||
2748 | np->msi_flags |= NV_MSI_ENABLED; | ||
2749 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2750 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2751 | pci_disable_msi(np->pci_dev); | ||
2752 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2753 | goto out_drain; | ||
2754 | } | ||
2755 | |||
2756 | /* map interrupts to vector 0 */ | ||
2757 | writel(0, base + NvRegMSIMap0); | ||
2758 | writel(0, base + NvRegMSIMap1); | ||
2759 | /* enable msi vector 0 */ | ||
2760 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | ||
2761 | } | ||
2762 | } | ||
2763 | if (ret != 0) { | ||
2764 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) | ||
2765 | goto out_drain; | ||
2766 | } | ||
2321 | 2767 | ||
2322 | /* ask for interrupts */ | 2768 | /* ask for interrupts */ |
2323 | writel(np->irqmask, base + NvRegIrqMask); | 2769 | writel(np->irqmask, base + NvRegIrqMask); |
@@ -2364,6 +2810,7 @@ static int nv_close(struct net_device *dev) | |||
2364 | { | 2810 | { |
2365 | struct fe_priv *np = netdev_priv(dev); | 2811 | struct fe_priv *np = netdev_priv(dev); |
2366 | u8 __iomem *base; | 2812 | u8 __iomem *base; |
2813 | int i; | ||
2367 | 2814 | ||
2368 | spin_lock_irq(&np->lock); | 2815 | spin_lock_irq(&np->lock); |
2369 | np->in_shutdown = 1; | 2816 | np->in_shutdown = 1; |
@@ -2381,13 +2828,31 @@ static int nv_close(struct net_device *dev) | |||
2381 | 2828 | ||
2382 | /* disable interrupts on the nic or we will lock up */ | 2829 | /* disable interrupts on the nic or we will lock up */ |
2383 | base = get_hwbase(dev); | 2830 | base = get_hwbase(dev); |
2384 | writel(0, base + NvRegIrqMask); | 2831 | if (np->msi_flags & NV_MSI_X_ENABLED) { |
2832 | writel(np->irqmask, base + NvRegIrqMask); | ||
2833 | } else { | ||
2834 | if (np->msi_flags & NV_MSI_ENABLED) | ||
2835 | writel(0, base + NvRegMSIIrqMask); | ||
2836 | writel(0, base + NvRegIrqMask); | ||
2837 | } | ||
2385 | pci_push(base); | 2838 | pci_push(base); |
2386 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); | 2839 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); |
2387 | 2840 | ||
2388 | spin_unlock_irq(&np->lock); | 2841 | spin_unlock_irq(&np->lock); |
2389 | 2842 | ||
2390 | free_irq(dev->irq, dev); | 2843 | if (np->msi_flags & NV_MSI_X_ENABLED) { |
2844 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | ||
2845 | free_irq(np->msi_x_entry[i].vector, dev); | ||
2846 | } | ||
2847 | pci_disable_msix(np->pci_dev); | ||
2848 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2849 | } else { | ||
2850 | free_irq(np->pci_dev->irq, dev); | ||
2851 | if (np->msi_flags & NV_MSI_ENABLED) { | ||
2852 | pci_disable_msi(np->pci_dev); | ||
2853 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2854 | } | ||
2855 | } | ||
2391 | 2856 | ||
2392 | drain_ring(dev); | 2857 | drain_ring(dev); |
2393 | 2858 | ||
@@ -2471,7 +2936,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
2471 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", | 2936 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", |
2472 | pci_name(pci_dev)); | 2937 | pci_name(pci_dev)); |
2473 | } else { | 2938 | } else { |
2474 | dev->features |= NETIF_F_HIGHDMA; | 2939 | if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { |
2940 | printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", | ||
2941 | pci_name(pci_dev)); | ||
2942 | goto out_relreg; | ||
2943 | } else { | ||
2944 | dev->features |= NETIF_F_HIGHDMA; | ||
2945 | printk(KERN_INFO "forcedeth: using HIGHDMA\n"); | ||
2946 | } | ||
2475 | } | 2947 | } |
2476 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; | 2948 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; |
2477 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { | 2949 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { |
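
The new probe path above only keeps NETIF_F_HIGHDMA if both the streaming and the consistent DMA mask can be set to 0x0000007fffffffffULL, i.e. a 39-bit address mask. The tiny standalone check below just confirms that constant is (1 << 39) - 1; it is arithmetic only, not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = (1ULL << 39) - 1;

	printf("0x%016llx\n", (unsigned long long)mask);	/* 0x0000007fffffffff */
	return 0;
}
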
@@ -2496,6 +2968,22 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
2496 | #endif | 2968 | #endif |
2497 | } | 2969 | } |
2498 | 2970 | ||
2971 | np->vlanctl_bits = 0; | ||
2972 | if (id->driver_data & DEV_HAS_VLAN) { | ||
2973 | np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; | ||
2974 | dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; | ||
2975 | dev->vlan_rx_register = nv_vlan_rx_register; | ||
2976 | dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; | ||
2977 | } | ||
2978 | |||
2979 | np->msi_flags = 0; | ||
2980 | if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) { | ||
2981 | np->msi_flags |= NV_MSI_CAPABLE; | ||
2982 | } | ||
2983 | if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) { | ||
2984 | np->msi_flags |= NV_MSI_X_CAPABLE; | ||
2985 | } | ||
2986 | |||
2499 | err = -ENOMEM; | 2987 | err = -ENOMEM; |
2500 | np->base = ioremap(addr, NV_PCI_REGSZ); | 2988 | np->base = ioremap(addr, NV_PCI_REGSZ); |
2501 | if (!np->base) | 2989 | if (!np->base) |
@@ -2578,10 +3066,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
2578 | } else { | 3066 | } else { |
2579 | np->tx_flags = NV_TX2_VALID; | 3067 | np->tx_flags = NV_TX2_VALID; |
2580 | } | 3068 | } |
2581 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) | 3069 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { |
2582 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; | 3070 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; |
2583 | else | 3071 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ |
3072 | np->msi_flags |= 0x0003; | ||
3073 | } else { | ||
2584 | np->irqmask = NVREG_IRQMASK_CPU; | 3074 | np->irqmask = NVREG_IRQMASK_CPU; |
3075 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ | ||
3076 | np->msi_flags |= 0x0001; | ||
3077 | } | ||
2585 | 3078 | ||
2586 | if (id->driver_data & DEV_NEED_TIMERIRQ) | 3079 | if (id->driver_data & DEV_NEED_TIMERIRQ) |
2587 | np->irqmask |= NVREG_IRQ_TIMER; | 3080 | np->irqmask |= NVREG_IRQ_TIMER; |
@@ -2737,11 +3230,11 @@ static struct pci_device_id pci_tbl[] = { | |||
2737 | }, | 3230 | }, |
2738 | { /* MCP55 Ethernet Controller */ | 3231 | { /* MCP55 Ethernet Controller */ |
2739 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), | 3232 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), |
2740 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, | 3233 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X, |
2741 | }, | 3234 | }, |
2742 | { /* MCP55 Ethernet Controller */ | 3235 | { /* MCP55 Ethernet Controller */ |
2743 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), | 3236 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), |
2744 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, | 3237 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X, |
2745 | }, | 3238 | }, |
2746 | {0,}, | 3239 | {0,}, |
2747 | }; | 3240 | }; |
@@ -2771,6 +3264,10 @@ module_param(optimization_mode, int, 0); | |||
2771 | MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); | 3264 | MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); |
2772 | module_param(poll_interval, int, 0); | 3265 | module_param(poll_interval, int, 0); |
2773 | MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); | 3266 | MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); |
3267 | module_param(disable_msi, int, 0); | ||
3268 | MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1."); | ||
3269 | module_param(disable_msix, int, 0); | ||
3270 | MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1."); | ||
2774 | 3271 | ||
2775 | MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); | 3272 | MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); |
2776 | MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); | 3273 | MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); |
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index bc9a3bf8d560..0ea4cb4a0d80 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c | |||
@@ -427,7 +427,7 @@ that case. | |||
427 | static void hamachi_timer(unsigned long data); | 427 | static void hamachi_timer(unsigned long data); |
428 | 428 | ||
429 | enum capability_flags {CanHaveMII=1, }; | 429 | enum capability_flags {CanHaveMII=1, }; |
430 | static struct chip_info { | 430 | static const struct chip_info { |
431 | u16 vendor_id, device_id, device_id_mask, pad; | 431 | u16 vendor_id, device_id, device_id_mask, pad; |
432 | const char *name; | 432 | const char *name; |
433 | void (*media_timer)(unsigned long data); | 433 | void (*media_timer)(unsigned long data); |
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index e4188d082f01..9220de9f4fe7 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -905,7 +905,7 @@ static int epp_open(struct net_device *dev) | |||
905 | /* autoprobe baud rate */ | 905 | /* autoprobe baud rate */ |
906 | tstart = jiffies; | 906 | tstart = jiffies; |
907 | i = 0; | 907 | i = 0; |
908 | while ((signed)(jiffies-tstart-HZ/3) < 0) { | 908 | while (time_before(jiffies, tstart + HZ/3)) { |
909 | if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1) | 909 | if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1) |
910 | goto epptimeout; | 910 | goto epptimeout; |
911 | if ((stat & (EPP_NRAEF|EPP_NRHF)) == EPP_NRHF) { | 911 | if ((stat & (EPP_NRAEF|EPP_NRHF)) == EPP_NRHF) { |
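
This hunk, like the hp100 changes that follow, replaces open-coded jiffies arithmetic with time_before(), which compares via a signed difference and therefore keeps working when the jiffies counter wraps. The standalone sketch below shows the difference on a 32-bit counter; the time_before macro here is a local stand-in for illustration, not the kernel header version.

#include <stdio.h>
#include <stdint.h>

/* 32-bit illustration of the kernel's signed-difference comparison */
#define time_before(a, b)	((int32_t)((a) - (b)) < 0)

int main(void)
{
	uint32_t jiffies  = 0xfffffff0u;	/* counter just before wrap-around */
	uint32_t deadline = jiffies + 0x20;	/* wraps to 0x00000010 */

	printf("naive compare: %d\n", jiffies < deadline);		/* 0: fooled by the wrap */
	printf("time_before():  %d\n", time_before(jiffies, deadline));	/* 1: still correct */
	return 0;
}
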
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index 55c7ed608391..247c8ca86033 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c | |||
@@ -115,6 +115,7 @@ | |||
115 | #include <linux/delay.h> | 115 | #include <linux/delay.h> |
116 | #include <linux/init.h> | 116 | #include <linux/init.h> |
117 | #include <linux/bitops.h> | 117 | #include <linux/bitops.h> |
118 | #include <linux/jiffies.h> | ||
118 | 119 | ||
119 | #include <asm/io.h> | 120 | #include <asm/io.h> |
120 | 121 | ||
@@ -1499,7 +1500,7 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev) | |||
1499 | printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name); | 1500 | printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name); |
1500 | #endif | 1501 | #endif |
1501 | /* not waited long enough since last tx? */ | 1502 | /* not waited long enough since last tx? */ |
1502 | if (jiffies - dev->trans_start < HZ) | 1503 | if (time_before(jiffies, dev->trans_start + HZ)) |
1503 | return -EAGAIN; | 1504 | return -EAGAIN; |
1504 | 1505 | ||
1505 | if (hp100_check_lan(dev)) | 1506 | if (hp100_check_lan(dev)) |
@@ -1652,7 +1653,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1652 | printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i); | 1653 | printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i); |
1653 | #endif | 1654 | #endif |
1654 | /* not waited long enough since last failed tx try? */ | 1655 | /* not waited long enough since last failed tx try? */ |
1655 | if (jiffies - dev->trans_start < HZ) { | 1656 | if (time_before(jiffies, dev->trans_start + HZ)) { |
1656 | #ifdef HP100_DEBUG | 1657 | #ifdef HP100_DEBUG |
1657 | printk("hp100: %s: trans_start timing problem\n", | 1658 | printk("hp100: %s: trans_start timing problem\n", |
1658 | dev->name); | 1659 | dev->name); |
@@ -1718,17 +1719,10 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1718 | hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */ | 1719 | hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */ |
1719 | 1720 | ||
1720 | if (lp->mode == 2) { /* memory mapped */ | 1721 | if (lp->mode == 2) { /* memory mapped */ |
1721 | if (lp->mem_ptr_virt) { /* high pci memory was remapped */ | 1722 | /* Note: The J2585B needs alignment to 32bits here! */ |
1722 | /* Note: The J2585B needs alignment to 32bits here! */ | 1723 | memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3); |
1723 | memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3); | 1724 | if (!ok_flag) |
1724 | if (!ok_flag) | 1725 | memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len); |
1725 | memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len); | ||
1726 | } else { | ||
1727 | /* Note: The J2585B needs alignment to 32bits here! */ | ||
1728 | isa_memcpy_toio(lp->mem_ptr_phys, skb->data, (skb->len + 3) & ~3); | ||
1729 | if (!ok_flag) | ||
1730 | isa_memset_io(lp->mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb->len); | ||
1731 | } | ||
1732 | } else { /* programmed i/o */ | 1726 | } else { /* programmed i/o */ |
1733 | outsl(ioaddr + HP100_REG_DATA32, skb->data, | 1727 | outsl(ioaddr + HP100_REG_DATA32, skb->data, |
1734 | (skb->len + 3) >> 2); | 1728 | (skb->len + 3) >> 2); |
@@ -1798,10 +1792,7 @@ static void hp100_rx(struct net_device *dev) | |||
1798 | /* First we get the header, which contains information about the */ | 1792 | /* First we get the header, which contains information about the */ |
1799 | /* actual length of the received packet. */ | 1793 | /* actual length of the received packet. */ |
1800 | if (lp->mode == 2) { /* memory mapped mode */ | 1794 | if (lp->mode == 2) { /* memory mapped mode */ |
1801 | if (lp->mem_ptr_virt) /* if memory was remapped */ | 1795 | header = readl(lp->mem_ptr_virt); |
1802 | header = readl(lp->mem_ptr_virt); | ||
1803 | else | ||
1804 | header = isa_readl(lp->mem_ptr_phys); | ||
1805 | } else /* programmed i/o */ | 1796 | } else /* programmed i/o */ |
1806 | header = hp100_inl(DATA32); | 1797 | header = hp100_inl(DATA32); |
1807 | 1798 | ||
@@ -1833,13 +1824,9 @@ static void hp100_rx(struct net_device *dev) | |||
1833 | ptr = skb->data; | 1824 | ptr = skb->data; |
1834 | 1825 | ||
1835 | /* Now transfer the data from the card into that area */ | 1826 | /* Now transfer the data from the card into that area */ |
1836 | if (lp->mode == 2) { | 1827 | if (lp->mode == 2) |
1837 | if (lp->mem_ptr_virt) | 1828 | memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len); |
1838 | memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len); | 1829 | else /* io mapped */ |
1839 | /* Note alignment to 32bit transfers */ | ||
1840 | else | ||
1841 | isa_memcpy_fromio(ptr, lp->mem_ptr_phys, pkt_len); | ||
1842 | } else /* io mapped */ | ||
1843 | insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2); | 1830 | insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2); |
1844 | 1831 | ||
1845 | skb->protocol = eth_type_trans(skb, dev); | 1832 | skb->protocol = eth_type_trans(skb, dev); |
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index 591c5864ffb1..7e49522b8b3c 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c | |||
@@ -204,7 +204,7 @@ static inline int emac_phy_gpcs(int phy_mode) | |||
204 | 204 | ||
205 | static inline void emac_tx_enable(struct ocp_enet_private *dev) | 205 | static inline void emac_tx_enable(struct ocp_enet_private *dev) |
206 | { | 206 | { |
207 | struct emac_regs *p = dev->emacp; | 207 | struct emac_regs __iomem *p = dev->emacp; |
208 | unsigned long flags; | 208 | unsigned long flags; |
209 | u32 r; | 209 | u32 r; |
210 | 210 | ||
@@ -220,7 +220,7 @@ static inline void emac_tx_enable(struct ocp_enet_private *dev) | |||
220 | 220 | ||
221 | static void emac_tx_disable(struct ocp_enet_private *dev) | 221 | static void emac_tx_disable(struct ocp_enet_private *dev) |
222 | { | 222 | { |
223 | struct emac_regs *p = dev->emacp; | 223 | struct emac_regs __iomem *p = dev->emacp; |
224 | unsigned long flags; | 224 | unsigned long flags; |
225 | u32 r; | 225 | u32 r; |
226 | 226 | ||
@@ -244,7 +244,7 @@ static void emac_tx_disable(struct ocp_enet_private *dev) | |||
244 | 244 | ||
245 | static void emac_rx_enable(struct ocp_enet_private *dev) | 245 | static void emac_rx_enable(struct ocp_enet_private *dev) |
246 | { | 246 | { |
247 | struct emac_regs *p = dev->emacp; | 247 | struct emac_regs __iomem *p = dev->emacp; |
248 | unsigned long flags; | 248 | unsigned long flags; |
249 | u32 r; | 249 | u32 r; |
250 | 250 | ||
@@ -275,7 +275,7 @@ static void emac_rx_enable(struct ocp_enet_private *dev) | |||
275 | 275 | ||
276 | static void emac_rx_disable(struct ocp_enet_private *dev) | 276 | static void emac_rx_disable(struct ocp_enet_private *dev) |
277 | { | 277 | { |
278 | struct emac_regs *p = dev->emacp; | 278 | struct emac_regs __iomem *p = dev->emacp; |
279 | unsigned long flags; | 279 | unsigned long flags; |
280 | u32 r; | 280 | u32 r; |
281 | 281 | ||
@@ -299,7 +299,7 @@ static void emac_rx_disable(struct ocp_enet_private *dev) | |||
299 | 299 | ||
300 | static inline void emac_rx_disable_async(struct ocp_enet_private *dev) | 300 | static inline void emac_rx_disable_async(struct ocp_enet_private *dev) |
301 | { | 301 | { |
302 | struct emac_regs *p = dev->emacp; | 302 | struct emac_regs __iomem *p = dev->emacp; |
303 | unsigned long flags; | 303 | unsigned long flags; |
304 | u32 r; | 304 | u32 r; |
305 | 305 | ||
@@ -315,7 +315,7 @@ static inline void emac_rx_disable_async(struct ocp_enet_private *dev) | |||
315 | 315 | ||
316 | static int emac_reset(struct ocp_enet_private *dev) | 316 | static int emac_reset(struct ocp_enet_private *dev) |
317 | { | 317 | { |
318 | struct emac_regs *p = dev->emacp; | 318 | struct emac_regs __iomem *p = dev->emacp; |
319 | unsigned long flags; | 319 | unsigned long flags; |
320 | int n = 20; | 320 | int n = 20; |
321 | 321 | ||
@@ -348,7 +348,7 @@ static int emac_reset(struct ocp_enet_private *dev) | |||
348 | 348 | ||
349 | static void emac_hash_mc(struct ocp_enet_private *dev) | 349 | static void emac_hash_mc(struct ocp_enet_private *dev) |
350 | { | 350 | { |
351 | struct emac_regs *p = dev->emacp; | 351 | struct emac_regs __iomem *p = dev->emacp; |
352 | u16 gaht[4] = { 0 }; | 352 | u16 gaht[4] = { 0 }; |
353 | struct dev_mc_list *dmi; | 353 | struct dev_mc_list *dmi; |
354 | 354 | ||
@@ -393,7 +393,7 @@ static inline int emac_opb_mhz(void) | |||
393 | /* BHs disabled */ | 393 | /* BHs disabled */ |
394 | static int emac_configure(struct ocp_enet_private *dev) | 394 | static int emac_configure(struct ocp_enet_private *dev) |
395 | { | 395 | { |
396 | struct emac_regs *p = dev->emacp; | 396 | struct emac_regs __iomem *p = dev->emacp; |
397 | struct net_device *ndev = dev->ndev; | 397 | struct net_device *ndev = dev->ndev; |
398 | int gige; | 398 | int gige; |
399 | u32 r; | 399 | u32 r; |
@@ -555,7 +555,7 @@ static void emac_full_tx_reset(struct net_device *ndev) | |||
555 | 555 | ||
556 | static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg) | 556 | static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg) |
557 | { | 557 | { |
558 | struct emac_regs *p = dev->emacp; | 558 | struct emac_regs __iomem *p = dev->emacp; |
559 | u32 r; | 559 | u32 r; |
560 | int n; | 560 | int n; |
561 | 561 | ||
@@ -604,7 +604,7 @@ static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg) | |||
604 | static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg, | 604 | static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg, |
605 | u16 val) | 605 | u16 val) |
606 | { | 606 | { |
607 | struct emac_regs *p = dev->emacp; | 607 | struct emac_regs __iomem *p = dev->emacp; |
608 | int n; | 608 | int n; |
609 | 609 | ||
610 | DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg, | 610 | DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg, |
@@ -666,7 +666,7 @@ static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val) | |||
666 | static void emac_set_multicast_list(struct net_device *ndev) | 666 | static void emac_set_multicast_list(struct net_device *ndev) |
667 | { | 667 | { |
668 | struct ocp_enet_private *dev = ndev->priv; | 668 | struct ocp_enet_private *dev = ndev->priv; |
669 | struct emac_regs *p = dev->emacp; | 669 | struct emac_regs __iomem *p = dev->emacp; |
670 | u32 rmr = emac_iff2rmr(ndev); | 670 | u32 rmr = emac_iff2rmr(ndev); |
671 | 671 | ||
672 | DBG("%d: multicast %08x" NL, dev->def->index, rmr); | 672 | DBG("%d: multicast %08x" NL, dev->def->index, rmr); |
@@ -825,7 +825,7 @@ static void emac_clean_rx_ring(struct ocp_enet_private *dev) | |||
825 | } | 825 | } |
826 | 826 | ||
827 | static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot, | 827 | static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot, |
828 | int flags) | 828 | gfp_t flags) |
829 | { | 829 | { |
830 | struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); | 830 | struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); |
831 | if (unlikely(!skb)) | 831 | if (unlikely(!skb)) |
@@ -1047,7 +1047,7 @@ static inline u16 emac_tx_csum(struct ocp_enet_private *dev, | |||
1047 | 1047 | ||
1048 | static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len) | 1048 | static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len) |
1049 | { | 1049 | { |
1050 | struct emac_regs *p = dev->emacp; | 1050 | struct emac_regs __iomem *p = dev->emacp; |
1051 | struct net_device *ndev = dev->ndev; | 1051 | struct net_device *ndev = dev->ndev; |
1052 | 1052 | ||
1053 | /* Send the packet out */ | 1053 | /* Send the packet out */ |
@@ -1519,7 +1519,7 @@ static void emac_rxde(void *param) | |||
1519 | static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs) | 1519 | static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs) |
1520 | { | 1520 | { |
1521 | struct ocp_enet_private *dev = dev_instance; | 1521 | struct ocp_enet_private *dev = dev_instance; |
1522 | struct emac_regs *p = dev->emacp; | 1522 | struct emac_regs __iomem *p = dev->emacp; |
1523 | struct ibm_emac_error_stats *st = &dev->estats; | 1523 | struct ibm_emac_error_stats *st = &dev->estats; |
1524 | 1524 | ||
1525 | u32 isr = in_be32(&p->isr); | 1525 | u32 isr = in_be32(&p->isr); |
@@ -1619,17 +1619,17 @@ static void emac_remove(struct ocp_device *ocpdev) | |||
1619 | 1619 | ||
1620 | DBG("%d: remove" NL, dev->def->index); | 1620 | DBG("%d: remove" NL, dev->def->index); |
1621 | 1621 | ||
1622 | ocp_set_drvdata(ocpdev, 0); | 1622 | ocp_set_drvdata(ocpdev, NULL); |
1623 | unregister_netdev(dev->ndev); | 1623 | unregister_netdev(dev->ndev); |
1624 | 1624 | ||
1625 | tah_fini(dev->tah_dev); | 1625 | tah_fini(dev->tah_dev); |
1626 | rgmii_fini(dev->rgmii_dev, dev->rgmii_input); | 1626 | rgmii_fini(dev->rgmii_dev, dev->rgmii_input); |
1627 | zmii_fini(dev->zmii_dev, dev->zmii_input); | 1627 | zmii_fini(dev->zmii_dev, dev->zmii_input); |
1628 | 1628 | ||
1629 | emac_dbg_register(dev->def->index, 0); | 1629 | emac_dbg_register(dev->def->index, NULL); |
1630 | 1630 | ||
1631 | mal_unregister_commac(dev->mal, &dev->commac); | 1631 | mal_unregister_commac(dev->mal, &dev->commac); |
1632 | iounmap((void *)dev->emacp); | 1632 | iounmap(dev->emacp); |
1633 | kfree(dev->ndev); | 1633 | kfree(dev->ndev); |
1634 | } | 1634 | } |
1635 | 1635 | ||
@@ -2048,9 +2048,7 @@ static int __init emac_probe(struct ocp_device *ocpdev) | |||
2048 | goto out4; | 2048 | goto out4; |
2049 | 2049 | ||
2050 | /* Map EMAC regs */ | 2050 | /* Map EMAC regs */ |
2051 | dev->emacp = | 2051 | dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs)); |
2052 | (struct emac_regs *)ioremap(dev->def->paddr, | ||
2053 | sizeof(struct emac_regs)); | ||
2054 | if (!dev->emacp) { | 2052 | if (!dev->emacp) { |
2055 | printk(KERN_ERR "emac%d: could not ioremap device registers!\n", | 2053 | printk(KERN_ERR "emac%d: could not ioremap device registers!\n", |
2056 | dev->def->index); | 2054 | dev->def->index); |
@@ -2210,7 +2208,7 @@ static int __init emac_probe(struct ocp_device *ocpdev) | |||
2210 | 2208 | ||
2211 | return 0; | 2209 | return 0; |
2212 | out6: | 2210 | out6: |
2213 | iounmap((void *)dev->emacp); | 2211 | iounmap(dev->emacp); |
2214 | out5: | 2212 | out5: |
2215 | tah_fini(dev->tah_dev); | 2213 | tah_fini(dev->tah_dev); |
2216 | out4: | 2214 | out4: |
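The ibm_emac hunks above all follow one pattern: MMIO register pointers gain the __iomem annotation, the return value of ioremap() is assigned without a cast, and iounmap() receives the annotated pointer directly instead of a (void *) cast. A driver-neutral sketch of that pattern, with a made-up register block and the generic readl()/writel() accessors standing in for the PowerPC in_be32()/out_be32() used by the real driver:

#include <linux/types.h>
#include <asm/io.h>

struct my_regs {                        /* hypothetical register layout */
        u32 mr0;
        u32 isr;
};

static struct my_regs __iomem *my_map_regs(unsigned long paddr)
{
        struct my_regs __iomem *p;

        p = ioremap(paddr, sizeof(struct my_regs));     /* no cast needed */
        if (p)
                writel(0, &p->mr0);     /* MMIO only through accessors */
        return p;
}

static void my_unmap_regs(struct my_regs __iomem *p)
{
        iounmap(p);                     /* takes the __iomem pointer as-is */
}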
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h index 911abbaf471b..f61273b2e94f 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.h +++ b/drivers/net/ibm_emac/ibm_emac_core.h | |||
@@ -155,7 +155,7 @@ struct ibm_emac_error_stats { | |||
155 | 155 | ||
156 | struct ocp_enet_private { | 156 | struct ocp_enet_private { |
157 | struct net_device *ndev; /* 0 */ | 157 | struct net_device *ndev; /* 0 */ |
158 | struct emac_regs *emacp; | 158 | struct emac_regs __iomem *emacp; |
159 | 159 | ||
160 | struct mal_descriptor *tx_desc; | 160 | struct mal_descriptor *tx_desc; |
161 | int tx_cnt; | 161 | int tx_cnt; |
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c index 75d3b8639041..c7e1ecfa08fe 100644 --- a/drivers/net/ibm_emac/ibm_emac_debug.c +++ b/drivers/net/ibm_emac/ibm_emac_debug.c | |||
@@ -58,7 +58,7 @@ static void emac_desc_dump(int idx, struct ocp_enet_private *p) | |||
58 | 58 | ||
59 | static void emac_mac_dump(int idx, struct ocp_enet_private *dev) | 59 | static void emac_mac_dump(int idx, struct ocp_enet_private *dev) |
60 | { | 60 | { |
61 | struct emac_regs *p = dev->emacp; | 61 | struct emac_regs __iomem *p = dev->emacp; |
62 | 62 | ||
63 | printk("** EMAC%d registers **\n" | 63 | printk("** EMAC%d registers **\n" |
64 | "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" | 64 | "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" |
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h index a1ffb8a44fff..7f03d536c9a3 100644 --- a/drivers/net/ibm_emac/ibm_emac_rgmii.h +++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h | |||
@@ -31,7 +31,7 @@ struct rgmii_regs { | |||
31 | 31 | ||
32 | /* RGMII device */ | 32 | /* RGMII device */ |
33 | struct ibm_ocp_rgmii { | 33 | struct ibm_ocp_rgmii { |
34 | struct rgmii_regs *base; | 34 | struct rgmii_regs __iomem *base; |
35 | int users; /* number of EMACs using this RGMII bridge */ | 35 | int users; /* number of EMACs using this RGMII bridge */ |
36 | }; | 36 | }; |
37 | 37 | ||
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.c b/drivers/net/ibm_emac/ibm_emac_zmii.c index 35c1185079ed..e129e0aaa045 100644 --- a/drivers/net/ibm_emac/ibm_emac_zmii.c +++ b/drivers/net/ibm_emac/ibm_emac_zmii.c | |||
@@ -80,7 +80,7 @@ static inline u32 zmii_mode_mask(int mode, int input) | |||
80 | static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode) | 80 | static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode) |
81 | { | 81 | { |
82 | struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev); | 82 | struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev); |
83 | struct zmii_regs *p; | 83 | struct zmii_regs __iomem *p; |
84 | 84 | ||
85 | ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode); | 85 | ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode); |
86 | 86 | ||
@@ -94,8 +94,7 @@ static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode) | |||
94 | } | 94 | } |
95 | dev->mode = PHY_MODE_NA; | 95 | dev->mode = PHY_MODE_NA; |
96 | 96 | ||
97 | p = (struct zmii_regs *)ioremap(ocpdev->def->paddr, | 97 | p = ioremap(ocpdev->def->paddr, sizeof(struct zmii_regs)); |
98 | sizeof(struct zmii_regs)); | ||
99 | if (!p) { | 98 | if (!p) { |
100 | printk(KERN_ERR | 99 | printk(KERN_ERR |
101 | "zmii%d: could not ioremap device registers!\n", | 100 | "zmii%d: could not ioremap device registers!\n", |
@@ -231,7 +230,7 @@ void __exit __zmii_fini(struct ocp_device *ocpdev, int input) | |||
231 | if (!--dev->users) { | 230 | if (!--dev->users) { |
232 | /* Free everything if this is the last user */ | 231 | /* Free everything if this is the last user */ |
233 | ocp_set_drvdata(ocpdev, NULL); | 232 | ocp_set_drvdata(ocpdev, NULL); |
234 | iounmap((void *)dev->base); | 233 | iounmap(dev->base); |
235 | kfree(dev); | 234 | kfree(dev); |
236 | } | 235 | } |
237 | } | 236 | } |
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h index 0bb26062c0ad..92c854410753 100644 --- a/drivers/net/ibm_emac/ibm_emac_zmii.h +++ b/drivers/net/ibm_emac/ibm_emac_zmii.h | |||
@@ -32,7 +32,7 @@ struct zmii_regs { | |||
32 | 32 | ||
33 | /* ZMII device */ | 33 | /* ZMII device */ |
34 | struct ibm_ocp_zmii { | 34 | struct ibm_ocp_zmii { |
35 | struct zmii_regs *base; | 35 | struct zmii_regs __iomem *base; |
36 | int mode; /* subset of PHY_MODE_XXXX */ | 36 | int mode; /* subset of PHY_MODE_XXXX */ |
37 | int users; /* number of EMACs using this ZMII bridge */ | 37 | int users; /* number of EMACs using this ZMII bridge */ |
38 | u32 fer_save; /* FER value left by firmware */ | 38 | u32 fer_save; /* FER value left by firmware */ |
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 7a081346f079..c81fe1c382d5 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig | |||
@@ -283,7 +283,7 @@ config USB_IRDA | |||
283 | Say Y here if you want to build support for the USB IrDA FIR Dongle | 283 | Say Y here if you want to build support for the USB IrDA FIR Dongle |
284 | device driver. To compile it as a module, choose M here: the module | 284 | device driver. To compile it as a module, choose M here: the module |
285 | will be called irda-usb. IrDA-USB supports the various IrDA USB | 285 | will be called irda-usb. IrDA-USB supports the various IrDA USB |
286 | dongles available and most of their pecularities. Those dongles | 286 | dongles available and most of their peculiarities. Those dongles |
287 | plug in the USB port of your computer, are plug and play, and | 287 | plug in the USB port of your computer, are plug and play, and |
288 | support SIR and FIR (4Mbps) speeds. On the other hand, those | 288 | support SIR and FIR (4Mbps) speeds. On the other hand, those |
289 | dongles tend to be less efficient than a FIR chipset. | 289 | dongles tend to be less efficient than a FIR chipset. |
@@ -360,7 +360,7 @@ config ALI_FIR | |||
360 | help | 360 | help |
361 | Say Y here if you want to build support for the ALi M5123 FIR | 361 | Say Y here if you want to build support for the ALi M5123 FIR |
362 | Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C, | 362 | Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C, |
363 | M1535, M1535D, M1535+, M1535D Sourth Bridge. This driver supports | 363 | M1535, M1535D, M1535+, M1535D South Bridge. This driver supports |
364 | SIR, MIR and FIR (4Mbps) speeds. | 364 | SIR, MIR and FIR (4Mbps) speeds. |
365 | 365 | ||
366 | To compile it as a module, choose M here: the module will be called | 366 | To compile it as a module, choose M here: the module will be called |
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 02d5c6822733..f6f3dafe83ee 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c | |||
@@ -622,7 +622,7 @@ static int __init mac_sonic_init_module(void) | |||
622 | return 0; | 622 | return 0; |
623 | 623 | ||
624 | out_unregister: | 624 | out_unregister: |
625 | driver_unregister(&mac_sonic_driver); | 625 | platform_driver_unregister(&mac_sonic_driver); |
626 | 626 | ||
627 | return -ENOMEM; | 627 | return -ENOMEM; |
628 | } | 628 | } |
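The macsonic fix above unwinds with platform_driver_unregister(), matching the platform_driver_register() call it cleans up after, rather than reaching down to the raw driver_unregister(). A minimal sketch of that init/error-path pairing, with hypothetical names (my_driver, my_further_setup):

#include <linux/init.h>
#include <linux/platform_device.h>

static int my_further_setup(void);              /* assumed second init step */

static struct platform_driver my_driver = {
        .driver = {
                .name   = "my-dev",             /* placeholder name */
        },
};

static int __init my_init_module(void)
{
        int err;

        err = platform_driver_register(&my_driver);
        if (err)
                return err;

        err = my_further_setup();
        if (err)
                /* undo at the same API level that did the registration */
                platform_driver_unregister(&my_driver);

        return err;
}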
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index c0998ef938e0..9f2661355a4a 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * | 10 | * |
11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> | 11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> |
12 | * | 12 | * |
13 | * Copyright (C) 2004-2005 MontaVista Software, Inc. | 13 | * Copyright (C) 2004-2006 MontaVista Software, Inc. |
14 | * Dale Farnsworth <dale@farnsworth.org> | 14 | * Dale Farnsworth <dale@farnsworth.org> |
15 | * | 15 | * |
16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> | 16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> |
@@ -37,8 +37,6 @@ | |||
37 | #include <linux/tcp.h> | 37 | #include <linux/tcp.h> |
38 | #include <linux/udp.h> | 38 | #include <linux/udp.h> |
39 | #include <linux/etherdevice.h> | 39 | #include <linux/etherdevice.h> |
40 | #include <linux/in.h> | ||
41 | #include <linux/ip.h> | ||
42 | 40 | ||
43 | #include <linux/bitops.h> | 41 | #include <linux/bitops.h> |
44 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
@@ -52,39 +50,16 @@ | |||
52 | #include <asm/delay.h> | 50 | #include <asm/delay.h> |
53 | #include "mv643xx_eth.h" | 51 | #include "mv643xx_eth.h" |
54 | 52 | ||
55 | /* | ||
56 | * The first part is the high level driver of the gigE ethernet ports. | ||
57 | */ | ||
58 | |||
59 | /* Constants */ | ||
60 | #define VLAN_HLEN 4 | ||
61 | #define FCS_LEN 4 | ||
62 | #define DMA_ALIGN 8 /* hw requires 8-byte alignment */ | ||
63 | #define HW_IP_ALIGN 2 /* hw aligns IP header */ | ||
64 | #define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN | ||
65 | #define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7) | ||
66 | |||
67 | #define INT_UNMASK_ALL 0x0007ffff | ||
68 | #define INT_UNMASK_ALL_EXT 0x0011ffff | ||
69 | #define INT_MASK_ALL 0x00000000 | ||
70 | #define INT_MASK_ALL_EXT 0x00000000 | ||
71 | #define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL | ||
72 | #define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT | ||
73 | |||
74 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
75 | #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) | ||
76 | #else | ||
77 | #define MAX_DESCS_PER_SKB 1 | ||
78 | #endif | ||
79 | |||
80 | #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */ | ||
81 | #define PHY_WAIT_MICRO_SECONDS 10 | ||
82 | |||
83 | /* Static function declarations */ | 53 | /* Static function declarations */ |
84 | static int eth_port_link_is_up(unsigned int eth_port_num); | ||
85 | static void eth_port_uc_addr_get(struct net_device *dev, | 54 | static void eth_port_uc_addr_get(struct net_device *dev, |
86 | unsigned char *MacAddr); | 55 | unsigned char *MacAddr); |
87 | static void eth_port_set_multicast_list(struct net_device *); | 56 | static void eth_port_set_multicast_list(struct net_device *); |
57 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | ||
58 | unsigned int queues); | ||
59 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | ||
60 | unsigned int queues); | ||
61 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num); | ||
62 | static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); | ||
88 | static int mv643xx_eth_open(struct net_device *); | 63 | static int mv643xx_eth_open(struct net_device *); |
89 | static int mv643xx_eth_stop(struct net_device *); | 64 | static int mv643xx_eth_stop(struct net_device *); |
90 | static int mv643xx_eth_change_mtu(struct net_device *, int); | 65 | static int mv643xx_eth_change_mtu(struct net_device *, int); |
@@ -93,8 +68,12 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num); | |||
93 | #ifdef MV643XX_NAPI | 68 | #ifdef MV643XX_NAPI |
94 | static int mv643xx_poll(struct net_device *dev, int *budget); | 69 | static int mv643xx_poll(struct net_device *dev, int *budget); |
95 | #endif | 70 | #endif |
71 | static int ethernet_phy_get(unsigned int eth_port_num); | ||
96 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | 72 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); |
97 | static int ethernet_phy_detect(unsigned int eth_port_num); | 73 | static int ethernet_phy_detect(unsigned int eth_port_num); |
74 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location); | ||
75 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val); | ||
76 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | ||
98 | static struct ethtool_ops mv643xx_ethtool_ops; | 77 | static struct ethtool_ops mv643xx_ethtool_ops; |
99 | 78 | ||
100 | static char mv643xx_driver_name[] = "mv643xx_eth"; | 79 | static char mv643xx_driver_name[] = "mv643xx_eth"; |
@@ -153,67 +132,53 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | |||
153 | } | 132 | } |
154 | 133 | ||
155 | /* | 134 | /* |
156 | * mv643xx_eth_rx_task | 135 | * mv643xx_eth_rx_refill_descs |
157 | * | 136 | * |
158 | * Fills / refills RX queue on a certain gigabit ethernet port | 137 | * Fills / refills RX queue on a certain gigabit ethernet port |
159 | * | 138 | * |
160 | * Input : pointer to ethernet interface network device structure | 139 | * Input : pointer to ethernet interface network device structure |
161 | * Output : N/A | 140 | * Output : N/A |
162 | */ | 141 | */ |
163 | static void mv643xx_eth_rx_task(void *data) | 142 | static void mv643xx_eth_rx_refill_descs(struct net_device *dev) |
164 | { | 143 | { |
165 | struct net_device *dev = (struct net_device *)data; | ||
166 | struct mv643xx_private *mp = netdev_priv(dev); | 144 | struct mv643xx_private *mp = netdev_priv(dev); |
167 | struct pkt_info pkt_info; | 145 | struct pkt_info pkt_info; |
168 | struct sk_buff *skb; | 146 | struct sk_buff *skb; |
169 | int unaligned; | 147 | int unaligned; |
170 | 148 | ||
171 | if (test_and_set_bit(0, &mp->rx_task_busy)) | 149 | while (mp->rx_desc_count < mp->rx_ring_size) { |
172 | panic("%s: Error in test_set_bit / clear_bit", dev->name); | 150 | skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN); |
173 | |||
174 | while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) { | ||
175 | skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN); | ||
176 | if (!skb) | 151 | if (!skb) |
177 | break; | 152 | break; |
178 | mp->rx_ring_skbs++; | 153 | mp->rx_desc_count++; |
179 | unaligned = (u32)skb->data & (DMA_ALIGN - 1); | 154 | unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1); |
180 | if (unaligned) | 155 | if (unaligned) |
181 | skb_reserve(skb, DMA_ALIGN - unaligned); | 156 | skb_reserve(skb, ETH_DMA_ALIGN - unaligned); |
182 | pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; | 157 | pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; |
183 | pkt_info.byte_cnt = RX_SKB_SIZE; | 158 | pkt_info.byte_cnt = ETH_RX_SKB_SIZE; |
184 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, | 159 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, |
185 | DMA_FROM_DEVICE); | 160 | ETH_RX_SKB_SIZE, DMA_FROM_DEVICE); |
186 | pkt_info.return_info = skb; | 161 | pkt_info.return_info = skb; |
187 | if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) { | 162 | if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) { |
188 | printk(KERN_ERR | 163 | printk(KERN_ERR |
189 | "%s: Error allocating RX Ring\n", dev->name); | 164 | "%s: Error allocating RX Ring\n", dev->name); |
190 | break; | 165 | break; |
191 | } | 166 | } |
192 | skb_reserve(skb, HW_IP_ALIGN); | 167 | skb_reserve(skb, ETH_HW_IP_ALIGN); |
193 | } | 168 | } |
194 | clear_bit(0, &mp->rx_task_busy); | ||
195 | /* | 169 | /* |
196 | * If RX ring is empty of SKB, set a timer to try allocating | 170 | * If RX ring is empty of SKB, set a timer to try allocating |
197 | * again in a later time . | 171 | * again at a later time. |
198 | */ | 172 | */ |
199 | if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) { | 173 | if (mp->rx_desc_count == 0) { |
200 | printk(KERN_INFO "%s: Rx ring is empty\n", dev->name); | 174 | printk(KERN_INFO "%s: Rx ring is empty\n", dev->name); |
201 | /* After 100mSec */ | 175 | mp->timeout.expires = jiffies + (HZ / 10); /* 100 mSec */ |
202 | mp->timeout.expires = jiffies + (HZ / 10); | ||
203 | add_timer(&mp->timeout); | 176 | add_timer(&mp->timeout); |
204 | mp->rx_timer_flag = 1; | ||
205 | } | ||
206 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
207 | else { | ||
208 | /* Return interrupts */ | ||
209 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num), | ||
210 | INT_UNMASK_ALL); | ||
211 | } | 177 | } |
212 | #endif | ||
213 | } | 178 | } |
214 | 179 | ||
215 | /* | 180 | /* |
216 | * mv643xx_eth_rx_task_timer_wrapper | 181 | * mv643xx_eth_rx_refill_descs_timer_wrapper |
217 | * | 182 | * |
218 | * Timer routine to wake up RX queue filling task. This function is | 183 | * Timer routine to wake up RX queue filling task. This function is |
219 | * used only in case the RX queue is empty, and all alloc_skb has | 184 | * used only in case the RX queue is empty, and all alloc_skb has |
@@ -222,13 +187,9 @@ static void mv643xx_eth_rx_task(void *data) | |||
222 | * Input : pointer to ethernet interface network device structure | 187 | * Input : pointer to ethernet interface network device structure |
223 | * Output : N/A | 188 | * Output : N/A |
224 | */ | 189 | */ |
225 | static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data) | 190 | static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data) |
226 | { | 191 | { |
227 | struct net_device *dev = (struct net_device *)data; | 192 | mv643xx_eth_rx_refill_descs((struct net_device *)data); |
228 | struct mv643xx_private *mp = netdev_priv(dev); | ||
229 | |||
230 | mp->rx_timer_flag = 0; | ||
231 | mv643xx_eth_rx_task((void *)data); | ||
232 | } | 193 | } |
233 | 194 | ||
234 | /* | 195 | /* |
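With the rework above, the refill path arms a plain one-shot timer to retry roughly 100 ms later only when dev_alloc_skb() fails outright, instead of juggling the old rx_task_busy/rx_timer_flag state. A minimal sketch of this kernel generation's timer idiom, with placeholder names (the driver itself wires mv643xx_eth_rx_refill_descs_timer_wrapper() into mp->timeout at open time):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_retry_timer;

static void my_retry_fn(unsigned long data);    /* re-runs the refill loop */

static void my_arm_retry(unsigned long data)
{
        init_timer(&my_retry_timer);
        my_retry_timer.function = my_retry_fn;
        my_retry_timer.data = data;                     /* usually (unsigned long)dev */
        my_retry_timer.expires = jiffies + HZ / 10;     /* ~100 ms */
        add_timer(&my_retry_timer);
}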
@@ -245,8 +206,7 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev) | |||
245 | unsigned int port_num = mp->port_num; | 206 | unsigned int port_num = mp->port_num; |
246 | 207 | ||
247 | eth_port_init_mac_tables(port_num); | 208 | eth_port_init_mac_tables(port_num); |
248 | memcpy(mp->port_mac_addr, dev->dev_addr, 6); | 209 | eth_port_uc_addr_set(port_num, dev->dev_addr); |
249 | eth_port_uc_addr_set(port_num, mp->port_mac_addr); | ||
250 | } | 210 | } |
251 | 211 | ||
252 | /* | 212 | /* |
@@ -260,13 +220,14 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev) | |||
260 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) | 220 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) |
261 | { | 221 | { |
262 | struct mv643xx_private *mp = netdev_priv(dev); | 222 | struct mv643xx_private *mp = netdev_priv(dev); |
223 | u32 config_reg; | ||
263 | 224 | ||
225 | config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num)); | ||
264 | if (dev->flags & IFF_PROMISC) | 226 | if (dev->flags & IFF_PROMISC) |
265 | mp->port_config |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | 227 | config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; |
266 | else | 228 | else |
267 | mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | 229 | config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; |
268 | 230 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg); | |
269 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config); | ||
270 | 231 | ||
271 | eth_port_set_multicast_list(dev); | 232 | eth_port_set_multicast_list(dev); |
272 | } | 233 | } |
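mv643xx_eth_set_rx_mode() now does a read-modify-write of the live port-config register instead of patching a cached mp->port_config copy, so the register can never drift from a stale shadow value. A small sketch of the pattern; the accessors and the bit value are placeholders standing in for mv_read()/mv_write() and MV643XX_ETH_UNICAST_PROMISCUOUS_MODE:

#include <linux/types.h>

#define MY_UNICAST_PROMISC      0x00000001      /* placeholder bit */

static u32 my_reg_read(int port);               /* assumed MMIO wrappers */
static void my_reg_write(int port, u32 val);

static void my_set_promisc(int port, int on)
{
        u32 v = my_reg_read(port);      /* always start from the live value */

        if (on)
                v |= MY_UNICAST_PROMISC;
        else
                v &= ~MY_UNICAST_PROMISC;
        my_reg_write(port, v);
}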
@@ -322,53 +283,82 @@ static void mv643xx_eth_tx_timeout_task(struct net_device *dev) | |||
322 | 283 | ||
323 | netif_device_detach(dev); | 284 | netif_device_detach(dev); |
324 | eth_port_reset(mp->port_num); | 285 | eth_port_reset(mp->port_num); |
325 | eth_port_start(mp); | 286 | eth_port_start(dev); |
326 | netif_device_attach(dev); | 287 | netif_device_attach(dev); |
327 | } | 288 | } |
328 | 289 | ||
329 | /* | 290 | /** |
330 | * mv643xx_eth_free_tx_queue | 291 | * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors |
331 | * | ||
332 | * Input : dev - a pointer to the required interface | ||
333 | * | 292 | * |
334 | * Output : 0 if was able to release skb , nonzero otherwise | 293 | * If force is non-zero, frees uncompleted descriptors as well |
335 | */ | 294 | */ |
336 | static int mv643xx_eth_free_tx_queue(struct net_device *dev, | 295 | int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) |
337 | unsigned int eth_int_cause_ext) | ||
338 | { | 296 | { |
339 | struct mv643xx_private *mp = netdev_priv(dev); | 297 | struct mv643xx_private *mp = netdev_priv(dev); |
340 | struct net_device_stats *stats = &mp->stats; | 298 | struct eth_tx_desc *desc; |
341 | struct pkt_info pkt_info; | 299 | u32 cmd_sts; |
342 | int released = 1; | 300 | struct sk_buff *skb; |
301 | unsigned long flags; | ||
302 | int tx_index; | ||
303 | dma_addr_t addr; | ||
304 | int count; | ||
305 | int released = 0; | ||
306 | |||
307 | while (mp->tx_desc_count > 0) { | ||
308 | spin_lock_irqsave(&mp->lock, flags); | ||
309 | tx_index = mp->tx_used_desc_q; | ||
310 | desc = &mp->p_tx_desc_area[tx_index]; | ||
311 | cmd_sts = desc->cmd_sts; | ||
312 | |||
313 | if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) { | ||
314 | spin_unlock_irqrestore(&mp->lock, flags); | ||
315 | return released; | ||
316 | } | ||
343 | 317 | ||
344 | if (!(eth_int_cause_ext & (BIT0 | BIT8))) | 318 | mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size; |
345 | return released; | 319 | mp->tx_desc_count--; |
346 | 320 | ||
347 | /* Check only queue 0 */ | 321 | addr = desc->buf_ptr; |
348 | while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { | 322 | count = desc->byte_cnt; |
349 | if (pkt_info.cmd_sts & BIT0) { | 323 | skb = mp->tx_skb[tx_index]; |
324 | if (skb) | ||
325 | mp->tx_skb[tx_index] = NULL; | ||
326 | |||
327 | spin_unlock_irqrestore(&mp->lock, flags); | ||
328 | |||
329 | if (cmd_sts & ETH_ERROR_SUMMARY) { | ||
350 | printk("%s: Error in TX\n", dev->name); | 330 | printk("%s: Error in TX\n", dev->name); |
351 | stats->tx_errors++; | 331 | mp->stats.tx_errors++; |
352 | } | 332 | } |
353 | 333 | ||
354 | if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) | 334 | if (cmd_sts & ETH_TX_FIRST_DESC) |
355 | dma_unmap_single(NULL, pkt_info.buf_ptr, | 335 | dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); |
356 | pkt_info.byte_cnt, | ||
357 | DMA_TO_DEVICE); | ||
358 | else | 336 | else |
359 | dma_unmap_page(NULL, pkt_info.buf_ptr, | 337 | dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE); |
360 | pkt_info.byte_cnt, | ||
361 | DMA_TO_DEVICE); | ||
362 | 338 | ||
363 | if (pkt_info.return_info) { | 339 | if (skb) |
364 | dev_kfree_skb_irq(pkt_info.return_info); | 340 | dev_kfree_skb_irq(skb); |
365 | released = 0; | 341 | |
366 | } | 342 | released = 1; |
367 | } | 343 | } |
368 | 344 | ||
369 | return released; | 345 | return released; |
370 | } | 346 | } |
371 | 347 | ||
348 | static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev) | ||
349 | { | ||
350 | struct mv643xx_private *mp = netdev_priv(dev); | ||
351 | |||
352 | if (mv643xx_eth_free_tx_descs(dev, 0) && | ||
353 | mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) | ||
354 | netif_wake_queue(dev); | ||
355 | } | ||
356 | |||
357 | static void mv643xx_eth_free_all_tx_descs(struct net_device *dev) | ||
358 | { | ||
359 | mv643xx_eth_free_tx_descs(dev, 1); | ||
360 | } | ||
361 | |||
372 | /* | 362 | /* |
373 | * mv643xx_eth_receive | 363 | * mv643xx_eth_receive |
374 | * | 364 | * |
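The new mv643xx_eth_free_tx_descs() walks the ring from the oldest outstanding descriptor and stops at the first one still owned by the DMA engine, unless it is called with force set (shutdown). A stand-alone userspace model of just that index arithmetic, leaving out the locking, DMA unmapping and skb freeing; it compiles with any C compiler:

#include <stdio.h>

#define RING_SIZE       8
#define OWNED_BY_DMA    0x80000000u     /* stands in for ETH_BUFFER_OWNED_BY_DMA */

struct desc { unsigned int cmd_sts; };

static struct desc ring[RING_SIZE];
static int used_idx;                    /* oldest descriptor not yet reclaimed */
static int desc_count;                  /* descriptors currently outstanding */

static int reclaim(int force)
{
        int released = 0;

        while (desc_count > 0) {
                struct desc *d = &ring[used_idx];

                if (!force && (d->cmd_sts & OWNED_BY_DMA))
                        break;          /* hardware is not done with it yet */

                used_idx = (used_idx + 1) % RING_SIZE;
                desc_count--;
                released++;
        }
        return released;
}

int main(void)
{
        desc_count = 3;                 /* three in flight, the last still owned by "DMA" */
        ring[2].cmd_sts = OWNED_BY_DMA;
        printf("reclaimed %d of 3\n", reclaim(0));      /* prints 2 */
        printf("force-reclaimed %d more\n", reclaim(1));        /* prints 1 */
        return 0;
}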
@@ -380,11 +370,7 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev, | |||
380 | * | 370 | * |
381 | * Output : number of served packets | 371 | * Output : number of served packets |
382 | */ | 372 | */ |
383 | #ifdef MV643XX_NAPI | ||
384 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | 373 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) |
385 | #else | ||
386 | static int mv643xx_eth_receive_queue(struct net_device *dev) | ||
387 | #endif | ||
388 | { | 374 | { |
389 | struct mv643xx_private *mp = netdev_priv(dev); | 375 | struct mv643xx_private *mp = netdev_priv(dev); |
390 | struct net_device_stats *stats = &mp->stats; | 376 | struct net_device_stats *stats = &mp->stats; |
@@ -392,15 +378,14 @@ static int mv643xx_eth_receive_queue(struct net_device *dev) | |||
392 | struct sk_buff *skb; | 378 | struct sk_buff *skb; |
393 | struct pkt_info pkt_info; | 379 | struct pkt_info pkt_info; |
394 | 380 | ||
395 | #ifdef MV643XX_NAPI | ||
396 | while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { | 381 | while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { |
397 | #else | 382 | mp->rx_desc_count--; |
398 | while (eth_port_receive(mp, &pkt_info) == ETH_OK) { | ||
399 | #endif | ||
400 | mp->rx_ring_skbs--; | ||
401 | received_packets++; | 383 | received_packets++; |
402 | 384 | ||
403 | /* Update statistics. Note byte count includes 4 byte CRC count */ | 385 | /* |
386 | * Update statistics. | ||
387 | * Note byte count includes 4 byte CRC count | ||
388 | */ | ||
404 | stats->rx_packets++; | 389 | stats->rx_packets++; |
405 | stats->rx_bytes += pkt_info.byte_cnt; | 390 | stats->rx_bytes += pkt_info.byte_cnt; |
406 | skb = pkt_info.return_info; | 391 | skb = pkt_info.return_info; |
@@ -448,10 +433,61 @@ static int mv643xx_eth_receive_queue(struct net_device *dev) | |||
448 | } | 433 | } |
449 | dev->last_rx = jiffies; | 434 | dev->last_rx = jiffies; |
450 | } | 435 | } |
436 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ | ||
451 | 437 | ||
452 | return received_packets; | 438 | return received_packets; |
453 | } | 439 | } |
454 | 440 | ||
441 | /* Set the mv643xx port configuration register for the speed/duplex mode. */ | ||
442 | static void mv643xx_eth_update_pscr(struct net_device *dev, | ||
443 | struct ethtool_cmd *ecmd) | ||
444 | { | ||
445 | struct mv643xx_private *mp = netdev_priv(dev); | ||
446 | int port_num = mp->port_num; | ||
447 | u32 o_pscr, n_pscr; | ||
448 | unsigned int queues; | ||
449 | |||
450 | o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
451 | n_pscr = o_pscr; | ||
452 | |||
453 | /* clear speed, duplex and rx buffer size fields */ | ||
454 | n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 | | ||
455 | MV643XX_ETH_SET_GMII_SPEED_TO_1000 | | ||
456 | MV643XX_ETH_SET_FULL_DUPLEX_MODE | | ||
457 | MV643XX_ETH_MAX_RX_PACKET_MASK); | ||
458 | |||
459 | if (ecmd->duplex == DUPLEX_FULL) | ||
460 | n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE; | ||
461 | |||
462 | if (ecmd->speed == SPEED_1000) | ||
463 | n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 | | ||
464 | MV643XX_ETH_MAX_RX_PACKET_9700BYTE; | ||
465 | else { | ||
466 | if (ecmd->speed == SPEED_100) | ||
467 | n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100; | ||
468 | n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE; | ||
469 | } | ||
470 | |||
471 | if (n_pscr != o_pscr) { | ||
472 | if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0) | ||
473 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
474 | n_pscr); | ||
475 | else { | ||
476 | queues = mv643xx_eth_port_disable_tx(port_num); | ||
477 | |||
478 | o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; | ||
479 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
480 | o_pscr); | ||
481 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
482 | n_pscr); | ||
483 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
484 | n_pscr); | ||
485 | if (queues) | ||
486 | mv643xx_eth_port_enable_tx(port_num, queues); | ||
487 | } | ||
488 | } | ||
489 | } | ||
490 | |||
455 | /* | 491 | /* |
456 | * mv643xx_eth_int_handler | 492 | * mv643xx_eth_int_handler |
457 | * | 493 | * |
@@ -473,78 +509,52 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id, | |||
473 | 509 | ||
474 | /* Read interrupt cause registers */ | 510 | /* Read interrupt cause registers */ |
475 | eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & | 511 | eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & |
476 | INT_UNMASK_ALL; | 512 | ETH_INT_UNMASK_ALL; |
477 | 513 | if (eth_int_cause & ETH_INT_CAUSE_EXT) { | |
478 | if (eth_int_cause & BIT1) | ||
479 | eth_int_cause_ext = mv_read( | 514 | eth_int_cause_ext = mv_read( |
480 | MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & | 515 | MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & |
481 | INT_UNMASK_ALL_EXT; | 516 | ETH_INT_UNMASK_ALL_EXT; |
517 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), | ||
518 | ~eth_int_cause_ext); | ||
519 | } | ||
482 | 520 | ||
483 | #ifdef MV643XX_NAPI | 521 | /* PHY status changed */ |
484 | if (!(eth_int_cause & 0x0007fffd)) { | 522 | if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) { |
485 | /* Dont ack the Rx interrupt */ | 523 | struct ethtool_cmd cmd; |
486 | #endif | 524 | |
487 | /* | 525 | if (mii_link_ok(&mp->mii)) { |
488 | * Clear specific ethernet port intrerrupt registers by | 526 | mii_ethtool_gset(&mp->mii, &cmd); |
489 | * acknowleding relevant bits. | 527 | mv643xx_eth_update_pscr(dev, &cmd); |
490 | */ | 528 | mv643xx_eth_port_enable_tx(port_num, |
491 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), | 529 | ETH_TX_QUEUES_ENABLED); |
492 | ~eth_int_cause); | 530 | if (!netif_carrier_ok(dev)) { |
493 | if (eth_int_cause_ext != 0x0) | 531 | netif_carrier_on(dev); |
494 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG | 532 | if (mp->tx_ring_size - mp->tx_desc_count >= |
495 | (port_num), ~eth_int_cause_ext); | 533 | MAX_DESCS_PER_SKB) |
496 | 534 | netif_wake_queue(dev); | |
497 | /* UDP change : We may need this */ | 535 | } |
498 | if ((eth_int_cause_ext & 0x0000ffff) && | 536 | } else if (netif_carrier_ok(dev)) { |
499 | (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) && | 537 | netif_stop_queue(dev); |
500 | (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) | 538 | netif_carrier_off(dev); |
501 | netif_wake_queue(dev); | ||
502 | #ifdef MV643XX_NAPI | ||
503 | } else { | ||
504 | if (netif_rx_schedule_prep(dev)) { | ||
505 | /* Mask all the interrupts */ | ||
506 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | ||
507 | INT_MASK_ALL); | ||
508 | /* wait for previous write to complete */ | ||
509 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | ||
510 | __netif_rx_schedule(dev); | ||
511 | } | 539 | } |
512 | #else | 540 | } |
513 | if (eth_int_cause & (BIT2 | BIT11)) | ||
514 | mv643xx_eth_receive_queue(dev, 0); | ||
515 | 541 | ||
516 | /* | 542 | #ifdef MV643XX_NAPI |
517 | * After forwarded received packets to upper layer, add a task | 543 | if (eth_int_cause & ETH_INT_CAUSE_RX) { |
518 | * in an interrupts enabled context that refills the RX ring | 544 | /* schedule the NAPI poll routine to maintain port */ |
519 | * with skb's. | ||
520 | */ | ||
521 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
522 | /* Mask all interrupts on ethernet port */ | ||
523 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 545 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), |
524 | INT_MASK_ALL); | 546 | ETH_INT_MASK_ALL); |
525 | /* wait for previous write to take effect */ | 547 | /* wait for previous write to complete */ |
526 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 548 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
527 | 549 | ||
528 | queue_task(&mp->rx_task, &tq_immediate); | 550 | netif_rx_schedule(dev); |
529 | mark_bh(IMMEDIATE_BH); | 551 | } |
530 | #else | 552 | #else |
531 | mp->rx_task.func(dev); | 553 | if (eth_int_cause & ETH_INT_CAUSE_RX) |
554 | mv643xx_eth_receive_queue(dev, INT_MAX); | ||
555 | if (eth_int_cause_ext & ETH_INT_CAUSE_TX) | ||
556 | mv643xx_eth_free_completed_tx_descs(dev); | ||
532 | #endif | 557 | #endif |
533 | #endif | ||
534 | } | ||
535 | /* PHY status changed */ | ||
536 | if (eth_int_cause_ext & (BIT16 | BIT20)) { | ||
537 | if (eth_port_link_is_up(port_num)) { | ||
538 | netif_carrier_on(dev); | ||
539 | netif_wake_queue(dev); | ||
540 | /* Start TX queue */ | ||
541 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG | ||
542 | (port_num), 1); | ||
543 | } else { | ||
544 | netif_carrier_off(dev); | ||
545 | netif_stop_queue(dev); | ||
546 | } | ||
547 | } | ||
548 | 558 | ||
549 | /* | 559 | /* |
550 | * If no real interrupt occurred, exit. | 560 |
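After the rewrite above, the interrupt handler only decodes and acknowledges the cause registers; under MV643XX_NAPI the receive work is deferred by masking the port interrupts and scheduling the poll routine. A minimal sketch of that pre-2.6.24 NAPI contract, with my_mask_irqs(), my_unmask_irqs() and my_rx_work() standing in for the device-specific pieces:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static void my_mask_irqs(struct net_device *dev);       /* assumed helpers */
static void my_unmask_irqs(struct net_device *dev);
static int my_rx_work(struct net_device *dev, int budget);

static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;

        if (netif_rx_schedule_prep(dev)) {      /* not already being polled */
                my_mask_irqs(dev);              /* quiet the device */
                __netif_rx_schedule(dev);       /* dev->poll() runs from softirq */
        }
        return IRQ_HANDLED;
}

static int my_poll(struct net_device *dev, int *budget)
{
        int quota = min(*budget, dev->quota);
        int done = my_rx_work(dev, quota);

        *budget -= done;
        dev->quota -= done;

        if (done < quota) {                     /* ring drained: stop polling */
                netif_rx_complete(dev);
                my_unmask_irqs(dev);
                return 0;
        }
        return 1;                               /* more work, poll again */
}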
@@ -670,9 +680,6 @@ static void ether_init_rx_desc_ring(struct mv643xx_private *mp) | |||
670 | mp->rx_used_desc_q = 0; | 680 | mp->rx_used_desc_q = 0; |
671 | 681 | ||
672 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); | 682 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); |
673 | |||
674 | /* Add the queue to the list of RX queues of this port */ | ||
675 | mp->port_rx_queue_command |= 1; | ||
676 | } | 683 | } |
677 | 684 | ||
678 | /* | 685 | /* |
@@ -712,14 +719,36 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp) | |||
712 | 719 | ||
713 | mp->tx_curr_desc_q = 0; | 720 | mp->tx_curr_desc_q = 0; |
714 | mp->tx_used_desc_q = 0; | 721 | mp->tx_used_desc_q = 0; |
715 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
716 | mp->tx_first_desc_q = 0; | ||
717 | #endif | ||
718 | 722 | ||
719 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); | 723 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); |
724 | } | ||
720 | 725 | ||
721 | /* Add the queue to the list of Tx queues of this port */ | 726 | static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
722 | mp->port_tx_queue_command |= 1; | 727 | { |
728 | struct mv643xx_private *mp = netdev_priv(dev); | ||
729 | int err; | ||
730 | |||
731 | spin_lock_irq(&mp->lock); | ||
732 | err = mii_ethtool_sset(&mp->mii, cmd); | ||
733 | spin_unlock_irq(&mp->lock); | ||
734 | |||
735 | return err; | ||
736 | } | ||
737 | |||
738 | static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
739 | { | ||
740 | struct mv643xx_private *mp = netdev_priv(dev); | ||
741 | int err; | ||
742 | |||
743 | spin_lock_irq(&mp->lock); | ||
744 | err = mii_ethtool_gset(&mp->mii, cmd); | ||
745 | spin_unlock_irq(&mp->lock); | ||
746 | |||
747 | /* The PHY may support 1000baseT_Half, but the mv643xx does not */ | ||
748 | cmd->supported &= ~SUPPORTED_1000baseT_Half; | ||
749 | cmd->advertising &= ~ADVERTISED_1000baseT_Half; | ||
750 | |||
751 | return err; | ||
723 | } | 752 | } |
724 | 753 | ||
725 | /* | 754 | /* |
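The new mv643xx_get_settings()/mv643xx_set_settings() simply delegate to the generic MII library under the driver lock: mii_ethtool_gset() and mii_ethtool_sset() read or program the PHY through the mp->mii interface and translate to or from struct ethtool_cmd. The hunks above do not show how such handlers get attached, so the wiring below is only an illustrative sketch of the usual ethtool_ops hookup for this kernel generation:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static struct ethtool_ops my_ethtool_ops = {
        .get_settings   = mv643xx_get_settings,
        .set_settings   = mv643xx_set_settings,
        .get_link       = ethtool_op_get_link,
};

static void my_attach_ethtool(struct net_device *dev)
{
        SET_ETHTOOL_OPS(dev, &my_ethtool_ops);  /* before register_netdev() */
}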
@@ -750,23 +779,12 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
750 | return -EAGAIN; | 779 | return -EAGAIN; |
751 | } | 780 | } |
752 | 781 | ||
753 | /* Stop RX Queues */ | ||
754 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | ||
755 | |||
756 | /* Set the MAC Address */ | ||
757 | memcpy(mp->port_mac_addr, dev->dev_addr, 6); | ||
758 | |||
759 | eth_port_init(mp); | 782 | eth_port_init(mp); |
760 | 783 | ||
761 | INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev); | ||
762 | |||
763 | memset(&mp->timeout, 0, sizeof(struct timer_list)); | 784 | memset(&mp->timeout, 0, sizeof(struct timer_list)); |
764 | mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper; | 785 | mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper; |
765 | mp->timeout.data = (unsigned long)dev; | 786 | mp->timeout.data = (unsigned long)dev; |
766 | 787 | ||
767 | mp->rx_task_busy = 0; | ||
768 | mp->rx_timer_flag = 0; | ||
769 | |||
770 | /* Allocate RX and TX skb rings */ | 788 | /* Allocate RX and TX skb rings */ |
771 | mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size, | 789 | mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size, |
772 | GFP_KERNEL); | 790 | GFP_KERNEL); |
@@ -784,7 +802,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
784 | } | 802 | } |
785 | 803 | ||
786 | /* Allocate TX ring */ | 804 | /* Allocate TX ring */ |
787 | mp->tx_ring_skbs = 0; | 805 | mp->tx_desc_count = 0; |
788 | size = mp->tx_ring_size * sizeof(struct eth_tx_desc); | 806 | size = mp->tx_ring_size * sizeof(struct eth_tx_desc); |
789 | mp->tx_desc_area_size = size; | 807 | mp->tx_desc_area_size = size; |
790 | 808 | ||
@@ -809,7 +827,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
809 | ether_init_tx_desc_ring(mp); | 827 | ether_init_tx_desc_ring(mp); |
810 | 828 | ||
811 | /* Allocate RX ring */ | 829 | /* Allocate RX ring */ |
812 | mp->rx_ring_skbs = 0; | 830 | mp->rx_desc_count = 0; |
813 | size = mp->rx_ring_size * sizeof(struct eth_rx_desc); | 831 | size = mp->rx_ring_size * sizeof(struct eth_rx_desc); |
814 | mp->rx_desc_area_size = size; | 832 | mp->rx_desc_area_size = size; |
815 | 833 | ||
@@ -839,9 +857,13 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
839 | 857 | ||
840 | ether_init_rx_desc_ring(mp); | 858 | ether_init_rx_desc_ring(mp); |
841 | 859 | ||
842 | mv643xx_eth_rx_task(dev); /* Fill RX ring with skb's */ | 860 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ |
861 | |||
862 | /* Clear any pending ethernet port interrupts */ | ||
863 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
864 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
843 | 865 | ||
844 | eth_port_start(mp); | 866 | eth_port_start(dev); |
845 | 867 | ||
846 | /* Interrupt Coalescing */ | 868 | /* Interrupt Coalescing */ |
847 | 869 | ||
@@ -853,16 +875,13 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
853 | mp->tx_int_coal = | 875 | mp->tx_int_coal = |
854 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); | 876 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); |
855 | 877 | ||
856 | /* Clear any pending ethernet port interrupts */ | ||
857 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
858 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
859 | |||
860 | /* Unmask phy and link status changes interrupts */ | 878 | /* Unmask phy and link status changes interrupts */ |
861 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), | 879 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), |
862 | INT_UNMASK_ALL_EXT); | 880 | ETH_INT_UNMASK_ALL_EXT); |
863 | 881 | ||
864 | /* Unmask RX buffer and TX end interrupt */ | 882 | /* Unmask RX buffer and TX end interrupt */ |
865 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); | 883 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
884 | |||
866 | return 0; | 885 | return 0; |
867 | 886 | ||
868 | out_free_tx_skb: | 887 | out_free_tx_skb: |
@@ -878,25 +897,14 @@ out_free_irq: | |||
878 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) | 897 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) |
879 | { | 898 | { |
880 | struct mv643xx_private *mp = netdev_priv(dev); | 899 | struct mv643xx_private *mp = netdev_priv(dev); |
881 | unsigned int port_num = mp->port_num; | ||
882 | unsigned int curr; | ||
883 | struct sk_buff *skb; | ||
884 | 900 | ||
885 | /* Stop Tx Queues */ | 901 | /* Stop Tx Queues */ |
886 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | 902 | mv643xx_eth_port_disable_tx(mp->port_num); |
887 | 903 | ||
888 | /* Free outstanding skb's on TX rings */ | 904 | /* Free outstanding skb's on TX ring */ |
889 | for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { | 905 | mv643xx_eth_free_all_tx_descs(dev); |
890 | skb = mp->tx_skb[curr]; | 906 | |
891 | if (skb) { | 907 | BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q); |
892 | mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags; | ||
893 | dev_kfree_skb(skb); | ||
894 | mp->tx_ring_skbs--; | ||
895 | } | ||
896 | } | ||
897 | if (mp->tx_ring_skbs) | ||
898 | printk("%s: Error on Tx descriptor free - could not free %d" | ||
899 | " descriptors\n", dev->name, mp->tx_ring_skbs); | ||
900 | 908 | ||
901 | /* Free TX ring */ | 909 | /* Free TX ring */ |
902 | if (mp->tx_sram_size) | 910 | if (mp->tx_sram_size) |
@@ -913,21 +921,21 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev) | |||
913 | int curr; | 921 | int curr; |
914 | 922 | ||
915 | /* Stop RX Queues */ | 923 | /* Stop RX Queues */ |
916 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | 924 | mv643xx_eth_port_disable_rx(port_num); |
917 | 925 | ||
918 | /* Free preallocated skb's on RX rings */ | 926 | /* Free preallocated skb's on RX rings */ |
919 | for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) { | 927 | for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { |
920 | if (mp->rx_skb[curr]) { | 928 | if (mp->rx_skb[curr]) { |
921 | dev_kfree_skb(mp->rx_skb[curr]); | 929 | dev_kfree_skb(mp->rx_skb[curr]); |
922 | mp->rx_ring_skbs--; | 930 | mp->rx_desc_count--; |
923 | } | 931 | } |
924 | } | 932 | } |
925 | 933 | ||
926 | if (mp->rx_ring_skbs) | 934 | if (mp->rx_desc_count) |
927 | printk(KERN_ERR | 935 | printk(KERN_ERR |
928 | "%s: Error in freeing Rx Ring. %d skb's still" | 936 | "%s: Error in freeing Rx Ring. %d skb's still" |
929 | " stuck in RX Ring - ignoring them\n", dev->name, | 937 | " stuck in RX Ring - ignoring them\n", dev->name, |
930 | mp->rx_ring_skbs); | 938 | mp->rx_desc_count); |
931 | /* Free RX ring */ | 939 | /* Free RX ring */ |
932 | if (mp->rx_sram_size) | 940 | if (mp->rx_sram_size) |
933 | iounmap(mp->p_rx_desc_area); | 941 | iounmap(mp->p_rx_desc_area); |
@@ -952,7 +960,7 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
952 | unsigned int port_num = mp->port_num; | 960 | unsigned int port_num = mp->port_num; |
953 | 961 | ||
954 | /* Mask all interrupts on ethernet port */ | 962 | /* Mask all interrupts on ethernet port */ |
955 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); | 963 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
956 | /* wait for previous write to complete */ | 964 | /* wait for previous write to complete */ |
957 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 965 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
958 | 966 | ||
@@ -977,30 +985,6 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
977 | } | 985 | } |
978 | 986 | ||
979 | #ifdef MV643XX_NAPI | 987 | #ifdef MV643XX_NAPI |
980 | static void mv643xx_tx(struct net_device *dev) | ||
981 | { | ||
982 | struct mv643xx_private *mp = netdev_priv(dev); | ||
983 | struct pkt_info pkt_info; | ||
984 | |||
985 | while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { | ||
986 | if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) | ||
987 | dma_unmap_single(NULL, pkt_info.buf_ptr, | ||
988 | pkt_info.byte_cnt, | ||
989 | DMA_TO_DEVICE); | ||
990 | else | ||
991 | dma_unmap_page(NULL, pkt_info.buf_ptr, | ||
992 | pkt_info.byte_cnt, | ||
993 | DMA_TO_DEVICE); | ||
994 | |||
995 | if (pkt_info.return_info) | ||
996 | dev_kfree_skb_irq(pkt_info.return_info); | ||
997 | } | ||
998 | |||
999 | if (netif_queue_stopped(dev) && | ||
1000 | mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB) | ||
1001 | netif_wake_queue(dev); | ||
1002 | } | ||
1003 | |||
1004 | /* | 988 | /* |
1005 | * mv643xx_poll | 989 | * mv643xx_poll |
1006 | * | 990 | * |
@@ -1014,7 +998,7 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1014 | 998 | ||
1015 | #ifdef MV643XX_TX_FAST_REFILL | 999 | #ifdef MV643XX_TX_FAST_REFILL |
1016 | if (++mp->tx_clean_threshold > 5) { | 1000 | if (++mp->tx_clean_threshold > 5) { |
1017 | mv643xx_tx(dev); | 1001 | mv643xx_eth_free_completed_tx_descs(dev); |
1018 | mp->tx_clean_threshold = 0; | 1002 | mp->tx_clean_threshold = 0; |
1019 | } | 1003 | } |
1020 | #endif | 1004 | #endif |
@@ -1025,7 +1009,6 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1025 | if (orig_budget > dev->quota) | 1009 | if (orig_budget > dev->quota) |
1026 | orig_budget = dev->quota; | 1010 | orig_budget = dev->quota; |
1027 | work_done = mv643xx_eth_receive_queue(dev, orig_budget); | 1011 | work_done = mv643xx_eth_receive_queue(dev, orig_budget); |
1028 | mp->rx_task.func(dev); | ||
1029 | *budget -= work_done; | 1012 | *budget -= work_done; |
1030 | dev->quota -= work_done; | 1013 | dev->quota -= work_done; |
1031 | if (work_done >= orig_budget) | 1014 | if (work_done >= orig_budget) |
@@ -1037,14 +1020,17 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1037 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | 1020 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); |
1038 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | 1021 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); |
1039 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 1022 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), |
1040 | INT_UNMASK_ALL); | 1023 | ETH_INT_UNMASK_ALL); |
1041 | } | 1024 | } |
1042 | 1025 | ||
1043 | return done ? 0 : 1; | 1026 | return done ? 0 : 1; |
1044 | } | 1027 | } |
1045 | #endif | 1028 | #endif |
1046 | 1029 | ||
1047 | /* Hardware can't handle unaligned fragments smaller than 9 bytes. | 1030 | /** |
1031 | * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments | ||
1032 | * | ||
1033 | * Hardware can't handle unaligned fragments smaller than 9 bytes. | ||
1048 | * This helper function detects that case. | 1034 | * This helper function detects that case. |
1049 | */ | 1035 | */ |
1050 | 1036 | ||
@@ -1061,223 +1047,166 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) | |||
1061 | return 0; | 1047 | return 0; |
1062 | } | 1048 | } |
1063 | 1049 | ||
1050 | /** | ||
1051 | * eth_alloc_tx_desc_index - return the index of the next available tx desc | ||
1052 | */ | ||
1053 | static int eth_alloc_tx_desc_index(struct mv643xx_private *mp) | ||
1054 | { | ||
1055 | int tx_desc_curr; | ||
1064 | 1056 | ||
1065 | /* | 1057 | BUG_ON(mp->tx_desc_count >= mp->tx_ring_size); |
1066 | * mv643xx_eth_start_xmit | 1058 | |
1067 | * | 1059 | tx_desc_curr = mp->tx_curr_desc_q; |
1068 | * This function is queues a packet in the Tx descriptor for | 1060 | mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size; |
1069 | * required port. | 1061 | |
1070 | * | 1062 | BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q); |
1071 | * Input : skb - a pointer to socket buffer | 1063 | |
1072 | * dev - a pointer to the required port | 1064 | return tx_desc_curr; |
1065 | } | ||
1066 | |||
1067 | /** | ||
1068 | * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments. | ||
1073 | * | 1069 | * |
1074 | * Output : zero upon success | 1070 | * Ensure the data for each fragment to be transmitted is mapped properly, |
1071 | * then fill in descriptors in the tx hw queue. | ||
1075 | */ | 1072 | */ |
1076 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1073 | static void eth_tx_fill_frag_descs(struct mv643xx_private *mp, |
1074 | struct sk_buff *skb) | ||
1077 | { | 1075 | { |
1078 | struct mv643xx_private *mp = netdev_priv(dev); | 1076 | int frag; |
1079 | struct net_device_stats *stats = &mp->stats; | 1077 | int tx_index; |
1080 | ETH_FUNC_RET_STATUS status; | 1078 | struct eth_tx_desc *desc; |
1081 | unsigned long flags; | ||
1082 | struct pkt_info pkt_info; | ||
1083 | 1079 | ||
1084 | if (netif_queue_stopped(dev)) { | 1080 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
1085 | printk(KERN_ERR | 1081 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; |
1086 | "%s: Tried sending packet when interface is stopped\n", | 1082 | |
1087 | dev->name); | 1083 | tx_index = eth_alloc_tx_desc_index(mp); |
1088 | return 1; | 1084 | desc = &mp->p_tx_desc_area[tx_index]; |
1085 | |||
1086 | desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA; | ||
1087 | /* Last Frag enables interrupt and frees the skb */ | ||
1088 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1089 | desc->cmd_sts |= ETH_ZERO_PADDING | | ||
1090 | ETH_TX_LAST_DESC | | ||
1091 | ETH_TX_ENABLE_INTERRUPT; | ||
1092 | mp->tx_skb[tx_index] = skb; | ||
1093 | } else | ||
1094 | mp->tx_skb[tx_index] = 0; | ||
1095 | |||
1096 | desc = &mp->p_tx_desc_area[tx_index]; | ||
1097 | desc->l4i_chk = 0; | ||
1098 | desc->byte_cnt = this_frag->size; | ||
1099 | desc->buf_ptr = dma_map_page(NULL, this_frag->page, | ||
1100 | this_frag->page_offset, | ||
1101 | this_frag->size, | ||
1102 | DMA_TO_DEVICE); | ||
1089 | } | 1103 | } |
1104 | } | ||
1090 | 1105 | ||
1091 | /* This is a hard error, log it. */ | 1106 | /** |
1092 | if ((mp->tx_ring_size - mp->tx_ring_skbs) <= | 1107 | * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw |
1093 | (skb_shinfo(skb)->nr_frags + 1)) { | 1108 | * |
1094 | netif_stop_queue(dev); | 1109 | * Ensure the data for an skb to be transmitted is mapped properly, |
1095 | printk(KERN_ERR | 1110 | * then fill in descriptors in the tx hw queue and start the hardware. |
1096 | "%s: Bug in mv643xx_eth - Trying to transmit when" | 1111 | */ |
1097 | " queue full !\n", dev->name); | 1112 | static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, |
1098 | return 1; | 1113 | struct sk_buff *skb) |
1099 | } | 1114 | { |
1115 | int tx_index; | ||
1116 | struct eth_tx_desc *desc; | ||
1117 | u32 cmd_sts; | ||
1118 | int length; | ||
1119 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
1100 | 1120 | ||
1101 | /* Paranoid check - this shouldn't happen */ | 1121 | cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA; |
1102 | if (skb == NULL) { | ||
1103 | stats->tx_dropped++; | ||
1104 | printk(KERN_ERR "mv64320_eth paranoid check failed\n"); | ||
1105 | return 1; | ||
1106 | } | ||
1107 | 1122 | ||
1108 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | 1123 | tx_index = eth_alloc_tx_desc_index(mp); |
1109 | if (has_tiny_unaligned_frags(skb)) { | 1124 | desc = &mp->p_tx_desc_area[tx_index]; |
1110 | if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { | ||
1111 | stats->tx_dropped++; | ||
1112 | printk(KERN_DEBUG "%s: failed to linearize tiny " | ||
1113 | "unaligned fragment\n", dev->name); | ||
1114 | return 1; | ||
1115 | } | ||
1116 | } | ||
1117 | 1125 | ||
1118 | spin_lock_irqsave(&mp->lock, flags); | 1126 | if (nr_frags) { |
1127 | eth_tx_fill_frag_descs(mp, skb); | ||
1119 | 1128 | ||
1120 | if (!skb_shinfo(skb)->nr_frags) { | 1129 | length = skb_headlen(skb); |
1121 | if (skb->ip_summed != CHECKSUM_HW) { | 1130 | mp->tx_skb[tx_index] = 0; |
1122 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1123 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | ||
1124 | ETH_TX_FIRST_DESC | | ||
1125 | ETH_TX_LAST_DESC | | ||
1126 | 5 << ETH_TX_IHL_SHIFT; | ||
1127 | pkt_info.l4i_chk = 0; | ||
1128 | } else { | ||
1129 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | ||
1130 | ETH_TX_FIRST_DESC | | ||
1131 | ETH_TX_LAST_DESC | | ||
1132 | ETH_GEN_TCP_UDP_CHECKSUM | | ||
1133 | ETH_GEN_IP_V_4_CHECKSUM | | ||
1134 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | ||
1135 | /* CPU already calculated pseudo header checksum. */ | ||
1136 | if ((skb->protocol == ETH_P_IP) && | ||
1137 | (skb->nh.iph->protocol == IPPROTO_UDP) ) { | ||
1138 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1139 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1140 | } else if ((skb->protocol == ETH_P_IP) && | ||
1141 | (skb->nh.iph->protocol == IPPROTO_TCP)) | ||
1142 | pkt_info.l4i_chk = skb->h.th->check; | ||
1143 | else { | ||
1144 | printk(KERN_ERR | ||
1145 | "%s: chksum proto != IPv4 TCP or UDP\n", | ||
1146 | dev->name); | ||
1147 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1148 | return 1; | ||
1149 | } | ||
1150 | } | ||
1151 | pkt_info.byte_cnt = skb->len; | ||
1152 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1153 | DMA_TO_DEVICE); | ||
1154 | pkt_info.return_info = skb; | ||
1155 | status = eth_port_send(mp, &pkt_info); | ||
1156 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1157 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1158 | dev->name); | ||
1159 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1160 | } else { | 1131 | } else { |
1161 | unsigned int frag; | 1132 | cmd_sts |= ETH_ZERO_PADDING | |
1133 | ETH_TX_LAST_DESC | | ||
1134 | ETH_TX_ENABLE_INTERRUPT; | ||
1135 | length = skb->len; | ||
1136 | mp->tx_skb[tx_index] = skb; | ||
1137 | } | ||
1162 | 1138 | ||
1163 | /* first frag which is skb header */ | 1139 | desc->byte_cnt = length; |
1164 | pkt_info.byte_cnt = skb_headlen(skb); | 1140 | desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); |
1165 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, | ||
1166 | skb_headlen(skb), | ||
1167 | DMA_TO_DEVICE); | ||
1168 | pkt_info.l4i_chk = 0; | ||
1169 | pkt_info.return_info = 0; | ||
1170 | |||
1171 | if (skb->ip_summed != CHECKSUM_HW) | ||
1172 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1173 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC | | ||
1174 | 5 << ETH_TX_IHL_SHIFT; | ||
1175 | else { | ||
1176 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC | | ||
1177 | ETH_GEN_TCP_UDP_CHECKSUM | | ||
1178 | ETH_GEN_IP_V_4_CHECKSUM | | ||
1179 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | ||
1180 | /* CPU already calculated pseudo header checksum. */ | ||
1181 | if ((skb->protocol == ETH_P_IP) && | ||
1182 | (skb->nh.iph->protocol == IPPROTO_UDP)) { | ||
1183 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1184 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1185 | } else if ((skb->protocol == ETH_P_IP) && | ||
1186 | (skb->nh.iph->protocol == IPPROTO_TCP)) | ||
1187 | pkt_info.l4i_chk = skb->h.th->check; | ||
1188 | else { | ||
1189 | printk(KERN_ERR | ||
1190 | "%s: chksum proto != IPv4 TCP or UDP\n", | ||
1191 | dev->name); | ||
1192 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1193 | return 1; | ||
1194 | } | ||
1195 | } | ||
1196 | 1141 | ||
1197 | status = eth_port_send(mp, &pkt_info); | 1142 | if (skb->ip_summed == CHECKSUM_HW) { |
1198 | if (status != ETH_OK) { | 1143 | BUG_ON(skb->protocol != ETH_P_IP); |
1199 | if ((status == ETH_ERROR)) | 1144 | |
1200 | printk(KERN_ERR | 1145 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | |
1201 | "%s: Error on transmitting packet\n", | 1146 | ETH_GEN_IP_V_4_CHECKSUM | |
1202 | dev->name); | 1147 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; |
1203 | if (status == ETH_QUEUE_FULL) | 1148 | |
1204 | printk("Error on Queue Full \n"); | 1149 | switch (skb->nh.iph->protocol) { |
1205 | if (status == ETH_QUEUE_LAST_RESOURCE) | 1150 | case IPPROTO_UDP: |
1206 | printk("Tx resource error \n"); | 1151 | cmd_sts |= ETH_UDP_FRAME; |
1152 | desc->l4i_chk = skb->h.uh->check; | ||
1153 | break; | ||
1154 | case IPPROTO_TCP: | ||
1155 | desc->l4i_chk = skb->h.th->check; | ||
1156 | break; | ||
1157 | default: | ||
1158 | BUG(); | ||
1207 | } | 1159 | } |
1208 | stats->tx_bytes += pkt_info.byte_cnt; | 1160 | } else { |
1209 | 1161 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | |
1210 | /* Check for the remaining frags */ | 1162 | cmd_sts |= 5 << ETH_TX_IHL_SHIFT; |
1211 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 1163 | desc->l4i_chk = 0; |
1212 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | 1164 | } |
1213 | pkt_info.l4i_chk = 0x0000; | 1165 | |
1214 | pkt_info.cmd_sts = 0x00000000; | 1166 | /* ensure all other descriptors are written before first cmd_sts */ |
1215 | 1167 | wmb(); | |
1216 | /* Last Frag enables interrupt and frees the skb */ | 1168 | desc->cmd_sts = cmd_sts; |
1217 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1218 | pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT | | ||
1219 | ETH_TX_LAST_DESC; | ||
1220 | pkt_info.return_info = skb; | ||
1221 | } else { | ||
1222 | pkt_info.return_info = 0; | ||
1223 | } | ||
1224 | pkt_info.l4i_chk = 0; | ||
1225 | pkt_info.byte_cnt = this_frag->size; | ||
1226 | 1169 | ||
1227 | pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page, | 1170 | /* ensure all descriptors are written before poking hardware */ |
1228 | this_frag->page_offset, | 1171 | wmb(); |
1229 | this_frag->size, | 1172 | mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED); |
1230 | DMA_TO_DEVICE); | ||
1231 | 1173 | ||
1232 | status = eth_port_send(mp, &pkt_info); | 1174 | mp->tx_desc_count += nr_frags + 1; |
1175 | } | ||
1233 | 1176 | ||
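For reference, the rewritten submit path above builds the whole descriptor command word (checksum-offload flags, the IHL field, first/last markers) before publishing it, and uses two write barriers: one so every other descriptor field is visible before cmd_sts hands ownership to the DMA engine, and one before the transmit-queue command register is poked. A minimal sketch of that ordering, assuming a simplified stand-in descriptor layout and stubbed barrier/register helpers rather than the driver's real structures:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_BUFFER_OWNED_BY_DMA   0x80000000
    #define ETH_TX_FIRST_DESC         0x00200000
    #define ETH_TX_LAST_DESC          0x00100000
    #define ETH_TX_ENABLE_INTERRUPT   0x00800000
    #define ETH_GEN_TCP_UDP_CHECKSUM  0x00020000
    #define ETH_GEN_IP_V_4_CHECKSUM   0x00040000
    #define ETH_TX_IHL_SHIFT          11

    struct tx_desc {                      /* simplified stand-in for eth_tx_desc */
            uint16_t byte_cnt;
            uint16_t l4i_chk;
            uint32_t cmd_sts;
            uint32_t buf_ptr;
    };

    static void barrier_wmb(void)  { /* stands in for wmb() */ }
    static void kick_tx_queue(void) { /* stands in for the queue-command register write */ }

    /* Publish a single-buffer packet: fill every field, flip ownership last. */
    static void submit_one(struct tx_desc *desc, uint32_t buf, uint16_t len,
                           int hw_csum, uint8_t ihl, uint16_t l4_csum)
    {
            uint32_t cmd = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC | ETH_TX_ENABLE_INTERRUPT;

            if (hw_csum) {
                    cmd |= ETH_GEN_TCP_UDP_CHECKSUM | ETH_GEN_IP_V_4_CHECKSUM |
                           (uint32_t)ihl << ETH_TX_IHL_SHIFT;
                    desc->l4i_chk = l4_csum;      /* pseudo-header checksum seed */
            } else {
                    cmd |= 5 << ETH_TX_IHL_SHIFT; /* errata BTS #50: IHL must be 5 without HW csum */
                    desc->l4i_chk = 0;
            }

            desc->byte_cnt = len;
            desc->buf_ptr = buf;

            barrier_wmb();                        /* all fields visible before ownership flips */
            desc->cmd_sts = cmd | ETH_BUFFER_OWNED_BY_DMA;

            barrier_wmb();                        /* descriptor visible before hardware is poked */
            kick_tx_queue();
    }

    int main(void)
    {
            struct tx_desc d = {0, 0, 0, 0};

            submit_one(&d, 0x1000, 64, 1, 5, 0xabcd);
            printf("cmd_sts = 0x%08x\n", (unsigned int)d.cmd_sts);
            return 0;
    }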
1234 | if (status != ETH_OK) { | 1177 | /** |
1235 | if ((status == ETH_ERROR)) | 1178 | * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission |
1236 | printk(KERN_ERR "%s: Error on " | 1179 | * |
1237 | "transmitting packet\n", | 1180 | */ |
1238 | dev->name); | 1181 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1182 | { | ||
1183 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1184 | struct net_device_stats *stats = &mp->stats; | ||
1185 | unsigned long flags; | ||
1239 | 1186 | ||
1240 | if (status == ETH_QUEUE_LAST_RESOURCE) | 1187 | BUG_ON(netif_queue_stopped(dev)); |
1241 | printk("Tx resource error \n"); | 1188 | BUG_ON(skb == NULL); |
1189 | BUG_ON(mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB); | ||
1242 | 1190 | ||
1243 | if (status == ETH_QUEUE_FULL) | 1191 | if (has_tiny_unaligned_frags(skb)) { |
1244 | printk("Queue is full \n"); | 1192 | if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { |
1245 | } | 1193 | stats->tx_dropped++; |
1246 | stats->tx_bytes += pkt_info.byte_cnt; | 1194 | printk(KERN_DEBUG "%s: failed to linearize tiny " |
1195 | "unaligned fragment\n", dev->name); | ||
1196 | return 1; | ||
1247 | } | 1197 | } |
1248 | } | 1198 | } |
1249 | #else | ||
1250 | spin_lock_irqsave(&mp->lock, flags); | ||
1251 | 1199 | ||
1252 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | | 1200 | spin_lock_irqsave(&mp->lock, flags); |
1253 | ETH_TX_LAST_DESC; | ||
1254 | pkt_info.l4i_chk = 0; | ||
1255 | pkt_info.byte_cnt = skb->len; | ||
1256 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1257 | DMA_TO_DEVICE); | ||
1258 | pkt_info.return_info = skb; | ||
1259 | status = eth_port_send(mp, &pkt_info); | ||
1260 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1261 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1262 | dev->name); | ||
1263 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1264 | #endif | ||
1265 | |||
1266 | /* Check if TX queue can handle another skb. If not, then | ||
1267 | * signal higher layers to stop requesting TX | ||
1268 | */ | ||
1269 | if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) | ||
1270 | /* | ||
1271 | * Stop getting skb's from upper layers. | ||
1272 | * Getting skb's from upper layers will be enabled again after | ||
1273 | * packets are released. | ||
1274 | */ | ||
1275 | netif_stop_queue(dev); | ||
1276 | 1201 | ||
1277 | /* Update statistics and start of transmission time */ | 1202 | eth_tx_submit_descs_for_skb(mp, skb);
1203 | stats->tx_bytes += skb->len; | ||
1278 | stats->tx_packets++; | 1204 | stats->tx_packets++; |
1279 | dev->trans_start = jiffies; | 1205 | dev->trans_start = jiffies; |
1280 | 1206 | ||
1207 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) | ||
1208 | netif_stop_queue(dev); | ||
1209 | |||
1281 | spin_unlock_irqrestore(&mp->lock, flags); | 1210 | spin_unlock_irqrestore(&mp->lock, flags); |
1282 | 1211 | ||
1283 | return 0; /* success */ | 1212 | return 0; /* success */ |
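The transmit handler above stops the upper layers whenever fewer than MAX_DESCS_PER_SKB descriptors remain free, so a worst-case scatter/gather skb can always be accepted while the queue is running. A small self-contained sketch of that accounting, with a hypothetical ring structure in place of mv643xx_private and the reclaim side outlined for completeness (the driver's actual reclaim path is not shown in this hunk); the MAX_DESCS_PER_SKB value assumes MAX_SKB_FRAGS of 18:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DESCS_PER_SKB 19          /* assumption: MAX_SKB_FRAGS (18) + 1 */

    struct tx_ring {                      /* hypothetical stand-in for the driver state */
            int size;                     /* total descriptors in the ring */
            int in_use;                   /* descriptors currently owned by hardware */
            bool stopped;                 /* mirrors netif_queue_stopped() */
    };

    /* After queueing an skb that consumed 'used' descriptors. */
    static void tx_account_submit(struct tx_ring *r, int used)
    {
            r->in_use += used;
            if (r->size - r->in_use < MAX_DESCS_PER_SKB)
                    r->stopped = true;    /* netif_stop_queue() */
    }

    /* After the completion path reclaimed 'freed' descriptors. */
    static void tx_account_complete(struct tx_ring *r, int freed)
    {
            r->in_use -= freed;
            if (r->stopped && r->size - r->in_use >= MAX_DESCS_PER_SKB)
                    r->stopped = false;   /* netif_wake_queue() */
    }

    int main(void)
    {
            struct tx_ring r = { .size = 64, .in_use = 0, .stopped = false };

            tx_account_submit(&r, MAX_DESCS_PER_SKB);     /* worst-case skb */
            tx_account_submit(&r, 30);
            printf("stopped after 49/64 in use: %d\n", r.stopped);
            tx_account_complete(&r, 30);
            printf("running again: %d\n", !r.stopped);
            return 0;
    }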
@@ -1306,16 +1235,45 @@ static void mv643xx_netpoll(struct net_device *netdev) | |||
1306 | struct mv643xx_private *mp = netdev_priv(netdev); | 1235 | struct mv643xx_private *mp = netdev_priv(netdev); |
1307 | int port_num = mp->port_num; | 1236 | int port_num = mp->port_num; |
1308 | 1237 | ||
1309 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); | 1238 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
1310 | /* wait for previous write to complete */ | 1239 | /* wait for previous write to complete */ |
1311 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 1240 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
1312 | 1241 | ||
1313 | mv643xx_eth_int_handler(netdev->irq, netdev, NULL); | 1242 | mv643xx_eth_int_handler(netdev->irq, netdev, NULL); |
1314 | 1243 | ||
1315 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); | 1244 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
1316 | } | 1245 | } |
1317 | #endif | 1246 | #endif |
1318 | 1247 | ||
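The netpoll hunk above also shows a common MMIO idiom: mask the port interrupts, read the mask register back so the posted write is flushed before the handler runs, then unmask afterwards. A sketch of that pattern over a simulated register file, with mv_read/mv_write stubbed and the register offset purely illustrative; the unmask value is derived from the new header's queue-0 cause bits:

    #include <stdint.h>

    #define ETH_INT_MASK_ALL        0x00000000
    #define ETH_INT_CAUSE_RX        0x00000204   /* queue-0 "RX done" | "RX error", per the new header */
    #define ETH_INT_CAUSE_EXT       0x00000002
    #define ETH_INT_UNMASK_ALL      (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)

    #define INT_MASK_REG            0x10         /* hypothetical offset into the fake register file */

    static uint32_t regs[64];                    /* simulated MMIO window */
    static uint32_t mv_read_stub(unsigned int off)              { return regs[off]; }
    static void     mv_write_stub(unsigned int off, uint32_t v) { regs[off] = v; }
    static void     int_handler_stub(void)       { /* mv643xx_eth_int_handler() would run here */ }

    static void netpoll_sketch(void)
    {
            mv_write_stub(INT_MASK_REG, ETH_INT_MASK_ALL);   /* mask the port's interrupts */
            (void)mv_read_stub(INT_MASK_REG);                /* read back to flush the posted write */
            int_handler_stub();                              /* service the port by polling */
            mv_write_stub(INT_MASK_REG, ETH_INT_UNMASK_ALL); /* restore the normal mask */
    }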
1248 | static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address, | ||
1249 | int speed, int duplex, | ||
1250 | struct ethtool_cmd *cmd) | ||
1251 | { | ||
1252 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1253 | |||
1254 | memset(cmd, 0, sizeof(*cmd)); | ||
1255 | |||
1256 | cmd->port = PORT_MII; | ||
1257 | cmd->transceiver = XCVR_INTERNAL; | ||
1258 | cmd->phy_address = phy_address; | ||
1259 | |||
1260 | if (speed == 0) { | ||
1261 | cmd->autoneg = AUTONEG_ENABLE; | ||
1262 | /* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */ | ||
1263 | cmd->speed = SPEED_100; | ||
1264 | cmd->advertising = ADVERTISED_10baseT_Half | | ||
1265 | ADVERTISED_10baseT_Full | | ||
1266 | ADVERTISED_100baseT_Half | | ||
1267 | ADVERTISED_100baseT_Full; | ||
1268 | if (mp->mii.supports_gmii) | ||
1269 | cmd->advertising |= ADVERTISED_1000baseT_Full; | ||
1270 | } else { | ||
1271 | cmd->autoneg = AUTONEG_DISABLE; | ||
1272 | cmd->speed = speed; | ||
1273 | cmd->duplex = duplex; | ||
1274 | } | ||
1275 | } | ||
1276 | |||
1319 | /*/ | 1277 | /*/ |
1320 | * mv643xx_eth_probe | 1278 | * mv643xx_eth_probe |
1321 | * | 1279 | * |
@@ -1336,6 +1294,9 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1336 | u8 *p; | 1294 | u8 *p; |
1337 | struct resource *res; | 1295 | struct resource *res; |
1338 | int err; | 1296 | int err; |
1297 | struct ethtool_cmd cmd; | ||
1298 | int duplex = DUPLEX_HALF; | ||
1299 | int speed = 0; /* default to auto-negotiation */ | ||
1339 | 1300 | ||
1340 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); | 1301 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); |
1341 | if (!dev) | 1302 | if (!dev) |
@@ -1373,6 +1334,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1373 | dev->tx_queue_len = mp->tx_ring_size; | 1334 | dev->tx_queue_len = mp->tx_ring_size; |
1374 | dev->base_addr = 0; | 1335 | dev->base_addr = 0; |
1375 | dev->change_mtu = mv643xx_eth_change_mtu; | 1336 | dev->change_mtu = mv643xx_eth_change_mtu; |
1337 | dev->do_ioctl = mv643xx_eth_do_ioctl; | ||
1376 | SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops); | 1338 | SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops); |
1377 | 1339 | ||
1378 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | 1340 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX |
@@ -1393,33 +1355,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1393 | 1355 | ||
1394 | /* set default config values */ | 1356 | /* set default config values */ |
1395 | eth_port_uc_addr_get(dev, dev->dev_addr); | 1357 | eth_port_uc_addr_get(dev, dev->dev_addr); |
1396 | mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE; | ||
1397 | mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE; | ||
1398 | mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE; | ||
1399 | mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE; | ||
1400 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | 1358 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; |
1401 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | 1359 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; |
1402 | 1360 | ||
1403 | pd = pdev->dev.platform_data; | 1361 | pd = pdev->dev.platform_data; |
1404 | if (pd) { | 1362 | if (pd) { |
1405 | if (pd->mac_addr != NULL) | 1363 | if (pd->mac_addr) |
1406 | memcpy(dev->dev_addr, pd->mac_addr, 6); | 1364 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
1407 | 1365 | ||
1408 | if (pd->phy_addr || pd->force_phy_addr) | 1366 | if (pd->phy_addr || pd->force_phy_addr) |
1409 | ethernet_phy_set(port_num, pd->phy_addr); | 1367 | ethernet_phy_set(port_num, pd->phy_addr); |
1410 | 1368 | ||
1411 | if (pd->port_config || pd->force_port_config) | ||
1412 | mp->port_config = pd->port_config; | ||
1413 | |||
1414 | if (pd->port_config_extend || pd->force_port_config_extend) | ||
1415 | mp->port_config_extend = pd->port_config_extend; | ||
1416 | |||
1417 | if (pd->port_sdma_config || pd->force_port_sdma_config) | ||
1418 | mp->port_sdma_config = pd->port_sdma_config; | ||
1419 | |||
1420 | if (pd->port_serial_control || pd->force_port_serial_control) | ||
1421 | mp->port_serial_control = pd->port_serial_control; | ||
1422 | |||
1423 | if (pd->rx_queue_size) | 1369 | if (pd->rx_queue_size) |
1424 | mp->rx_ring_size = pd->rx_queue_size; | 1370 | mp->rx_ring_size = pd->rx_queue_size; |
1425 | 1371 | ||
@@ -1435,16 +1381,33 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1435 | mp->rx_sram_size = pd->rx_sram_size; | 1381 | mp->rx_sram_size = pd->rx_sram_size; |
1436 | mp->rx_sram_addr = pd->rx_sram_addr; | 1382 | mp->rx_sram_addr = pd->rx_sram_addr; |
1437 | } | 1383 | } |
1384 | |||
1385 | duplex = pd->duplex; | ||
1386 | speed = pd->speed; | ||
1438 | } | 1387 | } |
1439 | 1388 | ||
1389 | /* Hook up MII support for ethtool */ | ||
1390 | mp->mii.dev = dev; | ||
1391 | mp->mii.mdio_read = mv643xx_mdio_read; | ||
1392 | mp->mii.mdio_write = mv643xx_mdio_write; | ||
1393 | mp->mii.phy_id = ethernet_phy_get(port_num); | ||
1394 | mp->mii.phy_id_mask = 0x3f; | ||
1395 | mp->mii.reg_num_mask = 0x1f; | ||
1396 | |||
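The probe hunk above wires a struct mii_if_info so the generic MII library can drive the PHY through the port's SMI registers; the whole contract is two register accessors plus the PHY address and the address/register masks, and the library supplies link checking, autoneg restart and ioctl handling on top. A userspace analogue of that contract, with a fake SMI backend standing in for eth_port_read/write_smi_reg() and a link check that mirrors mii_link_ok()'s double BMSR read:

    #include <stdint.h>
    #include <stdio.h>

    struct mii_like {                     /* cut-down analogue of struct mii_if_info */
            int phy_id;
            int phy_id_mask;              /* 0x3f, as set in the probe code above */
            int reg_num_mask;             /* 0x1f: 32 MII registers */
            int (*mdio_read)(void *ctx, int phy_id, int reg);
            void (*mdio_write)(void *ctx, int phy_id, int reg, int val);
            void *ctx;
    };

    #define MII_BMSR     0x01
    #define BMSR_LSTATUS 0x0004

    /* Fake SMI backend: pretends the link is up. */
    static int fake_smi_read(void *ctx, int phy_id, int reg)
    {
            (void)ctx; (void)phy_id;
            return reg == MII_BMSR ? BMSR_LSTATUS : 0;
    }
    static void fake_smi_write(void *ctx, int phy_id, int reg, int val)
    {
            (void)ctx; (void)phy_id; (void)reg; (void)val;
    }

    static int link_ok(struct mii_like *mii)     /* mirrors mii_link_ok() */
    {
            /* BMSR link status is latched low, so read it twice */
            mii->mdio_read(mii->ctx, mii->phy_id, MII_BMSR);
            return mii->mdio_read(mii->ctx, mii->phy_id, MII_BMSR) & BMSR_LSTATUS;
    }

    int main(void)
    {
            struct mii_like mii = {
                    .phy_id = 8, .phy_id_mask = 0x3f, .reg_num_mask = 0x1f,
                    .mdio_read = fake_smi_read, .mdio_write = fake_smi_write,
            };
            printf("link %s\n", link_ok(&mii) ? "up" : "down");
            return 0;
    }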
1440 | err = ethernet_phy_detect(port_num); | 1397 | err = ethernet_phy_detect(port_num); |
1441 | if (err) { | 1398 | if (err) { |
1442 | pr_debug("MV643xx ethernet port %d: " | 1399 | pr_debug("MV643xx ethernet port %d: " |
1443 | "No PHY detected at addr %d\n", | 1400 | "No PHY detected at addr %d\n", |
1444 | port_num, ethernet_phy_get(port_num)); | 1401 | port_num, ethernet_phy_get(port_num)); |
1445 | return err; | 1402 | goto out; |
1446 | } | 1403 | } |
1447 | 1404 | ||
1405 | ethernet_phy_reset(port_num); | ||
1406 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); | ||
1407 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); | ||
1408 | mv643xx_eth_update_pscr(dev, &cmd); | ||
1409 | mv643xx_set_settings(dev, &cmd); | ||
1410 | |||
1448 | err = register_netdev(dev); | 1411 | err = register_netdev(dev); |
1449 | if (err) | 1412 | if (err) |
1450 | goto out; | 1413 | goto out; |
@@ -1689,26 +1652,9 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); | |||
1689 | * to the Rx descriptor ring to enable the reuse of this source. | 1652 | * to the Rx descriptor ring to enable the reuse of this source. |
1690 | * Return Rx resource is done using the eth_rx_return_buff API. | 1653 | * Return Rx resource is done using the eth_rx_return_buff API. |
1691 | * | 1654 | * |
1692 | * Transmit operation: | ||
1693 | * The eth_port_send API supports Scatter-Gather, which enables sending | ||
1694 | * a packet spanned over multiple buffers. This means that each | ||
1695 | * packet info structure given by the user and put into the Tx | ||
1696 | * descriptor ring will be transmitted only once the 'LAST' bit | ||
1697 | * has been set in the packet info command status field. This | ||
1698 | * API also considers restrictions regarding buffer alignments and | ||
1699 | * sizes. | ||
1700 | * The user must return a Tx resource after ensuring the buffer | ||
1701 | * has been transmitted to enable the Tx ring indexes to update. | ||
1702 | * | ||
1703 | * BOARD LAYOUT | ||
1704 | * This device is on-board. No jumper diagram is necessary. | ||
1705 | * | ||
1706 | * EXTERNAL INTERFACE | ||
1707 | * | ||
1708 | * Prior to calling the initialization routine eth_port_init() the user | 1655 | * Prior to calling the initialization routine eth_port_init() the user |
1709 | * must set the following fields under mv643xx_private struct: | 1656 | * must set the following fields under mv643xx_private struct: |
1710 | * port_num User Ethernet port number. | 1657 | * port_num User Ethernet port number. |
1711 | * port_mac_addr[6] User defined port MAC address. | ||
1712 | * port_config User port configuration value. | 1658 | * port_config User port configuration value. |
1713 | * port_config_extend User port config extend value. | 1659 | * port_config_extend User port config extend value. |
1714 | * port_sdma_config User port SDMA config value. | 1660 | * port_sdma_config User port SDMA config value. |
@@ -1725,20 +1671,12 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); | |||
1725 | * return_info Tx/Rx user resource return information. | 1671 | * return_info Tx/Rx user resource return information. |
1726 | */ | 1672 | */ |
1727 | 1673 | ||
1728 | /* defines */ | ||
1729 | /* SDMA command macros */ | ||
1730 | #define ETH_ENABLE_TX_QUEUE(eth_port) \ | ||
1731 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1) | ||
1732 | |||
1733 | /* locals */ | ||
1734 | |||
1735 | /* PHY routines */ | 1674 | /* PHY routines */ |
1736 | static int ethernet_phy_get(unsigned int eth_port_num); | 1675 | static int ethernet_phy_get(unsigned int eth_port_num); |
1737 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | 1676 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); |
1738 | 1677 | ||
1739 | /* Ethernet Port routines */ | 1678 | /* Ethernet Port routines */ |
1740 | static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | 1679 | static void eth_port_set_filter_table_entry(int table, unsigned char entry); |
1741 | int option); | ||
1742 | 1680 | ||
1743 | /* | 1681 | /* |
1744 | * eth_port_init - Initialize the Ethernet port driver | 1682 | * eth_port_init - Initialize the Ethernet port driver |
@@ -1766,17 +1704,11 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | |||
1766 | */ | 1704 | */ |
1767 | static void eth_port_init(struct mv643xx_private *mp) | 1705 | static void eth_port_init(struct mv643xx_private *mp) |
1768 | { | 1706 | { |
1769 | mp->port_rx_queue_command = 0; | ||
1770 | mp->port_tx_queue_command = 0; | ||
1771 | |||
1772 | mp->rx_resource_err = 0; | 1707 | mp->rx_resource_err = 0; |
1773 | mp->tx_resource_err = 0; | ||
1774 | 1708 | ||
1775 | eth_port_reset(mp->port_num); | 1709 | eth_port_reset(mp->port_num); |
1776 | 1710 | ||
1777 | eth_port_init_mac_tables(mp->port_num); | 1711 | eth_port_init_mac_tables(mp->port_num); |
1778 | |||
1779 | ethernet_phy_reset(mp->port_num); | ||
1780 | } | 1712 | } |
1781 | 1713 | ||
1782 | /* | 1714 | /* |
@@ -1798,7 +1730,7 @@ static void eth_port_init(struct mv643xx_private *mp) | |||
1798 | * and ether_init_rx_desc_ring for Rx queues). | 1730 | * and ether_init_rx_desc_ring for Rx queues). |
1799 | * | 1731 | * |
1800 | * INPUT: | 1732 | * INPUT: |
1801 | * struct mv643xx_private *mp Ethernet port control struct | 1733 | * dev - a pointer to the required interface |
1802 | * | 1734 | * |
1803 | * OUTPUT: | 1735 | * OUTPUT: |
1804 | * Ethernet port is ready to receive and transmit. | 1736 | * Ethernet port is ready to receive and transmit. |
@@ -1806,10 +1738,13 @@ static void eth_port_init(struct mv643xx_private *mp) | |||
1806 | * RETURN: | 1738 | * RETURN: |
1807 | * None. | 1739 | * None. |
1808 | */ | 1740 | */ |
1809 | static void eth_port_start(struct mv643xx_private *mp) | 1741 | static void eth_port_start(struct net_device *dev) |
1810 | { | 1742 | { |
1743 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1811 | unsigned int port_num = mp->port_num; | 1744 | unsigned int port_num = mp->port_num; |
1812 | int tx_curr_desc, rx_curr_desc; | 1745 | int tx_curr_desc, rx_curr_desc; |
1746 | u32 pscr; | ||
1747 | struct ethtool_cmd ethtool_cmd; | ||
1813 | 1748 | ||
1814 | /* Assignment of Tx CTRP of given queue */ | 1749 | /* Assignment of Tx CTRP of given queue */ |
1815 | tx_curr_desc = mp->tx_curr_desc_q; | 1750 | tx_curr_desc = mp->tx_curr_desc_q; |
@@ -1822,37 +1757,45 @@ static void eth_port_start(struct mv643xx_private *mp) | |||
1822 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | 1757 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); |
1823 | 1758 | ||
1824 | /* Add the assigned Ethernet address to the port's address table */ | 1759 | /* Add the assigned Ethernet address to the port's address table */ |
1825 | eth_port_uc_addr_set(port_num, mp->port_mac_addr); | 1760 | eth_port_uc_addr_set(port_num, dev->dev_addr); |
1826 | 1761 | ||
1827 | /* Assign port configuration and command. */ | 1762 | /* Assign port configuration and command. */ |
1828 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config); | 1763 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), |
1764 | MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE); | ||
1829 | 1765 | ||
1830 | mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), | 1766 | mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), |
1831 | mp->port_config_extend); | 1767 | MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE); |
1832 | 1768 | ||
1769 | pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
1833 | 1770 | ||
1834 | /* Increase the Rx side buffer size if supporting GigE */ | 1771 | pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS); |
1835 | if (mp->port_serial_control & MV643XX_ETH_SET_GMII_SPEED_TO_1000) | 1772 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1836 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1773 | |
1837 | (mp->port_serial_control & 0xfff1ffff) | (0x5 << 17)); | 1774 | pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | |
1838 | else | 1775 | MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII | |
1839 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1776 | MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX | |
1840 | mp->port_serial_control); | 1777 | MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | |
1778 | MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED; | ||
1841 | 1779 | ||
1842 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1780 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1843 | mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)) | | 1781 | |
1844 | MV643XX_ETH_SERIAL_PORT_ENABLE); | 1782 | pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE; |
1783 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
1845 | 1784 | ||
1846 | /* Assign port SDMA configuration */ | 1785 | /* Assign port SDMA configuration */ |
1847 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), | 1786 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), |
1848 | mp->port_sdma_config); | 1787 | MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE); |
1849 | 1788 | ||
1850 | /* Enable port Rx. */ | 1789 | /* Enable port Rx. */ |
1851 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | 1790 | mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); |
1852 | mp->port_rx_queue_command); | ||
1853 | 1791 | ||
1854 | /* Disable port bandwidth limits by clearing MTU register */ | 1792 | /* Disable port bandwidth limits by clearing MTU register */ |
1855 | mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); | 1793 | mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); |
1794 | |||
1795 | /* save phy settings across reset */ | ||
1796 | mv643xx_get_settings(dev, ðtool_cmd); | ||
1797 | ethernet_phy_reset(mp->port_num); | ||
1798 | mv643xx_set_settings(dev, ðtool_cmd); | ||
1856 | } | 1799 | } |
1857 | 1800 | ||
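eth_port_start() now programs the port serial control register with a staged read-modify-write sequence (quiesce, set the "don't auto-negotiate" mode bits, then re-enable) and preserves the PHY settings across the reset by reading them through ethtool first and re-applying them after. A compact sketch of that read-modify-write idiom over a fake register; the bit positions below are illustrative only, the real values live in <linux/mv643xx.h>:

    #include <stdint.h>

    #define SERIAL_PORT_ENABLE          (1u << 0)   /* illustrative bit positions only */
    #define FORCE_LINK_PASS             (1u << 1)
    #define DISABLE_AUTO_NEG_FOR_DUPLX  (1u << 2)
    #define DISABLE_AUTO_NEG_SPEED      (1u << 3)
    #define DO_NOT_FORCE_LINK_FAIL      (1u << 4)

    static uint32_t pscr_reg;                        /* fake PSCR */
    static uint32_t read_pscr(void)        { return pscr_reg; }
    static void     write_pscr(uint32_t v) { pscr_reg = v; }

    static void port_start_pscr_sequence(void)
    {
            uint32_t pscr = read_pscr();

            /* 1: quiesce the port before changing its mode bits */
            pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
            write_pscr(pscr);

            /* 2: disable port-level auto-negotiation so speed/duplex come
             *    from the settings applied later through the ethtool path */
            pscr |= DISABLE_AUTO_NEG_FOR_DUPLX | DISABLE_AUTO_NEG_SPEED |
                    DO_NOT_FORCE_LINK_FAIL;
            write_pscr(pscr);

            /* 3: finally re-enable the port */
            pscr |= SERIAL_PORT_ENABLE;
            write_pscr(pscr);
    }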
1858 | /* | 1801 | /* |
@@ -1866,8 +1809,9 @@ static void eth_port_start(struct mv643xx_private *mp) | |||
1866 | * char * p_addr Address to be set | 1809 | * char * p_addr Address to be set |
1867 | * | 1810 | * |
1868 | * OUTPUT: | 1811 | * OUTPUT: |
1869 | * Set MAC address low and high registers. also calls eth_port_uc_addr() | 1812 | * Set MAC address low and high registers. also calls |
1870 | * To set the unicast table with the proper information. | 1813 | * eth_port_set_filter_table_entry() to set the unicast |
1814 | * table with the proper information. | ||
1871 | * | 1815 | * |
1872 | * RETURN: | 1816 | * RETURN: |
1873 | * N/A. | 1817 | * N/A. |
@@ -1878,6 +1822,7 @@ static void eth_port_uc_addr_set(unsigned int eth_port_num, | |||
1878 | { | 1822 | { |
1879 | unsigned int mac_h; | 1823 | unsigned int mac_h; |
1880 | unsigned int mac_l; | 1824 | unsigned int mac_l; |
1825 | int table; | ||
1881 | 1826 | ||
1882 | mac_l = (p_addr[4] << 8) | (p_addr[5]); | 1827 | mac_l = (p_addr[4] << 8) | (p_addr[5]); |
1883 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | | 1828 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | |
@@ -1887,9 +1832,8 @@ static void eth_port_uc_addr_set(unsigned int eth_port_num, | |||
1887 | mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); | 1832 | mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); |
1888 | 1833 | ||
1889 | /* Accept frames of this address */ | 1834 | /* Accept frames of this address */ |
1890 | eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR); | 1835 | table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(eth_port_num); |
1891 | 1836 | eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); | |
1892 | return; | ||
1893 | } | 1837 | } |
1894 | 1838 | ||
1895 | /* | 1839 | /* |
@@ -1928,72 +1872,6 @@ static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr) | |||
1928 | } | 1872 | } |
1929 | 1873 | ||
1930 | /* | 1874 | /* |
1931 | * eth_port_uc_addr - This function Set the port unicast address table | ||
1932 | * | ||
1933 | * DESCRIPTION: | ||
1934 | * This function locates the proper entry in the Unicast table for the | ||
1935 | * specified MAC nibble and sets its properties according to function | ||
1936 | * parameters. | ||
1937 | * | ||
1938 | * INPUT: | ||
1939 | * unsigned int eth_port_num Port number. | ||
1940 | * unsigned char uc_nibble Unicast MAC Address last nibble. | ||
1941 | * int option 0 = Add, 1 = remove address. | ||
1942 | * | ||
1943 | * OUTPUT: | ||
1944 | * This function adds/removes MAC addresses from the port unicast address | ||
1945 | * table. | ||
1946 | * | ||
1947 | * RETURN: | ||
1948 | * true if the operation succeeded. | ||
1949 | * false if option parameter is invalid. | ||
1950 | * | ||
1951 | */ | ||
1952 | static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | ||
1953 | int option) | ||
1954 | { | ||
1955 | unsigned int unicast_reg; | ||
1956 | unsigned int tbl_offset; | ||
1957 | unsigned int reg_offset; | ||
1958 | |||
1959 | /* Locate the Unicast table entry */ | ||
1960 | uc_nibble = (0xf & uc_nibble); | ||
1961 | tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */ | ||
1962 | reg_offset = uc_nibble % 4; /* Entry offset within the above register */ | ||
1963 | |||
1964 | switch (option) { | ||
1965 | case REJECT_MAC_ADDR: | ||
1966 | /* Clear accepts frame bit at given unicast DA table entry */ | ||
1967 | unicast_reg = mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1968 | (eth_port_num) + tbl_offset)); | ||
1969 | |||
1970 | unicast_reg &= (0x0E << (8 * reg_offset)); | ||
1971 | |||
1972 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1973 | (eth_port_num) + tbl_offset), unicast_reg); | ||
1974 | break; | ||
1975 | |||
1976 | case ACCEPT_MAC_ADDR: | ||
1977 | /* Set accepts frame bit at unicast DA filter table entry */ | ||
1978 | unicast_reg = | ||
1979 | mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1980 | (eth_port_num) + tbl_offset)); | ||
1981 | |||
1982 | unicast_reg |= (0x01 << (8 * reg_offset)); | ||
1983 | |||
1984 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1985 | (eth_port_num) + tbl_offset), unicast_reg); | ||
1986 | |||
1987 | break; | ||
1988 | |||
1989 | default: | ||
1990 | return 0; | ||
1991 | } | ||
1992 | |||
1993 | return 1; | ||
1994 | } | ||
1995 | |||
1996 | /* | ||
1997 | * The entries in each table are indexed by a hash of a packet's MAC | 1875 | * The entries in each table are indexed by a hash of a packet's MAC |
1998 | * address. One bit in each entry determines whether the packet is | 1876 | * address. One bit in each entry determines whether the packet is |
1999 | * accepted. There are 4 entries (each 8 bits wide) in each register | 1877 | * accepted. There are 4 entries (each 8 bits wide) in each register |
@@ -2205,8 +2083,8 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num) | |||
2205 | 2083 | ||
2206 | /* Clear DA filter unicast table (Ex_dFUT) */ | 2084 | /* Clear DA filter unicast table (Ex_dFUT) */ |
2207 | for (table_index = 0; table_index <= 0xC; table_index += 4) | 2085 | for (table_index = 0; table_index <= 0xC; table_index += 4) |
2208 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | 2086 | mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE |
2209 | (eth_port_num) + table_index), 0); | 2087 | (eth_port_num) + table_index, 0); |
2210 | 2088 | ||
2211 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | 2089 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { |
2212 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | 2090 | /* Clear DA filter special multicast table (Ex_dFSMT) */ |
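Both the new eth_port_set_filter_table_entry() call (fed with p_addr[5] & 0x0f) and the removed eth_port_uc_addr() rely on the table layout described in the surviving comment: each 32-bit table register holds four 8-bit entries, and bit 0 of an entry marks the address as accepted. A standalone sketch of the index arithmetic over an in-memory copy of such a table; the register-offset-times-4 detail of the MMIO version is folded into plain array indexing here:

    #include <stdint.h>
    #include <stdio.h>

    /* 16 unicast entries (last nibble of the MAC), packed 4 per register. */
    static uint32_t unicast_table[4];

    static void filter_table_set(unsigned int entry)
    {
            unsigned int reg  = entry / 4;               /* which 32-bit register */
            unsigned int lane = entry % 4;               /* which byte inside it */

            unicast_table[reg] |= 0x01u << (8 * lane);   /* "accept frame" bit */
    }

    static int filter_table_test(unsigned int entry)
    {
            return (unicast_table[entry / 4] >> (8 * (entry % 4))) & 0x01;
    }

    int main(void)
    {
            uint8_t mac5 = 0xA7;                         /* last byte of the station MAC */

            filter_table_set(mac5 & 0x0f);               /* same indexing as the driver */
            printf("entry 0x%x accepted: %d\n",
                   (unsigned)(mac5 & 0x0f), filter_table_test(mac5 & 0x0f));
            return 0;
    }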
@@ -2389,6 +2267,73 @@ static void ethernet_phy_reset(unsigned int eth_port_num) | |||
2389 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | 2267 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); |
2390 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ | 2268 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ |
2391 | eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); | 2269 | eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); |
2270 | |||
2271 | /* wait for PHY to come out of reset */ | ||
2272 | do { | ||
2273 | udelay(1); | ||
2274 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | ||
2275 | } while (phy_reg_data & 0x8000); | ||
2276 | } | ||
2277 | |||
2278 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | ||
2279 | unsigned int queues) | ||
2280 | { | ||
2281 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); | ||
2282 | } | ||
2283 | |||
2284 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | ||
2285 | unsigned int queues) | ||
2286 | { | ||
2287 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues); | ||
2288 | } | ||
2289 | |||
2290 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) | ||
2291 | { | ||
2292 | u32 queues; | ||
2293 | |||
2294 | /* Stop Tx port activity. Check port Tx activity. */ | ||
2295 | queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2296 | & 0xFF; | ||
2297 | if (queues) { | ||
2298 | /* Issue stop command for active queues only */ | ||
2299 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), | ||
2300 | (queues << 8)); | ||
2301 | |||
2302 | /* Wait for all Tx activity to terminate. */ | ||
2303 | /* Check port cause register that all Tx queues are stopped */ | ||
2304 | while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2305 | & 0xFF) | ||
2306 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2307 | |||
2308 | /* Wait for Tx FIFO to empty */ | ||
2309 | while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) & | ||
2310 | ETH_PORT_TX_FIFO_EMPTY) | ||
2311 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2312 | } | ||
2313 | |||
2314 | return queues; | ||
2315 | } | ||
2316 | |||
2317 | static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) | ||
2318 | { | ||
2319 | u32 queues; | ||
2320 | |||
2321 | /* Stop Rx port activity. Check port Rx activity. */ | ||
2322 | queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2323 | & 0xFF; | ||
2324 | if (queues) { | ||
2325 | /* Issue stop command for active queues only */ | ||
2326 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | ||
2327 | (queues << 8)); | ||
2328 | |||
2329 | /* Wait for all Rx activity to terminate. */ | ||
2330 | /* Check port cause register that all Rx queues are stopped */ | ||
2331 | while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2332 | & 0xFF) | ||
2333 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2334 | } | ||
2335 | |||
2336 | return queues; | ||
2392 | } | 2337 | } |
2393 | 2338 | ||
2394 | /* | 2339 | /* |
@@ -2413,70 +2358,21 @@ static void eth_port_reset(unsigned int port_num) | |||
2413 | { | 2358 | { |
2414 | unsigned int reg_data; | 2359 | unsigned int reg_data; |
2415 | 2360 | ||
2416 | /* Stop Tx port activity. Check port Tx activity. */ | 2361 | mv643xx_eth_port_disable_tx(port_num); |
2417 | reg_data = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)); | 2362 | mv643xx_eth_port_disable_rx(port_num); |
2418 | |||
2419 | if (reg_data & 0xFF) { | ||
2420 | /* Issue stop command for active channels only */ | ||
2421 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), | ||
2422 | (reg_data << 8)); | ||
2423 | |||
2424 | /* Wait for all Tx activity to terminate. */ | ||
2425 | /* Check port cause register that all Tx queues are stopped */ | ||
2426 | while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2427 | & 0xFF) | ||
2428 | udelay(10); | ||
2429 | } | ||
2430 | |||
2431 | /* Stop Rx port activity. Check port Rx activity. */ | ||
2432 | reg_data = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)); | ||
2433 | |||
2434 | if (reg_data & 0xFF) { | ||
2435 | /* Issue stop command for active channels only */ | ||
2436 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | ||
2437 | (reg_data << 8)); | ||
2438 | |||
2439 | /* Wait for all Rx activity to terminate. */ | ||
2440 | /* Check port cause register that all Rx queues are stopped */ | ||
2441 | while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2442 | & 0xFF) | ||
2443 | udelay(10); | ||
2444 | } | ||
2445 | 2363 | ||
2446 | /* Clear all MIB counters */ | 2364 | /* Clear all MIB counters */ |
2447 | eth_clear_mib_counters(port_num); | 2365 | eth_clear_mib_counters(port_num); |
2448 | 2366 | ||
2449 | /* Reset the Enable bit in the Configuration Register */ | 2367 | /* Reset the Enable bit in the Configuration Register */ |
2450 | reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | 2368 | reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); |
2451 | reg_data &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; | 2369 | reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | |
2370 | MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | | ||
2371 | MV643XX_ETH_FORCE_LINK_PASS); | ||
2452 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); | 2372 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); |
2453 | } | 2373 | } |
2454 | 2374 | ||
2455 | 2375 | ||
2456 | static int eth_port_autoneg_supported(unsigned int eth_port_num) | ||
2457 | { | ||
2458 | unsigned int phy_reg_data0; | ||
2459 | |||
2460 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data0); | ||
2461 | |||
2462 | return phy_reg_data0 & 0x1000; | ||
2463 | } | ||
2464 | |||
2465 | static int eth_port_link_is_up(unsigned int eth_port_num) | ||
2466 | { | ||
2467 | unsigned int phy_reg_data1; | ||
2468 | |||
2469 | eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data1); | ||
2470 | |||
2471 | if (eth_port_autoneg_supported(eth_port_num)) { | ||
2472 | if (phy_reg_data1 & 0x20) /* auto-neg complete */ | ||
2473 | return 1; | ||
2474 | } else if (phy_reg_data1 & 0x4) /* link up */ | ||
2475 | return 1; | ||
2476 | |||
2477 | return 0; | ||
2478 | } | ||
2479 | |||
2480 | /* | 2376 | /* |
2481 | * eth_port_read_smi_reg - Read PHY registers | 2377 | * eth_port_read_smi_reg - Read PHY registers |
2482 | * | 2378 | * |
@@ -2582,250 +2478,21 @@ out: | |||
2582 | } | 2478 | } |
2583 | 2479 | ||
2584 | /* | 2480 | /* |
2585 | * eth_port_send - Send an Ethernet packet | 2481 | * Wrappers for MII support library. |
2586 | * | ||
2587 | * DESCRIPTION: | ||
2588 | * This routine sends a given packet described by the p_pkt_info parameter. | ||
2589 | * It supports transmitting a packet spanned over multiple buffers. The | ||
2590 | * routine updates 'curr' and 'first' indexes according to the packet | ||
2591 | * segment passed to the routine. In case the packet segment is first, | ||
2592 | * the 'first' index is updated. In any case, the 'curr' index is updated. | ||
2593 | * If the routine gets into a Tx resource error, it assigns the 'curr' index as | ||
2594 | * 'first'. This way the function can abort Tx process of multiple | ||
2595 | * descriptors per packet. | ||
2596 | * | ||
2597 | * INPUT: | ||
2598 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
2599 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2600 | * | ||
2601 | * OUTPUT: | ||
2602 | * Tx ring 'curr' and 'first' indexes are updated. | ||
2603 | * | ||
2604 | * RETURN: | ||
2605 | * ETH_QUEUE_FULL in case of Tx resource error. | ||
2606 | * ETH_ERROR in case the routine can not access Tx desc ring. | ||
2607 | * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource. | ||
2608 | * ETH_OK otherwise. | ||
2609 | * | ||
2610 | */ | ||
2611 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2612 | /* | ||
2613 | * Modified to include the first descriptor pointer in case of SG | ||
2614 | */ | 2482 | */ |
2615 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | 2483 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) |
2616 | struct pkt_info *p_pkt_info) | ||
2617 | { | ||
2618 | int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc; | ||
2619 | struct eth_tx_desc *current_descriptor; | ||
2620 | struct eth_tx_desc *first_descriptor; | ||
2621 | u32 command; | ||
2622 | |||
2623 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2624 | if (mp->tx_resource_err) | ||
2625 | return ETH_QUEUE_FULL; | ||
2626 | |||
2627 | /* | ||
2628 | * The hardware requires that each buffer that is <= 8 bytes | ||
2629 | * in length must be aligned on an 8 byte boundary. | ||
2630 | */ | ||
2631 | if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) { | ||
2632 | printk(KERN_ERR | ||
2633 | "mv643xx_eth port %d: packet size <= 8 problem\n", | ||
2634 | mp->port_num); | ||
2635 | return ETH_ERROR; | ||
2636 | } | ||
2637 | |||
2638 | mp->tx_ring_skbs++; | ||
2639 | BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); | ||
2640 | |||
2641 | /* Get the Tx Desc ring indexes */ | ||
2642 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2643 | tx_desc_used = mp->tx_used_desc_q; | ||
2644 | |||
2645 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2646 | |||
2647 | tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2648 | |||
2649 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2650 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2651 | current_descriptor->l4i_chk = p_pkt_info->l4i_chk; | ||
2652 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2653 | |||
2654 | command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC | | ||
2655 | ETH_BUFFER_OWNED_BY_DMA; | ||
2656 | if (command & ETH_TX_FIRST_DESC) { | ||
2657 | tx_first_desc = tx_desc_curr; | ||
2658 | mp->tx_first_desc_q = tx_first_desc; | ||
2659 | first_descriptor = current_descriptor; | ||
2660 | mp->tx_first_command = command; | ||
2661 | } else { | ||
2662 | tx_first_desc = mp->tx_first_desc_q; | ||
2663 | first_descriptor = &mp->p_tx_desc_area[tx_first_desc]; | ||
2664 | BUG_ON(first_descriptor == NULL); | ||
2665 | current_descriptor->cmd_sts = command; | ||
2666 | } | ||
2667 | |||
2668 | if (command & ETH_TX_LAST_DESC) { | ||
2669 | wmb(); | ||
2670 | first_descriptor->cmd_sts = mp->tx_first_command; | ||
2671 | |||
2672 | wmb(); | ||
2673 | ETH_ENABLE_TX_QUEUE(mp->port_num); | ||
2674 | |||
2675 | /* | ||
2676 | * Finish Tx packet. Update first desc in case of Tx resource | ||
2677 | * error */ | ||
2678 | tx_first_desc = tx_next_desc; | ||
2679 | mp->tx_first_desc_q = tx_first_desc; | ||
2680 | } | ||
2681 | |||
2682 | /* Check for ring index overlap in the Tx desc ring */ | ||
2683 | if (tx_next_desc == tx_desc_used) { | ||
2684 | mp->tx_resource_err = 1; | ||
2685 | mp->tx_curr_desc_q = tx_first_desc; | ||
2686 | |||
2687 | return ETH_QUEUE_LAST_RESOURCE; | ||
2688 | } | ||
2689 | |||
2690 | mp->tx_curr_desc_q = tx_next_desc; | ||
2691 | |||
2692 | return ETH_OK; | ||
2693 | } | ||
2694 | #else | ||
2695 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
2696 | struct pkt_info *p_pkt_info) | ||
2697 | { | 2484 | { |
2698 | int tx_desc_curr; | 2485 | int val; |
2699 | int tx_desc_used; | 2486 | struct mv643xx_private *mp = netdev_priv(dev); |
2700 | struct eth_tx_desc *current_descriptor; | ||
2701 | unsigned int command_status; | ||
2702 | |||
2703 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2704 | if (mp->tx_resource_err) | ||
2705 | return ETH_QUEUE_FULL; | ||
2706 | |||
2707 | mp->tx_ring_skbs++; | ||
2708 | BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); | ||
2709 | |||
2710 | /* Get the Tx Desc ring indexes */ | ||
2711 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2712 | tx_desc_used = mp->tx_used_desc_q; | ||
2713 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2714 | |||
2715 | command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC; | ||
2716 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2717 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2718 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2719 | |||
2720 | /* Set last desc with DMA ownership and interrupt enable. */ | ||
2721 | wmb(); | ||
2722 | current_descriptor->cmd_sts = command_status | | ||
2723 | ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT; | ||
2724 | |||
2725 | wmb(); | ||
2726 | ETH_ENABLE_TX_QUEUE(mp->port_num); | ||
2727 | |||
2728 | /* Finish Tx packet. Update first desc in case of Tx resource error */ | ||
2729 | tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2730 | |||
2731 | /* Update the current descriptor */ | ||
2732 | mp->tx_curr_desc_q = tx_desc_curr; | ||
2733 | |||
2734 | /* Check for ring index overlap in the Tx desc ring */ | ||
2735 | if (tx_desc_curr == tx_desc_used) { | ||
2736 | mp->tx_resource_err = 1; | ||
2737 | return ETH_QUEUE_LAST_RESOURCE; | ||
2738 | } | ||
2739 | 2487 | ||
2740 | return ETH_OK; | 2488 | eth_port_read_smi_reg(mp->port_num, location, &val); |
2489 | return val; | ||
2741 | } | 2490 | } |
2742 | #endif | ||
2743 | 2491 | ||
2744 | /* | 2492 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) |
2745 | * eth_tx_return_desc - Free all used Tx descriptors | ||
2746 | * | ||
2747 | * DESCRIPTION: | ||
2748 | * This routine returns the transmitted packet information to the caller. | ||
2749 | * It uses the 'first' index to support Tx desc return in case a transmit | ||
2750 | * of a packet spanned over multiple buffers is still in progress. | ||
2751 | * In case the Tx queue was in "resource error" condition, where there are | ||
2752 | * no available Tx resources, the function resets the resource error flag. | ||
2753 | * | ||
2754 | * INPUT: | ||
2755 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
2756 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2757 | * | ||
2758 | * OUTPUT: | ||
2759 | * Tx ring 'first' and 'used' indexes are updated. | ||
2760 | * | ||
2761 | * RETURN: | ||
2762 | * ETH_OK on success | ||
2763 | * ETH_ERROR otherwise. | ||
2764 | * | ||
2765 | */ | ||
2766 | static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, | ||
2767 | struct pkt_info *p_pkt_info) | ||
2768 | { | 2493 | { |
2769 | int tx_desc_used; | 2494 | struct mv643xx_private *mp = netdev_priv(dev); |
2770 | int tx_busy_desc; | 2495 | eth_port_write_smi_reg(mp->port_num, location, val); |
2771 | struct eth_tx_desc *p_tx_desc_used; | ||
2772 | unsigned int command_status; | ||
2773 | unsigned long flags; | ||
2774 | int err = ETH_OK; | ||
2775 | |||
2776 | spin_lock_irqsave(&mp->lock, flags); | ||
2777 | |||
2778 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2779 | tx_busy_desc = mp->tx_first_desc_q; | ||
2780 | #else | ||
2781 | tx_busy_desc = mp->tx_curr_desc_q; | ||
2782 | #endif | ||
2783 | |||
2784 | /* Get the Tx Desc ring indexes */ | ||
2785 | tx_desc_used = mp->tx_used_desc_q; | ||
2786 | |||
2787 | p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; | ||
2788 | |||
2789 | /* Sanity check */ | ||
2790 | if (p_tx_desc_used == NULL) { | ||
2791 | err = ETH_ERROR; | ||
2792 | goto out; | ||
2793 | } | ||
2794 | |||
2795 | /* Stop release. About to overlap the current available Tx descriptor */ | ||
2796 | if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) { | ||
2797 | err = ETH_ERROR; | ||
2798 | goto out; | ||
2799 | } | ||
2800 | |||
2801 | command_status = p_tx_desc_used->cmd_sts; | ||
2802 | |||
2803 | /* Still transmitting... */ | ||
2804 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { | ||
2805 | err = ETH_ERROR; | ||
2806 | goto out; | ||
2807 | } | ||
2808 | |||
2809 | /* Pass the packet information to the caller */ | ||
2810 | p_pkt_info->cmd_sts = command_status; | ||
2811 | p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; | ||
2812 | p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr; | ||
2813 | p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt; | ||
2814 | mp->tx_skb[tx_desc_used] = NULL; | ||
2815 | |||
2816 | /* Update the next descriptor to release. */ | ||
2817 | mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size; | ||
2818 | |||
2819 | /* Any Tx return cancels the Tx resource error status */ | ||
2820 | mp->tx_resource_err = 0; | ||
2821 | |||
2822 | BUG_ON(mp->tx_ring_skbs == 0); | ||
2823 | mp->tx_ring_skbs--; | ||
2824 | |||
2825 | out: | ||
2826 | spin_unlock_irqrestore(&mp->lock, flags); | ||
2827 | |||
2828 | return err; | ||
2829 | } | 2496 | } |
2830 | 2497 | ||
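The removed eth_tx_return_desc() spells out the reclaim contract the completion path still has to honour: a descriptor may be handed back only after ETH_BUFFER_OWNED_BY_DMA has cleared in its cmd_sts, and the 'used' index then advances modulo the ring size. A minimal sketch of that walk, assuming a simplified descriptor and a caller-supplied free callback rather than the driver's real sk_buff handling:

    #include <stdint.h>
    #include <stddef.h>

    #define ETH_BUFFER_OWNED_BY_DMA 0x80000000

    struct tx_desc_sketch {                 /* simplified stand-in for eth_tx_desc */
            uint32_t cmd_sts;
            void *skb;                      /* would be the sk_buff pointer */
    };

    struct tx_ring_sketch {
            struct tx_desc_sketch *desc;
            int size;
            int used;                       /* next descriptor to reclaim */
            int count;                      /* descriptors currently outstanding */
    };

    /* Reclaim every completed descriptor; returns how many were freed. */
    static int tx_reclaim(struct tx_ring_sketch *r, void (*free_skb)(void *))
    {
            int freed = 0;

            while (r->count > 0 &&
                   !(r->desc[r->used].cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
                    if (r->desc[r->used].skb) {
                            free_skb(r->desc[r->used].skb);
                            r->desc[r->used].skb = NULL;
                    }
                    r->used = (r->used + 1) % r->size;
                    r->count--;
                    freed++;
            }
            return freed;
    }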
2831 | /* | 2498 | /* |
@@ -3017,111 +2684,6 @@ static const struct mv643xx_stats mv643xx_gstrings_stats[] = { | |||
3017 | #define MV643XX_STATS_LEN \ | 2684 | #define MV643XX_STATS_LEN \ |
3018 | sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) | 2685 | sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) |
3019 | 2686 | ||
3020 | static int | ||
3021 | mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
3022 | { | ||
3023 | struct mv643xx_private *mp = netdev->priv; | ||
3024 | int port_num = mp->port_num; | ||
3025 | int autoneg = eth_port_autoneg_supported(port_num); | ||
3026 | int mode_10_bit; | ||
3027 | int auto_duplex; | ||
3028 | int half_duplex = 0; | ||
3029 | int full_duplex = 0; | ||
3030 | int auto_speed; | ||
3031 | int speed_10 = 0; | ||
3032 | int speed_100 = 0; | ||
3033 | int speed_1000 = 0; | ||
3034 | |||
3035 | u32 pcs = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
3036 | u32 psr = mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)); | ||
3037 | |||
3038 | mode_10_bit = psr & MV643XX_ETH_PORT_STATUS_MODE_10_BIT; | ||
3039 | |||
3040 | if (mode_10_bit) { | ||
3041 | ecmd->supported = SUPPORTED_10baseT_Half; | ||
3042 | } else { | ||
3043 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
3044 | SUPPORTED_10baseT_Full | | ||
3045 | SUPPORTED_100baseT_Half | | ||
3046 | SUPPORTED_100baseT_Full | | ||
3047 | SUPPORTED_1000baseT_Full | | ||
3048 | (autoneg ? SUPPORTED_Autoneg : 0) | | ||
3049 | SUPPORTED_TP); | ||
3050 | |||
3051 | auto_duplex = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX); | ||
3052 | auto_speed = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII); | ||
3053 | |||
3054 | ecmd->advertising = ADVERTISED_TP; | ||
3055 | |||
3056 | if (autoneg) { | ||
3057 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
3058 | |||
3059 | if (auto_duplex) { | ||
3060 | half_duplex = 1; | ||
3061 | full_duplex = 1; | ||
3062 | } else { | ||
3063 | if (pcs & MV643XX_ETH_SET_FULL_DUPLEX_MODE) | ||
3064 | full_duplex = 1; | ||
3065 | else | ||
3066 | half_duplex = 1; | ||
3067 | } | ||
3068 | |||
3069 | if (auto_speed) { | ||
3070 | speed_10 = 1; | ||
3071 | speed_100 = 1; | ||
3072 | speed_1000 = 1; | ||
3073 | } else { | ||
3074 | if (pcs & MV643XX_ETH_SET_GMII_SPEED_TO_1000) | ||
3075 | speed_1000 = 1; | ||
3076 | else if (pcs & MV643XX_ETH_SET_MII_SPEED_TO_100) | ||
3077 | speed_100 = 1; | ||
3078 | else | ||
3079 | speed_10 = 1; | ||
3080 | } | ||
3081 | |||
3082 | if (speed_10 & half_duplex) | ||
3083 | ecmd->advertising |= ADVERTISED_10baseT_Half; | ||
3084 | if (speed_10 & full_duplex) | ||
3085 | ecmd->advertising |= ADVERTISED_10baseT_Full; | ||
3086 | if (speed_100 & half_duplex) | ||
3087 | ecmd->advertising |= ADVERTISED_100baseT_Half; | ||
3088 | if (speed_100 & full_duplex) | ||
3089 | ecmd->advertising |= ADVERTISED_100baseT_Full; | ||
3090 | if (speed_1000) | ||
3091 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
3092 | } | ||
3093 | } | ||
3094 | |||
3095 | ecmd->port = PORT_TP; | ||
3096 | ecmd->phy_address = ethernet_phy_get(port_num); | ||
3097 | |||
3098 | ecmd->transceiver = XCVR_EXTERNAL; | ||
3099 | |||
3100 | if (netif_carrier_ok(netdev)) { | ||
3101 | if (mode_10_bit) | ||
3102 | ecmd->speed = SPEED_10; | ||
3103 | else { | ||
3104 | if (psr & MV643XX_ETH_PORT_STATUS_GMII_1000) | ||
3105 | ecmd->speed = SPEED_1000; | ||
3106 | else if (psr & MV643XX_ETH_PORT_STATUS_MII_100) | ||
3107 | ecmd->speed = SPEED_100; | ||
3108 | else | ||
3109 | ecmd->speed = SPEED_10; | ||
3110 | } | ||
3111 | |||
3112 | if (psr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX) | ||
3113 | ecmd->duplex = DUPLEX_FULL; | ||
3114 | else | ||
3115 | ecmd->duplex = DUPLEX_HALF; | ||
3116 | } else { | ||
3117 | ecmd->speed = -1; | ||
3118 | ecmd->duplex = -1; | ||
3119 | } | ||
3120 | |||
3121 | ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; | ||
3122 | return 0; | ||
3123 | } | ||
3124 | |||
3125 | static void mv643xx_get_drvinfo(struct net_device *netdev, | 2687 | static void mv643xx_get_drvinfo(struct net_device *netdev, |
3126 | struct ethtool_drvinfo *drvinfo) | 2688 | struct ethtool_drvinfo *drvinfo) |
3127 | { | 2689 | { |
@@ -3168,15 +2730,41 @@ static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, | |||
3168 | } | 2730 | } |
3169 | } | 2731 | } |
3170 | 2732 | ||
2733 | static u32 mv643xx_eth_get_link(struct net_device *dev) | ||
2734 | { | ||
2735 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2736 | |||
2737 | return mii_link_ok(&mp->mii); | ||
2738 | } | ||
2739 | |||
2740 | static int mv643xx_eth_nway_restart(struct net_device *dev) | ||
2741 | { | ||
2742 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2743 | |||
2744 | return mii_nway_restart(&mp->mii); | ||
2745 | } | ||
2746 | |||
2747 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
2748 | { | ||
2749 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2750 | |||
2751 | return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL); | ||
2752 | } | ||
2753 | |||
3171 | static struct ethtool_ops mv643xx_ethtool_ops = { | 2754 | static struct ethtool_ops mv643xx_ethtool_ops = { |
3172 | .get_settings = mv643xx_get_settings, | 2755 | .get_settings = mv643xx_get_settings, |
2756 | .set_settings = mv643xx_set_settings, | ||
3173 | .get_drvinfo = mv643xx_get_drvinfo, | 2757 | .get_drvinfo = mv643xx_get_drvinfo, |
3174 | .get_link = ethtool_op_get_link, | 2758 | .get_link = mv643xx_eth_get_link, |
3175 | .get_sg = ethtool_op_get_sg, | 2759 | .get_sg = ethtool_op_get_sg, |
3176 | .set_sg = ethtool_op_set_sg, | 2760 | .set_sg = ethtool_op_set_sg, |
3177 | .get_strings = mv643xx_get_strings, | 2761 | .get_strings = mv643xx_get_strings, |
3178 | .get_stats_count = mv643xx_get_stats_count, | 2762 | .get_stats_count = mv643xx_get_stats_count, |
3179 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | 2763 | .get_ethtool_stats = mv643xx_get_ethtool_stats, |
2764 | .get_strings = mv643xx_get_strings, | ||
2765 | .get_stats_count = mv643xx_get_stats_count, | ||
2766 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | ||
2767 | .nway_reset = mv643xx_eth_nway_restart, | ||
3180 | }; | 2768 | }; |
3181 | 2769 | ||
3182 | /************* End ethtool support *************************/ | 2770 | /************* End ethtool support *************************/ |
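With do_ioctl now routed through generic_mii_ioctl(), standard MII tools can reach the PHY through the usual SIOCGMIIPHY/SIOCGMIIREG requests. For illustration, a userspace fragment in the style of mii-tool that reads the basic mode status register; the interface name is a placeholder, error handling is minimal, and it assumes the kernel forwards these ioctls to the driver's do_ioctl handler as usual:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/mii.h>

    int main(void)
    {
            struct ifreq ifr;
            struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);     /* placeholder name */

            if (fd < 0 || ioctl(fd, SIOCGMIIPHY, &ifr) < 0)  /* fills mii->phy_id */
                    return 1;

            mii->reg_num = MII_BMSR;                         /* basic mode status register */
            if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
                    return 1;

            printf("PHY %u BMSR = 0x%04x (link %s)\n",
                   (unsigned)mii->phy_id, (unsigned)mii->val_out,
                   (mii->val_out & BMSR_LSTATUS) ? "up" : "down");
            close(fd);
            return 0;
    }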
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h index f769f9b626ea..7754d1974b9e 100644 --- a/drivers/net/mv643xx_eth.h +++ b/drivers/net/mv643xx_eth.h | |||
@@ -5,53 +5,16 @@ | |||
5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
6 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
7 | #include <linux/workqueue.h> | 7 | #include <linux/workqueue.h> |
8 | #include <linux/mii.h> | ||
8 | 9 | ||
9 | #include <linux/mv643xx.h> | 10 | #include <linux/mv643xx.h> |
10 | 11 | ||
11 | #define BIT0 0x00000001 | ||
12 | #define BIT1 0x00000002 | ||
13 | #define BIT2 0x00000004 | ||
14 | #define BIT3 0x00000008 | ||
15 | #define BIT4 0x00000010 | ||
16 | #define BIT5 0x00000020 | ||
17 | #define BIT6 0x00000040 | ||
18 | #define BIT7 0x00000080 | ||
19 | #define BIT8 0x00000100 | ||
20 | #define BIT9 0x00000200 | ||
21 | #define BIT10 0x00000400 | ||
22 | #define BIT11 0x00000800 | ||
23 | #define BIT12 0x00001000 | ||
24 | #define BIT13 0x00002000 | ||
25 | #define BIT14 0x00004000 | ||
26 | #define BIT15 0x00008000 | ||
27 | #define BIT16 0x00010000 | ||
28 | #define BIT17 0x00020000 | ||
29 | #define BIT18 0x00040000 | ||
30 | #define BIT19 0x00080000 | ||
31 | #define BIT20 0x00100000 | ||
32 | #define BIT21 0x00200000 | ||
33 | #define BIT22 0x00400000 | ||
34 | #define BIT23 0x00800000 | ||
35 | #define BIT24 0x01000000 | ||
36 | #define BIT25 0x02000000 | ||
37 | #define BIT26 0x04000000 | ||
38 | #define BIT27 0x08000000 | ||
39 | #define BIT28 0x10000000 | ||
40 | #define BIT29 0x20000000 | ||
41 | #define BIT30 0x40000000 | ||
42 | #define BIT31 0x80000000 | ||
43 | |||
44 | /* | ||
45 | * The first part is the high level driver of the gigE ethernet ports. | ||
46 | */ | ||
47 | |||
48 | /* Checksum offload for Tx works for most packets, but | 12 | /* Checksum offload for Tx works for most packets, but |
49 | * fails if previous packet sent did not use hw csum | 13 | * fails if previous packet sent did not use hw csum |
50 | */ | 14 | */ |
51 | #define MV643XX_CHECKSUM_OFFLOAD_TX | 15 | #define MV643XX_CHECKSUM_OFFLOAD_TX |
52 | #define MV643XX_NAPI | 16 | #define MV643XX_NAPI |
53 | #define MV643XX_TX_FAST_REFILL | 17 | #define MV643XX_TX_FAST_REFILL |
54 | #undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */ | ||
55 | #undef MV643XX_COAL | 18 | #undef MV643XX_COAL |
56 | 19 | ||
57 | /* | 20 | /* |
@@ -73,25 +36,40 @@ | |||
73 | #define MV643XX_RX_COAL 100 | 36 | #define MV643XX_RX_COAL 100 |
74 | #endif | 37 | #endif |
75 | 38 | ||
76 | /* | 39 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX |
77 | * The second part is the low level driver of the gigE ethernet ports. | 40 | #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) |
78 | */ | 41 | #else |
42 | #define MAX_DESCS_PER_SKB 1 | ||
43 | #endif | ||
79 | 44 | ||
80 | /* | 45 | #define ETH_VLAN_HLEN 4 |
81 | * Header File for : MV-643xx network interface header | 46 | #define ETH_FCS_LEN 4 |
82 | * | 47 | #define ETH_DMA_ALIGN 8 /* hw requires 8-byte alignment */ |
83 | * DESCRIPTION: | 48 | #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ |
84 | * This header file contains macros typedefs and function declaration for | 49 | #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ |
85 | * the Marvell Gig Bit Ethernet Controller. | 50 | ETH_VLAN_HLEN + ETH_FCS_LEN) |
86 | * | 51 | #define ETH_RX_SKB_SIZE ((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7) |
87 | * DEPENDENCIES: | 52 | |
88 | * None. | 53 | #define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ |
89 | * | 54 | #define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ |
90 | */ | 55 | |
56 | #define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2) | ||
57 | #define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9) | ||
58 | #define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR) | ||
59 | #define ETH_INT_CAUSE_EXT 0x00000002 | ||
60 | #define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT) | ||
91 | 61 | ||
92 | /* MAC accept/reject macros */ | 62 | #define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
93 | #define ACCEPT_MAC_ADDR 0 | 63 | #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) |
94 | #define REJECT_MAC_ADDR 1 | 64 | #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) |
65 | #define ETH_INT_CAUSE_PHY 0x00010000 | ||
66 | #define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY) | ||
67 | |||
68 | #define ETH_INT_MASK_ALL 0x00000000 | ||
69 | #define ETH_INT_MASK_ALL_EXT 0x00000000 | ||
70 | |||
71 | #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */ | ||
72 | #define PHY_WAIT_MICRO_SECONDS 10 | ||
95 | 73 | ||
96 | /* Buffer offset from buffer pointer */ | 74 | /* Buffer offset from buffer pointer */ |
97 | #define RX_BUF_OFFSET 0x2 | 75 | #define RX_BUF_OFFSET 0x2 |
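Two of the new header's derived constants are worth spelling out: ETH_RX_SKB_SIZE rounds the MTU plus the IP-alignment/Ethernet/VLAN/FCS overhead up to the next multiple of 8 (the controller's DMA alignment), and the interrupt-cause masks are built by shifting the enabled-queue bitmap into the per-queue "done" and "error" fields. A quick self-contained check of both, using the values shown in the header hunk:

    #include <stdio.h>

    #define ETH_HLEN          14
    #define ETH_HW_IP_ALIGN   2
    #define ETH_VLAN_HLEN     4
    #define ETH_FCS_LEN       4
    #define ETH_WRAPPER_LEN   (ETH_HW_IP_ALIGN + ETH_HLEN + ETH_VLAN_HLEN + ETH_FCS_LEN)

    #define ETH_RX_QUEUES_ENABLED   (1 << 0)
    #define ETH_INT_CAUSE_RX_DONE   (ETH_RX_QUEUES_ENABLED << 2)
    #define ETH_INT_CAUSE_RX_ERROR  (ETH_RX_QUEUES_ENABLED << 9)

    static unsigned int rx_skb_size(unsigned int mtu)
    {
            return (mtu + ETH_WRAPPER_LEN + 7) & ~0x7u;      /* round up to 8 bytes */
    }

    int main(void)
    {
            printf("mtu 1500 -> rx skb size %u\n", rx_skb_size(1500));   /* 1524 -> 1528 */
            printf("rx done mask 0x%03x, rx error mask 0x%03x\n",
                   (unsigned)ETH_INT_CAUSE_RX_DONE,
                   (unsigned)ETH_INT_CAUSE_RX_ERROR);                    /* 0x004, 0x200 */
            return 0;
    }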
@@ -133,88 +111,71 @@ | |||
133 | #define ETH_MIB_LATE_COLLISION 0x7c | 111 | #define ETH_MIB_LATE_COLLISION 0x7c |
134 | 112 | ||
135 | /* Port serial status reg (PSR) */ | 113 | /* Port serial status reg (PSR) */ |
136 | #define ETH_INTERFACE_GMII_MII 0 | 114 | #define ETH_INTERFACE_PCM 0x00000001 |
137 | #define ETH_INTERFACE_PCM BIT0 | 115 | #define ETH_LINK_IS_UP 0x00000002 |
138 | #define ETH_LINK_IS_DOWN 0 | 116 | #define ETH_PORT_AT_FULL_DUPLEX 0x00000004 |
139 | #define ETH_LINK_IS_UP BIT1 | 117 | #define ETH_RX_FLOW_CTRL_ENABLED 0x00000008 |
140 | #define ETH_PORT_AT_HALF_DUPLEX 0 | 118 | #define ETH_GMII_SPEED_1000 0x00000010 |
141 | #define ETH_PORT_AT_FULL_DUPLEX BIT2 | 119 | #define ETH_MII_SPEED_100 0x00000020 |
142 | #define ETH_RX_FLOW_CTRL_DISABLED 0 | 120 | #define ETH_TX_IN_PROGRESS 0x00000080 |
143 | #define ETH_RX_FLOW_CTRL_ENBALED BIT3 | 121 | #define ETH_BYPASS_ACTIVE 0x00000100 |
144 | #define ETH_GMII_SPEED_100_10 0 | 122 | #define ETH_PORT_AT_PARTITION_STATE 0x00000200 |
145 | #define ETH_GMII_SPEED_1000 BIT4 | 123 | #define ETH_PORT_TX_FIFO_EMPTY 0x00000400 |
146 | #define ETH_MII_SPEED_10 0 | ||
147 | #define ETH_MII_SPEED_100 BIT5 | ||
148 | #define ETH_NO_TX 0 | ||
149 | #define ETH_TX_IN_PROGRESS BIT7 | ||
150 | #define ETH_BYPASS_NO_ACTIVE 0 | ||
151 | #define ETH_BYPASS_ACTIVE BIT8 | ||
152 | #define ETH_PORT_NOT_AT_PARTITION_STATE 0 | ||
153 | #define ETH_PORT_AT_PARTITION_STATE BIT9 | ||
154 | #define ETH_PORT_TX_FIFO_NOT_EMPTY 0 | ||
155 | #define ETH_PORT_TX_FIFO_EMPTY BIT10 | ||
156 | |||
157 | #define ETH_DEFAULT_RX_BPDU_QUEUE_3 (BIT23 | BIT22) | ||
158 | #define ETH_DEFAULT_RX_BPDU_QUEUE_4 BIT24 | ||
159 | #define ETH_DEFAULT_RX_BPDU_QUEUE_5 (BIT24 | BIT22) | ||
160 | #define ETH_DEFAULT_RX_BPDU_QUEUE_6 (BIT24 | BIT23) | ||
161 | #define ETH_DEFAULT_RX_BPDU_QUEUE_7 (BIT24 | BIT23 | BIT22) | ||
162 | 124 | ||
163 | /* SMI reg */ | 125 | /* SMI reg */ |
164 | #define ETH_SMI_BUSY BIT28 /* 0 - Write, 1 - Read */ | 126 | #define ETH_SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */ |
165 | #define ETH_SMI_READ_VALID BIT27 /* 0 - Write, 1 - Read */ | 127 | #define ETH_SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */ |
166 | #define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read operation */ | 128 | #define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read */ |
167 | #define ETH_SMI_OPCODE_READ BIT26 /* Operation is in progress */ | 129 | #define ETH_SMI_OPCODE_READ 0x04000000 /* Operation is in progress */ |
130 | |||
131 | /* Interrupt Cause Register Bit Definitions */ | ||
168 | 132 | ||
169 | /* SDMA command status fields macros */ | 133 | /* SDMA command status fields macros */ |
170 | 134 | ||
171 | /* Tx & Rx descriptors status */ | 135 | /* Tx & Rx descriptors status */ |
172 | #define ETH_ERROR_SUMMARY (BIT0) | 136 | #define ETH_ERROR_SUMMARY 0x00000001 |
173 | 137 | ||
174 | /* Tx & Rx descriptors command */ | 138 | /* Tx & Rx descriptors command */ |
175 | #define ETH_BUFFER_OWNED_BY_DMA (BIT31) | 139 | #define ETH_BUFFER_OWNED_BY_DMA 0x80000000 |
176 | 140 | ||
177 | /* Tx descriptors status */ | 141 | /* Tx descriptors status */ |
178 | #define ETH_LC_ERROR (0 ) | 142 | #define ETH_LC_ERROR 0 |
179 | #define ETH_UR_ERROR (BIT1 ) | 143 | #define ETH_UR_ERROR 0x00000002 |
180 | #define ETH_RL_ERROR (BIT2 ) | 144 | #define ETH_RL_ERROR 0x00000004 |
181 | #define ETH_LLC_SNAP_FORMAT (BIT9 ) | 145 | #define ETH_LLC_SNAP_FORMAT 0x00000200 |
182 | 146 | ||
183 | /* Rx descriptors status */ | 147 | /* Rx descriptors status */ |
184 | #define ETH_CRC_ERROR (0 ) | 148 | #define ETH_OVERRUN_ERROR 0x00000002 |
185 | #define ETH_OVERRUN_ERROR (BIT1 ) | 149 | #define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004 |
186 | #define ETH_MAX_FRAME_LENGTH_ERROR (BIT2 ) | 150 | #define ETH_RESOURCE_ERROR 0x00000006 |
187 | #define ETH_RESOURCE_ERROR ((BIT2 | BIT1)) | 151 | #define ETH_VLAN_TAGGED 0x00080000 |
188 | #define ETH_VLAN_TAGGED (BIT19) | 152 | #define ETH_BPDU_FRAME 0x00100000 |
189 | #define ETH_BPDU_FRAME (BIT20) | 153 | #define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000 |
190 | #define ETH_TCP_FRAME_OVER_IP_V_4 (0 ) | 154 | #define ETH_OTHER_FRAME_TYPE 0x00400000 |
191 | #define ETH_UDP_FRAME_OVER_IP_V_4 (BIT21) | 155 | #define ETH_LAYER_2_IS_ETH_V_2 0x00800000 |
192 | #define ETH_OTHER_FRAME_TYPE (BIT22) | 156 | #define ETH_FRAME_TYPE_IP_V_4 0x01000000 |
193 | #define ETH_LAYER_2_IS_ETH_V_2 (BIT23) | 157 | #define ETH_FRAME_HEADER_OK 0x02000000 |
194 | #define ETH_FRAME_TYPE_IP_V_4 (BIT24) | 158 | #define ETH_RX_LAST_DESC 0x04000000 |
195 | #define ETH_FRAME_HEADER_OK (BIT25) | 159 | #define ETH_RX_FIRST_DESC 0x08000000 |
196 | #define ETH_RX_LAST_DESC (BIT26) | 160 | #define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000 |
197 | #define ETH_RX_FIRST_DESC (BIT27) | 161 | #define ETH_RX_ENABLE_INTERRUPT 0x20000000 |
198 | #define ETH_UNKNOWN_DESTINATION_ADDR (BIT28) | 162 | #define ETH_LAYER_4_CHECKSUM_OK 0x40000000 |
199 | #define ETH_RX_ENABLE_INTERRUPT (BIT29) | ||
200 | #define ETH_LAYER_4_CHECKSUM_OK (BIT30) | ||
201 | 163 | ||
202 | /* Rx descriptors byte count */ | 164 | /* Rx descriptors byte count */ |
203 | #define ETH_FRAME_FRAGMENTED (BIT2) | 165 | #define ETH_FRAME_FRAGMENTED 0x00000004 |
204 | 166 | ||
205 | /* Tx descriptors command */ | 167 | /* Tx descriptors command */ |
206 | #define ETH_LAYER_4_CHECKSUM_FIRST_DESC (BIT10) | 168 | #define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400 |
207 | #define ETH_FRAME_SET_TO_VLAN (BIT15) | 169 | #define ETH_FRAME_SET_TO_VLAN 0x00008000 |
208 | #define ETH_TCP_FRAME (0 ) | 170 | #define ETH_UDP_FRAME 0x00010000 |
209 | #define ETH_UDP_FRAME (BIT16) | 171 | #define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000 |
210 | #define ETH_GEN_TCP_UDP_CHECKSUM (BIT17) | 172 | #define ETH_GEN_IP_V_4_CHECKSUM 0x00040000 |
211 | #define ETH_GEN_IP_V_4_CHECKSUM (BIT18) | 173 | #define ETH_ZERO_PADDING 0x00080000 |
212 | #define ETH_ZERO_PADDING (BIT19) | 174 | #define ETH_TX_LAST_DESC 0x00100000 |
213 | #define ETH_TX_LAST_DESC (BIT20) | 175 | #define ETH_TX_FIRST_DESC 0x00200000 |
214 | #define ETH_TX_FIRST_DESC (BIT21) | 176 | #define ETH_GEN_CRC 0x00400000 |
215 | #define ETH_GEN_CRC (BIT22) | 177 | #define ETH_TX_ENABLE_INTERRUPT 0x00800000 |
216 | #define ETH_TX_ENABLE_INTERRUPT (BIT23) | 178 | #define ETH_AUTO_MODE 0x40000000 |
217 | #define ETH_AUTO_MODE (BIT30) | ||
218 | 179 | ||
219 | #define ETH_TX_IHL_SHIFT 11 | 180 | #define ETH_TX_IHL_SHIFT 11 |
220 | 181 | ||
@@ -324,13 +285,6 @@ struct mv643xx_mib_counters { | |||
324 | 285 | ||
325 | struct mv643xx_private { | 286 | struct mv643xx_private { |
326 | int port_num; /* User Ethernet port number */ | 287 | int port_num; /* User Ethernet port number */ |
327 | u8 port_mac_addr[6]; /* User defined port MAC address.*/ | ||
328 | u32 port_config; /* User port configuration value*/ | ||
329 | u32 port_config_extend; /* User port config extend value*/ | ||
330 | u32 port_sdma_config; /* User port SDMA config value */ | ||
331 | u32 port_serial_control; /* User port serial control value */ | ||
332 | u32 port_tx_queue_command; /* Port active Tx queues summary*/ | ||
333 | u32 port_rx_queue_command; /* Port active Rx queues summary*/ | ||
334 | 288 | ||
335 | u32 rx_sram_addr; /* Base address of rx sram area */ | 289 | u32 rx_sram_addr; /* Base address of rx sram area */ |
336 | u32 rx_sram_size; /* Size of rx sram area */ | 290 | u32 rx_sram_size; /* Size of rx sram area */ |
@@ -338,7 +292,6 @@ struct mv643xx_private { | |||
338 | u32 tx_sram_size; /* Size of tx sram area */ | 292 | u32 tx_sram_size; /* Size of tx sram area */ |
339 | 293 | ||
340 | int rx_resource_err; /* Rx ring resource error flag */ | 294 | int rx_resource_err; /* Rx ring resource error flag */ |
341 | int tx_resource_err; /* Tx ring resource error flag */ | ||
342 | 295 | ||
343 | /* Tx/Rx rings managment indexes fields. For driver use */ | 296 | /* Tx/Rx rings managment indexes fields. For driver use */ |
344 | 297 | ||
@@ -347,10 +300,6 @@ struct mv643xx_private { | |||
347 | 300 | ||
348 | /* Next available and first returning Tx resource */ | 301 | /* Next available and first returning Tx resource */ |
349 | int tx_curr_desc_q, tx_used_desc_q; | 302 | int tx_curr_desc_q, tx_used_desc_q; |
350 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
351 | int tx_first_desc_q; | ||
352 | u32 tx_first_command; | ||
353 | #endif | ||
354 | 303 | ||
355 | #ifdef MV643XX_TX_FAST_REFILL | 304 | #ifdef MV643XX_TX_FAST_REFILL |
356 | u32 tx_clean_threshold; | 305 | u32 tx_clean_threshold; |
@@ -358,54 +307,43 @@ struct mv643xx_private { | |||
358 | 307 | ||
359 | struct eth_rx_desc *p_rx_desc_area; | 308 | struct eth_rx_desc *p_rx_desc_area; |
360 | dma_addr_t rx_desc_dma; | 309 | dma_addr_t rx_desc_dma; |
361 | unsigned int rx_desc_area_size; | 310 | int rx_desc_area_size; |
362 | struct sk_buff **rx_skb; | 311 | struct sk_buff **rx_skb; |
363 | 312 | ||
364 | struct eth_tx_desc *p_tx_desc_area; | 313 | struct eth_tx_desc *p_tx_desc_area; |
365 | dma_addr_t tx_desc_dma; | 314 | dma_addr_t tx_desc_dma; |
366 | unsigned int tx_desc_area_size; | 315 | int tx_desc_area_size; |
367 | struct sk_buff **tx_skb; | 316 | struct sk_buff **tx_skb; |
368 | 317 | ||
369 | struct work_struct tx_timeout_task; | 318 | struct work_struct tx_timeout_task; |
370 | 319 | ||
371 | /* | ||
372 | * Former struct mv643xx_eth_priv members start here | ||
373 | */ | ||
374 | struct net_device_stats stats; | 320 | struct net_device_stats stats; |
375 | struct mv643xx_mib_counters mib_counters; | 321 | struct mv643xx_mib_counters mib_counters; |
376 | spinlock_t lock; | 322 | spinlock_t lock; |
377 | /* Size of Tx Ring per queue */ | 323 | /* Size of Tx Ring per queue */ |
378 | unsigned int tx_ring_size; | 324 | int tx_ring_size; |
379 | /* Ammont of SKBs outstanding on Tx queue */ | 325 | /* Number of tx descriptors in use */ |
380 | unsigned int tx_ring_skbs; | 326 | int tx_desc_count; |
381 | /* Size of Rx Ring per queue */ | 327 | /* Size of Rx Ring per queue */ |
382 | unsigned int rx_ring_size; | 328 | int rx_ring_size; |
383 | /* Ammount of SKBs allocated to Rx Ring per queue */ | 329 | /* Number of rx descriptors in use */ |
384 | unsigned int rx_ring_skbs; | 330 | int rx_desc_count; |
385 | |||
386 | /* | ||
387 | * rx_task used to fill RX ring out of bottom half context | ||
388 | */ | ||
389 | struct work_struct rx_task; | ||
390 | 331 | ||
391 | /* | 332 | /* |
392 | * Used in case RX Ring is empty, which can be caused when | 333 | * Used in case RX Ring is empty, which can be caused when |
393 | * system does not have resources (skb's) | 334 | * system does not have resources (skb's) |
394 | */ | 335 | */ |
395 | struct timer_list timeout; | 336 | struct timer_list timeout; |
396 | long rx_task_busy __attribute__ ((aligned(SMP_CACHE_BYTES))); | ||
397 | unsigned rx_timer_flag; | ||
398 | 337 | ||
399 | u32 rx_int_coal; | 338 | u32 rx_int_coal; |
400 | u32 tx_int_coal; | 339 | u32 tx_int_coal; |
340 | struct mii_if_info mii; | ||
401 | }; | 341 | }; |
402 | 342 | ||
403 | /* ethernet.h API list */ | ||
404 | |||
405 | /* Port operation control routines */ | 343 | /* Port operation control routines */ |
406 | static void eth_port_init(struct mv643xx_private *mp); | 344 | static void eth_port_init(struct mv643xx_private *mp); |
407 | static void eth_port_reset(unsigned int eth_port_num); | 345 | static void eth_port_reset(unsigned int eth_port_num); |
408 | static void eth_port_start(struct mv643xx_private *mp); | 346 | static void eth_port_start(struct net_device *dev); |
409 | 347 | ||
410 | /* Port MAC address routines */ | 348 | /* Port MAC address routines */ |
411 | static void eth_port_uc_addr_set(unsigned int eth_port_num, | 349 | static void eth_port_uc_addr_set(unsigned int eth_port_num, |
@@ -423,10 +361,6 @@ static void eth_port_read_smi_reg(unsigned int eth_port_num, | |||
423 | static void eth_clear_mib_counters(unsigned int eth_port_num); | 361 | static void eth_clear_mib_counters(unsigned int eth_port_num); |
424 | 362 | ||
425 | /* Port data flow control routines */ | 363 | /* Port data flow control routines */ |
426 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
427 | struct pkt_info *p_pkt_info); | ||
428 | static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, | ||
429 | struct pkt_info *p_pkt_info); | ||
430 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | 364 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, |
431 | struct pkt_info *p_pkt_info); | 365 | struct pkt_info *p_pkt_info); |
432 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | 366 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, |
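In the mv643xx_eth.h hunks above, the driver-private BITn helpers are replaced by explicit 32-bit hex masks (BIT28 becomes 0x10000000, BIT31 becomes 0x80000000, and so on), and the all-zero "cleared state" aliases such as ETH_LINK_IS_DOWN or ETH_RX_FLOW_CTRL_DISABLED are simply dropped. A minimal sketch of the equivalence, assuming the usual BIT(n) == 1 << n convention behind the removed macros:

/* Illustrative only: how the removed BITn names map onto the new literals. */
#define OLD_BIT(n)                      (1U << (n))

#define ETH_SMI_BUSY_NEW                0x10000000      /* was OLD_BIT(28) */
#define ETH_SMI_READ_VALID_NEW          0x08000000      /* was OLD_BIT(27) */
#define ETH_BUFFER_OWNED_BY_DMA_NEW     0x80000000      /* was OLD_BIT(31) */

/* Testing a status word reads the same either way. */
static inline int smi_is_busy(unsigned int smi_reg)
{
        return (smi_reg & ETH_SMI_BUSY_NEW) != 0;
}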
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 9d6d2548c2d3..8d4999837b65 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -3,6 +3,7 @@ | |||
3 | Written/copyright 1999-2001 by Donald Becker. | 3 | Written/copyright 1999-2001 by Donald Becker. |
4 | Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) | 4 | Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) |
5 | Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) | 5 | Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) |
6 | Portions copyright 2004 Harald Welte <laforge@gnumonks.org> | ||
6 | 7 | ||
7 | This software may be used and distributed according to the terms of | 8 | This software may be used and distributed according to the terms of |
8 | the GNU General Public License (GPL), incorporated herein by reference. | 9 | the GNU General Public License (GPL), incorporated herein by reference. |
@@ -135,8 +136,6 @@ | |||
135 | 136 | ||
136 | TODO: | 137 | TODO: |
137 | * big endian support with CFG:BEM instead of cpu_to_le32 | 138 | * big endian support with CFG:BEM instead of cpu_to_le32 |
138 | * support for an external PHY | ||
139 | * NAPI | ||
140 | */ | 139 | */ |
141 | 140 | ||
142 | #include <linux/config.h> | 141 | #include <linux/config.h> |
@@ -160,6 +159,7 @@ | |||
160 | #include <linux/mii.h> | 159 | #include <linux/mii.h> |
161 | #include <linux/crc32.h> | 160 | #include <linux/crc32.h> |
162 | #include <linux/bitops.h> | 161 | #include <linux/bitops.h> |
162 | #include <linux/prefetch.h> | ||
163 | #include <asm/processor.h> /* Processor type for cache alignment. */ | 163 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
164 | #include <asm/io.h> | 164 | #include <asm/io.h> |
165 | #include <asm/irq.h> | 165 | #include <asm/irq.h> |
@@ -183,13 +183,11 @@ | |||
183 | NETIF_MSG_TX_ERR) | 183 | NETIF_MSG_TX_ERR) |
184 | static int debug = -1; | 184 | static int debug = -1; |
185 | 185 | ||
186 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | ||
187 | static int max_interrupt_work = 20; | ||
188 | static int mtu; | 186 | static int mtu; |
189 | 187 | ||
190 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | 188 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). |
191 | This chip uses a 512 element hash table based on the Ethernet CRC. */ | 189 | This chip uses a 512 element hash table based on the Ethernet CRC. */ |
192 | static int multicast_filter_limit = 100; | 190 | static const int multicast_filter_limit = 100; |
193 | 191 | ||
194 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | 192 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. |
195 | Setting to > 1518 effectively disables this feature. */ | 193 | Setting to > 1518 effectively disables this feature. */ |
@@ -251,14 +249,11 @@ MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); | |||
251 | MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); | 249 | MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); |
252 | MODULE_LICENSE("GPL"); | 250 | MODULE_LICENSE("GPL"); |
253 | 251 | ||
254 | module_param(max_interrupt_work, int, 0); | ||
255 | module_param(mtu, int, 0); | 252 | module_param(mtu, int, 0); |
256 | module_param(debug, int, 0); | 253 | module_param(debug, int, 0); |
257 | module_param(rx_copybreak, int, 0); | 254 | module_param(rx_copybreak, int, 0); |
258 | module_param_array(options, int, NULL, 0); | 255 | module_param_array(options, int, NULL, 0); |
259 | module_param_array(full_duplex, int, NULL, 0); | 256 | module_param_array(full_duplex, int, NULL, 0); |
260 | MODULE_PARM_DESC(max_interrupt_work, | ||
261 | "DP8381x maximum events handled per interrupt"); | ||
262 | MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); | 257 | MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); |
263 | MODULE_PARM_DESC(debug, "DP8381x default debug level"); | 258 | MODULE_PARM_DESC(debug, "DP8381x default debug level"); |
264 | MODULE_PARM_DESC(rx_copybreak, | 259 | MODULE_PARM_DESC(rx_copybreak, |
@@ -374,7 +369,7 @@ enum pcistuff { | |||
374 | 369 | ||
375 | 370 | ||
376 | /* array of board data directly indexed by pci_tbl[x].driver_data */ | 371 | /* array of board data directly indexed by pci_tbl[x].driver_data */ |
377 | static struct { | 372 | static const struct { |
378 | const char *name; | 373 | const char *name; |
379 | unsigned long flags; | 374 | unsigned long flags; |
380 | } natsemi_pci_info[] __devinitdata = { | 375 | } natsemi_pci_info[] __devinitdata = { |
@@ -691,6 +686,8 @@ struct netdev_private { | |||
691 | /* Based on MTU+slack. */ | 686 | /* Based on MTU+slack. */ |
692 | unsigned int rx_buf_sz; | 687 | unsigned int rx_buf_sz; |
693 | int oom; | 688 | int oom; |
689 | /* Interrupt status */ | ||
690 | u32 intr_status; | ||
694 | /* Do not touch the nic registers */ | 691 | /* Do not touch the nic registers */ |
695 | int hands_off; | 692 | int hands_off; |
696 | /* external phy that is used: only valid if dev->if_port != PORT_TP */ | 693 | /* external phy that is used: only valid if dev->if_port != PORT_TP */ |
@@ -748,7 +745,8 @@ static void init_registers(struct net_device *dev); | |||
748 | static int start_tx(struct sk_buff *skb, struct net_device *dev); | 745 | static int start_tx(struct sk_buff *skb, struct net_device *dev); |
749 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs); | 746 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs); |
750 | static void netdev_error(struct net_device *dev, int intr_status); | 747 | static void netdev_error(struct net_device *dev, int intr_status); |
751 | static void netdev_rx(struct net_device *dev); | 748 | static int natsemi_poll(struct net_device *dev, int *budget); |
749 | static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do); | ||
752 | static void netdev_tx_done(struct net_device *dev); | 750 | static void netdev_tx_done(struct net_device *dev); |
753 | static int natsemi_change_mtu(struct net_device *dev, int new_mtu); | 751 | static int natsemi_change_mtu(struct net_device *dev, int new_mtu); |
754 | #ifdef CONFIG_NET_POLL_CONTROLLER | 752 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -776,6 +774,18 @@ static inline void __iomem *ns_ioaddr(struct net_device *dev) | |||
776 | return (void __iomem *) dev->base_addr; | 774 | return (void __iomem *) dev->base_addr; |
777 | } | 775 | } |
778 | 776 | ||
777 | static inline void natsemi_irq_enable(struct net_device *dev) | ||
778 | { | ||
779 | writel(1, ns_ioaddr(dev) + IntrEnable); | ||
780 | readl(ns_ioaddr(dev) + IntrEnable); | ||
781 | } | ||
782 | |||
783 | static inline void natsemi_irq_disable(struct net_device *dev) | ||
784 | { | ||
785 | writel(0, ns_ioaddr(dev) + IntrEnable); | ||
786 | readl(ns_ioaddr(dev) + IntrEnable); | ||
787 | } | ||
788 | |||
779 | static void move_int_phy(struct net_device *dev, int addr) | 789 | static void move_int_phy(struct net_device *dev, int addr) |
780 | { | 790 | { |
781 | struct netdev_private *np = netdev_priv(dev); | 791 | struct netdev_private *np = netdev_priv(dev); |
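The new natsemi_irq_enable()/natsemi_irq_disable() helpers bundle an idiom the driver already relied on in its close path (see the later hunk that drops the open-coded "flush posted writes" sequence): write the IntrEnable register, then read it back so the posted PCI write actually reaches the chip before the caller depends on its effect. A generic sketch of that pattern, with a hypothetical register name:

/* Sketch: force a posted MMIO write out by reading the register back. */
static inline void chip_irq_off(void __iomem *ioaddr, unsigned long irq_reg)
{
        writel(0, ioaddr + irq_reg);    /* ask the chip to mask interrupts */
        readl(ioaddr + irq_reg);        /* read-back flushes the posted write */
}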
@@ -879,6 +889,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
879 | spin_lock_init(&np->lock); | 889 | spin_lock_init(&np->lock); |
880 | np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; | 890 | np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; |
881 | np->hands_off = 0; | 891 | np->hands_off = 0; |
892 | np->intr_status = 0; | ||
882 | 893 | ||
883 | /* Initial port: | 894 | /* Initial port: |
884 | * - If the nic was configured to use an external phy and if find_mii | 895 | * - If the nic was configured to use an external phy and if find_mii |
@@ -932,6 +943,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
932 | dev->do_ioctl = &netdev_ioctl; | 943 | dev->do_ioctl = &netdev_ioctl; |
933 | dev->tx_timeout = &tx_timeout; | 944 | dev->tx_timeout = &tx_timeout; |
934 | dev->watchdog_timeo = TX_TIMEOUT; | 945 | dev->watchdog_timeo = TX_TIMEOUT; |
946 | dev->poll = natsemi_poll; | ||
947 | dev->weight = 64; | ||
948 | |||
935 | #ifdef CONFIG_NET_POLL_CONTROLLER | 949 | #ifdef CONFIG_NET_POLL_CONTROLLER |
936 | dev->poll_controller = &natsemi_poll_controller; | 950 | dev->poll_controller = &natsemi_poll_controller; |
937 | #endif | 951 | #endif |
@@ -1484,6 +1498,31 @@ static void natsemi_reset(struct net_device *dev) | |||
1484 | writel(rfcr, ioaddr + RxFilterAddr); | 1498 | writel(rfcr, ioaddr + RxFilterAddr); |
1485 | } | 1499 | } |
1486 | 1500 | ||
1501 | static void reset_rx(struct net_device *dev) | ||
1502 | { | ||
1503 | int i; | ||
1504 | struct netdev_private *np = netdev_priv(dev); | ||
1505 | void __iomem *ioaddr = ns_ioaddr(dev); | ||
1506 | |||
1507 | np->intr_status &= ~RxResetDone; | ||
1508 | |||
1509 | writel(RxReset, ioaddr + ChipCmd); | ||
1510 | |||
1511 | for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { | ||
1512 | np->intr_status |= readl(ioaddr + IntrStatus); | ||
1513 | if (np->intr_status & RxResetDone) | ||
1514 | break; | ||
1515 | udelay(15); | ||
1516 | } | ||
1517 | if (i==NATSEMI_HW_TIMEOUT) { | ||
1518 | printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n", | ||
1519 | dev->name, i*15); | ||
1520 | } else if (netif_msg_hw(np)) { | ||
1521 | printk(KERN_WARNING "%s: RX reset took %d usec.\n", | ||
1522 | dev->name, i*15); | ||
1523 | } | ||
1524 | } | ||
1525 | |||
1487 | static void natsemi_reload_eeprom(struct net_device *dev) | 1526 | static void natsemi_reload_eeprom(struct net_device *dev) |
1488 | { | 1527 | { |
1489 | struct netdev_private *np = netdev_priv(dev); | 1528 | struct netdev_private *np = netdev_priv(dev); |
@@ -2158,68 +2197,92 @@ static void netdev_tx_done(struct net_device *dev) | |||
2158 | } | 2197 | } |
2159 | } | 2198 | } |
2160 | 2199 | ||
2161 | /* The interrupt handler does all of the Rx thread work and cleans up | 2200 | /* The interrupt handler doesn't actually handle interrupts itself, it |
2162 | after the Tx thread. */ | 2201 | * schedules a NAPI poll if there is anything to do. */ |
2163 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) | 2202 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) |
2164 | { | 2203 | { |
2165 | struct net_device *dev = dev_instance; | 2204 | struct net_device *dev = dev_instance; |
2166 | struct netdev_private *np = netdev_priv(dev); | 2205 | struct netdev_private *np = netdev_priv(dev); |
2167 | void __iomem * ioaddr = ns_ioaddr(dev); | 2206 | void __iomem * ioaddr = ns_ioaddr(dev); |
2168 | int boguscnt = max_interrupt_work; | ||
2169 | unsigned int handled = 0; | ||
2170 | 2207 | ||
2171 | if (np->hands_off) | 2208 | if (np->hands_off) |
2172 | return IRQ_NONE; | 2209 | return IRQ_NONE; |
2173 | do { | 2210 | |
2174 | /* Reading automatically acknowledges all int sources. */ | 2211 | /* Reading automatically acknowledges. */ |
2175 | u32 intr_status = readl(ioaddr + IntrStatus); | 2212 | np->intr_status = readl(ioaddr + IntrStatus); |
2176 | 2213 | ||
2177 | if (netif_msg_intr(np)) | 2214 | if (netif_msg_intr(np)) |
2178 | printk(KERN_DEBUG | 2215 | printk(KERN_DEBUG |
2179 | "%s: Interrupt, status %#08x, mask %#08x.\n", | 2216 | "%s: Interrupt, status %#08x, mask %#08x.\n", |
2180 | dev->name, intr_status, | 2217 | dev->name, np->intr_status, |
2181 | readl(ioaddr + IntrMask)); | 2218 | readl(ioaddr + IntrMask)); |
2182 | 2219 | ||
2183 | if (intr_status == 0) | 2220 | if (!np->intr_status) |
2184 | break; | 2221 | return IRQ_NONE; |
2185 | handled = 1; | ||
2186 | 2222 | ||
2187 | if (intr_status & | 2223 | prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); |
2188 | (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | | ||
2189 | IntrRxErr | IntrRxOverrun)) { | ||
2190 | netdev_rx(dev); | ||
2191 | } | ||
2192 | 2224 | ||
2193 | if (intr_status & | 2225 | if (netif_rx_schedule_prep(dev)) { |
2194 | (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { | 2226 | /* Disable interrupts and register for poll */ |
2227 | natsemi_irq_disable(dev); | ||
2228 | __netif_rx_schedule(dev); | ||
2229 | } | ||
2230 | return IRQ_HANDLED; | ||
2231 | } | ||
2232 | |||
2233 | /* This is the NAPI poll routine. As well as the standard RX handling | ||
2234 | * it also handles all other interrupts that the chip might raise. | ||
2235 | */ | ||
2236 | static int natsemi_poll(struct net_device *dev, int *budget) | ||
2237 | { | ||
2238 | struct netdev_private *np = netdev_priv(dev); | ||
2239 | void __iomem * ioaddr = ns_ioaddr(dev); | ||
2240 | |||
2241 | int work_to_do = min(*budget, dev->quota); | ||
2242 | int work_done = 0; | ||
2243 | |||
2244 | do { | ||
2245 | if (np->intr_status & | ||
2246 | (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { | ||
2195 | spin_lock(&np->lock); | 2247 | spin_lock(&np->lock); |
2196 | netdev_tx_done(dev); | 2248 | netdev_tx_done(dev); |
2197 | spin_unlock(&np->lock); | 2249 | spin_unlock(&np->lock); |
2198 | } | 2250 | } |
2199 | 2251 | ||
2200 | /* Abnormal error summary/uncommon events handlers. */ | 2252 | /* Abnormal error summary/uncommon events handlers. */ |
2201 | if (intr_status & IntrAbnormalSummary) | 2253 | if (np->intr_status & IntrAbnormalSummary) |
2202 | netdev_error(dev, intr_status); | 2254 | netdev_error(dev, np->intr_status); |
2203 | 2255 | ||
2204 | if (--boguscnt < 0) { | 2256 | if (np->intr_status & |
2205 | if (netif_msg_intr(np)) | 2257 | (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | |
2206 | printk(KERN_WARNING | 2258 | IntrRxErr | IntrRxOverrun)) { |
2207 | "%s: Too much work at interrupt, " | 2259 | netdev_rx(dev, &work_done, work_to_do); |
2208 | "status=%#08x.\n", | ||
2209 | dev->name, intr_status); | ||
2210 | break; | ||
2211 | } | 2260 | } |
2212 | } while (1); | 2261 | |
2262 | *budget -= work_done; | ||
2263 | dev->quota -= work_done; | ||
2213 | 2264 | ||
2214 | if (netif_msg_intr(np)) | 2265 | if (work_done >= work_to_do) |
2215 | printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name); | 2266 | return 1; |
2267 | |||
2268 | np->intr_status = readl(ioaddr + IntrStatus); | ||
2269 | } while (np->intr_status); | ||
2270 | |||
2271 | netif_rx_complete(dev); | ||
2216 | 2272 | ||
2217 | return IRQ_RETVAL(handled); | 2273 | /* Reenable interrupts providing nothing is trying to shut |
2274 | * the chip down. */ | ||
2275 | spin_lock(&np->lock); | ||
2276 | if (!np->hands_off && netif_running(dev)) | ||
2277 | natsemi_irq_enable(dev); | ||
2278 | spin_unlock(&np->lock); | ||
2279 | |||
2280 | return 0; | ||
2218 | } | 2281 | } |
2219 | 2282 | ||
2220 | /* This routine is logically part of the interrupt handler, but separated | 2283 | /* This routine is logically part of the interrupt handler, but separated |
2221 | for clarity and better register allocation. */ | 2284 | for clarity and better register allocation. */ |
2222 | static void netdev_rx(struct net_device *dev) | 2285 | static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) |
2223 | { | 2286 | { |
2224 | struct netdev_private *np = netdev_priv(dev); | 2287 | struct netdev_private *np = netdev_priv(dev); |
2225 | int entry = np->cur_rx % RX_RING_SIZE; | 2288 | int entry = np->cur_rx % RX_RING_SIZE; |
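Taken together, the two routines above follow the classic NAPI split of this kernel generation: the hard interrupt handler only latches the cause bits, masks the chip's interrupts and schedules dev->poll, while the poll routine drains Tx and Rx under a budget and re-enables interrupts only once it runs out of work. A stripped-down sketch of that dev->poll contract, using hypothetical example_* helpers rather than the driver's own:

/* Sketch of the legacy NAPI split used by this patch (illustrative only). */
static irqreturn_t example_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct net_device *dev = dev_instance;

        if (!example_chip_has_events(dev))      /* hypothetical helper */
                return IRQ_NONE;

        if (netif_rx_schedule_prep(dev)) {
                example_chip_irq_off(dev);      /* hypothetical helper */
                __netif_rx_schedule(dev);       /* dev->poll runs later */
        }
        return IRQ_HANDLED;
}

static int example_poll(struct net_device *dev, int *budget)
{
        int work_to_do = min(*budget, dev->quota);
        int work_done = example_process_rx(dev, work_to_do); /* hypothetical */

        *budget -= work_done;
        dev->quota -= work_done;

        if (work_done >= work_to_do)
                return 1;                       /* more work: stay scheduled */

        netif_rx_complete(dev);
        example_chip_irq_on(dev);               /* hypothetical helper */
        return 0;                               /* done: wait for next irq */
}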
@@ -2237,6 +2300,12 @@ static void netdev_rx(struct net_device *dev) | |||
2237 | entry, desc_status); | 2300 | entry, desc_status); |
2238 | if (--boguscnt < 0) | 2301 | if (--boguscnt < 0) |
2239 | break; | 2302 | break; |
2303 | |||
2304 | if (*work_done >= work_to_do) | ||
2305 | break; | ||
2306 | |||
2307 | (*work_done)++; | ||
2308 | |||
2240 | pkt_len = (desc_status & DescSizeMask) - 4; | 2309 | pkt_len = (desc_status & DescSizeMask) - 4; |
2241 | if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ | 2310 | if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ |
2242 | if (desc_status & DescMore) { | 2311 | if (desc_status & DescMore) { |
@@ -2248,6 +2317,23 @@ static void netdev_rx(struct net_device *dev) | |||
2248 | "status %#08x.\n", dev->name, | 2317 | "status %#08x.\n", dev->name, |
2249 | np->cur_rx, desc_status); | 2318 | np->cur_rx, desc_status); |
2250 | np->stats.rx_length_errors++; | 2319 | np->stats.rx_length_errors++; |
2320 | |||
2321 | /* The RX state machine has probably | ||
2322 | * locked up beneath us. Follow the | ||
2323 | * reset procedure documented in | ||
2324 | * AN-1287. */ | ||
2325 | |||
2326 | spin_lock_irq(&np->lock); | ||
2327 | reset_rx(dev); | ||
2328 | reinit_rx(dev); | ||
2329 | writel(np->ring_dma, ioaddr + RxRingPtr); | ||
2330 | check_link(dev); | ||
2331 | spin_unlock_irq(&np->lock); | ||
2332 | |||
2333 | /* We'll enable RX on exit from this | ||
2334 | * function. */ | ||
2335 | break; | ||
2336 | |||
2251 | } else { | 2337 | } else { |
2252 | /* There was an error. */ | 2338 | /* There was an error. */ |
2253 | np->stats.rx_errors++; | 2339 | np->stats.rx_errors++; |
@@ -2293,7 +2379,7 @@ static void netdev_rx(struct net_device *dev) | |||
2293 | np->rx_skbuff[entry] = NULL; | 2379 | np->rx_skbuff[entry] = NULL; |
2294 | } | 2380 | } |
2295 | skb->protocol = eth_type_trans(skb, dev); | 2381 | skb->protocol = eth_type_trans(skb, dev); |
2296 | netif_rx(skb); | 2382 | netif_receive_skb(skb); |
2297 | dev->last_rx = jiffies; | 2383 | dev->last_rx = jiffies; |
2298 | np->stats.rx_packets++; | 2384 | np->stats.rx_packets++; |
2299 | np->stats.rx_bytes += pkt_len; | 2385 | np->stats.rx_bytes += pkt_len; |
@@ -3074,9 +3160,7 @@ static int netdev_close(struct net_device *dev) | |||
3074 | del_timer_sync(&np->timer); | 3160 | del_timer_sync(&np->timer); |
3075 | disable_irq(dev->irq); | 3161 | disable_irq(dev->irq); |
3076 | spin_lock_irq(&np->lock); | 3162 | spin_lock_irq(&np->lock); |
3077 | /* Disable interrupts, and flush posted writes */ | 3163 | natsemi_irq_disable(dev); |
3078 | writel(0, ioaddr + IntrEnable); | ||
3079 | readl(ioaddr + IntrEnable); | ||
3080 | np->hands_off = 1; | 3164 | np->hands_off = 1; |
3081 | spin_unlock_irq(&np->lock); | 3165 | spin_unlock_irq(&np->lock); |
3082 | enable_irq(dev->irq); | 3166 | enable_irq(dev->irq); |
@@ -3158,6 +3242,9 @@ static void __devexit natsemi_remove1 (struct pci_dev *pdev) | |||
3158 | * * netdev_timer: timer stopped by natsemi_suspend. | 3242 | * * netdev_timer: timer stopped by natsemi_suspend. |
3159 | * * intr_handler: doesn't acquire the spinlock. suspend calls | 3243 | * * intr_handler: doesn't acquire the spinlock. suspend calls |
3160 | * disable_irq() to enforce synchronization. | 3244 | * disable_irq() to enforce synchronization. |
3245 | * * natsemi_poll: checks before reenabling interrupts. suspend | ||
3246 | * sets hands_off, disables interrupts and then waits with | ||
3247 | * netif_poll_disable(). | ||
3161 | * | 3248 | * |
3162 | * Interrupts must be disabled, otherwise hands_off can cause irq storms. | 3249 | * Interrupts must be disabled, otherwise hands_off can cause irq storms. |
3163 | */ | 3250 | */ |
@@ -3183,6 +3270,8 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) | |||
3183 | spin_unlock_irq(&np->lock); | 3270 | spin_unlock_irq(&np->lock); |
3184 | enable_irq(dev->irq); | 3271 | enable_irq(dev->irq); |
3185 | 3272 | ||
3273 | netif_poll_disable(dev); | ||
3274 | |||
3186 | /* Update the error counts. */ | 3275 | /* Update the error counts. */ |
3187 | __get_stats(dev); | 3276 | __get_stats(dev); |
3188 | 3277 | ||
@@ -3235,6 +3324,7 @@ static int natsemi_resume (struct pci_dev *pdev) | |||
3235 | mod_timer(&np->timer, jiffies + 1*HZ); | 3324 | mod_timer(&np->timer, jiffies + 1*HZ); |
3236 | } | 3325 | } |
3237 | netif_device_attach(dev); | 3326 | netif_device_attach(dev); |
3327 | netif_poll_enable(dev); | ||
3238 | out: | 3328 | out: |
3239 | rtnl_unlock(); | 3329 | rtnl_unlock(); |
3240 | return 0; | 3330 | return 0; |
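The close and suspend/resume hunks show how the NAPI path is quiesced: interrupts are masked and hands_off is set under the lock with the IRQ line disabled, then netif_poll_disable() waits out any poll that is already running; resume reverses this with netif_poll_enable() after the device is reattached. Condensed into one hypothetical helper (error handling and PM boilerplate omitted):

/* Ordering sketch only; example_priv and example_chip_irq_off are stand-ins. */
static void example_quiesce(struct net_device *dev, struct example_priv *np)
{
        disable_irq(dev->irq);
        spin_lock_irq(&np->lock);
        example_chip_irq_off(dev);              /* mask and flush */
        np->hands_off = 1;                      /* irq/poll paths back off */
        spin_unlock_irq(&np->lock);
        enable_irq(dev->irq);

        netif_poll_disable(dev);                /* wait for a running poll */
}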
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 8f40368cf2e9..aaebd28a1920 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c | |||
@@ -27,6 +27,7 @@ static const char version1[] = | |||
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/etherdevice.h> | 29 | #include <linux/etherdevice.h> |
30 | #include <linux/jiffies.h> | ||
30 | 31 | ||
31 | #include <asm/system.h> | 32 | #include <asm/system.h> |
32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
@@ -365,7 +366,7 @@ static void ne_reset_8390(struct net_device *dev) | |||
365 | 366 | ||
366 | /* This check _should_not_ be necessary, omit eventually. */ | 367 | /* This check _should_not_ be necessary, omit eventually. */ |
367 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) | 368 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) |
368 | if (jiffies - reset_start_time > 2*HZ/100) { | 369 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
369 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); | 370 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); |
370 | break; | 371 | break; |
371 | } | 372 | } |
@@ -580,7 +581,7 @@ retry: | |||
580 | #endif | 581 | #endif |
581 | 582 | ||
582 | while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) | 583 | while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) |
583 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 584 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
584 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); | 585 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); |
585 | ne_reset_8390(dev); | 586 | ne_reset_8390(dev); |
586 | NS8390_init(dev,1); | 587 | NS8390_init(dev,1); |
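This hunk and the matching ones in ne.c, ne2.c, ns83820.c, oaknet.c, 3c589_cs.c and ppp_async.c are the same mechanical conversion: open-coded jiffies arithmetic like jiffies - start > 2*HZ/100 becomes time_after(jiffies, start + 2*HZ/100) (or time_after_eq/time_before), the comparison helpers from <linux/jiffies.h> that compare via signed subtraction and therefore stay correct when the jiffies counter wraps. A minimal illustration:

#include <linux/jiffies.h>

/* Wrap-safe check: has more than ~20 ms passed since 'start'? */
static inline int example_timed_out(unsigned long start)
{
        return time_after(jiffies, start + 2 * HZ / 100);
}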
diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 94f782d51f0f..08b218c5bfbc 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c | |||
@@ -50,6 +50,7 @@ static const char version2[] = | |||
50 | #include <linux/delay.h> | 50 | #include <linux/delay.h> |
51 | #include <linux/netdevice.h> | 51 | #include <linux/netdevice.h> |
52 | #include <linux/etherdevice.h> | 52 | #include <linux/etherdevice.h> |
53 | #include <linux/jiffies.h> | ||
53 | 54 | ||
54 | #include <asm/system.h> | 55 | #include <asm/system.h> |
55 | #include <asm/io.h> | 56 | #include <asm/io.h> |
@@ -341,7 +342,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) | |||
341 | outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); | 342 | outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); |
342 | 343 | ||
343 | while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0) | 344 | while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0) |
344 | if (jiffies - reset_start_time > 2*HZ/100) { | 345 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
345 | if (bad_card) { | 346 | if (bad_card) { |
346 | printk(" (warning: no reset ack)"); | 347 | printk(" (warning: no reset ack)"); |
347 | break; | 348 | break; |
@@ -580,7 +581,7 @@ static void ne_reset_8390(struct net_device *dev) | |||
580 | 581 | ||
581 | /* This check _should_not_ be necessary, omit eventually. */ | 582 | /* This check _should_not_ be necessary, omit eventually. */ |
582 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) | 583 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) |
583 | if (jiffies - reset_start_time > 2*HZ/100) { | 584 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
584 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); | 585 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); |
585 | break; | 586 | break; |
586 | } | 587 | } |
@@ -787,7 +788,7 @@ retry: | |||
787 | #endif | 788 | #endif |
788 | 789 | ||
789 | while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) | 790 | while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) |
790 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 791 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
791 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); | 792 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); |
792 | ne_reset_8390(dev); | 793 | ne_reset_8390(dev); |
793 | NS8390_init(dev,1); | 794 | NS8390_init(dev,1); |
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c index e6df375a1d4b..2aa7b77f84f8 100644 --- a/drivers/net/ne2.c +++ b/drivers/net/ne2.c | |||
@@ -75,6 +75,7 @@ static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.o | |||
75 | #include <linux/etherdevice.h> | 75 | #include <linux/etherdevice.h> |
76 | #include <linux/skbuff.h> | 76 | #include <linux/skbuff.h> |
77 | #include <linux/bitops.h> | 77 | #include <linux/bitops.h> |
78 | #include <linux/jiffies.h> | ||
78 | 79 | ||
79 | #include <asm/system.h> | 80 | #include <asm/system.h> |
80 | #include <asm/io.h> | 81 | #include <asm/io.h> |
@@ -395,7 +396,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) | |||
395 | outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); | 396 | outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); |
396 | 397 | ||
397 | while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) | 398 | while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) |
398 | if (jiffies - reset_start_time > 2*HZ/100) { | 399 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
399 | printk(" not found (no reset ack).\n"); | 400 | printk(" not found (no reset ack).\n"); |
400 | retval = -ENODEV; | 401 | retval = -ENODEV; |
401 | goto out; | 402 | goto out; |
@@ -548,7 +549,7 @@ static void ne_reset_8390(struct net_device *dev) | |||
548 | 549 | ||
549 | /* This check _should_not_ be necessary, omit eventually. */ | 550 | /* This check _should_not_ be necessary, omit eventually. */ |
550 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) | 551 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) |
551 | if (jiffies - reset_start_time > 2*HZ/100) { | 552 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
552 | printk("%s: ne_reset_8390() did not complete.\n", | 553 | printk("%s: ne_reset_8390() did not complete.\n", |
553 | dev->name); | 554 | dev->name); |
554 | break; | 555 | break; |
@@ -749,7 +750,7 @@ retry: | |||
749 | #endif | 750 | #endif |
750 | 751 | ||
751 | while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) | 752 | while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) |
752 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 753 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
753 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); | 754 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); |
754 | ne_reset_8390(dev); | 755 | ne_reset_8390(dev); |
755 | NS8390_init(dev,1); | 756 | NS8390_init(dev,1); |
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index d11821dd86ed..e3ebb5803b02 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c | |||
@@ -117,7 +117,7 @@ enum ne2k_pci_chipsets { | |||
117 | }; | 117 | }; |
118 | 118 | ||
119 | 119 | ||
120 | static struct { | 120 | static const struct { |
121 | char *name; | 121 | char *name; |
122 | int flags; | 122 | int flags; |
123 | } pci_clone_list[] __devinitdata = { | 123 | } pci_clone_list[] __devinitdata = { |
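A second cleanup that recurs through these drivers is constifying tables that are only ever read: natsemi_pci_info, pci_clone_list here, the various if_names[] and ram_split[] string arrays, the phy settings[] table, and so on. Marking them const lets the compiler place the data in a read-only section and turns any accidental store into a build error. An isolated before/after sketch with a made-up table:

struct board_info {
        const char *name;
        unsigned long flags;
};

/* Before: sits in writable .data although nothing ever modifies it. */
static struct board_info boards_writable[] = {
        { "example board", 0x1 },
};

/* After: may be placed in .rodata; "boards_ro[0].flags = 0" no longer compiles. */
static const struct board_info boards_ro[] = {
        { "example board", 0x1 },
};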
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index b0c3b6ab6263..0fede50abd3e 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -116,6 +116,7 @@ | |||
116 | #include <linux/timer.h> | 116 | #include <linux/timer.h> |
117 | #include <linux/if_vlan.h> | 117 | #include <linux/if_vlan.h> |
118 | #include <linux/rtnetlink.h> | 118 | #include <linux/rtnetlink.h> |
119 | #include <linux/jiffies.h> | ||
119 | 120 | ||
120 | #include <asm/io.h> | 121 | #include <asm/io.h> |
121 | #include <asm/uaccess.h> | 122 | #include <asm/uaccess.h> |
@@ -651,7 +652,7 @@ static void FASTCALL(phy_intr(struct net_device *ndev)); | |||
651 | static void fastcall phy_intr(struct net_device *ndev) | 652 | static void fastcall phy_intr(struct net_device *ndev) |
652 | { | 653 | { |
653 | struct ns83820 *dev = PRIV(ndev); | 654 | struct ns83820 *dev = PRIV(ndev); |
654 | static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" }; | 655 | static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" }; |
655 | u32 cfg, new_cfg; | 656 | u32 cfg, new_cfg; |
656 | u32 tbisr, tanar, tanlpar; | 657 | u32 tbisr, tanar, tanlpar; |
657 | int speed, fullduplex, newlinkstate; | 658 | int speed, fullduplex, newlinkstate; |
@@ -1607,7 +1608,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab | |||
1607 | { | 1608 | { |
1608 | struct ns83820 *dev = PRIV(ndev); | 1609 | struct ns83820 *dev = PRIV(ndev); |
1609 | int timed_out = 0; | 1610 | int timed_out = 0; |
1610 | long start; | 1611 | unsigned long start; |
1611 | u32 status; | 1612 | u32 status; |
1612 | int loops = 0; | 1613 | int loops = 0; |
1613 | 1614 | ||
@@ -1625,7 +1626,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab | |||
1625 | break; | 1626 | break; |
1626 | if (status & fail) | 1627 | if (status & fail) |
1627 | break; | 1628 | break; |
1628 | if ((jiffies - start) >= HZ) { | 1629 | if (time_after_eq(jiffies, start + HZ)) { |
1629 | timed_out = 1; | 1630 | timed_out = 1; |
1630 | break; | 1631 | break; |
1631 | } | 1632 | } |
diff --git a/drivers/net/oaknet.c b/drivers/net/oaknet.c index 62167a29debe..d0f686d6eaaa 100644 --- a/drivers/net/oaknet.c +++ b/drivers/net/oaknet.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
21 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/jiffies.h> | ||
23 | 24 | ||
24 | #include <asm/board.h> | 25 | #include <asm/board.h> |
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -606,7 +607,7 @@ retry: | |||
606 | #endif | 607 | #endif |
607 | 608 | ||
608 | while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) { | 609 | while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) { |
609 | if (jiffies - start > OAKNET_WAIT) { | 610 | if (time_after(jiffies, start + OAKNET_WAIT)) { |
610 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); | 611 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); |
611 | oaknet_reset_8390(dev); | 612 | oaknet_reset_8390(dev); |
612 | NS8390_init(dev, TRUE); | 613 | NS8390_init(dev, TRUE); |
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 48774efeec71..ce90becb8bdf 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -341,7 +341,7 @@ static void tc574_detach(struct pcmcia_device *p_dev) | |||
341 | #define CS_CHECK(fn, ret) \ | 341 | #define CS_CHECK(fn, ret) \ |
342 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | 342 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) |
343 | 343 | ||
344 | static char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; | 344 | static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; |
345 | 345 | ||
346 | static void tc574_config(dev_link_t *link) | 346 | static void tc574_config(dev_link_t *link) |
347 | { | 347 | { |
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 1c3c9c666f74..3dba50849da7 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/if_arp.h> | 39 | #include <linux/if_arp.h> |
40 | #include <linux/ioport.h> | 40 | #include <linux/ioport.h> |
41 | #include <linux/bitops.h> | 41 | #include <linux/bitops.h> |
42 | #include <linux/jiffies.h> | ||
42 | 43 | ||
43 | #include <pcmcia/cs_types.h> | 44 | #include <pcmcia/cs_types.h> |
44 | #include <pcmcia/cs.h> | 45 | #include <pcmcia/cs.h> |
@@ -115,7 +116,7 @@ struct el3_private { | |||
115 | spinlock_t lock; | 116 | spinlock_t lock; |
116 | }; | 117 | }; |
117 | 118 | ||
118 | static char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; | 119 | static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; |
119 | 120 | ||
120 | /*====================================================================*/ | 121 | /*====================================================================*/ |
121 | 122 | ||
@@ -796,7 +797,7 @@ static void media_check(unsigned long arg) | |||
796 | media = inw(ioaddr+WN4_MEDIA) & 0xc810; | 797 | media = inw(ioaddr+WN4_MEDIA) & 0xc810; |
797 | 798 | ||
798 | /* Ignore collisions unless we've had no irq's recently */ | 799 | /* Ignore collisions unless we've had no irq's recently */ |
799 | if (jiffies - lp->last_irq < HZ) { | 800 | if (time_before(jiffies, lp->last_irq + HZ)) { |
800 | media &= ~0x0010; | 801 | media &= ~0x0010; |
801 | } else { | 802 | } else { |
802 | /* Try harder to detect carrier errors */ | 803 | /* Try harder to detect carrier errors */ |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 28fe2fb4d6c0..b7ac14ba8877 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -309,7 +309,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | |||
309 | static int mfc_try_io_port(dev_link_t *link) | 309 | static int mfc_try_io_port(dev_link_t *link) |
310 | { | 310 | { |
311 | int i, ret; | 311 | int i, ret; |
312 | static kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; | 312 | static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; |
313 | 313 | ||
314 | for (i = 0; i < 5; i++) { | 314 | for (i = 0; i < 5; i++) { |
315 | link->io.BasePort2 = serial_base[i]; | 315 | link->io.BasePort2 = serial_base[i]; |
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 4a232254a497..787176c57fd9 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c | |||
@@ -388,7 +388,7 @@ static char *version = | |||
388 | DRV_NAME " " DRV_VERSION " (Roger C. Pao)"; | 388 | DRV_NAME " " DRV_VERSION " (Roger C. Pao)"; |
389 | #endif | 389 | #endif |
390 | 390 | ||
391 | static char *if_names[]={ | 391 | static const char *if_names[]={ |
392 | "Auto", "10baseT", "BNC", | 392 | "Auto", "10baseT", "BNC", |
393 | }; | 393 | }; |
394 | 394 | ||
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index d85b758f3efa..b46e5f703efa 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -66,7 +66,7 @@ | |||
66 | 66 | ||
67 | #define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ | 67 | #define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ |
68 | 68 | ||
69 | static char *if_names[] = { "auto", "10baseT", "10base2"}; | 69 | static const char *if_names[] = { "auto", "10baseT", "10base2"}; |
70 | 70 | ||
71 | #ifdef PCMCIA_DEBUG | 71 | #ifdef PCMCIA_DEBUG |
72 | static int pc_debug = PCMCIA_DEBUG; | 72 | static int pc_debug = PCMCIA_DEBUG; |
@@ -1727,6 +1727,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1727 | PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), | 1727 | PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), |
1728 | PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), | 1728 | PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), |
1729 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), | 1729 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), |
1730 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), | ||
1730 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), | 1731 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), |
1731 | PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), | 1732 | PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), |
1732 | PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), | 1733 | PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), |
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 0122415dfeef..8839c4faafd6 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -59,7 +59,7 @@ | |||
59 | 59 | ||
60 | /*====================================================================*/ | 60 | /*====================================================================*/ |
61 | 61 | ||
62 | static char *if_names[] = { "auto", "10baseT", "10base2"}; | 62 | static const char *if_names[] = { "auto", "10baseT", "10base2"}; |
63 | 63 | ||
64 | /* Module parameters */ | 64 | /* Module parameters */ |
65 | 65 | ||
@@ -777,7 +777,7 @@ free_cfg_mem: | |||
777 | static int osi_config(dev_link_t *link) | 777 | static int osi_config(dev_link_t *link) |
778 | { | 778 | { |
779 | struct net_device *dev = link->priv; | 779 | struct net_device *dev = link->priv; |
780 | static kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; | 780 | static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; |
781 | int i, j; | 781 | int i, j; |
782 | 782 | ||
783 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 783 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 593d8adee891..eed496803fe4 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -208,7 +208,7 @@ enum xirc_cmd { /* Commands */ | |||
208 | #define XIRCREG45_REV 15 /* Revision Register (rd) */ | 208 | #define XIRCREG45_REV 15 /* Revision Register (rd) */ |
209 | #define XIRCREG50_IA 8 /* Individual Address (8-13) */ | 209 | #define XIRCREG50_IA 8 /* Individual Address (8-13) */ |
210 | 210 | ||
211 | static char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; | 211 | static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; |
212 | 212 | ||
213 | /**************** | 213 | /**************** |
214 | * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | 214 | * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 8f6cf8c896a4..7e900572eaf8 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #define DRV_RELDATE "01.Nov.2005" | 26 | #define DRV_RELDATE "01.Nov.2005" |
27 | #define PFX DRV_NAME ": " | 27 | #define PFX DRV_NAME ": " |
28 | 28 | ||
29 | static const char *version = | 29 | static const char * const version = |
30 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; | 30 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; |
31 | 31 | ||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
@@ -109,7 +109,7 @@ static int rx_copybreak = 200; | |||
109 | * table to translate option values from tulip | 109 | * table to translate option values from tulip |
110 | * to internal options | 110 | * to internal options |
111 | */ | 111 | */ |
112 | static unsigned char options_mapping[] = { | 112 | static const unsigned char options_mapping[] = { |
113 | PCNET32_PORT_ASEL, /* 0 Auto-select */ | 113 | PCNET32_PORT_ASEL, /* 0 Auto-select */ |
114 | PCNET32_PORT_AUI, /* 1 BNC/AUI */ | 114 | PCNET32_PORT_AUI, /* 1 BNC/AUI */ |
115 | PCNET32_PORT_AUI, /* 2 AUI/BNC */ | 115 | PCNET32_PORT_AUI, /* 2 AUI/BNC */ |
@@ -733,7 +733,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1) | |||
733 | int rc; /* return code */ | 733 | int rc; /* return code */ |
734 | int size; /* size of packets */ | 734 | int size; /* size of packets */ |
735 | unsigned char *packet; /* source packet data */ | 735 | unsigned char *packet; /* source packet data */ |
736 | static int data_len = 60; /* length of source packets */ | 736 | static const int data_len = 60; /* length of source packets */ |
737 | unsigned long flags; | 737 | unsigned long flags; |
738 | unsigned long ticks; | 738 | unsigned long ticks; |
739 | 739 | ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1474b7c5ac0b..33cec2dab942 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -132,7 +132,7 @@ struct phy_setting { | |||
132 | }; | 132 | }; |
133 | 133 | ||
134 | /* A mapping of all SUPPORTED settings to speed/duplex */ | 134 | /* A mapping of all SUPPORTED settings to speed/duplex */ |
135 | static struct phy_setting settings[] = { | 135 | static const struct phy_setting settings[] = { |
136 | { | 136 | { |
137 | .speed = 10000, | 137 | .speed = 10000, |
138 | .duplex = DUPLEX_FULL, | 138 | .duplex = DUPLEX_FULL, |
diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 87ee3271b17d..d4449d6d1fe4 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c | |||
@@ -123,7 +123,7 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n" | |||
123 | #ifndef NET_DEBUG | 123 | #ifndef NET_DEBUG |
124 | #define NET_DEBUG 1 | 124 | #define NET_DEBUG 1 |
125 | #endif | 125 | #endif |
126 | static unsigned int net_debug = NET_DEBUG; | 126 | static const unsigned int net_debug = NET_DEBUG; |
127 | 127 | ||
128 | #define ENABLE(irq) if (irq != -1) enable_irq(irq) | 128 | #define ENABLE(irq) if (irq != -1) enable_irq(irq) |
129 | #define DISABLE(irq) if (irq != -1) disable_irq(irq) | 129 | #define DISABLE(irq) if (irq != -1) disable_irq(irq) |
@@ -351,7 +351,7 @@ static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, | |||
351 | typedef int (*plip_func)(struct net_device *dev, struct net_local *nl, | 351 | typedef int (*plip_func)(struct net_device *dev, struct net_local *nl, |
352 | struct plip_local *snd, struct plip_local *rcv); | 352 | struct plip_local *snd, struct plip_local *rcv); |
353 | 353 | ||
354 | static plip_func connection_state_table[] = | 354 | static const plip_func connection_state_table[] = |
355 | { | 355 | { |
356 | plip_none, | 356 | plip_none, |
357 | plip_receive_packet, | 357 | plip_receive_packet, |
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index aa6540b39466..23659fd7c3a6 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/ppp_channel.h> | 30 | #include <linux/ppp_channel.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/jiffies.h> | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | #include <asm/string.h> | 35 | #include <asm/string.h> |
35 | 36 | ||
@@ -570,7 +571,7 @@ ppp_async_encode(struct asyncppp *ap) | |||
570 | * character if necessary. | 571 | * character if necessary. |
571 | */ | 572 | */ |
572 | if (islcp || flag_time == 0 | 573 | if (islcp || flag_time == 0 |
573 | || jiffies - ap->last_xmit >= flag_time) | 574 | || time_after_eq(jiffies, ap->last_xmit + flag_time)) |
574 | *buf++ = PPP_FLAG; | 575 | *buf++ = PPP_FLAG; |
575 | ap->last_xmit = jiffies; | 576 | ap->last_xmit = jiffies; |
576 | fcs = PPP_INITFCS; | 577 | fcs = PPP_INITFCS; |
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 33cb8254e79d..33255fe8031e 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c | |||
@@ -108,7 +108,7 @@ static void | |||
108 | ppp_print_hex (register __u8 * out, const __u8 * in, int count) | 108 | ppp_print_hex (register __u8 * out, const __u8 * in, int count) |
109 | { | 109 | { |
110 | register __u8 next_ch; | 110 | register __u8 next_ch; |
111 | static char hex[] = "0123456789ABCDEF"; | 111 | static const char hex[] = "0123456789ABCDEF"; |
112 | 112 | ||
113 | while (count-- > 0) { | 113 | while (count-- > 0) { |
114 | next_ch = *in++; | 114 | next_ch = *in++; |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 8cc0d0bbdf50..0ad3310290f1 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -113,11 +113,11 @@ static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; | |||
113 | static int num_media = 0; | 113 | static int num_media = 0; |
114 | 114 | ||
115 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | 115 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ |
116 | static int max_interrupt_work = 20; | 116 | static const int max_interrupt_work = 20; |
117 | 117 | ||
118 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | 118 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). |
119 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ | 119 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ |
120 | static int multicast_filter_limit = 32; | 120 | static const int multicast_filter_limit = 32; |
121 | 121 | ||
122 | /* MAC address length */ | 122 | /* MAC address length */ |
123 | #define MAC_ADDR_LEN 6 | 123 | #define MAC_ADDR_LEN 6 |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index b7f00d6eb6a6..79208f434ac1 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -57,23 +57,27 @@ | |||
57 | #include <linux/ethtool.h> | 57 | #include <linux/ethtool.h> |
58 | #include <linux/workqueue.h> | 58 | #include <linux/workqueue.h> |
59 | #include <linux/if_vlan.h> | 59 | #include <linux/if_vlan.h> |
60 | #include <linux/ip.h> | ||
61 | #include <linux/tcp.h> | ||
62 | #include <net/tcp.h> | ||
60 | 63 | ||
61 | #include <asm/system.h> | 64 | #include <asm/system.h> |
62 | #include <asm/uaccess.h> | 65 | #include <asm/uaccess.h> |
63 | #include <asm/io.h> | 66 | #include <asm/io.h> |
67 | #include <asm/div64.h> | ||
64 | 68 | ||
65 | /* local include */ | 69 | /* local include */ |
66 | #include "s2io.h" | 70 | #include "s2io.h" |
67 | #include "s2io-regs.h" | 71 | #include "s2io-regs.h" |
68 | 72 | ||
69 | #define DRV_VERSION "Version 2.0.9.4" | 73 | #define DRV_VERSION "2.0.11.2" |
70 | 74 | ||
71 | /* S2io Driver name & version. */ | 75 | /* S2io Driver name & version. */ |
72 | static char s2io_driver_name[] = "Neterion"; | 76 | static char s2io_driver_name[] = "Neterion"; |
73 | static char s2io_driver_version[] = DRV_VERSION; | 77 | static char s2io_driver_version[] = DRV_VERSION; |
74 | 78 | ||
75 | int rxd_size[4] = {32,48,48,64}; | 79 | static int rxd_size[4] = {32,48,48,64}; |
76 | int rxd_count[4] = {127,85,85,63}; | 80 | static int rxd_count[4] = {127,85,85,63}; |
77 | 81 | ||
78 | static inline int RXD_IS_UP2DT(RxD_t *rxdp) | 82 | static inline int RXD_IS_UP2DT(RxD_t *rxdp) |
79 | { | 83 | { |
@@ -168,6 +172,11 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = { | |||
168 | {"\n DRIVER STATISTICS"}, | 172 | {"\n DRIVER STATISTICS"}, |
169 | {"single_bit_ecc_errs"}, | 173 | {"single_bit_ecc_errs"}, |
170 | {"double_bit_ecc_errs"}, | 174 | {"double_bit_ecc_errs"}, |
175 | ("lro_aggregated_pkts"), | ||
176 | ("lro_flush_both_count"), | ||
177 | ("lro_out_of_sequence_pkts"), | ||
178 | ("lro_flush_due_to_max_pkts"), | ||
179 | ("lro_avg_aggr_pkts"), | ||
171 | }; | 180 | }; |
172 | 181 | ||
173 | #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN | 182 | #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN |
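The s2io hunks begin wiring in Large Receive Offload: five LRO counters are appended to the ethtool statistics keys here, and a later hunk adds the lro and lro_max_pkts tunables that decide whether aggregation runs and how many packets may be merged before a forced flush. The module_param() registration for those tunables is outside the quoted hunks, but it would conventionally look like the sketch below (the parameter descriptions are illustrative, not taken from the driver):

#include <linux/module.h>

static unsigned int lro;                    /* 0 = LRO disabled */
static unsigned int lro_max_pkts = 0xFFFF;  /* flush after this many packets */

module_param(lro, uint, 0);
MODULE_PARM_DESC(lro, "Enable Large Receive Offload (0/1)");
module_param(lro_max_pkts, uint, 0);
MODULE_PARM_DESC(lro_max_pkts, "Maximum packets aggregated into one LRO session");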
@@ -214,7 +223,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) | |||
214 | #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL | 223 | #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL |
215 | #define END_SIGN 0x0 | 224 | #define END_SIGN 0x0 |
216 | 225 | ||
217 | static u64 herc_act_dtx_cfg[] = { | 226 | static const u64 herc_act_dtx_cfg[] = { |
218 | /* Set address */ | 227 | /* Set address */ |
219 | 0x8000051536750000ULL, 0x80000515367500E0ULL, | 228 | 0x8000051536750000ULL, 0x80000515367500E0ULL, |
220 | /* Write data */ | 229 | /* Write data */ |
@@ -235,7 +244,7 @@ static u64 herc_act_dtx_cfg[] = { | |||
235 | END_SIGN | 244 | END_SIGN |
236 | }; | 245 | }; |
237 | 246 | ||
238 | static u64 xena_mdio_cfg[] = { | 247 | static const u64 xena_mdio_cfg[] = { |
239 | /* Reset PMA PLL */ | 248 | /* Reset PMA PLL */ |
240 | 0xC001010000000000ULL, 0xC0010100000000E0ULL, | 249 | 0xC001010000000000ULL, 0xC0010100000000E0ULL, |
241 | 0xC0010100008000E4ULL, | 250 | 0xC0010100008000E4ULL, |
@@ -245,7 +254,7 @@ static u64 xena_mdio_cfg[] = { | |||
245 | END_SIGN | 254 | END_SIGN |
246 | }; | 255 | }; |
247 | 256 | ||
248 | static u64 xena_dtx_cfg[] = { | 257 | static const u64 xena_dtx_cfg[] = { |
249 | 0x8000051500000000ULL, 0x80000515000000E0ULL, | 258 | 0x8000051500000000ULL, 0x80000515000000E0ULL, |
250 | 0x80000515D93500E4ULL, 0x8001051500000000ULL, | 259 | 0x80000515D93500E4ULL, 0x8001051500000000ULL, |
251 | 0x80010515000000E0ULL, 0x80010515001E00E4ULL, | 260 | 0x80010515000000E0ULL, 0x80010515001E00E4ULL, |
@@ -273,7 +282,7 @@ static u64 xena_dtx_cfg[] = { | |||
273 | * Constants for Fixing the MacAddress problem seen mostly on | 282 | * Constants for Fixing the MacAddress problem seen mostly on |
274 | * Alpha machines. | 283 | * Alpha machines. |
275 | */ | 284 | */ |
276 | static u64 fix_mac[] = { | 285 | static const u64 fix_mac[] = { |
277 | 0x0060000000000000ULL, 0x0060600000000000ULL, | 286 | 0x0060000000000000ULL, 0x0060600000000000ULL, |
278 | 0x0040600000000000ULL, 0x0000600000000000ULL, | 287 | 0x0040600000000000ULL, 0x0000600000000000ULL, |
279 | 0x0020600000000000ULL, 0x0060600000000000ULL, | 288 | 0x0020600000000000ULL, 0x0060600000000000ULL, |
@@ -317,6 +326,12 @@ static unsigned int indicate_max_pkts; | |||
317 | static unsigned int rxsync_frequency = 3; | 326 | static unsigned int rxsync_frequency = 3; |
318 | /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ | 327 | /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ |
319 | static unsigned int intr_type = 0; | 328 | static unsigned int intr_type = 0; |
329 | /* Large receive offload feature */ | ||
330 | static unsigned int lro = 0; | ||
331 | /* Max pkts to be aggregated by LRO at one time. If not specified, | ||
332 | * aggregation happens until we hit max IP pkt size(64K) | ||
333 | */ | ||
334 | static unsigned int lro_max_pkts = 0xFFFF; | ||
320 | 335 | ||
321 | /* | 336 | /* |
322 | * S2IO device table. | 337 | * S2IO device table. |
@@ -1476,6 +1491,19 @@ static int init_nic(struct s2io_nic *nic) | |||
1476 | writel((u32) (val64 >> 32), (add + 4)); | 1491 | writel((u32) (val64 >> 32), (add + 4)); |
1477 | val64 = readq(&bar0->mac_cfg); | 1492 | val64 = readq(&bar0->mac_cfg); |
1478 | 1493 | ||
1494 | /* Enable FCS stripping by adapter */ | ||
1495 | add = &bar0->mac_cfg; | ||
1496 | val64 = readq(&bar0->mac_cfg); | ||
1497 | val64 |= MAC_CFG_RMAC_STRIP_FCS; | ||
1498 | if (nic->device_type == XFRAME_II_DEVICE) | ||
1499 | writeq(val64, &bar0->mac_cfg); | ||
1500 | else { | ||
1501 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | ||
1502 | writel((u32) (val64), add); | ||
1503 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | ||
1504 | writel((u32) (val64 >> 32), (add + 4)); | ||
1505 | } | ||
1506 | |||
1479 | /* | 1507 | /* |
1480 | * Set the time value to be inserted in the pause frame | 1508 | * Set the time value to be inserted in the pause frame |
1481 | * generated by xena. | 1509 | * generated by xena. |
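The hunk above enables FCS stripping through two different register paths: Xframe II accepts mac_cfg as one 64-bit writeq(), while Xframe I takes it as two 32-bit halves, each preceded by rewriting the 0x4C0D key into rmac_cfg_key. Below is a minimal userspace trace of that keyed split write; the mock accessors and the bare key constant are illustrative stand-ins, not the driver's iomem helpers or its RMAC_CFG_KEY() encoding.

#include <stdio.h>
#include <stdint.h>

/* Print-only stand-ins for the iomem accessors so the write sequence can
 * be traced; in the driver these are writeq()/writel() on __iomem addresses. */
static void writeq_mock(const char *reg, uint64_t v)
{
	printf("writeq %-12s <- 0x%016llx\n", reg, (unsigned long long)v);
}

static void writel_mock(const char *reg, uint32_t v)
{
	printf("writel %-12s <- 0x%08x\n", reg, (unsigned)v);
}

#define CFG_KEY 0x4C0Du		/* wrapped by RMAC_CFG_KEY() in the driver */

/* Xframe I path: the key must be rewritten before each 32-bit half of mac_cfg. */
static void mac_cfg_keyed_write(uint64_t val64)
{
	writeq_mock("rmac_cfg_key", CFG_KEY);
	writel_mock("mac_cfg+0", (uint32_t)val64);		/* low 32 bits */
	writeq_mock("rmac_cfg_key", CFG_KEY);
	writel_mock("mac_cfg+4", (uint32_t)(val64 >> 32));	/* high 32 bits */
}

int main(void)
{
	mac_cfg_keyed_write(0x0123456789ABCDEFULL);
	return 0;
}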
@@ -2127,7 +2155,7 @@ static void stop_nic(struct s2io_nic *nic) | |||
2127 | } | 2155 | } |
2128 | } | 2156 | } |
2129 | 2157 | ||
2130 | int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) | 2158 | static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) |
2131 | { | 2159 | { |
2132 | struct net_device *dev = nic->dev; | 2160 | struct net_device *dev = nic->dev; |
2133 | struct sk_buff *frag_list; | 2161 | struct sk_buff *frag_list; |
@@ -2569,6 +2597,8 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
2569 | #ifndef CONFIG_S2IO_NAPI | 2597 | #ifndef CONFIG_S2IO_NAPI |
2570 | int pkt_cnt = 0; | 2598 | int pkt_cnt = 0; |
2571 | #endif | 2599 | #endif |
2600 | int i; | ||
2601 | |||
2572 | spin_lock(&nic->rx_lock); | 2602 | spin_lock(&nic->rx_lock); |
2573 | if (atomic_read(&nic->card_state) == CARD_DOWN) { | 2603 | if (atomic_read(&nic->card_state) == CARD_DOWN) { |
2574 | DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n", | 2604 | DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n", |
@@ -2661,6 +2691,18 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
2661 | break; | 2691 | break; |
2662 | #endif | 2692 | #endif |
2663 | } | 2693 | } |
2694 | if (nic->lro) { | ||
2695 | /* Clear all LRO sessions before exiting */ | ||
2696 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | ||
2697 | lro_t *lro = &nic->lro0_n[i]; | ||
2698 | if (lro->in_use) { | ||
2699 | update_L3L4_header(nic, lro); | ||
2700 | queue_rx_frame(lro->parent); | ||
2701 | clear_lro_session(lro); | ||
2702 | } | ||
2703 | } | ||
2704 | } | ||
2705 | |||
2664 | spin_unlock(&nic->rx_lock); | 2706 | spin_unlock(&nic->rx_lock); |
2665 | } | 2707 | } |
2666 | 2708 | ||
@@ -2852,7 +2894,7 @@ static int wait_for_cmd_complete(nic_t * sp) | |||
2852 | * void. | 2894 | * void. |
2853 | */ | 2895 | */ |
2854 | 2896 | ||
2855 | void s2io_reset(nic_t * sp) | 2897 | static void s2io_reset(nic_t * sp) |
2856 | { | 2898 | { |
2857 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 2899 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
2858 | u64 val64; | 2900 | u64 val64; |
@@ -2940,7 +2982,7 @@ void s2io_reset(nic_t * sp) | |||
2940 | * SUCCESS on success and FAILURE on failure. | 2982 | * SUCCESS on success and FAILURE on failure. |
2941 | */ | 2983 | */ |
2942 | 2984 | ||
2943 | int s2io_set_swapper(nic_t * sp) | 2985 | static int s2io_set_swapper(nic_t * sp) |
2944 | { | 2986 | { |
2945 | struct net_device *dev = sp->dev; | 2987 | struct net_device *dev = sp->dev; |
2946 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 2988 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
@@ -3089,7 +3131,7 @@ static int wait_for_msix_trans(nic_t *nic, int i) | |||
3089 | return ret; | 3131 | return ret; |
3090 | } | 3132 | } |
3091 | 3133 | ||
3092 | void restore_xmsi_data(nic_t *nic) | 3134 | static void restore_xmsi_data(nic_t *nic) |
3093 | { | 3135 | { |
3094 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 3136 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
3095 | u64 val64; | 3137 | u64 val64; |
@@ -3180,7 +3222,7 @@ int s2io_enable_msi(nic_t *nic) | |||
3180 | return 0; | 3222 | return 0; |
3181 | } | 3223 | } |
3182 | 3224 | ||
3183 | int s2io_enable_msi_x(nic_t *nic) | 3225 | static int s2io_enable_msi_x(nic_t *nic) |
3184 | { | 3226 | { |
3185 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 3227 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
3186 | u64 tx_mat, rx_mat; | 3228 | u64 tx_mat, rx_mat; |
@@ -3668,23 +3710,32 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
3668 | * else schedule a tasklet to reallocate the buffers. | 3710 | * else schedule a tasklet to reallocate the buffers. |
3669 | */ | 3711 | */ |
3670 | for (i = 0; i < config->rx_ring_num; i++) { | 3712 | for (i = 0; i < config->rx_ring_num; i++) { |
3671 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); | 3713 | if (!sp->lro) { |
3672 | int level = rx_buffer_level(sp, rxb_size, i); | 3714 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); |
3673 | 3715 | int level = rx_buffer_level(sp, rxb_size, i); | |
3674 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | 3716 | |
3675 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name); | 3717 | if ((level == PANIC) && (!TASKLET_IN_USE)) { |
3676 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | 3718 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", |
3677 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { | 3719 | dev->name); |
3678 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | 3720 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); |
3679 | dev->name); | 3721 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { |
3680 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | 3722 | DBG_PRINT(ERR_DBG, "%s:Out of memory", |
3723 | dev->name); | ||
3724 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | ||
3725 | clear_bit(0, (&sp->tasklet_status)); | ||
3726 | atomic_dec(&sp->isr_cnt); | ||
3727 | return IRQ_HANDLED; | ||
3728 | } | ||
3681 | clear_bit(0, (&sp->tasklet_status)); | 3729 | clear_bit(0, (&sp->tasklet_status)); |
3682 | atomic_dec(&sp->isr_cnt); | 3730 | } else if (level == LOW) { |
3683 | return IRQ_HANDLED; | 3731 | tasklet_schedule(&sp->task); |
3684 | } | 3732 | } |
3685 | clear_bit(0, (&sp->tasklet_status)); | 3733 | } |
3686 | } else if (level == LOW) { | 3734 | else if (fill_rx_buffers(sp, i) == -ENOMEM) { |
3687 | tasklet_schedule(&sp->task); | 3735 | DBG_PRINT(ERR_DBG, "%s:Out of memory", |
3736 | dev->name); | ||
3737 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
3738 | break; | ||
3688 | } | 3739 | } |
3689 | } | 3740 | } |
3690 | 3741 | ||
@@ -3697,29 +3748,37 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
3697 | { | 3748 | { |
3698 | ring_info_t *ring = (ring_info_t *)dev_id; | 3749 | ring_info_t *ring = (ring_info_t *)dev_id; |
3699 | nic_t *sp = ring->nic; | 3750 | nic_t *sp = ring->nic; |
3751 | struct net_device *dev = (struct net_device *) dev_id; | ||
3700 | int rxb_size, level, rng_n; | 3752 | int rxb_size, level, rng_n; |
3701 | 3753 | ||
3702 | atomic_inc(&sp->isr_cnt); | 3754 | atomic_inc(&sp->isr_cnt); |
3703 | rx_intr_handler(ring); | 3755 | rx_intr_handler(ring); |
3704 | 3756 | ||
3705 | rng_n = ring->ring_no; | 3757 | rng_n = ring->ring_no; |
3706 | rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); | 3758 | if (!sp->lro) { |
3707 | level = rx_buffer_level(sp, rxb_size, rng_n); | 3759 | rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); |
3708 | 3760 | level = rx_buffer_level(sp, rxb_size, rng_n); | |
3709 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | 3761 | |
3710 | int ret; | 3762 | if ((level == PANIC) && (!TASKLET_IN_USE)) { |
3711 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); | 3763 | int ret; |
3712 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | 3764 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); |
3713 | if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { | 3765 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); |
3714 | DBG_PRINT(ERR_DBG, "Out of memory in %s", | 3766 | if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { |
3715 | __FUNCTION__); | 3767 | DBG_PRINT(ERR_DBG, "Out of memory in %s", |
3768 | __FUNCTION__); | ||
3769 | clear_bit(0, (&sp->tasklet_status)); | ||
3770 | return IRQ_HANDLED; | ||
3771 | } | ||
3716 | clear_bit(0, (&sp->tasklet_status)); | 3772 | clear_bit(0, (&sp->tasklet_status)); |
3717 | return IRQ_HANDLED; | 3773 | } else if (level == LOW) { |
3774 | tasklet_schedule(&sp->task); | ||
3718 | } | 3775 | } |
3719 | clear_bit(0, (&sp->tasklet_status)); | ||
3720 | } else if (level == LOW) { | ||
3721 | tasklet_schedule(&sp->task); | ||
3722 | } | 3776 | } |
3777 | else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { | ||
3778 | DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name); | ||
3779 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
3780 | } | ||
3781 | |||
3723 | atomic_dec(&sp->isr_cnt); | 3782 | atomic_dec(&sp->isr_cnt); |
3724 | 3783 | ||
3725 | return IRQ_HANDLED; | 3784 | return IRQ_HANDLED; |
@@ -3875,24 +3934,33 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) | |||
3875 | */ | 3934 | */ |
3876 | #ifndef CONFIG_S2IO_NAPI | 3935 | #ifndef CONFIG_S2IO_NAPI |
3877 | for (i = 0; i < config->rx_ring_num; i++) { | 3936 | for (i = 0; i < config->rx_ring_num; i++) { |
3878 | int ret; | 3937 | if (!sp->lro) { |
3879 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); | 3938 | int ret; |
3880 | int level = rx_buffer_level(sp, rxb_size, i); | 3939 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); |
3881 | 3940 | int level = rx_buffer_level(sp, rxb_size, i); | |
3882 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | 3941 | |
3883 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name); | 3942 | if ((level == PANIC) && (!TASKLET_IN_USE)) { |
3884 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | 3943 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", |
3885 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { | 3944 | dev->name); |
3886 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | 3945 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); |
3887 | dev->name); | 3946 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { |
3888 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | 3947 | DBG_PRINT(ERR_DBG, "%s:Out of memory", |
3948 | dev->name); | ||
3949 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | ||
3950 | clear_bit(0, (&sp->tasklet_status)); | ||
3951 | atomic_dec(&sp->isr_cnt); | ||
3952 | return IRQ_HANDLED; | ||
3953 | } | ||
3889 | clear_bit(0, (&sp->tasklet_status)); | 3954 | clear_bit(0, (&sp->tasklet_status)); |
3890 | atomic_dec(&sp->isr_cnt); | 3955 | } else if (level == LOW) { |
3891 | return IRQ_HANDLED; | 3956 | tasklet_schedule(&sp->task); |
3892 | } | 3957 | } |
3893 | clear_bit(0, (&sp->tasklet_status)); | 3958 | } |
3894 | } else if (level == LOW) { | 3959 | else if (fill_rx_buffers(sp, i) == -ENOMEM) { |
3895 | tasklet_schedule(&sp->task); | 3960 | DBG_PRINT(ERR_DBG, "%s:Out of memory", |
3961 | dev->name); | ||
3962 | DBG_PRINT(ERR_DBG, " in Rx intr!!\n"); | ||
3963 | break; | ||
3896 | } | 3964 | } |
3897 | } | 3965 | } |
3898 | #endif | 3966 | #endif |
@@ -4129,7 +4197,7 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4129 | * as defined in errno.h file on failure. | 4197 | * as defined in errno.h file on failure. |
4130 | */ | 4198 | */ |
4131 | 4199 | ||
4132 | int s2io_set_mac_addr(struct net_device *dev, u8 * addr) | 4200 | static int s2io_set_mac_addr(struct net_device *dev, u8 * addr) |
4133 | { | 4201 | { |
4134 | nic_t *sp = dev->priv; | 4202 | nic_t *sp = dev->priv; |
4135 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 4203 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
@@ -5044,6 +5112,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
5044 | int i = 0; | 5112 | int i = 0; |
5045 | nic_t *sp = dev->priv; | 5113 | nic_t *sp = dev->priv; |
5046 | StatInfo_t *stat_info = sp->mac_control.stats_info; | 5114 | StatInfo_t *stat_info = sp->mac_control.stats_info; |
5115 | u64 tmp; | ||
5047 | 5116 | ||
5048 | s2io_updt_stats(sp); | 5117 | s2io_updt_stats(sp); |
5049 | tmp_stats[i++] = | 5118 | tmp_stats[i++] = |
@@ -5135,6 +5204,16 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
5135 | tmp_stats[i++] = 0; | 5204 | tmp_stats[i++] = 0; |
5136 | tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; | 5205 | tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; |
5137 | tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; | 5206 | tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; |
5207 | tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt; | ||
5208 | tmp_stats[i++] = stat_info->sw_stat.sending_both; | ||
5209 | tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts; | ||
5210 | tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts; | ||
5211 | tmp = 0; | ||
5212 | if (stat_info->sw_stat.num_aggregations) { | ||
5213 | tmp = stat_info->sw_stat.sum_avg_pkts_aggregated; | ||
5214 | do_div(tmp, stat_info->sw_stat.num_aggregations); | ||
5215 | } | ||
5216 | tmp_stats[i++] = tmp; | ||
5138 | } | 5217 | } |
5139 | 5218 | ||
5140 | static int s2io_ethtool_get_regs_len(struct net_device *dev) | 5219 | static int s2io_ethtool_get_regs_len(struct net_device *dev) |
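The extra ethtool counters above end with a derived value: lro_avg_aggr_pkts is the running sum of segments merged per flushed session divided by the number of flushes, and the driver defers the division to this read path, doing it through do_div() so the u64 arithmetic also works on 32-bit builds. A simplified userspace model of the same bookkeeping (plain C division stands in for do_div()):

#include <stdio.h>

/* Simplified model of the LRO average-aggregation statistic. In the driver
 * the division is done with do_div() on a u64; plain division is used here
 * since this runs in userspace. */
struct lro_sw_stat {
	unsigned long long sum_avg_pkts_aggregated;
	unsigned long long num_aggregations;
};

static void record_flush(struct lro_sw_stat *st, unsigned int sg_num)
{
	st->sum_avg_pkts_aggregated += sg_num;	/* segments in this session */
	st->num_aggregations++;			/* one more flushed session */
}

static unsigned long long avg_aggr_pkts(const struct lro_sw_stat *st)
{
	if (!st->num_aggregations)		/* avoid divide-by-zero */
		return 0;
	return st->sum_avg_pkts_aggregated / st->num_aggregations;
}

int main(void)
{
	struct lro_sw_stat st = { 0, 0 };

	record_flush(&st, 10);
	record_flush(&st, 43);
	record_flush(&st, 7);
	printf("lro_avg_aggr_pkts = %llu\n", avg_aggr_pkts(&st));	/* (10+43+7)/3 = 20 */
	return 0;
}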
@@ -5516,6 +5595,14 @@ static int s2io_card_up(nic_t * sp) | |||
5516 | /* Setting its receive mode */ | 5595 | /* Setting its receive mode */ |
5517 | s2io_set_multicast(dev); | 5596 | s2io_set_multicast(dev); |
5518 | 5597 | ||
5598 | if (sp->lro) { | ||
5599 | /* Initialize max aggregatable pkts based on MTU */ | ||
5600 | sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; | ||
5601 | /* Check if we can use(if specified) user provided value */ | ||
5602 | if (lro_max_pkts < sp->lro_max_aggr_per_sess) | ||
5603 | sp->lro_max_aggr_per_sess = lro_max_pkts; | ||
5604 | } | ||
5605 | |||
5519 | /* Enable tasklet for the device */ | 5606 | /* Enable tasklet for the device */ |
5520 | tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); | 5607 | tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); |
5521 | 5608 | ||
@@ -5608,6 +5695,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5608 | ((unsigned long) rxdp->Host_Control); | 5695 | ((unsigned long) rxdp->Host_Control); |
5609 | int ring_no = ring_data->ring_no; | 5696 | int ring_no = ring_data->ring_no; |
5610 | u16 l3_csum, l4_csum; | 5697 | u16 l3_csum, l4_csum; |
5698 | lro_t *lro; | ||
5611 | 5699 | ||
5612 | skb->dev = dev; | 5700 | skb->dev = dev; |
5613 | if (rxdp->Control_1 & RXD_T_CODE) { | 5701 | if (rxdp->Control_1 & RXD_T_CODE) { |
@@ -5656,7 +5744,8 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5656 | skb_put(skb, buf2_len); | 5744 | skb_put(skb, buf2_len); |
5657 | } | 5745 | } |
5658 | 5746 | ||
5659 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && | 5747 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || |
5748 | (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && | ||
5660 | (sp->rx_csum)) { | 5749 | (sp->rx_csum)) { |
5661 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); | 5750 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); |
5662 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); | 5751 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); |
@@ -5667,6 +5756,54 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5667 | * a flag in the RxD. | 5756 | * a flag in the RxD. |
5668 | */ | 5757 | */ |
5669 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 5758 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
5759 | if (sp->lro) { | ||
5760 | u32 tcp_len; | ||
5761 | u8 *tcp; | ||
5762 | int ret = 0; | ||
5763 | |||
5764 | ret = s2io_club_tcp_session(skb->data, &tcp, | ||
5765 | &tcp_len, &lro, rxdp, sp); | ||
5766 | switch (ret) { | ||
5767 | case 3: /* Begin anew */ | ||
5768 | lro->parent = skb; | ||
5769 | goto aggregate; | ||
5770 | case 1: /* Aggregate */ | ||
5771 | { | ||
5772 | lro_append_pkt(sp, lro, | ||
5773 | skb, tcp_len); | ||
5774 | goto aggregate; | ||
5775 | } | ||
5776 | case 4: /* Flush session */ | ||
5777 | { | ||
5778 | lro_append_pkt(sp, lro, | ||
5779 | skb, tcp_len); | ||
5780 | queue_rx_frame(lro->parent); | ||
5781 | clear_lro_session(lro); | ||
5782 | sp->mac_control.stats_info-> | ||
5783 | sw_stat.flush_max_pkts++; | ||
5784 | goto aggregate; | ||
5785 | } | ||
5786 | case 2: /* Flush both */ | ||
5787 | lro->parent->data_len = | ||
5788 | lro->frags_len; | ||
5789 | sp->mac_control.stats_info-> | ||
5790 | sw_stat.sending_both++; | ||
5791 | queue_rx_frame(lro->parent); | ||
5792 | clear_lro_session(lro); | ||
5793 | goto send_up; | ||
5794 | case 0: /* sessions exceeded */ | ||
5795 | case 5: /* | ||
5796 | * First pkt in session not | ||
5797 | * L3/L4 aggregatable | ||
5798 | */ | ||
5799 | break; | ||
5800 | default: | ||
5801 | DBG_PRINT(ERR_DBG, | ||
5802 | "%s: Samadhana!!\n", | ||
5803 | __FUNCTION__); | ||
5804 | BUG(); | ||
5805 | } | ||
5806 | } | ||
5670 | } else { | 5807 | } else { |
5671 | /* | 5808 | /* |
5672 | * Packet with erroneous checksum, let the | 5809 | * Packet with erroneous checksum, let the |
@@ -5678,25 +5815,31 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5678 | skb->ip_summed = CHECKSUM_NONE; | 5815 | skb->ip_summed = CHECKSUM_NONE; |
5679 | } | 5816 | } |
5680 | 5817 | ||
5681 | skb->protocol = eth_type_trans(skb, dev); | 5818 | if (!sp->lro) { |
5819 | skb->protocol = eth_type_trans(skb, dev); | ||
5682 | #ifdef CONFIG_S2IO_NAPI | 5820 | #ifdef CONFIG_S2IO_NAPI |
5683 | if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { | 5821 | if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { |
5684 | /* Queueing the vlan frame to the upper layer */ | 5822 | /* Queueing the vlan frame to the upper layer */ |
5685 | vlan_hwaccel_receive_skb(skb, sp->vlgrp, | 5823 | vlan_hwaccel_receive_skb(skb, sp->vlgrp, |
5686 | RXD_GET_VLAN_TAG(rxdp->Control_2)); | 5824 | RXD_GET_VLAN_TAG(rxdp->Control_2)); |
5687 | } else { | 5825 | } else { |
5688 | netif_receive_skb(skb); | 5826 | netif_receive_skb(skb); |
5689 | } | 5827 | } |
5690 | #else | 5828 | #else |
5691 | if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { | 5829 | if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { |
5692 | /* Queueing the vlan frame to the upper layer */ | 5830 | /* Queueing the vlan frame to the upper layer */ |
5693 | vlan_hwaccel_rx(skb, sp->vlgrp, | 5831 | vlan_hwaccel_rx(skb, sp->vlgrp, |
5694 | RXD_GET_VLAN_TAG(rxdp->Control_2)); | 5832 | RXD_GET_VLAN_TAG(rxdp->Control_2)); |
5695 | } else { | 5833 | } else { |
5696 | netif_rx(skb); | 5834 | netif_rx(skb); |
5697 | } | 5835 | } |
5698 | #endif | 5836 | #endif |
5837 | } else { | ||
5838 | send_up: | ||
5839 | queue_rx_frame(skb); | ||
5840 | } | ||
5699 | dev->last_rx = jiffies; | 5841 | dev->last_rx = jiffies; |
5842 | aggregate: | ||
5700 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 5843 | atomic_dec(&sp->rx_bufs_left[ring_no]); |
5701 | return SUCCESS; | 5844 | return SUCCESS; |
5702 | } | 5845 | } |
@@ -5714,7 +5857,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5714 | * void. | 5857 | * void. |
5715 | */ | 5858 | */ |
5716 | 5859 | ||
5717 | void s2io_link(nic_t * sp, int link) | 5860 | static void s2io_link(nic_t * sp, int link) |
5718 | { | 5861 | { |
5719 | struct net_device *dev = (struct net_device *) sp->dev; | 5862 | struct net_device *dev = (struct net_device *) sp->dev; |
5720 | 5863 | ||
@@ -5739,7 +5882,7 @@ void s2io_link(nic_t * sp, int link) | |||
5739 | * returns the revision ID of the device. | 5882 | * returns the revision ID of the device. |
5740 | */ | 5883 | */ |
5741 | 5884 | ||
5742 | int get_xena_rev_id(struct pci_dev *pdev) | 5885 | static int get_xena_rev_id(struct pci_dev *pdev) |
5743 | { | 5886 | { |
5744 | u8 id = 0; | 5887 | u8 id = 0; |
5745 | int ret; | 5888 | int ret; |
@@ -5808,6 +5951,8 @@ module_param(indicate_max_pkts, int, 0); | |||
5808 | #endif | 5951 | #endif |
5809 | module_param(rxsync_frequency, int, 0); | 5952 | module_param(rxsync_frequency, int, 0); |
5810 | module_param(intr_type, int, 0); | 5953 | module_param(intr_type, int, 0); |
5954 | module_param(lro, int, 0); | ||
5955 | module_param(lro_max_pkts, int, 0); | ||
5811 | 5956 | ||
5812 | /** | 5957 | /** |
5813 | * s2io_init_nic - Initialization of the adapter . | 5958 | * s2io_init_nic - Initialization of the adapter . |
@@ -5939,6 +6084,7 @@ Defaulting to INTA\n"); | |||
5939 | else | 6084 | else |
5940 | sp->device_type = XFRAME_I_DEVICE; | 6085 | sp->device_type = XFRAME_I_DEVICE; |
5941 | 6086 | ||
6087 | sp->lro = lro; | ||
5942 | 6088 | ||
5943 | /* Initialize some PCI/PCI-X fields of the NIC. */ | 6089 | /* Initialize some PCI/PCI-X fields of the NIC. */ |
5944 | s2io_init_pci(sp); | 6090 | s2io_init_pci(sp); |
@@ -6242,6 +6388,10 @@ Defaulting to INTA\n"); | |||
6242 | DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been " | 6388 | DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been " |
6243 | "enabled\n",dev->name); | 6389 | "enabled\n",dev->name); |
6244 | 6390 | ||
6391 | if (sp->lro) | ||
6392 | DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", | ||
6393 | dev->name); | ||
6394 | |||
6245 | /* Initialize device name */ | 6395 | /* Initialize device name */ |
6246 | strcpy(sp->name, dev->name); | 6396 | strcpy(sp->name, dev->name); |
6247 | if (sp->device_type & XFRAME_II_DEVICE) | 6397 | if (sp->device_type & XFRAME_II_DEVICE) |
@@ -6344,7 +6494,7 @@ int __init s2io_starter(void) | |||
6344 | * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. | 6494 | * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. |
6345 | */ | 6495 | */ |
6346 | 6496 | ||
6347 | void s2io_closer(void) | 6497 | static void s2io_closer(void) |
6348 | { | 6498 | { |
6349 | pci_unregister_driver(&s2io_driver); | 6499 | pci_unregister_driver(&s2io_driver); |
6350 | DBG_PRINT(INIT_DBG, "cleanup done\n"); | 6500 | DBG_PRINT(INIT_DBG, "cleanup done\n"); |
@@ -6352,3 +6502,318 @@ void s2io_closer(void) | |||
6352 | 6502 | ||
6353 | module_init(s2io_starter); | 6503 | module_init(s2io_starter); |
6354 | module_exit(s2io_closer); | 6504 | module_exit(s2io_closer); |
6505 | |||
6506 | static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, | ||
6507 | struct tcphdr **tcp, RxD_t *rxdp) | ||
6508 | { | ||
6509 | int ip_off; | ||
6510 | u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; | ||
6511 | |||
6512 | if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { | ||
6513 | DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n", | ||
6514 | __FUNCTION__); | ||
6515 | return -1; | ||
6516 | } | ||
6517 | |||
6518 | /* TODO: | ||
6519 | * By default the VLAN field in the MAC is stripped by the card, if this | ||
6520 | * feature is turned off in rx_pa_cfg register, then the ip_off field | ||
6521 | * has to be shifted by a further 2 bytes | ||
6522 | */ | ||
6523 | switch (l2_type) { | ||
6524 | case 0: /* DIX type */ | ||
6525 | case 4: /* DIX type with VLAN */ | ||
6526 | ip_off = HEADER_ETHERNET_II_802_3_SIZE; | ||
6527 | break; | ||
6528 | /* LLC, SNAP etc are considered non-mergeable */ | ||
6529 | default: | ||
6530 | return -1; | ||
6531 | } | ||
6532 | |||
6533 | *ip = (struct iphdr *)((u8 *)buffer + ip_off); | ||
6534 | ip_len = (u8)((*ip)->ihl); | ||
6535 | ip_len <<= 2; | ||
6536 | *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len); | ||
6537 | |||
6538 | return 0; | ||
6539 | } | ||
6540 | |||
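check_L2_lro_capable() above only has to locate the IP and TCP headers inside the frame: for the DIX frame types it accepts, the IP header starts right after the fixed-size Ethernet header and the TCP header sits ihl*4 bytes further in. A userspace sketch of the same pointer arithmetic on a hand-built buffer; the 14-byte offset assumes a plain untagged DIX header, which is what HEADER_ETHERNET_II_802_3_SIZE appears to stand for here.

#include <stdio.h>
#include <string.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#define ETH_HDR_LEN 14	/* DIX Ethernet II header, no VLAN tag (assumed) */

/* Locate the IP and TCP headers in a received frame the same way the
 * driver does: fixed L2 offset, then ihl*4 bytes of IP header. */
static void locate_headers(unsigned char *frame, struct iphdr **ip, struct tcphdr **tcp)
{
	*ip = (struct iphdr *)(frame + ETH_HDR_LEN);
	*tcp = (struct tcphdr *)((unsigned char *)*ip + ((*ip)->ihl << 2));
}

int main(void)
{
	unsigned char frame[64] = { 0 };
	struct iphdr *ip;
	struct tcphdr *tcp;

	/* Fake a minimal 20-byte IP header (version 4, ihl 5) after the L2 header. */
	frame[ETH_HDR_LEN] = 0x45;
	locate_headers(frame, &ip, &tcp);
	printf("TCP header offset in frame: %ld\n",
	       (long)((unsigned char *)tcp - frame));	/* 14 + 20 = 34 */
	return 0;
}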
6541 | static int check_for_socket_match(lro_t *lro, struct iphdr *ip, | ||
6542 | struct tcphdr *tcp) | ||
6543 | { | ||
6544 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6545 | if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) || | ||
6546 | (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest)) | ||
6547 | return -1; | ||
6548 | return 0; | ||
6549 | } | ||
6550 | |||
6551 | static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp) | ||
6552 | { | ||
6553 | return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2)); | ||
6554 | } | ||
6555 | |||
6556 | static void initiate_new_session(lro_t *lro, u8 *l2h, | ||
6557 | struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len) | ||
6558 | { | ||
6559 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6560 | lro->l2h = l2h; | ||
6561 | lro->iph = ip; | ||
6562 | lro->tcph = tcp; | ||
6563 | lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); | ||
6564 | lro->tcp_ack = ntohl(tcp->ack_seq); | ||
6565 | lro->sg_num = 1; | ||
6566 | lro->total_len = ntohs(ip->tot_len); | ||
6567 | lro->frags_len = 0; | ||
6568 | /* | ||
6569 | * check if we saw TCP timestamp. Other consistency checks have | ||
6570 | * already been done. | ||
6571 | */ | ||
6572 | if (tcp->doff == 8) { | ||
6573 | u32 *ptr; | ||
6574 | ptr = (u32 *)(tcp+1); | ||
6575 | lro->saw_ts = 1; | ||
6576 | lro->cur_tsval = *(ptr+1); | ||
6577 | lro->cur_tsecr = *(ptr+2); | ||
6578 | } | ||
6579 | lro->in_use = 1; | ||
6580 | } | ||
6581 | |||
6582 | static void update_L3L4_header(nic_t *sp, lro_t *lro) | ||
6583 | { | ||
6584 | struct iphdr *ip = lro->iph; | ||
6585 | struct tcphdr *tcp = lro->tcph; | ||
6586 | u16 nchk; | ||
6587 | StatInfo_t *statinfo = sp->mac_control.stats_info; | ||
6588 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6589 | |||
6590 | /* Update L3 header */ | ||
6591 | ip->tot_len = htons(lro->total_len); | ||
6592 | ip->check = 0; | ||
6593 | nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl); | ||
6594 | ip->check = nchk; | ||
6595 | |||
6596 | /* Update L4 header */ | ||
6597 | tcp->ack_seq = lro->tcp_ack; | ||
6598 | tcp->window = lro->window; | ||
6599 | |||
6600 | /* Update tsecr field if this session has timestamps enabled */ | ||
6601 | if (lro->saw_ts) { | ||
6602 | u32 *ptr = (u32 *)(tcp + 1); | ||
6603 | *(ptr+2) = lro->cur_tsecr; | ||
6604 | } | ||
6605 | |||
6606 | /* Update counters required for calculation of | ||
6607 | * average no. of packets aggregated. | ||
6608 | */ | ||
6609 | statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num; | ||
6610 | statinfo->sw_stat.num_aggregations++; | ||
6611 | } | ||
6612 | |||
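After a flush, update_L3L4_header() above has to make the merged buffer look like one ordinary TCP/IP packet again: tot_len grows to the aggregated length, the IP header checksum is recomputed, and the ACK, window and tsecr fields carry the values taken from the newest segment. A userspace sketch of the IP-header part of that fix-up, with a plain RFC 1071 one's-complement sum standing in for ip_fast_csum():

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/ip.h>

/* Plain 16-bit one's-complement sum over the IP header; the driver uses
 * the arch-optimised ip_fast_csum() for the same job. */
static uint16_t ip_header_csum(const void *hdr, unsigned int ihl_words)
{
	const uint16_t *p = hdr;
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl_words * 2; i++)
		sum += p[i];
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	struct iphdr ip;
	unsigned int aggregated_len = 20 + 32 + 3 * 1448;	/* IP + TCP + merged payloads */

	memset(&ip, 0, sizeof(ip));
	ip.version = 4;
	ip.ihl = 5;
	ip.protocol = IPPROTO_TCP;

	/* Same order as the driver: write the new total length, zero the
	 * checksum field, then recompute it over the header. */
	ip.tot_len = htons(aggregated_len);
	ip.check = 0;
	ip.check = ip_header_csum(&ip, ip.ihl);

	printf("tot_len %u, ip checksum stored 0x%04x\n", ntohs(ip.tot_len), ip.check);
	return 0;
}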
6613 | static void aggregate_new_rx(lro_t *lro, struct iphdr *ip, | ||
6614 | struct tcphdr *tcp, u32 l4_pyld) | ||
6615 | { | ||
6616 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6617 | lro->total_len += l4_pyld; | ||
6618 | lro->frags_len += l4_pyld; | ||
6619 | lro->tcp_next_seq += l4_pyld; | ||
6620 | lro->sg_num++; | ||
6621 | |||
6622 | /* Update ack seq no. and window ad(from this pkt) in LRO object */ | ||
6623 | lro->tcp_ack = tcp->ack_seq; | ||
6624 | lro->window = tcp->window; | ||
6625 | |||
6626 | if (lro->saw_ts) { | ||
6627 | u32 *ptr; | ||
6628 | /* Update tsecr and tsval from this packet */ | ||
6629 | ptr = (u32 *) (tcp + 1); | ||
6630 | lro->cur_tsval = *(ptr + 1); | ||
6631 | lro->cur_tsecr = *(ptr + 2); | ||
6632 | } | ||
6633 | } | ||
6634 | |||
6635 | static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, | ||
6636 | struct tcphdr *tcp, u32 tcp_pyld_len) | ||
6637 | { | ||
6638 | u8 *ptr; | ||
6639 | |||
6640 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6641 | |||
6642 | if (!tcp_pyld_len) { | ||
6643 | /* Runt frame or a pure ack */ | ||
6644 | return -1; | ||
6645 | } | ||
6646 | |||
6647 | if (ip->ihl != 5) /* IP has options */ | ||
6648 | return -1; | ||
6649 | |||
6650 | if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || | ||
6651 | !tcp->ack) { | ||
6652 | /* | ||
6653 | * Currently recognize only the ack control word and | ||
6654 | * any other control field being set would result in | ||
6655 | * flushing the LRO session | ||
6656 | */ | ||
6657 | return -1; | ||
6658 | } | ||
6659 | |||
6660 | /* | ||
6661 | * Allow only one TCP timestamp option. Don't aggregate if | ||
6662 | * any other options are detected. | ||
6663 | */ | ||
6664 | if (tcp->doff != 5 && tcp->doff != 8) | ||
6665 | return -1; | ||
6666 | |||
6667 | if (tcp->doff == 8) { | ||
6668 | ptr = (u8 *)(tcp + 1); | ||
6669 | while (*ptr == TCPOPT_NOP) | ||
6670 | ptr++; | ||
6671 | if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP) | ||
6672 | return -1; | ||
6673 | |||
6674 | /* Ensure timestamp value increases monotonically */ | ||
6675 | if (l_lro) | ||
6676 | if (l_lro->cur_tsval > *((u32 *)(ptr+2))) | ||
6677 | return -1; | ||
6678 | |||
6679 | /* timestamp echo reply should be non-zero */ | ||
6680 | if (*((u32 *)(ptr+6)) == 0) | ||
6681 | return -1; | ||
6682 | } | ||
6683 | |||
6684 | return 0; | ||
6685 | } | ||
6686 | |||
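The doff checks above encode the only two TCP header sizes the driver will merge: 5 words (no options) or 8 words, i.e. 20 bytes of base header plus a 12-byte option block of NOP padding and a single timestamp option, whose tsval/tsecr then sit at fixed offsets behind it. A hedged sketch of that option walk and extraction; the length bounds checks are added here for safety and are not in the driver.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TCPOPT_NOP		1
#define TCPOPT_TIMESTAMP	8
#define TCPOLEN_TIMESTAMP	10

/* Extract tsval/tsecr from the option block that follows a 20-byte TCP
 * header, accepting only NOP padding before one timestamp option, as
 * verify_l3_l4_lro_capable() does. Returns 0 on success. */
static int parse_timestamp(const uint8_t *opts, size_t len,
			   uint32_t *tsval, uint32_t *tsecr)
{
	const uint8_t *ptr = opts;

	while (ptr < opts + len && *ptr == TCPOPT_NOP)
		ptr++;
	if (ptr + TCPOLEN_TIMESTAMP > opts + len)
		return -1;
	if (ptr[0] != TCPOPT_TIMESTAMP || ptr[1] != TCPOLEN_TIMESTAMP)
		return -1;
	memcpy(tsval, ptr + 2, 4);	/* left in network byte order, like the driver */
	memcpy(tsecr, ptr + 6, 4);
	return 0;
}

int main(void)
{
	/* NOP, NOP, TS(kind 8, len 10), tsval, tsecr -> 12 option bytes,
	 * which is exactly what makes doff == 8 (a 32-byte header). */
	uint8_t opts[12] = { 1, 1, 8, 10, 0, 0, 0, 1, 0, 0, 0, 2 };
	uint32_t tsval, tsecr;

	if (!parse_timestamp(opts, sizeof(opts), &tsval, &tsecr))
		printf("tsval raw 0x%08x, tsecr raw 0x%08x\n",
		       (unsigned)tsval, (unsigned)tsecr);
	return 0;
}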
6687 | static int | ||
6688 | s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, | ||
6689 | RxD_t *rxdp, nic_t *sp) | ||
6690 | { | ||
6691 | struct iphdr *ip; | ||
6692 | struct tcphdr *tcph; | ||
6693 | int ret = 0, i; | ||
6694 | |||
6695 | if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp, | ||
6696 | rxdp))) { | ||
6697 | DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n", | ||
6698 | ip->saddr, ip->daddr); | ||
6699 | } else { | ||
6700 | return ret; | ||
6701 | } | ||
6702 | |||
6703 | tcph = (struct tcphdr *)*tcp; | ||
6704 | *tcp_len = get_l4_pyld_length(ip, tcph); | ||
6705 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | ||
6706 | lro_t *l_lro = &sp->lro0_n[i]; | ||
6707 | if (l_lro->in_use) { | ||
6708 | if (check_for_socket_match(l_lro, ip, tcph)) | ||
6709 | continue; | ||
6710 | /* Sock pair matched */ | ||
6711 | *lro = l_lro; | ||
6712 | |||
6713 | if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) { | ||
6714 | DBG_PRINT(INFO_DBG, "%s:Out of order. expected " | ||
6715 | "0x%x, actual 0x%x\n", __FUNCTION__, | ||
6716 | (*lro)->tcp_next_seq, | ||
6717 | ntohl(tcph->seq)); | ||
6718 | |||
6719 | sp->mac_control.stats_info-> | ||
6720 | sw_stat.outof_sequence_pkts++; | ||
6721 | ret = 2; | ||
6722 | break; | ||
6723 | } | ||
6724 | |||
6725 | if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len)) | ||
6726 | ret = 1; /* Aggregate */ | ||
6727 | else | ||
6728 | ret = 2; /* Flush both */ | ||
6729 | break; | ||
6730 | } | ||
6731 | } | ||
6732 | |||
6733 | if (ret == 0) { | ||
6734 | /* Before searching for available LRO objects, | ||
6735 | * check if the pkt is L3/L4 aggregatable. If not | ||
6736 | * don't create new LRO session. Just send this | ||
6737 | * packet up. | ||
6738 | */ | ||
6739 | if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) { | ||
6740 | return 5; | ||
6741 | } | ||
6742 | |||
6743 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | ||
6744 | lro_t *l_lro = &sp->lro0_n[i]; | ||
6745 | if (!(l_lro->in_use)) { | ||
6746 | *lro = l_lro; | ||
6747 | ret = 3; /* Begin anew */ | ||
6748 | break; | ||
6749 | } | ||
6750 | } | ||
6751 | } | ||
6752 | |||
6753 | if (ret == 0) { /* sessions exceeded */ | ||
6754 | DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n", | ||
6755 | __FUNCTION__); | ||
6756 | *lro = NULL; | ||
6757 | return ret; | ||
6758 | } | ||
6759 | |||
6760 | switch (ret) { | ||
6761 | case 3: | ||
6762 | initiate_new_session(*lro, buffer, ip, tcph, *tcp_len); | ||
6763 | break; | ||
6764 | case 2: | ||
6765 | update_L3L4_header(sp, *lro); | ||
6766 | break; | ||
6767 | case 1: | ||
6768 | aggregate_new_rx(*lro, ip, tcph, *tcp_len); | ||
6769 | if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) { | ||
6770 | update_L3L4_header(sp, *lro); | ||
6771 | ret = 4; /* Flush the LRO */ | ||
6772 | } | ||
6773 | break; | ||
6774 | default: | ||
6775 | DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n", | ||
6776 | __FUNCTION__); | ||
6777 | break; | ||
6778 | } | ||
6779 | |||
6780 | return ret; | ||
6781 | } | ||
6782 | |||
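s2io_club_tcp_session() above steers rx_osm_handler() with bare integer return codes; reading the two switch statements together gives the mapping below. The enum names are editorial shorthand, not identifiers from the driver.

/* Editorial names for the bare integers returned by s2io_club_tcp_session()
 * and consumed by the switch in rx_osm_handler(). */
enum lro_verdict {
	LRO_SESSIONS_EXCEEDED	= 0,	/* no free lro_t slot: send packet up as-is */
	LRO_AGGREGATE		= 1,	/* in-order match: append payload to the session */
	LRO_FLUSH_BOTH		= 2,	/* out of order / not mergeable: flush session, send pkt */
	LRO_BEGIN_NEW		= 3,	/* no matching session: this skb becomes the parent */
	LRO_FLUSH_SESSION	= 4,	/* append hit lro_max_aggr_per_sess: flush after append */
	LRO_NOT_AGGREGATABLE	= 5,	/* first pkt fails L3/L4 checks: send packet up as-is */
};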
6783 | static void clear_lro_session(lro_t *lro) | ||
6784 | { | ||
6785 | static u16 lro_struct_size = sizeof(lro_t); | ||
6786 | |||
6787 | memset(lro, 0, lro_struct_size); | ||
6788 | } | ||
6789 | |||
6790 | static void queue_rx_frame(struct sk_buff *skb) | ||
6791 | { | ||
6792 | struct net_device *dev = skb->dev; | ||
6793 | |||
6794 | skb->protocol = eth_type_trans(skb, dev); | ||
6795 | #ifdef CONFIG_S2IO_NAPI | ||
6796 | netif_receive_skb(skb); | ||
6797 | #else | ||
6798 | netif_rx(skb); | ||
6799 | #endif | ||
6800 | } | ||
6801 | |||
6802 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, | ||
6803 | u32 tcp_len) | ||
6804 | { | ||
6805 | struct sk_buff *tmp, *first = lro->parent; | ||
6806 | |||
6807 | first->len += tcp_len; | ||
6808 | first->data_len = lro->frags_len; | ||
6809 | skb_pull(skb, (skb->len - tcp_len)); | ||
6810 | if ((tmp = skb_shinfo(first)->frag_list)) { | ||
6811 | while (tmp->next) | ||
6812 | tmp = tmp->next; | ||
6813 | tmp->next = skb; | ||
6814 | } | ||
6815 | else | ||
6816 | skb_shinfo(first)->frag_list = skb; | ||
6817 | sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; | ||
6818 | return; | ||
6819 | } | ||
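lro_append_pkt() above never copies payload: the parent skb keeps the headers, each merged segment is pulled down to its TCP payload and chained onto the parent's frag_list, and the parent's len/data_len are grown to match. A simplified userspace model of that chaining; the struct below is a stand-in for the handful of sk_buff fields involved, not the kernel's definition.

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-in for the few sk_buff fields the append path touches. */
struct fake_skb {
	unsigned int len;		/* total bytes owned by this skb */
	unsigned int data_len;		/* bytes living in the frag chain */
	struct fake_skb *frag_list;	/* singly linked list of merged segments */
	struct fake_skb *next;
};

/* Append one already-trimmed TCP payload to the parent, mirroring
 * lro_append_pkt(): grow the parent and walk to the tail of frag_list. */
static void append_payload(struct fake_skb *parent, struct fake_skb *seg,
			   unsigned int tcp_len, unsigned int frags_len)
{
	struct fake_skb *tmp;

	parent->len += tcp_len;
	parent->data_len = frags_len;
	if ((tmp = parent->frag_list)) {
		while (tmp->next)
			tmp = tmp->next;
		tmp->next = seg;
	} else {
		parent->frag_list = seg;
	}
}

int main(void)
{
	struct fake_skb parent = { .len = 54 };		/* headers + first payload */
	struct fake_skb s1 = { .len = 1448 }, s2 = { .len = 1448 };

	append_payload(&parent, &s1, 1448, 1448);
	append_payload(&parent, &s2, 1448, 2896);
	printf("parent len %u, data_len %u\n", parent.len, parent.data_len);
	return 0;
}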
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 852a6a899d07..0a0b5b29d81e 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -64,7 +64,7 @@ typedef enum xena_max_outstanding_splits { | |||
64 | #define INTR_DBG 4 | 64 | #define INTR_DBG 4 |
65 | 65 | ||
66 | /* Global variable that defines the present debug level of the driver. */ | 66 | /* Global variable that defines the present debug level of the driver. */ |
67 | int debug_level = ERR_DBG; /* Default level. */ | 67 | static int debug_level = ERR_DBG; |
68 | 68 | ||
69 | /* DEBUG message print. */ | 69 | /* DEBUG message print. */ |
70 | #define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) | 70 | #define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) |
@@ -78,6 +78,13 @@ int debug_level = ERR_DBG; /* Default level. */ | |||
78 | typedef struct { | 78 | typedef struct { |
79 | unsigned long long single_ecc_errs; | 79 | unsigned long long single_ecc_errs; |
80 | unsigned long long double_ecc_errs; | 80 | unsigned long long double_ecc_errs; |
81 | /* LRO statistics */ | ||
82 | unsigned long long clubbed_frms_cnt; | ||
83 | unsigned long long sending_both; | ||
84 | unsigned long long outof_sequence_pkts; | ||
85 | unsigned long long flush_max_pkts; | ||
86 | unsigned long long sum_avg_pkts_aggregated; | ||
87 | unsigned long long num_aggregations; | ||
81 | } swStat_t; | 88 | } swStat_t; |
82 | 89 | ||
83 | /* The statistics block of Xena */ | 90 | /* The statistics block of Xena */ |
@@ -268,7 +275,7 @@ typedef struct stat_block { | |||
268 | #define MAX_RX_RINGS 8 | 275 | #define MAX_RX_RINGS 8 |
269 | 276 | ||
270 | /* FIFO mappings for all possible number of fifos configured */ | 277 | /* FIFO mappings for all possible number of fifos configured */ |
271 | int fifo_map[][MAX_TX_FIFOS] = { | 278 | static int fifo_map[][MAX_TX_FIFOS] = { |
272 | {0, 0, 0, 0, 0, 0, 0, 0}, | 279 | {0, 0, 0, 0, 0, 0, 0, 0}, |
273 | {0, 0, 0, 0, 1, 1, 1, 1}, | 280 | {0, 0, 0, 0, 1, 1, 1, 1}, |
274 | {0, 0, 0, 1, 1, 1, 2, 2}, | 281 | {0, 0, 0, 1, 1, 1, 2, 2}, |
@@ -680,6 +687,24 @@ struct msix_info_st { | |||
680 | u64 data; | 687 | u64 data; |
681 | }; | 688 | }; |
682 | 689 | ||
690 | /* Data structure to represent a LRO session */ | ||
691 | typedef struct lro { | ||
692 | struct sk_buff *parent; | ||
693 | u8 *l2h; | ||
694 | struct iphdr *iph; | ||
695 | struct tcphdr *tcph; | ||
696 | u32 tcp_next_seq; | ||
697 | u32 tcp_ack; | ||
698 | int total_len; | ||
699 | int frags_len; | ||
700 | int sg_num; | ||
701 | int in_use; | ||
702 | u16 window; | ||
703 | u32 cur_tsval; | ||
704 | u32 cur_tsecr; | ||
705 | u8 saw_ts; | ||
706 | }lro_t; | ||
707 | |||
683 | /* Structure representing one instance of the NIC */ | 708 | /* Structure representing one instance of the NIC */ |
684 | struct s2io_nic { | 709 | struct s2io_nic { |
685 | int rxd_mode; | 710 | int rxd_mode; |
@@ -784,6 +809,13 @@ struct s2io_nic { | |||
784 | #define XFRAME_II_DEVICE 2 | 809 | #define XFRAME_II_DEVICE 2 |
785 | u8 device_type; | 810 | u8 device_type; |
786 | 811 | ||
812 | #define MAX_LRO_SESSIONS 32 | ||
813 | lro_t lro0_n[MAX_LRO_SESSIONS]; | ||
814 | unsigned long clubbed_frms_cnt; | ||
815 | unsigned long sending_both; | ||
816 | u8 lro; | ||
817 | u16 lro_max_aggr_per_sess; | ||
818 | |||
787 | #define INTA 0 | 819 | #define INTA 0 |
788 | #define MSI 1 | 820 | #define MSI 1 |
789 | #define MSI_X 2 | 821 | #define MSI_X 2 |
@@ -911,18 +943,16 @@ static void tx_intr_handler(fifo_info_t *fifo_data); | |||
911 | static void alarm_intr_handler(struct s2io_nic *sp); | 943 | static void alarm_intr_handler(struct s2io_nic *sp); |
912 | 944 | ||
913 | static int s2io_starter(void); | 945 | static int s2io_starter(void); |
914 | void s2io_closer(void); | ||
915 | static void s2io_tx_watchdog(struct net_device *dev); | 946 | static void s2io_tx_watchdog(struct net_device *dev); |
916 | static void s2io_tasklet(unsigned long dev_addr); | 947 | static void s2io_tasklet(unsigned long dev_addr); |
917 | static void s2io_set_multicast(struct net_device *dev); | 948 | static void s2io_set_multicast(struct net_device *dev); |
918 | static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp); | 949 | static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp); |
919 | void s2io_link(nic_t * sp, int link); | 950 | static void s2io_link(nic_t * sp, int link); |
920 | void s2io_reset(nic_t * sp); | ||
921 | #if defined(CONFIG_S2IO_NAPI) | 951 | #if defined(CONFIG_S2IO_NAPI) |
922 | static int s2io_poll(struct net_device *dev, int *budget); | 952 | static int s2io_poll(struct net_device *dev, int *budget); |
923 | #endif | 953 | #endif |
924 | static void s2io_init_pci(nic_t * sp); | 954 | static void s2io_init_pci(nic_t * sp); |
925 | int s2io_set_mac_addr(struct net_device *dev, u8 * addr); | 955 | static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); |
926 | static void s2io_alarm_handle(unsigned long data); | 956 | static void s2io_alarm_handle(unsigned long data); |
927 | static int s2io_enable_msi(nic_t *nic); | 957 | static int s2io_enable_msi(nic_t *nic); |
928 | static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs); | 958 | static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs); |
@@ -930,14 +960,19 @@ static irqreturn_t | |||
930 | s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs); | 960 | s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs); |
931 | static irqreturn_t | 961 | static irqreturn_t |
932 | s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs); | 962 | s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs); |
933 | int s2io_enable_msi_x(nic_t *nic); | ||
934 | static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); | 963 | static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); |
935 | static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); | 964 | static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); |
936 | static struct ethtool_ops netdev_ethtool_ops; | 965 | static struct ethtool_ops netdev_ethtool_ops; |
937 | static void s2io_set_link(unsigned long data); | 966 | static void s2io_set_link(unsigned long data); |
938 | int s2io_set_swapper(nic_t * sp); | 967 | static int s2io_set_swapper(nic_t * sp); |
939 | static void s2io_card_down(nic_t *nic); | 968 | static void s2io_card_down(nic_t *nic); |
940 | static int s2io_card_up(nic_t *nic); | 969 | static int s2io_card_up(nic_t *nic); |
941 | int get_xena_rev_id(struct pci_dev *pdev); | 970 | static int get_xena_rev_id(struct pci_dev *pdev); |
942 | void restore_xmsi_data(nic_t *nic); | 971 | static void restore_xmsi_data(nic_t *nic); |
972 | |||
973 | static int s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, RxD_t *rxdp, nic_t *sp); | ||
974 | static void clear_lro_session(lro_t *lro); | ||
975 | static void queue_rx_frame(struct sk_buff *skb); | ||
976 | static void update_L3L4_header(nic_t *sp, lro_t *lro); | ||
977 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); | ||
943 | #endif /* _S2IO_H */ | 978 | #endif /* _S2IO_H */ |
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 76139478c3df..66cf226c4ee3 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c | |||
@@ -59,7 +59,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n"; | |||
59 | #ifdef SB1000_DEBUG | 59 | #ifdef SB1000_DEBUG |
60 | static int sb1000_debug = SB1000_DEBUG; | 60 | static int sb1000_debug = SB1000_DEBUG; |
61 | #else | 61 | #else |
62 | static int sb1000_debug = 1; | 62 | static const int sb1000_debug = 1; |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | static const int SB1000_IO_EXTENT = 8; | 65 | static const int SB1000_IO_EXTENT = 8; |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index aa4ca1821759..f2be9f83f091 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2001,2002,2003 Broadcom Corporation | 2 | * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public License | 5 | * modify it under the terms of the GNU General Public License |
@@ -43,6 +43,7 @@ | |||
43 | #define SBMAC_ETH0_HWADDR "40:00:00:00:01:00" | 43 | #define SBMAC_ETH0_HWADDR "40:00:00:00:01:00" |
44 | #define SBMAC_ETH1_HWADDR "40:00:00:00:01:01" | 44 | #define SBMAC_ETH1_HWADDR "40:00:00:00:01:01" |
45 | #define SBMAC_ETH2_HWADDR "40:00:00:00:01:02" | 45 | #define SBMAC_ETH2_HWADDR "40:00:00:00:01:02" |
46 | #define SBMAC_ETH3_HWADDR "40:00:00:00:01:03" | ||
46 | #endif | 47 | #endif |
47 | 48 | ||
48 | 49 | ||
@@ -57,7 +58,7 @@ static char version1[] __devinitdata = | |||
57 | 58 | ||
58 | #define CONFIG_SBMAC_COALESCE | 59 | #define CONFIG_SBMAC_COALESCE |
59 | 60 | ||
60 | #define MAX_UNITS 3 /* More are supported, limit only on options */ | 61 | #define MAX_UNITS 4 /* More are supported, limit only on options */ |
61 | 62 | ||
62 | /* Time in jiffies before concluding the transmitter is hung. */ | 63 | /* Time in jiffies before concluding the transmitter is hung. */ |
63 | #define TX_TIMEOUT (2*HZ) | 64 | #define TX_TIMEOUT (2*HZ) |
@@ -85,11 +86,11 @@ MODULE_PARM_DESC(noisy_mii, "MII status messages"); | |||
85 | The media type is usually passed in 'options[]'. | 86 | The media type is usually passed in 'options[]'. |
86 | */ | 87 | */ |
87 | #ifdef MODULE | 88 | #ifdef MODULE |
88 | static int options[MAX_UNITS] = {-1, -1, -1}; | 89 | static int options[MAX_UNITS] = {-1, -1, -1, -1}; |
89 | module_param_array(options, int, NULL, S_IRUGO); | 90 | module_param_array(options, int, NULL, S_IRUGO); |
90 | MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS)); | 91 | MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS)); |
91 | 92 | ||
92 | static int full_duplex[MAX_UNITS] = {-1, -1, -1}; | 93 | static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1}; |
93 | module_param_array(full_duplex, int, NULL, S_IRUGO); | 94 | module_param_array(full_duplex, int, NULL, S_IRUGO); |
94 | MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS)); | 95 | MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS)); |
95 | #endif | 96 | #endif |
@@ -105,13 +106,26 @@ MODULE_PARM_DESC(int_timeout, "Timeout value"); | |||
105 | #endif | 106 | #endif |
106 | 107 | ||
107 | #include <asm/sibyte/sb1250.h> | 108 | #include <asm/sibyte/sb1250.h> |
108 | #include <asm/sibyte/sb1250_defs.h> | 109 | #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) |
110 | #include <asm/sibyte/bcm1480_regs.h> | ||
111 | #include <asm/sibyte/bcm1480_int.h> | ||
112 | #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) | ||
109 | #include <asm/sibyte/sb1250_regs.h> | 113 | #include <asm/sibyte/sb1250_regs.h> |
110 | #include <asm/sibyte/sb1250_mac.h> | ||
111 | #include <asm/sibyte/sb1250_dma.h> | ||
112 | #include <asm/sibyte/sb1250_int.h> | 114 | #include <asm/sibyte/sb1250_int.h> |
115 | #else | ||
116 | #error invalid SiByte MAC configuation | ||
117 | #endif | ||
113 | #include <asm/sibyte/sb1250_scd.h> | 118 | #include <asm/sibyte/sb1250_scd.h> |
119 | #include <asm/sibyte/sb1250_mac.h> | ||
120 | #include <asm/sibyte/sb1250_dma.h> | ||
114 | 121 | ||
122 | #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) | ||
123 | #define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2)) | ||
124 | #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) | ||
125 | #define UNIT_INT(n) (K_INT_MAC_0 + (n)) | ||
126 | #else | ||
127 | #error invalid SiByte MAC configuation | ||
128 | #endif | ||
115 | 129 | ||
116 | /********************************************************************** | 130 | /********************************************************************** |
117 | * Simple types | 131 | * Simple types |
@@ -1476,10 +1490,10 @@ static void sbmac_channel_start(struct sbmac_softc *s) | |||
1476 | * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above | 1490 | * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above |
1477 | * Use a larger RD_THRSH for gigabit | 1491 | * Use a larger RD_THRSH for gigabit |
1478 | */ | 1492 | */ |
1479 | if (periph_rev >= 2) | 1493 | if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) |
1480 | th_value = 64; | ||
1481 | else | ||
1482 | th_value = 28; | 1494 | th_value = 28; |
1495 | else | ||
1496 | th_value = 64; | ||
1483 | 1497 | ||
1484 | fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ | 1498 | fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ |
1485 | ((s->sbm_speed == sbmac_speed_1000) | 1499 | ((s->sbm_speed == sbmac_speed_1000) |
@@ -1589,13 +1603,17 @@ static void sbmac_channel_start(struct sbmac_softc *s) | |||
1589 | * Turn on the rest of the bits in the enable register | 1603 | * Turn on the rest of the bits in the enable register |
1590 | */ | 1604 | */ |
1591 | 1605 | ||
1606 | #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) | ||
1607 | __raw_writeq(M_MAC_RXDMA_EN0 | | ||
1608 | M_MAC_TXDMA_EN0, s->sbm_macenable); | ||
1609 | #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) | ||
1592 | __raw_writeq(M_MAC_RXDMA_EN0 | | 1610 | __raw_writeq(M_MAC_RXDMA_EN0 | |
1593 | M_MAC_TXDMA_EN0 | | 1611 | M_MAC_TXDMA_EN0 | |
1594 | M_MAC_RX_ENABLE | | 1612 | M_MAC_RX_ENABLE | |
1595 | M_MAC_TX_ENABLE, s->sbm_macenable); | 1613 | M_MAC_TX_ENABLE, s->sbm_macenable); |
1596 | 1614 | #else | |
1597 | 1615 | #error invalid SiByte MAC configuation | |
1598 | 1616 | #endif | |
1599 | 1617 | ||
1600 | #ifdef CONFIG_SBMAC_COALESCE | 1618 | #ifdef CONFIG_SBMAC_COALESCE |
1601 | /* | 1619 | /* |
@@ -1786,11 +1804,12 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) | |||
1786 | reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); | 1804 | reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); |
1787 | __raw_writeq(reg, sc->sbm_rxfilter); | 1805 | __raw_writeq(reg, sc->sbm_rxfilter); |
1788 | 1806 | ||
1789 | /* read system identification to determine revision */ | 1807 | /* BCM1250 pass1 didn't have hardware checksum. Everything |
1790 | if (periph_rev >= 2) { | 1808 | later does. */ |
1791 | sc->rx_hw_checksum = ENABLE; | 1809 | if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) { |
1792 | } else { | ||
1793 | sc->rx_hw_checksum = DISABLE; | 1810 | sc->rx_hw_checksum = DISABLE; |
1811 | } else { | ||
1812 | sc->rx_hw_checksum = ENABLE; | ||
1794 | } | 1813 | } |
1795 | } | 1814 | } |
1796 | 1815 | ||
@@ -2220,7 +2239,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc) | |||
2220 | 2239 | ||
2221 | 2240 | ||
2222 | 2241 | ||
2223 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) | 2242 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR) |
2224 | /********************************************************************** | 2243 | /********************************************************************** |
2225 | * SBMAC_PARSE_XDIGIT(str) | 2244 | * SBMAC_PARSE_XDIGIT(str) |
2226 | * | 2245 | * |
@@ -2792,7 +2811,7 @@ static int sbmac_close(struct net_device *dev) | |||
2792 | 2811 | ||
2793 | 2812 | ||
2794 | 2813 | ||
2795 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) | 2814 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR) |
2796 | static void | 2815 | static void |
2797 | sbmac_setup_hwaddr(int chan,char *addr) | 2816 | sbmac_setup_hwaddr(int chan,char *addr) |
2798 | { | 2817 | { |
@@ -2818,25 +2837,7 @@ sbmac_init_module(void) | |||
2818 | unsigned long port; | 2837 | unsigned long port; |
2819 | int chip_max_units; | 2838 | int chip_max_units; |
2820 | 2839 | ||
2821 | /* | 2840 | /* Set the number of available units based on the SOC type. */ |
2822 | * For bringup when not using the firmware, we can pre-fill | ||
2823 | * the MAC addresses using the environment variables | ||
2824 | * specified in this file (or maybe from the config file?) | ||
2825 | */ | ||
2826 | #ifdef SBMAC_ETH0_HWADDR | ||
2827 | sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR); | ||
2828 | #endif | ||
2829 | #ifdef SBMAC_ETH1_HWADDR | ||
2830 | sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR); | ||
2831 | #endif | ||
2832 | #ifdef SBMAC_ETH2_HWADDR | ||
2833 | sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR); | ||
2834 | #endif | ||
2835 | |||
2836 | /* | ||
2837 | * Walk through the Ethernet controllers and find | ||
2838 | * those who have their MAC addresses set. | ||
2839 | */ | ||
2840 | switch (soc_type) { | 2841 | switch (soc_type) { |
2841 | case K_SYS_SOC_TYPE_BCM1250: | 2842 | case K_SYS_SOC_TYPE_BCM1250: |
2842 | case K_SYS_SOC_TYPE_BCM1250_ALT: | 2843 | case K_SYS_SOC_TYPE_BCM1250_ALT: |
@@ -2848,6 +2849,10 @@ sbmac_init_module(void) | |||
2848 | case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ | 2849 | case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ |
2849 | chip_max_units = 2; | 2850 | chip_max_units = 2; |
2850 | break; | 2851 | break; |
2852 | case K_SYS_SOC_TYPE_BCM1x55: | ||
2853 | case K_SYS_SOC_TYPE_BCM1x80: | ||
2854 | chip_max_units = 4; | ||
2855 | break; | ||
2851 | default: | 2856 | default: |
2852 | chip_max_units = 0; | 2857 | chip_max_units = 0; |
2853 | break; | 2858 | break; |
@@ -2855,6 +2860,32 @@ sbmac_init_module(void) | |||
2855 | if (chip_max_units > MAX_UNITS) | 2860 | if (chip_max_units > MAX_UNITS) |
2856 | chip_max_units = MAX_UNITS; | 2861 | chip_max_units = MAX_UNITS; |
2857 | 2862 | ||
2863 | /* | ||
2864 | * For bringup when not using the firmware, we can pre-fill | ||
2865 | * the MAC addresses using the environment variables | ||
2866 | * specified in this file (or maybe from the config file?) | ||
2867 | */ | ||
2868 | #ifdef SBMAC_ETH0_HWADDR | ||
2869 | if (chip_max_units > 0) | ||
2870 | sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR); | ||
2871 | #endif | ||
2872 | #ifdef SBMAC_ETH1_HWADDR | ||
2873 | if (chip_max_units > 1) | ||
2874 | sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR); | ||
2875 | #endif | ||
2876 | #ifdef SBMAC_ETH2_HWADDR | ||
2877 | if (chip_max_units > 2) | ||
2878 | sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR); | ||
2879 | #endif | ||
2880 | #ifdef SBMAC_ETH3_HWADDR | ||
2881 | if (chip_max_units > 3) | ||
2882 | sbmac_setup_hwaddr(3,SBMAC_ETH3_HWADDR); | ||
2883 | #endif | ||
2884 | |||
2885 | /* | ||
2886 | * Walk through the Ethernet controllers and find | ||
2887 | * those who have their MAC addresses set. | ||
2888 | */ | ||
2858 | for (idx = 0; idx < chip_max_units; idx++) { | 2889 | for (idx = 0; idx < chip_max_units; idx++) { |
2859 | 2890 | ||
2860 | /* | 2891 | /* |
@@ -2886,7 +2917,7 @@ sbmac_init_module(void) | |||
2886 | 2917 | ||
2887 | printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); | 2918 | printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); |
2888 | 2919 | ||
2889 | dev->irq = K_INT_MAC_0 + idx; | 2920 | dev->irq = UNIT_INT(idx); |
2890 | dev->base_addr = port; | 2921 | dev->base_addr = port; |
2891 | dev->mem_end = 0; | 2922 | dev->mem_end = 0; |
2892 | if (sbmac_init(dev, idx)) { | 2923 | if (sbmac_init(dev, idx)) { |
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index 79dca398f3ac..bcef03feb2fc 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c | |||
@@ -46,6 +46,7 @@ static const char version[] = | |||
46 | #include <linux/etherdevice.h> | 46 | #include <linux/etherdevice.h> |
47 | #include <linux/skbuff.h> | 47 | #include <linux/skbuff.h> |
48 | #include <linux/bitops.h> | 48 | #include <linux/bitops.h> |
49 | #include <linux/jiffies.h> | ||
49 | 50 | ||
50 | #include <asm/system.h> | 51 | #include <asm/system.h> |
51 | #include <asm/io.h> | 52 | #include <asm/io.h> |
@@ -699,7 +700,7 @@ static void hardware_send_packet(struct net_device * dev, char *buf, int length) | |||
699 | int ioaddr = dev->base_addr; | 700 | int ioaddr = dev->base_addr; |
700 | int status = inw(SEEQ_STATUS); | 701 | int status = inw(SEEQ_STATUS); |
701 | int transmit_ptr = 0; | 702 | int transmit_ptr = 0; |
702 | int tmp; | 703 | unsigned long tmp; |
703 | 704 | ||
704 | if (net_debug>4) { | 705 | if (net_debug>4) { |
705 | printk("%s: send 0x%04x\n",dev->name,length); | 706 | printk("%s: send 0x%04x\n",dev->name,length); |
@@ -724,7 +725,7 @@ static void hardware_send_packet(struct net_device * dev, char *buf, int length) | |||
724 | 725 | ||
725 | /* drain FIFO */ | 726 | /* drain FIFO */ |
726 | tmp = jiffies; | 727 | tmp = jiffies; |
727 | while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && (jiffies - tmp < HZ)) | 728 | while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && time_before(jiffies, tmp + HZ)) |
728 | mb(); | 729 | mb(); |
729 | 730 | ||
730 | /* doit ! */ | 731 | /* doit ! */ |
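The conversion above (and the matching shaper change at the end of this series) replaces open-coded jiffies arithmetic with time_before()/time_after(), alongside widening tmp to unsigned long. Those macros compare through a signed difference of the unsigned counters, so a one-second timeout keeps working even when jiffies rolls over between sampling tmp and testing it, whereas a direct `jiffies < tmp + HZ` style test fails once the two values straddle the wrap. A small userspace demonstration, with the macros modelled on the kernel's definitions:

#include <stdio.h>
#include <limits.h>

/* Modelled on the kernel's jiffies helpers: the subtraction wraps in
 * unsigned arithmetic and the result is reinterpreted as signed, so the
 * comparison stays correct when the counter rolls over. */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)

int main(void)
{
	unsigned long hz = 100;
	unsigned long tmp = ULONG_MAX - 10;	/* jiffies sampled just before the wrap */
	unsigned long deadline = tmp + hz;	/* wraps around to a small value */
	unsigned long now = ULONG_MAX - 5;	/* only 5 ticks have elapsed */

	/* The naive comparison claims the window has already expired... */
	printf("naive now < deadline       : %d\n", now < deadline);
	/* ...while time_before() correctly says we are still inside it. */
	printf("time_before(now, deadline) : %d\n", time_before(now, deadline));
	return 0;
}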
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index a4614df38a90..f95a5b0223fb 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c | |||
@@ -3,6 +3,9 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) | 4 | * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) |
5 | */ | 5 | */ |
6 | |||
7 | #undef DEBUG | ||
8 | |||
6 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
7 | #include <linux/module.h> | 10 | #include <linux/module.h> |
8 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
@@ -59,8 +62,6 @@ static char *sgiseeqstr = "SGI Seeq8003"; | |||
59 | sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ | 62 | sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ |
60 | sp->tx_old - sp->tx_new - 1) | 63 | sp->tx_old - sp->tx_new - 1) |
61 | 64 | ||
62 | #define DEBUG | ||
63 | |||
64 | struct sgiseeq_rx_desc { | 65 | struct sgiseeq_rx_desc { |
65 | volatile struct hpc_dma_desc rdma; | 66 | volatile struct hpc_dma_desc rdma; |
66 | volatile signed int buf_vaddr; | 67 | volatile signed int buf_vaddr; |
@@ -209,7 +210,7 @@ static int seeq_init_ring(struct net_device *dev) | |||
209 | static struct sgiseeq_private *gpriv; | 210 | static struct sgiseeq_private *gpriv; |
210 | static struct net_device *gdev; | 211 | static struct net_device *gdev; |
211 | 212 | ||
212 | void sgiseeq_dump_rings(void) | 213 | static void sgiseeq_dump_rings(void) |
213 | { | 214 | { |
214 | static int once; | 215 | static int once; |
215 | struct sgiseeq_rx_desc *r = gpriv->rx_desc; | 216 | struct sgiseeq_rx_desc *r = gpriv->rx_desc; |
@@ -311,9 +312,9 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp | |||
311 | struct sgiseeq_regs *sregs) | 312 | struct sgiseeq_regs *sregs) |
312 | { | 313 | { |
313 | struct sgiseeq_rx_desc *rd; | 314 | struct sgiseeq_rx_desc *rd; |
314 | struct sk_buff *skb = 0; | 315 | struct sk_buff *skb = NULL; |
315 | unsigned char pkt_status; | 316 | unsigned char pkt_status; |
316 | unsigned char *pkt_pointer = 0; | 317 | unsigned char *pkt_pointer = NULL; |
317 | int len = 0; | 318 | int len = 0; |
318 | unsigned int orig_end = PREV_RX(sp->rx_new); | 319 | unsigned int orig_end = PREV_RX(sp->rx_new); |
319 | 320 | ||
@@ -515,12 +516,6 @@ static inline int sgiseeq_reset(struct net_device *dev) | |||
515 | return 0; | 516 | return 0; |
516 | } | 517 | } |
517 | 518 | ||
518 | void sgiseeq_my_reset(void) | ||
519 | { | ||
520 | printk("RESET!\n"); | ||
521 | sgiseeq_reset(gdev); | ||
522 | } | ||
523 | |||
524 | static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | 519 | static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) |
525 | { | 520 | { |
526 | struct sgiseeq_private *sp = netdev_priv(dev); | 521 | struct sgiseeq_private *sp = netdev_priv(dev); |
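The sgiseeq hunk makes its debug switch explicit by placing "#undef DEBUG" above the includes instead of a stray "#define DEBUG" in the middle of the file, initialises pointers with NULL rather than 0, and marks file-local helpers static. Debug gating of this kind is commonly wrapped in a macro like the hedged sketch below (the macro name and message are illustrative, not taken from this driver):

    #undef DEBUG                    /* flip to #define DEBUG for bring-up */

    #include <linux/kernel.h>       /* printk */
    #include <linux/skbuff.h>       /* struct sk_buff */

    #ifdef DEBUG
    #define DPRINTK(fmt, args...)   printk(KERN_DEBUG "drv: " fmt, ##args)
    #else
    #define DPRINTK(fmt, args...)   do { } while (0)
    #endif

    /* With DEBUG undefined the call compiles away entirely. */
    static void example_rx_path(struct sk_buff *skb)
    {
            if (skb == NULL)                /* NULL, not 0, for pointers */
                    return;
            DPRINTK("got skb %p, len %u\n", skb, skb->len);
    }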
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c index 221354eea21f..88e212043a43 100644 --- a/drivers/net/shaper.c +++ b/drivers/net/shaper.c | |||
@@ -83,6 +83,7 @@ | |||
83 | #include <linux/if_arp.h> | 83 | #include <linux/if_arp.h> |
84 | #include <linux/init.h> | 84 | #include <linux/init.h> |
85 | #include <linux/if_shaper.h> | 85 | #include <linux/if_shaper.h> |
86 | #include <linux/jiffies.h> | ||
86 | 87 | ||
87 | #include <net/dst.h> | 88 | #include <net/dst.h> |
88 | #include <net/arp.h> | 89 | #include <net/arp.h> |
@@ -168,7 +169,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
168 | /* | 169 | /* |
169 | * Queue over time. Spill packet. | 170 | * Queue over time. Spill packet. |
170 | */ | 171 | */ |
171 | if(SHAPERCB(skb)->shapeclock-jiffies > SHAPER_LATENCY) { | 172 | if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) { |
172 | dev_kfree_skb(skb); | 173 | dev_kfree_skb(skb); |
173 | shaper->stats.tx_dropped++; | 174 | shaper->stats.tx_dropped++; |
174 | } else | 175 | } else |
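shaper.c uses the mirror macro: the packet's shapeclock stamp lies in the future, so time_after() asks whether that instant is more than SHAPER_LATENCY beyond now, again wrap-safe. The same test in isolation, with hypothetical names:

    #include <linux/jiffies.h>      /* jiffies, time_after() */

    /* Sketch: reject work whose scheduled jiffies timestamp is further
     * ahead than the allowed latency budget (both in jiffies). */
    static int too_far_ahead(unsigned long when, unsigned long max_latency)
    {
            return time_after(when, jiffies + max_latency);
    }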
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index ed4bc91638d2..31dd3f036fa8 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -366,7 +366,7 @@ static const u32 sis190_intr_mask = | |||
366 | * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | 366 | * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). |
367 | * The chips use a 64 element hash table based on the Ethernet CRC. | 367 | * The chips use a 64 element hash table based on the Ethernet CRC. |
368 | */ | 368 | */ |
369 | static int multicast_filter_limit = 32; | 369 | static const int multicast_filter_limit = 32; |
370 | 370 | ||
371 | static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) | 371 | static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) |
372 | { | 372 | { |
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 7a952fe60be2..a1cb07cdb60f 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -100,7 +100,7 @@ enum { | |||
100 | SIS_900 = 0, | 100 | SIS_900 = 0, |
101 | SIS_7016 | 101 | SIS_7016 |
102 | }; | 102 | }; |
103 | static char * card_names[] = { | 103 | static const char * card_names[] = { |
104 | "SiS 900 PCI Fast Ethernet", | 104 | "SiS 900 PCI Fast Ethernet", |
105 | "SiS 7016 PCI Fast Ethernet" | 105 | "SiS 7016 PCI Fast Ethernet" |
106 | }; | 106 | }; |
@@ -115,7 +115,7 @@ MODULE_DEVICE_TABLE (pci, sis900_pci_tbl); | |||
115 | 115 | ||
116 | static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex); | 116 | static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex); |
117 | 117 | ||
118 | static struct mii_chip_info { | 118 | static const struct mii_chip_info { |
119 | const char * name; | 119 | const char * name; |
120 | u16 phy_id0; | 120 | u16 phy_id0; |
121 | u16 phy_id1; | 121 | u16 phy_id1; |
@@ -400,7 +400,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, | |||
400 | void *ring_space; | 400 | void *ring_space; |
401 | long ioaddr; | 401 | long ioaddr; |
402 | int i, ret; | 402 | int i, ret; |
403 | char *card_name = card_names[pci_id->driver_data]; | 403 | const char *card_name = card_names[pci_id->driver_data]; |
404 | const char *dev_name = pci_name(pci_dev); | 404 | const char *dev_name = pci_name(pci_dev); |
405 | 405 | ||
406 | /* when built into the kernel, we only print version if device is found */ | 406 | /* when built into the kernel, we only print version if device is found */ |
@@ -1275,7 +1275,7 @@ static void sis900_timer(unsigned long data) | |||
1275 | struct net_device *net_dev = (struct net_device *)data; | 1275 | struct net_device *net_dev = (struct net_device *)data; |
1276 | struct sis900_private *sis_priv = net_dev->priv; | 1276 | struct sis900_private *sis_priv = net_dev->priv; |
1277 | struct mii_phy *mii_phy = sis_priv->mii; | 1277 | struct mii_phy *mii_phy = sis_priv->mii; |
1278 | static int next_tick = 5*HZ; | 1278 | static const int next_tick = 5*HZ; |
1279 | u16 status; | 1279 | u16 status; |
1280 | 1280 | ||
1281 | if (!sis_priv->autong_complete){ | 1281 | if (!sis_priv->autong_complete){ |
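The sis190 and sis900 hunks above move read-only driver data into const storage: the card-name strings, the MII chip table, multicast_filter_limit and the timer interval all become const, so they land in .rodata and accidental writes become compile errors. A generic illustration of the same idea (table contents are made up, not the driver's):

    /* Sketch: read-only lookup tables belong in const storage. */
    struct chip_info {
            const char      *name;
            unsigned short  phy_id0;
            unsigned short  phy_id1;
    };

    static const struct chip_info chip_table[] = {
            { "Example PHY A", 0x0001, 0x0010 },
            { "Example PHY B", 0x0001, 0x0020 },
    };

    static const char *card_labels[] = {
            "Example Fast Ethernet",
            "Example Gigabit Ethernet",
    };

    /* Callers take const-qualified pointers, matching the probe-path
     * change from "char *card_name" to "const char *card_name". */
    static const char *lookup_label(unsigned int idx)
    {
            return card_labels[idx];
    }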
diff --git a/drivers/net/sk98lin/h/skaddr.h b/drivers/net/sk98lin/h/skaddr.h index 3a2ea4a4b539..423ad063d09b 100644 --- a/drivers/net/sk98lin/h/skaddr.h +++ b/drivers/net/sk98lin/h/skaddr.h | |||
@@ -236,18 +236,6 @@ extern int SkAddrMcClear( | |||
236 | SK_U32 PortNumber, | 236 | SK_U32 PortNumber, |
237 | int Flags); | 237 | int Flags); |
238 | 238 | ||
239 | extern int SkAddrXmacMcClear( | ||
240 | SK_AC *pAC, | ||
241 | SK_IOC IoC, | ||
242 | SK_U32 PortNumber, | ||
243 | int Flags); | ||
244 | |||
245 | extern int SkAddrGmacMcClear( | ||
246 | SK_AC *pAC, | ||
247 | SK_IOC IoC, | ||
248 | SK_U32 PortNumber, | ||
249 | int Flags); | ||
250 | |||
251 | extern int SkAddrMcAdd( | 239 | extern int SkAddrMcAdd( |
252 | SK_AC *pAC, | 240 | SK_AC *pAC, |
253 | SK_IOC IoC, | 241 | SK_IOC IoC, |
@@ -255,35 +243,11 @@ extern int SkAddrMcAdd( | |||
255 | SK_MAC_ADDR *pMc, | 243 | SK_MAC_ADDR *pMc, |
256 | int Flags); | 244 | int Flags); |
257 | 245 | ||
258 | extern int SkAddrXmacMcAdd( | ||
259 | SK_AC *pAC, | ||
260 | SK_IOC IoC, | ||
261 | SK_U32 PortNumber, | ||
262 | SK_MAC_ADDR *pMc, | ||
263 | int Flags); | ||
264 | |||
265 | extern int SkAddrGmacMcAdd( | ||
266 | SK_AC *pAC, | ||
267 | SK_IOC IoC, | ||
268 | SK_U32 PortNumber, | ||
269 | SK_MAC_ADDR *pMc, | ||
270 | int Flags); | ||
271 | |||
272 | extern int SkAddrMcUpdate( | 246 | extern int SkAddrMcUpdate( |
273 | SK_AC *pAC, | 247 | SK_AC *pAC, |
274 | SK_IOC IoC, | 248 | SK_IOC IoC, |
275 | SK_U32 PortNumber); | 249 | SK_U32 PortNumber); |
276 | 250 | ||
277 | extern int SkAddrXmacMcUpdate( | ||
278 | SK_AC *pAC, | ||
279 | SK_IOC IoC, | ||
280 | SK_U32 PortNumber); | ||
281 | |||
282 | extern int SkAddrGmacMcUpdate( | ||
283 | SK_AC *pAC, | ||
284 | SK_IOC IoC, | ||
285 | SK_U32 PortNumber); | ||
286 | |||
287 | extern int SkAddrOverride( | 251 | extern int SkAddrOverride( |
288 | SK_AC *pAC, | 252 | SK_AC *pAC, |
289 | SK_IOC IoC, | 253 | SK_IOC IoC, |
@@ -297,18 +261,6 @@ extern int SkAddrPromiscuousChange( | |||
297 | SK_U32 PortNumber, | 261 | SK_U32 PortNumber, |
298 | int NewPromMode); | 262 | int NewPromMode); |
299 | 263 | ||
300 | extern int SkAddrXmacPromiscuousChange( | ||
301 | SK_AC *pAC, | ||
302 | SK_IOC IoC, | ||
303 | SK_U32 PortNumber, | ||
304 | int NewPromMode); | ||
305 | |||
306 | extern int SkAddrGmacPromiscuousChange( | ||
307 | SK_AC *pAC, | ||
308 | SK_IOC IoC, | ||
309 | SK_U32 PortNumber, | ||
310 | int NewPromMode); | ||
311 | |||
312 | #ifndef SK_SLIM | 264 | #ifndef SK_SLIM |
313 | extern int SkAddrSwap( | 265 | extern int SkAddrSwap( |
314 | SK_AC *pAC, | 266 | SK_AC *pAC, |
diff --git a/drivers/net/sk98lin/h/skcsum.h b/drivers/net/sk98lin/h/skcsum.h index 2b94adb93331..6e256bd9a28c 100644 --- a/drivers/net/sk98lin/h/skcsum.h +++ b/drivers/net/sk98lin/h/skcsum.h | |||
@@ -203,12 +203,6 @@ extern SKCS_STATUS SkCsGetReceiveInfo( | |||
203 | unsigned Checksum2, | 203 | unsigned Checksum2, |
204 | int NetNumber); | 204 | int NetNumber); |
205 | 205 | ||
206 | extern void SkCsGetSendInfo( | ||
207 | SK_AC *pAc, | ||
208 | void *pIpHeader, | ||
209 | SKCS_PACKET_INFO *pPacketInfo, | ||
210 | int NetNumber); | ||
211 | |||
212 | extern void SkCsSetReceiveFlags( | 206 | extern void SkCsSetReceiveFlags( |
213 | SK_AC *pAc, | 207 | SK_AC *pAc, |
214 | unsigned ReceiveFlags, | 208 | unsigned ReceiveFlags, |
diff --git a/drivers/net/sk98lin/h/skgeinit.h b/drivers/net/sk98lin/h/skgeinit.h index 184f47c5a60f..143e635ec24d 100644 --- a/drivers/net/sk98lin/h/skgeinit.h +++ b/drivers/net/sk98lin/h/skgeinit.h | |||
@@ -464,12 +464,6 @@ typedef struct s_GeInit { | |||
464 | /* | 464 | /* |
465 | * public functions in skgeinit.c | 465 | * public functions in skgeinit.c |
466 | */ | 466 | */ |
467 | extern void SkGePollRxD( | ||
468 | SK_AC *pAC, | ||
469 | SK_IOC IoC, | ||
470 | int Port, | ||
471 | SK_BOOL PollRxD); | ||
472 | |||
473 | extern void SkGePollTxD( | 467 | extern void SkGePollTxD( |
474 | SK_AC *pAC, | 468 | SK_AC *pAC, |
475 | SK_IOC IoC, | 469 | SK_IOC IoC, |
@@ -522,10 +516,6 @@ extern void SkGeXmitLED( | |||
522 | int Led, | 516 | int Led, |
523 | int Mode); | 517 | int Mode); |
524 | 518 | ||
525 | extern void SkGeInitRamIface( | ||
526 | SK_AC *pAC, | ||
527 | SK_IOC IoC); | ||
528 | |||
529 | extern int SkGeInitAssignRamToQueues( | 519 | extern int SkGeInitAssignRamToQueues( |
530 | SK_AC *pAC, | 520 | SK_AC *pAC, |
531 | int ActivePort, | 521 | int ActivePort, |
@@ -549,11 +539,6 @@ extern void SkMacHardRst( | |||
549 | SK_IOC IoC, | 539 | SK_IOC IoC, |
550 | int Port); | 540 | int Port); |
551 | 541 | ||
552 | extern void SkMacClearRst( | ||
553 | SK_AC *pAC, | ||
554 | SK_IOC IoC, | ||
555 | int Port); | ||
556 | |||
557 | extern void SkXmInitMac( | 542 | extern void SkXmInitMac( |
558 | SK_AC *pAC, | 543 | SK_AC *pAC, |
559 | SK_IOC IoC, | 544 | SK_IOC IoC, |
@@ -580,11 +565,6 @@ extern void SkMacFlushTxFifo( | |||
580 | SK_IOC IoC, | 565 | SK_IOC IoC, |
581 | int Port); | 566 | int Port); |
582 | 567 | ||
583 | extern void SkMacFlushRxFifo( | ||
584 | SK_AC *pAC, | ||
585 | SK_IOC IoC, | ||
586 | int Port); | ||
587 | |||
588 | extern void SkMacIrq( | 568 | extern void SkMacIrq( |
589 | SK_AC *pAC, | 569 | SK_AC *pAC, |
590 | SK_IOC IoC, | 570 | SK_IOC IoC, |
@@ -601,12 +581,6 @@ extern void SkMacAutoNegLipaPhy( | |||
601 | int Port, | 581 | int Port, |
602 | SK_U16 IStatus); | 582 | SK_U16 IStatus); |
603 | 583 | ||
604 | extern void SkMacSetRxTxEn( | ||
605 | SK_AC *pAC, | ||
606 | SK_IOC IoC, | ||
607 | int Port, | ||
608 | int Para); | ||
609 | |||
610 | extern int SkMacRxTxEnable( | 584 | extern int SkMacRxTxEnable( |
611 | SK_AC *pAC, | 585 | SK_AC *pAC, |
612 | SK_IOC IoC, | 586 | SK_IOC IoC, |
@@ -659,16 +633,6 @@ extern void SkXmClrExactAddr( | |||
659 | int StartNum, | 633 | int StartNum, |
660 | int StopNum); | 634 | int StopNum); |
661 | 635 | ||
662 | extern void SkXmInitDupMd( | ||
663 | SK_AC *pAC, | ||
664 | SK_IOC IoC, | ||
665 | int Port); | ||
666 | |||
667 | extern void SkXmInitPauseMd( | ||
668 | SK_AC *pAC, | ||
669 | SK_IOC IoC, | ||
670 | int Port); | ||
671 | |||
672 | extern void SkXmAutoNegLipaXmac( | 636 | extern void SkXmAutoNegLipaXmac( |
673 | SK_AC *pAC, | 637 | SK_AC *pAC, |
674 | SK_IOC IoC, | 638 | SK_IOC IoC, |
@@ -729,17 +693,6 @@ extern int SkGmCableDiagStatus( | |||
729 | int Port, | 693 | int Port, |
730 | SK_BOOL StartTest); | 694 | SK_BOOL StartTest); |
731 | 695 | ||
732 | extern int SkGmEnterLowPowerMode( | ||
733 | SK_AC *pAC, | ||
734 | SK_IOC IoC, | ||
735 | int Port, | ||
736 | SK_U8 Mode); | ||
737 | |||
738 | extern int SkGmLeaveLowPowerMode( | ||
739 | SK_AC *pAC, | ||
740 | SK_IOC IoC, | ||
741 | int Port); | ||
742 | |||
743 | #ifdef SK_DIAG | 696 | #ifdef SK_DIAG |
744 | extern void SkGePhyRead( | 697 | extern void SkGePhyRead( |
745 | SK_AC *pAC, | 698 | SK_AC *pAC, |
@@ -782,7 +735,6 @@ extern void SkXmSendCont( | |||
782 | /* | 735 | /* |
783 | * public functions in skgeinit.c | 736 | * public functions in skgeinit.c |
784 | */ | 737 | */ |
785 | extern void SkGePollRxD(); | ||
786 | extern void SkGePollTxD(); | 738 | extern void SkGePollTxD(); |
787 | extern void SkGeYellowLED(); | 739 | extern void SkGeYellowLED(); |
788 | extern int SkGeCfgSync(); | 740 | extern int SkGeCfgSync(); |
@@ -792,7 +744,6 @@ extern int SkGeInit(); | |||
792 | extern void SkGeDeInit(); | 744 | extern void SkGeDeInit(); |
793 | extern int SkGeInitPort(); | 745 | extern int SkGeInitPort(); |
794 | extern void SkGeXmitLED(); | 746 | extern void SkGeXmitLED(); |
795 | extern void SkGeInitRamIface(); | ||
796 | extern int SkGeInitAssignRamToQueues(); | 747 | extern int SkGeInitAssignRamToQueues(); |
797 | 748 | ||
798 | /* | 749 | /* |
@@ -801,18 +752,15 @@ extern int SkGeInitAssignRamToQueues(); | |||
801 | extern void SkMacRxTxDisable(); | 752 | extern void SkMacRxTxDisable(); |
802 | extern void SkMacSoftRst(); | 753 | extern void SkMacSoftRst(); |
803 | extern void SkMacHardRst(); | 754 | extern void SkMacHardRst(); |
804 | extern void SkMacClearRst(); | ||
805 | extern void SkMacInitPhy(); | 755 | extern void SkMacInitPhy(); |
806 | extern int SkMacRxTxEnable(); | 756 | extern int SkMacRxTxEnable(); |
807 | extern void SkMacPromiscMode(); | 757 | extern void SkMacPromiscMode(); |
808 | extern void SkMacHashing(); | 758 | extern void SkMacHashing(); |
809 | extern void SkMacIrqDisable(); | 759 | extern void SkMacIrqDisable(); |
810 | extern void SkMacFlushTxFifo(); | 760 | extern void SkMacFlushTxFifo(); |
811 | extern void SkMacFlushRxFifo(); | ||
812 | extern void SkMacIrq(); | 761 | extern void SkMacIrq(); |
813 | extern int SkMacAutoNegDone(); | 762 | extern int SkMacAutoNegDone(); |
814 | extern void SkMacAutoNegLipaPhy(); | 763 | extern void SkMacAutoNegLipaPhy(); |
815 | extern void SkMacSetRxTxEn(); | ||
816 | extern void SkXmInitMac(); | 764 | extern void SkXmInitMac(); |
817 | extern void SkXmPhyRead(); | 765 | extern void SkXmPhyRead(); |
818 | extern void SkXmPhyWrite(); | 766 | extern void SkXmPhyWrite(); |
@@ -820,8 +768,6 @@ extern void SkGmInitMac(); | |||
820 | extern void SkGmPhyRead(); | 768 | extern void SkGmPhyRead(); |
821 | extern void SkGmPhyWrite(); | 769 | extern void SkGmPhyWrite(); |
822 | extern void SkXmClrExactAddr(); | 770 | extern void SkXmClrExactAddr(); |
823 | extern void SkXmInitDupMd(); | ||
824 | extern void SkXmInitPauseMd(); | ||
825 | extern void SkXmAutoNegLipaXmac(); | 771 | extern void SkXmAutoNegLipaXmac(); |
826 | extern int SkXmUpdateStats(); | 772 | extern int SkXmUpdateStats(); |
827 | extern int SkGmUpdateStats(); | 773 | extern int SkGmUpdateStats(); |
@@ -832,8 +778,6 @@ extern int SkGmResetCounter(); | |||
832 | extern int SkXmOverflowStatus(); | 778 | extern int SkXmOverflowStatus(); |
833 | extern int SkGmOverflowStatus(); | 779 | extern int SkGmOverflowStatus(); |
834 | extern int SkGmCableDiagStatus(); | 780 | extern int SkGmCableDiagStatus(); |
835 | extern int SkGmEnterLowPowerMode(); | ||
836 | extern int SkGmLeaveLowPowerMode(); | ||
837 | 781 | ||
838 | #ifdef SK_DIAG | 782 | #ifdef SK_DIAG |
839 | extern void SkGePhyRead(); | 783 | extern void SkGePhyRead(); |
diff --git a/drivers/net/sk98lin/h/skgepnmi.h b/drivers/net/sk98lin/h/skgepnmi.h index 3b2773e6f822..1ed214ccb253 100644 --- a/drivers/net/sk98lin/h/skgepnmi.h +++ b/drivers/net/sk98lin/h/skgepnmi.h | |||
@@ -946,10 +946,6 @@ typedef struct s_PnmiData { | |||
946 | * Function prototypes | 946 | * Function prototypes |
947 | */ | 947 | */ |
948 | extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level); | 948 | extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level); |
949 | extern int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, | ||
950 | unsigned int* pLen, SK_U32 Instance, SK_U32 NetIndex); | ||
951 | extern int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, | ||
952 | void* pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | ||
953 | extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, | 949 | extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, |
954 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | 950 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); |
955 | extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, | 951 | extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, |
diff --git a/drivers/net/sk98lin/h/skgesirq.h b/drivers/net/sk98lin/h/skgesirq.h index b486bd9b6628..3eec6274e413 100644 --- a/drivers/net/sk98lin/h/skgesirq.h +++ b/drivers/net/sk98lin/h/skgesirq.h | |||
@@ -105,7 +105,6 @@ | |||
105 | 105 | ||
106 | extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); | 106 | extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); |
107 | extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); | 107 | extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); |
108 | extern void SkHWLinkUp(SK_AC *pAC, SK_IOC IoC, int Port); | ||
109 | extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port); | 108 | extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port); |
110 | 109 | ||
111 | #endif /* _INC_SKGESIRQ_H_ */ | 110 | #endif /* _INC_SKGESIRQ_H_ */ |
diff --git a/drivers/net/sk98lin/h/ski2c.h b/drivers/net/sk98lin/h/ski2c.h index 598bb42ccc3d..6a63f4a15de6 100644 --- a/drivers/net/sk98lin/h/ski2c.h +++ b/drivers/net/sk98lin/h/ski2c.h | |||
@@ -162,9 +162,6 @@ typedef struct s_I2c { | |||
162 | } SK_I2C; | 162 | } SK_I2C; |
163 | 163 | ||
164 | extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level); | 164 | extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level); |
165 | extern int SkI2cWrite(SK_AC *pAC, SK_IOC IoC, SK_U32 Data, int Dev, int Size, | ||
166 | int Reg, int Burst); | ||
167 | extern int SkI2cReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen); | ||
168 | #ifdef SK_DIAG | 165 | #ifdef SK_DIAG |
169 | extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg, | 166 | extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg, |
170 | int Burst); | 167 | int Burst); |
diff --git a/drivers/net/sk98lin/h/skvpd.h b/drivers/net/sk98lin/h/skvpd.h index daa9a8d154fc..fdd9e48e8040 100644 --- a/drivers/net/sk98lin/h/skvpd.h +++ b/drivers/net/sk98lin/h/skvpd.h | |||
@@ -183,14 +183,6 @@ extern SK_U32 VpdReadDWord( | |||
183 | int addr); | 183 | int addr); |
184 | #endif /* SKDIAG */ | 184 | #endif /* SKDIAG */ |
185 | 185 | ||
186 | extern int VpdSetupPara( | ||
187 | SK_AC *pAC, | ||
188 | const char *key, | ||
189 | const char *buf, | ||
190 | int len, | ||
191 | int type, | ||
192 | int op); | ||
193 | |||
194 | extern SK_VPD_STATUS *VpdStat( | 186 | extern SK_VPD_STATUS *VpdStat( |
195 | SK_AC *pAC, | 187 | SK_AC *pAC, |
196 | SK_IOC IoC); | 188 | SK_IOC IoC); |
@@ -227,11 +219,6 @@ extern int VpdUpdate( | |||
227 | SK_AC *pAC, | 219 | SK_AC *pAC, |
228 | SK_IOC IoC); | 220 | SK_IOC IoC); |
229 | 221 | ||
230 | extern void VpdErrLog( | ||
231 | SK_AC *pAC, | ||
232 | SK_IOC IoC, | ||
233 | char *msg); | ||
234 | |||
235 | #ifdef SKDIAG | 222 | #ifdef SKDIAG |
236 | extern int VpdReadBlock( | 223 | extern int VpdReadBlock( |
237 | SK_AC *pAC, | 224 | SK_AC *pAC, |
@@ -249,7 +236,6 @@ extern int VpdWriteBlock( | |||
249 | #endif /* SKDIAG */ | 236 | #endif /* SKDIAG */ |
250 | #else /* SK_KR_PROTO */ | 237 | #else /* SK_KR_PROTO */ |
251 | extern SK_U32 VpdReadDWord(); | 238 | extern SK_U32 VpdReadDWord(); |
252 | extern int VpdSetupPara(); | ||
253 | extern SK_VPD_STATUS *VpdStat(); | 239 | extern SK_VPD_STATUS *VpdStat(); |
254 | extern int VpdKeys(); | 240 | extern int VpdKeys(); |
255 | extern int VpdRead(); | 241 | extern int VpdRead(); |
@@ -257,7 +243,6 @@ extern SK_BOOL VpdMayWrite(); | |||
257 | extern int VpdWrite(); | 243 | extern int VpdWrite(); |
258 | extern int VpdDelete(); | 244 | extern int VpdDelete(); |
259 | extern int VpdUpdate(); | 245 | extern int VpdUpdate(); |
260 | extern void VpdErrLog(); | ||
261 | #endif /* SK_KR_PROTO */ | 246 | #endif /* SK_KR_PROTO */ |
262 | 247 | ||
263 | #endif /* __INC_SKVPD_H_ */ | 248 | #endif /* __INC_SKVPD_H_ */ |
diff --git a/drivers/net/sk98lin/skaddr.c b/drivers/net/sk98lin/skaddr.c index a7e25edc7fc4..6e6c56aa6d6f 100644 --- a/drivers/net/sk98lin/skaddr.c +++ b/drivers/net/sk98lin/skaddr.c | |||
@@ -87,6 +87,21 @@ static const SK_U16 OnesHash[4] = {0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF}; | |||
87 | static int Next0[SK_MAX_MACS] = {0}; | 87 | static int Next0[SK_MAX_MACS] = {0}; |
88 | #endif /* DEBUG */ | 88 | #endif /* DEBUG */ |
89 | 89 | ||
90 | static int SkAddrGmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, | ||
91 | SK_MAC_ADDR *pMc, int Flags); | ||
92 | static int SkAddrGmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, | ||
93 | int Flags); | ||
94 | static int SkAddrGmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber); | ||
95 | static int SkAddrGmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC, | ||
96 | SK_U32 PortNumber, int NewPromMode); | ||
97 | static int SkAddrXmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, | ||
98 | SK_MAC_ADDR *pMc, int Flags); | ||
99 | static int SkAddrXmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, | ||
100 | int Flags); | ||
101 | static int SkAddrXmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber); | ||
102 | static int SkAddrXmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC, | ||
103 | SK_U32 PortNumber, int NewPromMode); | ||
104 | |||
90 | /* functions ******************************************************************/ | 105 | /* functions ******************************************************************/ |
91 | 106 | ||
92 | /****************************************************************************** | 107 | /****************************************************************************** |
@@ -372,7 +387,7 @@ int Flags) /* permanent/non-perm, sw-only */ | |||
372 | * SK_ADDR_SUCCESS | 387 | * SK_ADDR_SUCCESS |
373 | * SK_ADDR_ILLEGAL_PORT | 388 | * SK_ADDR_ILLEGAL_PORT |
374 | */ | 389 | */ |
375 | int SkAddrXmacMcClear( | 390 | static int SkAddrXmacMcClear( |
376 | SK_AC *pAC, /* adapter context */ | 391 | SK_AC *pAC, /* adapter context */ |
377 | SK_IOC IoC, /* I/O context */ | 392 | SK_IOC IoC, /* I/O context */ |
378 | SK_U32 PortNumber, /* Index of affected port */ | 393 | SK_U32 PortNumber, /* Index of affected port */ |
@@ -429,7 +444,7 @@ int Flags) /* permanent/non-perm, sw-only */ | |||
429 | * SK_ADDR_SUCCESS | 444 | * SK_ADDR_SUCCESS |
430 | * SK_ADDR_ILLEGAL_PORT | 445 | * SK_ADDR_ILLEGAL_PORT |
431 | */ | 446 | */ |
432 | int SkAddrGmacMcClear( | 447 | static int SkAddrGmacMcClear( |
433 | SK_AC *pAC, /* adapter context */ | 448 | SK_AC *pAC, /* adapter context */ |
434 | SK_IOC IoC, /* I/O context */ | 449 | SK_IOC IoC, /* I/O context */ |
435 | SK_U32 PortNumber, /* Index of affected port */ | 450 | SK_U32 PortNumber, /* Index of affected port */ |
@@ -519,7 +534,7 @@ int Flags) /* permanent/non-perm, sw-only */ | |||
519 | * Returns: | 534 | * Returns: |
520 | * Hash value of multicast address. | 535 | * Hash value of multicast address. |
521 | */ | 536 | */ |
522 | SK_U32 SkXmacMcHash( | 537 | static SK_U32 SkXmacMcHash( |
523 | unsigned char *pMc) /* Multicast address */ | 538 | unsigned char *pMc) /* Multicast address */ |
524 | { | 539 | { |
525 | SK_U32 Idx; | 540 | SK_U32 Idx; |
@@ -557,7 +572,7 @@ unsigned char *pMc) /* Multicast address */ | |||
557 | * Returns: | 572 | * Returns: |
558 | * Hash value of multicast address. | 573 | * Hash value of multicast address. |
559 | */ | 574 | */ |
560 | SK_U32 SkGmacMcHash( | 575 | static SK_U32 SkGmacMcHash( |
561 | unsigned char *pMc) /* Multicast address */ | 576 | unsigned char *pMc) /* Multicast address */ |
562 | { | 577 | { |
563 | SK_U32 Data; | 578 | SK_U32 Data; |
@@ -672,7 +687,7 @@ int Flags) /* permanent/non-permanent */ | |||
672 | * SK_MC_ILLEGAL_ADDRESS | 687 | * SK_MC_ILLEGAL_ADDRESS |
673 | * SK_MC_RLMT_OVERFLOW | 688 | * SK_MC_RLMT_OVERFLOW |
674 | */ | 689 | */ |
675 | int SkAddrXmacMcAdd( | 690 | static int SkAddrXmacMcAdd( |
676 | SK_AC *pAC, /* adapter context */ | 691 | SK_AC *pAC, /* adapter context */ |
677 | SK_IOC IoC, /* I/O context */ | 692 | SK_IOC IoC, /* I/O context */ |
678 | SK_U32 PortNumber, /* Port Number */ | 693 | SK_U32 PortNumber, /* Port Number */ |
@@ -778,7 +793,7 @@ int Flags) /* permanent/non-permanent */ | |||
778 | * SK_MC_FILTERING_INEXACT | 793 | * SK_MC_FILTERING_INEXACT |
779 | * SK_MC_ILLEGAL_ADDRESS | 794 | * SK_MC_ILLEGAL_ADDRESS |
780 | */ | 795 | */ |
781 | int SkAddrGmacMcAdd( | 796 | static int SkAddrGmacMcAdd( |
782 | SK_AC *pAC, /* adapter context */ | 797 | SK_AC *pAC, /* adapter context */ |
783 | SK_IOC IoC, /* I/O context */ | 798 | SK_IOC IoC, /* I/O context */ |
784 | SK_U32 PortNumber, /* Port Number */ | 799 | SK_U32 PortNumber, /* Port Number */ |
@@ -937,7 +952,7 @@ SK_U32 PortNumber) /* Port Number */ | |||
937 | * SK_MC_FILTERING_INEXACT | 952 | * SK_MC_FILTERING_INEXACT |
938 | * SK_ADDR_ILLEGAL_PORT | 953 | * SK_ADDR_ILLEGAL_PORT |
939 | */ | 954 | */ |
940 | int SkAddrXmacMcUpdate( | 955 | static int SkAddrXmacMcUpdate( |
941 | SK_AC *pAC, /* adapter context */ | 956 | SK_AC *pAC, /* adapter context */ |
942 | SK_IOC IoC, /* I/O context */ | 957 | SK_IOC IoC, /* I/O context */ |
943 | SK_U32 PortNumber) /* Port Number */ | 958 | SK_U32 PortNumber) /* Port Number */ |
@@ -1082,7 +1097,7 @@ SK_U32 PortNumber) /* Port Number */ | |||
1082 | * SK_MC_FILTERING_INEXACT | 1097 | * SK_MC_FILTERING_INEXACT |
1083 | * SK_ADDR_ILLEGAL_PORT | 1098 | * SK_ADDR_ILLEGAL_PORT |
1084 | */ | 1099 | */ |
1085 | int SkAddrGmacMcUpdate( | 1100 | static int SkAddrGmacMcUpdate( |
1086 | SK_AC *pAC, /* adapter context */ | 1101 | SK_AC *pAC, /* adapter context */ |
1087 | SK_IOC IoC, /* I/O context */ | 1102 | SK_IOC IoC, /* I/O context */ |
1088 | SK_U32 PortNumber) /* Port Number */ | 1103 | SK_U32 PortNumber) /* Port Number */ |
@@ -1468,7 +1483,7 @@ int NewPromMode) /* new promiscuous mode */ | |||
1468 | * SK_ADDR_SUCCESS | 1483 | * SK_ADDR_SUCCESS |
1469 | * SK_ADDR_ILLEGAL_PORT | 1484 | * SK_ADDR_ILLEGAL_PORT |
1470 | */ | 1485 | */ |
1471 | int SkAddrXmacPromiscuousChange( | 1486 | static int SkAddrXmacPromiscuousChange( |
1472 | SK_AC *pAC, /* adapter context */ | 1487 | SK_AC *pAC, /* adapter context */ |
1473 | SK_IOC IoC, /* I/O context */ | 1488 | SK_IOC IoC, /* I/O context */ |
1474 | SK_U32 PortNumber, /* port whose promiscuous mode changes */ | 1489 | SK_U32 PortNumber, /* port whose promiscuous mode changes */ |
@@ -1585,7 +1600,7 @@ int NewPromMode) /* new promiscuous mode */ | |||
1585 | * SK_ADDR_SUCCESS | 1600 | * SK_ADDR_SUCCESS |
1586 | * SK_ADDR_ILLEGAL_PORT | 1601 | * SK_ADDR_ILLEGAL_PORT |
1587 | */ | 1602 | */ |
1588 | int SkAddrGmacPromiscuousChange( | 1603 | static int SkAddrGmacPromiscuousChange( |
1589 | SK_AC *pAC, /* adapter context */ | 1604 | SK_AC *pAC, /* adapter context */ |
1590 | SK_IOC IoC, /* I/O context */ | 1605 | SK_IOC IoC, /* I/O context */ |
1591 | SK_U32 PortNumber, /* port whose promiscuous mode changes */ | 1606 | SK_U32 PortNumber, /* port whose promiscuous mode changes */ |
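The skaddr.h/skaddr.c pair shows the usual recipe for privatising functions that are only called within one file: delete the extern prototypes from the header, mark each definition static, and add static forward declarations near the top of the .c file so call sites that appear before the definitions still compile. The same cleanup is applied to several other sk98lin files below. A minimal sketch of the pattern (names hypothetical):

    /* foo.c -- sketch of turning an exported helper into a file-local one */

    /* Forward declaration near the top of the file replaces the extern
     * prototype that used to live in the header. */
    static int helper_mc_update(int port);

    int public_entry(int port)
    {
            /* earlier functions can still call the helper */
            return helper_mc_update(port);
    }

    /* Definition later in the file; no longer visible outside it. */
    static int helper_mc_update(int port)
    {
            return (port >= 0) ? 0 : -1;
    }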
diff --git a/drivers/net/sk98lin/skgeinit.c b/drivers/net/sk98lin/skgeinit.c index 6cb49dd02251..67f1d6a5c15d 100644 --- a/drivers/net/sk98lin/skgeinit.c +++ b/drivers/net/sk98lin/skgeinit.c | |||
@@ -59,34 +59,6 @@ static struct s_Config OemConfig = { | |||
59 | 59 | ||
60 | /****************************************************************************** | 60 | /****************************************************************************** |
61 | * | 61 | * |
62 | * SkGePollRxD() - Enable / Disable Descriptor Polling of RxD Ring | ||
63 | * | ||
64 | * Description: | ||
65 | * Enable or disable the descriptor polling of the receive descriptor | ||
66 | * ring (RxD) for port 'Port'. | ||
67 | * The new configuration is *not* saved over any SkGeStopPort() and | ||
68 | * SkGeInitPort() calls. | ||
69 | * | ||
70 | * Returns: | ||
71 | * nothing | ||
72 | */ | ||
73 | void SkGePollRxD( | ||
74 | SK_AC *pAC, /* adapter context */ | ||
75 | SK_IOC IoC, /* IO context */ | ||
76 | int Port, /* Port Index (MAC_1 + n) */ | ||
77 | SK_BOOL PollRxD) /* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */ | ||
78 | { | ||
79 | SK_GEPORT *pPrt; | ||
80 | |||
81 | pPrt = &pAC->GIni.GP[Port]; | ||
82 | |||
83 | SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), (PollRxD) ? | ||
84 | CSR_ENA_POL : CSR_DIS_POL); | ||
85 | } /* SkGePollRxD */ | ||
86 | |||
87 | |||
88 | /****************************************************************************** | ||
89 | * | ||
90 | * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings | 62 | * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings |
91 | * | 63 | * |
92 | * Description: | 64 | * Description: |
@@ -952,7 +924,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
952 | * Returns: | 924 | * Returns: |
953 | * nothing | 925 | * nothing |
954 | */ | 926 | */ |
955 | void SkGeInitRamIface( | 927 | static void SkGeInitRamIface( |
956 | SK_AC *pAC, /* adapter context */ | 928 | SK_AC *pAC, /* adapter context */ |
957 | SK_IOC IoC) /* IO context */ | 929 | SK_IOC IoC) /* IO context */ |
958 | { | 930 | { |
@@ -1409,83 +1381,6 @@ SK_IOC IoC) /* IO context */ | |||
1409 | 1381 | ||
1410 | } /* SkGeInit0*/ | 1382 | } /* SkGeInit0*/ |
1411 | 1383 | ||
1412 | #ifdef SK_PCI_RESET | ||
1413 | |||
1414 | /****************************************************************************** | ||
1415 | * | ||
1416 | * SkGePciReset() - Reset PCI interface | ||
1417 | * | ||
1418 | * Description: | ||
1419 | * o Read PCI configuration. | ||
1420 | * o Change power state to 3. | ||
1421 | * o Change power state to 0. | ||
1422 | * o Restore PCI configuration. | ||
1423 | * | ||
1424 | * Returns: | ||
1425 | * 0: Success. | ||
1426 | * 1: Power state could not be changed to 3. | ||
1427 | */ | ||
1428 | static int SkGePciReset( | ||
1429 | SK_AC *pAC, /* adapter context */ | ||
1430 | SK_IOC IoC) /* IO context */ | ||
1431 | { | ||
1432 | int i; | ||
1433 | SK_U16 PmCtlSts; | ||
1434 | SK_U32 Bp1; | ||
1435 | SK_U32 Bp2; | ||
1436 | SK_U16 PciCmd; | ||
1437 | SK_U8 Cls; | ||
1438 | SK_U8 Lat; | ||
1439 | SK_U8 ConfigSpace[PCI_CFG_SIZE]; | ||
1440 | |||
1441 | /* | ||
1442 | * Note: Switching to D3 state is like a software reset. | ||
1443 | * Switching from D3 to D0 is a hardware reset. | ||
1444 | * We have to save and restore the configuration space. | ||
1445 | */ | ||
1446 | for (i = 0; i < PCI_CFG_SIZE; i++) { | ||
1447 | SkPciReadCfgDWord(pAC, i*4, &ConfigSpace[i]); | ||
1448 | } | ||
1449 | |||
1450 | /* We know the RAM Interface Arbiter is enabled. */ | ||
1451 | SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D3); | ||
1452 | SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts); | ||
1453 | |||
1454 | if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D3) { | ||
1455 | return(1); | ||
1456 | } | ||
1457 | |||
1458 | /* Return to D0 state. */ | ||
1459 | SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D0); | ||
1460 | |||
1461 | /* Check for D0 state. */ | ||
1462 | SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts); | ||
1463 | |||
1464 | if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D0) { | ||
1465 | return(1); | ||
1466 | } | ||
1467 | |||
1468 | /* Check PCI Config Registers. */ | ||
1469 | SkPciReadCfgWord(pAC, PCI_COMMAND, &PciCmd); | ||
1470 | SkPciReadCfgByte(pAC, PCI_CACHE_LSZ, &Cls); | ||
1471 | SkPciReadCfgDWord(pAC, PCI_BASE_1ST, &Bp1); | ||
1472 | SkPciReadCfgDWord(pAC, PCI_BASE_2ND, &Bp2); | ||
1473 | SkPciReadCfgByte(pAC, PCI_LAT_TIM, &Lat); | ||
1474 | |||
1475 | if (PciCmd != 0 || Cls != (SK_U8)0 || Lat != (SK_U8)0 || | ||
1476 | (Bp1 & 0xfffffff0L) != 0 || Bp2 != 1) { | ||
1477 | return(1); | ||
1478 | } | ||
1479 | |||
1480 | /* Restore PCI Config Space. */ | ||
1481 | for (i = 0; i < PCI_CFG_SIZE; i++) { | ||
1482 | SkPciWriteCfgDWord(pAC, i*4, ConfigSpace[i]); | ||
1483 | } | ||
1484 | |||
1485 | return(0); | ||
1486 | } /* SkGePciReset */ | ||
1487 | |||
1488 | #endif /* SK_PCI_RESET */ | ||
1489 | 1384 | ||
1490 | /****************************************************************************** | 1385 | /****************************************************************************** |
1491 | * | 1386 | * |
@@ -1524,10 +1419,6 @@ SK_IOC IoC) /* IO context */ | |||
1524 | /* save CLK_RUN bits (YUKON-Lite) */ | 1419 | /* save CLK_RUN bits (YUKON-Lite) */ |
1525 | SK_IN16(IoC, B0_CTST, &CtrlStat); | 1420 | SK_IN16(IoC, B0_CTST, &CtrlStat); |
1526 | 1421 | ||
1527 | #ifdef SK_PCI_RESET | ||
1528 | (void)SkGePciReset(pAC, IoC); | ||
1529 | #endif /* SK_PCI_RESET */ | ||
1530 | |||
1531 | /* do the SW-reset */ | 1422 | /* do the SW-reset */ |
1532 | SK_OUT8(IoC, B0_CTST, CS_RST_SET); | 1423 | SK_OUT8(IoC, B0_CTST, CS_RST_SET); |
1533 | 1424 | ||
@@ -1991,11 +1882,6 @@ SK_IOC IoC) /* IO context */ | |||
1991 | int i; | 1882 | int i; |
1992 | SK_U16 Word; | 1883 | SK_U16 Word; |
1993 | 1884 | ||
1994 | #ifdef SK_PHY_LP_MODE | ||
1995 | SK_U8 Byte; | ||
1996 | SK_U16 PmCtlSts; | ||
1997 | #endif /* SK_PHY_LP_MODE */ | ||
1998 | |||
1999 | #if (!defined(SK_SLIM) && !defined(VCPU)) | 1885 | #if (!defined(SK_SLIM) && !defined(VCPU)) |
2000 | /* ensure I2C is ready */ | 1886 | /* ensure I2C is ready */ |
2001 | SkI2cWaitIrq(pAC, IoC); | 1887 | SkI2cWaitIrq(pAC, IoC); |
@@ -2010,38 +1896,6 @@ SK_IOC IoC) /* IO context */ | |||
2010 | } | 1896 | } |
2011 | } | 1897 | } |
2012 | 1898 | ||
2013 | #ifdef SK_PHY_LP_MODE | ||
2014 | /* | ||
2015 | * for power saving purposes within mobile environments | ||
2016 | * we set the PHY to coma mode and switch to D3 power state. | ||
2017 | */ | ||
2018 | if (pAC->GIni.GIYukonLite && | ||
2019 | pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) { | ||
2020 | |||
2021 | /* for all ports switch PHY to coma mode */ | ||
2022 | for (i = 0; i < pAC->GIni.GIMacsFound; i++) { | ||
2023 | |||
2024 | SkGmEnterLowPowerMode(pAC, IoC, i, PHY_PM_DEEP_SLEEP); | ||
2025 | } | ||
2026 | |||
2027 | if (pAC->GIni.GIVauxAvail) { | ||
2028 | /* switch power to VAUX */ | ||
2029 | Byte = PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF; | ||
2030 | |||
2031 | SK_OUT8(IoC, B0_POWER_CTRL, Byte); | ||
2032 | } | ||
2033 | |||
2034 | /* switch to D3 state */ | ||
2035 | SK_IN16(IoC, PCI_C(PCI_PM_CTL_STS), &PmCtlSts); | ||
2036 | |||
2037 | PmCtlSts |= PCI_PM_STATE_D3; | ||
2038 | |||
2039 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2040 | |||
2041 | SK_OUT16(IoC, PCI_C(PCI_PM_CTL_STS), PmCtlSts); | ||
2042 | } | ||
2043 | #endif /* SK_PHY_LP_MODE */ | ||
2044 | |||
2045 | /* Reset all bits in the PCI STATUS register */ | 1899 | /* Reset all bits in the PCI STATUS register */ |
2046 | /* | 1900 | /* |
2047 | * Note: PCI Cfg cycles cannot be used, because they are not | 1901 | * Note: PCI Cfg cycles cannot be used, because they are not |
diff --git a/drivers/net/sk98lin/skgemib.c b/drivers/net/sk98lin/skgemib.c index 2991bc85cf2c..0a6f67a7a395 100644 --- a/drivers/net/sk98lin/skgemib.c +++ b/drivers/net/sk98lin/skgemib.c | |||
@@ -871,13 +871,6 @@ PNMI_STATIC const SK_PNMI_TAB_ENTRY IdTable[] = { | |||
871 | sizeof(SK_PNMI_CONF), | 871 | sizeof(SK_PNMI_CONF), |
872 | SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType), | 872 | SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType), |
873 | SK_PNMI_RO, MacPrivateConf, 0}, | 873 | SK_PNMI_RO, MacPrivateConf, 0}, |
874 | #ifdef SK_PHY_LP_MODE | ||
875 | {OID_SKGE_PHY_LP_MODE, | ||
876 | SK_PNMI_MAC_ENTRIES, | ||
877 | sizeof(SK_PNMI_CONF), | ||
878 | SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyMode), | ||
879 | SK_PNMI_RW, MacPrivateConf, 0}, | ||
880 | #endif | ||
881 | {OID_SKGE_LINK_CAP, | 874 | {OID_SKGE_LINK_CAP, |
882 | SK_PNMI_MAC_ENTRIES, | 875 | SK_PNMI_MAC_ENTRIES, |
883 | sizeof(SK_PNMI_CONF), | 876 | sizeof(SK_PNMI_CONF), |
diff --git a/drivers/net/sk98lin/skgepnmi.c b/drivers/net/sk98lin/skgepnmi.c index a386172107e8..b36dd9ac6b29 100644 --- a/drivers/net/sk98lin/skgepnmi.c +++ b/drivers/net/sk98lin/skgepnmi.c | |||
@@ -56,10 +56,6 @@ static const char SysKonnectFileId[] = | |||
56 | * Public Function prototypes | 56 | * Public Function prototypes |
57 | */ | 57 | */ |
58 | int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level); | 58 | int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level); |
59 | int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, | ||
60 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | ||
61 | int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, | ||
62 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | ||
63 | int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, | 59 | int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, |
64 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | 60 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); |
65 | int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, | 61 | int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, |
@@ -587,7 +583,7 @@ int Level) /* Initialization level */ | |||
587 | * exist (e.g. port instance 3 on a two port | 583 | * exist (e.g. port instance 3 on a two port |
588 | * adapter. | 584 | * adapter. |
589 | */ | 585 | */ |
590 | int SkPnmiGetVar( | 586 | static int SkPnmiGetVar( |
591 | SK_AC *pAC, /* Pointer to adapter context */ | 587 | SK_AC *pAC, /* Pointer to adapter context */ |
592 | SK_IOC IoC, /* IO context handle */ | 588 | SK_IOC IoC, /* IO context handle */ |
593 | SK_U32 Id, /* Object ID that is to be processed */ | 589 | SK_U32 Id, /* Object ID that is to be processed */ |
@@ -629,7 +625,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
629 | * exist (e.g. port instance 3 on a two port | 625 | * exist (e.g. port instance 3 on a two port |
630 | * adapter. | 626 | * adapter. |
631 | */ | 627 | */ |
632 | int SkPnmiPreSetVar( | 628 | static int SkPnmiPreSetVar( |
633 | SK_AC *pAC, /* Pointer to adapter context */ | 629 | SK_AC *pAC, /* Pointer to adapter context */ |
634 | SK_IOC IoC, /* IO context handle */ | 630 | SK_IOC IoC, /* IO context handle */ |
635 | SK_U32 Id, /* Object ID that is to be processed */ | 631 | SK_U32 Id, /* Object ID that is to be processed */ |
@@ -5062,9 +5058,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
5062 | case OID_SKGE_SPEED_CAP: | 5058 | case OID_SKGE_SPEED_CAP: |
5063 | case OID_SKGE_SPEED_MODE: | 5059 | case OID_SKGE_SPEED_MODE: |
5064 | case OID_SKGE_SPEED_STATUS: | 5060 | case OID_SKGE_SPEED_STATUS: |
5065 | #ifdef SK_PHY_LP_MODE | ||
5066 | case OID_SKGE_PHY_LP_MODE: | ||
5067 | #endif | ||
5068 | if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) { | 5061 | if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) { |
5069 | 5062 | ||
5070 | *pLen = (Limit - LogPortIndex) * sizeof(SK_U8); | 5063 | *pLen = (Limit - LogPortIndex) * sizeof(SK_U8); |
@@ -5140,28 +5133,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
5140 | Offset += sizeof(SK_U32); | 5133 | Offset += sizeof(SK_U32); |
5141 | break; | 5134 | break; |
5142 | 5135 | ||
5143 | #ifdef SK_PHY_LP_MODE | ||
5144 | case OID_SKGE_PHY_LP_MODE: | ||
5145 | if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ | ||
5146 | if (LogPortIndex == 0) { | ||
5147 | continue; | ||
5148 | } | ||
5149 | else { | ||
5150 | /* Get value for physical ports */ | ||
5151 | PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex); | ||
5152 | Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState; | ||
5153 | *pBufPtr = Val8; | ||
5154 | } | ||
5155 | } | ||
5156 | else { /* DualNetMode */ | ||
5157 | |||
5158 | Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState; | ||
5159 | *pBufPtr = Val8; | ||
5160 | } | ||
5161 | Offset += sizeof(SK_U8); | ||
5162 | break; | ||
5163 | #endif | ||
5164 | |||
5165 | case OID_SKGE_LINK_CAP: | 5136 | case OID_SKGE_LINK_CAP: |
5166 | if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ | 5137 | if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ |
5167 | if (LogPortIndex == 0) { | 5138 | if (LogPortIndex == 0) { |
@@ -5478,16 +5449,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
5478 | } | 5449 | } |
5479 | break; | 5450 | break; |
5480 | 5451 | ||
5481 | #ifdef SK_PHY_LP_MODE | ||
5482 | case OID_SKGE_PHY_LP_MODE: | ||
5483 | if (*pLen < Limit - LogPortIndex) { | ||
5484 | |||
5485 | *pLen = Limit - LogPortIndex; | ||
5486 | return (SK_PNMI_ERR_TOO_SHORT); | ||
5487 | } | ||
5488 | break; | ||
5489 | #endif | ||
5490 | |||
5491 | case OID_SKGE_MTU: | 5452 | case OID_SKGE_MTU: |
5492 | if (*pLen < sizeof(SK_U32)) { | 5453 | if (*pLen < sizeof(SK_U32)) { |
5493 | 5454 | ||
@@ -5845,116 +5806,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
5845 | Offset += sizeof(SK_U32); | 5806 | Offset += sizeof(SK_U32); |
5846 | break; | 5807 | break; |
5847 | 5808 | ||
5848 | #ifdef SK_PHY_LP_MODE | ||
5849 | case OID_SKGE_PHY_LP_MODE: | ||
5850 | /* The preset ends here */ | ||
5851 | if (Action == SK_PNMI_PRESET) { | ||
5852 | |||
5853 | return (SK_PNMI_ERR_OK); | ||
5854 | } | ||
5855 | |||
5856 | if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ | ||
5857 | if (LogPortIndex == 0) { | ||
5858 | Offset = 0; | ||
5859 | continue; | ||
5860 | } | ||
5861 | else { | ||
5862 | /* Set value for physical ports */ | ||
5863 | PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex); | ||
5864 | |||
5865 | switch (*(pBuf + Offset)) { | ||
5866 | case 0: | ||
5867 | /* If LowPowerMode is active, we can leave it. */ | ||
5868 | if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) { | ||
5869 | |||
5870 | Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex); | ||
5871 | |||
5872 | if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3) { | ||
5873 | |||
5874 | SkDrvInitAdapter(pAC); | ||
5875 | } | ||
5876 | break; | ||
5877 | } | ||
5878 | else { | ||
5879 | *pLen = 0; | ||
5880 | return (SK_PNMI_ERR_GENERAL); | ||
5881 | } | ||
5882 | case 1: | ||
5883 | case 2: | ||
5884 | case 3: | ||
5885 | case 4: | ||
5886 | /* If no LowPowerMode is active, we can enter it. */ | ||
5887 | if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) { | ||
5888 | |||
5889 | if ((*(pBuf + Offset)) < 3) { | ||
5890 | |||
5891 | SkDrvDeInitAdapter(pAC); | ||
5892 | } | ||
5893 | |||
5894 | Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf); | ||
5895 | break; | ||
5896 | } | ||
5897 | else { | ||
5898 | *pLen = 0; | ||
5899 | return (SK_PNMI_ERR_GENERAL); | ||
5900 | } | ||
5901 | default: | ||
5902 | *pLen = 0; | ||
5903 | return (SK_PNMI_ERR_BAD_VALUE); | ||
5904 | } | ||
5905 | } | ||
5906 | } | ||
5907 | else { /* DualNetMode */ | ||
5908 | |||
5909 | switch (*(pBuf + Offset)) { | ||
5910 | case 0: | ||
5911 | /* If we are in a LowPowerMode, we can leave it. */ | ||
5912 | if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) { | ||
5913 | |||
5914 | Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex); | ||
5915 | |||
5916 | if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3) { | ||
5917 | |||
5918 | SkDrvInitAdapter(pAC); | ||
5919 | } | ||
5920 | break; | ||
5921 | } | ||
5922 | else { | ||
5923 | *pLen = 0; | ||
5924 | return (SK_PNMI_ERR_GENERAL); | ||
5925 | } | ||
5926 | |||
5927 | case 1: | ||
5928 | case 2: | ||
5929 | case 3: | ||
5930 | case 4: | ||
5931 | /* If we are not already in LowPowerMode, we can enter it. */ | ||
5932 | if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) { | ||
5933 | |||
5934 | if ((*(pBuf + Offset)) < 3) { | ||
5935 | |||
5936 | SkDrvDeInitAdapter(pAC); | ||
5937 | } | ||
5938 | else { | ||
5939 | |||
5940 | Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf); | ||
5941 | } | ||
5942 | break; | ||
5943 | } | ||
5944 | else { | ||
5945 | *pLen = 0; | ||
5946 | return (SK_PNMI_ERR_GENERAL); | ||
5947 | } | ||
5948 | |||
5949 | default: | ||
5950 | *pLen = 0; | ||
5951 | return (SK_PNMI_ERR_BAD_VALUE); | ||
5952 | } | ||
5953 | } | ||
5954 | Offset += sizeof(SK_U8); | ||
5955 | break; | ||
5956 | #endif | ||
5957 | |||
5958 | default: | 5809 | default: |
5959 | SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, | 5810 | SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, |
5960 | ("MacPrivateConf: Unknown OID should be handled before set")); | 5811 | ("MacPrivateConf: Unknown OID should be handled before set")); |
diff --git a/drivers/net/sk98lin/skgesirq.c b/drivers/net/sk98lin/skgesirq.c index 87520f0057d7..ab66d80a4455 100644 --- a/drivers/net/sk98lin/skgesirq.c +++ b/drivers/net/sk98lin/skgesirq.c | |||
@@ -265,7 +265,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
265 | * | 265 | * |
266 | * Returns: N/A | 266 | * Returns: N/A |
267 | */ | 267 | */ |
268 | void SkHWLinkUp( | 268 | static void SkHWLinkUp( |
269 | SK_AC *pAC, /* adapter context */ | 269 | SK_AC *pAC, /* adapter context */ |
270 | SK_IOC IoC, /* IO context */ | 270 | SK_IOC IoC, /* IO context */ |
271 | int Port) /* Port Index (MAC_1 + n) */ | 271 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -612,14 +612,6 @@ SK_U32 Istatus) /* Interrupt status word */ | |||
612 | * we ignore those | 612 | * we ignore those |
613 | */ | 613 | */ |
614 | pPrt->HalfDupTimerActive = SK_TRUE; | 614 | pPrt->HalfDupTimerActive = SK_TRUE; |
615 | #ifdef XXX | ||
616 | Len = sizeof(SK_U64); | ||
617 | SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, | ||
618 | &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 0), | ||
619 | pAC->Rlmt.Port[0].Net->NetNumber); | ||
620 | |||
621 | pPrt->LastOctets = Octets; | ||
622 | #endif /* XXX */ | ||
623 | /* Snap statistic counters */ | 615 | /* Snap statistic counters */ |
624 | (void)SkXmUpdateStats(pAC, IoC, 0); | 616 | (void)SkXmUpdateStats(pAC, IoC, 0); |
625 | 617 | ||
@@ -653,14 +645,6 @@ SK_U32 Istatus) /* Interrupt status word */ | |||
653 | pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && | 645 | pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && |
654 | !pPrt->HalfDupTimerActive) { | 646 | !pPrt->HalfDupTimerActive) { |
655 | pPrt->HalfDupTimerActive = SK_TRUE; | 647 | pPrt->HalfDupTimerActive = SK_TRUE; |
656 | #ifdef XXX | ||
657 | Len = sizeof(SK_U64); | ||
658 | SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, | ||
659 | &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 1), | ||
660 | pAC->Rlmt.Port[1].Net->NetNumber); | ||
661 | |||
662 | pPrt->LastOctets = Octets; | ||
663 | #endif /* XXX */ | ||
664 | /* Snap statistic counters */ | 648 | /* Snap statistic counters */ |
665 | (void)SkXmUpdateStats(pAC, IoC, 1); | 649 | (void)SkXmUpdateStats(pAC, IoC, 1); |
666 | 650 | ||
@@ -2085,12 +2069,6 @@ SK_EVPARA Para) /* Event specific Parameter */ | |||
2085 | pPrt->HalfDupTimerActive = SK_FALSE; | 2069 | pPrt->HalfDupTimerActive = SK_FALSE; |
2086 | if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || | 2070 | if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || |
2087 | pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { | 2071 | pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { |
2088 | #ifdef XXX | ||
2089 | Len = sizeof(SK_U64); | ||
2090 | SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, | ||
2091 | &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), | ||
2092 | pAC->Rlmt.Port[Port].Net->NetNumber); | ||
2093 | #endif /* XXX */ | ||
2094 | /* Snap statistic counters */ | 2072 | /* Snap statistic counters */ |
2095 | (void)SkXmUpdateStats(pAC, IoC, Port); | 2073 | (void)SkXmUpdateStats(pAC, IoC, Port); |
2096 | 2074 | ||
diff --git a/drivers/net/sk98lin/ski2c.c b/drivers/net/sk98lin/ski2c.c index 075a0464e56b..79bf57cb5326 100644 --- a/drivers/net/sk98lin/ski2c.c +++ b/drivers/net/sk98lin/ski2c.c | |||
@@ -396,7 +396,7 @@ int Rw) /* Read / Write Flag */ | |||
396 | * 1: error, transfer does not complete, I2C transfer | 396 | * 1: error, transfer does not complete, I2C transfer |
397 | * killed, wait loop terminated. | 397 | * killed, wait loop terminated. |
398 | */ | 398 | */ |
399 | int SkI2cWait( | 399 | static int SkI2cWait( |
400 | SK_AC *pAC, /* Adapter Context */ | 400 | SK_AC *pAC, /* Adapter Context */ |
401 | SK_IOC IoC, /* I/O Context */ | 401 | SK_IOC IoC, /* I/O Context */ |
402 | int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */ | 402 | int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */ |
@@ -481,7 +481,7 @@ SK_IOC IoC) /* I/O Context */ | |||
481 | * returns 0: success | 481 | * returns 0: success |
482 | * 1: error | 482 | * 1: error |
483 | */ | 483 | */ |
484 | int SkI2cWrite( | 484 | static int SkI2cWrite( |
485 | SK_AC *pAC, /* Adapter Context */ | 485 | SK_AC *pAC, /* Adapter Context */ |
486 | SK_IOC IoC, /* I/O Context */ | 486 | SK_IOC IoC, /* I/O Context */ |
487 | SK_U32 I2cData, /* I2C Data to write */ | 487 | SK_U32 I2cData, /* I2C Data to write */ |
@@ -538,7 +538,7 @@ int I2cBurst) /* I2C Burst Flag */ | |||
538 | * 1 if the read is completed | 538 | * 1 if the read is completed |
539 | * 0 if the read must be continued (I2C Bus still allocated) | 539 | * 0 if the read must be continued (I2C Bus still allocated) |
540 | */ | 540 | */ |
541 | int SkI2cReadSensor( | 541 | static int SkI2cReadSensor( |
542 | SK_AC *pAC, /* Adapter Context */ | 542 | SK_AC *pAC, /* Adapter Context */ |
543 | SK_IOC IoC, /* I/O Context */ | 543 | SK_IOC IoC, /* I/O Context */ |
544 | SK_SENSOR *pSen) /* Sensor to be read */ | 544 | SK_SENSOR *pSen) /* Sensor to be read */ |
diff --git a/drivers/net/sk98lin/sklm80.c b/drivers/net/sk98lin/sklm80.c index 68292d18175b..a204f5bb55d4 100644 --- a/drivers/net/sk98lin/sklm80.c +++ b/drivers/net/sk98lin/sklm80.c | |||
@@ -34,79 +34,7 @@ static const char SysKonnectFileId[] = | |||
34 | #include "h/lm80.h" | 34 | #include "h/lm80.h" |
35 | #include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ | 35 | #include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ |
36 | 36 | ||
37 | #ifdef SK_DIAG | ||
38 | #define BREAK_OR_WAIT(pAC,IoC,Event) SkI2cWait(pAC,IoC,Event) | ||
39 | #else /* nSK_DIAG */ | ||
40 | #define BREAK_OR_WAIT(pAC,IoC,Event) break | 37 | #define BREAK_OR_WAIT(pAC,IoC,Event) break |
41 | #endif /* nSK_DIAG */ | ||
42 | |||
43 | #ifdef SK_DIAG | ||
44 | /* | ||
45 | * read the register 'Reg' from the device 'Dev' | ||
46 | * | ||
47 | * return read error -1 | ||
48 | * success the read value | ||
49 | */ | ||
50 | int SkLm80RcvReg( | ||
51 | SK_IOC IoC, /* Adapter Context */ | ||
52 | int Dev, /* I2C device address */ | ||
53 | int Reg) /* register to read */ | ||
54 | { | ||
55 | int Val = 0; | ||
56 | int TempExt; | ||
57 | |||
58 | /* Signal device number */ | ||
59 | if (SkI2cSndDev(IoC, Dev, I2C_WRITE)) { | ||
60 | return(-1); | ||
61 | } | ||
62 | |||
63 | if (SkI2cSndByte(IoC, Reg)) { | ||
64 | return(-1); | ||
65 | } | ||
66 | |||
67 | /* repeat start */ | ||
68 | if (SkI2cSndDev(IoC, Dev, I2C_READ)) { | ||
69 | return(-1); | ||
70 | } | ||
71 | |||
72 | switch (Reg) { | ||
73 | case LM80_TEMP_IN: | ||
74 | Val = (int)SkI2cRcvByte(IoC, 1); | ||
75 | |||
76 | /* First: correct the value: it might be negative */ | ||
77 | if ((Val & 0x80) != 0) { | ||
78 | /* Value is negative */ | ||
79 | Val = Val - 256; | ||
80 | } | ||
81 | Val = Val * SK_LM80_TEMP_LSB; | ||
82 | SkI2cStop(IoC); | ||
83 | |||
84 | TempExt = (int)SkLm80RcvReg(IoC, LM80_ADDR, LM80_TEMP_CTRL); | ||
85 | |||
86 | if (Val > 0) { | ||
87 | Val += ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB); | ||
88 | } | ||
89 | else { | ||
90 | Val -= ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB); | ||
91 | } | ||
92 | return(Val); | ||
93 | break; | ||
94 | case LM80_VT0_IN: | ||
95 | case LM80_VT1_IN: | ||
96 | case LM80_VT2_IN: | ||
97 | case LM80_VT3_IN: | ||
98 | Val = (int)SkI2cRcvByte(IoC, 1) * SK_LM80_VT_LSB; | ||
99 | break; | ||
100 | |||
101 | default: | ||
102 | Val = (int)SkI2cRcvByte(IoC, 1); | ||
103 | break; | ||
104 | } | ||
105 | |||
106 | SkI2cStop(IoC); | ||
107 | return(Val); | ||
108 | } | ||
109 | #endif /* SK_DIAG */ | ||
110 | 38 | ||
111 | /* | 39 | /* |
112 | * read a sensors value (LM80 specific) | 40 | * read a sensors value (LM80 specific) |
diff --git a/drivers/net/sk98lin/skrlmt.c b/drivers/net/sk98lin/skrlmt.c index 9ea11ab2296a..be8d1ccddf6d 100644 --- a/drivers/net/sk98lin/skrlmt.c +++ b/drivers/net/sk98lin/skrlmt.c | |||
@@ -282,7 +282,6 @@ typedef struct s_SpTreeRlmtPacket { | |||
282 | 282 | ||
283 | SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}}; | 283 | SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}}; |
284 | SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}}; | 284 | SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}}; |
285 | SK_MAC_ADDR BcAddr = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}; | ||
286 | 285 | ||
287 | /* local variables ************************************************************/ | 286 | /* local variables ************************************************************/ |
288 | 287 | ||
diff --git a/drivers/net/sk98lin/skvpd.c b/drivers/net/sk98lin/skvpd.c index eb3c8988ced1..17786056c66a 100644 --- a/drivers/net/sk98lin/skvpd.c +++ b/drivers/net/sk98lin/skvpd.c | |||
@@ -132,65 +132,6 @@ int addr) /* VPD address */ | |||
132 | 132 | ||
133 | #endif /* SKDIAG */ | 133 | #endif /* SKDIAG */ |
134 | 134 | ||
135 | #if 0 | ||
136 | |||
137 | /* | ||
138 | Write the dword 'data' at address 'addr' into the VPD EEPROM, and | ||
139 | verify that the data is written. | ||
140 | |||
141 | Needed Time: | ||
142 | |||
143 | . MIN MAX | ||
144 | . ------------------------------------------------------------------- | ||
145 | . write 1.8 ms 3.6 ms | ||
146 | . internal write cyles 0.7 ms 7.0 ms | ||
147 | . ------------------------------------------------------------------- | ||
148 | . over all program time 2.5 ms 10.6 ms | ||
149 | . read 1.3 ms 2.6 ms | ||
150 | . ------------------------------------------------------------------- | ||
151 | . over all 3.8 ms 13.2 ms | ||
152 | . | ||
153 | |||
154 | |||
155 | Returns 0: success | ||
156 | 1: error, I2C transfer does not terminate | ||
157 | 2: error, data verify error | ||
158 | |||
159 | */ | ||
160 | static int VpdWriteDWord( | ||
161 | SK_AC *pAC, /* pAC pointer */ | ||
162 | SK_IOC IoC, /* IO Context */ | ||
163 | int addr, /* VPD address */ | ||
164 | SK_U32 data) /* VPD data to write */ | ||
165 | { | ||
166 | /* start VPD write */ | ||
167 | /* Don't swap here, it's a data stream of bytes */ | ||
168 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, | ||
169 | ("VPD write dword at addr 0x%x, data = 0x%x\n",addr,data)); | ||
170 | VPD_OUT32(pAC, IoC, PCI_VPD_DAT_REG, (SK_U32)data); | ||
171 | /* But do it here */ | ||
172 | addr |= VPD_WRITE; | ||
173 | |||
174 | VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)(addr | VPD_WRITE)); | ||
175 | |||
176 | /* this may take up to 10,6 ms */ | ||
177 | if (VpdWait(pAC, IoC, VPD_WRITE)) { | ||
178 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, | ||
179 | ("Write Timed Out\n")); | ||
180 | return(1); | ||
181 | }; | ||
182 | |||
183 | /* verify data */ | ||
184 | if (VpdReadDWord(pAC, IoC, addr) != data) { | ||
185 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, | ||
186 | ("Data Verify Error\n")); | ||
187 | return(2); | ||
188 | } | ||
189 | return(0); | ||
190 | } /* VpdWriteDWord */ | ||
191 | |||
192 | #endif /* 0 */ | ||
193 | |||
194 | /* | 135 | /* |
195 | * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from | 136 | * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from |
196 | * or to the I2C EEPROM. | 137 | * or to the I2C EEPROM. |
@@ -728,7 +669,7 @@ char *etp) /* end pointer input position */ | |||
728 | * 6: fatal VPD error | 669 | * 6: fatal VPD error |
729 | * | 670 | * |
730 | */ | 671 | */ |
731 | int VpdSetupPara( | 672 | static int VpdSetupPara( |
732 | SK_AC *pAC, /* common data base */ | 673 | SK_AC *pAC, /* common data base */ |
733 | const char *key, /* keyword to insert */ | 674 | const char *key, /* keyword to insert */ |
734 | const char *buf, /* buffer with the keyword value */ | 675 | const char *buf, /* buffer with the keyword value */ |
@@ -1148,50 +1089,3 @@ SK_IOC IoC) /* IO Context */ | |||
1148 | return(0); | 1089 | return(0); |
1149 | } | 1090 | } |
1150 | 1091 | ||
1151 | |||
1152 | |||
1153 | /* | ||
1154 | * Read the contents of the VPD EEPROM and copy it to the VPD buffer | ||
1155 | * if not already done. If the keyword "VF" is not present it will be | ||
1156 | * created and the error log message will be stored to this keyword. | ||
1157 | * If "VF" is not present the error log message will be stored to the | ||
1158 | * keyword "VL". "VL" will created or overwritten if "VF" is present. | ||
1159 | * The VPD read/write area is saved to the VPD EEPROM. | ||
1160 | * | ||
1161 | * returns nothing, errors will be ignored. | ||
1162 | */ | ||
1163 | void VpdErrLog( | ||
1164 | SK_AC *pAC, /* common data base */ | ||
1165 | SK_IOC IoC, /* IO Context */ | ||
1166 | char *msg) /* error log message */ | ||
1167 | { | ||
1168 | SK_VPD_PARA *v, vf; /* VF */ | ||
1169 | int len; | ||
1170 | |||
1171 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, | ||
1172 | ("VPD error log msg %s\n", msg)); | ||
1173 | if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { | ||
1174 | if (VpdInit(pAC, IoC) != 0) { | ||
1175 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, | ||
1176 | ("VPD init error\n")); | ||
1177 | return; | ||
1178 | } | ||
1179 | } | ||
1180 | |||
1181 | len = strlen(msg); | ||
1182 | if (len > VPD_MAX_LEN) { | ||
1183 | /* cut it */ | ||
1184 | len = VPD_MAX_LEN; | ||
1185 | } | ||
1186 | if ((v = vpd_find_para(pAC, VPD_VF, &vf)) != NULL) { | ||
1187 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("overwrite VL\n")); | ||
1188 | (void)VpdSetupPara(pAC, VPD_VL, msg, len, VPD_RW_KEY, OWR_KEY); | ||
1189 | } | ||
1190 | else { | ||
1191 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("write VF\n")); | ||
1192 | (void)VpdSetupPara(pAC, VPD_VF, msg, len, VPD_RW_KEY, ADD_KEY); | ||
1193 | } | ||
1194 | |||
1195 | (void)VpdUpdate(pAC, IoC); | ||
1196 | } | ||
1197 | |||
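The comment of the deleted VpdWriteDWord() above documents a write-and-verify cycle of up to roughly 10.6 ms and the return codes 0/1/2. Below is a hedged sketch of that pattern using hypothetical eeprom_* accessors, not the driver's VPD_OUT32/VpdWait/VpdReadDWord interfaces.

#include <linux/types.h>
#include <linux/delay.h>

extern void eeprom_write32(int addr, u32 data);	/* hypothetical accessor */
extern u32  eeprom_read32(int addr);		/* hypothetical accessor */
extern int  eeprom_busy(int addr);		/* hypothetical status poll */

static int vpd_write_verify(int addr, u32 data)
{
	int timeout = 11;			/* worst-case programming time is ~10.6 ms */

	eeprom_write32(addr, data);		/* start the VPD write */
	while (eeprom_busy(addr)) {
		if (--timeout == 0)
			return 1;		/* transfer did not terminate */
		mdelay(1);
	}
	if (eeprom_read32(addr) != data)
		return 2;			/* data verify error */
	return 0;				/* success */
}
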
diff --git a/drivers/net/sk98lin/skxmac2.c b/drivers/net/sk98lin/skxmac2.c index 42d2d963150a..b4e75022a657 100644 --- a/drivers/net/sk98lin/skxmac2.c +++ b/drivers/net/sk98lin/skxmac2.c | |||
@@ -41,13 +41,13 @@ static const char SysKonnectFileId[] = | |||
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #ifdef GENESIS | 43 | #ifdef GENESIS |
44 | BCOM_HACK BcomRegA1Hack[] = { | 44 | static BCOM_HACK BcomRegA1Hack[] = { |
45 | { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, | 45 | { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, |
46 | { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, | 46 | { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, |
47 | { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, | 47 | { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, |
48 | { 0, 0 } | 48 | { 0, 0 } |
49 | }; | 49 | }; |
50 | BCOM_HACK BcomRegC0Hack[] = { | 50 | static BCOM_HACK BcomRegC0Hack[] = { |
51 | { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 }, | 51 | { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 }, |
52 | { 0x15, 0x0A04 }, { 0x18, 0x0420 }, | 52 | { 0x15, 0x0A04 }, { 0x18, 0x0420 }, |
53 | { 0, 0 } | 53 | { 0, 0 } |
@@ -790,7 +790,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
790 | * Returns: | 790 | * Returns: |
791 | * nothing | 791 | * nothing |
792 | */ | 792 | */ |
793 | void SkMacFlushRxFifo( | 793 | static void SkMacFlushRxFifo( |
794 | SK_AC *pAC, /* adapter context */ | 794 | SK_AC *pAC, /* adapter context */ |
795 | SK_IOC IoC, /* IO context */ | 795 | SK_IOC IoC, /* IO context */ |
796 | int Port) /* Port Index (MAC_1 + n) */ | 796 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -1231,38 +1231,6 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
1231 | } /* SkMacHardRst */ | 1231 | } /* SkMacHardRst */ |
1232 | 1232 | ||
1233 | 1233 | ||
1234 | /****************************************************************************** | ||
1235 | * | ||
1236 | * SkMacClearRst() - Clear the MAC reset | ||
1237 | * | ||
1238 | * Description: calls a clear MAC reset routine dep. on board type | ||
1239 | * | ||
1240 | * Returns: | ||
1241 | * nothing | ||
1242 | */ | ||
1243 | void SkMacClearRst( | ||
1244 | SK_AC *pAC, /* adapter context */ | ||
1245 | SK_IOC IoC, /* IO context */ | ||
1246 | int Port) /* Port Index (MAC_1 + n) */ | ||
1247 | { | ||
1248 | |||
1249 | #ifdef GENESIS | ||
1250 | if (pAC->GIni.GIGenesis) { | ||
1251 | |||
1252 | SkXmClearRst(pAC, IoC, Port); | ||
1253 | } | ||
1254 | #endif /* GENESIS */ | ||
1255 | |||
1256 | #ifdef YUKON | ||
1257 | if (pAC->GIni.GIYukon) { | ||
1258 | |||
1259 | SkGmClearRst(pAC, IoC, Port); | ||
1260 | } | ||
1261 | #endif /* YUKON */ | ||
1262 | |||
1263 | } /* SkMacClearRst */ | ||
1264 | |||
1265 | |||
1266 | #ifdef GENESIS | 1234 | #ifdef GENESIS |
1267 | /****************************************************************************** | 1235 | /****************************************************************************** |
1268 | * | 1236 | * |
@@ -1713,7 +1681,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
1713 | * Returns: | 1681 | * Returns: |
1714 | * nothing | 1682 | * nothing |
1715 | */ | 1683 | */ |
1716 | void SkXmInitDupMd( | 1684 | static void SkXmInitDupMd( |
1717 | SK_AC *pAC, /* adapter context */ | 1685 | SK_AC *pAC, /* adapter context */ |
1718 | SK_IOC IoC, /* IO context */ | 1686 | SK_IOC IoC, /* IO context */ |
1719 | int Port) /* Port Index (MAC_1 + n) */ | 1687 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -1761,7 +1729,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
1761 | * Returns: | 1729 | * Returns: |
1762 | * nothing | 1730 | * nothing |
1763 | */ | 1731 | */ |
1764 | void SkXmInitPauseMd( | 1732 | static void SkXmInitPauseMd( |
1765 | SK_AC *pAC, /* adapter context */ | 1733 | SK_AC *pAC, /* adapter context */ |
1766 | SK_IOC IoC, /* IO context */ | 1734 | SK_IOC IoC, /* IO context */ |
1767 | int Port) /* Port Index (MAC_1 + n) */ | 1735 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -2076,283 +2044,7 @@ SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ | |||
2076 | } /* SkXmInitPhyBcom */ | 2044 | } /* SkXmInitPhyBcom */ |
2077 | #endif /* GENESIS */ | 2045 | #endif /* GENESIS */ |
2078 | 2046 | ||
2079 | |||
2080 | #ifdef YUKON | 2047 | #ifdef YUKON |
2081 | #ifndef SK_SLIM | ||
2082 | /****************************************************************************** | ||
2083 | * | ||
2084 | * SkGmEnterLowPowerMode() | ||
2085 | * | ||
2086 | * Description: | ||
2087 | * This function sets the Marvell Alaska PHY to the low power mode | ||
2088 | * given by parameter mode. | ||
2089 | * The following low power modes are available: | ||
2090 | * | ||
2091 | * - Coma Mode (Deep Sleep): | ||
2092 | * Power consumption: ~15 - 30 mW | ||
2093 | * The PHY cannot wake up on its own. | ||
2094 | * | ||
2095 | * - IEEE 22.2.4.1.5 compatible power down mode | ||
2096 | * Power consumption: ~240 mW | ||
2097 | * The PHY cannot wake up on its own. | ||
2098 | * | ||
2099 | * - energy detect mode | ||
2100 | * Power consumption: ~160 mW | ||
2101 | * The PHY can wake up on its own by detecting activity | ||
2102 | * on the CAT 5 cable. | ||
2103 | * | ||
2104 | * - energy detect plus mode | ||
2105 | * Power consumption: ~150 mW | ||
2106 | * The PHY can wake up on its own by detecting activity | ||
2107 | * on the CAT 5 cable. | ||
2108 | * Connected devices can be woken up by sending normal link | ||
2109 | * pulses every one second. | ||
2110 | * | ||
2111 | * Note: | ||
2112 | * | ||
2113 | * Returns: | ||
2114 | * 0: ok | ||
2115 | * 1: error | ||
2116 | */ | ||
2117 | int SkGmEnterLowPowerMode( | ||
2118 | SK_AC *pAC, /* adapter context */ | ||
2119 | SK_IOC IoC, /* IO context */ | ||
2120 | int Port, /* Port Index (e.g. MAC_1) */ | ||
2121 | SK_U8 Mode) /* low power mode */ | ||
2122 | { | ||
2123 | SK_U16 Word; | ||
2124 | SK_U32 DWord; | ||
2125 | SK_U8 LastMode; | ||
2126 | int Ret = 0; | ||
2127 | |||
2128 | if (pAC->GIni.GIYukonLite && | ||
2129 | pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) { | ||
2130 | |||
2131 | /* save current power mode */ | ||
2132 | LastMode = pAC->GIni.GP[Port].PPhyPowerState; | ||
2133 | pAC->GIni.GP[Port].PPhyPowerState = Mode; | ||
2134 | |||
2135 | switch (Mode) { | ||
2136 | /* coma mode (deep sleep) */ | ||
2137 | case PHY_PM_DEEP_SLEEP: | ||
2138 | /* setup General Purpose Control Register */ | ||
2139 | GM_OUT16(IoC, 0, GM_GP_CTRL, GM_GPCR_FL_PASS | | ||
2140 | GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS); | ||
2141 | |||
2142 | /* apply COMA mode workaround */ | ||
2143 | SkGmPhyWrite(pAC, IoC, Port, 29, 0x001f); | ||
2144 | SkGmPhyWrite(pAC, IoC, Port, 30, 0xfff3); | ||
2145 | |||
2146 | SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord); | ||
2147 | |||
2148 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2149 | |||
2150 | /* Set PHY to Coma Mode */ | ||
2151 | SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord | PCI_PHY_COMA); | ||
2152 | |||
2153 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2154 | |||
2155 | break; | ||
2156 | |||
2157 | /* IEEE 22.2.4.1.5 compatible power down mode */ | ||
2158 | case PHY_PM_IEEE_POWER_DOWN: | ||
2159 | /* | ||
2160 | * - disable MAC 125 MHz clock | ||
2161 | * - allow MAC power down | ||
2162 | */ | ||
2163 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2164 | Word |= PHY_M_PC_DIS_125CLK; | ||
2165 | Word &= ~PHY_M_PC_MAC_POW_UP; | ||
2166 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2167 | |||
2168 | /* | ||
2169 | * register changes must be followed by a software | ||
2170 | * reset to take effect | ||
2171 | */ | ||
2172 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word); | ||
2173 | Word |= PHY_CT_RESET; | ||
2174 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word); | ||
2175 | |||
2176 | /* switch IEEE compatible power down mode on */ | ||
2177 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word); | ||
2178 | Word |= PHY_CT_PDOWN; | ||
2179 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word); | ||
2180 | break; | ||
2181 | |||
2182 | /* energy detect and energy detect plus mode */ | ||
2183 | case PHY_PM_ENERGY_DETECT: | ||
2184 | case PHY_PM_ENERGY_DETECT_PLUS: | ||
2185 | /* | ||
2186 | * - disable MAC 125 MHz clock | ||
2187 | */ | ||
2188 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2189 | Word |= PHY_M_PC_DIS_125CLK; | ||
2190 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2191 | |||
2192 | /* activate energy detect mode 1 */ | ||
2193 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2194 | |||
2195 | /* energy detect mode */ | ||
2196 | if (Mode == PHY_PM_ENERGY_DETECT) { | ||
2197 | Word |= PHY_M_PC_EN_DET; | ||
2198 | } | ||
2199 | /* energy detect plus mode */ | ||
2200 | else { | ||
2201 | Word |= PHY_M_PC_EN_DET_PLUS; | ||
2202 | } | ||
2203 | |||
2204 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2205 | |||
2206 | /* | ||
2207 | * reinitialize the PHY to force a software reset | ||
2208 | * which is necessary after the register settings | ||
2209 | * for the energy detect modes. | ||
2210 | * Furthermore reinitialisation prevents that the | ||
2211 | * PHY is running out of a stable state. | ||
2212 | */ | ||
2213 | SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE); | ||
2214 | break; | ||
2215 | |||
2216 | /* don't change current power mode */ | ||
2217 | default: | ||
2218 | pAC->GIni.GP[Port].PPhyPowerState = LastMode; | ||
2219 | Ret = 1; | ||
2220 | break; | ||
2221 | } | ||
2222 | } | ||
2223 | /* low power modes are not supported by this chip */ | ||
2224 | else { | ||
2225 | Ret = 1; | ||
2226 | } | ||
2227 | |||
2228 | return(Ret); | ||
2229 | |||
2230 | } /* SkGmEnterLowPowerMode */ | ||
2231 | |||
2232 | /****************************************************************************** | ||
2233 | * | ||
2234 | * SkGmLeaveLowPowerMode() | ||
2235 | * | ||
2236 | * Description: | ||
2237 | * Leave the current low power mode and switch to normal mode | ||
2238 | * | ||
2239 | * Note: | ||
2240 | * | ||
2241 | * Returns: | ||
2242 | * 0: ok | ||
2243 | * 1: error | ||
2244 | */ | ||
2245 | int SkGmLeaveLowPowerMode( | ||
2246 | SK_AC *pAC, /* adapter context */ | ||
2247 | SK_IOC IoC, /* IO context */ | ||
2248 | int Port) /* Port Index (e.g. MAC_1) */ | ||
2249 | { | ||
2250 | SK_U32 DWord; | ||
2251 | SK_U16 Word; | ||
2252 | SK_U8 LastMode; | ||
2253 | int Ret = 0; | ||
2254 | |||
2255 | if (pAC->GIni.GIYukonLite && | ||
2256 | pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) { | ||
2257 | |||
2258 | /* save current power mode */ | ||
2259 | LastMode = pAC->GIni.GP[Port].PPhyPowerState; | ||
2260 | pAC->GIni.GP[Port].PPhyPowerState = PHY_PM_OPERATIONAL_MODE; | ||
2261 | |||
2262 | switch (LastMode) { | ||
2263 | /* coma mode (deep sleep) */ | ||
2264 | case PHY_PM_DEEP_SLEEP: | ||
2265 | SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord); | ||
2266 | |||
2267 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2268 | |||
2269 | /* Release PHY from Coma Mode */ | ||
2270 | SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord & ~PCI_PHY_COMA); | ||
2271 | |||
2272 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2273 | |||
2274 | SK_IN32(IoC, B2_GP_IO, &DWord); | ||
2275 | |||
2276 | /* set to output */ | ||
2277 | DWord |= (GP_DIR_9 | GP_IO_9); | ||
2278 | |||
2279 | /* set PHY reset */ | ||
2280 | SK_OUT32(IoC, B2_GP_IO, DWord); | ||
2281 | |||
2282 | DWord &= ~GP_IO_9; /* clear PHY reset (active high) */ | ||
2283 | |||
2284 | /* clear PHY reset */ | ||
2285 | SK_OUT32(IoC, B2_GP_IO, DWord); | ||
2286 | break; | ||
2287 | |||
2288 | /* IEEE 22.2.4.1.5 compatible power down mode */ | ||
2289 | case PHY_PM_IEEE_POWER_DOWN: | ||
2290 | /* | ||
2291 | * - enable MAC 125 MHz clock | ||
2292 | * - set MAC power up | ||
2293 | */ | ||
2294 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2295 | Word &= ~PHY_M_PC_DIS_125CLK; | ||
2296 | Word |= PHY_M_PC_MAC_POW_UP; | ||
2297 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2298 | |||
2299 | /* | ||
2300 | * register changes must be followed by a software | ||
2301 | * reset to take effect | ||
2302 | */ | ||
2303 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word); | ||
2304 | Word |= PHY_CT_RESET; | ||
2305 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word); | ||
2306 | |||
2307 | /* switch IEEE compatible power down mode off */ | ||
2308 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word); | ||
2309 | Word &= ~PHY_CT_PDOWN; | ||
2310 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word); | ||
2311 | break; | ||
2312 | |||
2313 | /* energy detect and energy detect plus mode */ | ||
2314 | case PHY_PM_ENERGY_DETECT: | ||
2315 | case PHY_PM_ENERGY_DETECT_PLUS: | ||
2316 | /* | ||
2317 | * - enable MAC 125 MHz clock | ||
2318 | */ | ||
2319 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2320 | Word &= ~PHY_M_PC_DIS_125CLK; | ||
2321 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2322 | |||
2323 | /* disable energy detect mode */ | ||
2324 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2325 | Word &= ~PHY_M_PC_EN_DET_MSK; | ||
2326 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2327 | |||
2328 | /* | ||
2329 | * reinitialize the PHY to force a software reset | ||
2330 | * which is necessary after the register settings | ||
2331 | * for the energy detect modes. | ||
2332 | * Furthermore reinitialisation prevents that the | ||
2333 | * PHY is running out of a stable state. | ||
2334 | */ | ||
2335 | SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE); | ||
2336 | break; | ||
2337 | |||
2338 | /* don't change current power mode */ | ||
2339 | default: | ||
2340 | pAC->GIni.GP[Port].PPhyPowerState = LastMode; | ||
2341 | Ret = 1; | ||
2342 | break; | ||
2343 | } | ||
2344 | } | ||
2345 | /* low power modes are not supported by this chip */ | ||
2346 | else { | ||
2347 | Ret = 1; | ||
2348 | } | ||
2349 | |||
2350 | return(Ret); | ||
2351 | |||
2352 | } /* SkGmLeaveLowPowerMode */ | ||
2353 | #endif /* !SK_SLIM */ | ||
2354 | |||
2355 | |||
2356 | /****************************************************************************** | 2048 | /****************************************************************************** |
2357 | * | 2049 | * |
2358 | * SkGmInitPhyMarv() - Initialize the Marvell Phy registers | 2050 | * SkGmInitPhyMarv() - Initialize the Marvell Phy registers |
@@ -3420,145 +3112,6 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
3420 | } /* SkMacAutoNegDone */ | 3112 | } /* SkMacAutoNegDone */ |
3421 | 3113 | ||
3422 | 3114 | ||
3423 | #ifdef GENESIS | ||
3424 | /****************************************************************************** | ||
3425 | * | ||
3426 | * SkXmSetRxTxEn() - Special Set Rx/Tx Enable and some features in XMAC | ||
3427 | * | ||
3428 | * Description: | ||
3429 | * sets MAC or PHY LoopBack and Duplex Mode in the MMU Command Reg. | ||
3430 | * enables Rx/Tx | ||
3431 | * | ||
3432 | * Returns: N/A | ||
3433 | */ | ||
3434 | static void SkXmSetRxTxEn( | ||
3435 | SK_AC *pAC, /* Adapter Context */ | ||
3436 | SK_IOC IoC, /* IO context */ | ||
3437 | int Port, /* Port Index (MAC_1 + n) */ | ||
3438 | int Para) /* Parameter to set: MAC or PHY LoopBack, Duplex Mode */ | ||
3439 | { | ||
3440 | SK_U16 Word; | ||
3441 | |||
3442 | XM_IN16(IoC, Port, XM_MMU_CMD, &Word); | ||
3443 | |||
3444 | switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) { | ||
3445 | case SK_MAC_LOOPB_ON: | ||
3446 | Word |= XM_MMU_MAC_LB; | ||
3447 | break; | ||
3448 | case SK_MAC_LOOPB_OFF: | ||
3449 | Word &= ~XM_MMU_MAC_LB; | ||
3450 | break; | ||
3451 | } | ||
3452 | |||
3453 | switch (Para & (SK_PHY_LOOPB_ON | SK_PHY_LOOPB_OFF)) { | ||
3454 | case SK_PHY_LOOPB_ON: | ||
3455 | Word |= XM_MMU_GMII_LOOP; | ||
3456 | break; | ||
3457 | case SK_PHY_LOOPB_OFF: | ||
3458 | Word &= ~XM_MMU_GMII_LOOP; | ||
3459 | break; | ||
3460 | } | ||
3461 | |||
3462 | switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) { | ||
3463 | case SK_PHY_FULLD_ON: | ||
3464 | Word |= XM_MMU_GMII_FD; | ||
3465 | break; | ||
3466 | case SK_PHY_FULLD_OFF: | ||
3467 | Word &= ~XM_MMU_GMII_FD; | ||
3468 | break; | ||
3469 | } | ||
3470 | |||
3471 | XM_OUT16(IoC, Port, XM_MMU_CMD, Word | XM_MMU_ENA_RX | XM_MMU_ENA_TX); | ||
3472 | |||
3473 | /* dummy read to ensure writing */ | ||
3474 | XM_IN16(IoC, Port, XM_MMU_CMD, &Word); | ||
3475 | |||
3476 | } /* SkXmSetRxTxEn */ | ||
3477 | #endif /* GENESIS */ | ||
3478 | |||
3479 | |||
3480 | #ifdef YUKON | ||
3481 | /****************************************************************************** | ||
3482 | * | ||
3483 | * SkGmSetRxTxEn() - Special Set Rx/Tx Enable and some features in GMAC | ||
3484 | * | ||
3485 | * Description: | ||
3486 | * sets MAC LoopBack and Duplex Mode in the General Purpose Control Reg. | ||
3487 | * enables Rx/Tx | ||
3488 | * | ||
3489 | * Returns: N/A | ||
3490 | */ | ||
3491 | static void SkGmSetRxTxEn( | ||
3492 | SK_AC *pAC, /* Adapter Context */ | ||
3493 | SK_IOC IoC, /* IO context */ | ||
3494 | int Port, /* Port Index (MAC_1 + n) */ | ||
3495 | int Para) /* Parameter to set: MAC LoopBack, Duplex Mode */ | ||
3496 | { | ||
3497 | SK_U16 Ctrl; | ||
3498 | |||
3499 | GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl); | ||
3500 | |||
3501 | switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) { | ||
3502 | case SK_MAC_LOOPB_ON: | ||
3503 | Ctrl |= GM_GPCR_LOOP_ENA; | ||
3504 | break; | ||
3505 | case SK_MAC_LOOPB_OFF: | ||
3506 | Ctrl &= ~GM_GPCR_LOOP_ENA; | ||
3507 | break; | ||
3508 | } | ||
3509 | |||
3510 | switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) { | ||
3511 | case SK_PHY_FULLD_ON: | ||
3512 | Ctrl |= GM_GPCR_DUP_FULL; | ||
3513 | break; | ||
3514 | case SK_PHY_FULLD_OFF: | ||
3515 | Ctrl &= ~GM_GPCR_DUP_FULL; | ||
3516 | break; | ||
3517 | } | ||
3518 | |||
3519 | GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Ctrl | GM_GPCR_RX_ENA | | ||
3520 | GM_GPCR_TX_ENA)); | ||
3521 | |||
3522 | /* dummy read to ensure writing */ | ||
3523 | GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl); | ||
3524 | |||
3525 | } /* SkGmSetRxTxEn */ | ||
3526 | #endif /* YUKON */ | ||
3527 | |||
3528 | |||
3529 | #ifndef SK_SLIM | ||
3530 | /****************************************************************************** | ||
3531 | * | ||
3532 | * SkMacSetRxTxEn() - Special Set Rx/Tx Enable and parameters | ||
3533 | * | ||
3534 | * Description: calls the Special Set Rx/Tx Enable routines dep. on board type | ||
3535 | * | ||
3536 | * Returns: N/A | ||
3537 | */ | ||
3538 | void SkMacSetRxTxEn( | ||
3539 | SK_AC *pAC, /* Adapter Context */ | ||
3540 | SK_IOC IoC, /* IO context */ | ||
3541 | int Port, /* Port Index (MAC_1 + n) */ | ||
3542 | int Para) | ||
3543 | { | ||
3544 | #ifdef GENESIS | ||
3545 | if (pAC->GIni.GIGenesis) { | ||
3546 | |||
3547 | SkXmSetRxTxEn(pAC, IoC, Port, Para); | ||
3548 | } | ||
3549 | #endif /* GENESIS */ | ||
3550 | |||
3551 | #ifdef YUKON | ||
3552 | if (pAC->GIni.GIYukon) { | ||
3553 | |||
3554 | SkGmSetRxTxEn(pAC, IoC, Port, Para); | ||
3555 | } | ||
3556 | #endif /* YUKON */ | ||
3557 | |||
3558 | } /* SkMacSetRxTxEn */ | ||
3559 | #endif /* !SK_SLIM */ | ||
3560 | |||
3561 | |||
3562 | /****************************************************************************** | 3115 | /****************************************************************************** |
3563 | * | 3116 | * |
3564 | * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up | 3117 | * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up |
@@ -3976,7 +3529,7 @@ SK_U16 PhyStat) /* PHY Status word to analyse */ | |||
3976 | * Returns: | 3529 | * Returns: |
3977 | * nothing | 3530 | * nothing |
3978 | */ | 3531 | */ |
3979 | void SkXmIrq( | 3532 | static void SkXmIrq( |
3980 | SK_AC *pAC, /* adapter context */ | 3533 | SK_AC *pAC, /* adapter context */ |
3981 | SK_IOC IoC, /* IO context */ | 3534 | SK_IOC IoC, /* IO context */ |
3982 | int Port) /* Port Index (MAC_1 + n) */ | 3535 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -4112,7 +3665,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
4112 | * Returns: | 3665 | * Returns: |
4113 | * nothing | 3666 | * nothing |
4114 | */ | 3667 | */ |
4115 | void SkGmIrq( | 3668 | static void SkGmIrq( |
4116 | SK_AC *pAC, /* adapter context */ | 3669 | SK_AC *pAC, /* adapter context */ |
4117 | SK_IOC IoC, /* IO context */ | 3670 | SK_IOC IoC, /* IO context */ |
4118 | int Port) /* Port Index (MAC_1 + n) */ | 3671 | int Port) /* Port Index (MAC_1 + n) */ |
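The removed SkGm{Enter,Leave}LowPowerMode() above toggle, among other things, the IEEE 802.3 22.2.4.1.5 power-down bit of the Marvell PHY (about 240 mW according to the deleted comment) via a read-modify-write of the PHY control register. Expressed with generic MII names and hypothetical mii_read()/mii_write() helpers, the core of that toggle looks roughly like the sketch below; the real code additionally gates the 125 MHz MAC clock and issues a soft reset.

#include <linux/types.h>
#include <linux/mii.h>

extern u16  mii_read(int port, int reg);		/* hypothetical */
extern void mii_write(int port, int reg, u16 val);	/* hypothetical */

static void phy_set_power_down(int port, bool down)
{
	u16 bmcr = mii_read(port, MII_BMCR);

	if (down)
		bmcr |= BMCR_PDOWN;	/* PHY sleeps, cannot wake on its own */
	else
		bmcr &= ~BMCR_PDOWN;	/* back to normal operation */

	mii_write(port, MII_BMCR, bmcr);
}
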
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c index a2ed47f1cc70..a4b2b6975d6c 100644 --- a/drivers/net/skfp/fplustm.c +++ b/drivers/net/skfp/fplustm.c | |||
@@ -89,21 +89,21 @@ static const u_short my_sagp = 0xffff ; /* short group address (n.u.) */ | |||
89 | /* | 89 | /* |
90 | * useful interrupt bits | 90 | * useful interrupt bits |
91 | */ | 91 | */ |
92 | static int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ; | 92 | static const int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ; |
93 | static int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0| | 93 | static const int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0| |
94 | FM_STBURS | FM_STBURA0 ; | 94 | FM_STBURS | FM_STBURA0 ; |
95 | 95 | ||
96 | /* delete FM_SRBFL after tests */ | 96 | /* delete FM_SRBFL after tests */ |
97 | static int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL | | 97 | static const int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL | |
98 | FM_SMYCLM ; | 98 | FM_SMYCLM ; |
99 | static int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR | | 99 | static const int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR | |
100 | FM_SERRCTR | FM_SLSTCTR | | 100 | FM_SERRCTR | FM_SLSTCTR | |
101 | FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ; | 101 | FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ; |
102 | 102 | ||
103 | static int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ; | 103 | static const int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ; |
104 | static int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ; | 104 | static const int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ; |
105 | 105 | ||
106 | static int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC | | 106 | static const int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC | |
107 | FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ; | 107 | FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ; |
108 | 108 | ||
109 | 109 | ||
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c index cd0aa4c151b0..74e129f3ce92 100644 --- a/drivers/net/skfp/pcmplc.c +++ b/drivers/net/skfp/pcmplc.c | |||
@@ -186,7 +186,7 @@ static const struct plt { | |||
186 | * Do we need the EBUF error during signaling, too, to detect SUPERNET_3 | 186 | * Do we need the EBUF error during signaling, too, to detect SUPERNET_3 |
187 | * PLL bug? | 187 | * PLL bug? |
188 | */ | 188 | */ |
189 | static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | | 189 | static const int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | |
190 | PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; | 190 | PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; |
191 | #else /* SUPERNET_3 */ | 191 | #else /* SUPERNET_3 */ |
192 | /* | 192 | /* |
@@ -195,7 +195,7 @@ static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | | |||
195 | static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | | 195 | static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | |
196 | PL_PCM_ENABLED | PL_SELF_TEST ; | 196 | PL_PCM_ENABLED | PL_SELF_TEST ; |
197 | #endif /* SUPERNET_3 */ | 197 | #endif /* SUPERNET_3 */ |
198 | static int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | | 198 | static const int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | |
199 | PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; | 199 | PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; |
200 | 200 | ||
201 | /* external functions */ | 201 | /* external functions */ |
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 4b5ed2c63177..c7fb6133047e 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c | |||
@@ -67,7 +67,7 @@ | |||
67 | /* each new release!!! */ | 67 | /* each new release!!! */ |
68 | #define VERSION "2.07" | 68 | #define VERSION "2.07" |
69 | 69 | ||
70 | static const char *boot_msg = | 70 | static const char * const boot_msg = |
71 | "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n" | 71 | "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n" |
72 | " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)"; | 72 | " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)"; |
73 | 73 | ||
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index d167deda9a53..35b18057fbdd 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -201,7 +201,7 @@ static int max_interrupt_work = 20; | |||
201 | static int mtu; | 201 | static int mtu; |
202 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | 202 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). |
203 | The Starfire has a 512 element hash table based on the Ethernet CRC. */ | 203 | The Starfire has a 512 element hash table based on the Ethernet CRC. */ |
204 | static int multicast_filter_limit = 512; | 204 | static const int multicast_filter_limit = 512; |
205 | /* Whether to do TCP/UDP checksums in hardware */ | 205 | /* Whether to do TCP/UDP checksums in hardware */ |
206 | static int enable_hw_cksum = 1; | 206 | static int enable_hw_cksum = 1; |
207 | 207 | ||
@@ -463,7 +463,7 @@ static struct pci_device_id starfire_pci_tbl[] = { | |||
463 | MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); | 463 | MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); |
464 | 464 | ||
465 | /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */ | 465 | /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */ |
466 | static struct chip_info { | 466 | static const struct chip_info { |
467 | const char *name; | 467 | const char *name; |
468 | int drv_flags; | 468 | int drv_flags; |
469 | } netdrv_tbl[] __devinitdata = { | 469 | } netdrv_tbl[] __devinitdata = { |
@@ -2084,6 +2084,38 @@ static int netdev_close(struct net_device *dev) | |||
2084 | return 0; | 2084 | return 0; |
2085 | } | 2085 | } |
2086 | 2086 | ||
2087 | #ifdef CONFIG_PM | ||
2088 | static int starfire_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2089 | { | ||
2090 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2091 | |||
2092 | if (netif_running(dev)) { | ||
2093 | netif_device_detach(dev); | ||
2094 | netdev_close(dev); | ||
2095 | } | ||
2096 | |||
2097 | pci_save_state(pdev); | ||
2098 | pci_set_power_state(pdev, pci_choose_state(pdev,state)); | ||
2099 | |||
2100 | return 0; | ||
2101 | } | ||
2102 | |||
2103 | static int starfire_resume(struct pci_dev *pdev) | ||
2104 | { | ||
2105 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2106 | |||
2107 | pci_set_power_state(pdev, PCI_D0); | ||
2108 | pci_restore_state(pdev); | ||
2109 | |||
2110 | if (netif_running(dev)) { | ||
2111 | netdev_open(dev); | ||
2112 | netif_device_attach(dev); | ||
2113 | } | ||
2114 | |||
2115 | return 0; | ||
2116 | } | ||
2117 | #endif /* CONFIG_PM */ | ||
2118 | |||
2087 | 2119 | ||
2088 | static void __devexit starfire_remove_one (struct pci_dev *pdev) | 2120 | static void __devexit starfire_remove_one (struct pci_dev *pdev) |
2089 | { | 2121 | { |
@@ -2115,6 +2147,10 @@ static struct pci_driver starfire_driver = { | |||
2115 | .name = DRV_NAME, | 2147 | .name = DRV_NAME, |
2116 | .probe = starfire_init_one, | 2148 | .probe = starfire_init_one, |
2117 | .remove = __devexit_p(starfire_remove_one), | 2149 | .remove = __devexit_p(starfire_remove_one), |
2150 | #ifdef CONFIG_PM | ||
2151 | .suspend = starfire_suspend, | ||
2152 | .resume = starfire_resume, | ||
2153 | #endif /* CONFIG_PM */ | ||
2118 | .id_table = starfire_pci_tbl, | 2154 | .id_table = starfire_pci_tbl, |
2119 | }; | 2155 | }; |
2120 | 2156 | ||
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index 0ab9c38b4a34..61eec46cb111 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c | |||
@@ -106,7 +106,7 @@ | |||
106 | static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ | 106 | static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ |
107 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | 107 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). |
108 | Typical is a 64 element hash table based on the Ethernet CRC. */ | 108 | Typical is a 64 element hash table based on the Ethernet CRC. */ |
109 | static int multicast_filter_limit = 32; | 109 | static const int multicast_filter_limit = 32; |
110 | 110 | ||
111 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | 111 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. |
112 | Setting to > 1518 effectively disables this feature. | 112 | Setting to > 1518 effectively disables this feature. |
@@ -298,7 +298,7 @@ enum { | |||
298 | struct pci_id_info { | 298 | struct pci_id_info { |
299 | const char *name; | 299 | const char *name; |
300 | }; | 300 | }; |
301 | static struct pci_id_info pci_id_tbl[] = { | 301 | static const struct pci_id_info pci_id_tbl[] = { |
302 | {"D-Link DFE-550TX FAST Ethernet Adapter"}, | 302 | {"D-Link DFE-550TX FAST Ethernet Adapter"}, |
303 | {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, | 303 | {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, |
304 | {"D-Link DFE-580TX 4 port Server Adapter"}, | 304 | {"D-Link DFE-580TX 4 port Server Adapter"}, |
@@ -633,9 +633,13 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, | |||
633 | 633 | ||
634 | np->phys[0] = 1; /* Default setting */ | 634 | np->phys[0] = 1; /* Default setting */ |
635 | np->mii_preamble_required++; | 635 | np->mii_preamble_required++; |
636 | /* | ||
637 | * It seems some phys doesn't deal well with address 0 being accessed | ||
637 | * It seems some PHYs don't deal well with address 0 being accessed | ||
638 | * first, so leave address zero to the end of the loop (32 & 31 == 0). | ||
639 | */ | ||
636 | for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) { | 640 | for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) { |
637 | int mii_status = mdio_read(dev, phy, MII_BMSR); | ||
638 | int phyx = phy & 0x1f; | 641 | int phyx = phy & 0x1f; |
642 | int mii_status = mdio_read(dev, phyx, MII_BMSR); | ||
639 | if (mii_status != 0xffff && mii_status != 0x0000) { | 643 | if (mii_status != 0xffff && mii_status != 0x0000) { |
640 | np->phys[phy_idx++] = phyx; | 644 | np->phys[phy_idx++] = phyx; |
641 | np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); | 645 | np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); |
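The sundance hunk above walks phy from 1 to 32 and masks it with 0x1f, so MII address 0 is probed last, as the new comment explains. A throwaway user-space check of that mapping (illustration only, not driver code) makes the probe order explicit:

#include <stdio.h>

int main(void)
{
	int phy;

	/* prints: 1 2 3 ... 31 0 -- address 0 comes last (32 & 0x1f == 0) */
	for (phy = 1; phy <= 32; phy++)
		printf("%d ", phy & 0x1f);
	printf("\n");
	return 0;
}
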
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index d3ddb41d6e5c..cb0aba95d4e3 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include "sungem_phy.h" | 39 | #include "sungem_phy.h" |
40 | 40 | ||
41 | /* Link modes of the BCM5400 PHY */ | 41 | /* Link modes of the BCM5400 PHY */ |
42 | static int phy_BCM5400_link_table[8][3] = { | 42 | static const int phy_BCM5400_link_table[8][3] = { |
43 | { 0, 0, 0 }, /* No link */ | 43 | { 0, 0, 0 }, /* No link */ |
44 | { 0, 0, 0 }, /* 10BT Half Duplex */ | 44 | { 0, 0, 0 }, /* 10BT Half Duplex */ |
45 | { 1, 0, 0 }, /* 10BT Full Duplex */ | 45 | { 1, 0, 0 }, /* 10BT Full Duplex */ |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 31a16fa67558..6c6c5498899f 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -7802,7 +7802,7 @@ static int tg3_test_link(struct tg3 *tp) | |||
7802 | } | 7802 | } |
7803 | 7803 | ||
7804 | /* Only test the commonly used registers */ | 7804 | /* Only test the commonly used registers */ |
7805 | static int tg3_test_registers(struct tg3 *tp) | 7805 | static const int tg3_test_registers(struct tg3 *tp) |
7806 | { | 7806 | { |
7807 | int i, is_5705; | 7807 | int i, is_5705; |
7808 | u32 offset, read_mask, write_mask, val, save_val, read_val; | 7808 | u32 offset, read_mask, write_mask, val, save_val, read_val; |
@@ -8016,7 +8016,7 @@ out: | |||
8016 | 8016 | ||
8017 | static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) | 8017 | static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) |
8018 | { | 8018 | { |
8019 | static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; | 8019 | static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; |
8020 | int i; | 8020 | int i; |
8021 | u32 j; | 8021 | u32 j; |
8022 | 8022 | ||
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 97712c3c4e07..c58a4c31d0dd 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c | |||
@@ -122,6 +122,7 @@ | |||
122 | #include <linux/spinlock.h> | 122 | #include <linux/spinlock.h> |
123 | #include <linux/version.h> | 123 | #include <linux/version.h> |
124 | #include <linux/bitops.h> | 124 | #include <linux/bitops.h> |
125 | #include <linux/jiffies.h> | ||
125 | 126 | ||
126 | #include <net/checksum.h> | 127 | #include <net/checksum.h> |
127 | 128 | ||
@@ -512,7 +513,7 @@ static int streamer_reset(struct net_device *dev) | |||
512 | 513 | ||
513 | while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) { | 514 | while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) { |
514 | msleep_interruptible(100); | 515 | msleep_interruptible(100); |
515 | if (jiffies - t > 40 * HZ) { | 516 | if (time_after(jiffies, t + 40 * HZ)) { |
516 | printk(KERN_ERR | 517 | printk(KERN_ERR |
517 | "IBM PCI tokenring card not responding\n"); | 518 | "IBM PCI tokenring card not responding\n"); |
518 | release_region(dev->base_addr, STREAMER_IO_SPACE); | 519 | release_region(dev->base_addr, STREAMER_IO_SPACE); |
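The lanstreamer, olympic and pnic hunks in this series replace open-coded "jiffies - t > timeout" tests with time_after(), which stays correct when the jiffies counter wraps (it is defined in <linux/jiffies.h> as a signed comparison, roughly (long)(b - a) < 0). A minimal sketch of the polling pattern; wait_for_ready() and the ready() callback are hypothetical, not part of any of these drivers.

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int wait_for_ready(int (*ready)(void))
{
	unsigned long t = jiffies;

	while (!ready()) {
		schedule();
		/* wrap-safe: never compare jiffies differences directly */
		if (time_after(jiffies, t + 40 * HZ))
			return -ENODEV;		/* give up after 40 seconds */
	}
	return 0;
}
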
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 05477d24fd49..23032a7bc0a9 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c | |||
@@ -100,6 +100,7 @@ | |||
100 | #include <linux/pci.h> | 100 | #include <linux/pci.h> |
101 | #include <linux/spinlock.h> | 101 | #include <linux/spinlock.h> |
102 | #include <linux/bitops.h> | 102 | #include <linux/bitops.h> |
103 | #include <linux/jiffies.h> | ||
103 | 104 | ||
104 | #include <net/checksum.h> | 105 | #include <net/checksum.h> |
105 | 106 | ||
@@ -307,7 +308,7 @@ static int __devinit olympic_init(struct net_device *dev) | |||
307 | t=jiffies; | 308 | t=jiffies; |
308 | while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) { | 309 | while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) { |
309 | schedule(); | 310 | schedule(); |
310 | if(jiffies-t > 40*HZ) { | 311 | if(time_after(jiffies, t + 40*HZ)) { |
311 | printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); | 312 | printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); |
312 | return -ENODEV; | 313 | return -ENODEV; |
313 | } | 314 | } |
@@ -359,7 +360,7 @@ static int __devinit olympic_init(struct net_device *dev) | |||
359 | t=jiffies; | 360 | t=jiffies; |
360 | while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) { | 361 | while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) { |
361 | schedule() ; | 362 | schedule() ; |
362 | if(jiffies-t > 2*HZ) { | 363 | if(time_after(jiffies, t + 2*HZ)) { |
363 | printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ; | 364 | printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ; |
364 | return -ENODEV; | 365 | return -ENODEV; |
365 | } | 366 | } |
@@ -373,7 +374,7 @@ static int __devinit olympic_init(struct net_device *dev) | |||
373 | t=jiffies; | 374 | t=jiffies; |
374 | while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) { | 375 | while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) { |
375 | schedule(); | 376 | schedule(); |
376 | if(jiffies-t > 15*HZ) { | 377 | if(time_after(jiffies, t + 15*HZ)) { |
377 | printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); | 378 | printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); |
378 | return -ENODEV; | 379 | return -ENODEV; |
379 | } | 380 | } |
@@ -519,7 +520,7 @@ static int olympic_open(struct net_device *dev) | |||
519 | olympic_priv->srb_queued=0; | 520 | olympic_priv->srb_queued=0; |
520 | break; | 521 | break; |
521 | } | 522 | } |
522 | if ((jiffies-t) > 10*HZ) { | 523 | if (time_after(jiffies, t + 10*HZ)) { |
523 | printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ; | 524 | printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ; |
524 | olympic_priv->srb_queued=0; | 525 | olympic_priv->srb_queued=0; |
525 | break ; | 526 | break ; |
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 2d0cfbceee22..6299e186c73f 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -402,8 +402,7 @@ static void de_rx (struct de_private *de) | |||
402 | unsigned copying_skb, buflen; | 402 | unsigned copying_skb, buflen; |
403 | 403 | ||
404 | skb = de->rx_skb[rx_tail].skb; | 404 | skb = de->rx_skb[rx_tail].skb; |
405 | if (!skb) | 405 | BUG_ON(!skb); |
406 | BUG(); | ||
407 | rmb(); | 406 | rmb(); |
408 | status = le32_to_cpu(de->rx_ring[rx_tail].opts1); | 407 | status = le32_to_cpu(de->rx_ring[rx_tail].opts1); |
409 | if (status & DescOwn) | 408 | if (status & DescOwn) |
@@ -545,8 +544,7 @@ static void de_tx (struct de_private *de) | |||
545 | break; | 544 | break; |
546 | 545 | ||
547 | skb = de->tx_skb[tx_tail].skb; | 546 | skb = de->tx_skb[tx_tail].skb; |
548 | if (!skb) | 547 | BUG_ON(!skb); |
549 | BUG(); | ||
550 | if (unlikely(skb == DE_DUMMY_SKB)) | 548 | if (unlikely(skb == DE_DUMMY_SKB)) |
551 | goto next; | 549 | goto next; |
552 | 550 | ||
@@ -789,8 +787,7 @@ static void __de_set_rx_mode (struct net_device *dev) | |||
789 | 787 | ||
790 | de->tx_head = NEXT_TX(entry); | 788 | de->tx_head = NEXT_TX(entry); |
791 | 789 | ||
792 | if (TX_BUFFS_AVAIL(de) < 0) | 790 | BUG_ON(TX_BUFFS_AVAIL(de) < 0); |
793 | BUG(); | ||
794 | if (TX_BUFFS_AVAIL(de) == 0) | 791 | if (TX_BUFFS_AVAIL(de) == 0) |
795 | netif_stop_queue(dev); | 792 | netif_stop_queue(dev); |
796 | 793 | ||
@@ -916,8 +913,7 @@ static void de_set_media (struct de_private *de) | |||
916 | unsigned media = de->media_type; | 913 | unsigned media = de->media_type; |
917 | u32 macmode = dr32(MacMode); | 914 | u32 macmode = dr32(MacMode); |
918 | 915 | ||
919 | if (de_is_running(de)) | 916 | BUG_ON(de_is_running(de)); |
920 | BUG(); | ||
921 | 917 | ||
922 | if (de->de21040) | 918 | if (de->de21040) |
923 | dw32(CSR11, FULL_DUPLEX_MAGIC); | 919 | dw32(CSR11, FULL_DUPLEX_MAGIC); |
@@ -1153,8 +1149,7 @@ static void de_media_interrupt (struct de_private *de, u32 status) | |||
1153 | return; | 1149 | return; |
1154 | } | 1150 | } |
1155 | 1151 | ||
1156 | if (!(status & LinkFail)) | 1152 | BUG_ON(!(status & LinkFail)); |
1157 | BUG(); | ||
1158 | 1153 | ||
1159 | if (netif_carrier_ok(de->dev)) { | 1154 | if (netif_carrier_ok(de->dev)) { |
1160 | de_link_down(de); | 1155 | de_link_down(de); |
@@ -2092,8 +2087,7 @@ static void __exit de_remove_one (struct pci_dev *pdev) | |||
2092 | struct net_device *dev = pci_get_drvdata(pdev); | 2087 | struct net_device *dev = pci_get_drvdata(pdev); |
2093 | struct de_private *de = dev->priv; | 2088 | struct de_private *de = dev->priv; |
2094 | 2089 | ||
2095 | if (!dev) | 2090 | BUG_ON(!dev); |
2096 | BUG(); | ||
2097 | unregister_netdev(dev); | 2091 | unregister_netdev(dev); |
2098 | kfree(de->ee_data); | 2092 | kfree(de->ee_data); |
2099 | iounmap(de->regs); | 2093 | iounmap(de->regs); |
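The de2104x, xircom_cb and winbond-840 hunks convert two-line "if (cond) BUG();" sequences into single BUG_ON(cond) calls with identical behaviour. A small sketch of the two forms side by side; check_skb() is a made-up wrapper, not a function of these drivers.

#include <linux/kernel.h>
#include <linux/skbuff.h>

static void check_skb(struct sk_buff *skb)
{
	if (!skb)		/* old style: condition and BUG() on separate lines */
		BUG();

	BUG_ON(!skb);		/* new style: same effect, reads as an assertion */
}
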
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c index d9980bde7508..ca7e53246adb 100644 --- a/drivers/net/tulip/pnic.c +++ b/drivers/net/tulip/pnic.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/jiffies.h> | ||
19 | #include "tulip.h" | 20 | #include "tulip.h" |
20 | 21 | ||
21 | 22 | ||
@@ -68,7 +69,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5) | |||
68 | */ | 69 | */ |
69 | if (tulip_media_cap[dev->if_port] & MediaIsMII) | 70 | if (tulip_media_cap[dev->if_port] & MediaIsMII) |
70 | return; | 71 | return; |
71 | if (! tp->nwayset || jiffies - dev->trans_start > 1*HZ) { | 72 | if (! tp->nwayset || time_after(jiffies, dev->trans_start + 1*HZ)) { |
72 | tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff); | 73 | tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff); |
73 | iowrite32(tp->csr6, ioaddr + CSR6); | 74 | iowrite32(tp->csr6, ioaddr + CSR6); |
74 | iowrite32(0x30, ioaddr + CSR12); | 75 | iowrite32(0x30, ioaddr + CSR12); |
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 5b1af3986abf..ba05dedf29d3 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -1645,7 +1645,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1645 | 1645 | ||
1646 | /* no more hardware accesses behind this line. */ | 1646 | /* no more hardware accesses behind this line. */ |
1647 | 1647 | ||
1648 | if (np->csr6) BUG(); | 1648 | BUG_ON(np->csr6); |
1649 | if (ioread32(ioaddr + IntrEnable)) BUG(); | 1649 | if (ioread32(ioaddr + IntrEnable)) BUG(); |
1650 | 1650 | ||
1651 | /* pci_power_off(pdev, -1); */ | 1651 | /* pci_power_off(pdev, -1); */ |
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 60d1e05ab732..56344103ac23 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c | |||
@@ -32,6 +32,9 @@ | |||
32 | 32 | ||
33 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
34 | #include <asm/io.h> | 34 | #include <asm/io.h> |
35 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
36 | #include <asm/irq.h> | ||
37 | #endif | ||
35 | 38 | ||
36 | #ifdef DEBUG | 39 | #ifdef DEBUG |
37 | #define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) | 40 | #define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) |
@@ -598,10 +601,8 @@ static void setup_descriptors(struct xircom_private *card) | |||
598 | enter("setup_descriptors"); | 601 | enter("setup_descriptors"); |
599 | 602 | ||
600 | 603 | ||
601 | if (card->rx_buffer == NULL) | 604 | BUG_ON(card->rx_buffer == NULL); |
602 | BUG(); | 605 | BUG_ON(card->tx_buffer == NULL); |
603 | if (card->tx_buffer == NULL) | ||
604 | BUG(); | ||
605 | 606 | ||
606 | /* Receive descriptors */ | 607 | /* Receive descriptors */ |
607 | memset(card->rx_buffer, 0, 128); /* clear the descriptors */ | 608 | memset(card->rx_buffer, 0, 128); /* clear the descriptors */ |
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 4c76cb794bfb..cde35dd87906 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -178,7 +178,7 @@ enum typhoon_cards { | |||
178 | }; | 178 | }; |
179 | 179 | ||
180 | /* directly indexed by enum typhoon_cards, above */ | 180 | /* directly indexed by enum typhoon_cards, above */ |
181 | static struct typhoon_card_info typhoon_card_info[] __devinitdata = { | 181 | static const struct typhoon_card_info typhoon_card_info[] __devinitdata = { |
182 | { "3Com Typhoon (3C990-TX)", | 182 | { "3Com Typhoon (3C990-TX)", |
183 | TYPHOON_CRYPTO_NONE}, | 183 | TYPHOON_CRYPTO_NONE}, |
184 | { "3Com Typhoon (3CR990-TX-95)", | 184 | { "3Com Typhoon (3CR990-TX-95)", |
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 18c27e1e7884..883cf7da10fc 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig | |||
@@ -459,7 +459,7 @@ config WANPIPE_FR | |||
459 | bool "WANPIPE Frame Relay support" | 459 | bool "WANPIPE Frame Relay support" |
460 | depends on VENDOR_SANGOMA | 460 | depends on VENDOR_SANGOMA |
461 | help | 461 | help |
462 | Connect a WANPIPE card to a Frame Relay network, or use Frame Felay | 462 | Connect a WANPIPE card to a Frame Relay network, or use Frame Relay |
463 | API to develop custom applications. | 463 | API to develop custom applications. |
464 | 464 | ||
465 | Contains the Ethernet Bridging over Frame Relay feature, where | 465 | Contains the Ethernet Bridging over Frame Relay feature, where |
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index 7db1d1d0bb34..cf5c805452a3 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/ioport.h> | 29 | #include <linux/ioport.h> |
30 | #include <net/arp.h> | 30 | #include <net/arp.h> |
31 | 31 | ||
32 | #include <asm/irq.h> | ||
32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
33 | #include <asm/dma.h> | 34 | #include <asm/dma.h> |
34 | #include <asm/byteorder.h> | 35 | #include <asm/byteorder.h> |
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 5380ddfcd7d5..050e854e7774 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <net/arp.h> | 24 | #include <net/arp.h> |
25 | 25 | ||
26 | #include <asm/irq.h> | ||
26 | #include <asm/io.h> | 27 | #include <asm/io.h> |
27 | #include <asm/dma.h> | 28 | #include <asm/dma.h> |
28 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index ef85d76575a2..5b0a19a5058d 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig | |||
@@ -6,7 +6,8 @@ menu "Wireless LAN (non-hamradio)" | |||
6 | depends on NETDEVICES | 6 | depends on NETDEVICES |
7 | 7 | ||
8 | config NET_RADIO | 8 | config NET_RADIO |
9 | bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions" | 9 | bool "Wireless LAN drivers (non-hamradio)" |
10 | select WIRELESS_EXT | ||
10 | ---help--- | 11 | ---help--- |
11 | Support for wireless LANs and everything having to do with radio, | 12 | Support for wireless LANs and everything having to do with radio, |
12 | but not with amateur radio or FM broadcasting. | 13 | but not with amateur radio or FM broadcasting. |
@@ -135,8 +136,9 @@ comment "Wireless 802.11b ISA/PCI cards support" | |||
135 | 136 | ||
136 | config IPW2100 | 137 | config IPW2100 |
137 | tristate "Intel PRO/Wireless 2100 Network Connection" | 138 | tristate "Intel PRO/Wireless 2100 Network Connection" |
138 | depends on NET_RADIO && PCI && IEEE80211 | 139 | depends on NET_RADIO && PCI |
139 | select FW_LOADER | 140 | select FW_LOADER |
141 | select IEEE80211 | ||
140 | ---help--- | 142 | ---help--- |
141 | A driver for the Intel PRO/Wireless 2100 Network | 143 | A driver for the Intel PRO/Wireless 2100 Network |
142 | Connection 802.11b wireless network adapter. | 144 | Connection 802.11b wireless network adapter. |
@@ -188,8 +190,9 @@ config IPW2100_DEBUG | |||
188 | 190 | ||
189 | config IPW2200 | 191 | config IPW2200 |
190 | tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" | 192 | tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" |
191 | depends on NET_RADIO && IEEE80211 && PCI | 193 | depends on NET_RADIO && PCI |
192 | select FW_LOADER | 194 | select FW_LOADER |
195 | select IEEE80211 | ||
193 | ---help--- | 196 | ---help--- |
194 | A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network | 197 | A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network |
195 | Connection adapters. | 198 | Connection adapters. |
@@ -201,7 +204,7 @@ config IPW2200 | |||
201 | In order to use this driver, you will need a firmware image for it. | 204 | In order to use this driver, you will need a firmware image for it. |
202 | You can obtain the firmware from | 205 | You can obtain the firmware from |
203 | <http://ipw2200.sf.net/>. See the above referenced README.ipw2200 | 206 | <http://ipw2200.sf.net/>. See the above referenced README.ipw2200 |
204 | for information on where to install the firmare images. | 207 | for information on where to install the firmware images. |
205 | 208 | ||
206 | You will also very likely need the Wireless Tools in order to | 209 | You will also very likely need the Wireless Tools in order to |
207 | configure your card: | 210 | configure your card: |
@@ -213,6 +216,19 @@ config IPW2200 | |||
213 | say M here and read <file:Documentation/modules.txt>. The module | 216 | say M here and read <file:Documentation/modules.txt>. The module |
214 | will be called ipw2200.ko. | 217 | will be called ipw2200.ko. |
215 | 218 | ||
219 | config IPW2200_MONITOR | ||
220 | bool "Enable promiscuous mode" | ||
221 | depends on IPW2200 | ||
222 | ---help--- | ||
223 | Enables promiscuous/monitor mode support for the ipw2200 driver. | ||
224 | With this feature compiled into the driver, you can switch to | ||
225 | promiscuous mode via the Wireless Tool's Monitor mode. While in this | ||
226 | mode, no packets can be sent. | ||
227 | |||
228 | config IPW_QOS | ||
229 | bool "Enable QoS support" | ||
230 | depends on IPW2200 && EXPERIMENTAL | ||
231 | |||
216 | config IPW2200_DEBUG | 232 | config IPW2200_DEBUG |
217 | bool "Enable full debugging output in IPW2200 module." | 233 | bool "Enable full debugging output in IPW2200 module." |
218 | depends on IPW2200 | 234 | depends on IPW2200 |
@@ -239,13 +255,14 @@ config IPW2200_DEBUG | |||
239 | 255 | ||
240 | config AIRO | 256 | config AIRO |
241 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" | 257 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" |
242 | depends on NET_RADIO && ISA_DMA_API && CRYPTO && (PCI || BROKEN) | 258 | depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN) |
259 | select CRYPTO | ||
243 | ---help--- | 260 | ---help--- |
244 | This is the standard Linux driver to support Cisco/Aironet ISA and | 261 | This is the standard Linux driver to support Cisco/Aironet ISA and |
245 | PCI 802.11 wireless cards. | 262 | PCI 802.11 wireless cards. |
246 | It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X | 263 | It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X |
247 | - with or without encryption) as well as card before the Cisco | 264 | - with or without encryption) as well as card before the Cisco |
248 | aquisition (Aironet 4500, Aironet 4800, Aironet 4800B). | 265 | acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). |
249 | 266 | ||
250 | This driver support both the standard Linux Wireless Extensions | 267 | This driver support both the standard Linux Wireless Extensions |
251 | and Cisco proprietary API, so both the Linux Wireless Tools and the | 268 | and Cisco proprietary API, so both the Linux Wireless Tools and the |
@@ -387,13 +404,14 @@ config PCMCIA_SPECTRUM | |||
387 | config AIRO_CS | 404 | config AIRO_CS |
388 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" | 405 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" |
389 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) | 406 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) |
407 | select CRYPTO | ||
390 | ---help--- | 408 | ---help--- |
391 | This is the standard Linux driver to support Cisco/Aironet PCMCIA | 409 | This is the standard Linux driver to support Cisco/Aironet PCMCIA |
392 | 802.11 wireless cards. This driver is the same as the Aironet | 410 | 802.11 wireless cards. This driver is the same as the Aironet |
393 | driver part of the Linux Pcmcia package. | 411 | driver part of the Linux Pcmcia package. |
394 | It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X | 412 | It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X |
395 | - with or without encryption) as well as card before the Cisco | 413 | - with or without encryption) as well as card before the Cisco |
396 | aquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also | 414 | acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also |
397 | supports OEM of Cisco such as the DELL TrueMobile 4800 and Xircom | 415 | supports OEM of Cisco such as the DELL TrueMobile 4800 and Xircom |
398 | 802.11b cards. | 416 | 802.11b cards. |
399 | 417 | ||
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index a4c7ae94614d..864937a409e5 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/in.h> | 36 | #include <linux/in.h> |
37 | #include <linux/bitops.h> | 37 | #include <linux/bitops.h> |
38 | #include <linux/scatterlist.h> | 38 | #include <linux/scatterlist.h> |
39 | #include <linux/crypto.h> | ||
39 | #include <asm/io.h> | 40 | #include <asm/io.h> |
40 | #include <asm/system.h> | 41 | #include <asm/system.h> |
41 | 42 | ||
@@ -87,14 +88,6 @@ static struct pci_driver airo_driver = { | |||
87 | #include <linux/delay.h> | 88 | #include <linux/delay.h> |
88 | #endif | 89 | #endif |
89 | 90 | ||
90 | /* Support Cisco MIC feature */ | ||
91 | #define MICSUPPORT | ||
92 | |||
93 | #if defined(MICSUPPORT) && !defined(CONFIG_CRYPTO) | ||
94 | #warning MIC support requires Crypto API | ||
95 | #undef MICSUPPORT | ||
96 | #endif | ||
97 | |||
98 | /* Hack to do some power saving */ | 91 | /* Hack to do some power saving */ |
99 | #define POWER_ON_DOWN | 92 | #define POWER_ON_DOWN |
100 | 93 | ||
@@ -1118,7 +1111,6 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp); | |||
1118 | static int writerids(struct net_device *dev, aironet_ioctl *comp); | 1111 | static int writerids(struct net_device *dev, aironet_ioctl *comp); |
1119 | static int flashcard(struct net_device *dev, aironet_ioctl *comp); | 1112 | static int flashcard(struct net_device *dev, aironet_ioctl *comp); |
1120 | #endif /* CISCO_EXT */ | 1113 | #endif /* CISCO_EXT */ |
1121 | #ifdef MICSUPPORT | ||
1122 | static void micinit(struct airo_info *ai); | 1114 | static void micinit(struct airo_info *ai); |
1123 | static int micsetup(struct airo_info *ai); | 1115 | static int micsetup(struct airo_info *ai); |
1124 | static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len); | 1116 | static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len); |
@@ -1127,9 +1119,6 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, | |||
1127 | static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi); | 1119 | static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi); |
1128 | static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm); | 1120 | static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm); |
1129 | 1121 | ||
1130 | #include <linux/crypto.h> | ||
1131 | #endif | ||
1132 | |||
1133 | struct airo_info { | 1122 | struct airo_info { |
1134 | struct net_device_stats stats; | 1123 | struct net_device_stats stats; |
1135 | struct net_device *dev; | 1124 | struct net_device *dev; |
@@ -1190,12 +1179,10 @@ struct airo_info { | |||
1190 | unsigned long scan_timestamp; /* Time started to scan */ | 1179 | unsigned long scan_timestamp; /* Time started to scan */ |
1191 | struct iw_spy_data spy_data; | 1180 | struct iw_spy_data spy_data; |
1192 | struct iw_public_data wireless_data; | 1181 | struct iw_public_data wireless_data; |
1193 | #ifdef MICSUPPORT | ||
1194 | /* MIC stuff */ | 1182 | /* MIC stuff */ |
1195 | struct crypto_tfm *tfm; | 1183 | struct crypto_tfm *tfm; |
1196 | mic_module mod[2]; | 1184 | mic_module mod[2]; |
1197 | mic_statistics micstats; | 1185 | mic_statistics micstats; |
1198 | #endif | ||
1199 | HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors | 1186 | HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors |
1200 | HostTxDesc txfids[MPI_MAX_FIDS]; | 1187 | HostTxDesc txfids[MPI_MAX_FIDS]; |
1201 | HostRidDesc config_desc; | 1188 | HostRidDesc config_desc; |
@@ -1229,7 +1216,6 @@ static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime); | |||
1229 | static int flashputbuf(struct airo_info *ai); | 1216 | static int flashputbuf(struct airo_info *ai); |
1230 | static int flashrestart(struct airo_info *ai,struct net_device *dev); | 1217 | static int flashrestart(struct airo_info *ai,struct net_device *dev); |
1231 | 1218 | ||
1232 | #ifdef MICSUPPORT | ||
1233 | /*********************************************************************** | 1219 | /*********************************************************************** |
1234 | * MIC ROUTINES * | 1220 | * MIC ROUTINES * |
1235 | *********************************************************************** | 1221 | *********************************************************************** |
@@ -1686,7 +1672,6 @@ static void emmh32_final(emmh32_context *context, u8 digest[4]) | |||
1686 | digest[2] = (val>>8) & 0xFF; | 1672 | digest[2] = (val>>8) & 0xFF; |
1687 | digest[3] = val & 0xFF; | 1673 | digest[3] = val & 0xFF; |
1688 | } | 1674 | } |
1689 | #endif | ||
1690 | 1675 | ||
1691 | static int readBSSListRid(struct airo_info *ai, int first, | 1676 | static int readBSSListRid(struct airo_info *ai, int first, |
1692 | BSSListRid *list) { | 1677 | BSSListRid *list) { |
@@ -2005,7 +1990,6 @@ static int mpi_send_packet (struct net_device *dev) | |||
2005 | * Firmware automaticly puts 802 header on so | 1990 | * Firmware automaticly puts 802 header on so |
2006 | * we don't need to account for it in the length | 1991 | * we don't need to account for it in the length |
2007 | */ | 1992 | */ |
2008 | #ifdef MICSUPPORT | ||
2009 | if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && | 1993 | if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && |
2010 | (ntohs(((u16 *)buffer)[6]) != 0x888E)) { | 1994 | (ntohs(((u16 *)buffer)[6]) != 0x888E)) { |
2011 | MICBuffer pMic; | 1995 | MICBuffer pMic; |
@@ -2022,9 +2006,7 @@ static int mpi_send_packet (struct net_device *dev) | |||
2022 | memcpy (sendbuf, &pMic, sizeof(pMic)); | 2006 | memcpy (sendbuf, &pMic, sizeof(pMic)); |
2023 | sendbuf += sizeof(pMic); | 2007 | sendbuf += sizeof(pMic); |
2024 | memcpy (sendbuf, buffer, len - sizeof(etherHead)); | 2008 | memcpy (sendbuf, buffer, len - sizeof(etherHead)); |
2025 | } else | 2009 | } else { |
2026 | #endif | ||
2027 | { | ||
2028 | *payloadLen = cpu_to_le16(len - sizeof(etherHead)); | 2010 | *payloadLen = cpu_to_le16(len - sizeof(etherHead)); |
2029 | 2011 | ||
2030 | dev->trans_start = jiffies; | 2012 | dev->trans_start = jiffies; |
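The transmit path above now performs the MIC check unconditionally: every outgoing data frame is MIC-encapsulated except 802.1X (EAPOL, ethertype 0x888E) handshake frames, which must go out unprotected. A self-contained sketch of that test, with the driver-specific flag bits replaced by plain parameters:

/* pkt points at an 802.3 frame: 6 bytes dst, 6 bytes src, 2 bytes ethertype. */
static int example_should_mic(int mic_capable, int mic_enabled, const unsigned char *pkt)
{
	unsigned short ethertype = (unsigned short)((pkt[12] << 8) | pkt[13]);

	/* Skip the MIC for EAPOL so the 802.1X handshake can complete. */
	return mic_capable && mic_enabled && ethertype != 0x888E;
}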
@@ -2400,9 +2382,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) | |||
2400 | ai->shared, ai->shared_dma); | 2382 | ai->shared, ai->shared_dma); |
2401 | } | 2383 | } |
2402 | } | 2384 | } |
2403 | #ifdef MICSUPPORT | ||
2404 | crypto_free_tfm(ai->tfm); | 2385 | crypto_free_tfm(ai->tfm); |
2405 | #endif | ||
2406 | del_airo_dev( dev ); | 2386 | del_airo_dev( dev ); |
2407 | free_netdev( dev ); | 2387 | free_netdev( dev ); |
2408 | } | 2388 | } |
@@ -2726,9 +2706,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, | |||
2726 | ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES); | 2706 | ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES); |
2727 | if (ai->thr_pid < 0) | 2707 | if (ai->thr_pid < 0) |
2728 | goto err_out_free; | 2708 | goto err_out_free; |
2729 | #ifdef MICSUPPORT | ||
2730 | ai->tfm = NULL; | 2709 | ai->tfm = NULL; |
2731 | #endif | ||
2732 | rc = add_airo_dev( dev ); | 2710 | rc = add_airo_dev( dev ); |
2733 | if (rc) | 2711 | if (rc) |
2734 | goto err_out_thr; | 2712 | goto err_out_thr; |
@@ -2969,10 +2947,8 @@ static int airo_thread(void *data) { | |||
2969 | airo_read_wireless_stats(ai); | 2947 | airo_read_wireless_stats(ai); |
2970 | else if (test_bit(JOB_PROMISC, &ai->flags)) | 2948 | else if (test_bit(JOB_PROMISC, &ai->flags)) |
2971 | airo_set_promisc(ai); | 2949 | airo_set_promisc(ai); |
2972 | #ifdef MICSUPPORT | ||
2973 | else if (test_bit(JOB_MIC, &ai->flags)) | 2950 | else if (test_bit(JOB_MIC, &ai->flags)) |
2974 | micinit(ai); | 2951 | micinit(ai); |
2975 | #endif | ||
2976 | else if (test_bit(JOB_EVENT, &ai->flags)) | 2952 | else if (test_bit(JOB_EVENT, &ai->flags)) |
2977 | airo_send_event(dev); | 2953 | airo_send_event(dev); |
2978 | else if (test_bit(JOB_AUTOWEP, &ai->flags)) | 2954 | else if (test_bit(JOB_AUTOWEP, &ai->flags)) |
@@ -3010,12 +2986,10 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) | |||
3010 | 2986 | ||
3011 | if ( status & EV_MIC ) { | 2987 | if ( status & EV_MIC ) { |
3012 | OUT4500( apriv, EVACK, EV_MIC ); | 2988 | OUT4500( apriv, EVACK, EV_MIC ); |
3013 | #ifdef MICSUPPORT | ||
3014 | if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) { | 2989 | if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) { |
3015 | set_bit(JOB_MIC, &apriv->flags); | 2990 | set_bit(JOB_MIC, &apriv->flags); |
3016 | wake_up_interruptible(&apriv->thr_wait); | 2991 | wake_up_interruptible(&apriv->thr_wait); |
3017 | } | 2992 | } |
3018 | #endif | ||
3019 | } | 2993 | } |
3020 | if ( status & EV_LINK ) { | 2994 | if ( status & EV_LINK ) { |
3021 | union iwreq_data wrqu; | 2995 | union iwreq_data wrqu; |
@@ -3194,11 +3168,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) | |||
3194 | } | 3168 | } |
3195 | bap_read (apriv, buffer + hdrlen/2, len, BAP0); | 3169 | bap_read (apriv, buffer + hdrlen/2, len, BAP0); |
3196 | } else { | 3170 | } else { |
3197 | #ifdef MICSUPPORT | ||
3198 | MICBuffer micbuf; | 3171 | MICBuffer micbuf; |
3199 | #endif | ||
3200 | bap_read (apriv, buffer, ETH_ALEN*2, BAP0); | 3172 | bap_read (apriv, buffer, ETH_ALEN*2, BAP0); |
3201 | #ifdef MICSUPPORT | ||
3202 | if (apriv->micstats.enabled) { | 3173 | if (apriv->micstats.enabled) { |
3203 | bap_read (apriv,(u16*)&micbuf,sizeof(micbuf),BAP0); | 3174 | bap_read (apriv,(u16*)&micbuf,sizeof(micbuf),BAP0); |
3204 | if (ntohs(micbuf.typelen) > 0x05DC) | 3175 | if (ntohs(micbuf.typelen) > 0x05DC) |
@@ -3211,15 +3182,10 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) | |||
3211 | skb_trim (skb, len + hdrlen); | 3182 | skb_trim (skb, len + hdrlen); |
3212 | } | 3183 | } |
3213 | } | 3184 | } |
3214 | #endif | ||
3215 | bap_read(apriv,buffer+ETH_ALEN,len,BAP0); | 3185 | bap_read(apriv,buffer+ETH_ALEN,len,BAP0); |
3216 | #ifdef MICSUPPORT | ||
3217 | if (decapsulate(apriv,&micbuf,(etherHead*)buffer,len)) { | 3186 | if (decapsulate(apriv,&micbuf,(etherHead*)buffer,len)) { |
3218 | badmic: | 3187 | badmic: |
3219 | dev_kfree_skb_irq (skb); | 3188 | dev_kfree_skb_irq (skb); |
3220 | #else | ||
3221 | if (0) { | ||
3222 | #endif | ||
3223 | badrx: | 3189 | badrx: |
3224 | OUT4500( apriv, EVACK, EV_RX); | 3190 | OUT4500( apriv, EVACK, EV_RX); |
3225 | goto exitrx; | 3191 | goto exitrx; |
@@ -3430,10 +3396,8 @@ static void mpi_receive_802_3(struct airo_info *ai) | |||
3430 | int len = 0; | 3396 | int len = 0; |
3431 | struct sk_buff *skb; | 3397 | struct sk_buff *skb; |
3432 | char *buffer; | 3398 | char *buffer; |
3433 | #ifdef MICSUPPORT | ||
3434 | int off = 0; | 3399 | int off = 0; |
3435 | MICBuffer micbuf; | 3400 | MICBuffer micbuf; |
3436 | #endif | ||
3437 | 3401 | ||
3438 | memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); | 3402 | memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); |
3439 | /* Make sure we got something */ | 3403 | /* Make sure we got something */ |
@@ -3448,7 +3412,6 @@ static void mpi_receive_802_3(struct airo_info *ai) | |||
3448 | goto badrx; | 3412 | goto badrx; |
3449 | } | 3413 | } |
3450 | buffer = skb_put(skb,len); | 3414 | buffer = skb_put(skb,len); |
3451 | #ifdef MICSUPPORT | ||
3452 | memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2); | 3415 | memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2); |
3453 | if (ai->micstats.enabled) { | 3416 | if (ai->micstats.enabled) { |
3454 | memcpy(&micbuf, | 3417 | memcpy(&micbuf, |
@@ -3470,9 +3433,6 @@ badmic: | |||
3470 | dev_kfree_skb_irq (skb); | 3433 | dev_kfree_skb_irq (skb); |
3471 | goto badrx; | 3434 | goto badrx; |
3472 | } | 3435 | } |
3473 | #else | ||
3474 | memcpy(buffer, ai->rxfids[0].virtual_host_addr, len); | ||
3475 | #endif | ||
3476 | #ifdef WIRELESS_SPY | 3436 | #ifdef WIRELESS_SPY |
3477 | if (ai->spy_data.spy_number > 0) { | 3437 | if (ai->spy_data.spy_number > 0) { |
3478 | char *sa; | 3438 | char *sa; |
@@ -3689,13 +3649,11 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) | |||
3689 | ai->config.authType = AUTH_OPEN; | 3649 | ai->config.authType = AUTH_OPEN; |
3690 | ai->config.modulation = MOD_CCK; | 3650 | ai->config.modulation = MOD_CCK; |
3691 | 3651 | ||
3692 | #ifdef MICSUPPORT | ||
3693 | if ((cap_rid.len>=sizeof(cap_rid)) && (cap_rid.extSoftCap&1) && | 3652 | if ((cap_rid.len>=sizeof(cap_rid)) && (cap_rid.extSoftCap&1) && |
3694 | (micsetup(ai) == SUCCESS)) { | 3653 | (micsetup(ai) == SUCCESS)) { |
3695 | ai->config.opmode |= MODE_MIC; | 3654 | ai->config.opmode |= MODE_MIC; |
3696 | set_bit(FLAG_MIC_CAPABLE, &ai->flags); | 3655 | set_bit(FLAG_MIC_CAPABLE, &ai->flags); |
3697 | } | 3656 | } |
3698 | #endif | ||
3699 | 3657 | ||
3700 | /* Save off the MAC */ | 3658 | /* Save off the MAC */ |
3701 | for( i = 0; i < ETH_ALEN; i++ ) { | 3659 | for( i = 0; i < ETH_ALEN; i++ ) { |
@@ -4170,15 +4128,12 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket) | |||
4170 | } | 4128 | } |
4171 | len -= ETH_ALEN * 2; | 4129 | len -= ETH_ALEN * 2; |
4172 | 4130 | ||
4173 | #ifdef MICSUPPORT | ||
4174 | if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && | 4131 | if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && |
4175 | (ntohs(((u16 *)pPacket)[6]) != 0x888E)) { | 4132 | (ntohs(((u16 *)pPacket)[6]) != 0x888E)) { |
4176 | if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS) | 4133 | if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS) |
4177 | return ERROR; | 4134 | return ERROR; |
4178 | miclen = sizeof(pMic); | 4135 | miclen = sizeof(pMic); |
4179 | } | 4136 | } |
4180 | #endif | ||
4181 | |||
4182 | // packet is destination[6], source[6], payload[len-12] | 4137 | // packet is destination[6], source[6], payload[len-12] |
4183 | // write the payload length and dst/src/payload | 4138 | // write the payload length and dst/src/payload |
4184 | if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR; | 4139 | if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR; |
@@ -5081,7 +5036,6 @@ static int set_wep_key(struct airo_info *ai, u16 index, | |||
5081 | wkr.len = sizeof(wkr); | 5036 | wkr.len = sizeof(wkr); |
5082 | wkr.kindex = 0xffff; | 5037 | wkr.kindex = 0xffff; |
5083 | wkr.mac[0] = (char)index; | 5038 | wkr.mac[0] = (char)index; |
5084 | if (perm) printk(KERN_INFO "Setting transmit key to %d\n", index); | ||
5085 | if (perm) ai->defindex = (char)index; | 5039 | if (perm) ai->defindex = (char)index; |
5086 | } else { | 5040 | } else { |
5087 | // We are actually setting the key | 5041 | // We are actually setting the key |
@@ -5090,7 +5044,6 @@ static int set_wep_key(struct airo_info *ai, u16 index, | |||
5090 | wkr.klen = keylen; | 5044 | wkr.klen = keylen; |
5091 | memcpy( wkr.key, key, keylen ); | 5045 | memcpy( wkr.key, key, keylen ); |
5092 | memcpy( wkr.mac, macaddr, ETH_ALEN ); | 5046 | memcpy( wkr.mac, macaddr, ETH_ALEN ); |
5093 | printk(KERN_INFO "Setting key %d\n", index); | ||
5094 | } | 5047 | } |
5095 | 5048 | ||
5096 | if (perm) disable_MAC(ai, lock); | 5049 | if (perm) disable_MAC(ai, lock); |
@@ -5801,11 +5754,13 @@ static int airo_set_wap(struct net_device *dev, | |||
5801 | Cmd cmd; | 5754 | Cmd cmd; |
5802 | Resp rsp; | 5755 | Resp rsp; |
5803 | APListRid APList_rid; | 5756 | APListRid APList_rid; |
5804 | static const unsigned char bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 }; | 5757 | static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; |
5758 | static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; | ||
5805 | 5759 | ||
5806 | if (awrq->sa_family != ARPHRD_ETHER) | 5760 | if (awrq->sa_family != ARPHRD_ETHER) |
5807 | return -EINVAL; | 5761 | return -EINVAL; |
5808 | else if (!memcmp(bcast, awrq->sa_data, ETH_ALEN)) { | 5762 | else if (!memcmp(any, awrq->sa_data, ETH_ALEN) || |
5763 | !memcmp(off, awrq->sa_data, ETH_ALEN)) { | ||
5809 | memset(&cmd, 0, sizeof(cmd)); | 5764 | memset(&cmd, 0, sizeof(cmd)); |
5810 | cmd.cmd=CMD_LOSE_SYNC; | 5765 | cmd.cmd=CMD_LOSE_SYNC; |
5811 | if (down_interruptible(&local->sem)) | 5766 | if (down_interruptible(&local->sem)) |
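The airo_set_wap() hunk follows the Wireless Extensions convention that both the all-ones "any" address and the all-zeroes "off" address mean "stop forcing a specific AP", and answers either with CMD_LOSE_SYNC. A standalone sketch of the wildcard test (plain libc memcmp here; the driver uses the kernel's):

#include <string.h>

static int example_is_wildcard_bssid(const unsigned char sa_data[6])
{
	static const unsigned char any[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	static const unsigned char off[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	return !memcmp(any, sa_data, 6) || !memcmp(off, sa_data, 6);
}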
@@ -6296,6 +6251,272 @@ static int airo_get_encode(struct net_device *dev, | |||
6296 | 6251 | ||
6297 | /*------------------------------------------------------------------*/ | 6252 | /*------------------------------------------------------------------*/ |
6298 | /* | 6253 | /* |
6254 | * Wireless Handler : set extended Encryption parameters | ||
6255 | */ | ||
6256 | static int airo_set_encodeext(struct net_device *dev, | ||
6257 | struct iw_request_info *info, | ||
6258 | union iwreq_data *wrqu, | ||
6259 | char *extra) | ||
6260 | { | ||
6261 | struct airo_info *local = dev->priv; | ||
6262 | struct iw_point *encoding = &wrqu->encoding; | ||
6263 | struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; | ||
6264 | CapabilityRid cap_rid; /* Card capability info */ | ||
6265 | int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 ); | ||
6266 | u16 currentAuthType = local->config.authType; | ||
6267 | int idx, key_len, alg = ext->alg, set_key = 1; | ||
6268 | wep_key_t key; | ||
6269 | |||
6270 | /* Is WEP supported ? */ | ||
6271 | readCapabilityRid(local, &cap_rid, 1); | ||
6272 | /* Older firmware doesn't support this... | ||
6273 | if(!(cap_rid.softCap & 2)) { | ||
6274 | return -EOPNOTSUPP; | ||
6275 | } */ | ||
6276 | readConfigRid(local, 1); | ||
6277 | |||
6278 | /* Determine and validate the key index */ | ||
6279 | idx = encoding->flags & IW_ENCODE_INDEX; | ||
6280 | if (idx) { | ||
6281 | if (idx < 1 || idx > ((cap_rid.softCap & 0x80) ? 4:1)) | ||
6282 | return -EINVAL; | ||
6283 | idx--; | ||
6284 | } else | ||
6285 | idx = get_wep_key(local, 0xffff); | ||
6286 | |||
6287 | if (encoding->flags & IW_ENCODE_DISABLED) | ||
6288 | alg = IW_ENCODE_ALG_NONE; | ||
6289 | |||
6290 | if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { | ||
6291 | /* Only set transmit key index here, actual | ||
6292 | * key is set below if needed. | ||
6293 | */ | ||
6294 | set_wep_key(local, idx, NULL, 0, perm, 1); | ||
6295 | set_key = ext->key_len > 0 ? 1 : 0; | ||
6296 | } | ||
6297 | |||
6298 | if (set_key) { | ||
6299 | /* Set the requested key first */ | ||
6300 | memset(key.key, 0, MAX_KEY_SIZE); | ||
6301 | switch (alg) { | ||
6302 | case IW_ENCODE_ALG_NONE: | ||
6303 | key.len = 0; | ||
6304 | break; | ||
6305 | case IW_ENCODE_ALG_WEP: | ||
6306 | if (ext->key_len > MIN_KEY_SIZE) { | ||
6307 | key.len = MAX_KEY_SIZE; | ||
6308 | } else if (ext->key_len > 0) { | ||
6309 | key.len = MIN_KEY_SIZE; | ||
6310 | } else { | ||
6311 | return -EINVAL; | ||
6312 | } | ||
6313 | key_len = min (ext->key_len, key.len); | ||
6314 | memcpy(key.key, ext->key, key_len); | ||
6315 | break; | ||
6316 | default: | ||
6317 | return -EINVAL; | ||
6318 | } | ||
6319 | /* Send the key to the card */ | ||
6320 | set_wep_key(local, idx, key.key, key.len, perm, 1); | ||
6321 | } | ||
6322 | |||
6323 | /* Read the flags */ | ||
6324 | if(encoding->flags & IW_ENCODE_DISABLED) | ||
6325 | local->config.authType = AUTH_OPEN; // disable encryption | ||
6326 | if(encoding->flags & IW_ENCODE_RESTRICTED) | ||
6327 | local->config.authType = AUTH_SHAREDKEY; // Only Both | ||
6328 | if(encoding->flags & IW_ENCODE_OPEN) | ||
6329 | local->config.authType = AUTH_ENCRYPT; // Only Wep | ||
6330 | /* Commit the changes to flags if needed */ | ||
6331 | if (local->config.authType != currentAuthType) | ||
6332 | set_bit (FLAG_COMMIT, &local->flags); | ||
6333 | |||
6334 | return -EINPROGRESS; | ||
6335 | } | ||
6336 | |||
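airo_set_encodeext() above distinguishes selecting the transmit key (IW_ENCODE_EXT_SET_TX_KEY with an empty key) from programming key material, and rounds WEP keys up to the 40-bit or 104-bit slot sizes. A hypothetical user-space sketch of how a WE-18 aware tool would reach this handler through SIOCSIWENCODEEXT; the struct layout follows linux/wireless.h and nothing below is part of the patch itself:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

static int example_set_wep_key(int sock, const char *ifname,
                               const unsigned char *key, int key_len, int index)
{
	struct iwreq wrq;
	char buf[sizeof(struct iw_encode_ext) + 13];	/* room for a 104-bit key */
	struct iw_encode_ext *ext = (struct iw_encode_ext *)buf;

	if (key_len < 0 || key_len > 13)
		return -1;

	memset(&wrq, 0, sizeof(wrq));
	memset(buf, 0, sizeof(buf));
	strncpy(wrq.ifr_name, ifname, IFNAMSIZ);

	ext->alg = IW_ENCODE_ALG_WEP;
	ext->key_len = key_len;
	memcpy(ext->key, key, key_len);
	ext->ext_flags = IW_ENCODE_EXT_SET_TX_KEY;	/* also make this the transmit key */

	wrq.u.encoding.pointer = buf;
	wrq.u.encoding.length = sizeof(buf);
	wrq.u.encoding.flags = index + 1;		/* 1-based key index, 0 = current */

	return ioctl(sock, SIOCSIWENCODEEXT, &wrq);
}

Any AF_INET datagram socket works as the ioctl target; wpa_supplicant's wext backend issues essentially this call when driving WEP through WE-18.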
6337 | |||
6338 | /*------------------------------------------------------------------*/ | ||
6339 | /* | ||
6340 | * Wireless Handler : get extended Encryption parameters | ||
6341 | */ | ||
6342 | static int airo_get_encodeext(struct net_device *dev, | ||
6343 | struct iw_request_info *info, | ||
6344 | union iwreq_data *wrqu, | ||
6345 | char *extra) | ||
6346 | { | ||
6347 | struct airo_info *local = dev->priv; | ||
6348 | struct iw_point *encoding = &wrqu->encoding; | ||
6349 | struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; | ||
6350 | CapabilityRid cap_rid; /* Card capability info */ | ||
6351 | int idx, max_key_len; | ||
6352 | |||
6353 | /* Is it supported ? */ | ||
6354 | readCapabilityRid(local, &cap_rid, 1); | ||
6355 | if(!(cap_rid.softCap & 2)) { | ||
6356 | return -EOPNOTSUPP; | ||
6357 | } | ||
6358 | readConfigRid(local, 1); | ||
6359 | |||
6360 | max_key_len = encoding->length - sizeof(*ext); | ||
6361 | if (max_key_len < 0) | ||
6362 | return -EINVAL; | ||
6363 | |||
6364 | idx = encoding->flags & IW_ENCODE_INDEX; | ||
6365 | if (idx) { | ||
6366 | if (idx < 1 || idx > ((cap_rid.softCap & 0x80) ? 4:1)) | ||
6367 | return -EINVAL; | ||
6368 | idx--; | ||
6369 | } else | ||
6370 | idx = get_wep_key(local, 0xffff); | ||
6371 | |||
6372 | encoding->flags = idx + 1; | ||
6373 | memset(ext, 0, sizeof(*ext)); | ||
6374 | |||
6375 | /* Check encryption mode */ | ||
6376 | switch(local->config.authType) { | ||
6377 | case AUTH_ENCRYPT: | ||
6378 | encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED; | ||
6379 | break; | ||
6380 | case AUTH_SHAREDKEY: | ||
6381 | encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED; | ||
6382 | break; | ||
6383 | default: | ||
6384 | case AUTH_OPEN: | ||
6385 | encoding->flags = IW_ENCODE_ALG_NONE | IW_ENCODE_DISABLED; | ||
6386 | break; | ||
6387 | } | ||
6388 | /* We can't return the key, so set the proper flag and return zero */ | ||
6389 | encoding->flags |= IW_ENCODE_NOKEY; | ||
6390 | memset(extra, 0, 16); | ||
6391 | |||
6392 | /* Copy the key to the user buffer */ | ||
6393 | ext->key_len = get_wep_key(local, idx); | ||
6394 | if (ext->key_len > 16) { | ||
6395 | ext->key_len=0; | ||
6396 | } | ||
6397 | |||
6398 | return 0; | ||
6399 | } | ||
6400 | |||
6401 | |||
6402 | /*------------------------------------------------------------------*/ | ||
6403 | /* | ||
6404 | * Wireless Handler : set extended authentication parameters | ||
6405 | */ | ||
6406 | static int airo_set_auth(struct net_device *dev, | ||
6407 | struct iw_request_info *info, | ||
6408 | union iwreq_data *wrqu, char *extra) | ||
6409 | { | ||
6410 | struct airo_info *local = dev->priv; | ||
6411 | struct iw_param *param = &wrqu->param; | ||
6412 | u16 currentAuthType = local->config.authType; | ||
6413 | |||
6414 | switch (param->flags & IW_AUTH_INDEX) { | ||
6415 | case IW_AUTH_WPA_VERSION: | ||
6416 | case IW_AUTH_CIPHER_PAIRWISE: | ||
6417 | case IW_AUTH_CIPHER_GROUP: | ||
6418 | case IW_AUTH_KEY_MGMT: | ||
6419 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
6420 | case IW_AUTH_PRIVACY_INVOKED: | ||
6421 | /* | ||
6422 | * airo does not use these parameters | ||
6423 | */ | ||
6424 | break; | ||
6425 | |||
6426 | case IW_AUTH_DROP_UNENCRYPTED: | ||
6427 | if (param->value) { | ||
6428 | /* Only change auth type if unencrypted */ | ||
6429 | if (currentAuthType == AUTH_OPEN) | ||
6430 | local->config.authType = AUTH_ENCRYPT; | ||
6431 | } else { | ||
6432 | local->config.authType = AUTH_OPEN; | ||
6433 | } | ||
6434 | |||
6435 | /* Commit the changes to flags if needed */ | ||
6436 | if (local->config.authType != currentAuthType) | ||
6437 | set_bit (FLAG_COMMIT, &local->flags); | ||
6438 | break; | ||
6439 | |||
6440 | case IW_AUTH_80211_AUTH_ALG: { | ||
6441 | /* FIXME: What about AUTH_OPEN? This API seems to | ||
6442 | * disallow setting our auth to AUTH_OPEN. | ||
6443 | */ | ||
6444 | if (param->value & IW_AUTH_ALG_SHARED_KEY) { | ||
6445 | local->config.authType = AUTH_SHAREDKEY; | ||
6446 | } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { | ||
6447 | local->config.authType = AUTH_ENCRYPT; | ||
6448 | } else | ||
6449 | return -EINVAL; | ||
6450 | break; | ||
6451 | |||
6452 | /* Commit the changes to flags if needed */ | ||
6453 | if (local->config.authType != currentAuthType) | ||
6454 | set_bit (FLAG_COMMIT, &local->flags); | ||
6455 | } | ||
6456 | |||
6457 | case IW_AUTH_WPA_ENABLED: | ||
6458 | /* Silently accept disable of WPA */ | ||
6459 | if (param->value > 0) | ||
6460 | return -EOPNOTSUPP; | ||
6461 | break; | ||
6462 | |||
6463 | default: | ||
6464 | return -EOPNOTSUPP; | ||
6465 | } | ||
6466 | return -EINPROGRESS; | ||
6467 | } | ||
6468 | |||
6469 | |||
6470 | /*------------------------------------------------------------------*/ | ||
6471 | /* | ||
6472 | * Wireless Handler : get extended authentication parameters | ||
6473 | */ | ||
6474 | static int airo_get_auth(struct net_device *dev, | ||
6475 | struct iw_request_info *info, | ||
6476 | union iwreq_data *wrqu, char *extra) | ||
6477 | { | ||
6478 | struct airo_info *local = dev->priv; | ||
6479 | struct iw_param *param = &wrqu->param; | ||
6480 | u16 currentAuthType = local->config.authType; | ||
6481 | |||
6482 | switch (param->flags & IW_AUTH_INDEX) { | ||
6483 | case IW_AUTH_DROP_UNENCRYPTED: | ||
6484 | switch (currentAuthType) { | ||
6485 | case AUTH_SHAREDKEY: | ||
6486 | case AUTH_ENCRYPT: | ||
6487 | param->value = 1; | ||
6488 | break; | ||
6489 | default: | ||
6490 | param->value = 0; | ||
6491 | break; | ||
6492 | } | ||
6493 | break; | ||
6494 | |||
6495 | case IW_AUTH_80211_AUTH_ALG: | ||
6496 | switch (currentAuthType) { | ||
6497 | case AUTH_SHAREDKEY: | ||
6498 | param->value = IW_AUTH_ALG_SHARED_KEY; | ||
6499 | break; | ||
6500 | case AUTH_ENCRYPT: | ||
6501 | default: | ||
6502 | param->value = IW_AUTH_ALG_OPEN_SYSTEM; | ||
6503 | break; | ||
6504 | } | ||
6505 | break; | ||
6506 | |||
6507 | case IW_AUTH_WPA_ENABLED: | ||
6508 | param->value = 0; | ||
6509 | break; | ||
6510 | |||
6511 | default: | ||
6512 | return -EOPNOTSUPP; | ||
6513 | } | ||
6514 | return 0; | ||
6515 | } | ||
6516 | |||
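The two handlers above translate between the generic IW_AUTH_* attributes and the three authentication modes the firmware knows (AUTH_OPEN, AUTH_ENCRYPT, AUTH_SHAREDKEY). A condensed, self-contained view of the IW_AUTH_80211_AUTH_ALG branch; the EX_ constants below are stand-ins, the real values come from airo.c and linux/wireless.h:

enum { EX_AUTH_ENCRYPT = 1, EX_AUTH_SHAREDKEY = 2, EX_AUTH_INVALID = -1 };
#define EX_IW_AUTH_ALG_OPEN_SYSTEM 0x00000001
#define EX_IW_AUTH_ALG_SHARED_KEY  0x00000002

static int example_auth_alg_to_airo(unsigned int iw_alg)
{
	if (iw_alg & EX_IW_AUTH_ALG_SHARED_KEY)
		return EX_AUTH_SHAREDKEY;	/* WEP shared-key authentication */
	if (iw_alg & EX_IW_AUTH_ALG_OPEN_SYSTEM)
		return EX_AUTH_ENCRYPT;		/* open authentication with WEP enabled */
	return EX_AUTH_INVALID;			/* handler returns -EINVAL */
}

airo_get_auth() performs the reverse mapping, and reports WPA as permanently disabled since the firmware offers no WPA support.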
6517 | |||
6518 | /*------------------------------------------------------------------*/ | ||
6519 | /* | ||
6299 | * Wireless Handler : set Tx-Power | 6520 | * Wireless Handler : set Tx-Power |
6300 | */ | 6521 | */ |
6301 | static int airo_set_txpow(struct net_device *dev, | 6522 | static int airo_set_txpow(struct net_device *dev, |
@@ -7050,6 +7271,15 @@ static const iw_handler airo_handler[] = | |||
7050 | (iw_handler) airo_get_encode, /* SIOCGIWENCODE */ | 7271 | (iw_handler) airo_get_encode, /* SIOCGIWENCODE */ |
7051 | (iw_handler) airo_set_power, /* SIOCSIWPOWER */ | 7272 | (iw_handler) airo_set_power, /* SIOCSIWPOWER */ |
7052 | (iw_handler) airo_get_power, /* SIOCGIWPOWER */ | 7273 | (iw_handler) airo_get_power, /* SIOCGIWPOWER */ |
7274 | (iw_handler) NULL, /* -- hole -- */ | ||
7275 | (iw_handler) NULL, /* -- hole -- */ | ||
7276 | (iw_handler) NULL, /* SIOCSIWGENIE */ | ||
7277 | (iw_handler) NULL, /* SIOCGIWGENIE */ | ||
7278 | (iw_handler) airo_set_auth, /* SIOCSIWAUTH */ | ||
7279 | (iw_handler) airo_get_auth, /* SIOCGIWAUTH */ | ||
7280 | (iw_handler) airo_set_encodeext, /* SIOCSIWENCODEEXT */ | ||
7281 | (iw_handler) airo_get_encodeext, /* SIOCGIWENCODEEXT */ | ||
7282 | (iw_handler) NULL, /* SIOCSIWPMKSA */ | ||
7053 | }; | 7283 | }; |
7054 | 7284 | ||
7055 | /* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here. | 7285 | /* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here. |
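The handler table is positional: the Wireless Extensions core indexes it by ioctl number relative to SIOCIWFIRST, so the NULL "hole" entries are required padding that keeps SIOCSIWAUTH, SIOCGIWAUTH and the ENCODEEXT ioctls landing on the new handlers. Roughly, the dispatch reduces to the lookup below (0x8B00 is SIOCIWFIRST in wireless.h; bounds handling in the real core is more involved):

typedef int (*example_handler_t)(void);

static example_handler_t example_lookup(example_handler_t const table[],
                                        unsigned int table_len, unsigned int cmd)
{
	unsigned int idx = cmd - 0x8B00;	/* cmd - SIOCIWFIRST */

	return (idx < table_len) ? table[idx] : (example_handler_t)0;
}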
@@ -7270,13 +7500,11 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { | |||
7270 | case AIROGSTAT: ridcode = RID_STATUS; break; | 7500 | case AIROGSTAT: ridcode = RID_STATUS; break; |
7271 | case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; | 7501 | case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; |
7272 | case AIROGSTATSC32: ridcode = RID_STATS; break; | 7502 | case AIROGSTATSC32: ridcode = RID_STATS; break; |
7273 | #ifdef MICSUPPORT | ||
7274 | case AIROGMICSTATS: | 7503 | case AIROGMICSTATS: |
7275 | if (copy_to_user(comp->data, &ai->micstats, | 7504 | if (copy_to_user(comp->data, &ai->micstats, |
7276 | min((int)comp->len,(int)sizeof(ai->micstats)))) | 7505 | min((int)comp->len,(int)sizeof(ai->micstats)))) |
7277 | return -EFAULT; | 7506 | return -EFAULT; |
7278 | return 0; | 7507 | return 0; |
7279 | #endif | ||
7280 | case AIRORRID: ridcode = comp->ridnum; break; | 7508 | case AIRORRID: ridcode = comp->ridnum; break; |
7281 | default: | 7509 | default: |
7282 | return -EINVAL; | 7510 | return -EINVAL; |
@@ -7308,9 +7536,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { | |||
7308 | static int writerids(struct net_device *dev, aironet_ioctl *comp) { | 7536 | static int writerids(struct net_device *dev, aironet_ioctl *comp) { |
7309 | struct airo_info *ai = dev->priv; | 7537 | struct airo_info *ai = dev->priv; |
7310 | int ridcode; | 7538 | int ridcode; |
7311 | #ifdef MICSUPPORT | ||
7312 | int enabled; | 7539 | int enabled; |
7313 | #endif | ||
7314 | Resp rsp; | 7540 | Resp rsp; |
7315 | static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); | 7541 | static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); |
7316 | unsigned char *iobuf; | 7542 | unsigned char *iobuf; |
@@ -7367,11 +7593,9 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { | |||
7367 | 7593 | ||
7368 | PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1); | 7594 | PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1); |
7369 | 7595 | ||
7370 | #ifdef MICSUPPORT | ||
7371 | enabled = ai->micstats.enabled; | 7596 | enabled = ai->micstats.enabled; |
7372 | memset(&ai->micstats,0,sizeof(ai->micstats)); | 7597 | memset(&ai->micstats,0,sizeof(ai->micstats)); |
7373 | ai->micstats.enabled = enabled; | 7598 | ai->micstats.enabled = enabled; |
7374 | #endif | ||
7375 | 7599 | ||
7376 | if (copy_to_user(comp->data, iobuf, | 7600 | if (copy_to_user(comp->data, iobuf, |
7377 | min((int)comp->len, (int)RIDSIZE))) { | 7601 | min((int)comp->len, (int)RIDSIZE))) { |
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index dfc24016ba81..87afa6878f26 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c | |||
@@ -137,44 +137,6 @@ static struct { | |||
137 | #define MAC_BOOT_COMPLETE 0x0010 // MAC boot has been completed | 137 | #define MAC_BOOT_COMPLETE 0x0010 // MAC boot has been completed |
138 | #define MAC_INIT_OK 0x0002 // MAC boot has been completed | 138 | #define MAC_INIT_OK 0x0002 // MAC boot has been completed |
139 | 139 | ||
140 | #define C80211_SUBTYPE_MGMT_ASS_REQUEST 0x00 | ||
141 | #define C80211_SUBTYPE_MGMT_ASS_RESPONSE 0x10 | ||
142 | #define C80211_SUBTYPE_MGMT_REASS_REQUEST 0x20 | ||
143 | #define C80211_SUBTYPE_MGMT_REASS_RESPONSE 0x30 | ||
144 | #define C80211_SUBTYPE_MGMT_ProbeRequest 0x40 | ||
145 | #define C80211_SUBTYPE_MGMT_ProbeResponse 0x50 | ||
146 | #define C80211_SUBTYPE_MGMT_BEACON 0x80 | ||
147 | #define C80211_SUBTYPE_MGMT_ATIM 0x90 | ||
148 | #define C80211_SUBTYPE_MGMT_DISASSOSIATION 0xA0 | ||
149 | #define C80211_SUBTYPE_MGMT_Authentication 0xB0 | ||
150 | #define C80211_SUBTYPE_MGMT_Deauthentication 0xC0 | ||
151 | |||
152 | #define C80211_MGMT_AAN_OPENSYSTEM 0x0000 | ||
153 | #define C80211_MGMT_AAN_SHAREDKEY 0x0001 | ||
154 | |||
155 | #define C80211_MGMT_CAPABILITY_ESS 0x0001 // see 802.11 p.58 | ||
156 | #define C80211_MGMT_CAPABILITY_IBSS 0x0002 // - " - | ||
157 | #define C80211_MGMT_CAPABILITY_CFPollable 0x0004 // - " - | ||
158 | #define C80211_MGMT_CAPABILITY_CFPollRequest 0x0008 // - " - | ||
159 | #define C80211_MGMT_CAPABILITY_Privacy 0x0010 // - " - | ||
160 | |||
161 | #define C80211_MGMT_SC_Success 0 | ||
162 | #define C80211_MGMT_SC_Unspecified 1 | ||
163 | #define C80211_MGMT_SC_SupportCapabilities 10 | ||
164 | #define C80211_MGMT_SC_ReassDenied 11 | ||
165 | #define C80211_MGMT_SC_AssDenied 12 | ||
166 | #define C80211_MGMT_SC_AuthAlgNotSupported 13 | ||
167 | #define C80211_MGMT_SC_AuthTransSeqNumError 14 | ||
168 | #define C80211_MGMT_SC_AuthRejectChallenge 15 | ||
169 | #define C80211_MGMT_SC_AuthRejectTimeout 16 | ||
170 | #define C80211_MGMT_SC_AssDeniedHandleAP 17 | ||
171 | #define C80211_MGMT_SC_AssDeniedBSSRate 18 | ||
172 | |||
173 | #define C80211_MGMT_ElementID_SSID 0 | ||
174 | #define C80211_MGMT_ElementID_SupportedRates 1 | ||
175 | #define C80211_MGMT_ElementID_ChallengeText 16 | ||
176 | #define C80211_MGMT_CAPABILITY_ShortPreamble 0x0020 | ||
177 | |||
178 | #define MIB_MAX_DATA_BYTES 212 | 140 | #define MIB_MAX_DATA_BYTES 212 |
179 | #define MIB_HEADER_SIZE 4 /* first four fields */ | 141 | #define MIB_HEADER_SIZE 4 /* first four fields */ |
180 | 142 | ||
@@ -2835,7 +2797,7 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, | |||
2835 | u8 channel) | 2797 | u8 channel) |
2836 | { | 2798 | { |
2837 | int rejoin = 0; | 2799 | int rejoin = 0; |
2838 | int new = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? | 2800 | int new = capability & MFIE_TYPE_POWER_CONSTRAINT ? |
2839 | SHORT_PREAMBLE : LONG_PREAMBLE; | 2801 | SHORT_PREAMBLE : LONG_PREAMBLE; |
2840 | 2802 | ||
2841 | if (priv->preamble != new) { | 2803 | if (priv->preamble != new) { |
@@ -2921,11 +2883,11 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc) | |||
2921 | memcpy(header.addr2, priv->dev->dev_addr, 6); | 2883 | memcpy(header.addr2, priv->dev->dev_addr, 6); |
2922 | memcpy(header.addr3, priv->CurrentBSSID, 6); | 2884 | memcpy(header.addr3, priv->CurrentBSSID, 6); |
2923 | 2885 | ||
2924 | body.capability = cpu_to_le16(C80211_MGMT_CAPABILITY_ESS); | 2886 | body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS); |
2925 | if (priv->wep_is_on) | 2887 | if (priv->wep_is_on) |
2926 | body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_Privacy); | 2888 | body.capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY); |
2927 | if (priv->preamble == SHORT_PREAMBLE) | 2889 | if (priv->preamble == SHORT_PREAMBLE) |
2928 | body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_ShortPreamble); | 2890 | body.capability |= cpu_to_le16(MFIE_TYPE_POWER_CONSTRAINT); |
2929 | 2891 | ||
2930 | body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period); | 2892 | body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period); |
2931 | 2893 | ||
@@ -2939,10 +2901,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc) | |||
2939 | bodysize = 12 + priv->SSID_size; | 2901 | bodysize = 12 + priv->SSID_size; |
2940 | } | 2902 | } |
2941 | 2903 | ||
2942 | ssid_el_p[0] = C80211_MGMT_ElementID_SSID; | 2904 | ssid_el_p[0] = MFIE_TYPE_SSID; |
2943 | ssid_el_p[1] = priv->SSID_size; | 2905 | ssid_el_p[1] = priv->SSID_size; |
2944 | memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size); | 2906 | memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size); |
2945 | ssid_el_p[2 + priv->SSID_size] = C80211_MGMT_ElementID_SupportedRates; | 2907 | ssid_el_p[2 + priv->SSID_size] = MFIE_TYPE_RATES; |
2946 | ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */ | 2908 | ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */ |
2947 | memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4); | 2909 | memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4); |
2948 | 2910 | ||
@@ -3004,7 +2966,7 @@ static void store_bss_info(struct atmel_private *priv, | |||
3004 | u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len, | 2966 | u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len, |
3005 | u8 *ssid, int is_beacon) | 2967 | u8 *ssid, int is_beacon) |
3006 | { | 2968 | { |
3007 | u8 *bss = capability & C80211_MGMT_CAPABILITY_ESS ? header->addr2 : header->addr3; | 2969 | u8 *bss = capability & WLAN_CAPABILITY_ESS ? header->addr2 : header->addr3; |
3008 | int i, index; | 2970 | int i, index; |
3009 | 2971 | ||
3010 | for (index = -1, i = 0; i < priv->BSS_list_entries; i++) | 2972 | for (index = -1, i = 0; i < priv->BSS_list_entries; i++) |
@@ -3030,16 +2992,16 @@ static void store_bss_info(struct atmel_private *priv, | |||
3030 | 2992 | ||
3031 | priv->BSSinfo[index].channel = channel; | 2993 | priv->BSSinfo[index].channel = channel; |
3032 | priv->BSSinfo[index].beacon_period = beacon_period; | 2994 | priv->BSSinfo[index].beacon_period = beacon_period; |
3033 | priv->BSSinfo[index].UsingWEP = capability & C80211_MGMT_CAPABILITY_Privacy; | 2995 | priv->BSSinfo[index].UsingWEP = capability & WLAN_CAPABILITY_PRIVACY; |
3034 | memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len); | 2996 | memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len); |
3035 | priv->BSSinfo[index].SSIDsize = ssid_len; | 2997 | priv->BSSinfo[index].SSIDsize = ssid_len; |
3036 | 2998 | ||
3037 | if (capability & C80211_MGMT_CAPABILITY_IBSS) | 2999 | if (capability & WLAN_CAPABILITY_IBSS) |
3038 | priv->BSSinfo[index].BSStype = IW_MODE_ADHOC; | 3000 | priv->BSSinfo[index].BSStype = IW_MODE_ADHOC; |
3039 | else if (capability & C80211_MGMT_CAPABILITY_ESS) | 3001 | else if (capability & WLAN_CAPABILITY_ESS) |
3040 | priv->BSSinfo[index].BSStype =IW_MODE_INFRA; | 3002 | priv->BSSinfo[index].BSStype =IW_MODE_INFRA; |
3041 | 3003 | ||
3042 | priv->BSSinfo[index].preamble = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? | 3004 | priv->BSSinfo[index].preamble = capability & MFIE_TYPE_POWER_CONSTRAINT ? |
3043 | SHORT_PREAMBLE : LONG_PREAMBLE; | 3005 | SHORT_PREAMBLE : LONG_PREAMBLE; |
3044 | } | 3006 | } |
3045 | 3007 | ||
@@ -3050,7 +3012,7 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) | |||
3050 | u16 trans_seq_no = le16_to_cpu(auth->trans_seq); | 3012 | u16 trans_seq_no = le16_to_cpu(auth->trans_seq); |
3051 | u16 system = le16_to_cpu(auth->alg); | 3013 | u16 system = le16_to_cpu(auth->alg); |
3052 | 3014 | ||
3053 | if (status == C80211_MGMT_SC_Success && !priv->wep_is_on) { | 3015 | if (status == WLAN_STATUS_SUCCESS && !priv->wep_is_on) { |
3054 | /* no WEP */ | 3016 | /* no WEP */ |
3055 | if (priv->station_was_associated) { | 3017 | if (priv->station_was_associated) { |
3056 | atmel_enter_state(priv, STATION_STATE_REASSOCIATING); | 3018 | atmel_enter_state(priv, STATION_STATE_REASSOCIATING); |
@@ -3063,19 +3025,19 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) | |||
3063 | } | 3025 | } |
3064 | } | 3026 | } |
3065 | 3027 | ||
3066 | if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { | 3028 | if (status == WLAN_STATUS_SUCCESS && priv->wep_is_on) { |
3067 | int should_associate = 0; | 3029 | int should_associate = 0; |
3068 | /* WEP */ | 3030 | /* WEP */ |
3069 | if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) | 3031 | if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) |
3070 | return; | 3032 | return; |
3071 | 3033 | ||
3072 | if (system == C80211_MGMT_AAN_OPENSYSTEM) { | 3034 | if (system == WLAN_AUTH_OPEN) { |
3073 | if (trans_seq_no == 0x0002) { | 3035 | if (trans_seq_no == 0x0002) { |
3074 | should_associate = 1; | 3036 | should_associate = 1; |
3075 | } | 3037 | } |
3076 | } else if (system == C80211_MGMT_AAN_SHAREDKEY) { | 3038 | } else if (system == WLAN_AUTH_SHARED_KEY) { |
3077 | if (trans_seq_no == 0x0002 && | 3039 | if (trans_seq_no == 0x0002 && |
3078 | auth->el_id == C80211_MGMT_ElementID_ChallengeText) { | 3040 | auth->el_id == MFIE_TYPE_CHALLENGE) { |
3079 | send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); | 3041 | send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); |
3080 | return; | 3042 | return; |
3081 | } else if (trans_seq_no == 0x0004) { | 3043 | } else if (trans_seq_no == 0x0004) { |
@@ -3140,8 +3102,8 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype) | |||
3140 | if (frame_len < 8 + rates_len) | 3102 | if (frame_len < 8 + rates_len) |
3141 | return; | 3103 | return; |
3142 | 3104 | ||
3143 | if (status == C80211_MGMT_SC_Success) { | 3105 | if (status == WLAN_STATUS_SUCCESS) { |
3144 | if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE) | 3106 | if (subtype == IEEE80211_STYPE_ASSOC_RESP) |
3145 | priv->AssociationRequestRetryCnt = 0; | 3107 | priv->AssociationRequestRetryCnt = 0; |
3146 | else | 3108 | else |
3147 | priv->ReAssociationRequestRetryCnt = 0; | 3109 | priv->ReAssociationRequestRetryCnt = 0; |
@@ -3178,9 +3140,9 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype) | |||
3178 | return; | 3140 | return; |
3179 | } | 3141 | } |
3180 | 3142 | ||
3181 | if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE && | 3143 | if (subtype == IEEE80211_STYPE_ASSOC_RESP && |
3182 | status != C80211_MGMT_SC_AssDeniedBSSRate && | 3144 | status != WLAN_STATUS_ASSOC_DENIED_RATES && |
3183 | status != C80211_MGMT_SC_SupportCapabilities && | 3145 | status != WLAN_STATUS_CAPS_UNSUPPORTED && |
3184 | priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { | 3146 | priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { |
3185 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3147 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3186 | priv->AssociationRequestRetryCnt++; | 3148 | priv->AssociationRequestRetryCnt++; |
@@ -3188,9 +3150,9 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype) | |||
3188 | return; | 3150 | return; |
3189 | } | 3151 | } |
3190 | 3152 | ||
3191 | if (subtype == C80211_SUBTYPE_MGMT_REASS_RESPONSE && | 3153 | if (subtype == IEEE80211_STYPE_REASSOC_RESP && |
3192 | status != C80211_MGMT_SC_AssDeniedBSSRate && | 3154 | status != WLAN_STATUS_ASSOC_DENIED_RATES && |
3193 | status != C80211_MGMT_SC_SupportCapabilities && | 3155 | status != WLAN_STATUS_CAPS_UNSUPPORTED && |
3194 | priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { | 3156 | priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { |
3195 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3157 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3196 | priv->ReAssociationRequestRetryCnt++; | 3158 | priv->ReAssociationRequestRetryCnt++; |
@@ -3325,8 +3287,8 @@ static void atmel_management_frame(struct atmel_private *priv, | |||
3325 | 3287 | ||
3326 | subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE; | 3288 | subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE; |
3327 | switch (subtype) { | 3289 | switch (subtype) { |
3328 | case C80211_SUBTYPE_MGMT_BEACON: | 3290 | case IEEE80211_STYPE_BEACON: |
3329 | case C80211_SUBTYPE_MGMT_ProbeResponse: | 3291 | case IEEE80211_STYPE_PROBE_RESP: |
3330 | 3292 | ||
3331 | /* beacon frame has multiple variable-length fields - | 3293 | /* beacon frame has multiple variable-length fields - |
3332 | never let an engineer loose with a data structure design. */ | 3294 | never let an engineer loose with a data structure design. */ |
@@ -3384,19 +3346,19 @@ static void atmel_management_frame(struct atmel_private *priv, | |||
3384 | beacon_interval, channel, rssi, | 3346 | beacon_interval, channel, rssi, |
3385 | ssid_length, | 3347 | ssid_length, |
3386 | &beacon->rates_el_id, | 3348 | &beacon->rates_el_id, |
3387 | subtype == C80211_SUBTYPE_MGMT_BEACON); | 3349 | subtype == IEEE80211_STYPE_BEACON); |
3388 | } | 3350 | } |
3389 | break; | 3351 | break; |
3390 | 3352 | ||
3391 | case C80211_SUBTYPE_MGMT_Authentication: | 3353 | case IEEE80211_STYPE_AUTH: |
3392 | 3354 | ||
3393 | if (priv->station_state == STATION_STATE_AUTHENTICATING) | 3355 | if (priv->station_state == STATION_STATE_AUTHENTICATING) |
3394 | authenticate(priv, frame_len); | 3356 | authenticate(priv, frame_len); |
3395 | 3357 | ||
3396 | break; | 3358 | break; |
3397 | 3359 | ||
3398 | case C80211_SUBTYPE_MGMT_ASS_RESPONSE: | 3360 | case IEEE80211_STYPE_ASSOC_RESP: |
3399 | case C80211_SUBTYPE_MGMT_REASS_RESPONSE: | 3361 | case IEEE80211_STYPE_REASSOC_RESP: |
3400 | 3362 | ||
3401 | if (priv->station_state == STATION_STATE_ASSOCIATING || | 3363 | if (priv->station_state == STATION_STATE_ASSOCIATING || |
3402 | priv->station_state == STATION_STATE_REASSOCIATING) | 3364 | priv->station_state == STATION_STATE_REASSOCIATING) |
@@ -3404,7 +3366,7 @@ static void atmel_management_frame(struct atmel_private *priv, | |||
3404 | 3366 | ||
3405 | break; | 3367 | break; |
3406 | 3368 | ||
3407 | case C80211_SUBTYPE_MGMT_DISASSOSIATION: | 3369 | case IEEE80211_STYPE_DISASSOC: |
3408 | if (priv->station_is_associated && | 3370 | if (priv->station_is_associated && |
3409 | priv->operating_mode == IW_MODE_INFRA && | 3371 | priv->operating_mode == IW_MODE_INFRA && |
3410 | is_frame_from_current_bss(priv, header)) { | 3372 | is_frame_from_current_bss(priv, header)) { |
@@ -3417,7 +3379,7 @@ static void atmel_management_frame(struct atmel_private *priv, | |||
3417 | 3379 | ||
3418 | break; | 3380 | break; |
3419 | 3381 | ||
3420 | case C80211_SUBTYPE_MGMT_Deauthentication: | 3382 | case IEEE80211_STYPE_DEAUTH: |
3421 | if (priv->operating_mode == IW_MODE_INFRA && | 3383 | if (priv->operating_mode == IW_MODE_INFRA && |
3422 | is_frame_from_current_bss(priv, header)) { | 3384 | is_frame_from_current_bss(priv, header)) { |
3423 | priv->station_was_associated = 0; | 3385 | priv->station_was_associated = 0; |
@@ -3453,12 +3415,12 @@ static void atmel_management_timer(u_long a) | |||
3453 | priv->AuthenticationRequestRetryCnt = 0; | 3415 | priv->AuthenticationRequestRetryCnt = 0; |
3454 | restart_search(priv); | 3416 | restart_search(priv); |
3455 | } else { | 3417 | } else { |
3456 | int auth = C80211_MGMT_AAN_OPENSYSTEM; | 3418 | int auth = WLAN_AUTH_OPEN; |
3457 | priv->AuthenticationRequestRetryCnt++; | 3419 | priv->AuthenticationRequestRetryCnt++; |
3458 | priv->CurrentAuthentTransactionSeqNum = 0x0001; | 3420 | priv->CurrentAuthentTransactionSeqNum = 0x0001; |
3459 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3421 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3460 | if (priv->wep_is_on && priv->exclude_unencrypted) | 3422 | if (priv->wep_is_on && priv->exclude_unencrypted) |
3461 | auth = C80211_MGMT_AAN_SHAREDKEY; | 3423 | auth = WLAN_AUTH_SHARED_KEY; |
3462 | send_authentication_request(priv, auth, NULL, 0); | 3424 | send_authentication_request(priv, auth, NULL, 0); |
3463 | } | 3425 | } |
3464 | break; | 3426 | break; |
@@ -3558,14 +3520,14 @@ static void atmel_command_irq(struct atmel_private *priv) | |||
3558 | priv->station_was_associated = priv->station_is_associated; | 3520 | priv->station_was_associated = priv->station_is_associated; |
3559 | atmel_enter_state(priv, STATION_STATE_READY); | 3521 | atmel_enter_state(priv, STATION_STATE_READY); |
3560 | } else { | 3522 | } else { |
3561 | int auth = C80211_MGMT_AAN_OPENSYSTEM; | 3523 | int auth = WLAN_AUTH_OPEN; |
3562 | priv->AuthenticationRequestRetryCnt = 0; | 3524 | priv->AuthenticationRequestRetryCnt = 0; |
3563 | atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); | 3525 | atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); |
3564 | 3526 | ||
3565 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3527 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3566 | priv->CurrentAuthentTransactionSeqNum = 0x0001; | 3528 | priv->CurrentAuthentTransactionSeqNum = 0x0001; |
3567 | if (priv->wep_is_on && priv->exclude_unencrypted) | 3529 | if (priv->wep_is_on && priv->exclude_unencrypted) |
3568 | auth = C80211_MGMT_AAN_SHAREDKEY; | 3530 | auth = WLAN_AUTH_SHARED_KEY; |
3569 | send_authentication_request(priv, auth, NULL, 0); | 3531 | send_authentication_request(priv, auth, NULL, 0); |
3570 | } | 3532 | } |
3571 | return; | 3533 | return; |
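The atmel.c changes are purely a rename exercise: the file's private C80211_* management constants are deleted in favour of the shared names from the in-tree ieee80211 headers (WLAN_CAPABILITY_*, WLAN_AUTH_*, WLAN_STATUS_*, IEEE80211_STYPE_*, MFIE_TYPE_*), with the numeric values unchanged. A self-contained example using the standard 802.11 capability bits, mirroring the store_bss_info() test above (the EX_ defines stand in for the kernel's):

#define EX_WLAN_CAPABILITY_ESS     0x0001
#define EX_WLAN_CAPABILITY_IBSS    0x0002
#define EX_WLAN_CAPABILITY_PRIVACY 0x0010

static const char *example_bss_type(unsigned short capability)
{
	if (capability & EX_WLAN_CAPABILITY_IBSS)
		return "ad-hoc";
	if (capability & EX_WLAN_CAPABILITY_ESS)
		return "infrastructure";
	return "unknown";
}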
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 6290c9f7e939..72335c8eb97f 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify it | 5 | This program is free software; you can redistribute it and/or modify it |
6 | under the terms of version 2 of the GNU General Public License as | 6 | under the terms of version 2 of the GNU General Public License as |
@@ -167,12 +167,12 @@ that only one external action is invoked at a time. | |||
167 | 167 | ||
168 | #include "ipw2100.h" | 168 | #include "ipw2100.h" |
169 | 169 | ||
170 | #define IPW2100_VERSION "1.1.3" | 170 | #define IPW2100_VERSION "git-1.2.2" |
171 | 171 | ||
172 | #define DRV_NAME "ipw2100" | 172 | #define DRV_NAME "ipw2100" |
173 | #define DRV_VERSION IPW2100_VERSION | 173 | #define DRV_VERSION IPW2100_VERSION |
174 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" | 174 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" |
175 | #define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation" | 175 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" |
176 | 176 | ||
177 | /* Debugging stuff */ | 177 | /* Debugging stuff */ |
178 | #ifdef CONFIG_IPW2100_DEBUG | 178 | #ifdef CONFIG_IPW2100_DEBUG |
@@ -1418,7 +1418,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv) | |||
1418 | if (priv->status & STATUS_ENABLED) | 1418 | if (priv->status & STATUS_ENABLED) |
1419 | return 0; | 1419 | return 0; |
1420 | 1420 | ||
1421 | down(&priv->adapter_sem); | 1421 | mutex_lock(&priv->adapter_mutex); |
1422 | 1422 | ||
1423 | if (rf_kill_active(priv)) { | 1423 | if (rf_kill_active(priv)) { |
1424 | IPW_DEBUG_HC("Command aborted due to RF kill active.\n"); | 1424 | IPW_DEBUG_HC("Command aborted due to RF kill active.\n"); |
@@ -1444,7 +1444,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv) | |||
1444 | } | 1444 | } |
1445 | 1445 | ||
1446 | fail_up: | 1446 | fail_up: |
1447 | up(&priv->adapter_sem); | 1447 | mutex_unlock(&priv->adapter_mutex); |
1448 | return err; | 1448 | return err; |
1449 | } | 1449 | } |
1450 | 1450 | ||
@@ -1576,7 +1576,7 @@ static int ipw2100_disable_adapter(struct ipw2100_priv *priv) | |||
1576 | cancel_delayed_work(&priv->hang_check); | 1576 | cancel_delayed_work(&priv->hang_check); |
1577 | } | 1577 | } |
1578 | 1578 | ||
1579 | down(&priv->adapter_sem); | 1579 | mutex_lock(&priv->adapter_mutex); |
1580 | 1580 | ||
1581 | err = ipw2100_hw_send_command(priv, &cmd); | 1581 | err = ipw2100_hw_send_command(priv, &cmd); |
1582 | if (err) { | 1582 | if (err) { |
@@ -1595,7 +1595,7 @@ static int ipw2100_disable_adapter(struct ipw2100_priv *priv) | |||
1595 | IPW_DEBUG_INFO("TODO: implement scan state machine\n"); | 1595 | IPW_DEBUG_INFO("TODO: implement scan state machine\n"); |
1596 | 1596 | ||
1597 | fail_up: | 1597 | fail_up: |
1598 | up(&priv->adapter_sem); | 1598 | mutex_unlock(&priv->adapter_mutex); |
1599 | return err; | 1599 | return err; |
1600 | } | 1600 | } |
1601 | 1601 | ||
@@ -1672,6 +1672,18 @@ static int ipw2100_start_scan(struct ipw2100_priv *priv) | |||
1672 | return err; | 1672 | return err; |
1673 | } | 1673 | } |
1674 | 1674 | ||
1675 | static const struct ieee80211_geo ipw_geos[] = { | ||
1676 | { /* Restricted */ | ||
1677 | "---", | ||
1678 | .bg_channels = 14, | ||
1679 | .bg = {{2412, 1}, {2417, 2}, {2422, 3}, | ||
1680 | {2427, 4}, {2432, 5}, {2437, 6}, | ||
1681 | {2442, 7}, {2447, 8}, {2452, 9}, | ||
1682 | {2457, 10}, {2462, 11}, {2467, 12}, | ||
1683 | {2472, 13}, {2484, 14}}, | ||
1684 | }, | ||
1685 | }; | ||
1686 | |||
1675 | static int ipw2100_up(struct ipw2100_priv *priv, int deferred) | 1687 | static int ipw2100_up(struct ipw2100_priv *priv, int deferred) |
1676 | { | 1688 | { |
1677 | unsigned long flags; | 1689 | unsigned long flags; |
@@ -1727,6 +1739,13 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) | |||
1727 | goto exit; | 1739 | goto exit; |
1728 | } | 1740 | } |
1729 | 1741 | ||
1742 | /* Initialize the geo */ | ||
1743 | if (ieee80211_set_geo(priv->ieee, &ipw_geos[0])) { | ||
1744 | printk(KERN_WARNING DRV_NAME "Could not set geo\n"); | ||
1745 | return 0; | ||
1746 | } | ||
1747 | priv->ieee->freq_band = IEEE80211_24GHZ_BAND; | ||
1748 | |||
1730 | lock = LOCK_NONE; | 1749 | lock = LOCK_NONE; |
1731 | if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { | 1750 | if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { |
1732 | printk(KERN_ERR DRV_NAME | 1751 | printk(KERN_ERR DRV_NAME |
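The new ipw_geos[] table and the ieee80211_set_geo() call register a plain 14-channel 2.4 GHz band ("---" meaning no country restriction) so the ieee80211 layer has a channel map before the first scan. The channel-to-frequency mapping used in that initializer, as a self-contained helper:

static int example_bg_channel_to_mhz(int channel)
{
	if (channel >= 1 && channel <= 13)
		return 2407 + channel * 5;	/* 2412 ... 2472 MHz */
	if (channel == 14)
		return 2484;			/* channel 14 sits apart from the 5 MHz raster */
	return -1;				/* not a 2.4 GHz channel */
}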
@@ -1869,7 +1888,7 @@ static void ipw2100_reset_adapter(struct ipw2100_priv *priv) | |||
1869 | priv->status |= STATUS_RESET_PENDING; | 1888 | priv->status |= STATUS_RESET_PENDING; |
1870 | spin_unlock_irqrestore(&priv->low_lock, flags); | 1889 | spin_unlock_irqrestore(&priv->low_lock, flags); |
1871 | 1890 | ||
1872 | down(&priv->action_sem); | 1891 | mutex_lock(&priv->action_mutex); |
1873 | /* stop timed checks so that they don't interfere with reset */ | 1892 | /* stop timed checks so that they don't interfere with reset */ |
1874 | priv->stop_hang_check = 1; | 1893 | priv->stop_hang_check = 1; |
1875 | cancel_delayed_work(&priv->hang_check); | 1894 | cancel_delayed_work(&priv->hang_check); |
@@ -1879,7 +1898,7 @@ static void ipw2100_reset_adapter(struct ipw2100_priv *priv) | |||
1879 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); | 1898 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); |
1880 | 1899 | ||
1881 | ipw2100_up(priv, 0); | 1900 | ipw2100_up(priv, 0); |
1882 | up(&priv->action_sem); | 1901 | mutex_unlock(&priv->action_mutex); |
1883 | 1902 | ||
1884 | } | 1903 | } |
1885 | 1904 | ||
@@ -2371,15 +2390,6 @@ static void isr_rx(struct ipw2100_priv *priv, int i, | |||
2371 | IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); | 2390 | IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); |
2372 | return; | 2391 | return; |
2373 | } | 2392 | } |
2374 | #ifdef CONFIG_IPW2100_MONITOR | ||
2375 | if (unlikely(priv->ieee->iw_mode == IW_MODE_MONITOR && | ||
2376 | priv->config & CFG_CRC_CHECK && | ||
2377 | status->flags & IPW_STATUS_FLAG_CRC_ERROR)) { | ||
2378 | IPW_DEBUG_RX("CRC error in packet. Dropping.\n"); | ||
2379 | priv->ieee->stats.rx_errors++; | ||
2380 | return; | ||
2381 | } | ||
2382 | #endif | ||
2383 | 2393 | ||
2384 | if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR && | 2394 | if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR && |
2385 | !(priv->status & STATUS_ASSOCIATED))) { | 2395 | !(priv->status & STATUS_ASSOCIATED))) { |
@@ -2427,6 +2437,89 @@ static void isr_rx(struct ipw2100_priv *priv, int i, | |||
2427 | priv->rx_queue.drv[i].host_addr = packet->dma_addr; | 2437 | priv->rx_queue.drv[i].host_addr = packet->dma_addr; |
2428 | } | 2438 | } |
2429 | 2439 | ||
2440 | #ifdef CONFIG_IPW2100_MONITOR | ||
2441 | |||
2442 | static void isr_rx_monitor(struct ipw2100_priv *priv, int i, | ||
2443 | struct ieee80211_rx_stats *stats) | ||
2444 | { | ||
2445 | struct ipw2100_status *status = &priv->status_queue.drv[i]; | ||
2446 | struct ipw2100_rx_packet *packet = &priv->rx_buffers[i]; | ||
2447 | |||
2448 | /* Magic struct that slots into the radiotap header -- no reason | ||
2449 | * to build this manually element by element, we can write it much | ||
2450 | * more efficiently than we can parse it. ORDER MATTERS HERE */ | ||
2451 | struct ipw_rt_hdr { | ||
2452 | struct ieee80211_radiotap_header rt_hdr; | ||
2453 | s8 rt_dbmsignal; /* signal in dbM, kluged to signed */ | ||
2454 | } *ipw_rt; | ||
2455 | |||
2456 | IPW_DEBUG_RX("Handler...\n"); | ||
2457 | |||
2458 | if (unlikely(status->frame_size > skb_tailroom(packet->skb) - | ||
2459 | sizeof(struct ipw_rt_hdr))) { | ||
2460 | IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!" | ||
2461 | " Dropping.\n", | ||
2462 | priv->net_dev->name, | ||
2463 | status->frame_size, | ||
2464 | skb_tailroom(packet->skb)); | ||
2465 | priv->ieee->stats.rx_errors++; | ||
2466 | return; | ||
2467 | } | ||
2468 | |||
2469 | if (unlikely(!netif_running(priv->net_dev))) { | ||
2470 | priv->ieee->stats.rx_errors++; | ||
2471 | priv->wstats.discard.misc++; | ||
2472 | IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); | ||
2473 | return; | ||
2474 | } | ||
2475 | |||
2476 | if (unlikely(priv->config & CFG_CRC_CHECK && | ||
2477 | status->flags & IPW_STATUS_FLAG_CRC_ERROR)) { | ||
2478 | IPW_DEBUG_RX("CRC error in packet. Dropping.\n"); | ||
2479 | priv->ieee->stats.rx_errors++; | ||
2480 | return; | ||
2481 | } | ||
2482 | |||
2483 | pci_unmap_single(priv->pci_dev, packet->dma_addr, | ||
2484 | sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); | ||
2485 | memmove(packet->skb->data + sizeof(struct ipw_rt_hdr), | ||
2486 | packet->skb->data, status->frame_size); | ||
2487 | |||
2488 | ipw_rt = (struct ipw_rt_hdr *) packet->skb->data; | ||
2489 | |||
2490 | ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; | ||
2491 | ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ | ||
2492 | ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total hdr+data */ | ||
2493 | |||
2494 | ipw_rt->rt_hdr.it_present = 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL; | ||
2495 | |||
2496 | ipw_rt->rt_dbmsignal = status->rssi + IPW2100_RSSI_TO_DBM; | ||
2497 | |||
2498 | skb_put(packet->skb, status->frame_size + sizeof(struct ipw_rt_hdr)); | ||
2499 | |||
2500 | if (!ieee80211_rx(priv->ieee, packet->skb, stats)) { | ||
2501 | priv->ieee->stats.rx_errors++; | ||
2502 | |||
2503 | /* ieee80211_rx failed, so it didn't free the SKB */ | ||
2504 | dev_kfree_skb_any(packet->skb); | ||
2505 | packet->skb = NULL; | ||
2506 | } | ||
2507 | |||
2508 | /* We need to allocate a new SKB and attach it to the RDB. */ | ||
2509 | if (unlikely(ipw2100_alloc_skb(priv, packet))) { | ||
2510 | IPW_DEBUG_WARNING( | ||
2511 | "%s: Unable to allocate SKB onto RBD ring - disabling " | ||
2512 | "adapter.\n", priv->net_dev->name); | ||
2513 | /* TODO: schedule adapter shutdown */ | ||
2514 | IPW_DEBUG_INFO("TODO: Shutdown adapter...\n"); | ||
2515 | } | ||
2516 | |||
2517 | /* Update the RDB entry */ | ||
2518 | priv->rx_queue.drv[i].host_addr = packet->dma_addr; | ||
2519 | } | ||
2520 | |||
2521 | #endif | ||
2522 | |||
2430 | static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) | 2523 | static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) |
2431 | { | 2524 | { |
2432 | struct ipw2100_status *status = &priv->status_queue.drv[i]; | 2525 | struct ipw2100_status *status = &priv->status_queue.drv[i]; |
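isr_rx_monitor() above is what backs the switch to ARPHRD_IEEE80211_RADIOTAP later in this diff: each captured frame is moved up the buffer and prefixed with an 8-byte radiotap header plus a single dBm antenna-signal byte. A self-contained sketch of that minimal radiotap blob; the packed layout mirrors struct ieee80211_radiotap_header, bit 5 is IEEE80211_RADIOTAP_DBM_ANTSIGNAL, and the multi-byte fields are little-endian as the spec requires (direct stores assume a little-endian host):

#include <stdint.h>
#include <string.h>

struct example_radiotap {
	uint8_t  it_version;	/* always 0 */
	uint8_t  it_pad;
	uint16_t it_len;	/* total radiotap length */
	uint32_t it_present;	/* bitmap of the fields that follow */
	int8_t   dbm_antsignal;	/* the one optional field carried here */
} __attribute__((packed));

static void example_fill_radiotap(struct example_radiotap *rt, int rssi, int rssi_to_dbm_offset)
{
	memset(rt, 0, sizeof(*rt));
	rt->it_len = sizeof(*rt);		/* header plus data, 9 bytes */
	rt->it_present = 1u << 5;		/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	rt->dbm_antsignal = (int8_t)(rssi + rssi_to_dbm_offset);	/* e.g. IPW2100_RSSI_TO_DBM */
}

it_len covers everything up to the start of the 802.11 frame, which is exactly what the patch stores via sizeof(struct ipw_rt_hdr).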
@@ -2558,7 +2651,7 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv) | |||
2558 | case P8023_DATA_VAL: | 2651 | case P8023_DATA_VAL: |
2559 | #ifdef CONFIG_IPW2100_MONITOR | 2652 | #ifdef CONFIG_IPW2100_MONITOR |
2560 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | 2653 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { |
2561 | isr_rx(priv, i, &stats); | 2654 | isr_rx_monitor(priv, i, &stats); |
2562 | break; | 2655 | break; |
2563 | } | 2656 | } |
2564 | #endif | 2657 | #endif |
@@ -3750,7 +3843,7 @@ static ssize_t store_memory(struct device *d, struct device_attribute *attr, | |||
3750 | struct net_device *dev = priv->net_dev; | 3843 | struct net_device *dev = priv->net_dev; |
3751 | const char *p = buf; | 3844 | const char *p = buf; |
3752 | 3845 | ||
3753 | (void) dev; /* kill unused-var warning for debug-only code */ | 3846 | (void)dev; /* kill unused-var warning for debug-only code */ |
3754 | 3847 | ||
3755 | if (count < 1) | 3848 | if (count < 1) |
3756 | return count; | 3849 | return count; |
@@ -3863,7 +3956,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode) | |||
3863 | #ifdef CONFIG_IPW2100_MONITOR | 3956 | #ifdef CONFIG_IPW2100_MONITOR |
3864 | case IW_MODE_MONITOR: | 3957 | case IW_MODE_MONITOR: |
3865 | priv->last_mode = priv->ieee->iw_mode; | 3958 | priv->last_mode = priv->ieee->iw_mode; |
3866 | priv->net_dev->type = ARPHRD_IEEE80211; | 3959 | priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; |
3867 | break; | 3960 | break; |
3868 | #endif /* CONFIG_IPW2100_MONITOR */ | 3961 | #endif /* CONFIG_IPW2100_MONITOR */ |
3869 | } | 3962 | } |
@@ -4070,7 +4163,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, | |||
4070 | unsigned long val; | 4163 | unsigned long val; |
4071 | char *p = buffer; | 4164 | char *p = buffer; |
4072 | 4165 | ||
4073 | (void) dev; /* kill unused-var warning for debug-only code */ | 4166 | (void)dev; /* kill unused-var warning for debug-only code */ |
4074 | 4167 | ||
4075 | IPW_DEBUG_INFO("enter\n"); | 4168 | IPW_DEBUG_INFO("enter\n"); |
4076 | 4169 | ||
@@ -4119,7 +4212,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) | |||
4119 | IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", | 4212 | IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", |
4120 | disable_radio ? "OFF" : "ON"); | 4213 | disable_radio ? "OFF" : "ON"); |
4121 | 4214 | ||
4122 | down(&priv->action_sem); | 4215 | mutex_lock(&priv->action_mutex); |
4123 | 4216 | ||
4124 | if (disable_radio) { | 4217 | if (disable_radio) { |
4125 | priv->status |= STATUS_RF_KILL_SW; | 4218 | priv->status |= STATUS_RF_KILL_SW; |
@@ -4137,7 +4230,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) | |||
4137 | schedule_reset(priv); | 4230 | schedule_reset(priv); |
4138 | } | 4231 | } |
4139 | 4232 | ||
4140 | up(&priv->action_sem); | 4233 | mutex_unlock(&priv->action_mutex); |
4141 | return 1; | 4234 | return 1; |
4142 | } | 4235 | } |
4143 | 4236 | ||
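This hunk and the many that follow convert the driver's binary semaphores (down()/up() on action_sem and adapter_sem) to struct mutex, the preferred sleeping lock for plain mutual exclusion, which also hooks into the kernel's lock debugging. A minimal sketch of the pattern, with placeholder names rather than the ipw2100 structures:

    #include <linux/mutex.h>

    struct ex_priv {
            struct mutex action_mutex;       /* was: struct semaphore action_sem */
    };

    static void ex_init(struct ex_priv *priv)
    {
            mutex_init(&priv->action_mutex); /* was: sema_init(&priv->action_sem, 1) */
    }

    static void ex_do_action(struct ex_priv *priv)
    {
            mutex_lock(&priv->action_mutex);   /* was: down(&priv->action_sem) */
            /* ... critical section, may sleep ... */
            mutex_unlock(&priv->action_mutex); /* was: up(&priv->action_sem) */
    }

Unlike a semaphore, a mutex must be released by the task that acquired it, which matches how the driver already used these locks.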
@@ -5107,12 +5200,13 @@ static int ipw2100_set_tx_power(struct ipw2100_priv *priv, u32 tx_power) | |||
5107 | .host_command_length = 4 | 5200 | .host_command_length = 4 |
5108 | }; | 5201 | }; |
5109 | int err = 0; | 5202 | int err = 0; |
5203 | u32 tmp = tx_power; | ||
5110 | 5204 | ||
5111 | if (tx_power != IPW_TX_POWER_DEFAULT) | 5205 | if (tx_power != IPW_TX_POWER_DEFAULT) |
5112 | tx_power = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 / | 5206 | tmp = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 / |
5113 | (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM); | 5207 | (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM); |
5114 | 5208 | ||
5115 | cmd.host_command_parameters[0] = tx_power; | 5209 | cmd.host_command_parameters[0] = tmp; |
5116 | 5210 | ||
5117 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) | 5211 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) |
5118 | err = ipw2100_hw_send_command(priv, &cmd); | 5212 | err = ipw2100_hw_send_command(priv, &cmd); |
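The change above stops overwriting the caller's tx_power argument: the dBm request is scaled into the firmware's 0..16 parameter range in a temporary, and only the temporary is placed in the host command. A small sketch of the mapping, with placeholder limits (the real ones are IPW_TX_POWER_MIN_DBM and IPW_TX_POWER_MAX_DBM in ipw2100.h):

    #include <linux/types.h>

    /* Illustrative limits only. */
    #define EX_TX_POWER_MIN_DBM  (-12)
    #define EX_TX_POWER_MAX_DBM    16

    /* Linear map of [MIN_DBM, MAX_DBM] onto 0..16 firmware units. */
    static u32 ex_dbm_to_fw_units(int tx_power_dbm)
    {
            return (tx_power_dbm - EX_TX_POWER_MIN_DBM) * 16 /
                   (EX_TX_POWER_MAX_DBM - EX_TX_POWER_MIN_DBM);
    }

With these illustrative limits, -12 dBm maps to 0 and +16 dBm maps to 16; IPW_TX_POWER_DEFAULT bypasses the scaling entirely, as the surrounding code shows.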
@@ -5365,9 +5459,12 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode) | |||
5365 | SEC_LEVEL_0, 0, 1); | 5459 | SEC_LEVEL_0, 0, 1); |
5366 | } else { | 5460 | } else { |
5367 | auth_mode = IPW_AUTH_OPEN; | 5461 | auth_mode = IPW_AUTH_OPEN; |
5368 | if ((priv->ieee->sec.flags & SEC_AUTH_MODE) && | 5462 | if (priv->ieee->sec.flags & SEC_AUTH_MODE) { |
5369 | (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) | 5463 | if (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY) |
5370 | auth_mode = IPW_AUTH_SHARED; | 5464 | auth_mode = IPW_AUTH_SHARED; |
5465 | else if (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP) | ||
5466 | auth_mode = IPW_AUTH_LEAP_CISCO_ID; | ||
5467 | } | ||
5371 | 5468 | ||
5372 | sec_level = SEC_LEVEL_0; | 5469 | sec_level = SEC_LEVEL_0; |
5373 | if (priv->ieee->sec.flags & SEC_LEVEL) | 5470 | if (priv->ieee->sec.flags & SEC_LEVEL) |
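With LEAP added, the negotiated 802.11 authentication algorithm is translated into the firmware's auth-mode codes: IPW_AUTH_OPEN, IPW_AUTH_SHARED, or the Cisco LEAP identifier IPW_AUTH_LEAP_CISCO_ID (0x80) defined in the ipw2100.h hunk below. A condensed sketch of the mapping this hunk implements; the helper name is hypothetical:

    #include <net/ieee80211.h>
    #include "ipw2100.h"          /* IPW_AUTH_* definitions */

    static int ex_wlan_auth_to_ipw_auth(const struct ieee80211_security *sec)
    {
            int auth_mode = IPW_AUTH_OPEN;  /* default when nothing is negotiated */

            if (sec->flags & SEC_AUTH_MODE) {
                    if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
                            auth_mode = IPW_AUTH_SHARED;
                    else if (sec->auth_mode == WLAN_AUTH_LEAP)
                            auth_mode = IPW_AUTH_LEAP_CISCO_ID;  /* 0x80 */
            }
            return auth_mode;
    }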
@@ -5437,7 +5534,7 @@ static void shim__set_security(struct net_device *dev, | |||
5437 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 5534 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
5438 | int i, force_update = 0; | 5535 | int i, force_update = 0; |
5439 | 5536 | ||
5440 | down(&priv->action_sem); | 5537 | mutex_lock(&priv->action_mutex); |
5441 | if (!(priv->status & STATUS_INITIALIZED)) | 5538 | if (!(priv->status & STATUS_INITIALIZED)) |
5442 | goto done; | 5539 | goto done; |
5443 | 5540 | ||
@@ -5510,7 +5607,7 @@ static void shim__set_security(struct net_device *dev, | |||
5510 | if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) | 5607 | if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) |
5511 | ipw2100_configure_security(priv, 0); | 5608 | ipw2100_configure_security(priv, 0); |
5512 | done: | 5609 | done: |
5513 | up(&priv->action_sem); | 5610 | mutex_unlock(&priv->action_mutex); |
5514 | } | 5611 | } |
5515 | 5612 | ||
5516 | static int ipw2100_adapter_setup(struct ipw2100_priv *priv) | 5613 | static int ipw2100_adapter_setup(struct ipw2100_priv *priv) |
@@ -5634,7 +5731,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p) | |||
5634 | if (!is_valid_ether_addr(addr->sa_data)) | 5731 | if (!is_valid_ether_addr(addr->sa_data)) |
5635 | return -EADDRNOTAVAIL; | 5732 | return -EADDRNOTAVAIL; |
5636 | 5733 | ||
5637 | down(&priv->action_sem); | 5734 | mutex_lock(&priv->action_mutex); |
5638 | 5735 | ||
5639 | priv->config |= CFG_CUSTOM_MAC; | 5736 | priv->config |= CFG_CUSTOM_MAC; |
5640 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); | 5737 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); |
@@ -5644,12 +5741,12 @@ static int ipw2100_set_address(struct net_device *dev, void *p) | |||
5644 | goto done; | 5741 | goto done; |
5645 | 5742 | ||
5646 | priv->reset_backoff = 0; | 5743 | priv->reset_backoff = 0; |
5647 | up(&priv->action_sem); | 5744 | mutex_unlock(&priv->action_mutex); |
5648 | ipw2100_reset_adapter(priv); | 5745 | ipw2100_reset_adapter(priv); |
5649 | return 0; | 5746 | return 0; |
5650 | 5747 | ||
5651 | done: | 5748 | done: |
5652 | up(&priv->action_sem); | 5749 | mutex_unlock(&priv->action_mutex); |
5653 | return err; | 5750 | return err; |
5654 | } | 5751 | } |
5655 | 5752 | ||
@@ -5760,6 +5857,9 @@ static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) | |||
5760 | } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { | 5857 | } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { |
5761 | sec.auth_mode = WLAN_AUTH_OPEN; | 5858 | sec.auth_mode = WLAN_AUTH_OPEN; |
5762 | ieee->open_wep = 1; | 5859 | ieee->open_wep = 1; |
5860 | } else if (value & IW_AUTH_ALG_LEAP) { | ||
5861 | sec.auth_mode = WLAN_AUTH_LEAP; | ||
5862 | ieee->open_wep = 1; | ||
5763 | } else | 5863 | } else |
5764 | return -EINVAL; | 5864 | return -EINVAL; |
5765 | 5865 | ||
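The wireless-extensions handler above now accepts IW_AUTH_ALG_LEAP alongside shared-key and open-system, recording the choice in the ieee80211 security structure; like the open-system branch, the LEAP branch sets ieee->open_wep = 1. A condensed sketch of the flag-to-mode translation, with the helper name, pointer plumbing, and the shared-key branch filled in as assumptions (only the open-system and LEAP branches are visible in the hunk):

    #include <linux/wireless.h>   /* IW_AUTH_ALG_* */
    #include <net/ieee80211.h>    /* struct ieee80211_security, WLAN_AUTH_* */

    static int ex_auth_alg_to_mode(int value, struct ieee80211_security *sec,
                                   int *open_wep)
    {
            if (value & IW_AUTH_ALG_SHARED_KEY) {
                    sec->auth_mode = WLAN_AUTH_SHARED_KEY;
                    *open_wep = 0;
            } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
                    sec->auth_mode = WLAN_AUTH_OPEN;
                    *open_wep = 1;
            } else if (value & IW_AUTH_ALG_LEAP) {
                    sec->auth_mode = WLAN_AUTH_LEAP;
                    *open_wep = 1;
            } else {
                    return -EINVAL;
            }
            sec->flags |= SEC_AUTH_MODE;  /* so ipw2100_configure_security() honours it */
            return 0;
    }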
@@ -5771,8 +5871,8 @@ static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) | |||
5771 | return ret; | 5871 | return ret; |
5772 | } | 5872 | } |
5773 | 5873 | ||
5774 | void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, | 5874 | static void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, |
5775 | char *wpa_ie, int wpa_ie_len) | 5875 | char *wpa_ie, int wpa_ie_len) |
5776 | { | 5876 | { |
5777 | 5877 | ||
5778 | struct ipw2100_wpa_assoc_frame frame; | 5878 | struct ipw2100_wpa_assoc_frame frame; |
@@ -5989,8 +6089,8 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, | |||
5989 | strcpy(priv->nick, "ipw2100"); | 6089 | strcpy(priv->nick, "ipw2100"); |
5990 | 6090 | ||
5991 | spin_lock_init(&priv->low_lock); | 6091 | spin_lock_init(&priv->low_lock); |
5992 | sema_init(&priv->action_sem, 1); | 6092 | mutex_init(&priv->action_mutex); |
5993 | sema_init(&priv->adapter_sem, 1); | 6093 | mutex_init(&priv->adapter_mutex); |
5994 | 6094 | ||
5995 | init_waitqueue_head(&priv->wait_command_queue); | 6095 | init_waitqueue_head(&priv->wait_command_queue); |
5996 | 6096 | ||
@@ -6155,7 +6255,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6155 | * member to call a function that then just turns and calls ipw2100_up. | 6255 | * member to call a function that then just turns and calls ipw2100_up. |
6156 | * net_dev->init is called after name allocation but before the | 6256 | * net_dev->init is called after name allocation but before the |
6157 | * notifier chain is called */ | 6257 | * notifier chain is called */ |
6158 | down(&priv->action_sem); | 6258 | mutex_lock(&priv->action_mutex); |
6159 | err = register_netdev(dev); | 6259 | err = register_netdev(dev); |
6160 | if (err) { | 6260 | if (err) { |
6161 | printk(KERN_WARNING DRV_NAME | 6261 | printk(KERN_WARNING DRV_NAME |
@@ -6191,12 +6291,12 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6191 | 6291 | ||
6192 | priv->status |= STATUS_INITIALIZED; | 6292 | priv->status |= STATUS_INITIALIZED; |
6193 | 6293 | ||
6194 | up(&priv->action_sem); | 6294 | mutex_unlock(&priv->action_mutex); |
6195 | 6295 | ||
6196 | return 0; | 6296 | return 0; |
6197 | 6297 | ||
6198 | fail_unlock: | 6298 | fail_unlock: |
6199 | up(&priv->action_sem); | 6299 | mutex_unlock(&priv->action_mutex); |
6200 | 6300 | ||
6201 | fail: | 6301 | fail: |
6202 | if (dev) { | 6302 | if (dev) { |
@@ -6236,7 +6336,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) | |||
6236 | struct net_device *dev; | 6336 | struct net_device *dev; |
6237 | 6337 | ||
6238 | if (priv) { | 6338 | if (priv) { |
6239 | down(&priv->action_sem); | 6339 | mutex_lock(&priv->action_mutex); |
6240 | 6340 | ||
6241 | priv->status &= ~STATUS_INITIALIZED; | 6341 | priv->status &= ~STATUS_INITIALIZED; |
6242 | 6342 | ||
@@ -6251,9 +6351,9 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) | |||
6251 | /* Take down the hardware */ | 6351 | /* Take down the hardware */ |
6252 | ipw2100_down(priv); | 6352 | ipw2100_down(priv); |
6253 | 6353 | ||
6254 | /* Release the semaphore so that the network subsystem can | 6354 | /* Release the mutex so that the network subsystem can |
6255 | * complete any needed calls into the driver... */ | 6355 | * complete any needed calls into the driver... */ |
6256 | up(&priv->action_sem); | 6356 | mutex_unlock(&priv->action_mutex); |
6257 | 6357 | ||
6258 | /* Unregister the device first - this results in close() | 6358 | /* Unregister the device first - this results in close() |
6259 | * being called if the device is open. If we free storage | 6359 | * being called if the device is open. If we free storage |
@@ -6292,7 +6392,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) | |||
6292 | 6392 | ||
6293 | IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name); | 6393 | IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name); |
6294 | 6394 | ||
6295 | down(&priv->action_sem); | 6395 | mutex_lock(&priv->action_mutex); |
6296 | if (priv->status & STATUS_INITIALIZED) { | 6396 | if (priv->status & STATUS_INITIALIZED) { |
6297 | /* Take down the device; powers it off, etc. */ | 6397 | /* Take down the device; powers it off, etc. */ |
6298 | ipw2100_down(priv); | 6398 | ipw2100_down(priv); |
@@ -6305,7 +6405,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) | |||
6305 | pci_disable_device(pci_dev); | 6405 | pci_disable_device(pci_dev); |
6306 | pci_set_power_state(pci_dev, PCI_D3hot); | 6406 | pci_set_power_state(pci_dev, PCI_D3hot); |
6307 | 6407 | ||
6308 | up(&priv->action_sem); | 6408 | mutex_unlock(&priv->action_mutex); |
6309 | 6409 | ||
6310 | return 0; | 6410 | return 0; |
6311 | } | 6411 | } |
@@ -6319,7 +6419,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev) | |||
6319 | if (IPW2100_PM_DISABLED) | 6419 | if (IPW2100_PM_DISABLED) |
6320 | return 0; | 6420 | return 0; |
6321 | 6421 | ||
6322 | down(&priv->action_sem); | 6422 | mutex_lock(&priv->action_mutex); |
6323 | 6423 | ||
6324 | IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); | 6424 | IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); |
6325 | 6425 | ||
@@ -6345,7 +6445,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev) | |||
6345 | if (!(priv->status & STATUS_RF_KILL_SW)) | 6445 | if (!(priv->status & STATUS_RF_KILL_SW)) |
6346 | ipw2100_up(priv, 0); | 6446 | ipw2100_up(priv, 0); |
6347 | 6447 | ||
6348 | up(&priv->action_sem); | 6448 | mutex_unlock(&priv->action_mutex); |
6349 | 6449 | ||
6350 | return 0; | 6450 | return 0; |
6351 | } | 6451 | } |
@@ -6509,7 +6609,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev, | |||
6509 | if (priv->ieee->iw_mode == IW_MODE_INFRA) | 6609 | if (priv->ieee->iw_mode == IW_MODE_INFRA) |
6510 | return -EOPNOTSUPP; | 6610 | return -EOPNOTSUPP; |
6511 | 6611 | ||
6512 | down(&priv->action_sem); | 6612 | mutex_lock(&priv->action_mutex); |
6513 | if (!(priv->status & STATUS_INITIALIZED)) { | 6613 | if (!(priv->status & STATUS_INITIALIZED)) { |
6514 | err = -EIO; | 6614 | err = -EIO; |
6515 | goto done; | 6615 | goto done; |
@@ -6540,7 +6640,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev, | |||
6540 | } | 6640 | } |
6541 | 6641 | ||
6542 | done: | 6642 | done: |
6543 | up(&priv->action_sem); | 6643 | mutex_unlock(&priv->action_mutex); |
6544 | return err; | 6644 | return err; |
6545 | } | 6645 | } |
6546 | 6646 | ||
@@ -6581,7 +6681,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev, | |||
6581 | if (wrqu->mode == priv->ieee->iw_mode) | 6681 | if (wrqu->mode == priv->ieee->iw_mode) |
6582 | return 0; | 6682 | return 0; |
6583 | 6683 | ||
6584 | down(&priv->action_sem); | 6684 | mutex_lock(&priv->action_mutex); |
6585 | if (!(priv->status & STATUS_INITIALIZED)) { | 6685 | if (!(priv->status & STATUS_INITIALIZED)) { |
6586 | err = -EIO; | 6686 | err = -EIO; |
6587 | goto done; | 6687 | goto done; |
@@ -6604,7 +6704,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev, | |||
6604 | } | 6704 | } |
6605 | 6705 | ||
6606 | done: | 6706 | done: |
6607 | up(&priv->action_sem); | 6707 | mutex_unlock(&priv->action_mutex); |
6608 | return err; | 6708 | return err; |
6609 | } | 6709 | } |
6610 | 6710 | ||
@@ -6786,7 +6886,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev, | |||
6786 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) | 6886 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) |
6787 | return -EINVAL; | 6887 | return -EINVAL; |
6788 | 6888 | ||
6789 | down(&priv->action_sem); | 6889 | mutex_lock(&priv->action_mutex); |
6790 | if (!(priv->status & STATUS_INITIALIZED)) { | 6890 | if (!(priv->status & STATUS_INITIALIZED)) { |
6791 | err = -EIO; | 6891 | err = -EIO; |
6792 | goto done; | 6892 | goto done; |
@@ -6815,7 +6915,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev, | |||
6815 | wrqu->ap_addr.sa_data[5] & 0xff); | 6915 | wrqu->ap_addr.sa_data[5] & 0xff); |
6816 | 6916 | ||
6817 | done: | 6917 | done: |
6818 | up(&priv->action_sem); | 6918 | mutex_unlock(&priv->action_mutex); |
6819 | return err; | 6919 | return err; |
6820 | } | 6920 | } |
6821 | 6921 | ||
@@ -6851,7 +6951,7 @@ static int ipw2100_wx_set_essid(struct net_device *dev, | |||
6851 | int length = 0; | 6951 | int length = 0; |
6852 | int err = 0; | 6952 | int err = 0; |
6853 | 6953 | ||
6854 | down(&priv->action_sem); | 6954 | mutex_lock(&priv->action_mutex); |
6855 | if (!(priv->status & STATUS_INITIALIZED)) { | 6955 | if (!(priv->status & STATUS_INITIALIZED)) { |
6856 | err = -EIO; | 6956 | err = -EIO; |
6857 | goto done; | 6957 | goto done; |
@@ -6888,7 +6988,7 @@ static int ipw2100_wx_set_essid(struct net_device *dev, | |||
6888 | err = ipw2100_set_essid(priv, essid, length, 0); | 6988 | err = ipw2100_set_essid(priv, essid, length, 0); |
6889 | 6989 | ||
6890 | done: | 6990 | done: |
6891 | up(&priv->action_sem); | 6991 | mutex_unlock(&priv->action_mutex); |
6892 | return err; | 6992 | return err; |
6893 | } | 6993 | } |
6894 | 6994 | ||
@@ -6969,7 +7069,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev, | |||
6969 | u32 rate; | 7069 | u32 rate; |
6970 | int err = 0; | 7070 | int err = 0; |
6971 | 7071 | ||
6972 | down(&priv->action_sem); | 7072 | mutex_lock(&priv->action_mutex); |
6973 | if (!(priv->status & STATUS_INITIALIZED)) { | 7073 | if (!(priv->status & STATUS_INITIALIZED)) { |
6974 | err = -EIO; | 7074 | err = -EIO; |
6975 | goto done; | 7075 | goto done; |
@@ -6996,7 +7096,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev, | |||
6996 | 7096 | ||
6997 | IPW_DEBUG_WX("SET Rate -> %04X \n", rate); | 7097 | IPW_DEBUG_WX("SET Rate -> %04X \n", rate); |
6998 | done: | 7098 | done: |
6999 | up(&priv->action_sem); | 7099 | mutex_unlock(&priv->action_mutex); |
7000 | return err; | 7100 | return err; |
7001 | } | 7101 | } |
7002 | 7102 | ||
@@ -7016,7 +7116,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev, | |||
7016 | return 0; | 7116 | return 0; |
7017 | } | 7117 | } |
7018 | 7118 | ||
7019 | down(&priv->action_sem); | 7119 | mutex_lock(&priv->action_mutex); |
7020 | if (!(priv->status & STATUS_INITIALIZED)) { | 7120 | if (!(priv->status & STATUS_INITIALIZED)) { |
7021 | err = -EIO; | 7121 | err = -EIO; |
7022 | goto done; | 7122 | goto done; |
@@ -7048,7 +7148,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev, | |||
7048 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); | 7148 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); |
7049 | 7149 | ||
7050 | done: | 7150 | done: |
7051 | up(&priv->action_sem); | 7151 | mutex_unlock(&priv->action_mutex); |
7052 | return err; | 7152 | return err; |
7053 | } | 7153 | } |
7054 | 7154 | ||
@@ -7063,7 +7163,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev, | |||
7063 | if (wrqu->rts.fixed == 0) | 7163 | if (wrqu->rts.fixed == 0) |
7064 | return -EINVAL; | 7164 | return -EINVAL; |
7065 | 7165 | ||
7066 | down(&priv->action_sem); | 7166 | mutex_lock(&priv->action_mutex); |
7067 | if (!(priv->status & STATUS_INITIALIZED)) { | 7167 | if (!(priv->status & STATUS_INITIALIZED)) { |
7068 | err = -EIO; | 7168 | err = -EIO; |
7069 | goto done; | 7169 | goto done; |
@@ -7083,7 +7183,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev, | |||
7083 | 7183 | ||
7084 | IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value); | 7184 | IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value); |
7085 | done: | 7185 | done: |
7086 | up(&priv->action_sem); | 7186 | mutex_unlock(&priv->action_mutex); |
7087 | return err; | 7187 | return err; |
7088 | } | 7188 | } |
7089 | 7189 | ||
@@ -7134,7 +7234,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev, | |||
7134 | value = wrqu->txpower.value; | 7234 | value = wrqu->txpower.value; |
7135 | } | 7235 | } |
7136 | 7236 | ||
7137 | down(&priv->action_sem); | 7237 | mutex_lock(&priv->action_mutex); |
7138 | if (!(priv->status & STATUS_INITIALIZED)) { | 7238 | if (!(priv->status & STATUS_INITIALIZED)) { |
7139 | err = -EIO; | 7239 | err = -EIO; |
7140 | goto done; | 7240 | goto done; |
@@ -7145,7 +7245,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev, | |||
7145 | IPW_DEBUG_WX("SET TX Power -> %d \n", value); | 7245 | IPW_DEBUG_WX("SET TX Power -> %d \n", value); |
7146 | 7246 | ||
7147 | done: | 7247 | done: |
7148 | up(&priv->action_sem); | 7248 | mutex_unlock(&priv->action_mutex); |
7149 | return err; | 7249 | return err; |
7150 | } | 7250 | } |
7151 | 7251 | ||
@@ -7237,7 +7337,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev, | |||
7237 | if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) | 7337 | if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) |
7238 | return 0; | 7338 | return 0; |
7239 | 7339 | ||
7240 | down(&priv->action_sem); | 7340 | mutex_lock(&priv->action_mutex); |
7241 | if (!(priv->status & STATUS_INITIALIZED)) { | 7341 | if (!(priv->status & STATUS_INITIALIZED)) { |
7242 | err = -EIO; | 7342 | err = -EIO; |
7243 | goto done; | 7343 | goto done; |
@@ -7264,7 +7364,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev, | |||
7264 | IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value); | 7364 | IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value); |
7265 | 7365 | ||
7266 | done: | 7366 | done: |
7267 | up(&priv->action_sem); | 7367 | mutex_unlock(&priv->action_mutex); |
7268 | return err; | 7368 | return err; |
7269 | } | 7369 | } |
7270 | 7370 | ||
@@ -7307,7 +7407,7 @@ static int ipw2100_wx_set_scan(struct net_device *dev, | |||
7307 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7407 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7308 | int err = 0; | 7408 | int err = 0; |
7309 | 7409 | ||
7310 | down(&priv->action_sem); | 7410 | mutex_lock(&priv->action_mutex); |
7311 | if (!(priv->status & STATUS_INITIALIZED)) { | 7411 | if (!(priv->status & STATUS_INITIALIZED)) { |
7312 | err = -EIO; | 7412 | err = -EIO; |
7313 | goto done; | 7413 | goto done; |
@@ -7322,7 +7422,7 @@ static int ipw2100_wx_set_scan(struct net_device *dev, | |||
7322 | } | 7422 | } |
7323 | 7423 | ||
7324 | done: | 7424 | done: |
7325 | up(&priv->action_sem); | 7425 | mutex_unlock(&priv->action_mutex); |
7326 | return err; | 7426 | return err; |
7327 | } | 7427 | } |
7328 | 7428 | ||
@@ -7372,7 +7472,7 @@ static int ipw2100_wx_set_power(struct net_device *dev, | |||
7372 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7472 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7373 | int err = 0; | 7473 | int err = 0; |
7374 | 7474 | ||
7375 | down(&priv->action_sem); | 7475 | mutex_lock(&priv->action_mutex); |
7376 | if (!(priv->status & STATUS_INITIALIZED)) { | 7476 | if (!(priv->status & STATUS_INITIALIZED)) { |
7377 | err = -EIO; | 7477 | err = -EIO; |
7378 | goto done; | 7478 | goto done; |
@@ -7405,7 +7505,7 @@ static int ipw2100_wx_set_power(struct net_device *dev, | |||
7405 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); | 7505 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); |
7406 | 7506 | ||
7407 | done: | 7507 | done: |
7408 | up(&priv->action_sem); | 7508 | mutex_unlock(&priv->action_mutex); |
7409 | return err; | 7509 | return err; |
7410 | 7510 | ||
7411 | } | 7511 | } |
@@ -7709,7 +7809,7 @@ static int ipw2100_wx_set_promisc(struct net_device *dev, | |||
7709 | int enable = (parms[0] > 0); | 7809 | int enable = (parms[0] > 0); |
7710 | int err = 0; | 7810 | int err = 0; |
7711 | 7811 | ||
7712 | down(&priv->action_sem); | 7812 | mutex_lock(&priv->action_mutex); |
7713 | if (!(priv->status & STATUS_INITIALIZED)) { | 7813 | if (!(priv->status & STATUS_INITIALIZED)) { |
7714 | err = -EIO; | 7814 | err = -EIO; |
7715 | goto done; | 7815 | goto done; |
@@ -7727,7 +7827,7 @@ static int ipw2100_wx_set_promisc(struct net_device *dev, | |||
7727 | err = ipw2100_switch_mode(priv, priv->last_mode); | 7827 | err = ipw2100_switch_mode(priv, priv->last_mode); |
7728 | } | 7828 | } |
7729 | done: | 7829 | done: |
7730 | up(&priv->action_sem); | 7830 | mutex_unlock(&priv->action_mutex); |
7731 | return err; | 7831 | return err; |
7732 | } | 7832 | } |
7733 | 7833 | ||
@@ -7750,7 +7850,7 @@ static int ipw2100_wx_set_powermode(struct net_device *dev, | |||
7750 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7850 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7751 | int err = 0, mode = *(int *)extra; | 7851 | int err = 0, mode = *(int *)extra; |
7752 | 7852 | ||
7753 | down(&priv->action_sem); | 7853 | mutex_lock(&priv->action_mutex); |
7754 | if (!(priv->status & STATUS_INITIALIZED)) { | 7854 | if (!(priv->status & STATUS_INITIALIZED)) { |
7755 | err = -EIO; | 7855 | err = -EIO; |
7756 | goto done; | 7856 | goto done; |
@@ -7762,7 +7862,7 @@ static int ipw2100_wx_set_powermode(struct net_device *dev, | |||
7762 | if (priv->power_mode != mode) | 7862 | if (priv->power_mode != mode) |
7763 | err = ipw2100_set_power_mode(priv, mode); | 7863 | err = ipw2100_set_power_mode(priv, mode); |
7764 | done: | 7864 | done: |
7765 | up(&priv->action_sem); | 7865 | mutex_unlock(&priv->action_mutex); |
7766 | return err; | 7866 | return err; |
7767 | } | 7867 | } |
7768 | 7868 | ||
@@ -7814,7 +7914,7 @@ static int ipw2100_wx_set_preamble(struct net_device *dev, | |||
7814 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7914 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7815 | int err, mode = *(int *)extra; | 7915 | int err, mode = *(int *)extra; |
7816 | 7916 | ||
7817 | down(&priv->action_sem); | 7917 | mutex_lock(&priv->action_mutex); |
7818 | if (!(priv->status & STATUS_INITIALIZED)) { | 7918 | if (!(priv->status & STATUS_INITIALIZED)) { |
7819 | err = -EIO; | 7919 | err = -EIO; |
7820 | goto done; | 7920 | goto done; |
@@ -7832,7 +7932,7 @@ static int ipw2100_wx_set_preamble(struct net_device *dev, | |||
7832 | err = ipw2100_system_config(priv, 0); | 7932 | err = ipw2100_system_config(priv, 0); |
7833 | 7933 | ||
7834 | done: | 7934 | done: |
7835 | up(&priv->action_sem); | 7935 | mutex_unlock(&priv->action_mutex); |
7836 | return err; | 7936 | return err; |
7837 | } | 7937 | } |
7838 | 7938 | ||
@@ -7862,7 +7962,7 @@ static int ipw2100_wx_set_crc_check(struct net_device *dev, | |||
7862 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7962 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7863 | int err, mode = *(int *)extra; | 7963 | int err, mode = *(int *)extra; |
7864 | 7964 | ||
7865 | down(&priv->action_sem); | 7965 | mutex_lock(&priv->action_mutex); |
7866 | if (!(priv->status & STATUS_INITIALIZED)) { | 7966 | if (!(priv->status & STATUS_INITIALIZED)) { |
7867 | err = -EIO; | 7967 | err = -EIO; |
7868 | goto done; | 7968 | goto done; |
@@ -7879,7 +7979,7 @@ static int ipw2100_wx_set_crc_check(struct net_device *dev, | |||
7879 | err = 0; | 7979 | err = 0; |
7880 | 7980 | ||
7881 | done: | 7981 | done: |
7882 | up(&priv->action_sem); | 7982 | mutex_unlock(&priv->action_mutex); |
7883 | return err; | 7983 | return err; |
7884 | } | 7984 | } |
7885 | 7985 | ||
@@ -8184,11 +8284,11 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv) | |||
8184 | if (priv->status & STATUS_STOPPING) | 8284 | if (priv->status & STATUS_STOPPING) |
8185 | return; | 8285 | return; |
8186 | 8286 | ||
8187 | down(&priv->action_sem); | 8287 | mutex_lock(&priv->action_mutex); |
8188 | 8288 | ||
8189 | IPW_DEBUG_WX("enter\n"); | 8289 | IPW_DEBUG_WX("enter\n"); |
8190 | 8290 | ||
8191 | up(&priv->action_sem); | 8291 | mutex_unlock(&priv->action_mutex); |
8192 | 8292 | ||
8193 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 8293 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
8194 | 8294 | ||
@@ -8211,7 +8311,7 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv) | |||
8211 | 8311 | ||
8212 | if (!(priv->status & STATUS_ASSOCIATED)) { | 8312 | if (!(priv->status & STATUS_ASSOCIATED)) { |
8213 | IPW_DEBUG_WX("Configuring ESSID\n"); | 8313 | IPW_DEBUG_WX("Configuring ESSID\n"); |
8214 | down(&priv->action_sem); | 8314 | mutex_lock(&priv->action_mutex); |
8215 | /* This is a disassociation event, so kick the firmware to | 8315 | /* This is a disassociation event, so kick the firmware to |
8216 | * look for another AP */ | 8316 | * look for another AP */ |
8217 | if (priv->config & CFG_STATIC_ESSID) | 8317 | if (priv->config & CFG_STATIC_ESSID) |
@@ -8219,7 +8319,7 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv) | |||
8219 | 0); | 8319 | 0); |
8220 | else | 8320 | else |
8221 | ipw2100_set_essid(priv, NULL, 0, 0); | 8321 | ipw2100_set_essid(priv, NULL, 0, 0); |
8222 | up(&priv->action_sem); | 8322 | mutex_unlock(&priv->action_mutex); |
8223 | } | 8323 | } |
8224 | 8324 | ||
8225 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); | 8325 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); |
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h index f6c51441fa87..55b7227198df 100644 --- a/drivers/net/wireless/ipw2100.h +++ b/drivers/net/wireless/ipw2100.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify it | 5 | This program is free software; you can redistribute it and/or modify it |
6 | under the terms of version 2 of the GNU General Public License as | 6 | under the terms of version 2 of the GNU General Public License as |
@@ -41,7 +41,12 @@ | |||
41 | 41 | ||
42 | #include <net/ieee80211.h> | 42 | #include <net/ieee80211.h> |
43 | 43 | ||
44 | #ifdef CONFIG_IPW2100_MONITOR | ||
45 | #include <net/ieee80211_radiotap.h> | ||
46 | #endif | ||
47 | |||
44 | #include <linux/workqueue.h> | 48 | #include <linux/workqueue.h> |
49 | #include <linux/mutex.h> | ||
45 | 50 | ||
46 | struct ipw2100_priv; | 51 | struct ipw2100_priv; |
47 | struct ipw2100_tx_packet; | 52 | struct ipw2100_tx_packet; |
@@ -392,8 +397,10 @@ struct ipw2100_notification { | |||
392 | #define IPW_WEP104_CIPHER (1<<5) | 397 | #define IPW_WEP104_CIPHER (1<<5) |
393 | #define IPW_CKIP_CIPHER (1<<6) | 398 | #define IPW_CKIP_CIPHER (1<<6) |
394 | 399 | ||
395 | #define IPW_AUTH_OPEN 0 | 400 | #define IPW_AUTH_OPEN 0 |
396 | #define IPW_AUTH_SHARED 1 | 401 | #define IPW_AUTH_SHARED 1 |
402 | #define IPW_AUTH_LEAP 2 | ||
403 | #define IPW_AUTH_LEAP_CISCO_ID 0x80 | ||
397 | 404 | ||
398 | struct statistic { | 405 | struct statistic { |
399 | int value; | 406 | int value; |
@@ -588,8 +595,8 @@ struct ipw2100_priv { | |||
588 | int inta_other; | 595 | int inta_other; |
589 | 596 | ||
590 | spinlock_t low_lock; | 597 | spinlock_t low_lock; |
591 | struct semaphore action_sem; | 598 | struct mutex action_mutex; |
592 | struct semaphore adapter_sem; | 599 | struct mutex adapter_mutex; |
593 | 600 | ||
594 | wait_queue_head_t wait_command_queue; | 601 | wait_queue_head_t wait_command_queue; |
595 | }; | 602 | }; |
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 287676ad80df..9dce522526c5 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | 802.11 status code portion of this file from ethereal-0.10.6: | 5 | 802.11 status code portion of this file from ethereal-0.10.6: |
6 | Copyright 2000, Axis Communications AB | 6 | Copyright 2000, Axis Communications AB |
@@ -33,9 +33,9 @@ | |||
33 | #include "ipw2200.h" | 33 | #include "ipw2200.h" |
34 | #include <linux/version.h> | 34 | #include <linux/version.h> |
35 | 35 | ||
36 | #define IPW2200_VERSION "git-1.0.8" | 36 | #define IPW2200_VERSION "git-1.1.1" |
37 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" | 37 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" |
38 | #define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation" | 38 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" |
39 | #define DRV_VERSION IPW2200_VERSION | 39 | #define DRV_VERSION IPW2200_VERSION |
40 | 40 | ||
41 | #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1) | 41 | #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1) |
@@ -55,7 +55,9 @@ static int associate = 1; | |||
55 | static int auto_create = 1; | 55 | static int auto_create = 1; |
56 | static int led = 0; | 56 | static int led = 0; |
57 | static int disable = 0; | 57 | static int disable = 0; |
58 | static int hwcrypto = 1; | 58 | static int bt_coexist = 0; |
59 | static int hwcrypto = 0; | ||
60 | static int roaming = 1; | ||
59 | static const char ipw_modes[] = { | 61 | static const char ipw_modes[] = { |
60 | 'a', 'b', 'g', '?' | 62 | 'a', 'b', 'g', '?' |
61 | }; | 63 | }; |
@@ -151,12 +153,6 @@ static int init_supported_rates(struct ipw_priv *priv, | |||
151 | static void ipw_set_hwcrypto_keys(struct ipw_priv *); | 153 | static void ipw_set_hwcrypto_keys(struct ipw_priv *); |
152 | static void ipw_send_wep_keys(struct ipw_priv *, int); | 154 | static void ipw_send_wep_keys(struct ipw_priv *, int); |
153 | 155 | ||
154 | static int ipw_is_valid_channel(struct ieee80211_device *, u8); | ||
155 | static int ipw_channel_to_index(struct ieee80211_device *, u8); | ||
156 | static u8 ipw_freq_to_channel(struct ieee80211_device *, u32); | ||
157 | static int ipw_set_geo(struct ieee80211_device *, const struct ieee80211_geo *); | ||
158 | static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *); | ||
159 | |||
160 | static int snprint_line(char *buf, size_t count, | 156 | static int snprint_line(char *buf, size_t count, |
161 | const u8 * data, u32 len, u32 ofs) | 157 | const u8 * data, u32 len, u32 ofs) |
162 | { | 158 | { |
@@ -227,12 +223,15 @@ static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len) | |||
227 | return total; | 223 | return total; |
228 | } | 224 | } |
229 | 225 | ||
226 | /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ | ||
230 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); | 227 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); |
231 | #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) | 228 | #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) |
232 | 229 | ||
230 | /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ | ||
233 | static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); | 231 | static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); |
234 | #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) | 232 | #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) |
235 | 233 | ||
234 | /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ | ||
236 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); | 235 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); |
237 | static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) | 236 | static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) |
238 | { | 237 | { |
@@ -241,6 +240,7 @@ static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) | |||
241 | _ipw_write_reg8(a, b, c); | 240 | _ipw_write_reg8(a, b, c); |
242 | } | 241 | } |
243 | 242 | ||
243 | /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ | ||
244 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); | 244 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); |
245 | static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) | 245 | static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) |
246 | { | 246 | { |
@@ -249,6 +249,7 @@ static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) | |||
249 | _ipw_write_reg16(a, b, c); | 249 | _ipw_write_reg16(a, b, c); |
250 | } | 250 | } |
251 | 251 | ||
252 | /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ | ||
252 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); | 253 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); |
253 | static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) | 254 | static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) |
254 | { | 255 | { |
@@ -257,48 +258,70 @@ static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) | |||
257 | _ipw_write_reg32(a, b, c); | 258 | _ipw_write_reg32(a, b, c); |
258 | } | 259 | } |
259 | 260 | ||
261 | /* 8-bit direct write (low 4K) */ | ||
260 | #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs)) | 262 | #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs)) |
263 | |||
264 | /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ | ||
261 | #define ipw_write8(ipw, ofs, val) \ | 265 | #define ipw_write8(ipw, ofs, val) \ |
262 | IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | 266 | IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ |
263 | _ipw_write8(ipw, ofs, val) | 267 | _ipw_write8(ipw, ofs, val) |
264 | 268 | ||
269 | /* 16-bit direct write (low 4K) */ | ||
265 | #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs)) | 270 | #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs)) |
271 | |||
272 | /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ | ||
266 | #define ipw_write16(ipw, ofs, val) \ | 273 | #define ipw_write16(ipw, ofs, val) \ |
267 | IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | 274 | IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ |
268 | _ipw_write16(ipw, ofs, val) | 275 | _ipw_write16(ipw, ofs, val) |
269 | 276 | ||
277 | /* 32-bit direct write (low 4K) */ | ||
270 | #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs)) | 278 | #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs)) |
279 | |||
280 | /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ | ||
271 | #define ipw_write32(ipw, ofs, val) \ | 281 | #define ipw_write32(ipw, ofs, val) \ |
272 | IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | 282 | IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ |
273 | _ipw_write32(ipw, ofs, val) | 283 | _ipw_write32(ipw, ofs, val) |
274 | 284 | ||
285 | /* 8-bit direct read (low 4K) */ | ||
275 | #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs)) | 286 | #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs)) |
287 | |||
288 | /* 8-bit direct read (low 4K), with debug wrapper */ | ||
276 | static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) | 289 | static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) |
277 | { | 290 | { |
278 | IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs)); | 291 | IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs)); |
279 | return _ipw_read8(ipw, ofs); | 292 | return _ipw_read8(ipw, ofs); |
280 | } | 293 | } |
281 | 294 | ||
295 | /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */ | ||
282 | #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs) | 296 | #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs) |
283 | 297 | ||
298 | /* 16-bit direct read (low 4K) */ | ||
284 | #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs)) | 299 | #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs)) |
300 | |||
301 | /* 16-bit direct read (low 4K), with debug wrapper */ | ||
285 | static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) | 302 | static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) |
286 | { | 303 | { |
287 | IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs)); | 304 | IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs)); |
288 | return _ipw_read16(ipw, ofs); | 305 | return _ipw_read16(ipw, ofs); |
289 | } | 306 | } |
290 | 307 | ||
308 | /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */ | ||
291 | #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs) | 309 | #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs) |
292 | 310 | ||
311 | /* 32-bit direct read (low 4K) */ | ||
293 | #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs)) | 312 | #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs)) |
313 | |||
314 | /* 32-bit direct read (low 4K), with debug wrapper */ | ||
294 | static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) | 315 | static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) |
295 | { | 316 | { |
296 | IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs)); | 317 | IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs)); |
297 | return _ipw_read32(ipw, ofs); | 318 | return _ipw_read32(ipw, ofs); |
298 | } | 319 | } |
299 | 320 | ||
321 | /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */ | ||
300 | #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs) | 322 | #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs) |
301 | 323 | ||
324 | /* multi-byte read (above 4K), with debug wrapper */ | ||
302 | static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); | 325 | static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); |
303 | static inline void __ipw_read_indirect(const char *f, int l, | 326 | static inline void __ipw_read_indirect(const char *f, int l, |
304 | struct ipw_priv *a, u32 b, u8 * c, int d) | 327 | struct ipw_priv *a, u32 b, u8 * c, int d) |
@@ -308,15 +331,17 @@ static inline void __ipw_read_indirect(const char *f, int l, | |||
308 | _ipw_read_indirect(a, b, c, d); | 331 | _ipw_read_indirect(a, b, c, d); |
309 | } | 332 | } |
310 | 333 | ||
334 | /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ | ||
311 | #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d) | 335 | #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d) |
312 | 336 | ||
337 | /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */ | ||
313 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, | 338 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, |
314 | int num); | 339 | int num); |
315 | #define ipw_write_indirect(a, b, c, d) \ | 340 | #define ipw_write_indirect(a, b, c, d) \ |
316 | IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ | 341 | IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ |
317 | _ipw_write_indirect(a, b, c, d) | 342 | _ipw_write_indirect(a, b, c, d) |
318 | 343 | ||
319 | /* indirect write s */ | 344 | /* 32-bit indirect write (above 4K) */ |
320 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) | 345 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) |
321 | { | 346 | { |
322 | IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); | 347 | IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); |
@@ -324,22 +349,29 @@ static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) | |||
324 | _ipw_write32(priv, IPW_INDIRECT_DATA, value); | 349 | _ipw_write32(priv, IPW_INDIRECT_DATA, value); |
325 | } | 350 | } |
326 | 351 | ||
352 | /* 8-bit indirect write (above 4K) */ | ||
327 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) | 353 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) |
328 | { | 354 | { |
355 | u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ | ||
356 | u32 dif_len = reg - aligned_addr; | ||
357 | |||
329 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); | 358 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); |
330 | _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); | 359 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
331 | _ipw_write8(priv, IPW_INDIRECT_DATA, value); | 360 | _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value); |
332 | } | 361 | } |
333 | 362 | ||
363 | /* 16-bit indirect write (above 4K) */ | ||
334 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) | 364 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) |
335 | { | 365 | { |
366 | u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ | ||
367 | u32 dif_len = (reg - aligned_addr) & (~0x1ul); | ||
368 | |||
336 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); | 369 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); |
337 | _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); | 370 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
338 | _ipw_write16(priv, IPW_INDIRECT_DATA, value); | 371 | _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value); |
339 | } | 372 | } |
340 | 373 | ||
341 | /* indirect read s */ | 374 | /* 8-bit indirect read (above 4K) */ |
342 | |||
343 | static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) | 375 | static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) |
344 | { | 376 | { |
345 | u32 word; | 377 | u32 word; |
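The fixes to _ipw_write_reg8() and _ipw_write_reg16() above dword-align the address latched into the indirect address register and then offset the data-port access by the sub-dword remainder, so byte and halfword writes land on the intended lane instead of always hitting lane 0. A sketch of the arithmetic, with placeholder port offsets and ~0x3 standing in for IPW_INDIRECT_ADDR_MASK:

    #include <linux/io.h>

    #define EX_INDIRECT_ADDR 0x00000010   /* placeholder port offsets */
    #define EX_INDIRECT_DATA 0x00000014

    static void ex_write_reg8(void __iomem *hw_base, u32 reg, u8 value)
    {
            u32 aligned_addr = reg & ~0x3u;      /* dword-aligned target */
            u32 dif_len = reg - aligned_addr;    /* 0..3: which byte lane */

            writel(aligned_addr, hw_base + EX_INDIRECT_ADDR);
            writeb(value, hw_base + EX_INDIRECT_DATA + dif_len);
    }

The 16-bit variant works the same way but clears the low bit of the remainder, matching the hunk's (reg - aligned_addr) & (~0x1ul), so the halfword lands on a halfword lane.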
@@ -349,6 +381,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) | |||
349 | return (word >> ((reg & 0x3) * 8)) & 0xff; | 381 | return (word >> ((reg & 0x3) * 8)) & 0xff; |
350 | } | 382 | } |
351 | 383 | ||
384 | /* 32-bit indirect read (above 4K) */ | ||
352 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) | 385 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) |
353 | { | 386 | { |
354 | u32 value; | 387 | u32 value; |
@@ -361,11 +394,12 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) | |||
361 | return value; | 394 | return value; |
362 | } | 395 | } |
363 | 396 | ||
364 | /* iterative/auto-increment 32 bit reads and writes */ | 397 | /* General purpose, no alignment requirement, iterative (multi-byte) read, */ |
398 | /* for area above 1st 4K of SRAM/reg space */ | ||
365 | static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | 399 | static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, |
366 | int num) | 400 | int num) |
367 | { | 401 | { |
368 | u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; | 402 | u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ |
369 | u32 dif_len = addr - aligned_addr; | 403 | u32 dif_len = addr - aligned_addr; |
370 | u32 i; | 404 | u32 i; |
371 | 405 | ||
@@ -375,7 +409,7 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
375 | return; | 409 | return; |
376 | } | 410 | } |
377 | 411 | ||
378 | /* Read the first nibble byte by byte */ | 412 | /* Read the first dword (or portion) byte by byte */ |
379 | if (unlikely(dif_len)) { | 413 | if (unlikely(dif_len)) { |
380 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); | 414 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
381 | /* Start reading at aligned_addr + dif_len */ | 415 | /* Start reading at aligned_addr + dif_len */ |
@@ -384,11 +418,12 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
384 | aligned_addr += 4; | 418 | aligned_addr += 4; |
385 | } | 419 | } |
386 | 420 | ||
421 | /* Read all of the middle dwords as dwords, with auto-increment */ | ||
387 | _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); | 422 | _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); |
388 | for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) | 423 | for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) |
389 | *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); | 424 | *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); |
390 | 425 | ||
391 | /* Copy the last nibble */ | 426 | /* Read the last dword (or portion) byte by byte */ |
392 | if (unlikely(num)) { | 427 | if (unlikely(num)) { |
393 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); | 428 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
394 | for (i = 0; num > 0; i++, num--) | 429 | for (i = 0; num > 0; i++, num--) |
@@ -396,10 +431,12 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
396 | } | 431 | } |
397 | } | 432 | } |
398 | 433 | ||
434 | /* General purpose, no alignment requirement, iterative (multi-byte) write, */ | ||
435 | /* for area above 1st 4K of SRAM/reg space */ | ||
399 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | 436 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, |
400 | int num) | 437 | int num) |
401 | { | 438 | { |
402 | u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; | 439 | u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ |
403 | u32 dif_len = addr - aligned_addr; | 440 | u32 dif_len = addr - aligned_addr; |
404 | u32 i; | 441 | u32 i; |
405 | 442 | ||
@@ -409,20 +446,21 @@ static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
409 | return; | 446 | return; |
410 | } | 447 | } |
411 | 448 | ||
412 | /* Write the first nibble byte by byte */ | 449 | /* Write the first dword (or portion) byte by byte */ |
413 | if (unlikely(dif_len)) { | 450 | if (unlikely(dif_len)) { |
414 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); | 451 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
415 | /* Start reading at aligned_addr + dif_len */ | 452 | /* Start writing at aligned_addr + dif_len */ |
416 | for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) | 453 | for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) |
417 | _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); | 454 | _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); |
418 | aligned_addr += 4; | 455 | aligned_addr += 4; |
419 | } | 456 | } |
420 | 457 | ||
458 | /* Write all of the middle dwords as dwords, with auto-increment */ | ||
421 | _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); | 459 | _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); |
422 | for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) | 460 | for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) |
423 | _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); | 461 | _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); |
424 | 462 | ||
425 | /* Copy the last nibble */ | 463 | /* Write the last dword (or portion) byte by byte */ |
426 | if (unlikely(num)) { | 464 | if (unlikely(num)) { |
427 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); | 465 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
428 | for (i = 0; num > 0; i++, num--, buf++) | 466 | for (i = 0; num > 0; i++, num--, buf++) |
@@ -430,17 +468,21 @@ static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
430 | } | 468 | } |
431 | } | 469 | } |
432 | 470 | ||
471 | /* General purpose, no alignment requirement, iterative (multi-byte) write, */ | ||
472 | /* for 1st 4K of SRAM/regs space */ | ||
433 | static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, | 473 | static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, |
434 | int num) | 474 | int num) |
435 | { | 475 | { |
436 | memcpy_toio((priv->hw_base + addr), buf, num); | 476 | memcpy_toio((priv->hw_base + addr), buf, num); |
437 | } | 477 | } |
438 | 478 | ||
479 | /* Set bit(s) in low 4K of SRAM/regs */ | ||
439 | static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) | 480 | static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) |
440 | { | 481 | { |
441 | ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); | 482 | ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); |
442 | } | 483 | } |
443 | 484 | ||
485 | /* Clear bit(s) in low 4K of SRAM/regs */ | ||
444 | static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) | 486 | static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) |
445 | { | 487 | { |
446 | ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); | 488 | ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); |
@@ -701,7 +743,7 @@ static void ipw_init_ordinals(struct ipw_priv *priv) | |||
701 | 743 | ||
702 | } | 744 | } |
703 | 745 | ||
704 | u32 ipw_register_toggle(u32 reg) | 746 | static u32 ipw_register_toggle(u32 reg) |
705 | { | 747 | { |
706 | reg &= ~IPW_START_STANDBY; | 748 | reg &= ~IPW_START_STANDBY; |
707 | if (reg & IPW_GATE_ODMA) | 749 | if (reg & IPW_GATE_ODMA) |
@@ -722,11 +764,11 @@ u32 ipw_register_toggle(u32 reg) | |||
722 | * - On radio OFF, turn off any LEDs started during radio on | 764 | * - On radio OFF, turn off any LEDs started during radio on |
723 | * | 765 | * |
724 | */ | 766 | */ |
725 | #define LD_TIME_LINK_ON 300 | 767 | #define LD_TIME_LINK_ON msecs_to_jiffies(300) |
726 | #define LD_TIME_LINK_OFF 2700 | 768 | #define LD_TIME_LINK_OFF msecs_to_jiffies(2700) |
727 | #define LD_TIME_ACT_ON 250 | 769 | #define LD_TIME_ACT_ON msecs_to_jiffies(250) |
728 | 770 | ||
729 | void ipw_led_link_on(struct ipw_priv *priv) | 771 | static void ipw_led_link_on(struct ipw_priv *priv) |
730 | { | 772 | { |
731 | unsigned long flags; | 773 | unsigned long flags; |
732 | u32 led; | 774 | u32 led; |
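Expressing LD_TIME_LINK_ON, LD_TIME_LINK_OFF and LD_TIME_ACT_ON through msecs_to_jiffies() keeps the LED blink intervals in wall-clock milliseconds regardless of the kernel's HZ setting, since the values are consumed as jiffies when the delayed LED work is armed. A one-line sketch of the idiom with hypothetical names:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    #define EX_LINK_ON_PERIOD msecs_to_jiffies(300)   /* 300 ms at any HZ */

    /* hypothetical blink re-arm helper */
    static void ex_rearm_blink(struct timer_list *blink_timer)
    {
            mod_timer(blink_timer, jiffies + EX_LINK_ON_PERIOD);
    }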
@@ -764,12 +806,12 @@ void ipw_led_link_on(struct ipw_priv *priv) | |||
764 | static void ipw_bg_led_link_on(void *data) | 806 | static void ipw_bg_led_link_on(void *data) |
765 | { | 807 | { |
766 | struct ipw_priv *priv = data; | 808 | struct ipw_priv *priv = data; |
767 | down(&priv->sem); | 809 | mutex_lock(&priv->mutex); |
768 | ipw_led_link_on(data); | 810 | ipw_led_link_on(data); |
769 | up(&priv->sem); | 811 | mutex_unlock(&priv->mutex); |
770 | } | 812 | } |
771 | 813 | ||
772 | void ipw_led_link_off(struct ipw_priv *priv) | 814 | static void ipw_led_link_off(struct ipw_priv *priv) |
773 | { | 815 | { |
774 | unsigned long flags; | 816 | unsigned long flags; |
775 | u32 led; | 817 | u32 led; |
@@ -808,9 +850,9 @@ void ipw_led_link_off(struct ipw_priv *priv) | |||
808 | static void ipw_bg_led_link_off(void *data) | 850 | static void ipw_bg_led_link_off(void *data) |
809 | { | 851 | { |
810 | struct ipw_priv *priv = data; | 852 | struct ipw_priv *priv = data; |
811 | down(&priv->sem); | 853 | mutex_lock(&priv->mutex); |
812 | ipw_led_link_off(data); | 854 | ipw_led_link_off(data); |
813 | up(&priv->sem); | 855 | mutex_unlock(&priv->mutex); |
814 | } | 856 | } |
815 | 857 | ||
816 | static void __ipw_led_activity_on(struct ipw_priv *priv) | 858 | static void __ipw_led_activity_on(struct ipw_priv *priv) |
@@ -847,6 +889,7 @@ static void __ipw_led_activity_on(struct ipw_priv *priv) | |||
847 | } | 889 | } |
848 | } | 890 | } |
849 | 891 | ||
892 | #if 0 | ||
850 | void ipw_led_activity_on(struct ipw_priv *priv) | 893 | void ipw_led_activity_on(struct ipw_priv *priv) |
851 | { | 894 | { |
852 | unsigned long flags; | 895 | unsigned long flags; |
@@ -854,8 +897,9 @@ void ipw_led_activity_on(struct ipw_priv *priv) | |||
854 | __ipw_led_activity_on(priv); | 897 | __ipw_led_activity_on(priv); |
855 | spin_unlock_irqrestore(&priv->lock, flags); | 898 | spin_unlock_irqrestore(&priv->lock, flags); |
856 | } | 899 | } |
900 | #endif /* 0 */ | ||
857 | 901 | ||
858 | void ipw_led_activity_off(struct ipw_priv *priv) | 902 | static void ipw_led_activity_off(struct ipw_priv *priv) |
859 | { | 903 | { |
860 | unsigned long flags; | 904 | unsigned long flags; |
861 | u32 led; | 905 | u32 led; |
@@ -885,12 +929,12 @@ void ipw_led_activity_off(struct ipw_priv *priv) | |||
885 | static void ipw_bg_led_activity_off(void *data) | 929 | static void ipw_bg_led_activity_off(void *data) |
886 | { | 930 | { |
887 | struct ipw_priv *priv = data; | 931 | struct ipw_priv *priv = data; |
888 | down(&priv->sem); | 932 | mutex_lock(&priv->mutex); |
889 | ipw_led_activity_off(data); | 933 | ipw_led_activity_off(data); |
890 | up(&priv->sem); | 934 | mutex_unlock(&priv->mutex); |
891 | } | 935 | } |
892 | 936 | ||
893 | void ipw_led_band_on(struct ipw_priv *priv) | 937 | static void ipw_led_band_on(struct ipw_priv *priv) |
894 | { | 938 | { |
895 | unsigned long flags; | 939 | unsigned long flags; |
896 | u32 led; | 940 | u32 led; |
@@ -925,7 +969,7 @@ void ipw_led_band_on(struct ipw_priv *priv) | |||
925 | spin_unlock_irqrestore(&priv->lock, flags); | 969 | spin_unlock_irqrestore(&priv->lock, flags); |
926 | } | 970 | } |
927 | 971 | ||
928 | void ipw_led_band_off(struct ipw_priv *priv) | 972 | static void ipw_led_band_off(struct ipw_priv *priv) |
929 | { | 973 | { |
930 | unsigned long flags; | 974 | unsigned long flags; |
931 | u32 led; | 975 | u32 led; |
@@ -948,24 +992,24 @@ void ipw_led_band_off(struct ipw_priv *priv) | |||
948 | spin_unlock_irqrestore(&priv->lock, flags); | 992 | spin_unlock_irqrestore(&priv->lock, flags); |
949 | } | 993 | } |
950 | 994 | ||
951 | void ipw_led_radio_on(struct ipw_priv *priv) | 995 | static void ipw_led_radio_on(struct ipw_priv *priv) |
952 | { | 996 | { |
953 | ipw_led_link_on(priv); | 997 | ipw_led_link_on(priv); |
954 | } | 998 | } |
955 | 999 | ||
956 | void ipw_led_radio_off(struct ipw_priv *priv) | 1000 | static void ipw_led_radio_off(struct ipw_priv *priv) |
957 | { | 1001 | { |
958 | ipw_led_activity_off(priv); | 1002 | ipw_led_activity_off(priv); |
959 | ipw_led_link_off(priv); | 1003 | ipw_led_link_off(priv); |
960 | } | 1004 | } |
961 | 1005 | ||
962 | void ipw_led_link_up(struct ipw_priv *priv) | 1006 | static void ipw_led_link_up(struct ipw_priv *priv) |
963 | { | 1007 | { |
964 | /* Set the Link Led on for all nic types */ | 1008 | /* Set the Link Led on for all nic types */ |
965 | ipw_led_link_on(priv); | 1009 | ipw_led_link_on(priv); |
966 | } | 1010 | } |
967 | 1011 | ||
968 | void ipw_led_link_down(struct ipw_priv *priv) | 1012 | static void ipw_led_link_down(struct ipw_priv *priv) |
969 | { | 1013 | { |
970 | ipw_led_activity_off(priv); | 1014 | ipw_led_activity_off(priv); |
971 | ipw_led_link_off(priv); | 1015 | ipw_led_link_off(priv); |
@@ -974,7 +1018,7 @@ void ipw_led_link_down(struct ipw_priv *priv) | |||
974 | ipw_led_radio_off(priv); | 1018 | ipw_led_radio_off(priv); |
975 | } | 1019 | } |
976 | 1020 | ||
977 | void ipw_led_init(struct ipw_priv *priv) | 1021 | static void ipw_led_init(struct ipw_priv *priv) |
978 | { | 1022 | { |
979 | priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; | 1023 | priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; |
980 | 1024 | ||
@@ -1025,7 +1069,7 @@ void ipw_led_init(struct ipw_priv *priv) | |||
1025 | } | 1069 | } |
1026 | } | 1070 | } |
1027 | 1071 | ||
1028 | void ipw_led_shutdown(struct ipw_priv *priv) | 1072 | static void ipw_led_shutdown(struct ipw_priv *priv) |
1029 | { | 1073 | { |
1030 | ipw_led_activity_off(priv); | 1074 | ipw_led_activity_off(priv); |
1031 | ipw_led_link_off(priv); | 1075 | ipw_led_link_off(priv); |
@@ -1074,6 +1118,7 @@ static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, | |||
1074 | 1118 | ||
1075 | static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) | 1119 | static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) |
1076 | { | 1120 | { |
1121 | /* length = 1st dword in log */ | ||
1077 | return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); | 1122 | return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); |
1078 | } | 1123 | } |
1079 | 1124 | ||
@@ -1603,7 +1648,7 @@ static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, | |||
1603 | break; | 1648 | break; |
1604 | } | 1649 | } |
1605 | 1650 | ||
1606 | if (ipw_is_valid_channel(priv->ieee, channel)) | 1651 | if (ieee80211_is_valid_channel(priv->ieee, channel)) |
1607 | priv->speed_scan[pos++] = channel; | 1652 | priv->speed_scan[pos++] = channel; |
1608 | else | 1653 | else |
1609 | IPW_WARNING("Skipping invalid channel request: %d\n", | 1654 | IPW_WARNING("Skipping invalid channel request: %d\n", |
@@ -1751,9 +1796,9 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
1751 | } | 1796 | } |
1752 | 1797 | ||
1753 | if (inta & IPW_INTA_BIT_FATAL_ERROR) { | 1798 | if (inta & IPW_INTA_BIT_FATAL_ERROR) { |
1754 | IPW_ERROR("Firmware error detected. Restarting.\n"); | 1799 | IPW_WARNING("Firmware error detected. Restarting.\n"); |
1755 | if (priv->error) { | 1800 | if (priv->error) { |
1756 | IPW_ERROR("Sysfs 'error' log already exists.\n"); | 1801 | IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); |
1757 | #ifdef CONFIG_IPW2200_DEBUG | 1802 | #ifdef CONFIG_IPW2200_DEBUG |
1758 | if (ipw_debug_level & IPW_DL_FW_ERRORS) { | 1803 | if (ipw_debug_level & IPW_DL_FW_ERRORS) { |
1759 | struct ipw_fw_error *error = | 1804 | struct ipw_fw_error *error = |
@@ -1766,10 +1811,10 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
1766 | } else { | 1811 | } else { |
1767 | priv->error = ipw_alloc_error_log(priv); | 1812 | priv->error = ipw_alloc_error_log(priv); |
1768 | if (priv->error) | 1813 | if (priv->error) |
1769 | IPW_ERROR("Sysfs 'error' log captured.\n"); | 1814 | IPW_DEBUG_FW("Sysfs 'error' log captured.\n"); |
1770 | else | 1815 | else |
1771 | IPW_ERROR("Error allocating sysfs 'error' " | 1816 | IPW_DEBUG_FW("Error allocating sysfs 'error' " |
1772 | "log.\n"); | 1817 | "log.\n"); |
1773 | #ifdef CONFIG_IPW2200_DEBUG | 1818 | #ifdef CONFIG_IPW2200_DEBUG |
1774 | if (ipw_debug_level & IPW_DL_FW_ERRORS) | 1819 | if (ipw_debug_level & IPW_DL_FW_ERRORS) |
1775 | ipw_dump_error_log(priv, priv->error); | 1820 | ipw_dump_error_log(priv, priv->error); |
@@ -1870,7 +1915,8 @@ static char *get_cmd_string(u8 cmd) | |||
1870 | } | 1915 | } |
1871 | 1916 | ||
1872 | #define HOST_COMPLETE_TIMEOUT HZ | 1917 | #define HOST_COMPLETE_TIMEOUT HZ |
1873 | static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | 1918 | |
1919 | static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | ||
1874 | { | 1920 | { |
1875 | int rc = 0; | 1921 | int rc = 0; |
1876 | unsigned long flags; | 1922 | unsigned long flags; |
@@ -1897,9 +1943,15 @@ static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | |||
1897 | IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", | 1943 | IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", |
1898 | get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, | 1944 | get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, |
1899 | priv->status); | 1945 | priv->status); |
1900 | printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); | ||
1901 | 1946 | ||
1902 | rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0); | 1947 | #ifndef DEBUG_CMD_WEP_KEY |
1948 | if (cmd->cmd == IPW_CMD_WEP_KEY) | ||
1949 | IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n"); | ||
1950 | else | ||
1951 | #endif | ||
1952 | printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); | ||
1953 | |||
1954 | rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0); | ||
1903 | if (rc) { | 1955 | if (rc) { |
1904 | priv->status &= ~STATUS_HCMD_ACTIVE; | 1956 | priv->status &= ~STATUS_HCMD_ACTIVE; |
1905 | IPW_ERROR("Failed to send %s: Reason %d\n", | 1957 | IPW_ERROR("Failed to send %s: Reason %d\n", |
@@ -1942,61 +1994,62 @@ static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | |||
1942 | return rc; | 1994 | return rc; |
1943 | } | 1995 | } |
1944 | 1996 | ||
1945 | static int ipw_send_host_complete(struct ipw_priv *priv) | 1997 | static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command) |
1998 | { | ||
1999 | struct host_cmd cmd = { | ||
2000 | .cmd = command, | ||
2001 | }; | ||
2002 | |||
2003 | return __ipw_send_cmd(priv, &cmd); | ||
2004 | } | ||
2005 | |||
2006 | static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len, | ||
2007 | void *data) | ||
1946 | { | 2008 | { |
1947 | struct host_cmd cmd = { | 2009 | struct host_cmd cmd = { |
1948 | .cmd = IPW_CMD_HOST_COMPLETE, | 2010 | .cmd = command, |
1949 | .len = 0 | 2011 | .len = len, |
2012 | .param = data, | ||
1950 | }; | 2013 | }; |
1951 | 2014 | ||
2015 | return __ipw_send_cmd(priv, &cmd); | ||
2016 | } | ||
2017 | |||
2018 | static int ipw_send_host_complete(struct ipw_priv *priv) | ||
2019 | { | ||
1952 | if (!priv) { | 2020 | if (!priv) { |
1953 | IPW_ERROR("Invalid args\n"); | 2021 | IPW_ERROR("Invalid args\n"); |
1954 | return -1; | 2022 | return -1; |
1955 | } | 2023 | } |
1956 | 2024 | ||
1957 | return ipw_send_cmd(priv, &cmd); | 2025 | return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); |
1958 | } | 2026 | } |
1959 | 2027 | ||
1960 | static int ipw_send_system_config(struct ipw_priv *priv, | 2028 | static int ipw_send_system_config(struct ipw_priv *priv, |
1961 | struct ipw_sys_config *config) | 2029 | struct ipw_sys_config *config) |
1962 | { | 2030 | { |
1963 | struct host_cmd cmd = { | ||
1964 | .cmd = IPW_CMD_SYSTEM_CONFIG, | ||
1965 | .len = sizeof(*config) | ||
1966 | }; | ||
1967 | |||
1968 | if (!priv || !config) { | 2031 | if (!priv || !config) { |
1969 | IPW_ERROR("Invalid args\n"); | 2032 | IPW_ERROR("Invalid args\n"); |
1970 | return -1; | 2033 | return -1; |
1971 | } | 2034 | } |
1972 | 2035 | ||
1973 | memcpy(cmd.param, config, sizeof(*config)); | 2036 | return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config), |
1974 | return ipw_send_cmd(priv, &cmd); | 2037 | config); |
1975 | } | 2038 | } |
1976 | 2039 | ||
1977 | static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) | 2040 | static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) |
1978 | { | 2041 | { |
1979 | struct host_cmd cmd = { | ||
1980 | .cmd = IPW_CMD_SSID, | ||
1981 | .len = min(len, IW_ESSID_MAX_SIZE) | ||
1982 | }; | ||
1983 | |||
1984 | if (!priv || !ssid) { | 2042 | if (!priv || !ssid) { |
1985 | IPW_ERROR("Invalid args\n"); | 2043 | IPW_ERROR("Invalid args\n"); |
1986 | return -1; | 2044 | return -1; |
1987 | } | 2045 | } |
1988 | 2046 | ||
1989 | memcpy(cmd.param, ssid, cmd.len); | 2047 | return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE), |
1990 | return ipw_send_cmd(priv, &cmd); | 2048 | ssid); |
1991 | } | 2049 | } |
1992 | 2050 | ||
1993 | static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) | 2051 | static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) |
1994 | { | 2052 | { |
1995 | struct host_cmd cmd = { | ||
1996 | .cmd = IPW_CMD_ADAPTER_ADDRESS, | ||
1997 | .len = ETH_ALEN | ||
1998 | }; | ||
1999 | |||
2000 | if (!priv || !mac) { | 2053 | if (!priv || !mac) { |
2001 | IPW_ERROR("Invalid args\n"); | 2054 | IPW_ERROR("Invalid args\n"); |
2002 | return -1; | 2055 | return -1; |
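The long hunk above is the heart of the command-path cleanup: instead of every caller declaring a struct host_cmd and memcpy()ing its payload into cmd.param, commands without a payload go through ipw_send_cmd_simple() and commands with one go through ipw_send_cmd_pdu(), with param now a pointer handed straight to the queueing code. A standalone sketch of that split, with made-up command ids and a stub in place of __ipw_send_cmd():

    #include <stdio.h>
    #include <stdint.h>

    struct host_cmd {
            uint8_t cmd;            /* command id                  */
            uint8_t len;            /* payload length in bytes     */
            const void *param;      /* payload, NULL when len == 0 */
    };

    /* Stub for the low-level path (__ipw_send_cmd in the patch). */
    static int __send_cmd(const struct host_cmd *cmd)
    {
            printf("cmd %u, %u byte(s)\n", (unsigned)cmd->cmd, (unsigned)cmd->len);
            return 0;
    }

    static int send_cmd_simple(uint8_t command)
    {
            struct host_cmd cmd = { .cmd = command };

            return __send_cmd(&cmd);
    }

    static int send_cmd_pdu(uint8_t command, uint8_t len, const void *data)
    {
            struct host_cmd cmd = { .cmd = command, .len = len, .param = data };

            return __send_cmd(&cmd);
    }

    int main(void)
    {
            uint8_t mac[6] = { 0 };

            send_cmd_simple(1);                         /* no payload     */
            return send_cmd_pdu(2, sizeof(mac), mac);   /* 6-byte payload */
    }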
@@ -2005,8 +2058,7 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) | |||
2005 | IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n", | 2058 | IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n", |
2006 | priv->net_dev->name, MAC_ARG(mac)); | 2059 | priv->net_dev->name, MAC_ARG(mac)); |
2007 | 2060 | ||
2008 | memcpy(cmd.param, mac, ETH_ALEN); | 2061 | return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); |
2009 | return ipw_send_cmd(priv, &cmd); | ||
2010 | } | 2062 | } |
2011 | 2063 | ||
2012 | /* | 2064 | /* |
@@ -2036,9 +2088,9 @@ static void ipw_adapter_restart(void *adapter) | |||
2036 | static void ipw_bg_adapter_restart(void *data) | 2088 | static void ipw_bg_adapter_restart(void *data) |
2037 | { | 2089 | { |
2038 | struct ipw_priv *priv = data; | 2090 | struct ipw_priv *priv = data; |
2039 | down(&priv->sem); | 2091 | mutex_lock(&priv->mutex); |
2040 | ipw_adapter_restart(data); | 2092 | ipw_adapter_restart(data); |
2041 | up(&priv->sem); | 2093 | mutex_unlock(&priv->mutex); |
2042 | } | 2094 | } |
2043 | 2095 | ||
2044 | #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) | 2096 | #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) |
@@ -2048,8 +2100,8 @@ static void ipw_scan_check(void *data) | |||
2048 | struct ipw_priv *priv = data; | 2100 | struct ipw_priv *priv = data; |
2049 | if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { | 2101 | if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { |
2050 | IPW_DEBUG_SCAN("Scan completion watchdog resetting " | 2102 | IPW_DEBUG_SCAN("Scan completion watchdog resetting " |
2051 | "adapter (%dms).\n", | 2103 | "adapter after (%dms).\n", |
2052 | IPW_SCAN_CHECK_WATCHDOG / 100); | 2104 | jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); |
2053 | queue_work(priv->workqueue, &priv->adapter_restart); | 2105 | queue_work(priv->workqueue, &priv->adapter_restart); |
2054 | } | 2106 | } |
2055 | } | 2107 | } |
@@ -2057,59 +2109,48 @@ static void ipw_scan_check(void *data) | |||
2057 | static void ipw_bg_scan_check(void *data) | 2109 | static void ipw_bg_scan_check(void *data) |
2058 | { | 2110 | { |
2059 | struct ipw_priv *priv = data; | 2111 | struct ipw_priv *priv = data; |
2060 | down(&priv->sem); | 2112 | mutex_lock(&priv->mutex); |
2061 | ipw_scan_check(data); | 2113 | ipw_scan_check(data); |
2062 | up(&priv->sem); | 2114 | mutex_unlock(&priv->mutex); |
2063 | } | 2115 | } |
2064 | 2116 | ||
2065 | static int ipw_send_scan_request_ext(struct ipw_priv *priv, | 2117 | static int ipw_send_scan_request_ext(struct ipw_priv *priv, |
2066 | struct ipw_scan_request_ext *request) | 2118 | struct ipw_scan_request_ext *request) |
2067 | { | 2119 | { |
2068 | struct host_cmd cmd = { | 2120 | return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT, |
2069 | .cmd = IPW_CMD_SCAN_REQUEST_EXT, | 2121 | sizeof(*request), request); |
2070 | .len = sizeof(*request) | ||
2071 | }; | ||
2072 | |||
2073 | memcpy(cmd.param, request, sizeof(*request)); | ||
2074 | return ipw_send_cmd(priv, &cmd); | ||
2075 | } | 2122 | } |
2076 | 2123 | ||
2077 | static int ipw_send_scan_abort(struct ipw_priv *priv) | 2124 | static int ipw_send_scan_abort(struct ipw_priv *priv) |
2078 | { | 2125 | { |
2079 | struct host_cmd cmd = { | ||
2080 | .cmd = IPW_CMD_SCAN_ABORT, | ||
2081 | .len = 0 | ||
2082 | }; | ||
2083 | |||
2084 | if (!priv) { | 2126 | if (!priv) { |
2085 | IPW_ERROR("Invalid args\n"); | 2127 | IPW_ERROR("Invalid args\n"); |
2086 | return -1; | 2128 | return -1; |
2087 | } | 2129 | } |
2088 | 2130 | ||
2089 | return ipw_send_cmd(priv, &cmd); | 2131 | return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT); |
2090 | } | 2132 | } |
2091 | 2133 | ||
2092 | static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) | 2134 | static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) |
2093 | { | 2135 | { |
2094 | struct host_cmd cmd = { | 2136 | struct ipw_sensitivity_calib calib = { |
2095 | .cmd = IPW_CMD_SENSITIVITY_CALIB, | 2137 | .beacon_rssi_raw = sens, |
2096 | .len = sizeof(struct ipw_sensitivity_calib) | ||
2097 | }; | 2138 | }; |
2098 | struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *) | 2139 | |
2099 | &cmd.param; | 2140 | return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), |
2100 | calib->beacon_rssi_raw = sens; | 2141 | &calib); |
2101 | return ipw_send_cmd(priv, &cmd); | ||
2102 | } | 2142 | } |
2103 | 2143 | ||
2104 | static int ipw_send_associate(struct ipw_priv *priv, | 2144 | static int ipw_send_associate(struct ipw_priv *priv, |
2105 | struct ipw_associate *associate) | 2145 | struct ipw_associate *associate) |
2106 | { | 2146 | { |
2107 | struct host_cmd cmd = { | ||
2108 | .cmd = IPW_CMD_ASSOCIATE, | ||
2109 | .len = sizeof(*associate) | ||
2110 | }; | ||
2111 | |||
2112 | struct ipw_associate tmp_associate; | 2147 | struct ipw_associate tmp_associate; |
2148 | |||
2149 | if (!priv || !associate) { | ||
2150 | IPW_ERROR("Invalid args\n"); | ||
2151 | return -1; | ||
2152 | } | ||
2153 | |||
2113 | memcpy(&tmp_associate, associate, sizeof(*associate)); | 2154 | memcpy(&tmp_associate, associate, sizeof(*associate)); |
2114 | tmp_associate.policy_support = | 2155 | tmp_associate.policy_support = |
2115 | cpu_to_le16(tmp_associate.policy_support); | 2156 | cpu_to_le16(tmp_associate.policy_support); |
@@ -2122,85 +2163,60 @@ static int ipw_send_associate(struct ipw_priv *priv, | |||
2122 | cpu_to_le16(tmp_associate.beacon_interval); | 2163 | cpu_to_le16(tmp_associate.beacon_interval); |
2123 | tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window); | 2164 | tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window); |
2124 | 2165 | ||
2125 | if (!priv || !associate) { | 2166 | return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate), |
2126 | IPW_ERROR("Invalid args\n"); | 2167 | &tmp_associate); |
2127 | return -1; | ||
2128 | } | ||
2129 | |||
2130 | memcpy(cmd.param, &tmp_associate, sizeof(*associate)); | ||
2131 | return ipw_send_cmd(priv, &cmd); | ||
2132 | } | 2168 | } |
2133 | 2169 | ||
2134 | static int ipw_send_supported_rates(struct ipw_priv *priv, | 2170 | static int ipw_send_supported_rates(struct ipw_priv *priv, |
2135 | struct ipw_supported_rates *rates) | 2171 | struct ipw_supported_rates *rates) |
2136 | { | 2172 | { |
2137 | struct host_cmd cmd = { | ||
2138 | .cmd = IPW_CMD_SUPPORTED_RATES, | ||
2139 | .len = sizeof(*rates) | ||
2140 | }; | ||
2141 | |||
2142 | if (!priv || !rates) { | 2173 | if (!priv || !rates) { |
2143 | IPW_ERROR("Invalid args\n"); | 2174 | IPW_ERROR("Invalid args\n"); |
2144 | return -1; | 2175 | return -1; |
2145 | } | 2176 | } |
2146 | 2177 | ||
2147 | memcpy(cmd.param, rates, sizeof(*rates)); | 2178 | return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates), |
2148 | return ipw_send_cmd(priv, &cmd); | 2179 | rates); |
2149 | } | 2180 | } |
2150 | 2181 | ||
2151 | static int ipw_set_random_seed(struct ipw_priv *priv) | 2182 | static int ipw_set_random_seed(struct ipw_priv *priv) |
2152 | { | 2183 | { |
2153 | struct host_cmd cmd = { | 2184 | u32 val; |
2154 | .cmd = IPW_CMD_SEED_NUMBER, | ||
2155 | .len = sizeof(u32) | ||
2156 | }; | ||
2157 | 2185 | ||
2158 | if (!priv) { | 2186 | if (!priv) { |
2159 | IPW_ERROR("Invalid args\n"); | 2187 | IPW_ERROR("Invalid args\n"); |
2160 | return -1; | 2188 | return -1; |
2161 | } | 2189 | } |
2162 | 2190 | ||
2163 | get_random_bytes(&cmd.param, sizeof(u32)); | 2191 | get_random_bytes(&val, sizeof(val)); |
2164 | 2192 | ||
2165 | return ipw_send_cmd(priv, &cmd); | 2193 | return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val); |
2166 | } | 2194 | } |
2167 | 2195 | ||
2168 | static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) | 2196 | static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) |
2169 | { | 2197 | { |
2170 | struct host_cmd cmd = { | ||
2171 | .cmd = IPW_CMD_CARD_DISABLE, | ||
2172 | .len = sizeof(u32) | ||
2173 | }; | ||
2174 | |||
2175 | if (!priv) { | 2198 | if (!priv) { |
2176 | IPW_ERROR("Invalid args\n"); | 2199 | IPW_ERROR("Invalid args\n"); |
2177 | return -1; | 2200 | return -1; |
2178 | } | 2201 | } |
2179 | 2202 | ||
2180 | *((u32 *) & cmd.param) = phy_off; | 2203 | return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off), |
2181 | 2204 | &phy_off); | |
2182 | return ipw_send_cmd(priv, &cmd); | ||
2183 | } | 2205 | } |
2184 | 2206 | ||
2185 | static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) | 2207 | static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) |
2186 | { | 2208 | { |
2187 | struct host_cmd cmd = { | ||
2188 | .cmd = IPW_CMD_TX_POWER, | ||
2189 | .len = sizeof(*power) | ||
2190 | }; | ||
2191 | |||
2192 | if (!priv || !power) { | 2209 | if (!priv || !power) { |
2193 | IPW_ERROR("Invalid args\n"); | 2210 | IPW_ERROR("Invalid args\n"); |
2194 | return -1; | 2211 | return -1; |
2195 | } | 2212 | } |
2196 | 2213 | ||
2197 | memcpy(cmd.param, power, sizeof(*power)); | 2214 | return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power); |
2198 | return ipw_send_cmd(priv, &cmd); | ||
2199 | } | 2215 | } |
2200 | 2216 | ||
2201 | static int ipw_set_tx_power(struct ipw_priv *priv) | 2217 | static int ipw_set_tx_power(struct ipw_priv *priv) |
2202 | { | 2218 | { |
2203 | const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); | 2219 | const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee); |
2204 | struct ipw_tx_power tx_power; | 2220 | struct ipw_tx_power tx_power; |
2205 | s8 max_power; | 2221 | s8 max_power; |
2206 | int i; | 2222 | int i; |
@@ -2247,18 +2263,14 @@ static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) | |||
2247 | struct ipw_rts_threshold rts_threshold = { | 2263 | struct ipw_rts_threshold rts_threshold = { |
2248 | .rts_threshold = rts, | 2264 | .rts_threshold = rts, |
2249 | }; | 2265 | }; |
2250 | struct host_cmd cmd = { | ||
2251 | .cmd = IPW_CMD_RTS_THRESHOLD, | ||
2252 | .len = sizeof(rts_threshold) | ||
2253 | }; | ||
2254 | 2266 | ||
2255 | if (!priv) { | 2267 | if (!priv) { |
2256 | IPW_ERROR("Invalid args\n"); | 2268 | IPW_ERROR("Invalid args\n"); |
2257 | return -1; | 2269 | return -1; |
2258 | } | 2270 | } |
2259 | 2271 | ||
2260 | memcpy(cmd.param, &rts_threshold, sizeof(rts_threshold)); | 2272 | return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD, |
2261 | return ipw_send_cmd(priv, &cmd); | 2273 | sizeof(rts_threshold), &rts_threshold); |
2262 | } | 2274 | } |
2263 | 2275 | ||
2264 | static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) | 2276 | static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) |
@@ -2266,27 +2278,19 @@ static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) | |||
2266 | struct ipw_frag_threshold frag_threshold = { | 2278 | struct ipw_frag_threshold frag_threshold = { |
2267 | .frag_threshold = frag, | 2279 | .frag_threshold = frag, |
2268 | }; | 2280 | }; |
2269 | struct host_cmd cmd = { | ||
2270 | .cmd = IPW_CMD_FRAG_THRESHOLD, | ||
2271 | .len = sizeof(frag_threshold) | ||
2272 | }; | ||
2273 | 2281 | ||
2274 | if (!priv) { | 2282 | if (!priv) { |
2275 | IPW_ERROR("Invalid args\n"); | 2283 | IPW_ERROR("Invalid args\n"); |
2276 | return -1; | 2284 | return -1; |
2277 | } | 2285 | } |
2278 | 2286 | ||
2279 | memcpy(cmd.param, &frag_threshold, sizeof(frag_threshold)); | 2287 | return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD, |
2280 | return ipw_send_cmd(priv, &cmd); | 2288 | sizeof(frag_threshold), &frag_threshold); |
2281 | } | 2289 | } |
2282 | 2290 | ||
2283 | static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) | 2291 | static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) |
2284 | { | 2292 | { |
2285 | struct host_cmd cmd = { | 2293 | u32 param; |
2286 | .cmd = IPW_CMD_POWER_MODE, | ||
2287 | .len = sizeof(u32) | ||
2288 | }; | ||
2289 | u32 *param = (u32 *) (&cmd.param); | ||
2290 | 2294 | ||
2291 | if (!priv) { | 2295 | if (!priv) { |
2292 | IPW_ERROR("Invalid args\n"); | 2296 | IPW_ERROR("Invalid args\n"); |
@@ -2297,17 +2301,18 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) | |||
2297 | * level */ | 2301 | * level */ |
2298 | switch (mode) { | 2302 | switch (mode) { |
2299 | case IPW_POWER_BATTERY: | 2303 | case IPW_POWER_BATTERY: |
2300 | *param = IPW_POWER_INDEX_3; | 2304 | param = IPW_POWER_INDEX_3; |
2301 | break; | 2305 | break; |
2302 | case IPW_POWER_AC: | 2306 | case IPW_POWER_AC: |
2303 | *param = IPW_POWER_MODE_CAM; | 2307 | param = IPW_POWER_MODE_CAM; |
2304 | break; | 2308 | break; |
2305 | default: | 2309 | default: |
2306 | *param = mode; | 2310 | param = mode; |
2307 | break; | 2311 | break; |
2308 | } | 2312 | } |
2309 | 2313 | ||
2310 | return ipw_send_cmd(priv, &cmd); | 2314 | return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), |
2315 | ¶m); | ||
2311 | } | 2316 | } |
2312 | 2317 | ||
2313 | static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) | 2318 | static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) |
@@ -2316,18 +2321,14 @@ static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) | |||
2316 | .short_retry_limit = slimit, | 2321 | .short_retry_limit = slimit, |
2317 | .long_retry_limit = llimit | 2322 | .long_retry_limit = llimit |
2318 | }; | 2323 | }; |
2319 | struct host_cmd cmd = { | ||
2320 | .cmd = IPW_CMD_RETRY_LIMIT, | ||
2321 | .len = sizeof(retry_limit) | ||
2322 | }; | ||
2323 | 2324 | ||
2324 | if (!priv) { | 2325 | if (!priv) { |
2325 | IPW_ERROR("Invalid args\n"); | 2326 | IPW_ERROR("Invalid args\n"); |
2326 | return -1; | 2327 | return -1; |
2327 | } | 2328 | } |
2328 | 2329 | ||
2329 | memcpy(cmd.param, &retry_limit, sizeof(retry_limit)); | 2330 | return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit), |
2330 | return ipw_send_cmd(priv, &cmd); | 2331 | &retry_limit); |
2331 | } | 2332 | } |
2332 | 2333 | ||
2333 | /* | 2334 | /* |
@@ -2454,7 +2455,7 @@ static void ipw_eeprom_init_sram(struct ipw_priv *priv) | |||
2454 | /* | 2455 | /* |
2455 | If the data looks correct, then copy it to our private | 2456 | If the data looks correct, then copy it to our private |
2456 | copy. Otherwise let the firmware know to perform the operation | 2457 | copy. Otherwise let the firmware know to perform the operation |
2457 | on it's own | 2458 | on its own. |
2458 | */ | 2459 | */ |
2459 | if (priv->eeprom[EEPROM_VERSION] != 0) { | 2460 | if (priv->eeprom[EEPROM_VERSION] != 0) { |
2460 | IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); | 2461 | IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); |
@@ -2707,22 +2708,25 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, | |||
2707 | 2708 | ||
2708 | static int ipw_fw_dma_wait(struct ipw_priv *priv) | 2709 | static int ipw_fw_dma_wait(struct ipw_priv *priv) |
2709 | { | 2710 | { |
2710 | u32 current_index = 0; | 2711 | u32 current_index = 0, previous_index; |
2711 | u32 watchdog = 0; | 2712 | u32 watchdog = 0; |
2712 | 2713 | ||
2713 | IPW_DEBUG_FW(">> : \n"); | 2714 | IPW_DEBUG_FW(">> : \n"); |
2714 | 2715 | ||
2715 | current_index = ipw_fw_dma_command_block_index(priv); | 2716 | current_index = ipw_fw_dma_command_block_index(priv); |
2716 | IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n", | 2717 | IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", |
2717 | (int)priv->sram_desc.last_cb_index); | 2718 | (int)priv->sram_desc.last_cb_index); |
2718 | 2719 | ||
2719 | while (current_index < priv->sram_desc.last_cb_index) { | 2720 | while (current_index < priv->sram_desc.last_cb_index) { |
2720 | udelay(50); | 2721 | udelay(50); |
2722 | previous_index = current_index; | ||
2721 | current_index = ipw_fw_dma_command_block_index(priv); | 2723 | current_index = ipw_fw_dma_command_block_index(priv); |
2722 | 2724 | ||
2723 | watchdog++; | 2725 | if (previous_index < current_index) { |
2724 | 2726 | watchdog = 0; | |
2725 | if (watchdog > 400) { | 2727 | continue; |
2728 | } | ||
2729 | if (++watchdog > 400) { | ||
2726 | IPW_DEBUG_FW_INFO("Timeout\n"); | 2730 | IPW_DEBUG_FW_INFO("Timeout\n"); |
2727 | ipw_fw_dma_dump_command_block(priv); | 2731 | ipw_fw_dma_dump_command_block(priv); |
2728 | ipw_fw_dma_abort(priv); | 2732 | ipw_fw_dma_abort(priv); |
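ipw_fw_dma_wait() now tracks the previous command-block index and resets the watchdog whenever the index advances, so the 400-poll budget (about 20 ms at 50 us per poll) only counts time spent stalled, not the whole transfer. The control flow, reduced to a standalone sketch with the hardware register read replaced by a counter:

    #include <stdio.h>

    /* Stand-in for ipw_fw_dma_command_block_index(): pretend the DMA
     * engine advances by one block per poll. */
    static unsigned int fake_block_index(void)
    {
            static unsigned int idx;

            return ++idx;
    }

    static int wait_for_dma(unsigned int last_index)
    {
            unsigned int cur_index = fake_block_index();
            unsigned int prev_index, watchdog = 0;

            while (cur_index < last_index) {
                    /* udelay(50) in the driver */
                    prev_index = cur_index;
                    cur_index = fake_block_index();

                    if (prev_index < cur_index) {
                            watchdog = 0;   /* progress: restart the budget */
                            continue;
                    }
                    if (++watchdog > 400)   /* ~20 ms with no progress */
                            return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d\n", wait_for_dma(16));
            return 0;
    }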
@@ -2772,6 +2776,7 @@ static inline int ipw_alive(struct ipw_priv *priv) | |||
2772 | return ipw_read32(priv, 0x90) == 0xd55555d5; | 2776 | return ipw_read32(priv, 0x90) == 0xd55555d5; |
2773 | } | 2777 | } |
2774 | 2778 | ||
2779 | /* timeout in msec, attempted in 10-msec quanta */ | ||
2775 | static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, | 2780 | static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, |
2776 | int timeout) | 2781 | int timeout) |
2777 | { | 2782 | { |
@@ -2800,10 +2805,11 @@ static int ipw_stop_master(struct ipw_priv *priv) | |||
2800 | /* stop master. typical delay - 0 */ | 2805 | /* stop master. typical delay - 0 */ |
2801 | ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); | 2806 | ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); |
2802 | 2807 | ||
2808 | /* timeout is in msec, polled in 10-msec quanta */ | ||
2803 | rc = ipw_poll_bit(priv, IPW_RESET_REG, | 2809 | rc = ipw_poll_bit(priv, IPW_RESET_REG, |
2804 | IPW_RESET_REG_MASTER_DISABLED, 100); | 2810 | IPW_RESET_REG_MASTER_DISABLED, 100); |
2805 | if (rc < 0) { | 2811 | if (rc < 0) { |
2806 | IPW_ERROR("stop master failed in 10ms\n"); | 2812 | IPW_ERROR("wait for stop master failed after 100ms\n"); |
2807 | return -1; | 2813 | return -1; |
2808 | } | 2814 | } |
2809 | 2815 | ||
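These two hunks only adjust comments and an error string, but they pin down the ipw_poll_bit() contract: the last argument is a timeout in milliseconds, rechecked in 10 ms steps, and the return value is the elapsed time (negative on timeout), which is why the stop-master message now says 100 ms. A standalone sketch of a helper with that contract; the register read is a stand-in:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for a device register: the bit comes up on the 3rd read. */
    static uint32_t read_reg(void)
    {
            static int calls;

            return ++calls >= 3 ? 0x1 : 0x0;
    }

    /* Returns elapsed ms once (reg & mask) is set, or -1 on timeout.
     * timeout_ms is polled in 10 ms quanta, like ipw_poll_bit(). */
    static int poll_bit(uint32_t mask, int timeout_ms)
    {
            int elapsed = 0;

            do {
                    if (read_reg() & mask)
                            return elapsed;
                    /* mdelay(10) / msleep(10) in a real driver */
                    elapsed += 10;
            } while (elapsed < timeout_ms);

            return -1;
    }

    int main(void)
    {
            printf("bit set after %d ms\n", poll_bit(0x1, 100));
            return 0;
    }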
@@ -2823,33 +2829,11 @@ static void ipw_arc_release(struct ipw_priv *priv) | |||
2823 | mdelay(5); | 2829 | mdelay(5); |
2824 | } | 2830 | } |
2825 | 2831 | ||
2826 | struct fw_header { | ||
2827 | u32 version; | ||
2828 | u32 mode; | ||
2829 | }; | ||
2830 | |||
2831 | struct fw_chunk { | 2832 | struct fw_chunk { |
2832 | u32 address; | 2833 | u32 address; |
2833 | u32 length; | 2834 | u32 length; |
2834 | }; | 2835 | }; |
2835 | 2836 | ||
2836 | #define IPW_FW_MAJOR_VERSION 2 | ||
2837 | #define IPW_FW_MINOR_VERSION 4 | ||
2838 | |||
2839 | #define IPW_FW_MINOR(x) ((x & 0xff) >> 8) | ||
2840 | #define IPW_FW_MAJOR(x) (x & 0xff) | ||
2841 | |||
2842 | #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | IPW_FW_MAJOR_VERSION) | ||
2843 | |||
2844 | #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \ | ||
2845 | "." __stringify(IPW_FW_MINOR_VERSION) "-" | ||
2846 | |||
2847 | #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0 | ||
2848 | #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw" | ||
2849 | #else | ||
2850 | #define IPW_FW_NAME(x) "ipw2200_" x ".fw" | ||
2851 | #endif | ||
2852 | |||
2853 | static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) | 2837 | static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) |
2854 | { | 2838 | { |
2855 | int rc = 0, i, addr; | 2839 | int rc = 0, i, addr; |
@@ -2890,8 +2874,8 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) | |||
2890 | mdelay(1); | 2874 | mdelay(1); |
2891 | 2875 | ||
2892 | /* enable ucode store */ | 2876 | /* enable ucode store */ |
2893 | ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0); | 2877 | ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0); |
2894 | ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS); | 2878 | ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS); |
2895 | mdelay(1); | 2879 | mdelay(1); |
2896 | 2880 | ||
2897 | /* write ucode */ | 2881 | /* write ucode */ |
@@ -3036,7 +3020,7 @@ static int ipw_stop_nic(struct ipw_priv *priv) | |||
3036 | rc = ipw_poll_bit(priv, IPW_RESET_REG, | 3020 | rc = ipw_poll_bit(priv, IPW_RESET_REG, |
3037 | IPW_RESET_REG_MASTER_DISABLED, 500); | 3021 | IPW_RESET_REG_MASTER_DISABLED, 500); |
3038 | if (rc < 0) { | 3022 | if (rc < 0) { |
3039 | IPW_ERROR("wait for reg master disabled failed\n"); | 3023 | IPW_ERROR("wait for reg master disabled failed after 500ms\n"); |
3040 | return rc; | 3024 | return rc; |
3041 | } | 3025 | } |
3042 | 3026 | ||
@@ -3118,33 +3102,47 @@ static int ipw_reset_nic(struct ipw_priv *priv) | |||
3118 | return rc; | 3102 | return rc; |
3119 | } | 3103 | } |
3120 | 3104 | ||
3105 | |||
3106 | struct ipw_fw { | ||
3107 | u32 ver; | ||
3108 | u32 boot_size; | ||
3109 | u32 ucode_size; | ||
3110 | u32 fw_size; | ||
3111 | u8 data[0]; | ||
3112 | }; | ||
3113 | |||
3121 | static int ipw_get_fw(struct ipw_priv *priv, | 3114 | static int ipw_get_fw(struct ipw_priv *priv, |
3122 | const struct firmware **fw, const char *name) | 3115 | const struct firmware **raw, const char *name) |
3123 | { | 3116 | { |
3124 | struct fw_header *header; | 3117 | struct ipw_fw *fw; |
3125 | int rc; | 3118 | int rc; |
3126 | 3119 | ||
3127 | /* ask firmware_class module to get the boot firmware off disk */ | 3120 | /* ask firmware_class module to get the boot firmware off disk */ |
3128 | rc = request_firmware(fw, name, &priv->pci_dev->dev); | 3121 | rc = request_firmware(raw, name, &priv->pci_dev->dev); |
3129 | if (rc < 0) { | 3122 | if (rc < 0) { |
3130 | IPW_ERROR("%s load failed: Reason %d\n", name, rc); | 3123 | IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc); |
3131 | return rc; | 3124 | return rc; |
3132 | } | 3125 | } |
3133 | 3126 | ||
3134 | header = (struct fw_header *)(*fw)->data; | 3127 | if ((*raw)->size < sizeof(*fw)) { |
3135 | if (IPW_FW_MAJOR(le32_to_cpu(header->version)) != IPW_FW_MAJOR_VERSION) { | 3128 | IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size); |
3136 | IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n", | 3129 | return -EINVAL; |
3137 | name, | 3130 | } |
3138 | IPW_FW_MAJOR(le32_to_cpu(header->version)), | 3131 | |
3139 | IPW_FW_MAJOR_VERSION); | 3132 | fw = (void *)(*raw)->data; |
3133 | |||
3134 | if ((*raw)->size < sizeof(*fw) + | ||
3135 | fw->boot_size + fw->ucode_size + fw->fw_size) { | ||
3136 | IPW_ERROR("%s is too small or corrupt (%zd)\n", | ||
3137 | name, (*raw)->size); | ||
3140 | return -EINVAL; | 3138 | return -EINVAL; |
3141 | } | 3139 | } |
3142 | 3140 | ||
3143 | IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n", | 3141 | IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n", |
3144 | name, | 3142 | name, |
3145 | IPW_FW_MAJOR(le32_to_cpu(header->version)), | 3143 | le32_to_cpu(fw->ver) >> 16, |
3146 | IPW_FW_MINOR(le32_to_cpu(header->version)), | 3144 | le32_to_cpu(fw->ver) & 0xff, |
3147 | (*fw)->size - sizeof(struct fw_header)); | 3145 | (*raw)->size - sizeof(*fw)); |
3148 | return 0; | 3146 | return 0; |
3149 | } | 3147 | } |
3150 | 3148 | ||
@@ -3184,17 +3182,13 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv, | |||
3184 | 3182 | ||
3185 | #ifdef CONFIG_PM | 3183 | #ifdef CONFIG_PM |
3186 | static int fw_loaded = 0; | 3184 | static int fw_loaded = 0; |
3187 | static const struct firmware *bootfw = NULL; | 3185 | static const struct firmware *raw = NULL; |
3188 | static const struct firmware *firmware = NULL; | ||
3189 | static const struct firmware *ucode = NULL; | ||
3190 | 3186 | ||
3191 | static void free_firmware(void) | 3187 | static void free_firmware(void) |
3192 | { | 3188 | { |
3193 | if (fw_loaded) { | 3189 | if (fw_loaded) { |
3194 | release_firmware(bootfw); | 3190 | release_firmware(raw); |
3195 | release_firmware(ucode); | 3191 | raw = NULL; |
3196 | release_firmware(firmware); | ||
3197 | bootfw = ucode = firmware = NULL; | ||
3198 | fw_loaded = 0; | 3192 | fw_loaded = 0; |
3199 | } | 3193 | } |
3200 | } | 3194 | } |
@@ -3205,60 +3199,50 @@ static void free_firmware(void) | |||
3205 | static int ipw_load(struct ipw_priv *priv) | 3199 | static int ipw_load(struct ipw_priv *priv) |
3206 | { | 3200 | { |
3207 | #ifndef CONFIG_PM | 3201 | #ifndef CONFIG_PM |
3208 | const struct firmware *bootfw = NULL; | 3202 | const struct firmware *raw = NULL; |
3209 | const struct firmware *firmware = NULL; | ||
3210 | const struct firmware *ucode = NULL; | ||
3211 | #endif | 3203 | #endif |
3204 | struct ipw_fw *fw; | ||
3205 | u8 *boot_img, *ucode_img, *fw_img; | ||
3206 | u8 *name = NULL; | ||
3212 | int rc = 0, retries = 3; | 3207 | int rc = 0, retries = 3; |
3213 | 3208 | ||
3214 | #ifdef CONFIG_PM | 3209 | switch (priv->ieee->iw_mode) { |
3215 | if (!fw_loaded) { | 3210 | case IW_MODE_ADHOC: |
3216 | #endif | 3211 | name = "ipw2200-ibss.fw"; |
3217 | rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot")); | 3212 | break; |
3218 | if (rc) | ||
3219 | goto error; | ||
3220 | |||
3221 | switch (priv->ieee->iw_mode) { | ||
3222 | case IW_MODE_ADHOC: | ||
3223 | rc = ipw_get_fw(priv, &ucode, | ||
3224 | IPW_FW_NAME("ibss_ucode")); | ||
3225 | if (rc) | ||
3226 | goto error; | ||
3227 | |||
3228 | rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss")); | ||
3229 | break; | ||
3230 | |||
3231 | #ifdef CONFIG_IPW2200_MONITOR | 3213 | #ifdef CONFIG_IPW2200_MONITOR |
3232 | case IW_MODE_MONITOR: | 3214 | case IW_MODE_MONITOR: |
3233 | rc = ipw_get_fw(priv, &ucode, | 3215 | name = "ipw2200-sniffer.fw"; |
3234 | IPW_FW_NAME("sniffer_ucode")); | 3216 | break; |
3235 | if (rc) | ||
3236 | goto error; | ||
3237 | |||
3238 | rc = ipw_get_fw(priv, &firmware, | ||
3239 | IPW_FW_NAME("sniffer")); | ||
3240 | break; | ||
3241 | #endif | 3217 | #endif |
3242 | case IW_MODE_INFRA: | 3218 | case IW_MODE_INFRA: |
3243 | rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode")); | 3219 | name = "ipw2200-bss.fw"; |
3244 | if (rc) | 3220 | break; |
3245 | goto error; | 3221 | } |
3246 | |||
3247 | rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss")); | ||
3248 | break; | ||
3249 | 3222 | ||
3250 | default: | 3223 | if (!name) { |
3251 | rc = -EINVAL; | 3224 | rc = -EINVAL; |
3252 | } | 3225 | goto error; |
3226 | } | ||
3253 | 3227 | ||
3254 | if (rc) | 3228 | #ifdef CONFIG_PM |
3229 | if (!fw_loaded) { | ||
3230 | #endif | ||
3231 | rc = ipw_get_fw(priv, &raw, name); | ||
3232 | if (rc < 0) | ||
3255 | goto error; | 3233 | goto error; |
3256 | |||
3257 | #ifdef CONFIG_PM | 3234 | #ifdef CONFIG_PM |
3258 | fw_loaded = 1; | ||
3259 | } | 3235 | } |
3260 | #endif | 3236 | #endif |
3261 | 3237 | ||
3238 | fw = (void *)raw->data; | ||
3239 | boot_img = &fw->data[0]; | ||
3240 | ucode_img = &fw->data[fw->boot_size]; | ||
3241 | fw_img = &fw->data[fw->boot_size + fw->ucode_size]; | ||
3242 | |||
3243 | if (rc < 0) | ||
3244 | goto error; | ||
3245 | |||
3262 | if (!priv->rxq) | 3246 | if (!priv->rxq) |
3263 | priv->rxq = ipw_rx_queue_alloc(priv); | 3247 | priv->rxq = ipw_rx_queue_alloc(priv); |
3264 | else | 3248 | else |
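Taken together, struct ipw_fw, the size checks in ipw_get_fw() and the pointer arithmetic just above replace the old three-file scheme (boot, ucode and runtime images, each with its own fw_header) with a single per-mode image (ipw2200-bss.fw, ipw2200-ibss.fw or ipw2200-sniffer.fw) whose header gives the three section sizes. A standalone sketch of the validation and slicing; it skips the le32_to_cpu() byte-swapping a big-endian kernel would need, and the demo sizes are made up.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct ipw_fw {
            uint32_t ver;           /* printed as v(ver >> 16).(ver & 0xff) */
            uint32_t boot_size;
            uint32_t ucode_size;
            uint32_t fw_size;
            uint8_t data[];         /* boot image, then ucode, then runtime fw */
    };

    static int slice_image(const uint8_t *raw, size_t size)
    {
            const struct ipw_fw *fw = (const void *)raw;
            const uint8_t *boot, *ucode, *runtime;

            if (size < sizeof(*fw))
                    return -1;      /* too small to hold the header */
            if (size < sizeof(*fw) + fw->boot_size + fw->ucode_size + fw->fw_size)
                    return -1;      /* truncated or corrupt image */

            boot    = &fw->data[0];
            ucode   = &fw->data[fw->boot_size];
            runtime = &fw->data[fw->boot_size + fw->ucode_size];

            printf("boot at %zu, ucode at %zu, fw at %zu\n",
                   (size_t)(boot - raw), (size_t)(ucode - raw),
                   (size_t)(runtime - raw));
            return 0;
    }

    int main(void)
    {
            size_t size = sizeof(struct ipw_fw) + 16;   /* 4 + 4 + 8 byte demo */
            struct ipw_fw *fw = calloc(1, size);

            if (!fw)
                    return 1;
            fw->boot_size = 4;
            fw->ucode_size = 4;
            fw->fw_size = 8;
            slice_image((const uint8_t *)fw, size);
            free(fw);
            return 0;
    }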
@@ -3279,7 +3263,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3279 | ipw_stop_nic(priv); | 3263 | ipw_stop_nic(priv); |
3280 | 3264 | ||
3281 | rc = ipw_reset_nic(priv); | 3265 | rc = ipw_reset_nic(priv); |
3282 | if (rc) { | 3266 | if (rc < 0) { |
3283 | IPW_ERROR("Unable to reset NIC\n"); | 3267 | IPW_ERROR("Unable to reset NIC\n"); |
3284 | goto error; | 3268 | goto error; |
3285 | } | 3269 | } |
@@ -3288,8 +3272,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3288 | IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); | 3272 | IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); |
3289 | 3273 | ||
3290 | /* DMA the initial boot firmware into the device */ | 3274 | /* DMA the initial boot firmware into the device */ |
3291 | rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header), | 3275 | rc = ipw_load_firmware(priv, boot_img, fw->boot_size); |
3292 | bootfw->size - sizeof(struct fw_header)); | ||
3293 | if (rc < 0) { | 3276 | if (rc < 0) { |
3294 | IPW_ERROR("Unable to load boot firmware: %d\n", rc); | 3277 | IPW_ERROR("Unable to load boot firmware: %d\n", rc); |
3295 | goto error; | 3278 | goto error; |
@@ -3298,7 +3281,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3298 | /* kick start the device */ | 3281 | /* kick start the device */ |
3299 | ipw_start_nic(priv); | 3282 | ipw_start_nic(priv); |
3300 | 3283 | ||
3301 | /* wait for the device to finish it's initial startup sequence */ | 3284 | /* wait for the device to finish its initial startup sequence */ |
3302 | rc = ipw_poll_bit(priv, IPW_INTA_RW, | 3285 | rc = ipw_poll_bit(priv, IPW_INTA_RW, |
3303 | IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); | 3286 | IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); |
3304 | if (rc < 0) { | 3287 | if (rc < 0) { |
@@ -3311,8 +3294,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3311 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); | 3294 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); |
3312 | 3295 | ||
3313 | /* DMA the ucode into the device */ | 3296 | /* DMA the ucode into the device */ |
3314 | rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header), | 3297 | rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size); |
3315 | ucode->size - sizeof(struct fw_header)); | ||
3316 | if (rc < 0) { | 3298 | if (rc < 0) { |
3317 | IPW_ERROR("Unable to load ucode: %d\n", rc); | 3299 | IPW_ERROR("Unable to load ucode: %d\n", rc); |
3318 | goto error; | 3300 | goto error; |
@@ -3322,18 +3304,19 @@ static int ipw_load(struct ipw_priv *priv) | |||
3322 | ipw_stop_nic(priv); | 3304 | ipw_stop_nic(priv); |
3323 | 3305 | ||
3324 | /* DMA bss firmware into the device */ | 3306 | /* DMA bss firmware into the device */ |
3325 | rc = ipw_load_firmware(priv, firmware->data + | 3307 | rc = ipw_load_firmware(priv, fw_img, fw->fw_size); |
3326 | sizeof(struct fw_header), | ||
3327 | firmware->size - sizeof(struct fw_header)); | ||
3328 | if (rc < 0) { | 3308 | if (rc < 0) { |
3329 | IPW_ERROR("Unable to load firmware: %d\n", rc); | 3309 | IPW_ERROR("Unable to load firmware: %d\n", rc); |
3330 | goto error; | 3310 | goto error; |
3331 | } | 3311 | } |
3312 | #ifdef CONFIG_PM | ||
3313 | fw_loaded = 1; | ||
3314 | #endif | ||
3332 | 3315 | ||
3333 | ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); | 3316 | ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); |
3334 | 3317 | ||
3335 | rc = ipw_queue_reset(priv); | 3318 | rc = ipw_queue_reset(priv); |
3336 | if (rc) { | 3319 | if (rc < 0) { |
3337 | IPW_ERROR("Unable to initialize queues\n"); | 3320 | IPW_ERROR("Unable to initialize queues\n"); |
3338 | goto error; | 3321 | goto error; |
3339 | } | 3322 | } |
@@ -3362,7 +3345,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3362 | rc = ipw_poll_bit(priv, IPW_INTA_RW, | 3345 | rc = ipw_poll_bit(priv, IPW_INTA_RW, |
3363 | IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); | 3346 | IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); |
3364 | if (rc < 0) { | 3347 | if (rc < 0) { |
3365 | IPW_ERROR("device failed to start after 500ms\n"); | 3348 | IPW_ERROR("device failed to start within 500ms\n"); |
3366 | goto error; | 3349 | goto error; |
3367 | } | 3350 | } |
3368 | IPW_DEBUG_INFO("device response after %dms\n", rc); | 3351 | IPW_DEBUG_INFO("device response after %dms\n", rc); |
@@ -3386,9 +3369,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3386 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); | 3369 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); |
3387 | 3370 | ||
3388 | #ifndef CONFIG_PM | 3371 | #ifndef CONFIG_PM |
3389 | release_firmware(bootfw); | 3372 | release_firmware(raw); |
3390 | release_firmware(ucode); | ||
3391 | release_firmware(firmware); | ||
3392 | #endif | 3373 | #endif |
3393 | return 0; | 3374 | return 0; |
3394 | 3375 | ||
@@ -3398,15 +3379,11 @@ static int ipw_load(struct ipw_priv *priv) | |||
3398 | priv->rxq = NULL; | 3379 | priv->rxq = NULL; |
3399 | } | 3380 | } |
3400 | ipw_tx_queue_free(priv); | 3381 | ipw_tx_queue_free(priv); |
3401 | if (bootfw) | 3382 | if (raw) |
3402 | release_firmware(bootfw); | 3383 | release_firmware(raw); |
3403 | if (ucode) | ||
3404 | release_firmware(ucode); | ||
3405 | if (firmware) | ||
3406 | release_firmware(firmware); | ||
3407 | #ifdef CONFIG_PM | 3384 | #ifdef CONFIG_PM |
3408 | fw_loaded = 0; | 3385 | fw_loaded = 0; |
3409 | bootfw = ucode = firmware = NULL; | 3386 | raw = NULL; |
3410 | #endif | 3387 | #endif |
3411 | 3388 | ||
3412 | return rc; | 3389 | return rc; |
@@ -3715,9 +3692,9 @@ static int ipw_disassociate(void *data) | |||
3715 | static void ipw_bg_disassociate(void *data) | 3692 | static void ipw_bg_disassociate(void *data) |
3716 | { | 3693 | { |
3717 | struct ipw_priv *priv = data; | 3694 | struct ipw_priv *priv = data; |
3718 | down(&priv->sem); | 3695 | mutex_lock(&priv->mutex); |
3719 | ipw_disassociate(data); | 3696 | ipw_disassociate(data); |
3720 | up(&priv->sem); | 3697 | mutex_unlock(&priv->mutex); |
3721 | } | 3698 | } |
3722 | 3699 | ||
3723 | static void ipw_system_config(void *data) | 3700 | static void ipw_system_config(void *data) |
@@ -4077,9 +4054,9 @@ static void ipw_gather_stats(struct ipw_priv *priv) | |||
4077 | static void ipw_bg_gather_stats(void *data) | 4054 | static void ipw_bg_gather_stats(void *data) |
4078 | { | 4055 | { |
4079 | struct ipw_priv *priv = data; | 4056 | struct ipw_priv *priv = data; |
4080 | down(&priv->sem); | 4057 | mutex_lock(&priv->mutex); |
4081 | ipw_gather_stats(data); | 4058 | ipw_gather_stats(data); |
4082 | up(&priv->sem); | 4059 | mutex_unlock(&priv->mutex); |
4083 | } | 4060 | } |
4084 | 4061 | ||
4085 | /* Missed beacon behavior: | 4062 | /* Missed beacon behavior: |
@@ -4121,8 +4098,9 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, | |||
4121 | return; | 4098 | return; |
4122 | } | 4099 | } |
4123 | 4100 | ||
4124 | if (missed_count > priv->roaming_threshold && | 4101 | if (roaming && |
4125 | missed_count <= priv->disassociate_threshold) { | 4102 | (missed_count > priv->roaming_threshold && |
4103 | missed_count <= priv->disassociate_threshold)) { | ||
4126 | /* If we are not already roaming, set the ROAM | 4104 | /* If we are not already roaming, set the ROAM |
4127 | * bit in the status and kick off a scan. | 4105 | * bit in the status and kick off a scan. |
4128 | * This can happen several times before we reach | 4106 | * This can happen several times before we reach |
@@ -4150,7 +4128,6 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, | |||
4150 | } | 4128 | } |
4151 | 4129 | ||
4152 | IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); | 4130 | IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); |
4153 | |||
4154 | } | 4131 | } |
4155 | 4132 | ||
4156 | /** | 4133 | /** |
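The two hunks above adjust ipw_handle_missed_beacon(): the roam window (missed count above roaming_threshold but not above disassociate_threshold) now also requires roaming to be enabled. A decision-table sketch of that logic; the ordering of the disassociate check relative to the roam check is assumed from context, and the thresholds are passed in rather than read from priv.

    #include <stdio.h>

    enum beacon_action { BEACON_IGNORE, BEACON_ROAM, BEACON_DISASSOCIATE };

    static enum beacon_action missed_beacon_action(int missed, int roam_thr,
                                                   int disassoc_thr, int roaming)
    {
            if (missed > disassoc_thr)
                    return BEACON_DISASSOCIATE;
            if (roaming && missed > roam_thr && missed <= disassoc_thr)
                    return BEACON_ROAM;
            return BEACON_IGNORE;   /* just log "Missed beacon: N" */
    }

    int main(void)
    {
            /* Same count, roaming on vs. off: only the first case roams. */
            printf("%d %d\n",
                   missed_beacon_action(12, 8, 24, 1),
                   missed_beacon_action(12, 8, 24, 0));
            return 0;
    }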
@@ -4527,10 +4504,9 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4527 | 4504 | ||
4528 | if (notif->size == sizeof(*x)) { | 4505 | if (notif->size == sizeof(*x)) { |
4529 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, | 4506 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, |
4530 | "link deterioration: '%s' " MAC_FMT | 4507 | "link deterioration: type %d, cnt %d\n", |
4531 | " \n", escape_essid(priv->essid, | 4508 | x->silence_notification_type, |
4532 | priv->essid_len), | 4509 | x->silence_count); |
4533 | MAC_ARG(priv->bssid)); | ||
4534 | memcpy(&priv->last_link_deterioration, x, | 4510 | memcpy(&priv->last_link_deterioration, x, |
4535 | sizeof(*x)); | 4511 | sizeof(*x)); |
4536 | } else { | 4512 | } else { |
@@ -4911,13 +4887,13 @@ static void ipw_rx_queue_replenish(void *data) | |||
4911 | static void ipw_bg_rx_queue_replenish(void *data) | 4887 | static void ipw_bg_rx_queue_replenish(void *data) |
4912 | { | 4888 | { |
4913 | struct ipw_priv *priv = data; | 4889 | struct ipw_priv *priv = data; |
4914 | down(&priv->sem); | 4890 | mutex_lock(&priv->mutex); |
4915 | ipw_rx_queue_replenish(data); | 4891 | ipw_rx_queue_replenish(data); |
4916 | up(&priv->sem); | 4892 | mutex_unlock(&priv->mutex); |
4917 | } | 4893 | } |
4918 | 4894 | ||
4919 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | 4895 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. |
4920 | * If an SKB has been detached, the POOL needs to have it's SKB set to NULL | 4896 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL |
4921 | * This free routine walks the list of POOL entries and if SKB is set to | 4897 | * This free routine walks the list of POOL entries and if SKB is set to |
4922 | * non NULL it is unmapped and freed | 4898 | * non NULL it is unmapped and freed |
4923 | */ | 4899 | */ |
@@ -5257,10 +5233,11 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, | |||
5257 | if (priv->ieee->scan_age != 0 && | 5233 | if (priv->ieee->scan_age != 0 && |
5258 | time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { | 5234 | time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { |
5259 | IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " | 5235 | IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " |
5260 | "because of age: %lums.\n", | 5236 | "because of age: %ums.\n", |
5261 | escape_essid(network->ssid, network->ssid_len), | 5237 | escape_essid(network->ssid, network->ssid_len), |
5262 | MAC_ARG(network->bssid), | 5238 | MAC_ARG(network->bssid), |
5263 | 1000 * (jiffies - network->last_scanned) / HZ); | 5239 | jiffies_to_msecs(jiffies - |
5240 | network->last_scanned)); | ||
5264 | return 0; | 5241 | return 0; |
5265 | } | 5242 | } |
5266 | 5243 | ||
@@ -5369,7 +5346,7 @@ static void ipw_merge_adhoc_network(void *data) | |||
5369 | return; | 5346 | return; |
5370 | } | 5347 | } |
5371 | 5348 | ||
5372 | down(&priv->sem); | 5349 | mutex_lock(&priv->mutex); |
5373 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { | 5350 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { |
5374 | IPW_DEBUG_MERGE("remove network %s\n", | 5351 | IPW_DEBUG_MERGE("remove network %s\n", |
5375 | escape_essid(priv->essid, | 5352 | escape_essid(priv->essid, |
@@ -5379,7 +5356,7 @@ static void ipw_merge_adhoc_network(void *data) | |||
5379 | 5356 | ||
5380 | ipw_disassociate(priv); | 5357 | ipw_disassociate(priv); |
5381 | priv->assoc_network = match.network; | 5358 | priv->assoc_network = match.network; |
5382 | up(&priv->sem); | 5359 | mutex_unlock(&priv->mutex); |
5383 | return; | 5360 | return; |
5384 | } | 5361 | } |
5385 | } | 5362 | } |
@@ -5467,11 +5444,12 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5467 | if (network->last_associate && | 5444 | if (network->last_associate && |
5468 | time_after(network->last_associate + (HZ * 3UL), jiffies)) { | 5445 | time_after(network->last_associate + (HZ * 3UL), jiffies)) { |
5469 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | 5446 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " |
5470 | "because of storming (%lus since last " | 5447 | "because of storming (%ums since last " |
5471 | "assoc attempt).\n", | 5448 | "assoc attempt).\n", |
5472 | escape_essid(network->ssid, network->ssid_len), | 5449 | escape_essid(network->ssid, network->ssid_len), |
5473 | MAC_ARG(network->bssid), | 5450 | MAC_ARG(network->bssid), |
5474 | (jiffies - network->last_associate) / HZ); | 5451 | jiffies_to_msecs(jiffies - |
5452 | network->last_associate)); | ||
5475 | return 0; | 5453 | return 0; |
5476 | } | 5454 | } |
5477 | 5455 | ||
@@ -5479,10 +5457,11 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5479 | if (priv->ieee->scan_age != 0 && | 5457 | if (priv->ieee->scan_age != 0 && |
5480 | time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { | 5458 | time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { |
5481 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | 5459 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " |
5482 | "because of age: %lums.\n", | 5460 | "because of age: %ums.\n", |
5483 | escape_essid(network->ssid, network->ssid_len), | 5461 | escape_essid(network->ssid, network->ssid_len), |
5484 | MAC_ARG(network->bssid), | 5462 | MAC_ARG(network->bssid), |
5485 | 1000 * (jiffies - network->last_scanned) / HZ); | 5463 | jiffies_to_msecs(jiffies - |
5464 | network->last_scanned)); | ||
5486 | return 0; | 5465 | return 0; |
5487 | } | 5466 | } |
5488 | 5467 | ||
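Several messages in these hunks drop the open-coded '1000 * (jiffies - last) / HZ' in favour of jiffies_to_msecs(), which returns an unsigned int already scaled for HZ, which is why the format specifiers change from %lu to %u. A minimal kernel-style sketch of the same conversion; the surrounding function is illustrative only.

    #include <linux/kernel.h>
    #include <linux/jiffies.h>

    /* Report how stale a timestamp is, in milliseconds, the way the
     * age/storming messages above now do. */
    static void report_age(unsigned long last_scanned)
    {
            unsigned int age_ms = jiffies_to_msecs(jiffies - last_scanned);

            printk(KERN_DEBUG "excluded because of age: %ums\n", age_ms);
    }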
@@ -5510,15 +5489,6 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5510 | return 0; | 5489 | return 0; |
5511 | } | 5490 | } |
5512 | 5491 | ||
5513 | if (!priv->ieee->wpa_enabled && (network->wpa_ie_len > 0 || | ||
5514 | network->rsn_ie_len > 0)) { | ||
5515 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | ||
5516 | "because of WPA capability mismatch.\n", | ||
5517 | escape_essid(network->ssid, network->ssid_len), | ||
5518 | MAC_ARG(network->bssid)); | ||
5519 | return 0; | ||
5520 | } | ||
5521 | |||
5522 | if ((priv->config & CFG_STATIC_BSSID) && | 5492 | if ((priv->config & CFG_STATIC_BSSID) && |
5523 | memcmp(network->bssid, priv->bssid, ETH_ALEN)) { | 5493 | memcmp(network->bssid, priv->bssid, ETH_ALEN)) { |
5524 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | 5494 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " |
@@ -5539,7 +5509,7 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5539 | } | 5509 | } |
5540 | 5510 | ||
5541 | /* Filter out invalid channel in current GEO */ | 5511 | /* Filter out invalid channel in current GEO */ |
5542 | if (!ipw_is_valid_channel(priv->ieee, network->channel)) { | 5512 | if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) { |
5543 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | 5513 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " |
5544 | "because of invalid channel in current GEO\n", | 5514 | "because of invalid channel in current GEO\n", |
5545 | escape_essid(network->ssid, network->ssid_len), | 5515 | escape_essid(network->ssid, network->ssid_len), |
@@ -5584,7 +5554,7 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5584 | static void ipw_adhoc_create(struct ipw_priv *priv, | 5554 | static void ipw_adhoc_create(struct ipw_priv *priv, |
5585 | struct ieee80211_network *network) | 5555 | struct ieee80211_network *network) |
5586 | { | 5556 | { |
5587 | const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); | 5557 | const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee); |
5588 | int i; | 5558 | int i; |
5589 | 5559 | ||
5590 | /* | 5560 | /* |
@@ -5599,10 +5569,10 @@ static void ipw_adhoc_create(struct ipw_priv *priv, | |||
5599 | * FW fatal error. | 5569 | * FW fatal error. |
5600 | * | 5570 | * |
5601 | */ | 5571 | */ |
5602 | switch (ipw_is_valid_channel(priv->ieee, priv->channel)) { | 5572 | switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) { |
5603 | case IEEE80211_52GHZ_BAND: | 5573 | case IEEE80211_52GHZ_BAND: |
5604 | network->mode = IEEE_A; | 5574 | network->mode = IEEE_A; |
5605 | i = ipw_channel_to_index(priv->ieee, priv->channel); | 5575 | i = ieee80211_channel_to_index(priv->ieee, priv->channel); |
5606 | if (i == -1) | 5576 | if (i == -1) |
5607 | BUG(); | 5577 | BUG(); |
5608 | if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) { | 5578 | if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) { |
@@ -5616,7 +5586,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv, | |||
5616 | network->mode = IEEE_G; | 5586 | network->mode = IEEE_G; |
5617 | else | 5587 | else |
5618 | network->mode = IEEE_B; | 5588 | network->mode = IEEE_B; |
5619 | i = ipw_channel_to_index(priv->ieee, priv->channel); | 5589 | i = ieee80211_channel_to_index(priv->ieee, priv->channel); |
5620 | if (i == -1) | 5590 | if (i == -1) |
5621 | BUG(); | 5591 | BUG(); |
5622 | if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) { | 5592 | if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) { |
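From here on the driver-local geo wrappers (ipw_get_geo, ipw_is_valid_channel, ipw_channel_to_index) give way to the ieee80211 library calls with the same arguments: ieee80211_is_valid_channel() returns the band (or 0 for an invalid channel), ieee80211_channel_to_index() returns an index into the geo tables or -1, and ieee80211_get_geo() returns the table itself. A sketch built only from the call patterns visible in these hunks; the header path and the IEEE80211_24GHZ_BAND constant are assumed by symmetry with the 5.2 GHz case shown above.

    #include <net/ieee80211.h>

    /* Is this channel restricted to passive scanning in the current geo?
     * Returns 1/0, or -1 when the channel is not valid in this geography. */
    static int channel_is_passive_only(struct ieee80211_device *ieee, u8 channel)
    {
            const struct ieee80211_geo *geo = ieee80211_get_geo(ieee);
            int i = ieee80211_channel_to_index(ieee, channel);

            if (i == -1)
                    return -1;

            switch (ieee80211_is_valid_channel(ieee, channel)) {
            case IEEE80211_52GHZ_BAND:
                    return !!(geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY);
            case IEEE80211_24GHZ_BAND:
                    return !!(geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY);
            default:
                    return -1;
            }
    }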
@@ -5671,54 +5641,44 @@ static void ipw_adhoc_create(struct ipw_priv *priv, | |||
5671 | 5641 | ||
5672 | static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) | 5642 | static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) |
5673 | { | 5643 | { |
5674 | struct ipw_tgi_tx_key *key; | 5644 | struct ipw_tgi_tx_key key; |
5675 | struct host_cmd cmd = { | ||
5676 | .cmd = IPW_CMD_TGI_TX_KEY, | ||
5677 | .len = sizeof(*key) | ||
5678 | }; | ||
5679 | 5645 | ||
5680 | if (!(priv->ieee->sec.flags & (1 << index))) | 5646 | if (!(priv->ieee->sec.flags & (1 << index))) |
5681 | return; | 5647 | return; |
5682 | 5648 | ||
5683 | key = (struct ipw_tgi_tx_key *)&cmd.param; | 5649 | key.key_id = index; |
5684 | key->key_id = index; | 5650 | memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); |
5685 | memcpy(key->key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); | 5651 | key.security_type = type; |
5686 | key->security_type = type; | 5652 | key.station_index = 0; /* always 0 for BSS */ |
5687 | key->station_index = 0; /* always 0 for BSS */ | 5653 | key.flags = 0; |
5688 | key->flags = 0; | ||
5689 | /* 0 for new key; previous value of counter (after fatal error) */ | 5654 | /* 0 for new key; previous value of counter (after fatal error) */ |
5690 | key->tx_counter[0] = 0; | 5655 | key.tx_counter[0] = 0; |
5691 | key->tx_counter[1] = 0; | 5656 | key.tx_counter[1] = 0; |
5692 | 5657 | ||
5693 | ipw_send_cmd(priv, &cmd); | 5658 | ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); |
5694 | } | 5659 | } |
5695 | 5660 | ||
5696 | static void ipw_send_wep_keys(struct ipw_priv *priv, int type) | 5661 | static void ipw_send_wep_keys(struct ipw_priv *priv, int type) |
5697 | { | 5662 | { |
5698 | struct ipw_wep_key *key; | 5663 | struct ipw_wep_key key; |
5699 | int i; | 5664 | int i; |
5700 | struct host_cmd cmd = { | ||
5701 | .cmd = IPW_CMD_WEP_KEY, | ||
5702 | .len = sizeof(*key) | ||
5703 | }; | ||
5704 | 5665 | ||
5705 | key = (struct ipw_wep_key *)&cmd.param; | 5666 | key.cmd_id = DINO_CMD_WEP_KEY; |
5706 | key->cmd_id = DINO_CMD_WEP_KEY; | 5667 | key.seq_num = 0; |
5707 | key->seq_num = 0; | ||
5708 | 5668 | ||
5709 | /* Note: AES keys cannot be set for multiple times. | 5669 | /* Note: AES keys cannot be set for multiple times. |
5710 | * Only set it at the first time. */ | 5670 | * Only set it at the first time. */ |
5711 | for (i = 0; i < 4; i++) { | 5671 | for (i = 0; i < 4; i++) { |
5712 | key->key_index = i | type; | 5672 | key.key_index = i | type; |
5713 | if (!(priv->ieee->sec.flags & (1 << i))) { | 5673 | if (!(priv->ieee->sec.flags & (1 << i))) { |
5714 | key->key_size = 0; | 5674 | key.key_size = 0; |
5715 | continue; | 5675 | continue; |
5716 | } | 5676 | } |
5717 | 5677 | ||
5718 | key->key_size = priv->ieee->sec.key_sizes[i]; | 5678 | key.key_size = priv->ieee->sec.key_sizes[i]; |
5719 | memcpy(key->key, priv->ieee->sec.keys[i], key->key_size); | 5679 | memcpy(key.key, priv->ieee->sec.keys[i], key.key_size); |
5720 | 5680 | ||
5721 | ipw_send_cmd(priv, &cmd); | 5681 | ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key); |
5722 | } | 5682 | } |
5723 | } | 5683 | } |
5724 | 5684 | ||
@@ -5822,9 +5782,9 @@ static void ipw_adhoc_check(void *data) | |||
5822 | static void ipw_bg_adhoc_check(void *data) | 5782 | static void ipw_bg_adhoc_check(void *data) |
5823 | { | 5783 | { |
5824 | struct ipw_priv *priv = data; | 5784 | struct ipw_priv *priv = data; |
5825 | down(&priv->sem); | 5785 | mutex_lock(&priv->mutex); |
5826 | ipw_adhoc_check(data); | 5786 | ipw_adhoc_check(data); |
5827 | up(&priv->sem); | 5787 | mutex_unlock(&priv->mutex); |
5828 | } | 5788 | } |
5829 | 5789 | ||
5830 | #ifdef CONFIG_IPW2200_DEBUG | 5790 | #ifdef CONFIG_IPW2200_DEBUG |
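This is the first of many down()/up() pairs converted to mutex_lock()/mutex_unlock() in this patch. The companion change implied by the new &priv->mutex references — swapping the semaphore member of struct ipw_priv for a struct mutex and initialising it in the setup path — is outside the hunks shown; a minimal sketch, assuming the member is simply renamed:

	struct ipw_priv {
		/* ... */
		struct mutex mutex;	/* was: struct semaphore sem; */
		/* ... */
	};

	/* in the probe/setup path: */
	mutex_init(&priv->mutex);	/* replaces the old semaphore initialisation */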
@@ -5950,7 +5910,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, | |||
5950 | const struct ieee80211_geo *geo; | 5910 | const struct ieee80211_geo *geo; |
5951 | int i; | 5911 | int i; |
5952 | 5912 | ||
5953 | geo = ipw_get_geo(priv->ieee); | 5913 | geo = ieee80211_get_geo(priv->ieee); |
5954 | 5914 | ||
5955 | if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) { | 5915 | if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) { |
5956 | int start = channel_index; | 5916 | int start = channel_index; |
@@ -6010,7 +5970,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, | |||
6010 | channel_index++; | 5970 | channel_index++; |
6011 | scan->channels_list[channel_index] = channel; | 5971 | scan->channels_list[channel_index] = channel; |
6012 | index = | 5972 | index = |
6013 | ipw_channel_to_index(priv->ieee, channel); | 5973 | ieee80211_channel_to_index(priv->ieee, channel); |
6014 | ipw_set_scan_type(scan, channel_index, | 5974 | ipw_set_scan_type(scan, channel_index, |
6015 | geo->bg[index]. | 5975 | geo->bg[index]. |
6016 | flags & | 5976 | flags & |
@@ -6051,7 +6011,7 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6051 | (priv->status & STATUS_EXIT_PENDING)) | 6011 | (priv->status & STATUS_EXIT_PENDING)) |
6052 | return 0; | 6012 | return 0; |
6053 | 6013 | ||
6054 | down(&priv->sem); | 6014 | mutex_lock(&priv->mutex); |
6055 | 6015 | ||
6056 | if (priv->status & STATUS_SCANNING) { | 6016 | if (priv->status & STATUS_SCANNING) { |
6057 | IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n"); | 6017 | IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n"); |
@@ -6092,7 +6052,7 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6092 | u8 channel; | 6052 | u8 channel; |
6093 | u8 band = 0; | 6053 | u8 band = 0; |
6094 | 6054 | ||
6095 | switch (ipw_is_valid_channel(priv->ieee, priv->channel)) { | 6055 | switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) { |
6096 | case IEEE80211_52GHZ_BAND: | 6056 | case IEEE80211_52GHZ_BAND: |
6097 | band = (u8) (IPW_A_MODE << 6) | 1; | 6057 | band = (u8) (IPW_A_MODE << 6) | 1; |
6098 | channel = priv->channel; | 6058 | channel = priv->channel; |
@@ -6159,16 +6119,16 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6159 | queue_delayed_work(priv->workqueue, &priv->scan_check, | 6119 | queue_delayed_work(priv->workqueue, &priv->scan_check, |
6160 | IPW_SCAN_CHECK_WATCHDOG); | 6120 | IPW_SCAN_CHECK_WATCHDOG); |
6161 | done: | 6121 | done: |
6162 | up(&priv->sem); | 6122 | mutex_unlock(&priv->mutex); |
6163 | return err; | 6123 | return err; |
6164 | } | 6124 | } |
6165 | 6125 | ||
6166 | static void ipw_bg_abort_scan(void *data) | 6126 | static void ipw_bg_abort_scan(void *data) |
6167 | { | 6127 | { |
6168 | struct ipw_priv *priv = data; | 6128 | struct ipw_priv *priv = data; |
6169 | down(&priv->sem); | 6129 | mutex_lock(&priv->mutex); |
6170 | ipw_abort_scan(data); | 6130 | ipw_abort_scan(data); |
6171 | up(&priv->sem); | 6131 | mutex_unlock(&priv->mutex); |
6172 | } | 6132 | } |
6173 | 6133 | ||
6174 | static int ipw_wpa_enable(struct ipw_priv *priv, int value) | 6134 | static int ipw_wpa_enable(struct ipw_priv *priv, int value) |
@@ -6193,6 +6153,9 @@ static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) | |||
6193 | } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { | 6153 | } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { |
6194 | sec.auth_mode = WLAN_AUTH_OPEN; | 6154 | sec.auth_mode = WLAN_AUTH_OPEN; |
6195 | ieee->open_wep = 1; | 6155 | ieee->open_wep = 1; |
6156 | } else if (value & IW_AUTH_ALG_LEAP) { | ||
6157 | sec.auth_mode = WLAN_AUTH_LEAP; | ||
6158 | ieee->open_wep = 1; | ||
6196 | } else | 6159 | } else |
6197 | return -EINVAL; | 6160 | return -EINVAL; |
6198 | 6161 | ||
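With the three added lines, ipw_wpa_set_auth_algs() accepts LEAP in addition to open-system and shared-key authentication. The resulting chain reads roughly as below; the shared-key branch sits just above the context shown, so its exact body is an assumption:

	if (value & IW_AUTH_ALG_SHARED_KEY) {		/* assumed, above this hunk */
		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
		ieee->open_wep = 0;
	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
		sec.auth_mode = WLAN_AUTH_OPEN;
		ieee->open_wep = 1;
	} else if (value & IW_AUTH_ALG_LEAP) {
		sec.auth_mode = WLAN_AUTH_LEAP;
		ieee->open_wep = 1;
	} else
		return -EINVAL;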
@@ -6204,7 +6167,8 @@ static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) | |||
6204 | return ret; | 6167 | return ret; |
6205 | } | 6168 | } |
6206 | 6169 | ||
6207 | void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len) | 6170 | static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, |
6171 | int wpa_ie_len) | ||
6208 | { | 6172 | { |
6209 | /* make sure WPA is enabled */ | 6173 | /* make sure WPA is enabled */ |
6210 | ipw_wpa_enable(priv, 1); | 6174 | ipw_wpa_enable(priv, 1); |
@@ -6215,15 +6179,10 @@ void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len) | |||
6215 | static int ipw_set_rsn_capa(struct ipw_priv *priv, | 6179 | static int ipw_set_rsn_capa(struct ipw_priv *priv, |
6216 | char *capabilities, int length) | 6180 | char *capabilities, int length) |
6217 | { | 6181 | { |
6218 | struct host_cmd cmd = { | ||
6219 | .cmd = IPW_CMD_RSN_CAPABILITIES, | ||
6220 | .len = length, | ||
6221 | }; | ||
6222 | |||
6223 | IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); | 6182 | IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); |
6224 | 6183 | ||
6225 | memcpy(cmd.param, capabilities, length); | 6184 | return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length, |
6226 | return ipw_send_cmd(priv, &cmd); | 6185 | capabilities); |
6227 | } | 6186 | } |
6228 | 6187 | ||
6229 | /* | 6188 | /* |
@@ -6244,7 +6203,7 @@ static int ipw_wx_set_genie(struct net_device *dev, | |||
6244 | (wrqu->data.length && extra == NULL)) | 6203 | (wrqu->data.length && extra == NULL)) |
6245 | return -EINVAL; | 6204 | return -EINVAL; |
6246 | 6205 | ||
6247 | //down(&priv->sem); | 6206 | //mutex_lock(&priv->mutex); |
6248 | 6207 | ||
6249 | //if (!ieee->wpa_enabled) { | 6208 | //if (!ieee->wpa_enabled) { |
6250 | // err = -EOPNOTSUPP; | 6209 | // err = -EOPNOTSUPP; |
@@ -6270,7 +6229,7 @@ static int ipw_wx_set_genie(struct net_device *dev, | |||
6270 | 6229 | ||
6271 | ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); | 6230 | ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); |
6272 | out: | 6231 | out: |
6273 | //up(&priv->sem); | 6232 | //mutex_unlock(&priv->mutex); |
6274 | return err; | 6233 | return err; |
6275 | } | 6234 | } |
6276 | 6235 | ||
@@ -6283,7 +6242,7 @@ static int ipw_wx_get_genie(struct net_device *dev, | |||
6283 | struct ieee80211_device *ieee = priv->ieee; | 6242 | struct ieee80211_device *ieee = priv->ieee; |
6284 | int err = 0; | 6243 | int err = 0; |
6285 | 6244 | ||
6286 | //down(&priv->sem); | 6245 | //mutex_lock(&priv->mutex); |
6287 | 6246 | ||
6288 | //if (!ieee->wpa_enabled) { | 6247 | //if (!ieee->wpa_enabled) { |
6289 | // err = -EOPNOTSUPP; | 6248 | // err = -EOPNOTSUPP; |
@@ -6304,7 +6263,7 @@ static int ipw_wx_get_genie(struct net_device *dev, | |||
6304 | memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); | 6263 | memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); |
6305 | 6264 | ||
6306 | out: | 6265 | out: |
6307 | //up(&priv->sem); | 6266 | //mutex_unlock(&priv->mutex); |
6308 | return err; | 6267 | return err; |
6309 | } | 6268 | } |
6310 | 6269 | ||
@@ -6556,7 +6515,7 @@ static int ipw_wx_set_mlme(struct net_device *dev, | |||
6556 | * get the modulation type of the current network or | 6515 | * get the modulation type of the current network or |
6557 | * the card current mode | 6516 | * the card current mode |
6558 | */ | 6517 | */ |
6559 | u8 ipw_qos_current_mode(struct ipw_priv * priv) | 6518 | static u8 ipw_qos_current_mode(struct ipw_priv * priv) |
6560 | { | 6519 | { |
6561 | u8 mode = 0; | 6520 | u8 mode = 0; |
6562 | 6521 | ||
@@ -6964,12 +6923,12 @@ static void ipw_bg_qos_activate(void *data) | |||
6964 | if (priv == NULL) | 6923 | if (priv == NULL) |
6965 | return; | 6924 | return; |
6966 | 6925 | ||
6967 | down(&priv->sem); | 6926 | mutex_lock(&priv->mutex); |
6968 | 6927 | ||
6969 | if (priv->status & STATUS_ASSOCIATED) | 6928 | if (priv->status & STATUS_ASSOCIATED) |
6970 | ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); | 6929 | ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); |
6971 | 6930 | ||
6972 | up(&priv->sem); | 6931 | mutex_unlock(&priv->mutex); |
6973 | } | 6932 | } |
6974 | 6933 | ||
6975 | static int ipw_handle_probe_response(struct net_device *dev, | 6934 | static int ipw_handle_probe_response(struct net_device *dev, |
@@ -7010,25 +6969,15 @@ static int ipw_handle_assoc_response(struct net_device *dev, | |||
7010 | static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters | 6969 | static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters |
7011 | *qos_param) | 6970 | *qos_param) |
7012 | { | 6971 | { |
7013 | struct host_cmd cmd = { | 6972 | return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS, |
7014 | .cmd = IPW_CMD_QOS_PARAMETERS, | 6973 | sizeof(*qos_param) * 3, qos_param); |
7015 | .len = (sizeof(struct ieee80211_qos_parameters) * 3) | ||
7016 | }; | ||
7017 | |||
7018 | memcpy(cmd.param, qos_param, sizeof(*qos_param) * 3); | ||
7019 | return ipw_send_cmd(priv, &cmd); | ||
7020 | } | 6974 | } |
7021 | 6975 | ||
7022 | static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element | 6976 | static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element |
7023 | *qos_param) | 6977 | *qos_param) |
7024 | { | 6978 | { |
7025 | struct host_cmd cmd = { | 6979 | return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param), |
7026 | .cmd = IPW_CMD_WME_INFO, | 6980 | qos_param); |
7027 | .len = sizeof(*qos_param) | ||
7028 | }; | ||
7029 | |||
7030 | memcpy(cmd.param, qos_param, sizeof(*qos_param)); | ||
7031 | return ipw_send_cmd(priv, &cmd); | ||
7032 | } | 6981 | } |
7033 | 6982 | ||
7034 | #endif /* CONFIG_IPW_QOS */ | 6983 | #endif /* CONFIG_IPW_QOS */ |
@@ -7052,19 +7001,21 @@ static int ipw_associate_network(struct ipw_priv *priv, | |||
7052 | 7001 | ||
7053 | memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); | 7002 | memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); |
7054 | priv->assoc_request.channel = network->channel; | 7003 | priv->assoc_request.channel = network->channel; |
7004 | priv->assoc_request.auth_key = 0; | ||
7005 | |||
7055 | if ((priv->capability & CAP_PRIVACY_ON) && | 7006 | if ((priv->capability & CAP_PRIVACY_ON) && |
7056 | (priv->capability & CAP_SHARED_KEY)) { | 7007 | (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) { |
7057 | priv->assoc_request.auth_type = AUTH_SHARED_KEY; | 7008 | priv->assoc_request.auth_type = AUTH_SHARED_KEY; |
7058 | priv->assoc_request.auth_key = priv->ieee->sec.active_key; | 7009 | priv->assoc_request.auth_key = priv->ieee->sec.active_key; |
7059 | 7010 | ||
7060 | if ((priv->capability & CAP_PRIVACY_ON) && | 7011 | if (priv->ieee->sec.level == SEC_LEVEL_1) |
7061 | (priv->ieee->sec.level == SEC_LEVEL_1) && | ||
7062 | !(priv->ieee->host_encrypt || priv->ieee->host_decrypt)) | ||
7063 | ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); | 7012 | ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); |
7064 | } else { | 7013 | |
7014 | } else if ((priv->capability & CAP_PRIVACY_ON) && | ||
7015 | (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)) | ||
7016 | priv->assoc_request.auth_type = AUTH_LEAP; | ||
7017 | else | ||
7065 | priv->assoc_request.auth_type = AUTH_OPEN; | 7018 | priv->assoc_request.auth_type = AUTH_OPEN; |
7066 | priv->assoc_request.auth_key = 0; | ||
7067 | } | ||
7068 | 7019 | ||
7069 | if (priv->ieee->wpa_ie_len) { | 7020 | if (priv->ieee->wpa_ie_len) { |
7070 | priv->assoc_request.policy_support = 0x02; /* RSN active */ | 7021 | priv->assoc_request.policy_support = 0x02; /* RSN active */ |
@@ -7278,9 +7229,9 @@ static void ipw_roam(void *data) | |||
7278 | static void ipw_bg_roam(void *data) | 7229 | static void ipw_bg_roam(void *data) |
7279 | { | 7230 | { |
7280 | struct ipw_priv *priv = data; | 7231 | struct ipw_priv *priv = data; |
7281 | down(&priv->sem); | 7232 | mutex_lock(&priv->mutex); |
7282 | ipw_roam(data); | 7233 | ipw_roam(data); |
7283 | up(&priv->sem); | 7234 | mutex_unlock(&priv->mutex); |
7284 | } | 7235 | } |
7285 | 7236 | ||
7286 | static int ipw_associate(void *data) | 7237 | static int ipw_associate(void *data) |
@@ -7375,9 +7326,9 @@ static int ipw_associate(void *data) | |||
7375 | static void ipw_bg_associate(void *data) | 7326 | static void ipw_bg_associate(void *data) |
7376 | { | 7327 | { |
7377 | struct ipw_priv *priv = data; | 7328 | struct ipw_priv *priv = data; |
7378 | down(&priv->sem); | 7329 | mutex_lock(&priv->mutex); |
7379 | ipw_associate(data); | 7330 | ipw_associate(data); |
7380 | up(&priv->sem); | 7331 | mutex_unlock(&priv->mutex); |
7381 | } | 7332 | } |
7382 | 7333 | ||
7383 | static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, | 7334 | static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, |
@@ -7811,12 +7762,10 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7811 | 7762 | ||
7812 | while (i != r) { | 7763 | while (i != r) { |
7813 | rxb = priv->rxq->queue[i]; | 7764 | rxb = priv->rxq->queue[i]; |
7814 | #ifdef CONFIG_IPW2200_DEBUG | ||
7815 | if (unlikely(rxb == NULL)) { | 7765 | if (unlikely(rxb == NULL)) { |
7816 | printk(KERN_CRIT "Queue not allocated!\n"); | 7766 | printk(KERN_CRIT "Queue not allocated!\n"); |
7817 | break; | 7767 | break; |
7818 | } | 7768 | } |
7819 | #endif | ||
7820 | priv->rxq->queue[i] = NULL; | 7769 | priv->rxq->queue[i] = NULL; |
7821 | 7770 | ||
7822 | pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, | 7771 | pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, |
@@ -7835,7 +7784,8 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7835 | le16_to_cpu(pkt->u.frame.rssi_dbm) - | 7784 | le16_to_cpu(pkt->u.frame.rssi_dbm) - |
7836 | IPW_RSSI_TO_DBM, | 7785 | IPW_RSSI_TO_DBM, |
7837 | .signal = | 7786 | .signal = |
7838 | le16_to_cpu(pkt->u.frame.signal), | 7787 | le16_to_cpu(pkt->u.frame.rssi_dbm) - |
7788 | IPW_RSSI_TO_DBM + 0x100, | ||
7839 | .noise = | 7789 | .noise = |
7840 | le16_to_cpu(pkt->u.frame.noise), | 7790 | le16_to_cpu(pkt->u.frame.noise), |
7841 | .rate = pkt->u.frame.rate, | 7791 | .rate = pkt->u.frame.rate, |
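The .signal field of the RX stats is now derived from the same rssi_dbm reading as .rssi rather than from the firmware's raw signal value. A worked example, assuming IPW_RSSI_TO_DBM is 112 (the constant is defined elsewhere in the driver) and a reported rssi_dbm of 40:

	rssi   = 40 - 112         = -72		/* dBm */
	signal = 40 - 112 + 0x100 = 184		/* same dBm figure, biased by 0x100
						   so the stats value stays positive */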
@@ -7899,7 +7849,8 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7899 | le16_to_cpu(pkt->u.frame.length)); | 7849 | le16_to_cpu(pkt->u.frame.length)); |
7900 | 7850 | ||
7901 | if (le16_to_cpu(pkt->u.frame.length) < | 7851 | if (le16_to_cpu(pkt->u.frame.length) < |
7902 | frame_hdr_len(header)) { | 7852 | ieee80211_get_hdrlen(le16_to_cpu( |
7853 | header->frame_ctl))) { | ||
7903 | IPW_DEBUG_DROP | 7854 | IPW_DEBUG_DROP |
7904 | ("Received packet is too small. " | 7855 | ("Received packet is too small. " |
7905 | "Dropping.\n"); | 7856 | "Dropping.\n"); |
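The minimum-length check now asks the generic ieee80211 layer for the header size implied by the frame-control field instead of the driver-local frame_hdr_len(). A worked example (figures from the 802.11 header layout, not from this hunk):

	/* 3-address data frame: 2 (fc) + 2 (dur) + 3*6 (addrs) + 2 (seq) = 24 bytes
	 * of header; a QoS data frame adds 2, a 4-address frame adds 6.  Any
	 * pkt->u.frame.length smaller than that header is dropped here. */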
@@ -7989,7 +7940,14 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7989 | #define DEFAULT_SHORT_RETRY_LIMIT 7U | 7940 | #define DEFAULT_SHORT_RETRY_LIMIT 7U |
7990 | #define DEFAULT_LONG_RETRY_LIMIT 4U | 7941 | #define DEFAULT_LONG_RETRY_LIMIT 4U |
7991 | 7942 | ||
7992 | static int ipw_sw_reset(struct ipw_priv *priv, int init) | 7943 | /** |
7944 | * ipw_sw_reset | ||
7945 | * @option: options to control different reset behaviour | ||
7946 | * 0 = reset everything except the 'disable' module_param | ||
7947 | * 1 = reset everything and print out driver info (for probe only) | ||
7948 | * 2 = reset everything | ||
7949 | */ | ||
7950 | static int ipw_sw_reset(struct ipw_priv *priv, int option) | ||
7993 | { | 7951 | { |
7994 | int band, modulation; | 7952 | int band, modulation; |
7995 | int old_mode = priv->ieee->iw_mode; | 7953 | int old_mode = priv->ieee->iw_mode; |
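The new kernel-doc pins down the meaning of the renamed option argument. Call sites consistent with it look like the lines below; only the option == 1 banner and the disable && option interaction are visible in these hunks, and the option == 2 caller is an assumption:

	ipw_sw_reset(priv, 1);	/* probe: full reset plus the "Detected ..." banner */
	ipw_sw_reset(priv, 0);	/* e.g. ipw_wx_set_mode(): do not re-apply the
				   'disable' module_param */
	ipw_sw_reset(priv, 2);	/* full reset, no banner (caller assumed) */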
@@ -8016,7 +7974,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init) | |||
8016 | priv->essid_len = 0; | 7974 | priv->essid_len = 0; |
8017 | memset(priv->essid, 0, IW_ESSID_MAX_SIZE); | 7975 | memset(priv->essid, 0, IW_ESSID_MAX_SIZE); |
8018 | 7976 | ||
8019 | if (disable) { | 7977 | if (disable && option) { |
8020 | priv->status |= STATUS_RF_KILL_SW; | 7978 | priv->status |= STATUS_RF_KILL_SW; |
8021 | IPW_DEBUG_INFO("Radio disabled.\n"); | 7979 | IPW_DEBUG_INFO("Radio disabled.\n"); |
8022 | } | 7980 | } |
@@ -8068,7 +8026,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init) | |||
8068 | 8026 | ||
8069 | if ((priv->pci_dev->device == 0x4223) || | 8027 | if ((priv->pci_dev->device == 0x4223) || |
8070 | (priv->pci_dev->device == 0x4224)) { | 8028 | (priv->pci_dev->device == 0x4224)) { |
8071 | if (init) | 8029 | if (option == 1) |
8072 | printk(KERN_INFO DRV_NAME | 8030 | printk(KERN_INFO DRV_NAME |
8073 | ": Detected Intel PRO/Wireless 2915ABG Network " | 8031 | ": Detected Intel PRO/Wireless 2915ABG Network " |
8074 | "Connection\n"); | 8032 | "Connection\n"); |
@@ -8079,7 +8037,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init) | |||
8079 | priv->adapter = IPW_2915ABG; | 8037 | priv->adapter = IPW_2915ABG; |
8080 | priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; | 8038 | priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; |
8081 | } else { | 8039 | } else { |
8082 | if (init) | 8040 | if (option == 1) |
8083 | printk(KERN_INFO DRV_NAME | 8041 | printk(KERN_INFO DRV_NAME |
8084 | ": Detected Intel PRO/Wireless 2200BG Network " | 8042 | ": Detected Intel PRO/Wireless 2200BG Network " |
8085 | "Connection\n"); | 8043 | "Connection\n"); |
@@ -8126,7 +8084,7 @@ static int ipw_wx_get_name(struct net_device *dev, | |||
8126 | union iwreq_data *wrqu, char *extra) | 8084 | union iwreq_data *wrqu, char *extra) |
8127 | { | 8085 | { |
8128 | struct ipw_priv *priv = ieee80211_priv(dev); | 8086 | struct ipw_priv *priv = ieee80211_priv(dev); |
8129 | down(&priv->sem); | 8087 | mutex_lock(&priv->mutex); |
8130 | if (priv->status & STATUS_RF_KILL_MASK) | 8088 | if (priv->status & STATUS_RF_KILL_MASK) |
8131 | strcpy(wrqu->name, "radio off"); | 8089 | strcpy(wrqu->name, "radio off"); |
8132 | else if (!(priv->status & STATUS_ASSOCIATED)) | 8090 | else if (!(priv->status & STATUS_ASSOCIATED)) |
@@ -8135,7 +8093,7 @@ static int ipw_wx_get_name(struct net_device *dev, | |||
8135 | snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c", | 8093 | snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c", |
8136 | ipw_modes[priv->assoc_request.ieee_mode]); | 8094 | ipw_modes[priv->assoc_request.ieee_mode]); |
8137 | IPW_DEBUG_WX("Name: %s\n", wrqu->name); | 8095 | IPW_DEBUG_WX("Name: %s\n", wrqu->name); |
8138 | up(&priv->sem); | 8096 | mutex_unlock(&priv->mutex); |
8139 | return 0; | 8097 | return 0; |
8140 | } | 8098 | } |
8141 | 8099 | ||
@@ -8196,7 +8154,7 @@ static int ipw_wx_set_freq(struct net_device *dev, | |||
8196 | union iwreq_data *wrqu, char *extra) | 8154 | union iwreq_data *wrqu, char *extra) |
8197 | { | 8155 | { |
8198 | struct ipw_priv *priv = ieee80211_priv(dev); | 8156 | struct ipw_priv *priv = ieee80211_priv(dev); |
8199 | const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); | 8157 | const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee); |
8200 | struct iw_freq *fwrq = &wrqu->freq; | 8158 | struct iw_freq *fwrq = &wrqu->freq; |
8201 | int ret = 0, i; | 8159 | int ret = 0, i; |
8202 | u8 channel, flags; | 8160 | u8 channel, flags; |
@@ -8204,24 +8162,24 @@ static int ipw_wx_set_freq(struct net_device *dev, | |||
8204 | 8162 | ||
8205 | if (fwrq->m == 0) { | 8163 | if (fwrq->m == 0) { |
8206 | IPW_DEBUG_WX("SET Freq/Channel -> any\n"); | 8164 | IPW_DEBUG_WX("SET Freq/Channel -> any\n"); |
8207 | down(&priv->sem); | 8165 | mutex_lock(&priv->mutex); |
8208 | ret = ipw_set_channel(priv, 0); | 8166 | ret = ipw_set_channel(priv, 0); |
8209 | up(&priv->sem); | 8167 | mutex_unlock(&priv->mutex); |
8210 | return ret; | 8168 | return ret; |
8211 | } | 8169 | } |
8212 | /* if setting by freq convert to channel */ | 8170 | /* if setting by freq convert to channel */ |
8213 | if (fwrq->e == 1) { | 8171 | if (fwrq->e == 1) { |
8214 | channel = ipw_freq_to_channel(priv->ieee, fwrq->m); | 8172 | channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m); |
8215 | if (channel == 0) | 8173 | if (channel == 0) |
8216 | return -EINVAL; | 8174 | return -EINVAL; |
8217 | } else | 8175 | } else |
8218 | channel = fwrq->m; | 8176 | channel = fwrq->m; |
8219 | 8177 | ||
8220 | if (!(band = ipw_is_valid_channel(priv->ieee, channel))) | 8178 | if (!(band = ieee80211_is_valid_channel(priv->ieee, channel))) |
8221 | return -EINVAL; | 8179 | return -EINVAL; |
8222 | 8180 | ||
8223 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { | 8181 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { |
8224 | i = ipw_channel_to_index(priv->ieee, channel); | 8182 | i = ieee80211_channel_to_index(priv->ieee, channel); |
8225 | if (i == -1) | 8183 | if (i == -1) |
8226 | return -EINVAL; | 8184 | return -EINVAL; |
8227 | 8185 | ||
@@ -8234,9 +8192,9 @@ static int ipw_wx_set_freq(struct net_device *dev, | |||
8234 | } | 8192 | } |
8235 | 8193 | ||
8236 | IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); | 8194 | IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); |
8237 | down(&priv->sem); | 8195 | mutex_lock(&priv->mutex); |
8238 | ret = ipw_set_channel(priv, channel); | 8196 | ret = ipw_set_channel(priv, channel); |
8239 | up(&priv->sem); | 8197 | mutex_unlock(&priv->mutex); |
8240 | return ret; | 8198 | return ret; |
8241 | } | 8199 | } |
8242 | 8200 | ||
@@ -8250,14 +8208,14 @@ static int ipw_wx_get_freq(struct net_device *dev, | |||
8250 | 8208 | ||
8251 | /* If we are associated, trying to associate, or have a statically | 8209 | /* If we are associated, trying to associate, or have a statically |
8252 | * configured CHANNEL then return that; otherwise return ANY */ | 8210 | * configured CHANNEL then return that; otherwise return ANY */ |
8253 | down(&priv->sem); | 8211 | mutex_lock(&priv->mutex); |
8254 | if (priv->config & CFG_STATIC_CHANNEL || | 8212 | if (priv->config & CFG_STATIC_CHANNEL || |
8255 | priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) | 8213 | priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) |
8256 | wrqu->freq.m = priv->channel; | 8214 | wrqu->freq.m = priv->channel; |
8257 | else | 8215 | else |
8258 | wrqu->freq.m = 0; | 8216 | wrqu->freq.m = 0; |
8259 | 8217 | ||
8260 | up(&priv->sem); | 8218 | mutex_unlock(&priv->mutex); |
8261 | IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); | 8219 | IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); |
8262 | return 0; | 8220 | return 0; |
8263 | } | 8221 | } |
@@ -8287,7 +8245,7 @@ static int ipw_wx_set_mode(struct net_device *dev, | |||
8287 | if (wrqu->mode == priv->ieee->iw_mode) | 8245 | if (wrqu->mode == priv->ieee->iw_mode) |
8288 | return 0; | 8246 | return 0; |
8289 | 8247 | ||
8290 | down(&priv->sem); | 8248 | mutex_lock(&priv->mutex); |
8291 | 8249 | ||
8292 | ipw_sw_reset(priv, 0); | 8250 | ipw_sw_reset(priv, 0); |
8293 | 8251 | ||
@@ -8310,7 +8268,7 @@ static int ipw_wx_set_mode(struct net_device *dev, | |||
8310 | priv->ieee->iw_mode = wrqu->mode; | 8268 | priv->ieee->iw_mode = wrqu->mode; |
8311 | 8269 | ||
8312 | queue_work(priv->workqueue, &priv->adapter_restart); | 8270 | queue_work(priv->workqueue, &priv->adapter_restart); |
8313 | up(&priv->sem); | 8271 | mutex_unlock(&priv->mutex); |
8314 | return err; | 8272 | return err; |
8315 | } | 8273 | } |
8316 | 8274 | ||
@@ -8319,10 +8277,10 @@ static int ipw_wx_get_mode(struct net_device *dev, | |||
8319 | union iwreq_data *wrqu, char *extra) | 8277 | union iwreq_data *wrqu, char *extra) |
8320 | { | 8278 | { |
8321 | struct ipw_priv *priv = ieee80211_priv(dev); | 8279 | struct ipw_priv *priv = ieee80211_priv(dev); |
8322 | down(&priv->sem); | 8280 | mutex_lock(&priv->mutex); |
8323 | wrqu->mode = priv->ieee->iw_mode; | 8281 | wrqu->mode = priv->ieee->iw_mode; |
8324 | IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); | 8282 | IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); |
8325 | up(&priv->sem); | 8283 | mutex_unlock(&priv->mutex); |
8326 | return 0; | 8284 | return 0; |
8327 | } | 8285 | } |
8328 | 8286 | ||
@@ -8349,7 +8307,7 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8349 | { | 8307 | { |
8350 | struct ipw_priv *priv = ieee80211_priv(dev); | 8308 | struct ipw_priv *priv = ieee80211_priv(dev); |
8351 | struct iw_range *range = (struct iw_range *)extra; | 8309 | struct iw_range *range = (struct iw_range *)extra; |
8352 | const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); | 8310 | const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee); |
8353 | int i = 0, j; | 8311 | int i = 0, j; |
8354 | 8312 | ||
8355 | wrqu->data.length = sizeof(*range); | 8313 | wrqu->data.length = sizeof(*range); |
@@ -8361,7 +8319,7 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8361 | range->max_qual.qual = 100; | 8319 | range->max_qual.qual = 100; |
8362 | /* TODO: Find real max RSSI and stick here */ | 8320 | /* TODO: Find real max RSSI and stick here */ |
8363 | range->max_qual.level = 0; | 8321 | range->max_qual.level = 0; |
8364 | range->max_qual.noise = priv->ieee->worst_rssi + 0x100; | 8322 | range->max_qual.noise = 0; |
8365 | range->max_qual.updated = 7; /* Updated all three */ | 8323 | range->max_qual.updated = 7; /* Updated all three */ |
8366 | 8324 | ||
8367 | range->avg_qual.qual = 70; | 8325 | range->avg_qual.qual = 70; |
@@ -8369,7 +8327,7 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8369 | range->avg_qual.level = 0; /* FIXME to real average level */ | 8327 | range->avg_qual.level = 0; /* FIXME to real average level */ |
8370 | range->avg_qual.noise = 0; | 8328 | range->avg_qual.noise = 0; |
8371 | range->avg_qual.updated = 7; /* Updated all three */ | 8329 | range->avg_qual.updated = 7; /* Updated all three */ |
8372 | down(&priv->sem); | 8330 | mutex_lock(&priv->mutex); |
8373 | range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); | 8331 | range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); |
8374 | 8332 | ||
8375 | for (i = 0; i < range->num_bitrates; i++) | 8333 | for (i = 0; i < range->num_bitrates; i++) |
@@ -8387,31 +8345,39 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8387 | 8345 | ||
8388 | /* Set the Wireless Extension versions */ | 8346 | /* Set the Wireless Extension versions */ |
8389 | range->we_version_compiled = WIRELESS_EXT; | 8347 | range->we_version_compiled = WIRELESS_EXT; |
8390 | range->we_version_source = 16; | 8348 | range->we_version_source = 18; |
8391 | 8349 | ||
8392 | i = 0; | 8350 | i = 0; |
8393 | if (priv->ieee->mode & (IEEE_B | IEEE_G)) { | 8351 | if (priv->ieee->mode & (IEEE_B | IEEE_G)) { |
8394 | for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; | 8352 | for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) { |
8395 | i++, j++) { | 8353 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && |
8354 | (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY)) | ||
8355 | continue; | ||
8356 | |||
8396 | range->freq[i].i = geo->bg[j].channel; | 8357 | range->freq[i].i = geo->bg[j].channel; |
8397 | range->freq[i].m = geo->bg[j].freq * 100000; | 8358 | range->freq[i].m = geo->bg[j].freq * 100000; |
8398 | range->freq[i].e = 1; | 8359 | range->freq[i].e = 1; |
8360 | i++; | ||
8399 | } | 8361 | } |
8400 | } | 8362 | } |
8401 | 8363 | ||
8402 | if (priv->ieee->mode & IEEE_A) { | 8364 | if (priv->ieee->mode & IEEE_A) { |
8403 | for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; | 8365 | for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) { |
8404 | i++, j++) { | 8366 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && |
8367 | (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY)) | ||
8368 | continue; | ||
8369 | |||
8405 | range->freq[i].i = geo->a[j].channel; | 8370 | range->freq[i].i = geo->a[j].channel; |
8406 | range->freq[i].m = geo->a[j].freq * 100000; | 8371 | range->freq[i].m = geo->a[j].freq * 100000; |
8407 | range->freq[i].e = 1; | 8372 | range->freq[i].e = 1; |
8373 | i++; | ||
8408 | } | 8374 | } |
8409 | } | 8375 | } |
8410 | 8376 | ||
8411 | range->num_channels = i; | 8377 | range->num_channels = i; |
8412 | range->num_frequency = i; | 8378 | range->num_frequency = i; |
8413 | 8379 | ||
8414 | up(&priv->sem); | 8380 | mutex_unlock(&priv->mutex); |
8415 | 8381 | ||
8416 | /* Event capability (kernel + driver) */ | 8382 | /* Event capability (kernel + driver) */ |
8417 | range->event_capa[0] = (IW_EVENT_CAPA_K_0 | | 8383 | range->event_capa[0] = (IW_EVENT_CAPA_K_0 | |
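Both band loops above now skip passive-only channels when the interface is in ad-hoc mode and advance i only for channels actually reported, so num_channels and num_frequency stay accurate. The shared predicate could be factored out as below — not part of the patch, purely illustrative, and assuming the geo entries are struct ieee80211_channel:

	static inline int ipw_adhoc_channel_ok(const struct ieee80211_channel *ch,
					       int iw_mode)
	{
		/* an IBSS cannot be started on a passive-only channel */
		return !(iw_mode == IW_MODE_ADHOC &&
			 (ch->flags & IEEE80211_CH_PASSIVE_ONLY));
	}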
@@ -8419,6 +8385,9 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8419 | IW_EVENT_CAPA_MASK(SIOCGIWAP)); | 8385 | IW_EVENT_CAPA_MASK(SIOCGIWAP)); |
8420 | range->event_capa[1] = IW_EVENT_CAPA_K_1; | 8386 | range->event_capa[1] = IW_EVENT_CAPA_K_1; |
8421 | 8387 | ||
8388 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | | ||
8389 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; | ||
8390 | |||
8422 | IPW_DEBUG_WX("GET Range\n"); | 8391 | IPW_DEBUG_WX("GET Range\n"); |
8423 | return 0; | 8392 | return 0; |
8424 | } | 8393 | } |
@@ -8438,7 +8407,7 @@ static int ipw_wx_set_wap(struct net_device *dev, | |||
8438 | 8407 | ||
8439 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) | 8408 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) |
8440 | return -EINVAL; | 8409 | return -EINVAL; |
8441 | down(&priv->sem); | 8410 | mutex_lock(&priv->mutex); |
8442 | if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || | 8411 | if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || |
8443 | !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { | 8412 | !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { |
8444 | /* we disable mandatory BSSID association */ | 8413 | /* we disable mandatory BSSID association */ |
@@ -8447,14 +8416,14 @@ static int ipw_wx_set_wap(struct net_device *dev, | |||
8447 | IPW_DEBUG_ASSOC("Attempting to associate with new " | 8416 | IPW_DEBUG_ASSOC("Attempting to associate with new " |
8448 | "parameters.\n"); | 8417 | "parameters.\n"); |
8449 | ipw_associate(priv); | 8418 | ipw_associate(priv); |
8450 | up(&priv->sem); | 8419 | mutex_unlock(&priv->mutex); |
8451 | return 0; | 8420 | return 0; |
8452 | } | 8421 | } |
8453 | 8422 | ||
8454 | priv->config |= CFG_STATIC_BSSID; | 8423 | priv->config |= CFG_STATIC_BSSID; |
8455 | if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) { | 8424 | if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) { |
8456 | IPW_DEBUG_WX("BSSID set to current BSSID.\n"); | 8425 | IPW_DEBUG_WX("BSSID set to current BSSID.\n"); |
8457 | up(&priv->sem); | 8426 | mutex_unlock(&priv->mutex); |
8458 | return 0; | 8427 | return 0; |
8459 | } | 8428 | } |
8460 | 8429 | ||
@@ -8468,7 +8437,7 @@ static int ipw_wx_set_wap(struct net_device *dev, | |||
8468 | if (!ipw_disassociate(priv)) | 8437 | if (!ipw_disassociate(priv)) |
8469 | ipw_associate(priv); | 8438 | ipw_associate(priv); |
8470 | 8439 | ||
8471 | up(&priv->sem); | 8440 | mutex_unlock(&priv->mutex); |
8472 | return 0; | 8441 | return 0; |
8473 | } | 8442 | } |
8474 | 8443 | ||
@@ -8479,7 +8448,7 @@ static int ipw_wx_get_wap(struct net_device *dev, | |||
8479 | struct ipw_priv *priv = ieee80211_priv(dev); | 8448 | struct ipw_priv *priv = ieee80211_priv(dev); |
8480 | /* If we are associated, trying to associate, or have a statically | 8449 | /* If we are associated, trying to associate, or have a statically |
8481 | * configured BSSID then return that; otherwise return ANY */ | 8450 | * configured BSSID then return that; otherwise return ANY */ |
8482 | down(&priv->sem); | 8451 | mutex_lock(&priv->mutex); |
8483 | if (priv->config & CFG_STATIC_BSSID || | 8452 | if (priv->config & CFG_STATIC_BSSID || |
8484 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | 8453 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { |
8485 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; | 8454 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; |
@@ -8489,7 +8458,7 @@ static int ipw_wx_get_wap(struct net_device *dev, | |||
8489 | 8458 | ||
8490 | IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", | 8459 | IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", |
8491 | MAC_ARG(wrqu->ap_addr.sa_data)); | 8460 | MAC_ARG(wrqu->ap_addr.sa_data)); |
8492 | up(&priv->sem); | 8461 | mutex_unlock(&priv->mutex); |
8493 | return 0; | 8462 | return 0; |
8494 | } | 8463 | } |
8495 | 8464 | ||
@@ -8500,7 +8469,7 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8500 | struct ipw_priv *priv = ieee80211_priv(dev); | 8469 | struct ipw_priv *priv = ieee80211_priv(dev); |
8501 | char *essid = ""; /* ANY */ | 8470 | char *essid = ""; /* ANY */ |
8502 | int length = 0; | 8471 | int length = 0; |
8503 | down(&priv->sem); | 8472 | mutex_lock(&priv->mutex); |
8504 | if (wrqu->essid.flags && wrqu->essid.length) { | 8473 | if (wrqu->essid.flags && wrqu->essid.length) { |
8505 | length = wrqu->essid.length - 1; | 8474 | length = wrqu->essid.length - 1; |
8506 | essid = extra; | 8475 | essid = extra; |
@@ -8515,7 +8484,7 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8515 | priv->config &= ~CFG_STATIC_ESSID; | 8484 | priv->config &= ~CFG_STATIC_ESSID; |
8516 | ipw_associate(priv); | 8485 | ipw_associate(priv); |
8517 | } | 8486 | } |
8518 | up(&priv->sem); | 8487 | mutex_unlock(&priv->mutex); |
8519 | return 0; | 8488 | return 0; |
8520 | } | 8489 | } |
8521 | 8490 | ||
@@ -8525,7 +8494,7 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8525 | 8494 | ||
8526 | if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { | 8495 | if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { |
8527 | IPW_DEBUG_WX("ESSID set to current ESSID.\n"); | 8496 | IPW_DEBUG_WX("ESSID set to current ESSID.\n"); |
8528 | up(&priv->sem); | 8497 | mutex_unlock(&priv->mutex); |
8529 | return 0; | 8498 | return 0; |
8530 | } | 8499 | } |
8531 | 8500 | ||
@@ -8540,7 +8509,7 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8540 | if (!ipw_disassociate(priv)) | 8509 | if (!ipw_disassociate(priv)) |
8541 | ipw_associate(priv); | 8510 | ipw_associate(priv); |
8542 | 8511 | ||
8543 | up(&priv->sem); | 8512 | mutex_unlock(&priv->mutex); |
8544 | return 0; | 8513 | return 0; |
8545 | } | 8514 | } |
8546 | 8515 | ||
@@ -8552,7 +8521,7 @@ static int ipw_wx_get_essid(struct net_device *dev, | |||
8552 | 8521 | ||
8553 | /* If we are associated, trying to associate, or have a statically | 8522 | /* If we are associated, trying to associate, or have a statically |
8554 | * configured ESSID then return that; otherwise return ANY */ | 8523 | * configured ESSID then return that; otherwise return ANY */ |
8555 | down(&priv->sem); | 8524 | mutex_lock(&priv->mutex); |
8556 | if (priv->config & CFG_STATIC_ESSID || | 8525 | if (priv->config & CFG_STATIC_ESSID || |
8557 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | 8526 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { |
8558 | IPW_DEBUG_WX("Getting essid: '%s'\n", | 8527 | IPW_DEBUG_WX("Getting essid: '%s'\n", |
@@ -8565,7 +8534,7 @@ static int ipw_wx_get_essid(struct net_device *dev, | |||
8565 | wrqu->essid.length = 0; | 8534 | wrqu->essid.length = 0; |
8566 | wrqu->essid.flags = 0; /* active */ | 8535 | wrqu->essid.flags = 0; /* active */ |
8567 | } | 8536 | } |
8568 | up(&priv->sem); | 8537 | mutex_unlock(&priv->mutex); |
8569 | return 0; | 8538 | return 0; |
8570 | } | 8539 | } |
8571 | 8540 | ||
@@ -8578,12 +8547,12 @@ static int ipw_wx_set_nick(struct net_device *dev, | |||
8578 | IPW_DEBUG_WX("Setting nick to '%s'\n", extra); | 8547 | IPW_DEBUG_WX("Setting nick to '%s'\n", extra); |
8579 | if (wrqu->data.length > IW_ESSID_MAX_SIZE) | 8548 | if (wrqu->data.length > IW_ESSID_MAX_SIZE) |
8580 | return -E2BIG; | 8549 | return -E2BIG; |
8581 | down(&priv->sem); | 8550 | mutex_lock(&priv->mutex); |
8582 | wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); | 8551 | wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); |
8583 | memset(priv->nick, 0, sizeof(priv->nick)); | 8552 | memset(priv->nick, 0, sizeof(priv->nick)); |
8584 | memcpy(priv->nick, extra, wrqu->data.length); | 8553 | memcpy(priv->nick, extra, wrqu->data.length); |
8585 | IPW_DEBUG_TRACE("<<\n"); | 8554 | IPW_DEBUG_TRACE("<<\n"); |
8586 | up(&priv->sem); | 8555 | mutex_unlock(&priv->mutex); |
8587 | return 0; | 8556 | return 0; |
8588 | 8557 | ||
8589 | } | 8558 | } |
@@ -8594,11 +8563,57 @@ static int ipw_wx_get_nick(struct net_device *dev, | |||
8594 | { | 8563 | { |
8595 | struct ipw_priv *priv = ieee80211_priv(dev); | 8564 | struct ipw_priv *priv = ieee80211_priv(dev); |
8596 | IPW_DEBUG_WX("Getting nick\n"); | 8565 | IPW_DEBUG_WX("Getting nick\n"); |
8597 | down(&priv->sem); | 8566 | mutex_lock(&priv->mutex); |
8598 | wrqu->data.length = strlen(priv->nick) + 1; | 8567 | wrqu->data.length = strlen(priv->nick) + 1; |
8599 | memcpy(extra, priv->nick, wrqu->data.length); | 8568 | memcpy(extra, priv->nick, wrqu->data.length); |
8600 | wrqu->data.flags = 1; /* active */ | 8569 | wrqu->data.flags = 1; /* active */ |
8601 | up(&priv->sem); | 8570 | mutex_unlock(&priv->mutex); |
8571 | return 0; | ||
8572 | } | ||
8573 | |||
8574 | static int ipw_wx_set_sens(struct net_device *dev, | ||
8575 | struct iw_request_info *info, | ||
8576 | union iwreq_data *wrqu, char *extra) | ||
8577 | { | ||
8578 | struct ipw_priv *priv = ieee80211_priv(dev); | ||
8579 | int err = 0; | ||
8580 | |||
8581 | IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value); | ||
8582 | IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value); | ||
8583 | mutex_lock(&priv->mutex); | ||
8584 | |||
8585 | 	if (wrqu->sens.fixed == 0) { | ||
8586 | 		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; | ||
8587 | 		priv->disassociate_threshold = | ||
8588 | 			IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; | ||
8589 | 		goto out; | ||
8590 | 	} | ||
8591 | if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) || | ||
8592 | (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) { | ||
8593 | err = -EINVAL; | ||
8594 | goto out; | ||
8595 | } | ||
8596 | |||
8597 | priv->roaming_threshold = wrqu->sens.value; | ||
8598 | 	priv->disassociate_threshold = 3 * wrqu->sens.value; | ||
8599 | out: | ||
8600 | mutex_unlock(&priv->mutex); | ||
8601 | return err; | ||
8602 | } | ||
8603 | |||
8604 | static int ipw_wx_get_sens(struct net_device *dev, | ||
8605 | struct iw_request_info *info, | ||
8606 | union iwreq_data *wrqu, char *extra) | ||
8607 | { | ||
8608 | struct ipw_priv *priv = ieee80211_priv(dev); | ||
8609 | mutex_lock(&priv->mutex); | ||
8610 | wrqu->sens.fixed = 1; | ||
8611 | wrqu->sens.value = priv->roaming_threshold; | ||
8612 | mutex_unlock(&priv->mutex); | ||
8613 | |||
8614 | 	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n", | ||
8615 | 		     wrqu->sens.disabled ? "OFF" : "ON", wrqu->sens.value); | ||
8616 | |||
8602 | return 0; | 8617 | return 0; |
8603 | } | 8618 | } |
8604 | 8619 | ||
@@ -8691,7 +8706,7 @@ static int ipw_wx_set_rate(struct net_device *dev, | |||
8691 | apply: | 8706 | apply: |
8692 | IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", | 8707 | IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", |
8693 | mask, fixed ? "fixed" : "sub-rates"); | 8708 | mask, fixed ? "fixed" : "sub-rates"); |
8694 | down(&priv->sem); | 8709 | mutex_lock(&priv->mutex); |
8695 | if (mask == IEEE80211_DEFAULT_RATES_MASK) { | 8710 | if (mask == IEEE80211_DEFAULT_RATES_MASK) { |
8696 | priv->config &= ~CFG_FIXED_RATE; | 8711 | priv->config &= ~CFG_FIXED_RATE; |
8697 | ipw_set_fixed_rate(priv, priv->ieee->mode); | 8712 | ipw_set_fixed_rate(priv, priv->ieee->mode); |
@@ -8700,7 +8715,7 @@ static int ipw_wx_set_rate(struct net_device *dev, | |||
8700 | 8715 | ||
8701 | if (priv->rates_mask == mask) { | 8716 | if (priv->rates_mask == mask) { |
8702 | IPW_DEBUG_WX("Mask set to current mask.\n"); | 8717 | IPW_DEBUG_WX("Mask set to current mask.\n"); |
8703 | up(&priv->sem); | 8718 | mutex_unlock(&priv->mutex); |
8704 | return 0; | 8719 | return 0; |
8705 | } | 8720 | } |
8706 | 8721 | ||
@@ -8711,7 +8726,7 @@ static int ipw_wx_set_rate(struct net_device *dev, | |||
8711 | if (!ipw_disassociate(priv)) | 8726 | if (!ipw_disassociate(priv)) |
8712 | ipw_associate(priv); | 8727 | ipw_associate(priv); |
8713 | 8728 | ||
8714 | up(&priv->sem); | 8729 | mutex_unlock(&priv->mutex); |
8715 | return 0; | 8730 | return 0; |
8716 | } | 8731 | } |
8717 | 8732 | ||
@@ -8720,9 +8735,9 @@ static int ipw_wx_get_rate(struct net_device *dev, | |||
8720 | union iwreq_data *wrqu, char *extra) | 8735 | union iwreq_data *wrqu, char *extra) |
8721 | { | 8736 | { |
8722 | struct ipw_priv *priv = ieee80211_priv(dev); | 8737 | struct ipw_priv *priv = ieee80211_priv(dev); |
8723 | down(&priv->sem); | 8738 | mutex_lock(&priv->mutex); |
8724 | wrqu->bitrate.value = priv->last_rate; | 8739 | wrqu->bitrate.value = priv->last_rate; |
8725 | up(&priv->sem); | 8740 | mutex_unlock(&priv->mutex); |
8726 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); | 8741 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); |
8727 | return 0; | 8742 | return 0; |
8728 | } | 8743 | } |
@@ -8732,20 +8747,20 @@ static int ipw_wx_set_rts(struct net_device *dev, | |||
8732 | union iwreq_data *wrqu, char *extra) | 8747 | union iwreq_data *wrqu, char *extra) |
8733 | { | 8748 | { |
8734 | struct ipw_priv *priv = ieee80211_priv(dev); | 8749 | struct ipw_priv *priv = ieee80211_priv(dev); |
8735 | down(&priv->sem); | 8750 | mutex_lock(&priv->mutex); |
8736 | if (wrqu->rts.disabled) | 8751 | if (wrqu->rts.disabled) |
8737 | priv->rts_threshold = DEFAULT_RTS_THRESHOLD; | 8752 | priv->rts_threshold = DEFAULT_RTS_THRESHOLD; |
8738 | else { | 8753 | else { |
8739 | if (wrqu->rts.value < MIN_RTS_THRESHOLD || | 8754 | if (wrqu->rts.value < MIN_RTS_THRESHOLD || |
8740 | wrqu->rts.value > MAX_RTS_THRESHOLD) { | 8755 | wrqu->rts.value > MAX_RTS_THRESHOLD) { |
8741 | up(&priv->sem); | 8756 | mutex_unlock(&priv->mutex); |
8742 | return -EINVAL; | 8757 | return -EINVAL; |
8743 | } | 8758 | } |
8744 | priv->rts_threshold = wrqu->rts.value; | 8759 | priv->rts_threshold = wrqu->rts.value; |
8745 | } | 8760 | } |
8746 | 8761 | ||
8747 | ipw_send_rts_threshold(priv, priv->rts_threshold); | 8762 | ipw_send_rts_threshold(priv, priv->rts_threshold); |
8748 | up(&priv->sem); | 8763 | mutex_unlock(&priv->mutex); |
8749 | IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); | 8764 | IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); |
8750 | return 0; | 8765 | return 0; |
8751 | } | 8766 | } |
@@ -8755,11 +8770,11 @@ static int ipw_wx_get_rts(struct net_device *dev, | |||
8755 | union iwreq_data *wrqu, char *extra) | 8770 | union iwreq_data *wrqu, char *extra) |
8756 | { | 8771 | { |
8757 | struct ipw_priv *priv = ieee80211_priv(dev); | 8772 | struct ipw_priv *priv = ieee80211_priv(dev); |
8758 | down(&priv->sem); | 8773 | mutex_lock(&priv->mutex); |
8759 | wrqu->rts.value = priv->rts_threshold; | 8774 | wrqu->rts.value = priv->rts_threshold; |
8760 | wrqu->rts.fixed = 0; /* no auto select */ | 8775 | wrqu->rts.fixed = 0; /* no auto select */ |
8761 | wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); | 8776 | wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); |
8762 | up(&priv->sem); | 8777 | mutex_unlock(&priv->mutex); |
8763 | IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); | 8778 | IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); |
8764 | return 0; | 8779 | return 0; |
8765 | } | 8780 | } |
@@ -8771,7 +8786,7 @@ static int ipw_wx_set_txpow(struct net_device *dev, | |||
8771 | struct ipw_priv *priv = ieee80211_priv(dev); | 8786 | struct ipw_priv *priv = ieee80211_priv(dev); |
8772 | int err = 0; | 8787 | int err = 0; |
8773 | 8788 | ||
8774 | down(&priv->sem); | 8789 | mutex_lock(&priv->mutex); |
8775 | if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { | 8790 | if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { |
8776 | err = -EINPROGRESS; | 8791 | err = -EINPROGRESS; |
8777 | goto out; | 8792 | goto out; |
@@ -8794,7 +8809,7 @@ static int ipw_wx_set_txpow(struct net_device *dev, | |||
8794 | priv->tx_power = wrqu->power.value; | 8809 | priv->tx_power = wrqu->power.value; |
8795 | err = ipw_set_tx_power(priv); | 8810 | err = ipw_set_tx_power(priv); |
8796 | out: | 8811 | out: |
8797 | up(&priv->sem); | 8812 | mutex_unlock(&priv->mutex); |
8798 | return err; | 8813 | return err; |
8799 | } | 8814 | } |
8800 | 8815 | ||
@@ -8803,12 +8818,12 @@ static int ipw_wx_get_txpow(struct net_device *dev, | |||
8803 | union iwreq_data *wrqu, char *extra) | 8818 | union iwreq_data *wrqu, char *extra) |
8804 | { | 8819 | { |
8805 | struct ipw_priv *priv = ieee80211_priv(dev); | 8820 | struct ipw_priv *priv = ieee80211_priv(dev); |
8806 | down(&priv->sem); | 8821 | mutex_lock(&priv->mutex); |
8807 | wrqu->power.value = priv->tx_power; | 8822 | wrqu->power.value = priv->tx_power; |
8808 | wrqu->power.fixed = 1; | 8823 | wrqu->power.fixed = 1; |
8809 | wrqu->power.flags = IW_TXPOW_DBM; | 8824 | wrqu->power.flags = IW_TXPOW_DBM; |
8810 | wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; | 8825 | wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; |
8811 | up(&priv->sem); | 8826 | mutex_unlock(&priv->mutex); |
8812 | 8827 | ||
8813 | IPW_DEBUG_WX("GET TX Power -> %s %d \n", | 8828 | IPW_DEBUG_WX("GET TX Power -> %s %d \n", |
8814 | wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); | 8829 | wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); |
@@ -8821,13 +8836,13 @@ static int ipw_wx_set_frag(struct net_device *dev, | |||
8821 | union iwreq_data *wrqu, char *extra) | 8836 | union iwreq_data *wrqu, char *extra) |
8822 | { | 8837 | { |
8823 | struct ipw_priv *priv = ieee80211_priv(dev); | 8838 | struct ipw_priv *priv = ieee80211_priv(dev); |
8824 | down(&priv->sem); | 8839 | mutex_lock(&priv->mutex); |
8825 | if (wrqu->frag.disabled) | 8840 | if (wrqu->frag.disabled) |
8826 | priv->ieee->fts = DEFAULT_FTS; | 8841 | priv->ieee->fts = DEFAULT_FTS; |
8827 | else { | 8842 | else { |
8828 | if (wrqu->frag.value < MIN_FRAG_THRESHOLD || | 8843 | if (wrqu->frag.value < MIN_FRAG_THRESHOLD || |
8829 | wrqu->frag.value > MAX_FRAG_THRESHOLD) { | 8844 | wrqu->frag.value > MAX_FRAG_THRESHOLD) { |
8830 | up(&priv->sem); | 8845 | mutex_unlock(&priv->mutex); |
8831 | return -EINVAL; | 8846 | return -EINVAL; |
8832 | } | 8847 | } |
8833 | 8848 | ||
@@ -8835,7 +8850,7 @@ static int ipw_wx_set_frag(struct net_device *dev, | |||
8835 | } | 8850 | } |
8836 | 8851 | ||
8837 | ipw_send_frag_threshold(priv, wrqu->frag.value); | 8852 | ipw_send_frag_threshold(priv, wrqu->frag.value); |
8838 | up(&priv->sem); | 8853 | mutex_unlock(&priv->mutex); |
8839 | IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); | 8854 | IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); |
8840 | return 0; | 8855 | return 0; |
8841 | } | 8856 | } |
@@ -8845,11 +8860,11 @@ static int ipw_wx_get_frag(struct net_device *dev, | |||
8845 | union iwreq_data *wrqu, char *extra) | 8860 | union iwreq_data *wrqu, char *extra) |
8846 | { | 8861 | { |
8847 | struct ipw_priv *priv = ieee80211_priv(dev); | 8862 | struct ipw_priv *priv = ieee80211_priv(dev); |
8848 | down(&priv->sem); | 8863 | mutex_lock(&priv->mutex); |
8849 | wrqu->frag.value = priv->ieee->fts; | 8864 | wrqu->frag.value = priv->ieee->fts; |
8850 | wrqu->frag.fixed = 0; /* no auto select */ | 8865 | wrqu->frag.fixed = 0; /* no auto select */ |
8851 | wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); | 8866 | wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); |
8852 | up(&priv->sem); | 8867 | mutex_unlock(&priv->mutex); |
8853 | IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); | 8868 | IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); |
8854 | 8869 | ||
8855 | return 0; | 8870 | return 0; |
@@ -8870,7 +8885,7 @@ static int ipw_wx_set_retry(struct net_device *dev, | |||
8870 | if (wrqu->retry.value < 0 || wrqu->retry.value > 255) | 8885 | if (wrqu->retry.value < 0 || wrqu->retry.value > 255) |
8871 | return -EINVAL; | 8886 | return -EINVAL; |
8872 | 8887 | ||
8873 | down(&priv->sem); | 8888 | mutex_lock(&priv->mutex); |
8874 | if (wrqu->retry.flags & IW_RETRY_MIN) | 8889 | if (wrqu->retry.flags & IW_RETRY_MIN) |
8875 | priv->short_retry_limit = (u8) wrqu->retry.value; | 8890 | priv->short_retry_limit = (u8) wrqu->retry.value; |
8876 | else if (wrqu->retry.flags & IW_RETRY_MAX) | 8891 | else if (wrqu->retry.flags & IW_RETRY_MAX) |
@@ -8882,7 +8897,7 @@ static int ipw_wx_set_retry(struct net_device *dev, | |||
8882 | 8897 | ||
8883 | ipw_send_retry_limit(priv, priv->short_retry_limit, | 8898 | ipw_send_retry_limit(priv, priv->short_retry_limit, |
8884 | priv->long_retry_limit); | 8899 | priv->long_retry_limit); |
8885 | up(&priv->sem); | 8900 | mutex_unlock(&priv->mutex); |
8886 | IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", | 8901 | IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", |
8887 | priv->short_retry_limit, priv->long_retry_limit); | 8902 | priv->short_retry_limit, priv->long_retry_limit); |
8888 | return 0; | 8903 | return 0; |
@@ -8894,11 +8909,11 @@ static int ipw_wx_get_retry(struct net_device *dev, | |||
8894 | { | 8909 | { |
8895 | struct ipw_priv *priv = ieee80211_priv(dev); | 8910 | struct ipw_priv *priv = ieee80211_priv(dev); |
8896 | 8911 | ||
8897 | down(&priv->sem); | 8912 | mutex_lock(&priv->mutex); |
8898 | wrqu->retry.disabled = 0; | 8913 | wrqu->retry.disabled = 0; |
8899 | 8914 | ||
8900 | if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { | 8915 | if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { |
8901 | up(&priv->sem); | 8916 | mutex_unlock(&priv->mutex); |
8902 | return -EINVAL; | 8917 | return -EINVAL; |
8903 | } | 8918 | } |
8904 | 8919 | ||
@@ -8912,7 +8927,7 @@ static int ipw_wx_get_retry(struct net_device *dev, | |||
8912 | wrqu->retry.flags = IW_RETRY_LIMIT; | 8927 | wrqu->retry.flags = IW_RETRY_LIMIT; |
8913 | wrqu->retry.value = priv->short_retry_limit; | 8928 | wrqu->retry.value = priv->short_retry_limit; |
8914 | } | 8929 | } |
8915 | up(&priv->sem); | 8930 | mutex_unlock(&priv->mutex); |
8916 | 8931 | ||
8917 | IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); | 8932 | IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); |
8918 | 8933 | ||
@@ -8929,7 +8944,7 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, | |||
8929 | (priv->status & STATUS_EXIT_PENDING)) | 8944 | (priv->status & STATUS_EXIT_PENDING)) |
8930 | return 0; | 8945 | return 0; |
8931 | 8946 | ||
8932 | down(&priv->sem); | 8947 | mutex_lock(&priv->mutex); |
8933 | 8948 | ||
8934 | if (priv->status & STATUS_RF_KILL_MASK) { | 8949 | if (priv->status & STATUS_RF_KILL_MASK) { |
8935 | IPW_DEBUG_HC("Aborting scan due to RF kill activation\n"); | 8950 | IPW_DEBUG_HC("Aborting scan due to RF kill activation\n"); |
@@ -8981,7 +8996,7 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, | |||
8981 | priv->status |= STATUS_SCANNING; | 8996 | priv->status |= STATUS_SCANNING; |
8982 | 8997 | ||
8983 | done: | 8998 | done: |
8984 | up(&priv->sem); | 8999 | mutex_unlock(&priv->mutex); |
8985 | return err; | 9000 | return err; |
8986 | } | 9001 | } |
8987 | 9002 | ||
@@ -9024,7 +9039,7 @@ static int ipw_wx_set_encode(struct net_device *dev, | |||
9024 | int ret; | 9039 | int ret; |
9025 | u32 cap = priv->capability; | 9040 | u32 cap = priv->capability; |
9026 | 9041 | ||
9027 | down(&priv->sem); | 9042 | mutex_lock(&priv->mutex); |
9028 | ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key); | 9043 | ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key); |
9029 | 9044 | ||
9030 | /* In IBSS mode, we need to notify the firmware to update | 9045 | /* In IBSS mode, we need to notify the firmware to update |
@@ -9034,7 +9049,7 @@ static int ipw_wx_set_encode(struct net_device *dev, | |||
9034 | priv->status & STATUS_ASSOCIATED) | 9049 | priv->status & STATUS_ASSOCIATED) |
9035 | ipw_disassociate(priv); | 9050 | ipw_disassociate(priv); |
9036 | 9051 | ||
9037 | up(&priv->sem); | 9052 | mutex_unlock(&priv->mutex); |
9038 | return ret; | 9053 | return ret; |
9039 | } | 9054 | } |
9040 | 9055 | ||
@@ -9052,17 +9067,17 @@ static int ipw_wx_set_power(struct net_device *dev, | |||
9052 | { | 9067 | { |
9053 | struct ipw_priv *priv = ieee80211_priv(dev); | 9068 | struct ipw_priv *priv = ieee80211_priv(dev); |
9054 | int err; | 9069 | int err; |
9055 | down(&priv->sem); | 9070 | mutex_lock(&priv->mutex); |
9056 | if (wrqu->power.disabled) { | 9071 | if (wrqu->power.disabled) { |
9057 | priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); | 9072 | priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); |
9058 | err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); | 9073 | err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); |
9059 | if (err) { | 9074 | if (err) { |
9060 | IPW_DEBUG_WX("failed setting power mode.\n"); | 9075 | IPW_DEBUG_WX("failed setting power mode.\n"); |
9061 | up(&priv->sem); | 9076 | mutex_unlock(&priv->mutex); |
9062 | return err; | 9077 | return err; |
9063 | } | 9078 | } |
9064 | IPW_DEBUG_WX("SET Power Management Mode -> off\n"); | 9079 | IPW_DEBUG_WX("SET Power Management Mode -> off\n"); |
9065 | up(&priv->sem); | 9080 | mutex_unlock(&priv->mutex); |
9066 | return 0; | 9081 | return 0; |
9067 | } | 9082 | } |
9068 | 9083 | ||
@@ -9074,7 +9089,7 @@ static int ipw_wx_set_power(struct net_device *dev, | |||
9074 | default: /* Otherwise we don't support it */ | 9089 | default: /* Otherwise we don't support it */ |
9075 | IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", | 9090 | IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", |
9076 | wrqu->power.flags); | 9091 | wrqu->power.flags); |
9077 | up(&priv->sem); | 9092 | mutex_unlock(&priv->mutex); |
9078 | return -EOPNOTSUPP; | 9093 | return -EOPNOTSUPP; |
9079 | } | 9094 | } |
9080 | 9095 | ||
@@ -9087,12 +9102,12 @@ static int ipw_wx_set_power(struct net_device *dev, | |||
9087 | err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); | 9102 | err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); |
9088 | if (err) { | 9103 | if (err) { |
9089 | IPW_DEBUG_WX("failed setting power mode.\n"); | 9104 | IPW_DEBUG_WX("failed setting power mode.\n"); |
9090 | up(&priv->sem); | 9105 | mutex_unlock(&priv->mutex); |
9091 | return err; | 9106 | return err; |
9092 | } | 9107 | } |
9093 | 9108 | ||
9094 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); | 9109 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); |
9095 | up(&priv->sem); | 9110 | mutex_unlock(&priv->mutex); |
9096 | return 0; | 9111 | return 0; |
9097 | } | 9112 | } |
9098 | 9113 | ||
@@ -9101,13 +9116,13 @@ static int ipw_wx_get_power(struct net_device *dev, | |||
9101 | union iwreq_data *wrqu, char *extra) | 9116 | union iwreq_data *wrqu, char *extra) |
9102 | { | 9117 | { |
9103 | struct ipw_priv *priv = ieee80211_priv(dev); | 9118 | struct ipw_priv *priv = ieee80211_priv(dev); |
9104 | down(&priv->sem); | 9119 | mutex_lock(&priv->mutex); |
9105 | if (!(priv->power_mode & IPW_POWER_ENABLED)) | 9120 | if (!(priv->power_mode & IPW_POWER_ENABLED)) |
9106 | wrqu->power.disabled = 1; | 9121 | wrqu->power.disabled = 1; |
9107 | else | 9122 | else |
9108 | wrqu->power.disabled = 0; | 9123 | wrqu->power.disabled = 0; |
9109 | 9124 | ||
9110 | up(&priv->sem); | 9125 | mutex_unlock(&priv->mutex); |
9111 | IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); | 9126 | IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); |
9112 | 9127 | ||
9113 | return 0; | 9128 | return 0; |
@@ -9120,7 +9135,7 @@ static int ipw_wx_set_powermode(struct net_device *dev, | |||
9120 | struct ipw_priv *priv = ieee80211_priv(dev); | 9135 | struct ipw_priv *priv = ieee80211_priv(dev); |
9121 | int mode = *(int *)extra; | 9136 | int mode = *(int *)extra; |
9122 | int err; | 9137 | int err; |
9123 | down(&priv->sem); | 9138 | mutex_lock(&priv->mutex); |
9124 | if ((mode < 1) || (mode > IPW_POWER_LIMIT)) { | 9139 | if ((mode < 1) || (mode > IPW_POWER_LIMIT)) { |
9125 | mode = IPW_POWER_AC; | 9140 | mode = IPW_POWER_AC; |
9126 | priv->power_mode = mode; | 9141 | priv->power_mode = mode; |
@@ -9133,11 +9148,11 @@ static int ipw_wx_set_powermode(struct net_device *dev, | |||
9133 | 9148 | ||
9134 | if (err) { | 9149 | if (err) { |
9135 | IPW_DEBUG_WX("failed setting power mode.\n"); | 9150 | IPW_DEBUG_WX("failed setting power mode.\n"); |
9136 | up(&priv->sem); | 9151 | mutex_unlock(&priv->mutex); |
9137 | return err; | 9152 | return err; |
9138 | } | 9153 | } |
9139 | } | 9154 | } |
9140 | up(&priv->sem); | 9155 | mutex_unlock(&priv->mutex); |
9141 | return 0; | 9156 | return 0; |
9142 | } | 9157 | } |
9143 | 9158 | ||
@@ -9186,7 +9201,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev, | |||
9186 | IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); | 9201 | IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); |
9187 | return -EINVAL; | 9202 | return -EINVAL; |
9188 | } | 9203 | } |
9189 | down(&priv->sem); | 9204 | mutex_lock(&priv->mutex); |
9190 | if (priv->adapter == IPW_2915ABG) { | 9205 | if (priv->adapter == IPW_2915ABG) { |
9191 | priv->ieee->abg_true = 1; | 9206 | priv->ieee->abg_true = 1; |
9192 | if (mode & IEEE_A) { | 9207 | if (mode & IEEE_A) { |
@@ -9198,7 +9213,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev, | |||
9198 | if (mode & IEEE_A) { | 9213 | if (mode & IEEE_A) { |
9199 | IPW_WARNING("Attempt to set 2200BG into " | 9214 | IPW_WARNING("Attempt to set 2200BG into " |
9200 | "802.11a mode\n"); | 9215 | "802.11a mode\n"); |
9201 | up(&priv->sem); | 9216 | mutex_unlock(&priv->mutex); |
9202 | return -EINVAL; | 9217 | return -EINVAL; |
9203 | } | 9218 | } |
9204 | 9219 | ||
@@ -9235,7 +9250,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev, | |||
9235 | IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", | 9250 | IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", |
9236 | mode & IEEE_A ? 'a' : '.', | 9251 | mode & IEEE_A ? 'a' : '.', |
9237 | mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); | 9252 | mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); |
9238 | up(&priv->sem); | 9253 | mutex_unlock(&priv->mutex); |
9239 | return 0; | 9254 | return 0; |
9240 | } | 9255 | } |
9241 | 9256 | ||
@@ -9244,7 +9259,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev, | |||
9244 | union iwreq_data *wrqu, char *extra) | 9259 | union iwreq_data *wrqu, char *extra) |
9245 | { | 9260 | { |
9246 | struct ipw_priv *priv = ieee80211_priv(dev); | 9261 | struct ipw_priv *priv = ieee80211_priv(dev); |
9247 | down(&priv->sem); | 9262 | mutex_lock(&priv->mutex); |
9248 | switch (priv->ieee->mode) { | 9263 | switch (priv->ieee->mode) { |
9249 | case IEEE_A: | 9264 | case IEEE_A: |
9250 | strncpy(extra, "802.11a (1)", MAX_WX_STRING); | 9265 | strncpy(extra, "802.11a (1)", MAX_WX_STRING); |
@@ -9275,7 +9290,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev, | |||
9275 | IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); | 9290 | IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); |
9276 | 9291 | ||
9277 | wrqu->data.length = strlen(extra) + 1; | 9292 | wrqu->data.length = strlen(extra) + 1; |
9278 | up(&priv->sem); | 9293 | mutex_unlock(&priv->mutex); |
9279 | 9294 | ||
9280 | return 0; | 9295 | return 0; |
9281 | } | 9296 | } |
@@ -9286,7 +9301,7 @@ static int ipw_wx_set_preamble(struct net_device *dev, | |||
9286 | { | 9301 | { |
9287 | struct ipw_priv *priv = ieee80211_priv(dev); | 9302 | struct ipw_priv *priv = ieee80211_priv(dev); |
9288 | int mode = *(int *)extra; | 9303 | int mode = *(int *)extra; |
9289 | down(&priv->sem); | 9304 | mutex_lock(&priv->mutex); |
9290 | /* Switching from SHORT -> LONG requires a disassociation */ | 9305 | /* Switching from SHORT -> LONG requires a disassociation */ |
9291 | if (mode == 1) { | 9306 | if (mode == 1) { |
9292 | if (!(priv->config & CFG_PREAMBLE_LONG)) { | 9307 | if (!(priv->config & CFG_PREAMBLE_LONG)) { |
@@ -9305,11 +9320,11 @@ static int ipw_wx_set_preamble(struct net_device *dev, | |||
9305 | priv->config &= ~CFG_PREAMBLE_LONG; | 9320 | priv->config &= ~CFG_PREAMBLE_LONG; |
9306 | goto done; | 9321 | goto done; |
9307 | } | 9322 | } |
9308 | up(&priv->sem); | 9323 | mutex_unlock(&priv->mutex); |
9309 | return -EINVAL; | 9324 | return -EINVAL; |
9310 | 9325 | ||
9311 | done: | 9326 | done: |
9312 | up(&priv->sem); | 9327 | mutex_unlock(&priv->mutex); |
9313 | return 0; | 9328 | return 0; |
9314 | } | 9329 | } |
9315 | 9330 | ||
@@ -9318,12 +9333,12 @@ static int ipw_wx_get_preamble(struct net_device *dev, | |||
9318 | union iwreq_data *wrqu, char *extra) | 9333 | union iwreq_data *wrqu, char *extra) |
9319 | { | 9334 | { |
9320 | struct ipw_priv *priv = ieee80211_priv(dev); | 9335 | struct ipw_priv *priv = ieee80211_priv(dev); |
9321 | down(&priv->sem); | 9336 | mutex_lock(&priv->mutex); |
9322 | if (priv->config & CFG_PREAMBLE_LONG) | 9337 | if (priv->config & CFG_PREAMBLE_LONG) |
9323 | snprintf(wrqu->name, IFNAMSIZ, "long (1)"); | 9338 | snprintf(wrqu->name, IFNAMSIZ, "long (1)"); |
9324 | else | 9339 | else |
9325 | snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); | 9340 | snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); |
9326 | up(&priv->sem); | 9341 | mutex_unlock(&priv->mutex); |
9327 | return 0; | 9342 | return 0; |
9328 | } | 9343 | } |
9329 | 9344 | ||
@@ -9335,7 +9350,7 @@ static int ipw_wx_set_monitor(struct net_device *dev, | |||
9335 | struct ipw_priv *priv = ieee80211_priv(dev); | 9350 | struct ipw_priv *priv = ieee80211_priv(dev); |
9336 | int *parms = (int *)extra; | 9351 | int *parms = (int *)extra; |
9337 | int enable = (parms[0] > 0); | 9352 | int enable = (parms[0] > 0); |
9338 | down(&priv->sem); | 9353 | mutex_lock(&priv->mutex); |
9339 | IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); | 9354 | IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); |
9340 | if (enable) { | 9355 | if (enable) { |
9341 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | 9356 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { |
@@ -9350,13 +9365,13 @@ static int ipw_wx_set_monitor(struct net_device *dev, | |||
9350 | ipw_set_channel(priv, parms[1]); | 9365 | ipw_set_channel(priv, parms[1]); |
9351 | } else { | 9366 | } else { |
9352 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | 9367 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { |
9353 | up(&priv->sem); | 9368 | mutex_unlock(&priv->mutex); |
9354 | return 0; | 9369 | return 0; |
9355 | } | 9370 | } |
9356 | priv->net_dev->type = ARPHRD_ETHER; | 9371 | priv->net_dev->type = ARPHRD_ETHER; |
9357 | queue_work(priv->workqueue, &priv->adapter_restart); | 9372 | queue_work(priv->workqueue, &priv->adapter_restart); |
9358 | } | 9373 | } |
9359 | up(&priv->sem); | 9374 | mutex_unlock(&priv->mutex); |
9360 | return 0; | 9375 | return 0; |
9361 | } | 9376 | } |
9362 | 9377 | ||
@@ -9386,9 +9401,9 @@ static int ipw_wx_sw_reset(struct net_device *dev, | |||
9386 | 9401 | ||
9387 | IPW_DEBUG_WX("SW_RESET\n"); | 9402 | IPW_DEBUG_WX("SW_RESET\n"); |
9388 | 9403 | ||
9389 | down(&priv->sem); | 9404 | mutex_lock(&priv->mutex); |
9390 | 9405 | ||
9391 | ret = ipw_sw_reset(priv, 0); | 9406 | ret = ipw_sw_reset(priv, 2); |
9392 | if (!ret) { | 9407 | if (!ret) { |
9393 | free_firmware(); | 9408 | free_firmware(); |
9394 | ipw_adapter_restart(priv); | 9409 | ipw_adapter_restart(priv); |
@@ -9398,9 +9413,9 @@ static int ipw_wx_sw_reset(struct net_device *dev, | |||
9398 | * module parameter, so take appropriate action */ | 9413 | * module parameter, so take appropriate action */ |
9399 | ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); | 9414 | ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); |
9400 | 9415 | ||
9401 | up(&priv->sem); | 9416 | mutex_unlock(&priv->mutex); |
9402 | ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); | 9417 | ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); |
9403 | down(&priv->sem); | 9418 | mutex_lock(&priv->mutex); |
9404 | 9419 | ||
9405 | if (!(priv->status & STATUS_RF_KILL_MASK)) { | 9420 | if (!(priv->status & STATUS_RF_KILL_MASK)) { |
9406 | /* Configuration likely changed -- force [re]association */ | 9421 | /* Configuration likely changed -- force [re]association */ |
@@ -9410,7 +9425,7 @@ static int ipw_wx_sw_reset(struct net_device *dev, | |||
9410 | ipw_associate(priv); | 9425 | ipw_associate(priv); |
9411 | } | 9426 | } |
9412 | 9427 | ||
9413 | up(&priv->sem); | 9428 | mutex_unlock(&priv->mutex); |
9414 | 9429 | ||
9415 | return 0; | 9430 | return 0; |
9416 | } | 9431 | } |
@@ -9423,6 +9438,8 @@ static iw_handler ipw_wx_handlers[] = { | |||
9423 | IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, | 9438 | IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, |
9424 | IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, | 9439 | IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, |
9425 | IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, | 9440 | IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, |
9441 | IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens, | ||
9442 | IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens, | ||
9426 | IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, | 9443 | IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, |
9427 | IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, | 9444 | IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, |
9428 | IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, | 9445 | IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, |
@@ -9568,7 +9585,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) | |||
9568 | wstats->qual.level = average_value(&priv->average_rssi); | 9585 | wstats->qual.level = average_value(&priv->average_rssi); |
9569 | wstats->qual.noise = average_value(&priv->average_noise); | 9586 | wstats->qual.noise = average_value(&priv->average_noise); |
9570 | wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | | 9587 | wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | |
9571 | IW_QUAL_NOISE_UPDATED; | 9588 | IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM; |
9572 | 9589 | ||
9573 | wstats->miss.beacon = average_value(&priv->average_missed_beacons); | 9590 | wstats->miss.beacon = average_value(&priv->average_missed_beacons); |
9574 | wstats->discard.retries = priv->last_tx_failures; | 9591 | wstats->discard.retries = priv->last_tx_failures; |
@@ -9586,7 +9603,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) | |||
9586 | static void init_sys_config(struct ipw_sys_config *sys_config) | 9603 | static void init_sys_config(struct ipw_sys_config *sys_config) |
9587 | { | 9604 | { |
9588 | memset(sys_config, 0, sizeof(struct ipw_sys_config)); | 9605 | memset(sys_config, 0, sizeof(struct ipw_sys_config)); |
9589 | sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */ | 9606 | sys_config->bt_coexistence = 0; |
9590 | sys_config->answer_broadcast_ssid_probe = 0; | 9607 | sys_config->answer_broadcast_ssid_probe = 0; |
9591 | sys_config->accept_all_data_frames = 0; | 9608 | sys_config->accept_all_data_frames = 0; |
9592 | sys_config->accept_non_directed_frames = 1; | 9609 | sys_config->accept_non_directed_frames = 1; |
@@ -9594,12 +9611,13 @@ static void init_sys_config(struct ipw_sys_config *sys_config) | |||
9594 | sys_config->disable_unicast_decryption = 1; | 9611 | sys_config->disable_unicast_decryption = 1; |
9595 | sys_config->exclude_multicast_unencrypted = 0; | 9612 | sys_config->exclude_multicast_unencrypted = 0; |
9596 | sys_config->disable_multicast_decryption = 1; | 9613 | sys_config->disable_multicast_decryption = 1; |
9597 | sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH; | 9614 | sys_config->antenna_diversity = CFG_SYS_ANTENNA_SLOW_DIV; |
9598 | sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ | 9615 | sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ |
9599 | sys_config->dot11g_auto_detection = 0; | 9616 | sys_config->dot11g_auto_detection = 0; |
9600 | sys_config->enable_cts_to_self = 0; | 9617 | sys_config->enable_cts_to_self = 0; |
9601 | sys_config->bt_coexist_collision_thr = 0; | 9618 | sys_config->bt_coexist_collision_thr = 0; |
9602 | sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256 | 9619 | sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256 |
9620 | sys_config->silence_threshold = 0x1e; | ||
9603 | } | 9621 | } |
9604 | 9622 | ||
9605 | static int ipw_net_open(struct net_device *dev) | 9623 | static int ipw_net_open(struct net_device *dev) |
@@ -9607,11 +9625,11 @@ static int ipw_net_open(struct net_device *dev) | |||
9607 | struct ipw_priv *priv = ieee80211_priv(dev); | 9625 | struct ipw_priv *priv = ieee80211_priv(dev); |
9608 | IPW_DEBUG_INFO("dev->open\n"); | 9626 | IPW_DEBUG_INFO("dev->open\n"); |
9609 | /* we should be verifying the device is ready to be opened */ | 9627 | /* we should be verifying the device is ready to be opened */ |
9610 | down(&priv->sem); | 9628 | mutex_lock(&priv->mutex); |
9611 | if (!(priv->status & STATUS_RF_KILL_MASK) && | 9629 | if (!(priv->status & STATUS_RF_KILL_MASK) && |
9612 | (priv->status & STATUS_ASSOCIATED)) | 9630 | (priv->status & STATUS_ASSOCIATED)) |
9613 | netif_start_queue(dev); | 9631 | netif_start_queue(dev); |
9614 | up(&priv->sem); | 9632 | mutex_unlock(&priv->mutex); |
9615 | return 0; | 9633 | return 0; |
9616 | } | 9634 | } |
9617 | 9635 | ||
@@ -9647,11 +9665,6 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
9647 | u16 remaining_bytes; | 9665 | u16 remaining_bytes; |
9648 | int fc; | 9666 | int fc; |
9649 | 9667 | ||
9650 | /* If there isn't room in the queue, we return busy and let the | ||
9651 | * network stack requeue the packet for us */ | ||
9652 | if (ipw_queue_space(q) < q->high_mark) | ||
9653 | return NETDEV_TX_BUSY; | ||
9654 | |||
9655 | switch (priv->ieee->iw_mode) { | 9668 | switch (priv->ieee->iw_mode) { |
9656 | case IW_MODE_ADHOC: | 9669 | case IW_MODE_ADHOC: |
9657 | hdr_len = IEEE80211_3ADDR_LEN; | 9670 | hdr_len = IEEE80211_3ADDR_LEN; |
@@ -9817,6 +9830,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
9817 | q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); | 9830 | q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); |
9818 | ipw_write32(priv, q->reg_w, q->first_empty); | 9831 | ipw_write32(priv, q->reg_w, q->first_empty); |
9819 | 9832 | ||
9833 | if (ipw_queue_space(q) < q->high_mark) | ||
9834 | netif_stop_queue(priv->net_dev); | ||
9835 | |||
9820 | return NETDEV_TX_OK; | 9836 | return NETDEV_TX_OK; |
9821 | 9837 | ||
9822 | drop: | 9838 | drop: |
@@ -9890,13 +9906,13 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p) | |||
9890 | struct sockaddr *addr = p; | 9906 | struct sockaddr *addr = p; |
9891 | if (!is_valid_ether_addr(addr->sa_data)) | 9907 | if (!is_valid_ether_addr(addr->sa_data)) |
9892 | return -EADDRNOTAVAIL; | 9908 | return -EADDRNOTAVAIL; |
9893 | down(&priv->sem); | 9909 | mutex_lock(&priv->mutex); |
9894 | priv->config |= CFG_CUSTOM_MAC; | 9910 | priv->config |= CFG_CUSTOM_MAC; |
9895 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); | 9911 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); |
9896 | printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n", | 9912 | printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n", |
9897 | priv->net_dev->name, MAC_ARG(priv->mac_addr)); | 9913 | priv->net_dev->name, MAC_ARG(priv->mac_addr)); |
9898 | queue_work(priv->workqueue, &priv->adapter_restart); | 9914 | queue_work(priv->workqueue, &priv->adapter_restart); |
9899 | up(&priv->sem); | 9915 | mutex_unlock(&priv->mutex); |
9900 | return 0; | 9916 | return 0; |
9901 | } | 9917 | } |
9902 | 9918 | ||
@@ -9940,9 +9956,9 @@ static int ipw_ethtool_get_eeprom(struct net_device *dev, | |||
9940 | 9956 | ||
9941 | if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) | 9957 | if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) |
9942 | return -EINVAL; | 9958 | return -EINVAL; |
9943 | down(&p->sem); | 9959 | mutex_lock(&p->mutex); |
9944 | memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); | 9960 | memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); |
9945 | up(&p->sem); | 9961 | mutex_unlock(&p->mutex); |
9946 | return 0; | 9962 | return 0; |
9947 | } | 9963 | } |
9948 | 9964 | ||
@@ -9954,12 +9970,11 @@ static int ipw_ethtool_set_eeprom(struct net_device *dev, | |||
9954 | 9970 | ||
9955 | if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) | 9971 | if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) |
9956 | return -EINVAL; | 9972 | return -EINVAL; |
9957 | down(&p->sem); | 9973 | mutex_lock(&p->mutex); |
9958 | memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); | 9974 | memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); |
9959 | for (i = IPW_EEPROM_DATA; | 9975 | for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) |
9960 | i < IPW_EEPROM_DATA + IPW_EEPROM_IMAGE_SIZE; i++) | 9976 | ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]); |
9961 | ipw_write8(p, i, p->eeprom[i]); | 9977 | mutex_unlock(&p->mutex); |
9962 | up(&p->sem); | ||
9963 | return 0; | 9978 | return 0; |
9964 | } | 9979 | } |
9965 | 9980 | ||
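The ipw_ethtool_set_eeprom hunk above also fixes the write-back loop: the old code indexed the cached EEPROM image with the hardware offset already added, so it read past the IPW_EEPROM_IMAGE_SIZE-byte cache, while the new code walks the cache from 0 and adds IPW_EEPROM_DATA only when forming the register address. Annotated restatement of the fixed loop (comments added here, not new API):

    /* Index the cached image from 0; add the hardware base only for
     * the register address being written. */
    for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
            ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);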
@@ -10054,12 +10069,12 @@ static void ipw_rf_kill(void *adapter) | |||
10054 | static void ipw_bg_rf_kill(void *data) | 10069 | static void ipw_bg_rf_kill(void *data) |
10055 | { | 10070 | { |
10056 | struct ipw_priv *priv = data; | 10071 | struct ipw_priv *priv = data; |
10057 | down(&priv->sem); | 10072 | mutex_lock(&priv->mutex); |
10058 | ipw_rf_kill(data); | 10073 | ipw_rf_kill(data); |
10059 | up(&priv->sem); | 10074 | mutex_unlock(&priv->mutex); |
10060 | } | 10075 | } |
10061 | 10076 | ||
10062 | void ipw_link_up(struct ipw_priv *priv) | 10077 | static void ipw_link_up(struct ipw_priv *priv) |
10063 | { | 10078 | { |
10064 | priv->last_seq_num = -1; | 10079 | priv->last_seq_num = -1; |
10065 | priv->last_frag_num = -1; | 10080 | priv->last_frag_num = -1; |
@@ -10089,12 +10104,12 @@ void ipw_link_up(struct ipw_priv *priv) | |||
10089 | static void ipw_bg_link_up(void *data) | 10104 | static void ipw_bg_link_up(void *data) |
10090 | { | 10105 | { |
10091 | struct ipw_priv *priv = data; | 10106 | struct ipw_priv *priv = data; |
10092 | down(&priv->sem); | 10107 | mutex_lock(&priv->mutex); |
10093 | ipw_link_up(data); | 10108 | ipw_link_up(data); |
10094 | up(&priv->sem); | 10109 | mutex_unlock(&priv->mutex); |
10095 | } | 10110 | } |
10096 | 10111 | ||
10097 | void ipw_link_down(struct ipw_priv *priv) | 10112 | static void ipw_link_down(struct ipw_priv *priv) |
10098 | { | 10113 | { |
10099 | ipw_led_link_down(priv); | 10114 | ipw_led_link_down(priv); |
10100 | netif_carrier_off(priv->net_dev); | 10115 | netif_carrier_off(priv->net_dev); |
@@ -10117,9 +10132,9 @@ void ipw_link_down(struct ipw_priv *priv) | |||
10117 | static void ipw_bg_link_down(void *data) | 10132 | static void ipw_bg_link_down(void *data) |
10118 | { | 10133 | { |
10119 | struct ipw_priv *priv = data; | 10134 | struct ipw_priv *priv = data; |
10120 | down(&priv->sem); | 10135 | mutex_lock(&priv->mutex); |
10121 | ipw_link_down(data); | 10136 | ipw_link_down(data); |
10122 | up(&priv->sem); | 10137 | mutex_unlock(&priv->mutex); |
10123 | } | 10138 | } |
10124 | 10139 | ||
10125 | static int ipw_setup_deferred_work(struct ipw_priv *priv) | 10140 | static int ipw_setup_deferred_work(struct ipw_priv *priv) |
@@ -10292,6 +10307,20 @@ static int ipw_config(struct ipw_priv *priv) | |||
10292 | 10307 | ||
10293 | /* set basic system config settings */ | 10308 | /* set basic system config settings */ |
10294 | init_sys_config(&priv->sys_config); | 10309 | init_sys_config(&priv->sys_config); |
10310 | |||
10311 | /* Support Bluetooth if we have BT h/w on board, and user wants to. | ||
10312 | * Does not support BT priority yet (don't abort or defer our Tx) */ | ||
10313 | if (bt_coexist) { | ||
10314 | unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY]; | ||
10315 | |||
10316 | if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG) | ||
10317 | priv->sys_config.bt_coexistence | ||
10318 | |= CFG_BT_COEXISTENCE_SIGNAL_CHNL; | ||
10319 | if (bt_caps & EEPROM_SKU_CAP_BT_OOB) | ||
10320 | priv->sys_config.bt_coexistence | ||
10321 | |= CFG_BT_COEXISTENCE_OOB; | ||
10322 | } | ||
10323 | |||
10295 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) | 10324 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) |
10296 | priv->sys_config.answer_broadcast_ssid_probe = 1; | 10325 | priv->sys_config.answer_broadcast_ssid_probe = 1; |
10297 | else | 10326 | else |
@@ -10349,6 +10378,9 @@ static int ipw_config(struct ipw_priv *priv) | |||
10349 | * not intended for resale of the above mentioned Intel adapters has | 10378 | * not intended for resale of the above mentioned Intel adapters has |
10350 | * not been tested. | 10379 | * not been tested. |
10351 | * | 10380 | * |
10381 | * Remember to update the table in README.ipw2200 when changing this | ||
10382 | * table. | ||
10383 | * | ||
10352 | */ | 10384 | */ |
10353 | static const struct ieee80211_geo ipw_geos[] = { | 10385 | static const struct ieee80211_geo ipw_geos[] = { |
10354 | { /* Restricted */ | 10386 | { /* Restricted */ |
@@ -10596,96 +10628,6 @@ static const struct ieee80211_geo ipw_geos[] = { | |||
10596 | } | 10628 | } |
10597 | }; | 10629 | }; |
10598 | 10630 | ||
10599 | /* GEO code borrowed from ieee80211_geo.c */ | ||
10600 | static int ipw_is_valid_channel(struct ieee80211_device *ieee, u8 channel) | ||
10601 | { | ||
10602 | int i; | ||
10603 | |||
10604 | /* Driver needs to initialize the geography map before using | ||
10605 | * these helper functions */ | ||
10606 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
10607 | |||
10608 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
10609 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
10610 | /* NOTE: If G mode is currently supported but | ||
10611 | * this is a B only channel, we don't see it | ||
10612 | * as valid. */ | ||
10613 | if ((ieee->geo.bg[i].channel == channel) && | ||
10614 | (!(ieee->mode & IEEE_G) || | ||
10615 | !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) | ||
10616 | return IEEE80211_24GHZ_BAND; | ||
10617 | |||
10618 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
10619 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
10620 | if (ieee->geo.a[i].channel == channel) | ||
10621 | return IEEE80211_52GHZ_BAND; | ||
10622 | |||
10623 | return 0; | ||
10624 | } | ||
10625 | |||
10626 | static int ipw_channel_to_index(struct ieee80211_device *ieee, u8 channel) | ||
10627 | { | ||
10628 | int i; | ||
10629 | |||
10630 | /* Driver needs to initialize the geography map before using | ||
10631 | * these helper functions */ | ||
10632 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
10633 | |||
10634 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
10635 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
10636 | if (ieee->geo.bg[i].channel == channel) | ||
10637 | return i; | ||
10638 | |||
10639 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
10640 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
10641 | if (ieee->geo.a[i].channel == channel) | ||
10642 | return i; | ||
10643 | |||
10644 | return -1; | ||
10645 | } | ||
10646 | |||
10647 | static u8 ipw_freq_to_channel(struct ieee80211_device *ieee, u32 freq) | ||
10648 | { | ||
10649 | int i; | ||
10650 | |||
10651 | /* Driver needs to initialize the geography map before using | ||
10652 | * these helper functions */ | ||
10653 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
10654 | |||
10655 | freq /= 100000; | ||
10656 | |||
10657 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
10658 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
10659 | if (ieee->geo.bg[i].freq == freq) | ||
10660 | return ieee->geo.bg[i].channel; | ||
10661 | |||
10662 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
10663 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
10664 | if (ieee->geo.a[i].freq == freq) | ||
10665 | return ieee->geo.a[i].channel; | ||
10666 | |||
10667 | return 0; | ||
10668 | } | ||
10669 | |||
10670 | static int ipw_set_geo(struct ieee80211_device *ieee, | ||
10671 | const struct ieee80211_geo *geo) | ||
10672 | { | ||
10673 | memcpy(ieee->geo.name, geo->name, 3); | ||
10674 | ieee->geo.name[3] = '\0'; | ||
10675 | ieee->geo.bg_channels = geo->bg_channels; | ||
10676 | ieee->geo.a_channels = geo->a_channels; | ||
10677 | memcpy(ieee->geo.bg, geo->bg, geo->bg_channels * | ||
10678 | sizeof(struct ieee80211_channel)); | ||
10679 | memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * | ||
10680 | sizeof(struct ieee80211_channel)); | ||
10681 | return 0; | ||
10682 | } | ||
10683 | |||
10684 | static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *ieee) | ||
10685 | { | ||
10686 | return &ieee->geo; | ||
10687 | } | ||
10688 | |||
10689 | #define MAX_HW_RESTARTS 5 | 10631 | #define MAX_HW_RESTARTS 5 |
10690 | static int ipw_up(struct ipw_priv *priv) | 10632 | static int ipw_up(struct ipw_priv *priv) |
10691 | { | 10633 | { |
@@ -10732,14 +10674,11 @@ static int ipw_up(struct ipw_priv *priv) | |||
10732 | priv->eeprom[EEPROM_COUNTRY_CODE + 2]); | 10674 | priv->eeprom[EEPROM_COUNTRY_CODE + 2]); |
10733 | j = 0; | 10675 | j = 0; |
10734 | } | 10676 | } |
10735 | if (ipw_set_geo(priv->ieee, &ipw_geos[j])) { | 10677 | if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) { |
10736 | IPW_WARNING("Could not set geography."); | 10678 | IPW_WARNING("Could not set geography."); |
10737 | return 0; | 10679 | return 0; |
10738 | } | 10680 | } |
10739 | 10681 | ||
10740 | IPW_DEBUG_INFO("Geography %03d [%s] detected.\n", | ||
10741 | j, priv->ieee->geo.name); | ||
10742 | |||
10743 | if (priv->status & STATUS_RF_KILL_SW) { | 10682 | if (priv->status & STATUS_RF_KILL_SW) { |
10744 | IPW_WARNING("Radio disabled by module parameter.\n"); | 10683 | IPW_WARNING("Radio disabled by module parameter.\n"); |
10745 | return 0; | 10684 | return 0; |
@@ -10782,9 +10721,9 @@ static int ipw_up(struct ipw_priv *priv) | |||
10782 | static void ipw_bg_up(void *data) | 10721 | static void ipw_bg_up(void *data) |
10783 | { | 10722 | { |
10784 | struct ipw_priv *priv = data; | 10723 | struct ipw_priv *priv = data; |
10785 | down(&priv->sem); | 10724 | mutex_lock(&priv->mutex); |
10786 | ipw_up(data); | 10725 | ipw_up(data); |
10787 | up(&priv->sem); | 10726 | mutex_unlock(&priv->mutex); |
10788 | } | 10727 | } |
10789 | 10728 | ||
10790 | static void ipw_deinit(struct ipw_priv *priv) | 10729 | static void ipw_deinit(struct ipw_priv *priv) |
@@ -10853,23 +10792,23 @@ static void ipw_down(struct ipw_priv *priv) | |||
10853 | static void ipw_bg_down(void *data) | 10792 | static void ipw_bg_down(void *data) |
10854 | { | 10793 | { |
10855 | struct ipw_priv *priv = data; | 10794 | struct ipw_priv *priv = data; |
10856 | down(&priv->sem); | 10795 | mutex_lock(&priv->mutex); |
10857 | ipw_down(data); | 10796 | ipw_down(data); |
10858 | up(&priv->sem); | 10797 | mutex_unlock(&priv->mutex); |
10859 | } | 10798 | } |
10860 | 10799 | ||
10861 | /* Called by register_netdev() */ | 10800 | /* Called by register_netdev() */ |
10862 | static int ipw_net_init(struct net_device *dev) | 10801 | static int ipw_net_init(struct net_device *dev) |
10863 | { | 10802 | { |
10864 | struct ipw_priv *priv = ieee80211_priv(dev); | 10803 | struct ipw_priv *priv = ieee80211_priv(dev); |
10865 | down(&priv->sem); | 10804 | mutex_lock(&priv->mutex); |
10866 | 10805 | ||
10867 | if (ipw_up(priv)) { | 10806 | if (ipw_up(priv)) { |
10868 | up(&priv->sem); | 10807 | mutex_unlock(&priv->mutex); |
10869 | return -EIO; | 10808 | return -EIO; |
10870 | } | 10809 | } |
10871 | 10810 | ||
10872 | up(&priv->sem); | 10811 | mutex_unlock(&priv->mutex); |
10873 | return 0; | 10812 | return 0; |
10874 | } | 10813 | } |
10875 | 10814 | ||
@@ -10959,7 +10898,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
10959 | for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) | 10898 | for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) |
10960 | INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); | 10899 | INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); |
10961 | 10900 | ||
10962 | init_MUTEX(&priv->sem); | 10901 | mutex_init(&priv->mutex); |
10963 | if (pci_enable_device(pdev)) { | 10902 | if (pci_enable_device(pdev)) { |
10964 | err = -ENODEV; | 10903 | err = -ENODEV; |
10965 | goto out_free_ieee80211; | 10904 | goto out_free_ieee80211; |
@@ -11017,7 +10956,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
11017 | SET_MODULE_OWNER(net_dev); | 10956 | SET_MODULE_OWNER(net_dev); |
11018 | SET_NETDEV_DEV(net_dev, &pdev->dev); | 10957 | SET_NETDEV_DEV(net_dev, &pdev->dev); |
11019 | 10958 | ||
11020 | down(&priv->sem); | 10959 | mutex_lock(&priv->mutex); |
11021 | 10960 | ||
11022 | priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; | 10961 | priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; |
11023 | priv->ieee->set_security = shim__set_security; | 10962 | priv->ieee->set_security = shim__set_security; |
@@ -11050,16 +10989,22 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
11050 | err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); | 10989 | err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); |
11051 | if (err) { | 10990 | if (err) { |
11052 | IPW_ERROR("failed to create sysfs device attributes\n"); | 10991 | IPW_ERROR("failed to create sysfs device attributes\n"); |
11053 | up(&priv->sem); | 10992 | mutex_unlock(&priv->mutex); |
11054 | goto out_release_irq; | 10993 | goto out_release_irq; |
11055 | } | 10994 | } |
11056 | 10995 | ||
11057 | up(&priv->sem); | 10996 | mutex_unlock(&priv->mutex); |
11058 | err = register_netdev(net_dev); | 10997 | err = register_netdev(net_dev); |
11059 | if (err) { | 10998 | if (err) { |
11060 | IPW_ERROR("failed to register network device\n"); | 10999 | IPW_ERROR("failed to register network device\n"); |
11061 | goto out_remove_sysfs; | 11000 | goto out_remove_sysfs; |
11062 | } | 11001 | } |
11002 | |||
11003 | printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg " | ||
11004 | "channels, %d 802.11a channels)\n", | ||
11005 | priv->ieee->geo.name, priv->ieee->geo.bg_channels, | ||
11006 | priv->ieee->geo.a_channels); | ||
11007 | |||
11063 | return 0; | 11008 | return 0; |
11064 | 11009 | ||
11065 | out_remove_sysfs: | 11010 | out_remove_sysfs: |
@@ -11091,13 +11036,13 @@ static void ipw_pci_remove(struct pci_dev *pdev) | |||
11091 | if (!priv) | 11036 | if (!priv) |
11092 | return; | 11037 | return; |
11093 | 11038 | ||
11094 | down(&priv->sem); | 11039 | mutex_lock(&priv->mutex); |
11095 | 11040 | ||
11096 | priv->status |= STATUS_EXIT_PENDING; | 11041 | priv->status |= STATUS_EXIT_PENDING; |
11097 | ipw_down(priv); | 11042 | ipw_down(priv); |
11098 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); | 11043 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); |
11099 | 11044 | ||
11100 | up(&priv->sem); | 11045 | mutex_unlock(&priv->mutex); |
11101 | 11046 | ||
11102 | unregister_netdev(priv->net_dev); | 11047 | unregister_netdev(priv->net_dev); |
11103 | 11048 | ||
@@ -11250,8 +11195,10 @@ MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); | |||
11250 | module_param(led, int, 0444); | 11195 | module_param(led, int, 0444); |
11251 | MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); | 11196 | MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); |
11252 | 11197 | ||
11198 | #ifdef CONFIG_IPW2200_DEBUG | ||
11253 | module_param(debug, int, 0444); | 11199 | module_param(debug, int, 0444); |
11254 | MODULE_PARM_DESC(debug, "debug output mask"); | 11200 | MODULE_PARM_DESC(debug, "debug output mask"); |
11201 | #endif | ||
11255 | 11202 | ||
11256 | module_param(channel, int, 0444); | 11203 | module_param(channel, int, 0444); |
11257 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); | 11204 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); |
@@ -11281,12 +11228,18 @@ module_param(mode, int, 0444); | |||
11281 | MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)"); | 11228 | MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)"); |
11282 | #endif | 11229 | #endif |
11283 | 11230 | ||
11231 | module_param(bt_coexist, int, 0444); | ||
11232 | MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)"); | ||
11233 | |||
11284 | module_param(hwcrypto, int, 0444); | 11234 | module_param(hwcrypto, int, 0444); |
11285 | MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default on)"); | 11235 | MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)"); |
11286 | 11236 | ||
11287 | module_param(cmdlog, int, 0444); | 11237 | module_param(cmdlog, int, 0444); |
11288 | MODULE_PARM_DESC(cmdlog, | 11238 | MODULE_PARM_DESC(cmdlog, |
11289 | "allocate a ring buffer for logging firmware commands"); | 11239 | "allocate a ring buffer for logging firmware commands"); |
11290 | 11240 | ||
11241 | module_param(roaming, int, 0444); | ||
11242 | MODULE_PARM_DESC(roaming, "enable roaming support (default on)"); | ||
11243 | |||
11291 | module_exit(ipw_exit); | 11244 | module_exit(ipw_exit); |
11292 | module_init(ipw_init); | 11245 | module_init(ipw_init); |
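Most of the ipw2200.c churn above is one mechanical conversion: the driver-wide struct semaphore used as a binary lock becomes a struct mutex, with down()/up() replaced by mutex_lock()/mutex_unlock() and init_MUTEX() by mutex_init(). A minimal, driver-independent sketch of the pattern:

    #include <linux/mutex.h>

    struct my_priv {
            struct mutex mutex;             /* was: struct semaphore sem; */
    };

    static void my_setup(struct my_priv *priv)
    {
            mutex_init(&priv->mutex);       /* was: init_MUTEX(&priv->sem); */
    }

    static void my_locked_op(struct my_priv *priv)
    {
            mutex_lock(&priv->mutex);       /* was: down(&priv->sem); */
            /* ... critical section ... */
            mutex_unlock(&priv->mutex);     /* was: up(&priv->sem);   */
    }

Unlike a semaphore pressed into service as a lock, a mutex has single-owner semantics and hooks into the kernel's lock debugging, which is the usual motivation for this kind of conversion.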
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index e65620a4d79e..4b9804900702 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify it | 5 | This program is free software; you can redistribute it and/or modify it |
6 | under the terms of version 2 of the GNU General Public License as | 6 | under the terms of version 2 of the GNU General Public License as |
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/moduleparam.h> | 33 | #include <linux/moduleparam.h> |
34 | #include <linux/config.h> | 34 | #include <linux/config.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/mutex.h> | ||
36 | 37 | ||
37 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
38 | #include <linux/netdevice.h> | 39 | #include <linux/netdevice.h> |
@@ -46,6 +47,7 @@ | |||
46 | #include <linux/firmware.h> | 47 | #include <linux/firmware.h> |
47 | #include <linux/wireless.h> | 48 | #include <linux/wireless.h> |
48 | #include <linux/dma-mapping.h> | 49 | #include <linux/dma-mapping.h> |
50 | #include <linux/jiffies.h> | ||
49 | #include <asm/io.h> | 51 | #include <asm/io.h> |
50 | 52 | ||
51 | #include <net/ieee80211.h> | 53 | #include <net/ieee80211.h> |
@@ -244,8 +246,10 @@ enum connection_manager_assoc_states { | |||
244 | #define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31 | 246 | #define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31 |
245 | 247 | ||
246 | #define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1 | 248 | #define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1 |
247 | #define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 24 | 249 | #define IPW_MB_ROAMING_THRESHOLD_MIN 1 |
248 | #define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8 | 250 | #define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8 |
251 | #define IPW_MB_ROAMING_THRESHOLD_MAX 30 | ||
252 | #define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 3*IPW_MB_ROAMING_THRESHOLD_DEFAULT | ||
249 | #define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300 | 253 | #define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300 |
250 | 254 | ||
251 | #define MACADRR_BYTE_LEN 6 | 255 | #define MACADRR_BYTE_LEN 6 |
@@ -616,13 +620,16 @@ struct notif_tgi_tx_key { | |||
616 | u8 reserved; | 620 | u8 reserved; |
617 | } __attribute__ ((packed)); | 621 | } __attribute__ ((packed)); |
618 | 622 | ||
623 | #define SILENCE_OVER_THRESH (1) | ||
624 | #define SILENCE_UNDER_THRESH (2) | ||
625 | |||
619 | struct notif_link_deterioration { | 626 | struct notif_link_deterioration { |
620 | struct ipw_cmd_stats stats; | 627 | struct ipw_cmd_stats stats; |
621 | u8 rate; | 628 | u8 rate; |
622 | u8 modulation; | 629 | u8 modulation; |
623 | struct rate_histogram histogram; | 630 | struct rate_histogram histogram; |
624 | u8 reserved1; | 631 | u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */ |
625 | u16 reserved2; | 632 | u16 silence_count; |
626 | } __attribute__ ((packed)); | 633 | } __attribute__ ((packed)); |
627 | 634 | ||
628 | struct notif_association { | 635 | struct notif_association { |
@@ -780,7 +787,7 @@ struct ipw_sys_config { | |||
780 | u8 enable_cts_to_self; | 787 | u8 enable_cts_to_self; |
781 | u8 enable_multicast_filtering; | 788 | u8 enable_multicast_filtering; |
782 | u8 bt_coexist_collision_thr; | 789 | u8 bt_coexist_collision_thr; |
783 | u8 reserved2; | 790 | u8 silence_threshold; |
784 | u8 accept_all_mgmt_bcpr; | 791 | u8 accept_all_mgmt_bcpr; |
785 | u8 accept_all_mgtm_frames; | 792 | u8 accept_all_mgtm_frames; |
786 | u8 pass_noise_stats_to_host; | 793 | u8 pass_noise_stats_to_host; |
@@ -852,7 +859,7 @@ struct ipw_scan_request_ext { | |||
852 | u16 dwell_time[IPW_SCAN_TYPES]; | 859 | u16 dwell_time[IPW_SCAN_TYPES]; |
853 | } __attribute__ ((packed)); | 860 | } __attribute__ ((packed)); |
854 | 861 | ||
855 | extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) | 862 | static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) |
856 | { | 863 | { |
857 | if (index % 2) | 864 | if (index % 2) |
858 | return scan->scan_type[index / 2] & 0x0F; | 865 | return scan->scan_type[index / 2] & 0x0F; |
@@ -860,7 +867,7 @@ extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) | |||
860 | return (scan->scan_type[index / 2] & 0xF0) >> 4; | 867 | return (scan->scan_type[index / 2] & 0xF0) >> 4; |
861 | } | 868 | } |
862 | 869 | ||
863 | extern inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan, | 870 | static inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan, |
864 | u8 index, u8 scan_type) | 871 | u8 index, u8 scan_type) |
865 | { | 872 | { |
866 | if (index % 2) | 873 | if (index % 2) |
@@ -1120,7 +1127,7 @@ struct ipw_priv { | |||
1120 | struct ieee80211_device *ieee; | 1127 | struct ieee80211_device *ieee; |
1121 | 1128 | ||
1122 | spinlock_t lock; | 1129 | spinlock_t lock; |
1123 | struct semaphore sem; | 1130 | struct mutex mutex; |
1124 | 1131 | ||
1125 | /* basic pci-network driver stuff */ | 1132 | /* basic pci-network driver stuff */ |
1126 | struct pci_dev *pci_dev; | 1133 | struct pci_dev *pci_dev; |
@@ -1406,13 +1413,6 @@ do { if (ipw_debug_level & (level)) \ | |||
1406 | * Register bit definitions | 1413 | * Register bit definitions |
1407 | */ | 1414 | */ |
1408 | 1415 | ||
1409 | /* Dino control registers bits */ | ||
1410 | |||
1411 | #define DINO_ENABLE_SYSTEM 0x80 | ||
1412 | #define DINO_ENABLE_CS 0x40 | ||
1413 | #define DINO_RXFIFO_DATA 0x01 | ||
1414 | #define DINO_CONTROL_REG 0x00200000 | ||
1415 | |||
1416 | #define IPW_INTA_RW 0x00000008 | 1416 | #define IPW_INTA_RW 0x00000008 |
1417 | #define IPW_INTA_MASK_R 0x0000000C | 1417 | #define IPW_INTA_MASK_R 0x0000000C |
1418 | #define IPW_INDIRECT_ADDR 0x00000010 | 1418 | #define IPW_INDIRECT_ADDR 0x00000010 |
@@ -1459,6 +1459,11 @@ do { if (ipw_debug_level & (level)) \ | |||
1459 | #define IPW_DOMAIN_0_END 0x1000 | 1459 | #define IPW_DOMAIN_0_END 0x1000 |
1460 | #define CLX_MEM_BAR_SIZE 0x1000 | 1460 | #define CLX_MEM_BAR_SIZE 0x1000 |
1461 | 1461 | ||
1462 | /* Dino/baseband control registers bits */ | ||
1463 | |||
1464 | #define DINO_ENABLE_SYSTEM 0x80 /* 1 = baseband processor on, 0 = reset */ | ||
1465 | #define DINO_ENABLE_CS 0x40 /* 1 = enable ucode load */ | ||
1466 | #define DINO_RXFIFO_DATA 0x01 /* 1 = data available */ | ||
1462 | #define IPW_BASEBAND_CONTROL_STATUS 0X00200000 | 1467 | #define IPW_BASEBAND_CONTROL_STATUS 0X00200000 |
1463 | #define IPW_BASEBAND_TX_FIFO_WRITE 0X00200004 | 1468 | #define IPW_BASEBAND_TX_FIFO_WRITE 0X00200004 |
1464 | #define IPW_BASEBAND_RX_FIFO_READ 0X00200004 | 1469 | #define IPW_BASEBAND_RX_FIFO_READ 0X00200004 |
@@ -1567,13 +1572,18 @@ do { if (ipw_debug_level & (level)) \ | |||
1567 | #define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */ | 1572 | #define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */ |
1568 | #define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */ | 1573 | #define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */ |
1569 | 1574 | ||
1570 | /* NIC type as found in the one byte EEPROM_NIC_TYPE offset*/ | 1575 | /* NIC type as found in the one byte EEPROM_NIC_TYPE offset */ |
1571 | #define EEPROM_NIC_TYPE_0 0 | 1576 | #define EEPROM_NIC_TYPE_0 0 |
1572 | #define EEPROM_NIC_TYPE_1 1 | 1577 | #define EEPROM_NIC_TYPE_1 1 |
1573 | #define EEPROM_NIC_TYPE_2 2 | 1578 | #define EEPROM_NIC_TYPE_2 2 |
1574 | #define EEPROM_NIC_TYPE_3 3 | 1579 | #define EEPROM_NIC_TYPE_3 3 |
1575 | #define EEPROM_NIC_TYPE_4 4 | 1580 | #define EEPROM_NIC_TYPE_4 4 |
1576 | 1581 | ||
1582 | /* Bluetooth Coexistence capabilities as found in EEPROM_SKU_CAPABILITY */ | ||
1583 | #define EEPROM_SKU_CAP_BT_CHANNEL_SIG 0x01 /* we can tell BT our channel # */ | ||
1584 | #define EEPROM_SKU_CAP_BT_PRIORITY 0x02 /* BT can take priority over us */ | ||
1585 | #define EEPROM_SKU_CAP_BT_OOB 0x04 /* we can signal BT out-of-band */ | ||
1586 | |||
1577 | #define FW_MEM_REG_LOWER_BOUND 0x00300000 | 1587 | #define FW_MEM_REG_LOWER_BOUND 0x00300000 |
1578 | #define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40) | 1588 | #define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40) |
1579 | #define IPW_EVENT_REG (FW_MEM_REG_LOWER_BOUND + 0x04) | 1589 | #define IPW_EVENT_REG (FW_MEM_REG_LOWER_BOUND + 0x04) |
@@ -1658,9 +1668,10 @@ enum { | |||
1658 | IPW_FW_ERROR_FATAL_ERROR | 1668 | IPW_FW_ERROR_FATAL_ERROR |
1659 | }; | 1669 | }; |
1660 | 1670 | ||
1661 | #define AUTH_OPEN 0 | 1671 | #define AUTH_OPEN 0 |
1662 | #define AUTH_SHARED_KEY 1 | 1672 | #define AUTH_SHARED_KEY 1 |
1663 | #define AUTH_IGNORE 3 | 1673 | #define AUTH_LEAP 2 |
1674 | #define AUTH_IGNORE 3 | ||
1664 | 1675 | ||
1665 | #define HC_ASSOCIATE 0 | 1676 | #define HC_ASSOCIATE 0 |
1666 | #define HC_REASSOCIATE 1 | 1677 | #define HC_REASSOCIATE 1 |
@@ -1860,7 +1871,7 @@ struct host_cmd { | |||
1860 | u8 cmd; | 1871 | u8 cmd; |
1861 | u8 len; | 1872 | u8 len; |
1862 | u16 reserved; | 1873 | u16 reserved; |
1863 | u32 param[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; | 1874 | u32 *param; |
1864 | } __attribute__ ((packed)); | 1875 | } __attribute__ ((packed)); |
1865 | 1876 | ||
1866 | struct ipw_cmd_log { | 1877 | struct ipw_cmd_log { |
@@ -1869,21 +1880,24 @@ struct ipw_cmd_log { | |||
1869 | struct host_cmd cmd; | 1880 | struct host_cmd cmd; |
1870 | }; | 1881 | }; |
1871 | 1882 | ||
1872 | #define CFG_BT_COEXISTENCE_MIN 0x00 | 1883 | /* SysConfig command parameters ... */ |
1873 | #define CFG_BT_COEXISTENCE_DEFER 0x02 | 1884 | /* bt_coexistence param */ |
1874 | #define CFG_BT_COEXISTENCE_KILL 0x04 | 1885 | #define CFG_BT_COEXISTENCE_SIGNAL_CHNL 0x01 /* tell BT our chnl # */ |
1875 | #define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08 | 1886 | #define CFG_BT_COEXISTENCE_DEFER 0x02 /* defer our Tx if BT traffic */ |
1876 | #define CFG_BT_COEXISTENCE_OOB 0x10 | 1887 | #define CFG_BT_COEXISTENCE_KILL 0x04 /* kill our Tx if BT traffic */ |
1877 | #define CFG_BT_COEXISTENCE_MAX 0xFF | 1888 | #define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08 /* multimedia extensions */ |
1878 | #define CFG_BT_COEXISTENCE_DEF 0x80 /* read Bt from EEPROM */ | 1889 | #define CFG_BT_COEXISTENCE_OOB 0x10 /* signal BT via out-of-band */ |
1879 | 1890 | ||
1880 | #define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x0 | 1891 | /* clear-to-send to self param */ |
1881 | #define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x1 | 1892 | #define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x00 |
1893 | #define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x01 | ||
1882 | #define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN | 1894 | #define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN |
1883 | 1895 | ||
1884 | #define CFG_SYS_ANTENNA_BOTH 0x000 | 1896 | /* Antenna diversity param (h/w can select best antenna, based on signal) */ |
1885 | #define CFG_SYS_ANTENNA_A 0x001 | 1897 | #define CFG_SYS_ANTENNA_BOTH 0x00 /* NIC selects best antenna */ |
1886 | #define CFG_SYS_ANTENNA_B 0x003 | 1898 | #define CFG_SYS_ANTENNA_A 0x01 /* force antenna A */ |
1899 | #define CFG_SYS_ANTENNA_B 0x03 /* force antenna B */ | ||
1900 | #define CFG_SYS_ANTENNA_SLOW_DIV 0x02 /* consider background noise */ | ||
1887 | 1901 | ||
1888 | /* | 1902 | /* |
1889 | * The definitions below were lifted off the ipw2100 driver, which only | 1903 | * The definitions below were lifted off the ipw2100 driver, which only |
@@ -1899,27 +1913,4 @@ struct ipw_cmd_log { | |||
1899 | 1913 | ||
1900 | #define IPW_MAX_CONFIG_RETRIES 10 | 1914 | #define IPW_MAX_CONFIG_RETRIES 10 |
1901 | 1915 | ||
1902 | static inline u32 frame_hdr_len(struct ieee80211_hdr_4addr *hdr) | ||
1903 | { | ||
1904 | u32 retval; | ||
1905 | u16 fc; | ||
1906 | |||
1907 | retval = sizeof(struct ieee80211_hdr_3addr); | ||
1908 | fc = le16_to_cpu(hdr->frame_ctl); | ||
1909 | |||
1910 | /* | ||
1911 | * Function ToDS FromDS | ||
1912 | * IBSS 0 0 | ||
1913 | * To AP 1 0 | ||
1914 | * From AP 0 1 | ||
1915 | * WDS (bridge) 1 1 | ||
1916 | * | ||
1917 | * Only WDS frames use Address4 among them. --YZ | ||
1918 | */ | ||
1919 | if (!(fc & IEEE80211_FCTL_TODS) || !(fc & IEEE80211_FCTL_FROMDS)) | ||
1920 | retval -= ETH_ALEN; | ||
1921 | |||
1922 | return retval; | ||
1923 | } | ||
1924 | |||
1925 | #endif /* __ipw2200_h__ */ | 1916 | #endif /* __ipw2200_h__ */ |
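One smaller point in the ipw2200.h changes above: the extern inline helpers become static inline. Under the GNU89 inline rules the kernel was built with at the time, extern inline emits no out-of-line definition, so any call the compiler declines to inline has nothing to link against; static inline gives each translation unit its own fallback copy. Sketch only, with an illustrative body:

    /* Preferred form for small helpers defined in a header: */
    static inline u8 get_low_nibble(u8 v)
    {
            return v & 0x0F;
    }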
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index bf6271ee387a..75ce6ddb0cf5 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c | |||
@@ -55,10 +55,8 @@ | |||
55 | #include <linux/etherdevice.h> | 55 | #include <linux/etherdevice.h> |
56 | #include <linux/skbuff.h> | 56 | #include <linux/skbuff.h> |
57 | #include <linux/bitops.h> | 57 | #include <linux/bitops.h> |
58 | #ifdef CONFIG_NET_RADIO | ||
59 | #include <linux/wireless.h> | 58 | #include <linux/wireless.h> |
60 | #include <net/iw_handler.h> | 59 | #include <net/iw_handler.h> |
61 | #endif | ||
62 | 60 | ||
63 | #include <pcmcia/cs_types.h> | 61 | #include <pcmcia/cs_types.h> |
64 | #include <pcmcia/cs.h> | 62 | #include <pcmcia/cs.h> |
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index 18baacfc5a2c..18a44580b53b 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c | |||
@@ -112,7 +112,7 @@ static const char StripVersion[] = "1.3A-STUART.CHESHIRE"; | |||
112 | #include <linux/ip.h> | 112 | #include <linux/ip.h> |
113 | #include <linux/tcp.h> | 113 | #include <linux/tcp.h> |
114 | #include <linux/time.h> | 114 | #include <linux/time.h> |
115 | 115 | #include <linux/jiffies.h> | |
116 | 116 | ||
117 | /************************************************************************/ | 117 | /************************************************************************/ |
118 | /* Useful structures and definitions */ | 118 | /* Useful structures and definitions */ |
@@ -1569,7 +1569,7 @@ static int strip_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1569 | del_timer(&strip_info->idle_timer); | 1569 | del_timer(&strip_info->idle_timer); |
1570 | 1570 | ||
1571 | 1571 | ||
1572 | if (jiffies - strip_info->pps_timer > HZ) { | 1572 | if (time_after(jiffies, strip_info->pps_timer + HZ)) { |
1573 | unsigned long t = jiffies - strip_info->pps_timer; | 1573 | unsigned long t = jiffies - strip_info->pps_timer; |
1574 | unsigned long rx_pps_count = (strip_info->rx_pps_count * HZ * 8 + t / 2) / t; | 1574 | unsigned long rx_pps_count = (strip_info->rx_pps_count * HZ * 8 + t / 2) / t; |
1575 | unsigned long tx_pps_count = (strip_info->tx_pps_count * HZ * 8 + t / 2) / t; | 1575 | unsigned long tx_pps_count = (strip_info->tx_pps_count * HZ * 8 + t / 2) / t; |
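strip.c here, like zorro8390.c further down, replaces open-coded jiffies arithmetic of the form "jiffies - start > timeout" with the time_after() helper from linux/jiffies.h, which compares two jiffies values in a way that stays correct across counter wraparound and reads as a deadline check rather than raw arithmetic. A minimal sketch; condition_met() is a hypothetical placeholder:

    #include <linux/jiffies.h>

    unsigned long deadline = jiffies + 2 * HZ / 100;    /* ~20 ms from now */

    while (!condition_met()) {
            /* time_after() handles jiffies wraparound internally. */
            if (time_after(jiffies, deadline))
                    break;                              /* timed out */
    }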
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h index 166e28b9a4f7..5cb0bc8bb128 100644 --- a/drivers/net/wireless/wavelan.p.h +++ b/drivers/net/wireless/wavelan.p.h | |||
@@ -98,11 +98,7 @@ | |||
98 | * characteristics of the hardware. Applications such as mobile IP may | 98 | * characteristics of the hardware. Applications such as mobile IP may |
99 | * take advantage of it. | 99 | * take advantage of it. |
100 | * | 100 | * |
101 | * You will need to enable the CONFIG_NET_RADIO define in the kernel | 101 | * It might be a good idea as well to fetch the wireless tools to |
102 | * configuration to enable the wireless extensions (this is the one | ||
103 | * giving access to the radio network device choice). | ||
104 | * | ||
105 | * It might also be a good idea as well to fetch the wireless tools to | ||
106 | * configure the device and play a bit. | 102 | * configure the device and play a bit. |
107 | */ | 103 | */ |
108 | 104 | ||
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h index f2d597568151..451f6271dcbc 100644 --- a/drivers/net/wireless/wavelan_cs.p.h +++ b/drivers/net/wireless/wavelan_cs.p.h | |||
@@ -99,11 +99,7 @@ | |||
99 | * caracteristics of the hardware in a standard way and support for | 99 | * caracteristics of the hardware in a standard way and support for |
100 | * applications for taking advantage of it (like Mobile IP). | 100 | * applications for taking advantage of it (like Mobile IP). |
101 | * | 101 | * |
102 | * You will need to enable the CONFIG_NET_RADIO define in the kernel | 102 | * It might be a good idea as well to fetch the wireless tools to |
103 | * configuration to enable the wireless extensions (this is the one | ||
104 | * giving access to the radio network device choice). | ||
105 | * | ||
106 | * It might also be a good idea as well to fetch the wireless tools to | ||
107 | * configure the device and play a bit. | 103 | * configure the device and play a bit. |
108 | */ | 104 | */ |
109 | 105 | ||
@@ -440,11 +436,8 @@ | |||
440 | #include <linux/ioport.h> | 436 | #include <linux/ioport.h> |
441 | #include <linux/fcntl.h> | 437 | #include <linux/fcntl.h> |
442 | #include <linux/ethtool.h> | 438 | #include <linux/ethtool.h> |
443 | |||
444 | #ifdef CONFIG_NET_RADIO | ||
445 | #include <linux/wireless.h> /* Wireless extensions */ | 439 | #include <linux/wireless.h> /* Wireless extensions */ |
446 | #include <net/iw_handler.h> /* New driver API */ | 440 | #include <net/iw_handler.h> /* New driver API */ |
447 | #endif | ||
448 | 441 | ||
449 | /* Pcmcia headers that we need */ | 442 | /* Pcmcia headers that we need */ |
450 | #include <pcmcia/cs_types.h> | 443 | #include <pcmcia/cs_types.h> |
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 1c2506535f7e..75d56bfef0ee 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -69,8 +69,8 @@ static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ | |||
69 | static int dma_ctrl = 0x00CAC277; /* Override when loading module! */ | 69 | static int dma_ctrl = 0x00CAC277; /* Override when loading module! */ |
70 | static int fifo_cfg = 0x0028; | 70 | static int fifo_cfg = 0x0028; |
71 | #else | 71 | #else |
72 | static int dma_ctrl = 0x004A0263; /* Constrained by errata */ | 72 | static const int dma_ctrl = 0x004A0263; /* Constrained by errata */ |
73 | static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ | 73 | static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ |
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | 76 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. |
@@ -266,7 +266,7 @@ struct pci_id_info { | |||
266 | int drv_flags; /* Driver use, intended as capability flags. */ | 266 | int drv_flags; /* Driver use, intended as capability flags. */ |
267 | }; | 267 | }; |
268 | 268 | ||
269 | static struct pci_id_info pci_id_tbl[] = { | 269 | static const struct pci_id_info pci_id_tbl[] = { |
270 | {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, | 270 | {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, |
271 | PCI_IOTYPE, YELLOWFIN_SIZE, | 271 | PCI_IOTYPE, YELLOWFIN_SIZE, |
272 | FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, | 272 | FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, |
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index 8ab6e12153ba..761021603597 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/netdevice.h> | 27 | #include <linux/netdevice.h> |
28 | #include <linux/etherdevice.h> | 28 | #include <linux/etherdevice.h> |
29 | #include <linux/zorro.h> | 29 | #include <linux/zorro.h> |
30 | #include <linux/jiffies.h> | ||
30 | 31 | ||
31 | #include <asm/system.h> | 32 | #include <asm/system.h> |
32 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
@@ -151,7 +152,7 @@ static int __devinit zorro8390_init(struct net_device *dev, | |||
151 | z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); | 152 | z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); |
152 | 153 | ||
153 | while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) | 154 | while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) |
154 | if (jiffies - reset_start_time > 2*HZ/100) { | 155 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
155 | printk(KERN_WARNING " not found (no reset ack).\n"); | 156 | printk(KERN_WARNING " not found (no reset ack).\n"); |
156 | return -ENODEV; | 157 | return -ENODEV; |
157 | } | 158 | } |
@@ -273,7 +274,7 @@ static void zorro8390_reset_8390(struct net_device *dev) | |||
273 | 274 | ||
274 | /* This check _should_not_ be necessary, omit eventually. */ | 275 | /* This check _should_not_ be necessary, omit eventually. */ |
275 | while ((z_readb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) | 276 | while ((z_readb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) |
276 | if (jiffies - reset_start_time > 2*HZ/100) { | 277 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
277 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", | 278 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", |
278 | dev->name); | 279 | dev->name); |
279 | break; | 280 | break; |
@@ -400,7 +401,7 @@ static void zorro8390_block_output(struct net_device *dev, int count, | |||
400 | dma_start = jiffies; | 401 | dma_start = jiffies; |
401 | 402 | ||
402 | while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) | 403 | while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) |
403 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 404 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
404 | printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n", | 405 | printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n", |
405 | dev->name); | 406 | dev->name); |
406 | zorro8390_reset_8390(dev); | 407 | zorro8390_reset_8390(dev); |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 2e727f49ad19..44133250da2e 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -273,7 +273,7 @@ removeseg: | |||
273 | list_del(&dev_info->lh); | 273 | list_del(&dev_info->lh); |
274 | 274 | ||
275 | del_gendisk(dev_info->gd); | 275 | del_gendisk(dev_info->gd); |
276 | blk_put_queue(dev_info->dcssblk_queue); | 276 | blk_cleanup_queue(dev_info->dcssblk_queue); |
277 | dev_info->gd->queue = NULL; | 277 | dev_info->gd->queue = NULL; |
278 | put_disk(dev_info->gd); | 278 | put_disk(dev_info->gd); |
279 | device_unregister(dev); | 279 | device_unregister(dev); |
@@ -491,7 +491,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
491 | unregister_dev: | 491 | unregister_dev: |
492 | PRINT_ERR("device_create_file() failed!\n"); | 492 | PRINT_ERR("device_create_file() failed!\n"); |
493 | list_del(&dev_info->lh); | 493 | list_del(&dev_info->lh); |
494 | blk_put_queue(dev_info->dcssblk_queue); | 494 | blk_cleanup_queue(dev_info->dcssblk_queue); |
495 | dev_info->gd->queue = NULL; | 495 | dev_info->gd->queue = NULL; |
496 | put_disk(dev_info->gd); | 496 | put_disk(dev_info->gd); |
497 | device_unregister(&dev_info->dev); | 497 | device_unregister(&dev_info->dev); |
@@ -505,7 +505,7 @@ list_del: | |||
505 | unload_seg: | 505 | unload_seg: |
506 | segment_unload(local_buf); | 506 | segment_unload(local_buf); |
507 | dealloc_gendisk: | 507 | dealloc_gendisk: |
508 | blk_put_queue(dev_info->dcssblk_queue); | 508 | blk_cleanup_queue(dev_info->dcssblk_queue); |
509 | dev_info->gd->queue = NULL; | 509 | dev_info->gd->queue = NULL; |
510 | put_disk(dev_info->gd); | 510 | put_disk(dev_info->gd); |
511 | free_dev_info: | 511 | free_dev_info: |
@@ -562,7 +562,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch | |||
562 | list_del(&dev_info->lh); | 562 | list_del(&dev_info->lh); |
563 | 563 | ||
564 | del_gendisk(dev_info->gd); | 564 | del_gendisk(dev_info->gd); |
565 | blk_put_queue(dev_info->dcssblk_queue); | 565 | blk_cleanup_queue(dev_info->dcssblk_queue); |
566 | dev_info->gd->queue = NULL; | 566 | dev_info->gd->queue = NULL; |
567 | put_disk(dev_info->gd); | 567 | put_disk(dev_info->gd); |
568 | device_unregister(&dev_info->dev); | 568 | device_unregister(&dev_info->dev); |
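The dcssblk teardown and error paths above switch from blk_put_queue(), which only drops a reference to the request queue, to blk_cleanup_queue(), which shuts the queue down before releasing it. The ordering used in the patch, restated with comments (sketch of the existing code, not new API):

    del_gendisk(dev_info->gd);                  /* unlink the disk from the system */
    blk_cleanup_queue(dev_info->dcssblk_queue); /* shut down the request queue     */
    dev_info->gd->queue = NULL;                 /* disk no longer references it    */
    put_disk(dev_info->gd);                     /* drop the gendisk reference      */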
diff --git a/fs/jfs/Makefile b/fs/jfs/Makefile index 6f1e0e95587a..3adb6395e42d 100644 --- a/fs/jfs/Makefile +++ b/fs/jfs/Makefile | |||
@@ -8,7 +8,8 @@ jfs-y := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \ | |||
8 | jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \ | 8 | jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \ |
9 | jfs_unicode.o jfs_dtree.o jfs_inode.o \ | 9 | jfs_unicode.o jfs_dtree.o jfs_inode.o \ |
10 | jfs_extent.o symlink.o jfs_metapage.o \ | 10 | jfs_extent.o symlink.o jfs_metapage.o \ |
11 | jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o resize.o xattr.o | 11 | jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o \ |
12 | resize.o xattr.o ioctl.o | ||
12 | 13 | ||
13 | jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o | 14 | jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o |
14 | 15 | ||
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 461e4934ca7c..e2281300979c 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c | |||
@@ -183,6 +183,9 @@ cleanup: | |||
183 | posix_acl_release(acl); | 183 | posix_acl_release(acl); |
184 | } else | 184 | } else |
185 | inode->i_mode &= ~current->fs->umask; | 185 | inode->i_mode &= ~current->fs->umask; |
186 | |||
187 | JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) | | ||
188 | inode->i_mode; | ||
186 | 189 | ||
187 | return rc; | 190 | return rc; |
188 | } | 191 | } |
@@ -207,12 +210,12 @@ static int jfs_acl_chmod(struct inode *inode) | |||
207 | rc = posix_acl_chmod_masq(clone, inode->i_mode); | 210 | rc = posix_acl_chmod_masq(clone, inode->i_mode); |
208 | if (!rc) { | 211 | if (!rc) { |
209 | tid_t tid = txBegin(inode->i_sb, 0); | 212 | tid_t tid = txBegin(inode->i_sb, 0); |
210 | down(&JFS_IP(inode)->commit_sem); | 213 | mutex_lock(&JFS_IP(inode)->commit_mutex); |
211 | rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone); | 214 | rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone); |
212 | if (!rc) | 215 | if (!rc) |
213 | rc = txCommit(tid, 1, &inode, 0); | 216 | rc = txCommit(tid, 1, &inode, 0); |
214 | txEnd(tid); | 217 | txEnd(tid); |
215 | up(&JFS_IP(inode)->commit_sem); | 218 | mutex_unlock(&JFS_IP(inode)->commit_mutex); |
216 | } | 219 | } |
217 | 220 | ||
218 | posix_acl_release(clone); | 221 | posix_acl_release(clone); |
diff --git a/fs/jfs/file.c b/fs/jfs/file.c index c2c19c9ed9a4..e1ac6e497e2b 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c | |||
@@ -113,4 +113,5 @@ struct file_operations jfs_file_operations = { | |||
113 | .sendfile = generic_file_sendfile, | 113 | .sendfile = generic_file_sendfile, |
114 | .fsync = jfs_fsync, | 114 | .fsync = jfs_fsync, |
115 | .release = jfs_release, | 115 | .release = jfs_release, |
116 | .ioctl = jfs_ioctl, | ||
116 | }; | 117 | }; |
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 9f942ca8e4e3..51a5fed90cca 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
@@ -55,6 +55,7 @@ void jfs_read_inode(struct inode *inode) | |||
55 | inode->i_op = &jfs_file_inode_operations; | 55 | inode->i_op = &jfs_file_inode_operations; |
56 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | 56 | init_special_inode(inode, inode->i_mode, inode->i_rdev); |
57 | } | 57 | } |
58 | jfs_set_inode_flags(inode); | ||
58 | } | 59 | } |
59 | 60 | ||
60 | /* | 61 | /* |
@@ -89,16 +90,16 @@ int jfs_commit_inode(struct inode *inode, int wait) | |||
89 | } | 90 | } |
90 | 91 | ||
91 | tid = txBegin(inode->i_sb, COMMIT_INODE); | 92 | tid = txBegin(inode->i_sb, COMMIT_INODE); |
92 | down(&JFS_IP(inode)->commit_sem); | 93 | mutex_lock(&JFS_IP(inode)->commit_mutex); |
93 | 94 | ||
94 | /* | 95 | /* |
95 | * Retest inode state after taking commit_sem | 96 | * Retest inode state after taking commit_mutex |
96 | */ | 97 | */ |
97 | if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode)) | 98 | if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode)) |
98 | rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0); | 99 | rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0); |
99 | 100 | ||
100 | txEnd(tid); | 101 | txEnd(tid); |
101 | up(&JFS_IP(inode)->commit_sem); | 102 | mutex_unlock(&JFS_IP(inode)->commit_mutex); |
102 | return rc; | 103 | return rc; |
103 | } | 104 | } |
104 | 105 | ||
@@ -335,18 +336,18 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length) | |||
335 | tid = txBegin(ip->i_sb, 0); | 336 | tid = txBegin(ip->i_sb, 0); |
336 | 337 | ||
337 | /* | 338 | /* |
338 | * The commit_sem cannot be taken before txBegin. | 339 | * The commit_mutex cannot be taken before txBegin. |
339 | * txBegin may block and there is a chance the inode | 340 | * txBegin may block and there is a chance the inode |
340 | * could be marked dirty and need to be committed | 341 | * could be marked dirty and need to be committed |
341 | * before txBegin unblocks | 342 | * before txBegin unblocks |
342 | */ | 343 | */ |
343 | down(&JFS_IP(ip)->commit_sem); | 344 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
344 | 345 | ||
345 | newsize = xtTruncate(tid, ip, length, | 346 | newsize = xtTruncate(tid, ip, length, |
346 | COMMIT_TRUNCATE | COMMIT_PWMAP); | 347 | COMMIT_TRUNCATE | COMMIT_PWMAP); |
347 | if (newsize < 0) { | 348 | if (newsize < 0) { |
348 | txEnd(tid); | 349 | txEnd(tid); |
349 | up(&JFS_IP(ip)->commit_sem); | 350 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
350 | break; | 351 | break; |
351 | } | 352 | } |
352 | 353 | ||
@@ -355,7 +356,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length) | |||
355 | 356 | ||
356 | txCommit(tid, 1, &ip, 0); | 357 | txCommit(tid, 1, &ip, 0); |
357 | txEnd(tid); | 358 | txEnd(tid); |
358 | up(&JFS_IP(ip)->commit_sem); | 359 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
359 | } while (newsize > length); /* Truncate isn't always atomic */ | 360 | } while (newsize > length); /* Truncate isn't always atomic */ |
360 | } | 361 | } |
361 | 362 | ||
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c new file mode 100644 index 000000000000..67b3774820eb --- /dev/null +++ b/fs/jfs/ioctl.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * linux/fs/jfs/ioctl.c | ||
3 | * | ||
4 | * Copyright (C) 2006 Herbert Poetzl | ||
5 | * adapted from Remy Card's ext2/ioctl.c | ||
6 | */ | ||
7 | |||
8 | #include <linux/fs.h> | ||
9 | #include <linux/ext2_fs.h> | ||
10 | #include <linux/ctype.h> | ||
11 | #include <linux/capability.h> | ||
12 | #include <linux/time.h> | ||
13 | #include <asm/current.h> | ||
14 | #include <asm/uaccess.h> | ||
15 | |||
16 | #include "jfs_incore.h" | ||
17 | #include "jfs_dinode.h" | ||
18 | #include "jfs_inode.h" | ||
19 | |||
20 | |||
21 | static struct { | ||
22 | long jfs_flag; | ||
23 | long ext2_flag; | ||
24 | } jfs_map[] = { | ||
25 | {JFS_NOATIME_FL, EXT2_NOATIME_FL}, | ||
26 | {JFS_DIRSYNC_FL, EXT2_DIRSYNC_FL}, | ||
27 | {JFS_SYNC_FL, EXT2_SYNC_FL}, | ||
28 | {JFS_SECRM_FL, EXT2_SECRM_FL}, | ||
29 | {JFS_UNRM_FL, EXT2_UNRM_FL}, | ||
30 | {JFS_APPEND_FL, EXT2_APPEND_FL}, | ||
31 | {JFS_IMMUTABLE_FL, EXT2_IMMUTABLE_FL}, | ||
32 | {0, 0}, | ||
33 | }; | ||
34 | |||
35 | static long jfs_map_ext2(unsigned long flags, int from) | ||
36 | { | ||
37 | int index=0; | ||
38 | long mapped=0; | ||
39 | |||
40 | while (jfs_map[index].jfs_flag) { | ||
41 | if (from) { | ||
42 | if (jfs_map[index].ext2_flag & flags) | ||
43 | mapped |= jfs_map[index].jfs_flag; | ||
44 | } else { | ||
45 | if (jfs_map[index].jfs_flag & flags) | ||
46 | mapped |= jfs_map[index].ext2_flag; | ||
47 | } | ||
48 | index++; | ||
49 | } | ||
50 | return mapped; | ||
51 | } | ||
52 | |||
53 | |||
54 | int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, | ||
55 | unsigned long arg) | ||
56 | { | ||
57 | struct jfs_inode_info *jfs_inode = JFS_IP(inode); | ||
58 | unsigned int flags; | ||
59 | |||
60 | switch (cmd) { | ||
61 | case JFS_IOC_GETFLAGS: | ||
62 | flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE; | ||
63 | flags = jfs_map_ext2(flags, 0); | ||
64 | return put_user(flags, (int __user *) arg); | ||
65 | case JFS_IOC_SETFLAGS: { | ||
66 | unsigned int oldflags; | ||
67 | |||
68 | if (IS_RDONLY(inode)) | ||
69 | return -EROFS; | ||
70 | |||
71 | if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) | ||
72 | return -EACCES; | ||
73 | |||
74 | if (get_user(flags, (int __user *) arg)) | ||
75 | return -EFAULT; | ||
76 | |||
77 | flags = jfs_map_ext2(flags, 1); | ||
78 | if (!S_ISDIR(inode->i_mode)) | ||
79 | flags &= ~JFS_DIRSYNC_FL; | ||
80 | |||
81 | oldflags = jfs_inode->mode2; | ||
82 | |||
83 | /* | ||
84 | * The IMMUTABLE and APPEND_ONLY flags can only be changed by | ||
85 | * the relevant capability. | ||
86 | */ | ||
87 | if ((oldflags & JFS_IMMUTABLE_FL) || | ||
88 | ((flags ^ oldflags) & | ||
89 | (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) { | ||
90 | if (!capable(CAP_LINUX_IMMUTABLE)) | ||
91 | return -EPERM; | ||
92 | } | ||
93 | |||
94 | flags = flags & JFS_FL_USER_MODIFIABLE; | ||
95 | flags |= oldflags & ~JFS_FL_USER_MODIFIABLE; | ||
96 | jfs_inode->mode2 = flags; | ||
97 | |||
98 | jfs_set_inode_flags(inode); | ||
99 | inode->i_ctime = CURRENT_TIME_SEC; | ||
100 | mark_inode_dirty(inode); | ||
101 | return 0; | ||
102 | } | ||
103 | default: | ||
104 | return -ENOTTY; | ||
105 | } | ||
106 | } | ||
107 | |||
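The JFS_IOC_SETFLAGS branch above filters the user-supplied value through JFS_FL_USER_MODIFIABLE so that only user-settable flags can change while the rest of mode2 survives untouched. A self-contained restatement of that masking step (the mask value is copied from jfs_dinode.h below; the function name is illustrative):

    #include <stdio.h>

    #define JFS_FL_USER_MODIFIABLE 0x03F80000

    static unsigned int apply_setflags(unsigned int oldflags, unsigned int requested)
    {
        requested &= JFS_FL_USER_MODIFIABLE;                      /* only user-settable bits change */
        return requested | (oldflags & ~JFS_FL_USER_MODIFIABLE);  /* everything else is preserved   */
    }

    int main(void)
    {
        unsigned int mode2 = 0x000001ff;   /* stands in for non-flag mode2 state */
        unsigned int want  = 0x02000000;   /* JFS_IMMUTABLE_FL                   */

        printf("mode2 0x%08x -> 0x%08x\n", mode2, apply_setflags(mode2, want));
        return 0;
    }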
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h index 580a3258449b..9f2572aea561 100644 --- a/fs/jfs/jfs_dinode.h +++ b/fs/jfs/jfs_dinode.h | |||
@@ -139,13 +139,36 @@ struct dinode { | |||
139 | 139 | ||
140 | /* more extended mode bits: attributes for OS/2 */ | 140 | /* more extended mode bits: attributes for OS/2 */ |
141 | #define IREADONLY 0x02000000 /* no write access to file */ | 141 | #define IREADONLY 0x02000000 /* no write access to file */ |
142 | #define IARCHIVE 0x40000000 /* file archive bit */ | ||
143 | #define ISYSTEM 0x08000000 /* system file */ | ||
144 | #define IHIDDEN 0x04000000 /* hidden file */ | 142 | #define IHIDDEN 0x04000000 /* hidden file */ |
145 | #define IRASH 0x4E000000 /* mask for changeable attributes */ | 143 | #define ISYSTEM 0x08000000 /* system file */ |
146 | #define INEWNAME 0x80000000 /* non-8.3 filename format */ | 144 | |
147 | #define IDIRECTORY 0x20000000 /* directory (shadow of real bit) */ | 145 | #define IDIRECTORY 0x20000000 /* directory (shadow of real bit) */ |
146 | #define IARCHIVE 0x40000000 /* file archive bit */ | ||
147 | #define INEWNAME 0x80000000 /* non-8.3 filename format */ | ||
148 | |||
149 | #define IRASH 0x4E000000 /* mask for changeable attributes */ | ||
148 | #define ATTRSHIFT 25 /* bits to shift to move attribute | 150 | #define ATTRSHIFT 25 /* bits to shift to move attribute |
149 | specification to mode position */ | 151 | specification to mode position */ |
150 | 152 | ||
153 | /* extended attributes for Linux */ | ||
154 | |||
155 | #define JFS_NOATIME_FL 0x00080000 /* do not update atime */ | ||
156 | |||
157 | #define JFS_DIRSYNC_FL 0x00100000 /* dirsync behaviour */ | ||
158 | #define JFS_SYNC_FL 0x00200000 /* Synchronous updates */ | ||
159 | #define JFS_SECRM_FL 0x00400000 /* Secure deletion */ | ||
160 | #define JFS_UNRM_FL 0x00800000 /* allow for undelete */ | ||
161 | |||
162 | #define JFS_APPEND_FL 0x01000000 /* writes to file may only append */ | ||
163 | #define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */ | ||
164 | |||
165 | #define JFS_FL_USER_VISIBLE 0x03F80000 | ||
166 | #define JFS_FL_USER_MODIFIABLE 0x03F80000 | ||
167 | #define JFS_FL_INHERIT 0x03C80000 | ||
168 | |||
169 | /* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */ | ||
170 | #define JFS_IOC_GETFLAGS _IOR('f', 1, long) | ||
171 | #define JFS_IOC_SETFLAGS _IOW('f', 2, long) | ||
172 | |||
173 | |||
151 | #endif /*_H_JFS_DINODE */ | 174 | #endif /*_H_JFS_DINODE */ |
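Because JFS_IOC_GETFLAGS above reuses the ext2 ioctl number, the existing ext2 attribute tools can already talk to it, and so can a plain program. A minimal userspace sketch (the path is hypothetical; EXT2_IMMUTABLE_FL and EXT2_APPEND_FL are the standard ext2 bit values the ioctl returns after jfs_map_ext2() translation):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    #define JFS_IOC_GETFLAGS  _IOR('f', 1, long)  /* same number as EXT2_IOC_GETFLAGS */
    #define EXT2_IMMUTABLE_FL 0x00000010
    #define EXT2_APPEND_FL    0x00000020

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : "/mnt/jfs/somefile";  /* hypothetical */
        int fd = open(path, O_RDONLY);
        int flags;

        if (fd < 0 || ioctl(fd, JFS_IOC_GETFLAGS, &flags) < 0) {
            perror(path);
            return 1;
        }
        printf("flags = 0x%08x%s%s\n", (unsigned)flags,
               (flags & EXT2_IMMUTABLE_FL) ? " [immutable]"   : "",
               (flags & EXT2_APPEND_FL)    ? " [append-only]" : "");
        close(fd);
        return 0;
    }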
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 79b5404db100..c161c98954e0 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c | |||
@@ -64,9 +64,9 @@ | |||
64 | * to the persistent bitmaps in dmaps) is guarded by (busy) buffers. | 64 | * to the persistent bitmaps in dmaps) is guarded by (busy) buffers. |
65 | */ | 65 | */ |
66 | 66 | ||
67 | #define BMAP_LOCK_INIT(bmp) init_MUTEX(&bmp->db_bmaplock) | 67 | #define BMAP_LOCK_INIT(bmp) mutex_init(&bmp->db_bmaplock) |
68 | #define BMAP_LOCK(bmp) down(&bmp->db_bmaplock) | 68 | #define BMAP_LOCK(bmp) mutex_lock(&bmp->db_bmaplock) |
69 | #define BMAP_UNLOCK(bmp) up(&bmp->db_bmaplock) | 69 | #define BMAP_UNLOCK(bmp) mutex_unlock(&bmp->db_bmaplock) |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * forward references | 72 | * forward references |
@@ -125,7 +125,7 @@ static int dbGetL2AGSize(s64 nblocks); | |||
125 | * into the table, with the table elements yielding the maximum | 125 | * into the table, with the table elements yielding the maximum |
126 | * binary buddy of free bits within the character. | 126 | * binary buddy of free bits within the character. |
127 | */ | 127 | */ |
128 | static s8 budtab[256] = { | 128 | static const s8 budtab[256] = { |
129 | 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 129 | 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
130 | 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, | 130 | 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
131 | 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, | 131 | 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
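The BMAP_LOCK macros are one instance of the conversion applied throughout this series: binary semaphores used purely for mutual exclusion become mutexes, which are lighter and get stricter debugging checks. A hedged kernel-style sketch of the pattern (the structure and function names are illustrative; the real one is struct bmap in jfs_dmap.h):

    #include <linux/mutex.h>

    struct demo_map {
        struct mutex lock;          /* was: struct semaphore        */
    };

    static void demo(struct demo_map *m)
    {
        mutex_init(&m->lock);       /* was: init_MUTEX(&m->lock)    */
        mutex_lock(&m->lock);       /* was: down(&m->lock)          */
        /* ... critical section ... */
        mutex_unlock(&m->lock);     /* was: up(&m->lock)            */
    }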
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h index 32e25884e7e8..8b14cc8e0228 100644 --- a/fs/jfs/jfs_dmap.h +++ b/fs/jfs/jfs_dmap.h | |||
@@ -243,7 +243,7 @@ struct dbmap { | |||
243 | struct bmap { | 243 | struct bmap { |
244 | struct dbmap db_bmap; /* on-disk aggregate map descriptor */ | 244 | struct dbmap db_bmap; /* on-disk aggregate map descriptor */ |
245 | struct inode *db_ipbmap; /* ptr to aggregate map incore inode */ | 245 | struct inode *db_ipbmap; /* ptr to aggregate map incore inode */ |
246 | struct semaphore db_bmaplock; /* aggregate map lock */ | 246 | struct mutex db_bmaplock; /* aggregate map lock */ |
247 | atomic_t db_active[MAXAG]; /* count of active, open files in AG */ | 247 | atomic_t db_active[MAXAG]; /* count of active, open files in AG */ |
248 | u32 *db_DBmap; | 248 | u32 *db_DBmap; |
249 | }; | 249 | }; |
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 404f33eae507..6c3f08319846 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c | |||
@@ -1005,6 +1005,9 @@ static int dtSplitUp(tid_t tid, | |||
1005 | 1005 | ||
1006 | DT_PUTPAGE(smp); | 1006 | DT_PUTPAGE(smp); |
1007 | 1007 | ||
1008 | if (!DO_INDEX(ip)) | ||
1009 | ip->i_size = xlen << sbi->l2bsize; | ||
1010 | |||
1008 | goto freeKeyName; | 1011 | goto freeKeyName; |
1009 | } | 1012 | } |
1010 | 1013 | ||
@@ -1055,7 +1058,9 @@ static int dtSplitUp(tid_t tid, | |||
1055 | xaddr = addressPXD(pxd) + xlen; | 1058 | xaddr = addressPXD(pxd) + xlen; |
1056 | dbFree(ip, xaddr, (s64) n); | 1059 | dbFree(ip, xaddr, (s64) n); |
1057 | } | 1060 | } |
1058 | } | 1061 | } else if (!DO_INDEX(ip)) |
1062 | ip->i_size = lengthPXD(pxd) << sbi->l2bsize; | ||
1063 | |||
1059 | 1064 | ||
1060 | extendOut: | 1065 | extendOut: |
1061 | DT_PUTPAGE(smp); | 1066 | DT_PUTPAGE(smp); |
@@ -1098,6 +1103,9 @@ static int dtSplitUp(tid_t tid, | |||
1098 | goto splitOut; | 1103 | goto splitOut; |
1099 | } | 1104 | } |
1100 | 1105 | ||
1106 | if (!DO_INDEX(ip)) | ||
1107 | ip->i_size += PSIZE; | ||
1108 | |||
1101 | /* | 1109 | /* |
1102 | * propagate up the router entry for the leaf page just split | 1110 | * propagate up the router entry for the leaf page just split |
1103 | * | 1111 | * |
@@ -2424,6 +2432,9 @@ static int dtDeleteUp(tid_t tid, struct inode *ip, | |||
2424 | break; | 2432 | break; |
2425 | } | 2433 | } |
2426 | 2434 | ||
2435 | if (!DO_INDEX(ip)) | ||
2436 | ip->i_size -= PSIZE; | ||
2437 | |||
2427 | return 0; | 2438 | return 0; |
2428 | } | 2439 | } |
2429 | 2440 | ||
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c index 4879603daa1c..5549378358bf 100644 --- a/fs/jfs/jfs_extent.c +++ b/fs/jfs/jfs_extent.c | |||
@@ -94,7 +94,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr) | |||
94 | txBeginAnon(ip->i_sb); | 94 | txBeginAnon(ip->i_sb); |
95 | 95 | ||
96 | /* Avoid race with jfs_commit_inode() */ | 96 | /* Avoid race with jfs_commit_inode() */ |
97 | down(&JFS_IP(ip)->commit_sem); | 97 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
98 | 98 | ||
99 | /* validate extent length */ | 99 | /* validate extent length */ |
100 | if (xlen > MAXXLEN) | 100 | if (xlen > MAXXLEN) |
@@ -136,14 +136,14 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr) | |||
136 | */ | 136 | */ |
137 | nxlen = xlen; | 137 | nxlen = xlen; |
138 | if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) { | 138 | if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) { |
139 | up(&JFS_IP(ip)->commit_sem); | 139 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
140 | return (rc); | 140 | return (rc); |
141 | } | 141 | } |
142 | 142 | ||
143 | /* Allocate blocks to quota. */ | 143 | /* Allocate blocks to quota. */ |
144 | if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { | 144 | if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { |
145 | dbFree(ip, nxaddr, (s64) nxlen); | 145 | dbFree(ip, nxaddr, (s64) nxlen); |
146 | up(&JFS_IP(ip)->commit_sem); | 146 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
147 | return -EDQUOT; | 147 | return -EDQUOT; |
148 | } | 148 | } |
149 | 149 | ||
@@ -165,7 +165,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr) | |||
165 | if (rc) { | 165 | if (rc) { |
166 | dbFree(ip, nxaddr, nxlen); | 166 | dbFree(ip, nxaddr, nxlen); |
167 | DQUOT_FREE_BLOCK(ip, nxlen); | 167 | DQUOT_FREE_BLOCK(ip, nxlen); |
168 | up(&JFS_IP(ip)->commit_sem); | 168 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
169 | return (rc); | 169 | return (rc); |
170 | } | 170 | } |
171 | 171 | ||
@@ -177,7 +177,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr) | |||
177 | 177 | ||
178 | mark_inode_dirty(ip); | 178 | mark_inode_dirty(ip); |
179 | 179 | ||
180 | up(&JFS_IP(ip)->commit_sem); | 180 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
181 | /* | 181 | /* |
182 | * COMMIT_SyncList flags an anonymous tlock on page that is on | 182 | * COMMIT_SyncList flags an anonymous tlock on page that is on |
183 | * sync list. | 183 | * sync list. |
@@ -222,7 +222,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr) | |||
222 | /* This blocks if we are low on resources */ | 222 | /* This blocks if we are low on resources */ |
223 | txBeginAnon(ip->i_sb); | 223 | txBeginAnon(ip->i_sb); |
224 | 224 | ||
225 | down(&JFS_IP(ip)->commit_sem); | 225 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
226 | /* validate extent length */ | 226 | /* validate extent length */ |
227 | if (nxlen > MAXXLEN) | 227 | if (nxlen > MAXXLEN) |
228 | nxlen = MAXXLEN; | 228 | nxlen = MAXXLEN; |
@@ -258,7 +258,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr) | |||
258 | /* Allocat blocks to quota. */ | 258 | /* Allocat blocks to quota. */ |
259 | if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { | 259 | if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { |
260 | dbFree(ip, nxaddr, (s64) nxlen); | 260 | dbFree(ip, nxaddr, (s64) nxlen); |
261 | up(&JFS_IP(ip)->commit_sem); | 261 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
262 | return -EDQUOT; | 262 | return -EDQUOT; |
263 | } | 263 | } |
264 | 264 | ||
@@ -338,7 +338,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr) | |||
338 | 338 | ||
339 | mark_inode_dirty(ip); | 339 | mark_inode_dirty(ip); |
340 | exit: | 340 | exit: |
341 | up(&JFS_IP(ip)->commit_sem); | 341 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
342 | return (rc); | 342 | return (rc); |
343 | } | 343 | } |
344 | #endif /* _NOTYET */ | 344 | #endif /* _NOTYET */ |
@@ -439,12 +439,12 @@ int extRecord(struct inode *ip, xad_t * xp) | |||
439 | 439 | ||
440 | txBeginAnon(ip->i_sb); | 440 | txBeginAnon(ip->i_sb); |
441 | 441 | ||
442 | down(&JFS_IP(ip)->commit_sem); | 442 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
443 | 443 | ||
444 | /* update the extent */ | 444 | /* update the extent */ |
445 | rc = xtUpdate(0, ip, xp); | 445 | rc = xtUpdate(0, ip, xp); |
446 | 446 | ||
447 | up(&JFS_IP(ip)->commit_sem); | 447 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
448 | return rc; | 448 | return rc; |
449 | } | 449 | } |
450 | 450 | ||
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index 4efa0d0eec39..ccbe60aff83d 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c | |||
@@ -66,14 +66,14 @@ static HLIST_HEAD(aggregate_hash); | |||
66 | * imap locks | 66 | * imap locks |
67 | */ | 67 | */ |
68 | /* iag free list lock */ | 68 | /* iag free list lock */ |
69 | #define IAGFREE_LOCK_INIT(imap) init_MUTEX(&imap->im_freelock) | 69 | #define IAGFREE_LOCK_INIT(imap) mutex_init(&imap->im_freelock) |
70 | #define IAGFREE_LOCK(imap) down(&imap->im_freelock) | 70 | #define IAGFREE_LOCK(imap) mutex_lock(&imap->im_freelock) |
71 | #define IAGFREE_UNLOCK(imap) up(&imap->im_freelock) | 71 | #define IAGFREE_UNLOCK(imap) mutex_unlock(&imap->im_freelock) |
72 | 72 | ||
73 | /* per ag iag list locks */ | 73 | /* per ag iag list locks */ |
74 | #define AG_LOCK_INIT(imap,index) init_MUTEX(&(imap->im_aglock[index])) | 74 | #define AG_LOCK_INIT(imap,index) mutex_init(&(imap->im_aglock[index])) |
75 | #define AG_LOCK(imap,agno) down(&imap->im_aglock[agno]) | 75 | #define AG_LOCK(imap,agno) mutex_lock(&imap->im_aglock[agno]) |
76 | #define AG_UNLOCK(imap,agno) up(&imap->im_aglock[agno]) | 76 | #define AG_UNLOCK(imap,agno) mutex_unlock(&imap->im_aglock[agno]) |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * forward references | 79 | * forward references |
@@ -1261,7 +1261,7 @@ int diFree(struct inode *ip) | |||
1261 | * to be freed by the transaction; | 1261 | * to be freed by the transaction; |
1262 | */ | 1262 | */ |
1263 | tid = txBegin(ipimap->i_sb, COMMIT_FORCE); | 1263 | tid = txBegin(ipimap->i_sb, COMMIT_FORCE); |
1264 | down(&JFS_IP(ipimap)->commit_sem); | 1264 | mutex_lock(&JFS_IP(ipimap)->commit_mutex); |
1265 | 1265 | ||
1266 | /* acquire tlock of the iag page of the freed ixad | 1266 | /* acquire tlock of the iag page of the freed ixad |
1267 | * to force the page NOHOMEOK (even though no data is | 1267 | * to force the page NOHOMEOK (even though no data is |
@@ -1294,7 +1294,7 @@ int diFree(struct inode *ip) | |||
1294 | rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); | 1294 | rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); |
1295 | 1295 | ||
1296 | txEnd(tid); | 1296 | txEnd(tid); |
1297 | up(&JFS_IP(ipimap)->commit_sem); | 1297 | mutex_unlock(&JFS_IP(ipimap)->commit_mutex); |
1298 | 1298 | ||
1299 | /* unlock the AG inode map information */ | 1299 | /* unlock the AG inode map information */ |
1300 | AG_UNLOCK(imap, agno); | 1300 | AG_UNLOCK(imap, agno); |
@@ -2554,13 +2554,13 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp) | |||
2554 | * addressing structure pointing to the new iag page; | 2554 | * addressing structure pointing to the new iag page; |
2555 | */ | 2555 | */ |
2556 | tid = txBegin(sb, COMMIT_FORCE); | 2556 | tid = txBegin(sb, COMMIT_FORCE); |
2557 | down(&JFS_IP(ipimap)->commit_sem); | 2557 | mutex_lock(&JFS_IP(ipimap)->commit_mutex); |
2558 | 2558 | ||
2559 | /* update the inode map addressing structure to point to it */ | 2559 | /* update the inode map addressing structure to point to it */ |
2560 | if ((rc = | 2560 | if ((rc = |
2561 | xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) { | 2561 | xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) { |
2562 | txEnd(tid); | 2562 | txEnd(tid); |
2563 | up(&JFS_IP(ipimap)->commit_sem); | 2563 | mutex_unlock(&JFS_IP(ipimap)->commit_mutex); |
2564 | /* Free the blocks allocated for the iag since it was | 2564 | /* Free the blocks allocated for the iag since it was |
2565 | * not successfully added to the inode map | 2565 | * not successfully added to the inode map |
2566 | */ | 2566 | */ |
@@ -2626,7 +2626,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp) | |||
2626 | rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); | 2626 | rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); |
2627 | 2627 | ||
2628 | txEnd(tid); | 2628 | txEnd(tid); |
2629 | up(&JFS_IP(ipimap)->commit_sem); | 2629 | mutex_unlock(&JFS_IP(ipimap)->commit_mutex); |
2630 | 2630 | ||
2631 | duplicateIXtree(sb, blkno, xlen, &xaddr); | 2631 | duplicateIXtree(sb, blkno, xlen, &xaddr); |
2632 | 2632 | ||
@@ -3074,14 +3074,40 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno, | |||
3074 | static int copy_from_dinode(struct dinode * dip, struct inode *ip) | 3074 | static int copy_from_dinode(struct dinode * dip, struct inode *ip) |
3075 | { | 3075 | { |
3076 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); | 3076 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); |
3077 | struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); | ||
3077 | 3078 | ||
3078 | jfs_ip->fileset = le32_to_cpu(dip->di_fileset); | 3079 | jfs_ip->fileset = le32_to_cpu(dip->di_fileset); |
3079 | jfs_ip->mode2 = le32_to_cpu(dip->di_mode); | 3080 | jfs_ip->mode2 = le32_to_cpu(dip->di_mode); |
3080 | 3081 | ||
3081 | ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff; | 3082 | ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff; |
3083 | if (sbi->umask != -1) { | ||
3084 | ip->i_mode = (ip->i_mode & ~0777) | (0777 & ~sbi->umask); | ||
3085 | /* For directories, add x permission if r is allowed by umask */ | ||
3086 | if (S_ISDIR(ip->i_mode)) { | ||
3087 | if (ip->i_mode & 0400) | ||
3088 | ip->i_mode |= 0100; | ||
3089 | if (ip->i_mode & 0040) | ||
3090 | ip->i_mode |= 0010; | ||
3091 | if (ip->i_mode & 0004) | ||
3092 | ip->i_mode |= 0001; | ||
3093 | } | ||
3094 | } | ||
3082 | ip->i_nlink = le32_to_cpu(dip->di_nlink); | 3095 | ip->i_nlink = le32_to_cpu(dip->di_nlink); |
3083 | ip->i_uid = le32_to_cpu(dip->di_uid); | 3096 | |
3084 | ip->i_gid = le32_to_cpu(dip->di_gid); | 3097 | jfs_ip->saved_uid = le32_to_cpu(dip->di_uid); |
3098 | if (sbi->uid == -1) | ||
3099 | ip->i_uid = jfs_ip->saved_uid; | ||
3100 | else { | ||
3101 | ip->i_uid = sbi->uid; | ||
3102 | } | ||
3103 | |||
3104 | jfs_ip->saved_gid = le32_to_cpu(dip->di_gid); | ||
3105 | if (sbi->gid == -1) | ||
3106 | ip->i_gid = jfs_ip->saved_gid; | ||
3107 | else { | ||
3108 | ip->i_gid = sbi->gid; | ||
3109 | } | ||
3110 | |||
3085 | ip->i_size = le64_to_cpu(dip->di_size); | 3111 | ip->i_size = le64_to_cpu(dip->di_size); |
3086 | ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec); | 3112 | ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec); |
3087 | ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec); | 3113 | ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec); |
@@ -3132,21 +3158,33 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip) | |||
3132 | static void copy_to_dinode(struct dinode * dip, struct inode *ip) | 3158 | static void copy_to_dinode(struct dinode * dip, struct inode *ip) |
3133 | { | 3159 | { |
3134 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); | 3160 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); |
3161 | struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); | ||
3135 | 3162 | ||
3136 | dip->di_fileset = cpu_to_le32(jfs_ip->fileset); | 3163 | dip->di_fileset = cpu_to_le32(jfs_ip->fileset); |
3137 | dip->di_inostamp = cpu_to_le32(JFS_SBI(ip->i_sb)->inostamp); | 3164 | dip->di_inostamp = cpu_to_le32(sbi->inostamp); |
3138 | dip->di_number = cpu_to_le32(ip->i_ino); | 3165 | dip->di_number = cpu_to_le32(ip->i_ino); |
3139 | dip->di_gen = cpu_to_le32(ip->i_generation); | 3166 | dip->di_gen = cpu_to_le32(ip->i_generation); |
3140 | dip->di_size = cpu_to_le64(ip->i_size); | 3167 | dip->di_size = cpu_to_le64(ip->i_size); |
3141 | dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); | 3168 | dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); |
3142 | dip->di_nlink = cpu_to_le32(ip->i_nlink); | 3169 | dip->di_nlink = cpu_to_le32(ip->i_nlink); |
3143 | dip->di_uid = cpu_to_le32(ip->i_uid); | 3170 | if (sbi->uid == -1) |
3144 | dip->di_gid = cpu_to_le32(ip->i_gid); | 3171 | dip->di_uid = cpu_to_le32(ip->i_uid); |
3172 | else | ||
3173 | dip->di_uid = cpu_to_le32(jfs_ip->saved_uid); | ||
3174 | if (sbi->gid == -1) | ||
3175 | dip->di_gid = cpu_to_le32(ip->i_gid); | ||
3176 | else | ||
3177 | dip->di_gid = cpu_to_le32(jfs_ip->saved_gid); | ||
3145 | /* | 3178 | /* |
3146 | * mode2 is only needed for storing the higher order bits. | 3179 | * mode2 is only needed for storing the higher order bits. |
3147 | * Trust i_mode for the lower order ones | 3180 | * Trust i_mode for the lower order ones |
3148 | */ | 3181 | */ |
3149 | dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) | ip->i_mode); | 3182 | if (sbi->umask == -1) |
3183 | dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) | | ||
3184 | ip->i_mode); | ||
3185 | else /* Leave the original permissions alone */ | ||
3186 | dip->di_mode = cpu_to_le32(jfs_ip->mode2); | ||
3187 | |||
3150 | dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec); | 3188 | dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec); |
3151 | dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec); | 3189 | dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec); |
3152 | dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec); | 3190 | dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec); |
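copy_from_dinode() above now lets mount-time overrides win over what is stored on disk: the permission bits are rebuilt from the umask option (directories additionally gain execute wherever read is granted), and uid/gid are replaced while the on-disk values are kept in saved_uid/saved_gid so copy_to_dinode() can write them back. A runnable sketch of just the mode computation (the function name is illustrative):

    #include <stdio.h>
    #include <sys/stat.h>

    /* Rebuild the permission bits from a mount-time umask, mirroring the
     * copy_from_dinode() hunk; directories get 'x' wherever 'r' is allowed. */
    static unsigned int override_mode(unsigned int mode, unsigned int umask)
    {
        mode = (mode & ~0777) | (0777 & ~umask);
        if (S_ISDIR(mode)) {
            if (mode & 0400) mode |= 0100;
            if (mode & 0040) mode |= 0010;
            if (mode & 0004) mode |= 0001;
        }
        return mode;
    }

    int main(void)
    {
        printf("file 0%o with umask 022 -> 0%o\n", 0100666, override_mode(0100666, 022));
        printf("dir  0%o with umask 027 -> 0%o\n", 0040777, override_mode(0040777, 027));
        return 0;
    }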
diff --git a/fs/jfs/jfs_imap.h b/fs/jfs/jfs_imap.h index 6b59adec036a..6e24465f0f98 100644 --- a/fs/jfs/jfs_imap.h +++ b/fs/jfs/jfs_imap.h | |||
@@ -140,8 +140,8 @@ struct dinomap { | |||
140 | struct inomap { | 140 | struct inomap { |
141 | struct dinomap im_imap; /* 4096: inode allocation control */ | 141 | struct dinomap im_imap; /* 4096: inode allocation control */ |
142 | struct inode *im_ipimap; /* 4: ptr to inode for imap */ | 142 | struct inode *im_ipimap; /* 4: ptr to inode for imap */ |
143 | struct semaphore im_freelock; /* 4: iag free list lock */ | 143 | struct mutex im_freelock; /* 4: iag free list lock */ |
144 | struct semaphore im_aglock[MAXAG]; /* 512: per AG locks */ | 144 | struct mutex im_aglock[MAXAG]; /* 512: per AG locks */ |
145 | u32 *im_DBGdimap; | 145 | u32 *im_DBGdimap; |
146 | atomic_t im_numinos; /* num of backed inodes */ | 146 | atomic_t im_numinos; /* num of backed inodes */ |
147 | atomic_t im_numfree; /* num of free backed inodes */ | 147 | atomic_t im_numfree; /* num of free backed inodes */ |
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h index dc21a5bd54d4..54d73716ca8c 100644 --- a/fs/jfs/jfs_incore.h +++ b/fs/jfs/jfs_incore.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #ifndef _H_JFS_INCORE | 19 | #ifndef _H_JFS_INCORE |
20 | #define _H_JFS_INCORE | 20 | #define _H_JFS_INCORE |
21 | 21 | ||
22 | #include <linux/mutex.h> | ||
22 | #include <linux/rwsem.h> | 23 | #include <linux/rwsem.h> |
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/bitops.h> | 25 | #include <linux/bitops.h> |
@@ -37,6 +38,8 @@ | |||
37 | struct jfs_inode_info { | 38 | struct jfs_inode_info { |
38 | int fileset; /* fileset number (always 16)*/ | 39 | int fileset; /* fileset number (always 16)*/ |
39 | uint mode2; /* jfs-specific mode */ | 40 | uint mode2; /* jfs-specific mode */ |
41 | uint saved_uid; /* saved for uid mount option */ | ||
42 | uint saved_gid; /* saved for gid mount option */ | ||
40 | pxd_t ixpxd; /* inode extent descriptor */ | 43 | pxd_t ixpxd; /* inode extent descriptor */ |
41 | dxd_t acl; /* dxd describing acl */ | 44 | dxd_t acl; /* dxd describing acl */ |
42 | dxd_t ea; /* dxd describing ea */ | 45 | dxd_t ea; /* dxd describing ea */ |
@@ -62,12 +65,12 @@ struct jfs_inode_info { | |||
62 | */ | 65 | */ |
63 | struct rw_semaphore rdwrlock; | 66 | struct rw_semaphore rdwrlock; |
64 | /* | 67 | /* |
65 | * commit_sem serializes transaction processing on an inode. | 68 | * commit_mutex serializes transaction processing on an inode. |
66 | * It must be taken after beginning a transaction (txBegin), since | 69 | * It must be taken after beginning a transaction (txBegin), since |
67 | * dirty inodes may be committed while a new transaction on the | 70 | * dirty inodes may be committed while a new transaction on the |
68 | * inode is blocked in txBegin or TxBeginAnon | 71 | * inode is blocked in txBegin or TxBeginAnon |
69 | */ | 72 | */ |
70 | struct semaphore commit_sem; | 73 | struct mutex commit_mutex; |
71 | /* xattr_sem allows us to access the xattrs without taking i_mutex */ | 74 | /* xattr_sem allows us to access the xattrs without taking i_mutex */ |
72 | struct rw_semaphore xattr_sem; | 75 | struct rw_semaphore xattr_sem; |
73 | lid_t xtlid; /* lid of xtree lock on directory */ | 76 | lid_t xtlid; /* lid of xtree lock on directory */ |
@@ -169,6 +172,9 @@ struct jfs_sb_info { | |||
169 | uint state; /* mount/recovery state */ | 172 | uint state; /* mount/recovery state */ |
170 | unsigned long flag; /* mount time flags */ | 173 | unsigned long flag; /* mount time flags */ |
171 | uint p_state; /* state prior to going no integrity */ | 174 | uint p_state; /* state prior to going no integrity */ |
175 | uint uid; /* uid to override on-disk uid */ | ||
176 | uint gid; /* gid to override on-disk gid */ | ||
177 | uint umask; /* umask to override on-disk umask */ | ||
172 | }; | 178 | }; |
173 | 179 | ||
174 | /* jfs_sb_info commit_state */ | 180 | /* jfs_sb_info commit_state */ |
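The new uid/gid/umask fields in jfs_sb_info carry the override values that the inode code above consults (a value of -1 means "not set, trust the disk"). Assuming the option names these field names suggest, a mount that forces ownership and permissions might look like the following sketch (the device, mount point, and option spelling are assumptions, not taken from this diff):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        /* hypothetical device and mount point; option names assumed to be
         * uid=, gid= and umask=, matching the jfs_sb_info field names */
        if (mount("/dev/sdb1", "/mnt/jfs", "jfs", 0,
                  "uid=1000,gid=1000,umask=022") != 0) {
            perror("mount");
            return 1;
        }
        return 0;
    }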
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index 2af5efbfd06f..495df402916d 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c | |||
@@ -25,6 +25,26 @@ | |||
25 | #include "jfs_dinode.h" | 25 | #include "jfs_dinode.h" |
26 | #include "jfs_debug.h" | 26 | #include "jfs_debug.h" |
27 | 27 | ||
28 | |||
29 | void jfs_set_inode_flags(struct inode *inode) | ||
30 | { | ||
31 | unsigned int flags = JFS_IP(inode)->mode2; | ||
32 | |||
33 | inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | | ||
34 | S_NOATIME | S_DIRSYNC | S_SYNC); | ||
35 | |||
36 | if (flags & JFS_IMMUTABLE_FL) | ||
37 | inode->i_flags |= S_IMMUTABLE; | ||
38 | if (flags & JFS_APPEND_FL) | ||
39 | inode->i_flags |= S_APPEND; | ||
40 | if (flags & JFS_NOATIME_FL) | ||
41 | inode->i_flags |= S_NOATIME; | ||
42 | if (flags & JFS_DIRSYNC_FL) | ||
43 | inode->i_flags |= S_DIRSYNC; | ||
44 | if (flags & JFS_SYNC_FL) | ||
45 | inode->i_flags |= S_SYNC; | ||
46 | } | ||
47 | |||
28 | /* | 48 | /* |
29 | * NAME: ialloc() | 49 | * NAME: ialloc() |
30 | * | 50 | * |
@@ -63,6 +83,13 @@ struct inode *ialloc(struct inode *parent, umode_t mode) | |||
63 | inode->i_gid = current->fsgid; | 83 | inode->i_gid = current->fsgid; |
64 | 84 | ||
65 | /* | 85 | /* |
86 | * New inodes need to save sane values on disk when | ||
87 | * uid & gid mount options are used | ||
88 | */ | ||
89 | jfs_inode->saved_uid = inode->i_uid; | ||
90 | jfs_inode->saved_gid = inode->i_gid; | ||
91 | |||
92 | /* | ||
66 | * Allocate inode to quota. | 93 | * Allocate inode to quota. |
67 | */ | 94 | */ |
68 | if (DQUOT_ALLOC_INODE(inode)) { | 95 | if (DQUOT_ALLOC_INODE(inode)) { |
@@ -74,10 +101,20 @@ struct inode *ialloc(struct inode *parent, umode_t mode) | |||
74 | } | 101 | } |
75 | 102 | ||
76 | inode->i_mode = mode; | 103 | inode->i_mode = mode; |
77 | if (S_ISDIR(mode)) | 104 | /* inherit flags from parent */ |
78 | jfs_inode->mode2 = IDIRECTORY | mode; | 105 | jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT; |
79 | else | 106 | |
80 | jfs_inode->mode2 = INLINEEA | ISPARSE | mode; | 107 | if (S_ISDIR(mode)) { |
108 | jfs_inode->mode2 |= IDIRECTORY; | ||
109 | jfs_inode->mode2 &= ~JFS_DIRSYNC_FL; | ||
110 | } | ||
111 | else { | ||
112 | jfs_inode->mode2 |= INLINEEA | ISPARSE; | ||
113 | if (S_ISLNK(mode)) | ||
114 | jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL); | ||
115 | } | ||
116 | jfs_inode->mode2 |= mode; | ||
117 | |||
81 | inode->i_blksize = sb->s_blocksize; | 118 | inode->i_blksize = sb->s_blocksize; |
82 | inode->i_blocks = 0; | 119 | inode->i_blocks = 0; |
83 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 120 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
@@ -98,6 +135,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode) | |||
98 | jfs_inode->atlhead = 0; | 135 | jfs_inode->atlhead = 0; |
99 | jfs_inode->atltail = 0; | 136 | jfs_inode->atltail = 0; |
100 | jfs_inode->xtlid = 0; | 137 | jfs_inode->xtlid = 0; |
138 | jfs_set_inode_flags(inode); | ||
101 | 139 | ||
102 | jfs_info("ialloc returns inode = 0x%p\n", inode); | 140 | jfs_info("ialloc returns inode = 0x%p\n", inode); |
103 | 141 | ||
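ialloc() now seeds a new inode's mode2 from its parent through JFS_FL_INHERIT instead of starting from a fixed value, and strips the immutable/append bits for symlinks. A compact restatement of that decision as a runnable sketch (the on-disk attribute bits such as IDIRECTORY and INLINEEA | ISPARSE that the real code also sets, and the directory-specific handling, are omitted; the function name is illustrative):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/stat.h>

    #define JFS_APPEND_FL    0x01000000
    #define JFS_IMMUTABLE_FL 0x02000000
    #define JFS_FL_INHERIT   0x03C80000

    static unsigned int inherit_mode2(unsigned int parent_mode2, mode_t mode)
    {
        unsigned int m2 = parent_mode2 & JFS_FL_INHERIT;   /* inherit from the parent dir */

        if (S_ISLNK(mode))                                 /* the hunk strips these for symlinks */
            m2 &= ~(JFS_IMMUTABLE_FL | JFS_APPEND_FL);
        return m2 | mode;                                  /* low bits mirror i_mode */
    }

    int main(void)
    {
        unsigned int parent = JFS_IMMUTABLE_FL;            /* an immutable directory */

        printf("new file    mode2 = 0x%08x\n", inherit_mode2(parent, S_IFREG | 0644));
        printf("new symlink mode2 = 0x%08x\n", inherit_mode2(parent, S_IFLNK | 0777));
        return 0;
    }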
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index b54bac576cb3..095d471b9f9a 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h | |||
@@ -20,6 +20,8 @@ | |||
20 | 20 | ||
21 | extern struct inode *ialloc(struct inode *, umode_t); | 21 | extern struct inode *ialloc(struct inode *, umode_t); |
22 | extern int jfs_fsync(struct file *, struct dentry *, int); | 22 | extern int jfs_fsync(struct file *, struct dentry *, int); |
23 | extern int jfs_ioctl(struct inode *, struct file *, | ||
24 | unsigned int, unsigned long); | ||
23 | extern void jfs_read_inode(struct inode *); | 25 | extern void jfs_read_inode(struct inode *); |
24 | extern int jfs_commit_inode(struct inode *, int); | 26 | extern int jfs_commit_inode(struct inode *, int); |
25 | extern int jfs_write_inode(struct inode*, int); | 27 | extern int jfs_write_inode(struct inode*, int); |
@@ -29,6 +31,7 @@ extern void jfs_truncate(struct inode *); | |||
29 | extern void jfs_truncate_nolock(struct inode *, loff_t); | 31 | extern void jfs_truncate_nolock(struct inode *, loff_t); |
30 | extern void jfs_free_zero_link(struct inode *); | 32 | extern void jfs_free_zero_link(struct inode *); |
31 | extern struct dentry *jfs_get_parent(struct dentry *dentry); | 33 | extern struct dentry *jfs_get_parent(struct dentry *dentry); |
34 | extern void jfs_set_inode_flags(struct inode *); | ||
32 | 35 | ||
33 | extern struct address_space_operations jfs_aops; | 36 | extern struct address_space_operations jfs_aops; |
34 | extern struct inode_operations jfs_dir_inode_operations; | 37 | extern struct inode_operations jfs_dir_inode_operations; |
diff --git a/fs/jfs/jfs_lock.h b/fs/jfs/jfs_lock.h index 10ad1d086685..70ac9f7d1e00 100644 --- a/fs/jfs/jfs_lock.h +++ b/fs/jfs/jfs_lock.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #define _H_JFS_LOCK | 20 | #define _H_JFS_LOCK |
21 | 21 | ||
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | #include <linux/mutex.h> | ||
23 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
24 | 25 | ||
25 | /* | 26 | /* |
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index d27bac6acaa3..0b348b13b551 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <linux/interrupt.h> | 64 | #include <linux/interrupt.h> |
65 | #include <linux/smp_lock.h> | 65 | #include <linux/smp_lock.h> |
66 | #include <linux/completion.h> | 66 | #include <linux/completion.h> |
67 | #include <linux/kthread.h> | ||
67 | #include <linux/buffer_head.h> /* for sync_blockdev() */ | 68 | #include <linux/buffer_head.h> /* for sync_blockdev() */ |
68 | #include <linux/bio.h> | 69 | #include <linux/bio.h> |
69 | #include <linux/suspend.h> | 70 | #include <linux/suspend.h> |
@@ -81,15 +82,14 @@ | |||
81 | */ | 82 | */ |
82 | static struct lbuf *log_redrive_list; | 83 | static struct lbuf *log_redrive_list; |
83 | static DEFINE_SPINLOCK(log_redrive_lock); | 84 | static DEFINE_SPINLOCK(log_redrive_lock); |
84 | DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait); | ||
85 | 85 | ||
86 | 86 | ||
87 | /* | 87 | /* |
88 | * log read/write serialization (per log) | 88 | * log read/write serialization (per log) |
89 | */ | 89 | */ |
90 | #define LOG_LOCK_INIT(log) init_MUTEX(&(log)->loglock) | 90 | #define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock) |
91 | #define LOG_LOCK(log) down(&((log)->loglock)) | 91 | #define LOG_LOCK(log) mutex_lock(&((log)->loglock)) |
92 | #define LOG_UNLOCK(log) up(&((log)->loglock)) | 92 | #define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock)) |
93 | 93 | ||
94 | 94 | ||
95 | /* | 95 | /* |
@@ -1105,11 +1105,10 @@ int lmLogOpen(struct super_block *sb) | |||
1105 | } | 1105 | } |
1106 | } | 1106 | } |
1107 | 1107 | ||
1108 | if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL))) { | 1108 | if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { |
1109 | up(&jfs_log_sem); | 1109 | up(&jfs_log_sem); |
1110 | return -ENOMEM; | 1110 | return -ENOMEM; |
1111 | } | 1111 | } |
1112 | memset(log, 0, sizeof(struct jfs_log)); | ||
1113 | INIT_LIST_HEAD(&log->sb_list); | 1112 | INIT_LIST_HEAD(&log->sb_list); |
1114 | init_waitqueue_head(&log->syncwait); | 1113 | init_waitqueue_head(&log->syncwait); |
1115 | 1114 | ||
@@ -1181,9 +1180,8 @@ static int open_inline_log(struct super_block *sb) | |||
1181 | struct jfs_log *log; | 1180 | struct jfs_log *log; |
1182 | int rc; | 1181 | int rc; |
1183 | 1182 | ||
1184 | if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL))) | 1183 | if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) |
1185 | return -ENOMEM; | 1184 | return -ENOMEM; |
1186 | memset(log, 0, sizeof(struct jfs_log)); | ||
1187 | INIT_LIST_HEAD(&log->sb_list); | 1185 | INIT_LIST_HEAD(&log->sb_list); |
1188 | init_waitqueue_head(&log->syncwait); | 1186 | init_waitqueue_head(&log->syncwait); |
1189 | 1187 | ||
@@ -1216,12 +1214,11 @@ static int open_dummy_log(struct super_block *sb) | |||
1216 | 1214 | ||
1217 | down(&jfs_log_sem); | 1215 | down(&jfs_log_sem); |
1218 | if (!dummy_log) { | 1216 | if (!dummy_log) { |
1219 | dummy_log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL); | 1217 | dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); |
1220 | if (!dummy_log) { | 1218 | if (!dummy_log) { |
1221 | up(&jfs_log_sem); | 1219 | up(&jfs_log_sem); |
1222 | return -ENOMEM; | 1220 | return -ENOMEM; |
1223 | } | 1221 | } |
1224 | memset(dummy_log, 0, sizeof(struct jfs_log)); | ||
1225 | INIT_LIST_HEAD(&dummy_log->sb_list); | 1222 | INIT_LIST_HEAD(&dummy_log->sb_list); |
1226 | init_waitqueue_head(&dummy_log->syncwait); | 1223 | init_waitqueue_head(&dummy_log->syncwait); |
1227 | dummy_log->no_integrity = 1; | 1224 | dummy_log->no_integrity = 1; |
@@ -1980,7 +1977,7 @@ static inline void lbmRedrive(struct lbuf *bp) | |||
1980 | log_redrive_list = bp; | 1977 | log_redrive_list = bp; |
1981 | spin_unlock_irqrestore(&log_redrive_lock, flags); | 1978 | spin_unlock_irqrestore(&log_redrive_lock, flags); |
1982 | 1979 | ||
1983 | wake_up(&jfs_IO_thread_wait); | 1980 | wake_up_process(jfsIOthread); |
1984 | } | 1981 | } |
1985 | 1982 | ||
1986 | 1983 | ||
@@ -2347,13 +2344,7 @@ int jfsIOWait(void *arg) | |||
2347 | { | 2344 | { |
2348 | struct lbuf *bp; | 2345 | struct lbuf *bp; |
2349 | 2346 | ||
2350 | daemonize("jfsIO"); | ||
2351 | |||
2352 | complete(&jfsIOwait); | ||
2353 | |||
2354 | do { | 2347 | do { |
2355 | DECLARE_WAITQUEUE(wq, current); | ||
2356 | |||
2357 | spin_lock_irq(&log_redrive_lock); | 2348 | spin_lock_irq(&log_redrive_lock); |
2358 | while ((bp = log_redrive_list) != 0) { | 2349 | while ((bp = log_redrive_list) != 0) { |
2359 | log_redrive_list = bp->l_redrive_next; | 2350 | log_redrive_list = bp->l_redrive_next; |
@@ -2362,21 +2353,19 @@ int jfsIOWait(void *arg) | |||
2362 | lbmStartIO(bp); | 2353 | lbmStartIO(bp); |
2363 | spin_lock_irq(&log_redrive_lock); | 2354 | spin_lock_irq(&log_redrive_lock); |
2364 | } | 2355 | } |
2356 | spin_unlock_irq(&log_redrive_lock); | ||
2357 | |||
2365 | if (freezing(current)) { | 2358 | if (freezing(current)) { |
2366 | spin_unlock_irq(&log_redrive_lock); | ||
2367 | refrigerator(); | 2359 | refrigerator(); |
2368 | } else { | 2360 | } else { |
2369 | add_wait_queue(&jfs_IO_thread_wait, &wq); | ||
2370 | set_current_state(TASK_INTERRUPTIBLE); | 2361 | set_current_state(TASK_INTERRUPTIBLE); |
2371 | spin_unlock_irq(&log_redrive_lock); | ||
2372 | schedule(); | 2362 | schedule(); |
2373 | current->state = TASK_RUNNING; | 2363 | current->state = TASK_RUNNING; |
2374 | remove_wait_queue(&jfs_IO_thread_wait, &wq); | ||
2375 | } | 2364 | } |
2376 | } while (!jfs_stop_threads); | 2365 | } while (!kthread_should_stop()); |
2377 | 2366 | ||
2378 | jfs_info("jfsIOWait being killed!"); | 2367 | jfs_info("jfsIOWait being killed!"); |
2379 | complete_and_exit(&jfsIOwait, 0); | 2368 | return 0; |
2380 | } | 2369 | } |
2381 | 2370 | ||
2382 | /* | 2371 | /* |
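Besides the kzalloc() conversions, the log manager's I/O daemon drops its private wait queue and the daemonize()/complete_and_exit() boilerplate in favour of the kthread API: the thread is woken with wake_up_process() and told to exit via kthread_should_stop() (its creation with kthread_run() is not shown in this section). A hedged sketch of that loop shape, modelled on jfsIOWait() above with illustrative names:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/suspend.h>

    /* Illustrative worker, not the real jfsIOthread. */
    static int demo_worker(void *arg)
    {
        do {
            /* ... drain pending work under the appropriate lock ... */

            if (freezing(current)) {
                refrigerator();
            } else {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();                    /* sleeps until wake_up_process() */
                __set_current_state(TASK_RUNNING);
            }
        } while (!kthread_should_stop());
        return 0;
    }

    /* Elsewhere (not part of this diff):
     *   struct task_struct *t = kthread_run(demo_worker, NULL, "demo");
     *   wake_up_process(t);    -- hand the worker new work
     *   kthread_stop(t);       -- at unmount / module exit
     */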
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h index e4978b5b65ee..8c6909b80014 100644 --- a/fs/jfs/jfs_logmgr.h +++ b/fs/jfs/jfs_logmgr.h | |||
@@ -389,7 +389,7 @@ struct jfs_log { | |||
389 | int eor; /* 4: eor of last record in eol page */ | 389 | int eor; /* 4: eor of last record in eol page */ |
390 | struct lbuf *bp; /* 4: current log page buffer */ | 390 | struct lbuf *bp; /* 4: current log page buffer */ |
391 | 391 | ||
392 | struct semaphore loglock; /* 4: log write serialization lock */ | 392 | struct mutex loglock; /* 4: log write serialization lock */ |
393 | 393 | ||
394 | /* syncpt */ | 394 | /* syncpt */ |
395 | int nextsync; /* 4: bytes to write before next syncpt */ | 395 | int nextsync; /* 4: bytes to write before next syncpt */ |
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 8a53981f9f27..5fbaeaadccd3 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -104,10 +104,9 @@ static inline int insert_metapage(struct page *page, struct metapage *mp) | |||
104 | if (PagePrivate(page)) | 104 | if (PagePrivate(page)) |
105 | a = mp_anchor(page); | 105 | a = mp_anchor(page); |
106 | else { | 106 | else { |
107 | a = kmalloc(sizeof(struct meta_anchor), GFP_NOFS); | 107 | a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS); |
108 | if (!a) | 108 | if (!a) |
109 | return -ENOMEM; | 109 | return -ENOMEM; |
110 | memset(a, 0, sizeof(struct meta_anchor)); | ||
111 | set_page_private(page, (unsigned long)a); | 110 | set_page_private(page, (unsigned long)a); |
112 | SetPagePrivate(page); | 111 | SetPagePrivate(page); |
113 | kmap(page); | 112 | kmap(page); |
diff --git a/fs/jfs/jfs_superblock.h b/fs/jfs/jfs_superblock.h index fcf781bf31cb..682cf1a68a18 100644 --- a/fs/jfs/jfs_superblock.h +++ b/fs/jfs/jfs_superblock.h | |||
@@ -113,12 +113,9 @@ extern int jfs_mount(struct super_block *); | |||
113 | extern int jfs_mount_rw(struct super_block *, int); | 113 | extern int jfs_mount_rw(struct super_block *, int); |
114 | extern int jfs_umount(struct super_block *); | 114 | extern int jfs_umount(struct super_block *); |
115 | extern int jfs_umount_rw(struct super_block *); | 115 | extern int jfs_umount_rw(struct super_block *); |
116 | |||
117 | extern int jfs_stop_threads; | ||
118 | extern struct completion jfsIOwait; | ||
119 | extern wait_queue_head_t jfs_IO_thread_wait; | ||
120 | extern wait_queue_head_t jfs_commit_thread_wait; | ||
121 | extern wait_queue_head_t jfs_sync_thread_wait; | ||
122 | extern int jfs_extendfs(struct super_block *, s64, int); | 116 | extern int jfs_extendfs(struct super_block *, s64, int); |
123 | 117 | ||
118 | extern struct task_struct *jfsIOthread; | ||
119 | extern struct task_struct *jfsSyncThread; | ||
120 | |||
124 | #endif /*_H_JFS_SUPERBLOCK */ | 121 | #endif /*_H_JFS_SUPERBLOCK */ |
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index 2ddb6b892bcf..ac3d66948e8c 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/suspend.h> | 49 | #include <linux/suspend.h> |
50 | #include <linux/module.h> | 50 | #include <linux/module.h> |
51 | #include <linux/moduleparam.h> | 51 | #include <linux/moduleparam.h> |
52 | #include <linux/kthread.h> | ||
52 | #include "jfs_incore.h" | 53 | #include "jfs_incore.h" |
53 | #include "jfs_inode.h" | 54 | #include "jfs_inode.h" |
54 | #include "jfs_filsys.h" | 55 | #include "jfs_filsys.h" |
@@ -121,8 +122,7 @@ static DEFINE_SPINLOCK(jfsTxnLock); | |||
121 | #define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags) | 122 | #define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags) |
122 | #define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags) | 123 | #define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags) |
123 | 124 | ||
124 | DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait); | 125 | static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait); |
125 | DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait); | ||
126 | static int jfs_commit_thread_waking; | 126 | static int jfs_commit_thread_waking; |
127 | 127 | ||
128 | /* | 128 | /* |
@@ -207,7 +207,7 @@ static lid_t txLockAlloc(void) | |||
207 | if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) { | 207 | if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) { |
208 | jfs_info("txLockAlloc tlocks low"); | 208 | jfs_info("txLockAlloc tlocks low"); |
209 | jfs_tlocks_low = 1; | 209 | jfs_tlocks_low = 1; |
210 | wake_up(&jfs_sync_thread_wait); | 210 | wake_up_process(jfsSyncThread); |
211 | } | 211 | } |
212 | 212 | ||
213 | return lid; | 213 | return lid; |
@@ -2743,10 +2743,6 @@ int jfs_lazycommit(void *arg) | |||
2743 | unsigned long flags; | 2743 | unsigned long flags; |
2744 | struct jfs_sb_info *sbi; | 2744 | struct jfs_sb_info *sbi; |
2745 | 2745 | ||
2746 | daemonize("jfsCommit"); | ||
2747 | |||
2748 | complete(&jfsIOwait); | ||
2749 | |||
2750 | do { | 2746 | do { |
2751 | LAZY_LOCK(flags); | 2747 | LAZY_LOCK(flags); |
2752 | jfs_commit_thread_waking = 0; /* OK to wake another thread */ | 2748 | jfs_commit_thread_waking = 0; /* OK to wake another thread */ |
@@ -2806,13 +2802,13 @@ int jfs_lazycommit(void *arg) | |||
2806 | current->state = TASK_RUNNING; | 2802 | current->state = TASK_RUNNING; |
2807 | remove_wait_queue(&jfs_commit_thread_wait, &wq); | 2803 | remove_wait_queue(&jfs_commit_thread_wait, &wq); |
2808 | } | 2804 | } |
2809 | } while (!jfs_stop_threads); | 2805 | } while (!kthread_should_stop()); |
2810 | 2806 | ||
2811 | if (!list_empty(&TxAnchor.unlock_queue)) | 2807 | if (!list_empty(&TxAnchor.unlock_queue)) |
2812 | jfs_err("jfs_lazycommit being killed w/pending transactions!"); | 2808 | jfs_err("jfs_lazycommit being killed w/pending transactions!"); |
2813 | else | 2809 | else |
2814 | jfs_info("jfs_lazycommit being killed\n"); | 2810 | jfs_info("jfs_lazycommit being killed\n"); |
2815 | complete_and_exit(&jfsIOwait, 0); | 2811 | return 0; |
2816 | } | 2812 | } |
2817 | 2813 | ||
2818 | void txLazyUnlock(struct tblock * tblk) | 2814 | void txLazyUnlock(struct tblock * tblk) |
@@ -2876,10 +2872,10 @@ restart: | |||
2876 | */ | 2872 | */ |
2877 | TXN_UNLOCK(); | 2873 | TXN_UNLOCK(); |
2878 | tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE); | 2874 | tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE); |
2879 | down(&jfs_ip->commit_sem); | 2875 | mutex_lock(&jfs_ip->commit_mutex); |
2880 | txCommit(tid, 1, &ip, 0); | 2876 | txCommit(tid, 1, &ip, 0); |
2881 | txEnd(tid); | 2877 | txEnd(tid); |
2882 | up(&jfs_ip->commit_sem); | 2878 | mutex_unlock(&jfs_ip->commit_mutex); |
2883 | /* | 2879 | /* |
2884 | * Just to be safe. I don't know how | 2880 | * Just to be safe. I don't know how |
2885 | * long we can run without blocking | 2881 | * long we can run without blocking |
@@ -2932,10 +2928,6 @@ int jfs_sync(void *arg) | |||
2932 | int rc; | 2928 | int rc; |
2933 | tid_t tid; | 2929 | tid_t tid; |
2934 | 2930 | ||
2935 | daemonize("jfsSync"); | ||
2936 | |||
2937 | complete(&jfsIOwait); | ||
2938 | |||
2939 | do { | 2931 | do { |
2940 | /* | 2932 | /* |
2941 | * write each inode on the anonymous inode list | 2933 | * write each inode on the anonymous inode list |
@@ -2952,7 +2944,7 @@ int jfs_sync(void *arg) | |||
2952 | * Inode is being freed | 2944 | * Inode is being freed |
2953 | */ | 2945 | */ |
2954 | list_del_init(&jfs_ip->anon_inode_list); | 2946 | list_del_init(&jfs_ip->anon_inode_list); |
2955 | } else if (! down_trylock(&jfs_ip->commit_sem)) { | 2947 | } else if (! !mutex_trylock(&jfs_ip->commit_mutex)) { |
2956 | /* | 2948 | /* |
2957 | * inode will be removed from anonymous list | 2949 | * inode will be removed from anonymous list |
2958 | * when it is committed | 2950 | * when it is committed |
@@ -2961,7 +2953,7 @@ int jfs_sync(void *arg) | |||
2961 | tid = txBegin(ip->i_sb, COMMIT_INODE); | 2953 | tid = txBegin(ip->i_sb, COMMIT_INODE); |
2962 | rc = txCommit(tid, 1, &ip, 0); | 2954 | rc = txCommit(tid, 1, &ip, 0); |
2963 | txEnd(tid); | 2955 | txEnd(tid); |
2964 | up(&jfs_ip->commit_sem); | 2956 | mutex_unlock(&jfs_ip->commit_mutex); |
2965 | 2957 | ||
2966 | iput(ip); | 2958 | iput(ip); |
2967 | /* | 2959 | /* |
@@ -2971,7 +2963,7 @@ int jfs_sync(void *arg) | |||
2971 | cond_resched(); | 2963 | cond_resched(); |
2972 | TXN_LOCK(); | 2964 | TXN_LOCK(); |
2973 | } else { | 2965 | } else { |
2974 | /* We can't get the commit semaphore. It may | 2966 | /* We can't get the commit mutex. It may |
2975 | * be held by a thread waiting for tlock's | 2967 | * be held by a thread waiting for tlock's |
2976 | * so let's not block here. Save it to | 2968 | * so let's not block here. Save it to |
2977 | * put back on the anon_list. | 2969 | * put back on the anon_list. |
@@ -2996,19 +2988,15 @@ int jfs_sync(void *arg) | |||
2996 | TXN_UNLOCK(); | 2988 | TXN_UNLOCK(); |
2997 | refrigerator(); | 2989 | refrigerator(); |
2998 | } else { | 2990 | } else { |
2999 | DECLARE_WAITQUEUE(wq, current); | ||
3000 | |||
3001 | add_wait_queue(&jfs_sync_thread_wait, &wq); | ||
3002 | set_current_state(TASK_INTERRUPTIBLE); | 2991 | set_current_state(TASK_INTERRUPTIBLE); |
3003 | TXN_UNLOCK(); | 2992 | TXN_UNLOCK(); |
3004 | schedule(); | 2993 | schedule(); |
3005 | current->state = TASK_RUNNING; | 2994 | current->state = TASK_RUNNING; |
3006 | remove_wait_queue(&jfs_sync_thread_wait, &wq); | ||
3007 | } | 2995 | } |
3008 | } while (!jfs_stop_threads); | 2996 | } while (!kthread_should_stop()); |
3009 | 2997 | ||
3010 | jfs_info("jfs_sync being killed"); | 2998 | jfs_info("jfs_sync being killed"); |
3011 | complete_and_exit(&jfsIOwait, 0); | 2999 | return 0; |
3012 | } | 3000 | } |
3013 | 3001 | ||
3014 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG) | 3002 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG) |
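One subtlety in the txnmgr conversion: down_trylock() returns 0 when it acquires the semaphore, while mutex_trylock() returns 1 on success and 0 on contention, so the sense of the test flips (the hunk above keeps the old `!` and adds a second one, which is equivalent). A short kernel-style sketch of the don't-block pattern jfs_sync() relies on (the function name is illustrative):

    #include <linux/mutex.h>
    #include <linux/errno.h>

    /* Commit only if nobody else holds the per-inode commit mutex; never block. */
    static int try_commit(struct mutex *commit_mutex)
    {
        if (!mutex_trylock(commit_mutex))   /* old code: if (down_trylock(sem)) */
            return -EBUSY;                  /* contended: leave it on the anon list */

        /* ... txBegin() / txCommit() / txEnd() would go here ... */

        mutex_unlock(commit_mutex);
        return 0;
    }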
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 4abbe8604302..309cee575f7d 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
@@ -104,8 +104,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, | |||
104 | 104 | ||
105 | tid = txBegin(dip->i_sb, 0); | 105 | tid = txBegin(dip->i_sb, 0); |
106 | 106 | ||
107 | down(&JFS_IP(dip)->commit_sem); | 107 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
108 | down(&JFS_IP(ip)->commit_sem); | 108 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
109 | 109 | ||
110 | rc = jfs_init_acl(tid, ip, dip); | 110 | rc = jfs_init_acl(tid, ip, dip); |
111 | if (rc) | 111 | if (rc) |
@@ -165,8 +165,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, | |||
165 | 165 | ||
166 | out3: | 166 | out3: |
167 | txEnd(tid); | 167 | txEnd(tid); |
168 | up(&JFS_IP(dip)->commit_sem); | 168 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
169 | up(&JFS_IP(ip)->commit_sem); | 169 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
170 | if (rc) { | 170 | if (rc) { |
171 | free_ea_wmap(ip); | 171 | free_ea_wmap(ip); |
172 | ip->i_nlink = 0; | 172 | ip->i_nlink = 0; |
@@ -238,8 +238,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) | |||
238 | 238 | ||
239 | tid = txBegin(dip->i_sb, 0); | 239 | tid = txBegin(dip->i_sb, 0); |
240 | 240 | ||
241 | down(&JFS_IP(dip)->commit_sem); | 241 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
242 | down(&JFS_IP(ip)->commit_sem); | 242 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
243 | 243 | ||
244 | rc = jfs_init_acl(tid, ip, dip); | 244 | rc = jfs_init_acl(tid, ip, dip); |
245 | if (rc) | 245 | if (rc) |
@@ -300,8 +300,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) | |||
300 | 300 | ||
301 | out3: | 301 | out3: |
302 | txEnd(tid); | 302 | txEnd(tid); |
303 | up(&JFS_IP(dip)->commit_sem); | 303 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
304 | up(&JFS_IP(ip)->commit_sem); | 304 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
305 | if (rc) { | 305 | if (rc) { |
306 | free_ea_wmap(ip); | 306 | free_ea_wmap(ip); |
307 | ip->i_nlink = 0; | 307 | ip->i_nlink = 0; |
@@ -365,8 +365,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) | |||
365 | 365 | ||
366 | tid = txBegin(dip->i_sb, 0); | 366 | tid = txBegin(dip->i_sb, 0); |
367 | 367 | ||
368 | down(&JFS_IP(dip)->commit_sem); | 368 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
369 | down(&JFS_IP(ip)->commit_sem); | 369 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
370 | 370 | ||
371 | iplist[0] = dip; | 371 | iplist[0] = dip; |
372 | iplist[1] = ip; | 372 | iplist[1] = ip; |
@@ -384,8 +384,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) | |||
384 | if (rc == -EIO) | 384 | if (rc == -EIO) |
385 | txAbort(tid, 1); | 385 | txAbort(tid, 1); |
386 | txEnd(tid); | 386 | txEnd(tid); |
387 | up(&JFS_IP(dip)->commit_sem); | 387 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
388 | up(&JFS_IP(ip)->commit_sem); | 388 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
389 | 389 | ||
390 | goto out2; | 390 | goto out2; |
391 | } | 391 | } |
@@ -422,8 +422,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) | |||
422 | 422 | ||
423 | txEnd(tid); | 423 | txEnd(tid); |
424 | 424 | ||
425 | up(&JFS_IP(dip)->commit_sem); | 425 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
426 | up(&JFS_IP(ip)->commit_sem); | 426 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
427 | 427 | ||
428 | /* | 428 | /* |
429 | * Truncating the directory index table is not guaranteed. It | 429 | * Truncating the directory index table is not guaranteed. It |
@@ -488,8 +488,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
488 | 488 | ||
489 | tid = txBegin(dip->i_sb, 0); | 489 | tid = txBegin(dip->i_sb, 0); |
490 | 490 | ||
491 | down(&JFS_IP(dip)->commit_sem); | 491 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
492 | down(&JFS_IP(ip)->commit_sem); | 492 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
493 | 493 | ||
494 | iplist[0] = dip; | 494 | iplist[0] = dip; |
495 | iplist[1] = ip; | 495 | iplist[1] = ip; |
@@ -503,8 +503,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
503 | if (rc == -EIO) | 503 | if (rc == -EIO) |
504 | txAbort(tid, 1); /* Marks FS Dirty */ | 504 | txAbort(tid, 1); /* Marks FS Dirty */ |
505 | txEnd(tid); | 505 | txEnd(tid); |
506 | up(&JFS_IP(dip)->commit_sem); | 506 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
507 | up(&JFS_IP(ip)->commit_sem); | 507 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
508 | IWRITE_UNLOCK(ip); | 508 | IWRITE_UNLOCK(ip); |
509 | goto out1; | 509 | goto out1; |
510 | } | 510 | } |
@@ -527,8 +527,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
527 | if ((new_size = commitZeroLink(tid, ip)) < 0) { | 527 | if ((new_size = commitZeroLink(tid, ip)) < 0) { |
528 | txAbort(tid, 1); /* Marks FS Dirty */ | 528 | txAbort(tid, 1); /* Marks FS Dirty */ |
529 | txEnd(tid); | 529 | txEnd(tid); |
530 | up(&JFS_IP(dip)->commit_sem); | 530 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
531 | up(&JFS_IP(ip)->commit_sem); | 531 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
532 | IWRITE_UNLOCK(ip); | 532 | IWRITE_UNLOCK(ip); |
533 | rc = new_size; | 533 | rc = new_size; |
534 | goto out1; | 534 | goto out1; |
@@ -556,13 +556,13 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
556 | 556 | ||
557 | txEnd(tid); | 557 | txEnd(tid); |
558 | 558 | ||
559 | up(&JFS_IP(dip)->commit_sem); | 559 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
560 | up(&JFS_IP(ip)->commit_sem); | 560 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
561 | 561 | ||
562 | 562 | ||
563 | while (new_size && (rc == 0)) { | 563 | while (new_size && (rc == 0)) { |
564 | tid = txBegin(dip->i_sb, 0); | 564 | tid = txBegin(dip->i_sb, 0); |
565 | down(&JFS_IP(ip)->commit_sem); | 565 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
566 | new_size = xtTruncate_pmap(tid, ip, new_size); | 566 | new_size = xtTruncate_pmap(tid, ip, new_size); |
567 | if (new_size < 0) { | 567 | if (new_size < 0) { |
568 | txAbort(tid, 1); /* Marks FS Dirty */ | 568 | txAbort(tid, 1); /* Marks FS Dirty */ |
@@ -570,7 +570,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
570 | } else | 570 | } else |
571 | rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC); | 571 | rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC); |
572 | txEnd(tid); | 572 | txEnd(tid); |
573 | up(&JFS_IP(ip)->commit_sem); | 573 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
574 | } | 574 | } |
575 | 575 | ||
576 | if (ip->i_nlink == 0) | 576 | if (ip->i_nlink == 0) |
@@ -805,8 +805,8 @@ static int jfs_link(struct dentry *old_dentry, | |||
805 | 805 | ||
806 | tid = txBegin(ip->i_sb, 0); | 806 | tid = txBegin(ip->i_sb, 0); |
807 | 807 | ||
808 | down(&JFS_IP(dir)->commit_sem); | 808 | mutex_lock(&JFS_IP(dir)->commit_mutex); |
809 | down(&JFS_IP(ip)->commit_sem); | 809 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
810 | 810 | ||
811 | /* | 811 | /* |
812 | * scan parent directory for entry/freespace | 812 | * scan parent directory for entry/freespace |
@@ -847,8 +847,8 @@ static int jfs_link(struct dentry *old_dentry, | |||
847 | out: | 847 | out: |
848 | txEnd(tid); | 848 | txEnd(tid); |
849 | 849 | ||
850 | up(&JFS_IP(dir)->commit_sem); | 850 | mutex_unlock(&JFS_IP(dir)->commit_mutex); |
851 | up(&JFS_IP(ip)->commit_sem); | 851 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
852 | 852 | ||
853 | jfs_info("jfs_link: rc:%d", rc); | 853 | jfs_info("jfs_link: rc:%d", rc); |
854 | return rc; | 854 | return rc; |
@@ -916,8 +916,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
916 | 916 | ||
917 | tid = txBegin(dip->i_sb, 0); | 917 | tid = txBegin(dip->i_sb, 0); |
918 | 918 | ||
919 | down(&JFS_IP(dip)->commit_sem); | 919 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
920 | down(&JFS_IP(ip)->commit_sem); | 920 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
921 | 921 | ||
922 | rc = jfs_init_security(tid, ip, dip); | 922 | rc = jfs_init_security(tid, ip, dip); |
923 | if (rc) | 923 | if (rc) |
@@ -1037,8 +1037,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
1037 | 1037 | ||
1038 | out3: | 1038 | out3: |
1039 | txEnd(tid); | 1039 | txEnd(tid); |
1040 | up(&JFS_IP(dip)->commit_sem); | 1040 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
1041 | up(&JFS_IP(ip)->commit_sem); | 1041 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
1042 | if (rc) { | 1042 | if (rc) { |
1043 | free_ea_wmap(ip); | 1043 | free_ea_wmap(ip); |
1044 | ip->i_nlink = 0; | 1044 | ip->i_nlink = 0; |
@@ -1141,13 +1141,13 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1141 | */ | 1141 | */ |
1142 | tid = txBegin(new_dir->i_sb, 0); | 1142 | tid = txBegin(new_dir->i_sb, 0); |
1143 | 1143 | ||
1144 | down(&JFS_IP(new_dir)->commit_sem); | 1144 | mutex_lock(&JFS_IP(new_dir)->commit_mutex); |
1145 | down(&JFS_IP(old_ip)->commit_sem); | 1145 | mutex_lock(&JFS_IP(old_ip)->commit_mutex); |
1146 | if (old_dir != new_dir) | 1146 | if (old_dir != new_dir) |
1147 | down(&JFS_IP(old_dir)->commit_sem); | 1147 | mutex_lock(&JFS_IP(old_dir)->commit_mutex); |
1148 | 1148 | ||
1149 | if (new_ip) { | 1149 | if (new_ip) { |
1150 | down(&JFS_IP(new_ip)->commit_sem); | 1150 | mutex_lock(&JFS_IP(new_ip)->commit_mutex); |
1151 | /* | 1151 | /* |
1152 | * Change existing directory entry to new inode number | 1152 | * Change existing directory entry to new inode number |
1153 | */ | 1153 | */ |
@@ -1160,10 +1160,10 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1160 | if (S_ISDIR(new_ip->i_mode)) { | 1160 | if (S_ISDIR(new_ip->i_mode)) { |
1161 | new_ip->i_nlink--; | 1161 | new_ip->i_nlink--; |
1162 | if (new_ip->i_nlink) { | 1162 | if (new_ip->i_nlink) { |
1163 | up(&JFS_IP(new_dir)->commit_sem); | 1163 | mutex_unlock(&JFS_IP(new_dir)->commit_mutex); |
1164 | up(&JFS_IP(old_ip)->commit_sem); | 1164 | mutex_unlock(&JFS_IP(old_ip)->commit_mutex); |
1165 | if (old_dir != new_dir) | 1165 | if (old_dir != new_dir) |
1166 | up(&JFS_IP(old_dir)->commit_sem); | 1166 | mutex_unlock(&JFS_IP(old_dir)->commit_mutex); |
1167 | if (!S_ISDIR(old_ip->i_mode) && new_ip) | 1167 | if (!S_ISDIR(old_ip->i_mode) && new_ip) |
1168 | IWRITE_UNLOCK(new_ip); | 1168 | IWRITE_UNLOCK(new_ip); |
1169 | jfs_error(new_ip->i_sb, | 1169 | jfs_error(new_ip->i_sb, |
@@ -1282,16 +1282,16 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1282 | out4: | 1282 | out4: |
1283 | txEnd(tid); | 1283 | txEnd(tid); |
1284 | 1284 | ||
1285 | up(&JFS_IP(new_dir)->commit_sem); | 1285 | mutex_unlock(&JFS_IP(new_dir)->commit_mutex); |
1286 | up(&JFS_IP(old_ip)->commit_sem); | 1286 | mutex_unlock(&JFS_IP(old_ip)->commit_mutex); |
1287 | if (old_dir != new_dir) | 1287 | if (old_dir != new_dir) |
1288 | up(&JFS_IP(old_dir)->commit_sem); | 1288 | mutex_unlock(&JFS_IP(old_dir)->commit_mutex); |
1289 | if (new_ip) | 1289 | if (new_ip) |
1290 | up(&JFS_IP(new_ip)->commit_sem); | 1290 | mutex_unlock(&JFS_IP(new_ip)->commit_mutex); |
1291 | 1291 | ||
1292 | while (new_size && (rc == 0)) { | 1292 | while (new_size && (rc == 0)) { |
1293 | tid = txBegin(new_ip->i_sb, 0); | 1293 | tid = txBegin(new_ip->i_sb, 0); |
1294 | down(&JFS_IP(new_ip)->commit_sem); | 1294 | mutex_lock(&JFS_IP(new_ip)->commit_mutex); |
1295 | new_size = xtTruncate_pmap(tid, new_ip, new_size); | 1295 | new_size = xtTruncate_pmap(tid, new_ip, new_size); |
1296 | if (new_size < 0) { | 1296 | if (new_size < 0) { |
1297 | txAbort(tid, 1); | 1297 | txAbort(tid, 1); |
@@ -1299,7 +1299,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1299 | } else | 1299 | } else |
1300 | rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC); | 1300 | rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC); |
1301 | txEnd(tid); | 1301 | txEnd(tid); |
1302 | up(&JFS_IP(new_ip)->commit_sem); | 1302 | mutex_unlock(&JFS_IP(new_ip)->commit_mutex); |
1303 | } | 1303 | } |
1304 | if (new_ip && (new_ip->i_nlink == 0)) | 1304 | if (new_ip && (new_ip->i_nlink == 0)) |
1305 | set_cflag(COMMIT_Nolink, new_ip); | 1305 | set_cflag(COMMIT_Nolink, new_ip); |
@@ -1361,8 +1361,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, | |||
1361 | 1361 | ||
1362 | tid = txBegin(dir->i_sb, 0); | 1362 | tid = txBegin(dir->i_sb, 0); |
1363 | 1363 | ||
1364 | down(&JFS_IP(dir)->commit_sem); | 1364 | mutex_lock(&JFS_IP(dir)->commit_mutex); |
1365 | down(&JFS_IP(ip)->commit_sem); | 1365 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
1366 | 1366 | ||
1367 | rc = jfs_init_acl(tid, ip, dir); | 1367 | rc = jfs_init_acl(tid, ip, dir); |
1368 | if (rc) | 1368 | if (rc) |
@@ -1407,8 +1407,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, | |||
1407 | 1407 | ||
1408 | out3: | 1408 | out3: |
1409 | txEnd(tid); | 1409 | txEnd(tid); |
1410 | up(&JFS_IP(ip)->commit_sem); | 1410 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
1411 | up(&JFS_IP(dir)->commit_sem); | 1411 | mutex_unlock(&JFS_IP(dir)->commit_mutex); |
1412 | if (rc) { | 1412 | if (rc) { |
1413 | free_ea_wmap(ip); | 1413 | free_ea_wmap(ip); |
1414 | ip->i_nlink = 0; | 1414 | ip->i_nlink = 0; |
@@ -1523,6 +1523,7 @@ struct file_operations jfs_dir_operations = { | |||
1523 | .read = generic_read_dir, | 1523 | .read = generic_read_dir, |
1524 | .readdir = jfs_readdir, | 1524 | .readdir = jfs_readdir, |
1525 | .fsync = jfs_fsync, | 1525 | .fsync = jfs_fsync, |
1526 | .ioctl = jfs_ioctl, | ||
1526 | }; | 1527 | }; |
1527 | 1528 | ||
1528 | static int jfs_ci_hash(struct dentry *dir, struct qstr *this) | 1529 | static int jfs_ci_hash(struct dentry *dir, struct qstr *this) |
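Throughout namei.c the commit_sem semaphore is replaced one-for-one by commit_mutex; the conversion is purely mechanical, as in this sketch (struct and function names are illustrative):

    #include <linux/mutex.h>

    struct example_inode_info {
            struct mutex commit_mutex;       /* was: struct semaphore commit_sem */
    };

    static void example_commit(struct example_inode_info *ji)
    {
            mutex_lock(&ji->commit_mutex);   /* was: down(&ji->commit_sem) */
            /* ... build and commit the transaction ... */
            mutex_unlock(&ji->commit_mutex); /* was: up(&ji->commit_sem)   */
    }

    /* one-time setup, e.g. in the slab constructor:
     *     mutex_init(&ji->commit_mutex);    was: init_MUTEX(&ji->commit_sem)
     */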
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 8d31f1336431..18f69e6aa719 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/vfs.h> | 25 | #include <linux/vfs.h> |
26 | #include <linux/mount.h> | 26 | #include <linux/mount.h> |
27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
28 | #include <linux/kthread.h> | ||
28 | #include <linux/posix_acl.h> | 29 | #include <linux/posix_acl.h> |
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
@@ -54,11 +55,9 @@ static int commit_threads = 0; | |||
54 | module_param(commit_threads, int, 0); | 55 | module_param(commit_threads, int, 0); |
55 | MODULE_PARM_DESC(commit_threads, "Number of commit threads"); | 56 | MODULE_PARM_DESC(commit_threads, "Number of commit threads"); |
56 | 57 | ||
57 | int jfs_stop_threads; | 58 | static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS]; |
58 | static pid_t jfsIOthread; | 59 | struct task_struct *jfsIOthread; |
59 | static pid_t jfsCommitThread[MAX_COMMIT_THREADS]; | 60 | struct task_struct *jfsSyncThread; |
60 | static pid_t jfsSyncThread; | ||
61 | DECLARE_COMPLETION(jfsIOwait); | ||
62 | 61 | ||
63 | #ifdef CONFIG_JFS_DEBUG | 62 | #ifdef CONFIG_JFS_DEBUG |
64 | int jfsloglevel = JFS_LOGLEVEL_WARN; | 63 | int jfsloglevel = JFS_LOGLEVEL_WARN; |
@@ -195,7 +194,7 @@ static void jfs_put_super(struct super_block *sb) | |||
195 | enum { | 194 | enum { |
196 | Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, | 195 | Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, |
197 | Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, | 196 | Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, |
198 | Opt_usrquota, Opt_grpquota | 197 | Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask |
199 | }; | 198 | }; |
200 | 199 | ||
201 | static match_table_t tokens = { | 200 | static match_table_t tokens = { |
@@ -209,6 +208,9 @@ static match_table_t tokens = { | |||
209 | {Opt_ignore, "quota"}, | 208 | {Opt_ignore, "quota"}, |
210 | {Opt_usrquota, "usrquota"}, | 209 | {Opt_usrquota, "usrquota"}, |
211 | {Opt_grpquota, "grpquota"}, | 210 | {Opt_grpquota, "grpquota"}, |
211 | {Opt_uid, "uid=%u"}, | ||
212 | {Opt_gid, "gid=%u"}, | ||
213 | {Opt_umask, "umask=%u"}, | ||
212 | {Opt_err, NULL} | 214 | {Opt_err, NULL} |
213 | }; | 215 | }; |
214 | 216 | ||
@@ -313,7 +315,29 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, | |||
313 | "JFS: quota operations not supported\n"); | 315 | "JFS: quota operations not supported\n"); |
314 | break; | 316 | break; |
315 | #endif | 317 | #endif |
316 | 318 | case Opt_uid: | |
319 | { | ||
320 | char *uid = args[0].from; | ||
321 | sbi->uid = simple_strtoul(uid, &uid, 0); | ||
322 | break; | ||
323 | } | ||
324 | case Opt_gid: | ||
325 | { | ||
326 | char *gid = args[0].from; | ||
327 | sbi->gid = simple_strtoul(gid, &gid, 0); | ||
328 | break; | ||
329 | } | ||
330 | case Opt_umask: | ||
331 | { | ||
332 | char *umask = args[0].from; | ||
333 | sbi->umask = simple_strtoul(umask, &umask, 8); | ||
334 | if (sbi->umask & ~0777) { | ||
335 | printk(KERN_ERR | ||
336 | "JFS: Invalid value of umask\n"); | ||
337 | goto cleanup; | ||
338 | } | ||
339 | break; | ||
340 | } | ||
317 | default: | 341 | default: |
318 | printk("jfs: Unrecognized mount option \"%s\" " | 342 | printk("jfs: Unrecognized mount option \"%s\" " |
319 | " or missing value\n", p); | 343 | " or missing value\n", p); |
@@ -396,12 +420,12 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
396 | if (!new_valid_dev(sb->s_bdev->bd_dev)) | 420 | if (!new_valid_dev(sb->s_bdev->bd_dev)) |
397 | return -EOVERFLOW; | 421 | return -EOVERFLOW; |
398 | 422 | ||
399 | sbi = kmalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); | 423 | sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); |
400 | if (!sbi) | 424 | if (!sbi) |
401 | return -ENOSPC; | 425 | return -ENOSPC; |
402 | memset(sbi, 0, sizeof (struct jfs_sb_info)); | ||
403 | sb->s_fs_info = sbi; | 426 | sb->s_fs_info = sbi; |
404 | sbi->sb = sb; | 427 | sbi->sb = sb; |
428 | sbi->uid = sbi->gid = sbi->umask = -1; | ||
405 | 429 | ||
406 | /* initialize the mount flag and determine the default error handler */ | 430 | /* initialize the mount flag and determine the default error handler */ |
407 | flag = JFS_ERR_REMOUNT_RO; | 431 | flag = JFS_ERR_REMOUNT_RO; |
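Two small things happen in the jfs_fill_super hunk: the kmalloc()+memset() pair collapses into kzalloc(), and uid/gid/umask are initialised to -1 as the "not specified" sentinel that the option parser and jfs_show_options test against. The allocation change is behaviour-preserving:

    /* before */
    sbi = kmalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
    if (!sbi)
            return -ENOSPC;                 /* error code kept as in the hunk */
    memset(sbi, 0, sizeof(struct jfs_sb_info));

    /* after: one call, same zeroed result */
    sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
    if (!sbi)
            return -ENOSPC;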
@@ -564,10 +588,14 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
564 | { | 588 | { |
565 | struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb); | 589 | struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb); |
566 | 590 | ||
591 | if (sbi->uid != -1) | ||
592 | seq_printf(seq, ",uid=%d", sbi->uid); | ||
593 | if (sbi->gid != -1) | ||
594 | seq_printf(seq, ",gid=%d", sbi->gid); | ||
595 | if (sbi->umask != -1) | ||
596 | seq_printf(seq, ",umask=%03o", sbi->umask); | ||
567 | if (sbi->flag & JFS_NOINTEGRITY) | 597 | if (sbi->flag & JFS_NOINTEGRITY) |
568 | seq_puts(seq, ",nointegrity"); | 598 | seq_puts(seq, ",nointegrity"); |
569 | else | ||
570 | seq_puts(seq, ",integrity"); | ||
571 | 599 | ||
572 | #if defined(CONFIG_QUOTA) | 600 | #if defined(CONFIG_QUOTA) |
573 | if (sbi->flag & JFS_USRQUOTA) | 601 | if (sbi->flag & JFS_USRQUOTA) |
@@ -617,7 +645,7 @@ static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) | |||
617 | memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); | 645 | memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); |
618 | INIT_LIST_HEAD(&jfs_ip->anon_inode_list); | 646 | INIT_LIST_HEAD(&jfs_ip->anon_inode_list); |
619 | init_rwsem(&jfs_ip->rdwrlock); | 647 | init_rwsem(&jfs_ip->rdwrlock); |
620 | init_MUTEX(&jfs_ip->commit_sem); | 648 | mutex_init(&jfs_ip->commit_mutex); |
621 | init_rwsem(&jfs_ip->xattr_sem); | 649 | init_rwsem(&jfs_ip->xattr_sem); |
622 | spin_lock_init(&jfs_ip->ag_lock); | 650 | spin_lock_init(&jfs_ip->ag_lock); |
623 | jfs_ip->active_ag = -1; | 651 | jfs_ip->active_ag = -1; |
@@ -661,12 +689,12 @@ static int __init init_jfs_fs(void) | |||
661 | /* | 689 | /* |
662 | * I/O completion thread (endio) | 690 | * I/O completion thread (endio) |
663 | */ | 691 | */ |
664 | jfsIOthread = kernel_thread(jfsIOWait, NULL, CLONE_KERNEL); | 692 | jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO"); |
665 | if (jfsIOthread < 0) { | 693 | if (IS_ERR(jfsIOthread)) { |
666 | jfs_err("init_jfs_fs: fork failed w/rc = %d", jfsIOthread); | 694 | rc = PTR_ERR(jfsIOthread); |
695 | jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); | ||
667 | goto end_txmngr; | 696 | goto end_txmngr; |
668 | } | 697 | } |
669 | wait_for_completion(&jfsIOwait); /* Wait until thread starts */ | ||
670 | 698 | ||
671 | if (commit_threads < 1) | 699 | if (commit_threads < 1) |
672 | commit_threads = num_online_cpus(); | 700 | commit_threads = num_online_cpus(); |
@@ -674,24 +702,21 @@ static int __init init_jfs_fs(void) | |||
674 | commit_threads = MAX_COMMIT_THREADS; | 702 | commit_threads = MAX_COMMIT_THREADS; |
675 | 703 | ||
676 | for (i = 0; i < commit_threads; i++) { | 704 | for (i = 0; i < commit_threads; i++) { |
677 | jfsCommitThread[i] = kernel_thread(jfs_lazycommit, NULL, | 705 | jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit"); |
678 | CLONE_KERNEL); | 706 | if (IS_ERR(jfsCommitThread[i])) { |
679 | if (jfsCommitThread[i] < 0) { | 707 | rc = PTR_ERR(jfsCommitThread[i]); |
680 | jfs_err("init_jfs_fs: fork failed w/rc = %d", | 708 | jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); |
681 | jfsCommitThread[i]); | ||
682 | commit_threads = i; | 709 | commit_threads = i; |
683 | goto kill_committask; | 710 | goto kill_committask; |
684 | } | 711 | } |
685 | /* Wait until thread starts */ | ||
686 | wait_for_completion(&jfsIOwait); | ||
687 | } | 712 | } |
688 | 713 | ||
689 | jfsSyncThread = kernel_thread(jfs_sync, NULL, CLONE_KERNEL); | 714 | jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync"); |
690 | if (jfsSyncThread < 0) { | 715 | if (IS_ERR(jfsSyncThread)) { |
691 | jfs_err("init_jfs_fs: fork failed w/rc = %d", jfsSyncThread); | 716 | rc = PTR_ERR(jfsSyncThread); |
717 | jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); | ||
692 | goto kill_committask; | 718 | goto kill_committask; |
693 | } | 719 | } |
694 | wait_for_completion(&jfsIOwait); /* Wait until thread starts */ | ||
695 | 720 | ||
696 | #ifdef PROC_FS_JFS | 721 | #ifdef PROC_FS_JFS |
697 | jfs_proc_init(); | 722 | jfs_proc_init(); |
@@ -700,13 +725,9 @@ static int __init init_jfs_fs(void) | |||
700 | return register_filesystem(&jfs_fs_type); | 725 | return register_filesystem(&jfs_fs_type); |
701 | 726 | ||
702 | kill_committask: | 727 | kill_committask: |
703 | jfs_stop_threads = 1; | ||
704 | wake_up_all(&jfs_commit_thread_wait); | ||
705 | for (i = 0; i < commit_threads; i++) | 728 | for (i = 0; i < commit_threads; i++) |
706 | wait_for_completion(&jfsIOwait); | 729 | kthread_stop(jfsCommitThread[i]); |
707 | 730 | kthread_stop(jfsIOthread); | |
708 | wake_up(&jfs_IO_thread_wait); | ||
709 | wait_for_completion(&jfsIOwait); /* Wait for thread exit */ | ||
710 | end_txmngr: | 731 | end_txmngr: |
711 | txExit(); | 732 | txExit(); |
712 | free_metapage: | 733 | free_metapage: |
@@ -722,16 +743,13 @@ static void __exit exit_jfs_fs(void) | |||
722 | 743 | ||
723 | jfs_info("exit_jfs_fs called"); | 744 | jfs_info("exit_jfs_fs called"); |
724 | 745 | ||
725 | jfs_stop_threads = 1; | ||
726 | txExit(); | 746 | txExit(); |
727 | metapage_exit(); | 747 | metapage_exit(); |
728 | wake_up(&jfs_IO_thread_wait); | 748 | |
729 | wait_for_completion(&jfsIOwait); /* Wait until IO thread exits */ | 749 | kthread_stop(jfsIOthread); |
730 | wake_up_all(&jfs_commit_thread_wait); | ||
731 | for (i = 0; i < commit_threads; i++) | 750 | for (i = 0; i < commit_threads; i++) |
732 | wait_for_completion(&jfsIOwait); | 751 | kthread_stop(jfsCommitThread[i]); |
733 | wake_up(&jfs_sync_thread_wait); | 752 | kthread_stop(jfsSyncThread); |
734 | wait_for_completion(&jfsIOwait); /* Wait until Sync thread exits */ | ||
735 | #ifdef PROC_FS_JFS | 753 | #ifdef PROC_FS_JFS |
736 | jfs_proc_clean(); | 754 | jfs_proc_clean(); |
737 | #endif | 755 | #endif |
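Taken together, the super.c hunks retire the kernel_thread()/jfsIOwait completion handshake: kthread_run() returns a task_struct (or an ERR_PTR) with the thread already running, and kthread_stop() both wakes the thread and waits for it to return. The resulting start/stop shape, sketched with illustrative names:

    #include <linux/kthread.h>
    #include <linux/err.h>

    static int example_daemon(void *arg);   /* a kthread_should_stop() loop,
                                             * as sketched earlier */
    static struct task_struct *worker;

    static int __init example_init(void)
    {
            worker = kthread_run(example_daemon, NULL, "exampled");
            if (IS_ERR(worker))
                    return PTR_ERR(worker); /* no completion handshake needed */
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* wakes the thread and blocks until its return value is collected */
            kthread_stop(worker);
    }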
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index f23048f9471f..9bc5b7c055ce 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c | |||
@@ -934,13 +934,13 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value, | |||
934 | } | 934 | } |
935 | 935 | ||
936 | tid = txBegin(inode->i_sb, 0); | 936 | tid = txBegin(inode->i_sb, 0); |
937 | down(&ji->commit_sem); | 937 | mutex_lock(&ji->commit_mutex); |
938 | rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len, | 938 | rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len, |
939 | flags); | 939 | flags); |
940 | if (!rc) | 940 | if (!rc) |
941 | rc = txCommit(tid, 1, &inode, 0); | 941 | rc = txCommit(tid, 1, &inode, 0); |
942 | txEnd(tid); | 942 | txEnd(tid); |
943 | up(&ji->commit_sem); | 943 | mutex_unlock(&ji->commit_mutex); |
944 | 944 | ||
945 | return rc; | 945 | return rc; |
946 | } | 946 | } |
@@ -1093,12 +1093,12 @@ int jfs_removexattr(struct dentry *dentry, const char *name) | |||
1093 | return rc; | 1093 | return rc; |
1094 | 1094 | ||
1095 | tid = txBegin(inode->i_sb, 0); | 1095 | tid = txBegin(inode->i_sb, 0); |
1096 | down(&ji->commit_sem); | 1096 | mutex_lock(&ji->commit_mutex); |
1097 | rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); | 1097 | rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); |
1098 | if (!rc) | 1098 | if (!rc) |
1099 | rc = txCommit(tid, 1, &inode, 0); | 1099 | rc = txCommit(tid, 1, &inode, 0); |
1100 | txEnd(tid); | 1100 | txEnd(tid); |
1101 | up(&ji->commit_sem); | 1101 | mutex_unlock(&ji->commit_mutex); |
1102 | 1102 | ||
1103 | return rc; | 1103 | return rc; |
1104 | } | 1104 | } |
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index 7198f129e135..231ba090ae34 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h | |||
@@ -206,7 +206,6 @@ struct ArcProto { | |||
206 | 206 | ||
207 | extern struct ArcProto *arc_proto_map[256], *arc_proto_default, | 207 | extern struct ArcProto *arc_proto_map[256], *arc_proto_default, |
208 | *arc_bcast_proto, *arc_raw_proto; | 208 | *arc_bcast_proto, *arc_raw_proto; |
209 | extern struct ArcProto arc_proto_null; | ||
210 | 209 | ||
211 | 210 | ||
212 | /* | 211 | /* |
@@ -334,17 +333,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); | |||
334 | #define arcnet_dump_skb(dev,skb,desc) ; | 333 | #define arcnet_dump_skb(dev,skb,desc) ; |
335 | #endif | 334 | #endif |
336 | 335 | ||
337 | #if (ARCNET_DEBUG_MAX & D_RX) || (ARCNET_DEBUG_MAX & D_TX) | ||
338 | void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, | ||
339 | int take_arcnet_lock); | ||
340 | #else | ||
341 | #define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) ; | ||
342 | #endif | ||
343 | |||
344 | void arcnet_unregister_proto(struct ArcProto *proto); | 336 | void arcnet_unregister_proto(struct ArcProto *proto); |
345 | irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); | 337 | irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); |
346 | struct net_device *alloc_arcdev(char *name); | 338 | struct net_device *alloc_arcdev(char *name); |
347 | void arcnet_rx(struct net_device *dev, int bufnum); | ||
348 | 339 | ||
349 | #endif /* __KERNEL__ */ | 340 | #endif /* __KERNEL__ */ |
350 | #endif /* _LINUX_ARCDEVICE_H */ | 341 | #endif /* _LINUX_ARCDEVICE_H */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 860e7a485a5f..56bb6a4e15f3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -58,7 +58,7 @@ struct cfq_io_context { | |||
58 | * circular list of cfq_io_contexts belonging to a process io context | 58 | * circular list of cfq_io_contexts belonging to a process io context |
59 | */ | 59 | */ |
60 | struct list_head list; | 60 | struct list_head list; |
61 | struct cfq_queue *cfqq; | 61 | struct cfq_queue *cfqq[2]; |
62 | void *key; | 62 | void *key; |
63 | 63 | ||
64 | struct io_context *ioc; | 64 | struct io_context *ioc; |
@@ -69,6 +69,8 @@ struct cfq_io_context { | |||
69 | unsigned long ttime_samples; | 69 | unsigned long ttime_samples; |
70 | unsigned long ttime_mean; | 70 | unsigned long ttime_mean; |
71 | 71 | ||
72 | struct list_head queue_list; | ||
73 | |||
72 | void (*dtor)(struct cfq_io_context *); | 74 | void (*dtor)(struct cfq_io_context *); |
73 | void (*exit)(struct cfq_io_context *); | 75 | void (*exit)(struct cfq_io_context *); |
74 | }; | 76 | }; |
@@ -404,8 +406,6 @@ struct request_queue | |||
404 | 406 | ||
405 | struct blk_queue_tag *queue_tags; | 407 | struct blk_queue_tag *queue_tags; |
406 | 408 | ||
407 | atomic_t refcnt; | ||
408 | |||
409 | unsigned int nr_sorted; | 409 | unsigned int nr_sorted; |
410 | unsigned int in_flight; | 410 | unsigned int in_flight; |
411 | 411 | ||
@@ -424,6 +424,8 @@ struct request_queue | |||
424 | struct request pre_flush_rq, bar_rq, post_flush_rq; | 424 | struct request pre_flush_rq, bar_rq, post_flush_rq; |
425 | struct request *orig_bar_rq; | 425 | struct request *orig_bar_rq; |
426 | unsigned int bi_size; | 426 | unsigned int bi_size; |
427 | |||
428 | struct mutex sysfs_lock; | ||
427 | }; | 429 | }; |
428 | 430 | ||
429 | #define RQ_INACTIVE (-1) | 431 | #define RQ_INACTIVE (-1) |
@@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void); | |||
725 | int blk_get_queue(request_queue_t *); | 727 | int blk_get_queue(request_queue_t *); |
726 | request_queue_t *blk_alloc_queue(gfp_t); | 728 | request_queue_t *blk_alloc_queue(gfp_t); |
727 | request_queue_t *blk_alloc_queue_node(gfp_t, int); | 729 | request_queue_t *blk_alloc_queue_node(gfp_t, int); |
728 | #define blk_put_queue(q) blk_cleanup_queue((q)) | 730 | extern void blk_put_queue(request_queue_t *); |
729 | 731 | ||
730 | /* | 732 | /* |
731 | * tag stuff | 733 | * tag stuff |
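blkdev.h drops the raw atomic refcnt from struct request_queue and turns blk_put_queue() from a macro alias for blk_cleanup_queue() into a real function, so dropping a reference no longer tears the queue down. Callers keep the familiar get/put pairing; a sketch, assuming a caller that already holds a block_device and only needs a temporary reference:

    request_queue_t *q = bdev_get_queue(bdev);

    if (q && !blk_get_queue(q)) {   /* blk_get_queue() refuses dying queues */
            /* ... inspect or use q ... */
            blk_put_queue(q);       /* drop the reference only */
    }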
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 18cf1f3e1184..ad133fcfb239 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -48,10 +48,17 @@ struct elevator_ops | |||
48 | 48 | ||
49 | elevator_init_fn *elevator_init_fn; | 49 | elevator_init_fn *elevator_init_fn; |
50 | elevator_exit_fn *elevator_exit_fn; | 50 | elevator_exit_fn *elevator_exit_fn; |
51 | void (*trim)(struct io_context *); | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | #define ELV_NAME_MAX (16) | 54 | #define ELV_NAME_MAX (16) |
54 | 55 | ||
56 | struct elv_fs_entry { | ||
57 | struct attribute attr; | ||
58 | ssize_t (*show)(elevator_t *, char *); | ||
59 | ssize_t (*store)(elevator_t *, const char *, size_t); | ||
60 | }; | ||
61 | |||
55 | /* | 62 | /* |
56 | * identifies an elevator type, such as AS or deadline | 63 | * identifies an elevator type, such as AS or deadline |
57 | */ | 64 | */ |
@@ -60,7 +67,7 @@ struct elevator_type | |||
60 | struct list_head list; | 67 | struct list_head list; |
61 | struct elevator_ops ops; | 68 | struct elevator_ops ops; |
62 | struct elevator_type *elevator_type; | 69 | struct elevator_type *elevator_type; |
63 | struct kobj_type *elevator_ktype; | 70 | struct elv_fs_entry *elevator_attrs; |
64 | char elevator_name[ELV_NAME_MAX]; | 71 | char elevator_name[ELV_NAME_MAX]; |
65 | struct module *elevator_owner; | 72 | struct module *elevator_owner; |
66 | }; | 73 | }; |
@@ -74,6 +81,7 @@ struct elevator_queue | |||
74 | void *elevator_data; | 81 | void *elevator_data; |
75 | struct kobject kobj; | 82 | struct kobject kobj; |
76 | struct elevator_type *elevator_type; | 83 | struct elevator_type *elevator_type; |
84 | struct mutex sysfs_lock; | ||
77 | }; | 85 | }; |
78 | 86 | ||
79 | /* | 87 | /* |
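elevator.h replaces the per-scheduler kobj_type with a flat table of elv_fs_entry attributes plus a sysfs_lock in the elevator queue. An I/O scheduler would now presumably expose its tunables along these lines (the scheduler name, data structure and "quantum" knob are illustrative):

    #include <linux/elevator.h>
    #include <linux/sysfs.h>

    static ssize_t example_quantum_show(elevator_t *e, char *page)
    {
            struct example_data *ed = e->elevator_data;
            return sprintf(page, "%d\n", ed->quantum);
    }

    static ssize_t example_quantum_store(elevator_t *e, const char *page,
                                         size_t count)
    {
            struct example_data *ed = e->elevator_data;
            ed->quantum = simple_strtoul(page, NULL, 10);
            return count;
    }

    static struct elv_fs_entry example_attrs[] = {
            __ATTR(quantum, S_IRUGO | S_IWUSR,
                   example_quantum_show, example_quantum_store),
            __ATTR_NULL
    };

    /* hooked up via .elevator_attrs = example_attrs in struct elevator_type */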
diff --git a/include/linux/if.h b/include/linux/if.h index ce627d9092ef..12c6f6d157c3 100644 --- a/include/linux/if.h +++ b/include/linux/if.h | |||
@@ -52,6 +52,9 @@ | |||
52 | /* Private (from user) interface flags (netdevice->priv_flags). */ | 52 | /* Private (from user) interface flags (netdevice->priv_flags). */ |
53 | #define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ | 53 | #define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ |
54 | #define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ | 54 | #define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ |
55 | #define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */ | ||
56 | #define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */ | ||
57 | #define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. */ | ||
55 | 58 | ||
56 | #define IF_GET_IFACE 0x0001 /* for querying only */ | 59 | #define IF_GET_IFACE 0x0001 /* for querying only */ |
57 | #define IF_GET_PROTO 0x0002 | 60 | #define IF_GET_PROTO 0x0002 |
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 7a92c1ce1457..ab08f35cbc35 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -61,6 +61,7 @@ | |||
61 | #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ | 61 | #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ |
62 | #define ETH_P_IPX 0x8137 /* IPX over DIX */ | 62 | #define ETH_P_IPX 0x8137 /* IPX over DIX */ |
63 | #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ | 63 | #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ |
64 | #define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */ | ||
64 | #define ETH_P_WCCP 0x883E /* Web-cache coordination protocol | 65 | #define ETH_P_WCCP 0x883E /* Web-cache coordination protocol |
65 | * defined in draft-wilson-wrec-wccp-v2-00.txt */ | 66 | * defined in draft-wilson-wrec-wccp-v2-00.txt */ |
66 | #define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ | 67 | #define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ |
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 0b08cd692201..955d3069d727 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h | |||
@@ -1214,6 +1214,7 @@ struct mv64xxx_i2c_pdata { | |||
1214 | #define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 | 1214 | #define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 |
1215 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) | 1215 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) |
1216 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) | 1216 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) |
1217 | #define MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED (1<<9) | ||
1217 | #define MV643XX_ETH_FORCE_LINK_FAIL 0 | 1218 | #define MV643XX_ETH_FORCE_LINK_FAIL 0 |
1218 | #define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) | 1219 | #define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) |
1219 | #define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 | 1220 | #define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 |
@@ -1243,6 +1244,8 @@ struct mv64xxx_i2c_pdata { | |||
1243 | #define MV643XX_ETH_SET_MII_SPEED_TO_10 0 | 1244 | #define MV643XX_ETH_SET_MII_SPEED_TO_10 0 |
1244 | #define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) | 1245 | #define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) |
1245 | 1246 | ||
1247 | #define MV643XX_ETH_MAX_RX_PACKET_MASK (0x7<<17) | ||
1248 | |||
1246 | #define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ | 1249 | #define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ |
1247 | MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ | 1250 | MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ |
1248 | MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ | 1251 | MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ |
@@ -1285,23 +1288,15 @@ struct mv64xxx_i2c_pdata { | |||
1285 | #define MV643XX_ETH_NAME "mv643xx_eth" | 1288 | #define MV643XX_ETH_NAME "mv643xx_eth" |
1286 | 1289 | ||
1287 | struct mv643xx_eth_platform_data { | 1290 | struct mv643xx_eth_platform_data { |
1288 | /* | ||
1289 | * Non-values for mac_addr, phy_addr, port_config, etc. | ||
1290 | * override the default value. Setting the corresponding | ||
1291 | * force_* field, causes the default value to be overridden | ||
1292 | * even when zero. | ||
1293 | */ | ||
1294 | unsigned int force_phy_addr:1; | ||
1295 | unsigned int force_port_config:1; | ||
1296 | unsigned int force_port_config_extend:1; | ||
1297 | unsigned int force_port_sdma_config:1; | ||
1298 | unsigned int force_port_serial_control:1; | ||
1299 | int phy_addr; | ||
1300 | char *mac_addr; /* pointer to mac address */ | 1291 | char *mac_addr; /* pointer to mac address */ |
1301 | u32 port_config; | 1292 | u16 force_phy_addr; /* force override if phy_addr == 0 */ |
1302 | u32 port_config_extend; | 1293 | u16 phy_addr; |
1303 | u32 port_sdma_config; | 1294 | |
1304 | u32 port_serial_control; | 1295 | /* If speed is 0, then speed and duplex are autonegotiated. */ |
1296 | int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */ | ||
1297 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ | ||
1298 | |||
1299 | /* non-zero values of the following fields override defaults */ | ||
1305 | u32 tx_queue_size; | 1300 | u32 tx_queue_size; |
1306 | u32 rx_queue_size; | 1301 | u32 rx_queue_size; |
1307 | u32 tx_sram_addr; | 1302 | u32 tx_sram_addr; |
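The mv643xx_eth platform data sheds the raw port_config/port_sdma_config/port_serial_control register words in favour of a speed/duplex pair, with 0 meaning "autonegotiate". Board code would presumably now fill it in roughly like this (all values illustrative):

    #include <linux/mv643xx.h>

    static u8 example_mac[6];       /* filled in from firmware elsewhere */

    static struct mv643xx_eth_platform_data example_eth_pd = {
            .mac_addr       = example_mac,
            .force_phy_addr = 1,
            .phy_addr       = 0,
            .speed          = 0,            /* 0 => autonegotiate speed+duplex */
            .duplex         = DUPLEX_FULL,  /* ignored while speed == 0 */
            .tx_queue_size  = 400,
            .rx_queue_size  = 400,
    };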
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index 9a92aef8b0b2..4725ff861c57 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h | |||
@@ -220,6 +220,7 @@ struct ieee80211_snap_hdr { | |||
220 | /* Authentication algorithms */ | 220 | /* Authentication algorithms */ |
221 | #define WLAN_AUTH_OPEN 0 | 221 | #define WLAN_AUTH_OPEN 0 |
222 | #define WLAN_AUTH_SHARED_KEY 1 | 222 | #define WLAN_AUTH_SHARED_KEY 1 |
223 | #define WLAN_AUTH_LEAP 2 | ||
223 | 224 | ||
224 | #define WLAN_AUTH_CHALLENGE_LEN 128 | 225 | #define WLAN_AUTH_CHALLENGE_LEN 128 |
225 | 226 | ||
@@ -299,6 +300,23 @@ enum ieee80211_reasoncode { | |||
299 | WLAN_REASON_CIPHER_SUITE_REJECTED = 24, | 300 | WLAN_REASON_CIPHER_SUITE_REJECTED = 24, |
300 | }; | 301 | }; |
301 | 302 | ||
303 | /* Action categories - 802.11h */ | ||
304 | enum ieee80211_actioncategories { | ||
305 | WLAN_ACTION_SPECTRUM_MGMT = 0, | ||
306 | /* Reserved 1-127 */ | ||
307 | /* Error 128-255 */ | ||
308 | }; | ||
309 | |||
310 | /* Action details - 802.11h */ | ||
311 | enum ieee80211_actiondetails { | ||
312 | WLAN_ACTION_CATEGORY_MEASURE_REQUEST = 0, | ||
313 | WLAN_ACTION_CATEGORY_MEASURE_REPORT = 1, | ||
314 | WLAN_ACTION_CATEGORY_TPC_REQUEST = 2, | ||
315 | WLAN_ACTION_CATEGORY_TPC_REPORT = 3, | ||
316 | WLAN_ACTION_CATEGORY_CHANNEL_SWITCH = 4, | ||
317 | /* 5 - 255 Reserved */ | ||
318 | }; | ||
319 | |||
302 | #define IEEE80211_STATMASK_SIGNAL (1<<0) | 320 | #define IEEE80211_STATMASK_SIGNAL (1<<0) |
303 | #define IEEE80211_STATMASK_RSSI (1<<1) | 321 | #define IEEE80211_STATMASK_RSSI (1<<1) |
304 | #define IEEE80211_STATMASK_NOISE (1<<2) | 322 | #define IEEE80211_STATMASK_NOISE (1<<2) |
@@ -377,6 +395,8 @@ struct ieee80211_rx_stats { | |||
377 | u8 mask; | 395 | u8 mask; |
378 | u8 freq; | 396 | u8 freq; |
379 | u16 len; | 397 | u16 len; |
398 | u64 tsf; | ||
399 | u32 beacon_time; | ||
380 | }; | 400 | }; |
381 | 401 | ||
382 | /* IEEE 802.11 requires that STA supports concurrent reception of at least | 402 | /* IEEE 802.11 requires that STA supports concurrent reception of at least |
@@ -608,6 +628,28 @@ struct ieee80211_auth { | |||
608 | struct ieee80211_info_element info_element[0]; | 628 | struct ieee80211_info_element info_element[0]; |
609 | } __attribute__ ((packed)); | 629 | } __attribute__ ((packed)); |
610 | 630 | ||
631 | struct ieee80211_channel_switch { | ||
632 | u8 id; | ||
633 | u8 len; | ||
634 | u8 mode; | ||
635 | u8 channel; | ||
636 | u8 count; | ||
637 | } __attribute__ ((packed)); | ||
638 | |||
639 | struct ieee80211_action { | ||
640 | struct ieee80211_hdr_3addr header; | ||
641 | u8 category; | ||
642 | u8 action; | ||
643 | union { | ||
644 | struct ieee80211_action_exchange { | ||
645 | u8 token; | ||
646 | struct ieee80211_info_element info_element[0]; | ||
647 | } exchange; | ||
648 | struct ieee80211_channel_switch channel_switch; | ||
649 | |||
650 | } format; | ||
651 | } __attribute__ ((packed)); | ||
652 | |||
611 | struct ieee80211_disassoc { | 653 | struct ieee80211_disassoc { |
612 | struct ieee80211_hdr_3addr header; | 654 | struct ieee80211_hdr_3addr header; |
613 | __le16 reason; | 655 | __le16 reason; |
@@ -692,7 +734,15 @@ struct ieee80211_txb { | |||
692 | /* QoS structure */ | 734 | /* QoS structure */ |
693 | #define NETWORK_HAS_QOS_PARAMETERS (1<<3) | 735 | #define NETWORK_HAS_QOS_PARAMETERS (1<<3) |
694 | #define NETWORK_HAS_QOS_INFORMATION (1<<4) | 736 | #define NETWORK_HAS_QOS_INFORMATION (1<<4) |
695 | #define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION) | 737 | #define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \ |
738 | NETWORK_HAS_QOS_INFORMATION) | ||
739 | |||
740 | /* 802.11h */ | ||
741 | #define NETWORK_HAS_POWER_CONSTRAINT (1<<5) | ||
742 | #define NETWORK_HAS_CSA (1<<6) | ||
743 | #define NETWORK_HAS_QUIET (1<<7) | ||
744 | #define NETWORK_HAS_IBSS_DFS (1<<8) | ||
745 | #define NETWORK_HAS_TPC_REPORT (1<<9) | ||
696 | 746 | ||
697 | #define QOS_QUEUE_NUM 4 | 747 | #define QOS_QUEUE_NUM 4 |
698 | #define QOS_OUI_LEN 3 | 748 | #define QOS_OUI_LEN 3 |
@@ -748,6 +798,91 @@ struct ieee80211_tim_parameters { | |||
748 | 798 | ||
749 | /*******************************************************/ | 799 | /*******************************************************/ |
750 | 800 | ||
801 | enum { /* ieee80211_basic_report.map */ | ||
802 | IEEE80211_BASIC_MAP_BSS = (1 << 0), | ||
803 | IEEE80211_BASIC_MAP_OFDM = (1 << 1), | ||
804 | IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), | ||
805 | IEEE80211_BASIC_MAP_RADAR = (1 << 3), | ||
806 | IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), | ||
807 | /* Bits 5-7 are reserved */ | ||
808 | |||
809 | }; | ||
810 | struct ieee80211_basic_report { | ||
811 | u8 channel; | ||
812 | __le64 start_time; | ||
813 | __le16 duration; | ||
814 | u8 map; | ||
815 | } __attribute__ ((packed)); | ||
816 | |||
817 | enum { /* ieee80211_measurement_request.mode */ | ||
818 | /* Bit 0 is reserved */ | ||
819 | IEEE80211_MEASUREMENT_ENABLE = (1 << 1), | ||
820 | IEEE80211_MEASUREMENT_REQUEST = (1 << 2), | ||
821 | IEEE80211_MEASUREMENT_REPORT = (1 << 3), | ||
822 | /* Bits 4-7 are reserved */ | ||
823 | }; | ||
824 | |||
825 | enum { | ||
826 | IEEE80211_REPORT_BASIC = 0, /* required */ | ||
827 | IEEE80211_REPORT_CCA = 1, /* optional */ | ||
828 | IEEE80211_REPORT_RPI = 2, /* optional */ | ||
829 | /* 3-255 reserved */ | ||
830 | }; | ||
831 | |||
832 | struct ieee80211_measurement_params { | ||
833 | u8 channel; | ||
834 | __le64 start_time; | ||
835 | __le16 duration; | ||
836 | } __attribute__ ((packed)); | ||
837 | |||
838 | struct ieee80211_measurement_request { | ||
839 | struct ieee80211_info_element ie; | ||
840 | u8 token; | ||
841 | u8 mode; | ||
842 | u8 type; | ||
843 | struct ieee80211_measurement_params params[0]; | ||
844 | } __attribute__ ((packed)); | ||
845 | |||
846 | struct ieee80211_measurement_report { | ||
847 | struct ieee80211_info_element ie; | ||
848 | u8 token; | ||
849 | u8 mode; | ||
850 | u8 type; | ||
851 | union { | ||
852 | struct ieee80211_basic_report basic[0]; | ||
853 | } u; | ||
854 | } __attribute__ ((packed)); | ||
855 | |||
856 | struct ieee80211_tpc_report { | ||
857 | u8 transmit_power; | ||
858 | u8 link_margin; | ||
859 | } __attribute__ ((packed)); | ||
860 | |||
861 | struct ieee80211_channel_map { | ||
862 | u8 channel; | ||
863 | u8 map; | ||
864 | } __attribute__ ((packed)); | ||
865 | |||
866 | struct ieee80211_ibss_dfs { | ||
867 | struct ieee80211_info_element ie; | ||
868 | u8 owner[ETH_ALEN]; | ||
869 | u8 recovery_interval; | ||
870 | struct ieee80211_channel_map channel_map[0]; | ||
871 | }; | ||
872 | |||
873 | struct ieee80211_csa { | ||
874 | u8 mode; | ||
875 | u8 channel; | ||
876 | u8 count; | ||
877 | } __attribute__ ((packed)); | ||
878 | |||
879 | struct ieee80211_quiet { | ||
880 | u8 count; | ||
881 | u8 period; | ||
882 | u8 duration; | ||
883 | u8 offset; | ||
884 | } __attribute__ ((packed)); | ||
885 | |||
751 | struct ieee80211_network { | 886 | struct ieee80211_network { |
752 | /* These entries are used to identify a unique network */ | 887 | /* These entries are used to identify a unique network */ |
753 | u8 bssid[ETH_ALEN]; | 888 | u8 bssid[ETH_ALEN]; |
@@ -767,7 +902,7 @@ struct ieee80211_network { | |||
767 | u8 rates_ex_len; | 902 | u8 rates_ex_len; |
768 | unsigned long last_scanned; | 903 | unsigned long last_scanned; |
769 | u8 mode; | 904 | u8 mode; |
770 | u8 flags; | 905 | u32 flags; |
771 | u32 last_associate; | 906 | u32 last_associate; |
772 | u32 time_stamp[2]; | 907 | u32 time_stamp[2]; |
773 | u16 beacon_interval; | 908 | u16 beacon_interval; |
@@ -779,6 +914,25 @@ struct ieee80211_network { | |||
779 | u8 rsn_ie[MAX_WPA_IE_LEN]; | 914 | u8 rsn_ie[MAX_WPA_IE_LEN]; |
780 | size_t rsn_ie_len; | 915 | size_t rsn_ie_len; |
781 | struct ieee80211_tim_parameters tim; | 916 | struct ieee80211_tim_parameters tim; |
917 | |||
918 | /* 802.11h info */ | ||
919 | |||
920 | /* Power Constraint - mandatory if spctrm mgmt required */ | ||
921 | u8 power_constraint; | ||
922 | |||
923 | /* TPC Report - mandatory if spctrm mgmt required */ | ||
924 | struct ieee80211_tpc_report tpc_report; | ||
925 | |||
926 | /* IBSS DFS - mandatory if spctrm mgmt required and IBSS | ||
927 | * NOTE: This is variable length and so must be allocated dynamically */ | ||
928 | struct ieee80211_ibss_dfs *ibss_dfs; | ||
929 | |||
930 | /* Channel Switch Announcement - optional if spctrm mgmt required */ | ||
931 | struct ieee80211_csa csa; | ||
932 | |||
933 | /* Quiet - optional if spctrm mgmt required */ | ||
934 | struct ieee80211_quiet quiet; | ||
935 | |||
782 | struct list_head list; | 936 | struct list_head list; |
783 | }; | 937 | }; |
784 | 938 | ||
@@ -924,7 +1078,10 @@ struct ieee80211_device { | |||
924 | int (*handle_auth) (struct net_device * dev, | 1078 | int (*handle_auth) (struct net_device * dev, |
925 | struct ieee80211_auth * auth); | 1079 | struct ieee80211_auth * auth); |
926 | int (*handle_deauth) (struct net_device * dev, | 1080 | int (*handle_deauth) (struct net_device * dev, |
927 | struct ieee80211_auth * auth); | 1081 | struct ieee80211_deauth * auth); |
1082 | int (*handle_action) (struct net_device * dev, | ||
1083 | struct ieee80211_action * action, | ||
1084 | struct ieee80211_rx_stats * stats); | ||
928 | int (*handle_disassoc) (struct net_device * dev, | 1085 | int (*handle_disassoc) (struct net_device * dev, |
929 | struct ieee80211_disassoc * assoc); | 1086 | struct ieee80211_disassoc * assoc); |
930 | int (*handle_beacon) (struct net_device * dev, | 1087 | int (*handle_beacon) (struct net_device * dev, |
@@ -1093,6 +1250,7 @@ extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
1093 | extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, | 1250 | extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, |
1094 | struct ieee80211_hdr_4addr *header, | 1251 | struct ieee80211_hdr_4addr *header, |
1095 | struct ieee80211_rx_stats *stats); | 1252 | struct ieee80211_rx_stats *stats); |
1253 | extern void ieee80211_network_reset(struct ieee80211_network *network); | ||
1096 | 1254 | ||
1097 | /* ieee80211_geo.c */ | 1255 | /* ieee80211_geo.c */ |
1098 | extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device | 1256 | extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device |
@@ -1105,6 +1263,11 @@ extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee, | |||
1105 | extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, | 1263 | extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, |
1106 | u8 channel); | 1264 | u8 channel); |
1107 | extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); | 1265 | extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); |
1266 | extern u8 ieee80211_get_channel_flags(struct ieee80211_device *ieee, | ||
1267 | u8 channel); | ||
1268 | extern const struct ieee80211_channel *ieee80211_get_channel(struct | ||
1269 | ieee80211_device | ||
1270 | *ieee, u8 channel); | ||
1108 | 1271 | ||
1109 | /* ieee80211_wx.c */ | 1272 | /* ieee80211_wx.c */ |
1110 | extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, | 1273 | extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, |
@@ -1122,6 +1285,14 @@ extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, | |||
1122 | extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, | 1285 | extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, |
1123 | struct iw_request_info *info, | 1286 | struct iw_request_info *info, |
1124 | union iwreq_data *wrqu, char *extra); | 1287 | union iwreq_data *wrqu, char *extra); |
1288 | extern int ieee80211_wx_set_auth(struct net_device *dev, | ||
1289 | struct iw_request_info *info, | ||
1290 | union iwreq_data *wrqu, | ||
1291 | char *extra); | ||
1292 | extern int ieee80211_wx_get_auth(struct net_device *dev, | ||
1293 | struct iw_request_info *info, | ||
1294 | union iwreq_data *wrqu, | ||
1295 | char *extra); | ||
1125 | 1296 | ||
1126 | static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) | 1297 | static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) |
1127 | { | 1298 | { |
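With the 802.11h additions above, a driver's new handle_action() callback can inspect spectrum-management frames using the structures and enums defined in this header. A hedged sketch (the handler name and the comments' policy are illustrative):

    static int example_handle_action(struct net_device *dev,
                                     struct ieee80211_action *action,
                                     struct ieee80211_rx_stats *stats)
    {
            if (action->category != WLAN_ACTION_SPECTRUM_MGMT)
                    return 0;

            switch (action->action) {
            case WLAN_ACTION_CATEGORY_CHANNEL_SWITCH:
                    /* move to action->format.channel_switch.channel after
                     * action->format.channel_switch.count more beacons */
                    break;
            case WLAN_ACTION_CATEGORY_MEASURE_REQUEST:
                    /* parse action->format.exchange.info_element[] into a
                     * struct ieee80211_measurement_request and schedule it */
                    break;
            }
            return 0;
    }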
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h index cd82c3e998e4..eb476414fd72 100644 --- a/include/net/ieee80211_crypt.h +++ b/include/net/ieee80211_crypt.h | |||
@@ -47,7 +47,8 @@ struct ieee80211_crypto_ops { | |||
47 | /* deinitialize crypto context and free allocated private data */ | 47 | /* deinitialize crypto context and free allocated private data */ |
48 | void (*deinit) (void *priv); | 48 | void (*deinit) (void *priv); |
49 | 49 | ||
50 | int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv); | 50 | int (*build_iv) (struct sk_buff * skb, int hdr_len, |
51 | u8 *key, int keylen, void *priv); | ||
51 | 52 | ||
52 | /* encrypt/decrypt return < 0 on error or >= 0 on success. The return | 53 | /* encrypt/decrypt return < 0 on error or >= 0 on success. The return |
53 | * value from decrypt_mpdu is passed as the keyidx value for | 54 | * value from decrypt_mpdu is passed as the keyidx value for |
diff --git a/kernel/exit.c b/kernel/exit.c index 531aadca5530..d1e8d500a7e1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -807,8 +807,6 @@ fastcall NORET_TYPE void do_exit(long code) | |||
807 | panic("Attempted to kill the idle task!"); | 807 | panic("Attempted to kill the idle task!"); |
808 | if (unlikely(tsk->pid == 1)) | 808 | if (unlikely(tsk->pid == 1)) |
809 | panic("Attempted to kill init!"); | 809 | panic("Attempted to kill init!"); |
810 | if (tsk->io_context) | ||
811 | exit_io_context(); | ||
812 | 810 | ||
813 | if (unlikely(current->ptrace & PT_TRACE_EXIT)) { | 811 | if (unlikely(current->ptrace & PT_TRACE_EXIT)) { |
814 | current->ptrace_message = code; | 812 | current->ptrace_message = code; |
@@ -822,6 +820,8 @@ fastcall NORET_TYPE void do_exit(long code) | |||
822 | if (unlikely(tsk->flags & PF_EXITING)) { | 820 | if (unlikely(tsk->flags & PF_EXITING)) { |
823 | printk(KERN_ALERT | 821 | printk(KERN_ALERT |
824 | "Fixing recursive fault but reboot is needed!\n"); | 822 | "Fixing recursive fault but reboot is needed!\n"); |
823 | if (tsk->io_context) | ||
824 | exit_io_context(); | ||
825 | set_current_state(TASK_UNINTERRUPTIBLE); | 825 | set_current_state(TASK_UNINTERRUPTIBLE); |
826 | schedule(); | 826 | schedule(); |
827 | } | 827 | } |
@@ -881,6 +881,9 @@ fastcall NORET_TYPE void do_exit(long code) | |||
881 | */ | 881 | */ |
882 | mutex_debug_check_no_locks_held(tsk); | 882 | mutex_debug_check_no_locks_held(tsk); |
883 | 883 | ||
884 | if (tsk->io_context) | ||
885 | exit_io_context(); | ||
886 | |||
884 | /* PF_DEAD causes final put_task_struct after we schedule. */ | 887 | /* PF_DEAD causes final put_task_struct after we schedule. */ |
885 | preempt_disable(); | 888 | preempt_disable(); |
886 | BUG_ON(tsk->flags & PF_DEAD); | 889 | BUG_ON(tsk->flags & PF_DEAD); |
diff --git a/net/Kconfig b/net/Kconfig index 5126f58d9c44..4193cdcd3ae7 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -224,6 +224,9 @@ source "net/irda/Kconfig" | |||
224 | source "net/bluetooth/Kconfig" | 224 | source "net/bluetooth/Kconfig" |
225 | source "net/ieee80211/Kconfig" | 225 | source "net/ieee80211/Kconfig" |
226 | 226 | ||
227 | config WIRELESS_EXT | ||
228 | bool | ||
229 | |||
227 | endif # if NET | 230 | endif # if NET |
228 | endmenu # Networking | 231 | endmenu # Networking |
229 | 232 | ||
diff --git a/net/core/Makefile b/net/core/Makefile index 630da0f0579e..79fe12cced27 100644 --- a/net/core/Makefile +++ b/net/core/Makefile | |||
@@ -14,5 +14,5 @@ obj-$(CONFIG_XFRM) += flow.o | |||
14 | obj-$(CONFIG_SYSFS) += net-sysfs.o | 14 | obj-$(CONFIG_SYSFS) += net-sysfs.o |
15 | obj-$(CONFIG_NET_DIVERT) += dv.o | 15 | obj-$(CONFIG_NET_DIVERT) += dv.o |
16 | obj-$(CONFIG_NET_PKTGEN) += pktgen.o | 16 | obj-$(CONFIG_NET_PKTGEN) += pktgen.o |
17 | obj-$(CONFIG_NET_RADIO) += wireless.o | 17 | obj-$(CONFIG_WIRELESS_EXT) += wireless.o |
18 | obj-$(CONFIG_NETPOLL) += netpoll.o | 18 | obj-$(CONFIG_NETPOLL) += netpoll.o |
diff --git a/net/core/dev.c b/net/core/dev.c index 2afb0de95329..ef56c035d44e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -110,10 +110,8 @@ | |||
110 | #include <linux/netpoll.h> | 110 | #include <linux/netpoll.h> |
111 | #include <linux/rcupdate.h> | 111 | #include <linux/rcupdate.h> |
112 | #include <linux/delay.h> | 112 | #include <linux/delay.h> |
113 | #ifdef CONFIG_NET_RADIO | 113 | #include <linux/wireless.h> |
114 | #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */ | ||
115 | #include <net/iw_handler.h> | 114 | #include <net/iw_handler.h> |
116 | #endif /* CONFIG_NET_RADIO */ | ||
117 | #include <asm/current.h> | 115 | #include <asm/current.h> |
118 | 116 | ||
119 | /* | 117 | /* |
@@ -1448,8 +1446,29 @@ static inline struct net_device *skb_bond(struct sk_buff *skb) | |||
1448 | { | 1446 | { |
1449 | struct net_device *dev = skb->dev; | 1447 | struct net_device *dev = skb->dev; |
1450 | 1448 | ||
1451 | if (dev->master) | 1449 | if (dev->master) { |
1450 | /* | ||
1451 | * On bonding slaves other than the currently active | ||
1452 | * slave, suppress duplicates except for 802.3ad | ||
1453 | * ETH_P_SLOW and alb non-mcast/bcast. | ||
1454 | */ | ||
1455 | if (dev->priv_flags & IFF_SLAVE_INACTIVE) { | ||
1456 | if (dev->master->priv_flags & IFF_MASTER_ALB) { | ||
1457 | if (skb->pkt_type != PACKET_BROADCAST && | ||
1458 | skb->pkt_type != PACKET_MULTICAST) | ||
1459 | goto keep; | ||
1460 | } | ||
1461 | |||
1462 | if (dev->master->priv_flags & IFF_MASTER_8023AD && | ||
1463 | skb->protocol == __constant_htons(ETH_P_SLOW)) | ||
1464 | goto keep; | ||
1465 | |||
1466 | kfree_skb(skb); | ||
1467 | return NULL; | ||
1468 | } | ||
1469 | keep: | ||
1452 | skb->dev = dev->master; | 1470 | skb->dev = dev->master; |
1471 | } | ||
1453 | 1472 | ||
1454 | return dev; | 1473 | return dev; |
1455 | } | 1474 | } |
@@ -1593,6 +1612,9 @@ int netif_receive_skb(struct sk_buff *skb) | |||
1593 | 1612 | ||
1594 | orig_dev = skb_bond(skb); | 1613 | orig_dev = skb_bond(skb); |
1595 | 1614 | ||
1615 | if (!orig_dev) | ||
1616 | return NET_RX_DROP; | ||
1617 | |||
1596 | __get_cpu_var(netdev_rx_stat).total++; | 1618 | __get_cpu_var(netdev_rx_stat).total++; |
1597 | 1619 | ||
1598 | skb->h.raw = skb->nh.raw = skb->data; | 1620 | skb->h.raw = skb->nh.raw = skb->data; |
@@ -2028,7 +2050,7 @@ static struct file_operations softnet_seq_fops = { | |||
2028 | .release = seq_release, | 2050 | .release = seq_release, |
2029 | }; | 2051 | }; |
2030 | 2052 | ||
2031 | #ifdef WIRELESS_EXT | 2053 | #ifdef CONFIG_WIRELESS_EXT |
2032 | extern int wireless_proc_init(void); | 2054 | extern int wireless_proc_init(void); |
2033 | #else | 2055 | #else |
2034 | #define wireless_proc_init() 0 | 2056 | #define wireless_proc_init() 0 |
@@ -2582,7 +2604,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg) | |||
2582 | ret = -EFAULT; | 2604 | ret = -EFAULT; |
2583 | return ret; | 2605 | return ret; |
2584 | } | 2606 | } |
2585 | #ifdef WIRELESS_EXT | 2607 | #ifdef CONFIG_WIRELESS_EXT |
2586 | /* Take care of Wireless Extensions */ | 2608 | /* Take care of Wireless Extensions */ |
2587 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { | 2609 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { |
2588 | /* If command is `set a parameter', or | 2610 | /* If command is `set a parameter', or |
@@ -2603,7 +2625,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg) | |||
2603 | ret = -EFAULT; | 2625 | ret = -EFAULT; |
2604 | return ret; | 2626 | return ret; |
2605 | } | 2627 | } |
2606 | #endif /* WIRELESS_EXT */ | 2628 | #endif /* CONFIG_WIRELESS_EXT */ |
2607 | return -EINVAL; | 2629 | return -EINVAL; |
2608 | } | 2630 | } |
2609 | } | 2631 | } |
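The skb_bond() change in dev.c depends on the new priv_flags added to if.h earlier in this patch: the bonding driver is expected to mark inactive slaves and tag the master with its mode, so that duplicates from inactive slaves are dropped while 802.3ad LACPDUs (ETH_P_SLOW) and balance-alb unicast still get through. A sketch of the flag-setting side (the bond_mode test is schematic, not the bonding driver's exact code):

    /* when a slave stops being the currently active one: */
    slave_dev->priv_flags |= IFF_SLAVE_INACTIVE;

    /* when configuring the master device: */
    if (bond_mode == BOND_MODE_8023AD)
            bond_dev->priv_flags |= IFF_MASTER_8023AD;
    else if (bond_mode == BOND_MODE_ALB)
            bond_dev->priv_flags |= IFF_MASTER_ALB;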
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index ecc9bb196abc..cb71d794a7d1 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <net/ieee80211.h> | 19 | #include <net/ieee80211.h> |
20 | 20 | ||
21 | |||
22 | MODULE_AUTHOR("Jouni Malinen"); | 21 | MODULE_AUTHOR("Jouni Malinen"); |
23 | MODULE_DESCRIPTION("HostAP crypto"); | 22 | MODULE_DESCRIPTION("HostAP crypto"); |
24 | MODULE_LICENSE("GPL"); | 23 | MODULE_LICENSE("GPL"); |
@@ -33,11 +32,11 @@ static DEFINE_SPINLOCK(ieee80211_crypto_lock); | |||
33 | 32 | ||
34 | void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) | 33 | void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) |
35 | { | 34 | { |
36 | struct ieee80211_crypt_data *entry, *next; | 35 | struct ieee80211_crypt_data *entry, *next; |
37 | unsigned long flags; | 36 | unsigned long flags; |
38 | 37 | ||
39 | spin_lock_irqsave(&ieee->lock, flags); | 38 | spin_lock_irqsave(&ieee->lock, flags); |
40 | list_for_each_entry_safe(entry, next, &ieee->crypt_deinit_list, list) { | 39 | list_for_each_entry_safe(entry, next, &ieee->crypt_deinit_list, list) { |
41 | if (atomic_read(&entry->refcnt) != 0 && !force) | 40 | if (atomic_read(&entry->refcnt) != 0 && !force) |
42 | continue; | 41 | continue; |
43 | 42 | ||
@@ -141,9 +140,9 @@ int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops) | |||
141 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); | 140 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); |
142 | return -EINVAL; | 141 | return -EINVAL; |
143 | 142 | ||
144 | found: | 143 | found: |
145 | printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm " | 144 | printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm " |
146 | "'%s'\n", ops->name); | 145 | "'%s'\n", ops->name); |
147 | list_del(&alg->list); | 146 | list_del(&alg->list); |
148 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); | 147 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); |
149 | kfree(alg); | 148 | kfree(alg); |
@@ -163,7 +162,7 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name) | |||
163 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); | 162 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); |
164 | return NULL; | 163 | return NULL; |
165 | 164 | ||
166 | found: | 165 | found: |
167 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); | 166 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); |
168 | return alg->ops; | 167 | return alg->ops; |
169 | } | 168 | } |
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 3840d1911f2b..78b2d13e80e3 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c | |||
@@ -190,7 +190,8 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm, | |||
190 | ieee80211_ccmp_aes_encrypt(tfm, b0, s0); | 190 | ieee80211_ccmp_aes_encrypt(tfm, b0, s0); |
191 | } | 191 | } |
192 | 192 | ||
193 | static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv) | 193 | static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, |
194 | u8 *aeskey, int keylen, void *priv) | ||
194 | { | 195 | { |
195 | struct ieee80211_ccmp_data *key = priv; | 196 | struct ieee80211_ccmp_data *key = priv; |
196 | int i; | 197 | int i; |
@@ -199,6 +200,9 @@ static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv) | |||
199 | if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) | 200 | if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) |
200 | return -1; | 201 | return -1; |
201 | 202 | ||
203 | if (aeskey != NULL && keylen >= CCMP_TK_LEN) | ||
204 | memcpy(aeskey, key->key, CCMP_TK_LEN); | ||
205 | |||
202 | pos = skb_push(skb, CCMP_HDR_LEN); | 206 | pos = skb_push(skb, CCMP_HDR_LEN); |
203 | memmove(pos, pos + CCMP_HDR_LEN, hdr_len); | 207 | memmove(pos, pos + CCMP_HDR_LEN, hdr_len); |
204 | pos += hdr_len; | 208 | pos += hdr_len; |
@@ -238,7 +242,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
238 | return -1; | 242 | return -1; |
239 | 243 | ||
240 | data_len = skb->len - hdr_len; | 244 | data_len = skb->len - hdr_len; |
241 | len = ieee80211_ccmp_hdr(skb, hdr_len, priv); | 245 | len = ieee80211_ccmp_hdr(skb, hdr_len, NULL, 0, priv); |
242 | if (len < 0) | 246 | if (len < 0) |
243 | return -1; | 247 | return -1; |
244 | 248 | ||
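The extra aeskey/keylen arguments let a caller obtain the CCMP temporal key while the IV header is built, which only matters when the AES-CCM work itself is pushed to hardware; the in-tree software path above keeps the old behaviour by passing NULL/0. A hedged sketch of what an offloading caller might look like (hw_load_ccmp_key() and nic are hypothetical driver-side names):

    u8 aeskey[16];          /* CCMP_TK_LEN */

    if (crypt->ops->build_iv &&
        crypt->ops->build_iv(skb, hdr_len, aeskey, sizeof(aeskey),
                             crypt->priv) >= 0)
        hw_load_ccmp_key(nic, aeskey);   /* program the key into the NIC */

The ieee80211_tx.c hunk later in this patch wires this kind of call into ieee80211_xmit(), passing down the currently active software key.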
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index e0988320efbf..93def94c1b32 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c | |||
@@ -80,10 +80,9 @@ static void *ieee80211_tkip_init(int key_idx) | |||
80 | { | 80 | { |
81 | struct ieee80211_tkip_data *priv; | 81 | struct ieee80211_tkip_data *priv; |
82 | 82 | ||
83 | priv = kmalloc(sizeof(*priv), GFP_ATOMIC); | 83 | priv = kzalloc(sizeof(*priv), GFP_ATOMIC); |
84 | if (priv == NULL) | 84 | if (priv == NULL) |
85 | goto fail; | 85 | goto fail; |
86 | memset(priv, 0, sizeof(*priv)); | ||
87 | 86 | ||
88 | priv->key_idx = key_idx; | 87 | priv->key_idx = key_idx; |
89 | 88 | ||
@@ -271,34 +270,33 @@ static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK, | |||
271 | #endif | 270 | #endif |
272 | } | 271 | } |
273 | 272 | ||
274 | static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv) | 273 | static int ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, |
274 | u8 * rc4key, int keylen, void *priv) | ||
275 | { | 275 | { |
276 | struct ieee80211_tkip_data *tkey = priv; | 276 | struct ieee80211_tkip_data *tkey = priv; |
277 | int len; | 277 | int len; |
278 | u8 *rc4key, *pos, *icv; | 278 | u8 *pos; |
279 | struct ieee80211_hdr_4addr *hdr; | 279 | struct ieee80211_hdr_4addr *hdr; |
280 | u32 crc; | ||
281 | 280 | ||
282 | hdr = (struct ieee80211_hdr_4addr *)skb->data; | 281 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
283 | 282 | ||
284 | if (skb_headroom(skb) < 8 || skb->len < hdr_len) | 283 | if (skb_headroom(skb) < 8 || skb->len < hdr_len) |
285 | return NULL; | 284 | return -1; |
285 | |||
286 | if (rc4key == NULL || keylen < 16) | ||
287 | return -1; | ||
286 | 288 | ||
287 | if (!tkey->tx_phase1_done) { | 289 | if (!tkey->tx_phase1_done) { |
288 | tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, | 290 | tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, |
289 | tkey->tx_iv32); | 291 | tkey->tx_iv32); |
290 | tkey->tx_phase1_done = 1; | 292 | tkey->tx_phase1_done = 1; |
291 | } | 293 | } |
292 | rc4key = kmalloc(16, GFP_ATOMIC); | ||
293 | if (!rc4key) | ||
294 | return NULL; | ||
295 | tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); | 294 | tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); |
296 | 295 | ||
297 | len = skb->len - hdr_len; | 296 | len = skb->len - hdr_len; |
298 | pos = skb_push(skb, 8); | 297 | pos = skb_push(skb, 8); |
299 | memmove(pos, pos + 8, hdr_len); | 298 | memmove(pos, pos + 8, hdr_len); |
300 | pos += hdr_len; | 299 | pos += hdr_len; |
301 | icv = skb_put(skb, 4); | ||
302 | 300 | ||
303 | *pos++ = *rc4key; | 301 | *pos++ = *rc4key; |
304 | *pos++ = *(rc4key + 1); | 302 | *pos++ = *(rc4key + 1); |
@@ -309,28 +307,28 @@ static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv) | |||
309 | *pos++ = (tkey->tx_iv32 >> 16) & 0xff; | 307 | *pos++ = (tkey->tx_iv32 >> 16) & 0xff; |
310 | *pos++ = (tkey->tx_iv32 >> 24) & 0xff; | 308 | *pos++ = (tkey->tx_iv32 >> 24) & 0xff; |
311 | 309 | ||
312 | crc = ~crc32_le(~0, pos, len); | 310 | tkey->tx_iv16++; |
313 | icv[0] = crc; | 311 | if (tkey->tx_iv16 == 0) { |
314 | icv[1] = crc >> 8; | 312 | tkey->tx_phase1_done = 0; |
315 | icv[2] = crc >> 16; | 313 | tkey->tx_iv32++; |
316 | icv[3] = crc >> 24; | 314 | } |
317 | 315 | ||
318 | return rc4key; | 316 | return 8; |
319 | } | 317 | } |
320 | 318 | ||
321 | static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | 319 | static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) |
322 | { | 320 | { |
323 | struct ieee80211_tkip_data *tkey = priv; | 321 | struct ieee80211_tkip_data *tkey = priv; |
324 | int len; | 322 | int len; |
325 | const u8 *rc4key; | 323 | u8 rc4key[16], *pos, *icv; |
326 | u8 *pos; | 324 | u32 crc; |
327 | struct scatterlist sg; | 325 | struct scatterlist sg; |
328 | 326 | ||
329 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { | 327 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { |
330 | if (net_ratelimit()) { | 328 | if (net_ratelimit()) { |
331 | struct ieee80211_hdr_4addr *hdr = | 329 | struct ieee80211_hdr_4addr *hdr = |
332 | (struct ieee80211_hdr_4addr *)skb->data; | 330 | (struct ieee80211_hdr_4addr *)skb->data; |
333 | printk(KERN_DEBUG "TKIP countermeasures: dropped " | 331 | printk(KERN_DEBUG ": TKIP countermeasures: dropped " |
334 | "TX packet to " MAC_FMT "\n", | 332 | "TX packet to " MAC_FMT "\n", |
335 | MAC_ARG(hdr->addr1)); | 333 | MAC_ARG(hdr->addr1)); |
336 | } | 334 | } |
@@ -343,22 +341,23 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
343 | len = skb->len - hdr_len; | 341 | len = skb->len - hdr_len; |
344 | pos = skb->data + hdr_len; | 342 | pos = skb->data + hdr_len; |
345 | 343 | ||
346 | rc4key = ieee80211_tkip_hdr(skb, hdr_len, priv); | 344 | if ((ieee80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0) |
347 | if (!rc4key) | ||
348 | return -1; | 345 | return -1; |
349 | 346 | ||
347 | icv = skb_put(skb, 4); | ||
348 | |||
349 | crc = ~crc32_le(~0, pos, len); | ||
350 | icv[0] = crc; | ||
351 | icv[1] = crc >> 8; | ||
352 | icv[2] = crc >> 16; | ||
353 | icv[3] = crc >> 24; | ||
354 | |||
350 | crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); | 355 | crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); |
351 | sg.page = virt_to_page(pos); | 356 | sg.page = virt_to_page(pos); |
352 | sg.offset = offset_in_page(pos); | 357 | sg.offset = offset_in_page(pos); |
353 | sg.length = len + 4; | 358 | sg.length = len + 4; |
354 | crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4); | 359 | crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4); |
355 | 360 | ||
356 | tkey->tx_iv16++; | ||
357 | if (tkey->tx_iv16 == 0) { | ||
358 | tkey->tx_phase1_done = 0; | ||
359 | tkey->tx_iv32++; | ||
360 | } | ||
361 | |||
362 | return 0; | 361 | return 0; |
363 | } | 362 | } |
364 | 363 | ||
@@ -379,7 +378,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
379 | 378 | ||
380 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { | 379 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { |
381 | if (net_ratelimit()) { | 380 | if (net_ratelimit()) { |
382 | printk(KERN_DEBUG "TKIP countermeasures: dropped " | 381 | printk(KERN_DEBUG ": TKIP countermeasures: dropped " |
383 | "received packet from " MAC_FMT "\n", | 382 | "received packet from " MAC_FMT "\n", |
384 | MAC_ARG(hdr->addr2)); | 383 | MAC_ARG(hdr->addr2)); |
385 | } | 384 | } |
@@ -695,6 +694,7 @@ static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { | |||
695 | .name = "TKIP", | 694 | .name = "TKIP", |
696 | .init = ieee80211_tkip_init, | 695 | .init = ieee80211_tkip_init, |
697 | .deinit = ieee80211_tkip_deinit, | 696 | .deinit = ieee80211_tkip_deinit, |
697 | .build_iv = ieee80211_tkip_hdr, | ||
698 | .encrypt_mpdu = ieee80211_tkip_encrypt, | 698 | .encrypt_mpdu = ieee80211_tkip_encrypt, |
699 | .decrypt_mpdu = ieee80211_tkip_decrypt, | 699 | .decrypt_mpdu = ieee80211_tkip_decrypt, |
700 | .encrypt_msdu = ieee80211_michael_mic_add, | 700 | .encrypt_msdu = ieee80211_michael_mic_add, |
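With the IV16/IV32 bookkeeping moved into ieee80211_tkip_hdr(), any user of the build_iv hook advances the TKIP sequence counter, not just the software encrypt_mpdu path. A stand-alone restatement of that counter logic, with a trimmed struct standing in for ieee80211_tkip_data:

    #include <stdint.h>

    struct tkip_tx_state {              /* trimmed stand-in, illustration only */
        uint16_t tx_iv16;
        uint32_t tx_iv32;
        int      tx_phase1_done;
    };

    /* mirror of the increment at the end of ieee80211_tkip_hdr(): the 16-bit
     * counter rolls over into the 32-bit half, and a wrap forces the phase-1
     * key mix to be redone for the next frame */
    static void tkip_advance_tsc(struct tkip_tx_state *t)
    {
        t->tx_iv16++;
        if (t->tx_iv16 == 0) {
            t->tx_phase1_done = 0;
            t->tx_iv32++;
        }
    }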
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index f8dca31be5dd..649e581fa565 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c | |||
@@ -76,7 +76,8 @@ static void prism2_wep_deinit(void *priv) | |||
76 | } | 76 | } |
77 | 77 | ||
78 | /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ | 78 | /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ |
79 | static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len, void *priv) | 79 | static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len, |
80 | u8 *key, int keylen, void *priv) | ||
80 | { | 81 | { |
81 | struct prism2_wep_data *wep = priv; | 82 | struct prism2_wep_data *wep = priv; |
82 | u32 klen, len; | 83 | u32 klen, len; |
@@ -131,7 +132,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
131 | return -1; | 132 | return -1; |
132 | 133 | ||
133 | /* add the IV to the frame */ | 134 | /* add the IV to the frame */ |
134 | if (prism2_wep_build_iv(skb, hdr_len, priv)) | 135 | if (prism2_wep_build_iv(skb, hdr_len, NULL, 0, priv)) |
135 | return -1; | 136 | return -1; |
136 | 137 | ||
137 | /* Copy the IV into the first 3 bytes of the key */ | 138 | /* Copy the IV into the first 3 bytes of the key */ |
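WEP gets the same widened signature as CCMP and TKIP, so the build_iv member of struct ieee80211_crypto_ops presumably changes to match; its declaration lives in include/net/ieee80211.h, which is not part of the hunks shown here. The shared hook shape, written as an illustrative typedef (the typedef name is made up for this note); note that WEP and CCMP treat the key buffer as optional, while the TKIP version refuses to run without one because the mixed RC4 key has to be returned to the caller:

    #include <linux/types.h>

    struct sk_buff;

    /* illustrative typedef only -- the authoritative prototype is the
     * struct ieee80211_crypto_ops member in include/net/ieee80211.h */
    typedef int (*example_build_iv_fn)(struct sk_buff *skb, int hdr_len,
                                       u8 *key, int keylen, void *priv);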
diff --git a/net/ieee80211/ieee80211_geo.c b/net/ieee80211/ieee80211_geo.c index 610cc5cbc252..192243ab35ed 100644 --- a/net/ieee80211/ieee80211_geo.c +++ b/net/ieee80211/ieee80211_geo.c | |||
@@ -50,7 +50,8 @@ int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel) | |||
50 | 50 | ||
51 | /* Driver needs to initialize the geography map before using | 51 | /* Driver needs to initialize the geography map before using |
52 | * these helper functions */ | 52 | * these helper functions */ |
53 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | 53 | if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) |
54 | return 0; | ||
54 | 55 | ||
55 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | 56 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) |
56 | for (i = 0; i < ieee->geo.bg_channels; i++) | 57 | for (i = 0; i < ieee->geo.bg_channels; i++) |
@@ -58,13 +59,15 @@ int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel) | |||
58 | * this is a B only channel, we don't see it | 59 | * this is a B only channel, we don't see it |
59 | * as valid. */ | 60 | * as valid. */ |
60 | if ((ieee->geo.bg[i].channel == channel) && | 61 | if ((ieee->geo.bg[i].channel == channel) && |
62 | !(ieee->geo.bg[i].flags & IEEE80211_CH_INVALID) && | ||
61 | (!(ieee->mode & IEEE_G) || | 63 | (!(ieee->mode & IEEE_G) || |
62 | !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) | 64 | !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) |
63 | return IEEE80211_24GHZ_BAND; | 65 | return IEEE80211_24GHZ_BAND; |
64 | 66 | ||
65 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | 67 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) |
66 | for (i = 0; i < ieee->geo.a_channels; i++) | 68 | for (i = 0; i < ieee->geo.a_channels; i++) |
67 | if (ieee->geo.a[i].channel == channel) | 69 | if ((ieee->geo.a[i].channel == channel) && |
70 | !(ieee->geo.a[i].flags & IEEE80211_CH_INVALID)) | ||
68 | return IEEE80211_52GHZ_BAND; | 71 | return IEEE80211_52GHZ_BAND; |
69 | 72 | ||
70 | return 0; | 73 | return 0; |
@@ -76,7 +79,8 @@ int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel) | |||
76 | 79 | ||
77 | /* Driver needs to initialize the geography map before using | 80 | /* Driver needs to initialize the geography map before using |
78 | * these helper functions */ | 81 | * these helper functions */ |
79 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | 82 | if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) |
83 | return -1; | ||
80 | 84 | ||
81 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | 85 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) |
82 | for (i = 0; i < ieee->geo.bg_channels; i++) | 86 | for (i = 0; i < ieee->geo.bg_channels; i++) |
@@ -97,7 +101,8 @@ u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq) | |||
97 | 101 | ||
98 | /* Driver needs to initialize the geography map before using | 102 | /* Driver needs to initialize the geography map before using |
99 | * these helper functions */ | 103 | * these helper functions */ |
100 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | 104 | if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) |
105 | return 0; | ||
101 | 106 | ||
102 | freq /= 100000; | 107 | freq /= 100000; |
103 | 108 | ||
@@ -133,6 +138,41 @@ const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee) | |||
133 | return &ieee->geo; | 138 | return &ieee->geo; |
134 | } | 139 | } |
135 | 140 | ||
141 | u8 ieee80211_get_channel_flags(struct ieee80211_device * ieee, u8 channel) | ||
142 | { | ||
143 | int index = ieee80211_channel_to_index(ieee, channel); | ||
144 | |||
145 | if (index == -1) | ||
146 | return IEEE80211_CH_INVALID; | ||
147 | |||
148 | if (channel <= IEEE80211_24GHZ_CHANNELS) | ||
149 | return ieee->geo.bg[index].flags; | ||
150 | |||
151 | return ieee->geo.a[index].flags; | ||
152 | } | ||
153 | |||
154 | static const struct ieee80211_channel bad_channel = { | ||
155 | .channel = 0, | ||
156 | .flags = IEEE80211_CH_INVALID, | ||
157 | .max_power = 0, | ||
158 | }; | ||
159 | |||
160 | const struct ieee80211_channel *ieee80211_get_channel(struct ieee80211_device | ||
161 | *ieee, u8 channel) | ||
162 | { | ||
163 | int index = ieee80211_channel_to_index(ieee, channel); | ||
164 | |||
165 | if (index == -1) | ||
166 | return &bad_channel; | ||
167 | |||
168 | if (channel <= IEEE80211_24GHZ_CHANNELS) | ||
169 | return &ieee->geo.bg[index]; | ||
170 | |||
171 | return &ieee->geo.a[index]; | ||
172 | } | ||
173 | |||
174 | EXPORT_SYMBOL(ieee80211_get_channel); | ||
175 | EXPORT_SYMBOL(ieee80211_get_channel_flags); | ||
136 | EXPORT_SYMBOL(ieee80211_is_valid_channel); | 176 | EXPORT_SYMBOL(ieee80211_is_valid_channel); |
137 | EXPORT_SYMBOL(ieee80211_freq_to_channel); | 177 | EXPORT_SYMBOL(ieee80211_freq_to_channel); |
138 | EXPORT_SYMBOL(ieee80211_channel_to_index); | 178 | EXPORT_SYMBOL(ieee80211_channel_to_index); |
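The BUG_ON()s become soft failures, and the two new helpers give callers a channel's flags or its full ieee80211_channel entry without ever returning NULL: unknown channels map to a static entry flagged IEEE80211_CH_INVALID. A sketch of a driver-side check built on the flags helper; treating IEEE80211_CH_RADAR_DETECT as "not usable until radar detection has run" is the usual reading of that flag, not something this hunk itself enforces:

    #include <net/ieee80211.h>

    static int example_channel_usable(struct ieee80211_device *ieee, u8 channel)
    {
        u8 flags = ieee80211_get_channel_flags(ieee, channel);

        if (flags & IEEE80211_CH_INVALID)
            return 0;       /* channel not in the current geography map */
        if (flags & IEEE80211_CH_RADAR_DETECT)
            return 0;       /* DFS channel: stay passive until radar check */
        return 1;
    }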
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 90d18b72da3d..2cb84d84f671 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c | |||
@@ -82,10 +82,28 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee) | |||
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | 84 | ||
85 | void ieee80211_network_reset(struct ieee80211_network *network) | ||
86 | { | ||
87 | if (!network) | ||
88 | return; | ||
89 | |||
90 | if (network->ibss_dfs) { | ||
91 | kfree(network->ibss_dfs); | ||
92 | network->ibss_dfs = NULL; | ||
93 | } | ||
94 | } | ||
95 | |||
85 | static inline void ieee80211_networks_free(struct ieee80211_device *ieee) | 96 | static inline void ieee80211_networks_free(struct ieee80211_device *ieee) |
86 | { | 97 | { |
98 | int i; | ||
99 | |||
87 | if (!ieee->networks) | 100 | if (!ieee->networks) |
88 | return; | 101 | return; |
102 | |||
103 | for (i = 0; i < MAX_NETWORK_COUNT; i++) | ||
104 | if (ieee->networks[i].ibss_dfs) | ||
105 | kfree(ieee->networks[i].ibss_dfs); | ||
106 | |||
89 | kfree(ieee->networks); | 107 | kfree(ieee->networks); |
90 | ieee->networks = NULL; | 108 | ieee->networks = NULL; |
91 | } | 109 | } |
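Since kfree(NULL) is a no-op, the guard inside the new ieee80211_networks_free() loop above is redundant (though harmless); an equivalent, slightly tighter form would be:

    for (i = 0; i < MAX_NETWORK_COUNT; i++)
        kfree(ieee->networks[i].ibss_dfs);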
@@ -195,7 +213,7 @@ void free_ieee80211(struct net_device *dev) | |||
195 | 213 | ||
196 | static int debug = 0; | 214 | static int debug = 0; |
197 | u32 ieee80211_debug_level = 0; | 215 | u32 ieee80211_debug_level = 0; |
198 | struct proc_dir_entry *ieee80211_proc = NULL; | 216 | static struct proc_dir_entry *ieee80211_proc = NULL; |
199 | 217 | ||
200 | static int show_debug_level(char *page, char **start, off_t offset, | 218 | static int show_debug_level(char *page, char **start, off_t offset, |
201 | int count, int *eof, void *data) | 219 | int count, int *eof, void *data) |
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 7ac6a7165d9c..a7f2a642a512 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -369,8 +369,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
369 | 369 | ||
370 | /* Put this code here so that we avoid duplicating it in all | 370 | /* Put this code here so that we avoid duplicating it in all |
371 | * Rx paths. - Jean II */ | 371 | * Rx paths. - Jean II */ |
372 | #ifdef CONFIG_WIRELESS_EXT | ||
372 | #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ | 373 | #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ |
373 | #ifdef CONFIG_NET_RADIO | ||
374 | /* If spy monitoring on */ | 374 | /* If spy monitoring on */ |
375 | if (ieee->spy_data.spy_number > 0) { | 375 | if (ieee->spy_data.spy_number > 0) { |
376 | struct iw_quality wstats; | 376 | struct iw_quality wstats; |
@@ -397,8 +397,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
397 | /* Update spy records */ | 397 | /* Update spy records */ |
398 | wireless_spy_update(ieee->dev, hdr->addr2, &wstats); | 398 | wireless_spy_update(ieee->dev, hdr->addr2, &wstats); |
399 | } | 399 | } |
400 | #endif /* CONFIG_NET_RADIO */ | ||
401 | #endif /* IW_WIRELESS_SPY */ | 400 | #endif /* IW_WIRELESS_SPY */ |
401 | #endif /* CONFIG_WIRELESS_EXT */ | ||
402 | 402 | ||
403 | #ifdef NOT_YET | 403 | #ifdef NOT_YET |
404 | hostap_update_rx_stats(local->ap, hdr, rx_stats); | 404 | hostap_update_rx_stats(local->ap, hdr, rx_stats); |
@@ -574,7 +574,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
574 | /* skb: hdr + (possibly fragmented) plaintext payload */ | 574 | /* skb: hdr + (possibly fragmented) plaintext payload */ |
575 | // PR: FIXME: hostap has additional conditions in the "if" below: | 575 | // PR: FIXME: hostap has additional conditions in the "if" below: |
576 | // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && | 576 | // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && |
577 | if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) { | 577 | if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) { |
578 | int flen; | 578 | int flen; |
579 | struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); | 579 | struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); |
580 | IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); | 580 | IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); |
@@ -754,7 +754,14 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
754 | memset(skb->cb, 0, sizeof(skb->cb)); | 754 | memset(skb->cb, 0, sizeof(skb->cb)); |
755 | skb->dev = dev; | 755 | skb->dev = dev; |
756 | skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ | 756 | skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ |
757 | netif_rx(skb); | 757 | if (netif_rx(skb) == NET_RX_DROP) { |
758 | /* netif_rx always succeeds, but it might drop | ||
759 | * the packet. If it drops the packet, we log that | ||
760 | * in our stats. */ | ||
761 | IEEE80211_DEBUG_DROP | ||
762 | ("RX: netif_rx dropped the packet\n"); | ||
763 | stats->rx_dropped++; | ||
764 | } | ||
758 | } | 765 | } |
759 | 766 | ||
760 | rx_exit: | 767 | rx_exit: |
@@ -930,6 +937,45 @@ static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element | |||
930 | return rc; | 937 | return rc; |
931 | } | 938 | } |
932 | 939 | ||
940 | #ifdef CONFIG_IEEE80211_DEBUG | ||
941 | #define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x | ||
942 | |||
943 | static const char *get_info_element_string(u16 id) | ||
944 | { | ||
945 | switch (id) { | ||
946 | MFIE_STRING(SSID); | ||
947 | MFIE_STRING(RATES); | ||
948 | MFIE_STRING(FH_SET); | ||
949 | MFIE_STRING(DS_SET); | ||
950 | MFIE_STRING(CF_SET); | ||
951 | MFIE_STRING(TIM); | ||
952 | MFIE_STRING(IBSS_SET); | ||
953 | MFIE_STRING(COUNTRY); | ||
954 | MFIE_STRING(HOP_PARAMS); | ||
955 | MFIE_STRING(HOP_TABLE); | ||
956 | MFIE_STRING(REQUEST); | ||
957 | MFIE_STRING(CHALLENGE); | ||
958 | MFIE_STRING(POWER_CONSTRAINT); | ||
959 | MFIE_STRING(POWER_CAPABILITY); | ||
960 | MFIE_STRING(TPC_REQUEST); | ||
961 | MFIE_STRING(TPC_REPORT); | ||
962 | MFIE_STRING(SUPP_CHANNELS); | ||
963 | MFIE_STRING(CSA); | ||
964 | MFIE_STRING(MEASURE_REQUEST); | ||
965 | MFIE_STRING(MEASURE_REPORT); | ||
966 | MFIE_STRING(QUIET); | ||
967 | MFIE_STRING(IBSS_DFS); | ||
968 | MFIE_STRING(ERP_INFO); | ||
969 | MFIE_STRING(RSN); | ||
970 | MFIE_STRING(RATES_EX); | ||
971 | MFIE_STRING(GENERIC); | ||
972 | MFIE_STRING(QOS_PARAMETER); | ||
973 | default: | ||
974 | return "UNKNOWN"; | ||
975 | } | ||
976 | } | ||
977 | #endif | ||
978 | |||
933 | static int ieee80211_parse_info_param(struct ieee80211_info_element | 979 | static int ieee80211_parse_info_param(struct ieee80211_info_element |
934 | *info_element, u16 length, | 980 | *info_element, u16 length, |
935 | struct ieee80211_network *network) | 981 | struct ieee80211_network *network) |
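The MFIE_STRING() helper above leans on the preprocessor: ## pastes the element name onto the MFIE_TYPE_ prefix to form the case label, and # stringizes the same name for the return value. A stand-alone illustration of the same trick outside the kernel:

    #include <stdio.h>

    enum { COLOR_RED, COLOR_GREEN, COLOR_BLUE };

    /* same pattern as MFIE_STRING(): case label and string from one token */
    #define COLOR_STRING(x) case COLOR_ ##x: return #x

    static const char *color_name(int id)
    {
        switch (id) {
            COLOR_STRING(RED);
            COLOR_STRING(GREEN);
            COLOR_STRING(BLUE);
        default:
            return "UNKNOWN";
        }
    }

    int main(void)
    {
        printf("%s\n", color_name(COLOR_GREEN));    /* prints GREEN */
        return 0;
    }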
@@ -1040,7 +1086,9 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1040 | break; | 1086 | break; |
1041 | 1087 | ||
1042 | case MFIE_TYPE_TIM: | 1088 | case MFIE_TYPE_TIM: |
1043 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: ignored\n"); | 1089 | network->tim.tim_count = info_element->data[0]; |
1090 | network->tim.tim_period = info_element->data[1]; | ||
1091 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n"); | ||
1044 | break; | 1092 | break; |
1045 | 1093 | ||
1046 | case MFIE_TYPE_ERP_INFO: | 1094 | case MFIE_TYPE_ERP_INFO: |
@@ -1091,10 +1139,49 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1091 | printk(KERN_ERR | 1139 | printk(KERN_ERR |
1092 | "QoS Error need to parse QOS_PARAMETER IE\n"); | 1140 | "QoS Error need to parse QOS_PARAMETER IE\n"); |
1093 | break; | 1141 | break; |
1142 | /* 802.11h */ | ||
1143 | case MFIE_TYPE_POWER_CONSTRAINT: | ||
1144 | network->power_constraint = info_element->data[0]; | ||
1145 | network->flags |= NETWORK_HAS_POWER_CONSTRAINT; | ||
1146 | break; | ||
1147 | |||
1148 | case MFIE_TYPE_CSA: | ||
1149 | network->power_constraint = info_element->data[0]; | ||
1150 | network->flags |= NETWORK_HAS_CSA; | ||
1151 | break; | ||
1152 | |||
1153 | case MFIE_TYPE_QUIET: | ||
1154 | network->quiet.count = info_element->data[0]; | ||
1155 | network->quiet.period = info_element->data[1]; | ||
1156 | network->quiet.duration = info_element->data[2]; | ||
1157 | network->quiet.offset = info_element->data[3]; | ||
1158 | network->flags |= NETWORK_HAS_QUIET; | ||
1159 | break; | ||
1160 | |||
1161 | case MFIE_TYPE_IBSS_DFS: | ||
1162 | if (network->ibss_dfs) | ||
1163 | break; | ||
1164 | network->ibss_dfs = | ||
1165 | kmalloc(info_element->len, GFP_ATOMIC); | ||
1166 | if (!network->ibss_dfs) | ||
1167 | return 1; | ||
1168 | memcpy(network->ibss_dfs, info_element->data, | ||
1169 | info_element->len); | ||
1170 | network->flags |= NETWORK_HAS_IBSS_DFS; | ||
1171 | break; | ||
1172 | |||
1173 | case MFIE_TYPE_TPC_REPORT: | ||
1174 | network->tpc_report.transmit_power = | ||
1175 | info_element->data[0]; | ||
1176 | network->tpc_report.link_margin = info_element->data[1]; | ||
1177 | network->flags |= NETWORK_HAS_TPC_REPORT; | ||
1178 | break; | ||
1094 | 1179 | ||
1095 | default: | 1180 | default: |
1096 | IEEE80211_DEBUG_MGMT("unsupported IE %d\n", | 1181 | IEEE80211_DEBUG_MGMT |
1097 | info_element->id); | 1182 | ("Unsupported info element: %s (%d)\n", |
1183 | get_info_element_string(info_element->id), | ||
1184 | info_element->id); | ||
1098 | break; | 1185 | break; |
1099 | } | 1186 | } |
1100 | 1187 | ||
@@ -1110,7 +1197,9 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1110 | static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response | 1197 | static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response |
1111 | *frame, struct ieee80211_rx_stats *stats) | 1198 | *frame, struct ieee80211_rx_stats *stats) |
1112 | { | 1199 | { |
1113 | struct ieee80211_network network_resp; | 1200 | struct ieee80211_network network_resp = { |
1201 | .ibss_dfs = NULL, | ||
1202 | }; | ||
1114 | struct ieee80211_network *network = &network_resp; | 1203 | struct ieee80211_network *network = &network_resp; |
1115 | struct net_device *dev = ieee->dev; | 1204 | struct net_device *dev = ieee->dev; |
1116 | 1205 | ||
@@ -1253,7 +1342,22 @@ static void update_network(struct ieee80211_network *dst, | |||
1253 | int qos_active; | 1342 | int qos_active; |
1254 | u8 old_param; | 1343 | u8 old_param; |
1255 | 1344 | ||
1256 | memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); | 1345 | ieee80211_network_reset(dst); |
1346 | dst->ibss_dfs = src->ibss_dfs; | ||
1347 | |||
1348 | /* We only update the statistics if they were created by receiving | ||
1349 | * the network information on the actual channel the network is on. | ||
1350 | * | ||
1351 | * This keeps beacons received on neighbor channels from bringing | ||
1352 | * down the signal level of an AP. */ | ||
1353 | if (dst->channel == src->stats.received_channel) | ||
1354 | memcpy(&dst->stats, &src->stats, | ||
1355 | sizeof(struct ieee80211_rx_stats)); | ||
1356 | else | ||
1357 | IEEE80211_DEBUG_SCAN("Network " MAC_FMT " info received " | ||
1358 | "off channel (%d vs. %d)\n", MAC_ARG(src->bssid), | ||
1359 | dst->channel, src->stats.received_channel); | ||
1360 | |||
1257 | dst->capability = src->capability; | 1361 | dst->capability = src->capability; |
1258 | memcpy(dst->rates, src->rates, src->rates_len); | 1362 | memcpy(dst->rates, src->rates, src->rates_len); |
1259 | dst->rates_len = src->rates_len; | 1363 | dst->rates_len = src->rates_len; |
@@ -1269,6 +1373,7 @@ static void update_network(struct ieee80211_network *dst, | |||
1269 | dst->listen_interval = src->listen_interval; | 1373 | dst->listen_interval = src->listen_interval; |
1270 | dst->atim_window = src->atim_window; | 1374 | dst->atim_window = src->atim_window; |
1271 | dst->erp_value = src->erp_value; | 1375 | dst->erp_value = src->erp_value; |
1376 | dst->tim = src->tim; | ||
1272 | 1377 | ||
1273 | memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); | 1378 | memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); |
1274 | dst->wpa_ie_len = src->wpa_ie_len; | 1379 | dst->wpa_ie_len = src->wpa_ie_len; |
@@ -1313,7 +1418,9 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1313 | *stats) | 1418 | *stats) |
1314 | { | 1419 | { |
1315 | struct net_device *dev = ieee->dev; | 1420 | struct net_device *dev = ieee->dev; |
1316 | struct ieee80211_network network; | 1421 | struct ieee80211_network network = { |
1422 | .ibss_dfs = NULL, | ||
1423 | }; | ||
1317 | struct ieee80211_network *target; | 1424 | struct ieee80211_network *target; |
1318 | struct ieee80211_network *oldest = NULL; | 1425 | struct ieee80211_network *oldest = NULL; |
1319 | #ifdef CONFIG_IEEE80211_DEBUG | 1426 | #ifdef CONFIG_IEEE80211_DEBUG |
@@ -1386,6 +1493,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1386 | escape_essid(target->ssid, | 1493 | escape_essid(target->ssid, |
1387 | target->ssid_len), | 1494 | target->ssid_len), |
1388 | MAC_ARG(target->bssid)); | 1495 | MAC_ARG(target->bssid)); |
1496 | ieee80211_network_reset(target); | ||
1389 | } else { | 1497 | } else { |
1390 | /* Otherwise just pull from the free list */ | 1498 | /* Otherwise just pull from the free list */ |
1391 | target = list_entry(ieee->network_free_list.next, | 1499 | target = list_entry(ieee->network_free_list.next, |
@@ -1402,6 +1510,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1402 | "BEACON" : "PROBE RESPONSE"); | 1510 | "BEACON" : "PROBE RESPONSE"); |
1403 | #endif | 1511 | #endif |
1404 | memcpy(target, &network, sizeof(*target)); | 1512 | memcpy(target, &network, sizeof(*target)); |
1513 | network.ibss_dfs = NULL; | ||
1405 | list_add_tail(&target->list, &ieee->network_list); | 1514 | list_add_tail(&target->list, &ieee->network_list); |
1406 | } else { | 1515 | } else { |
1407 | IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n", | 1516 | IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n", |
@@ -1411,6 +1520,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1411 | is_beacon(beacon->header.frame_ctl) ? | 1520 | is_beacon(beacon->header.frame_ctl) ? |
1412 | "BEACON" : "PROBE RESPONSE"); | 1521 | "BEACON" : "PROBE RESPONSE"); |
1413 | update_network(target, &network); | 1522 | update_network(target, &network); |
1523 | network.ibss_dfs = NULL; | ||
1414 | } | 1524 | } |
1415 | 1525 | ||
1416 | spin_unlock_irqrestore(&ieee->lock, flags); | 1526 | spin_unlock_irqrestore(&ieee->lock, flags); |
@@ -1495,10 +1605,43 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, | |||
1495 | header); | 1605 | header); |
1496 | break; | 1606 | break; |
1497 | 1607 | ||
1608 | case IEEE80211_STYPE_ACTION: | ||
1609 | IEEE80211_DEBUG_MGMT("ACTION\n"); | ||
1610 | if (ieee->handle_action) | ||
1611 | ieee->handle_action(ieee->dev, | ||
1612 | (struct ieee80211_action *) | ||
1613 | header, stats); | ||
1614 | break; | ||
1615 | |||
1616 | case IEEE80211_STYPE_REASSOC_REQ: | ||
1617 | IEEE80211_DEBUG_MGMT("received reassoc (%d)\n", | ||
1618 | WLAN_FC_GET_STYPE(le16_to_cpu | ||
1619 | (header->frame_ctl))); | ||
1620 | |||
1621 | IEEE80211_WARNING("%s: IEEE80211_REASSOC_REQ received\n", | ||
1622 | ieee->dev->name); | ||
1623 | if (ieee->handle_reassoc_request != NULL) | ||
1624 | ieee->handle_reassoc_request(ieee->dev, | ||
1625 | (struct ieee80211_reassoc_request *) | ||
1626 | header); | ||
1627 | break; | ||
1628 | |||
1629 | case IEEE80211_STYPE_ASSOC_REQ: | ||
1630 | IEEE80211_DEBUG_MGMT("received assoc (%d)\n", | ||
1631 | WLAN_FC_GET_STYPE(le16_to_cpu | ||
1632 | (header->frame_ctl))); | ||
1633 | |||
1634 | IEEE80211_WARNING("%s: IEEE80211_ASSOC_REQ received\n", | ||
1635 | ieee->dev->name); | ||
1636 | if (ieee->handle_assoc_request != NULL) | ||
1637 | ieee->handle_assoc_request(ieee->dev); | ||
1638 | break; | ||
1639 | |||
1498 | case IEEE80211_STYPE_DEAUTH: | 1640 | case IEEE80211_STYPE_DEAUTH: |
1499 | printk("DEAUTH from AP\n"); | 1641 | IEEE80211_DEBUG_MGMT("DEAUTH\n"); |
1500 | if (ieee->handle_deauth != NULL) | 1642 | if (ieee->handle_deauth != NULL) |
1501 | ieee->handle_deauth(ieee->dev, (struct ieee80211_auth *) | 1643 | ieee->handle_deauth(ieee->dev, |
1644 | (struct ieee80211_deauth *) | ||
1502 | header); | 1645 | header); |
1503 | break; | 1646 | break; |
1504 | default: | 1647 | default: |
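ieee80211_rx_mgt() now forwards action frames and (re)association requests to driver callbacks when they are set, and the deauth handler gets the struct ieee80211_deauth cast it always meant to use. A fragment showing how a driver might opt in; the mydrv_* handlers are hypothetical and have to match the callback prototypes declared for these members in net/ieee80211.h:

    struct ieee80211_device *ieee = netdev_priv(dev);   /* dev from alloc_ieee80211() */

    /* only the callbacks a driver actually implements need to be set;
     * ieee80211_rx_mgt() checks each pointer before calling it */
    ieee->handle_action          = mydrv_handle_action;
    ieee->handle_assoc_request   = mydrv_handle_assoc_request;
    ieee->handle_reassoc_request = mydrv_handle_reassoc_request;
    ieee->handle_deauth          = mydrv_handle_deauth;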
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 8fdd943ebe8e..8b4332f53394 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c | |||
@@ -56,7 +56,18 @@ Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs | | |||
56 | `--------------------------------------------------| |------' | 56 | `--------------------------------------------------| |------' |
57 | Total: 28 non-data bytes `----.----' | 57 | Total: 28 non-data bytes `----.----' |
58 | | | 58 | | |
59 | .- 'Frame data' expands to <---------------------------' | 59 | .- 'Frame data' expands, if WEP enabled, to <----------' |
60 | | | ||
61 | V | ||
62 | ,-----------------------. | ||
63 | Bytes | 4 | 0-2296 | 4 | | ||
64 | |-----|-----------|-----| | ||
65 | Desc. | IV | Encrypted | ICV | | ||
66 | | | Packet | | | ||
67 | `-----| |-----' | ||
68 | `-----.-----' | ||
69 | | | ||
70 | .- 'Encrypted Packet' expands to | ||
60 | | | 71 | | |
61 | V | 72 | V |
62 | ,---------------------------------------------------. | 73 | ,---------------------------------------------------. |
@@ -65,18 +76,7 @@ Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 | | |||
65 | Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | | 76 | Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | |
66 | | DSAP | SSAP | | | | Packet | | 77 | | DSAP | SSAP | | | | Packet | |
67 | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | | 78 | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | |
68 | `-----------------------------------------| | | 79 | `---------------------------------------------------- |
69 | Total: 8 non-data bytes `----.----' | ||
70 | | | ||
71 | .- 'IP Packet' expands, if WEP enabled, to <--' | ||
72 | | | ||
73 | V | ||
74 | ,-----------------------. | ||
75 | Bytes | 4 | 0-2296 | 4 | | ||
76 | |-----|-----------|-----| | ||
77 | Desc. | IV | Encrypted | ICV | | ||
78 | | | IP Packet | | | ||
79 | `-----------------------' | ||
80 | Total: 8 non-data bytes | 80 | Total: 8 non-data bytes |
81 | 81 | ||
82 | 802.3 Ethernet Data Frame | 82 | 802.3 Ethernet Data Frame |
@@ -470,7 +470,9 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
470 | atomic_inc(&crypt->refcnt); | 470 | atomic_inc(&crypt->refcnt); |
471 | if (crypt->ops->build_iv) | 471 | if (crypt->ops->build_iv) |
472 | crypt->ops->build_iv(skb_frag, hdr_len, | 472 | crypt->ops->build_iv(skb_frag, hdr_len, |
473 | crypt->priv); | 473 | ieee->sec.keys[ieee->sec.active_key], |
474 | ieee->sec.key_sizes[ieee->sec.active_key], | ||
475 | crypt->priv); | ||
474 | atomic_dec(&crypt->refcnt); | 476 | atomic_dec(&crypt->refcnt); |
475 | } | 477 | } |
476 | 478 | ||
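Reading across the columns, the resulting call in ieee80211_xmit() once this hunk is applied:

    atomic_inc(&crypt->refcnt);
    if (crypt->ops->build_iv)
        crypt->ops->build_iv(skb_frag, hdr_len,
                             ieee->sec.keys[ieee->sec.active_key],
                             ieee->sec.key_sizes[ieee->sec.active_key],
                             crypt->priv);
    atomic_dec(&crypt->refcnt);

This is what makes the key copy-out added to the CCMP, TKIP and WEP build_iv implementations reachable: the active software key and its length now travel down through the hook.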
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index f87c6b89f845..af7f9bbfd18a 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c | |||
@@ -149,9 +149,7 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
149 | iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID | | 149 | iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID | |
150 | IW_QUAL_LEVEL_INVALID; | 150 | IW_QUAL_LEVEL_INVALID; |
151 | iwe.u.qual.qual = 0; | 151 | iwe.u.qual.qual = 0; |
152 | iwe.u.qual.level = 0; | ||
153 | } else { | 152 | } else { |
154 | iwe.u.qual.level = network->stats.rssi; | ||
155 | if (ieee->perfect_rssi == ieee->worst_rssi) | 153 | if (ieee->perfect_rssi == ieee->worst_rssi) |
156 | iwe.u.qual.qual = 100; | 154 | iwe.u.qual.qual = 100; |
157 | else | 155 | else |
@@ -179,6 +177,13 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
179 | iwe.u.qual.noise = network->stats.noise; | 177 | iwe.u.qual.noise = network->stats.noise; |
180 | } | 178 | } |
181 | 179 | ||
180 | if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL)) { | ||
181 | iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID; | ||
182 | iwe.u.qual.level = 0; | ||
183 | } else { | ||
184 | iwe.u.qual.level = network->stats.signal; | ||
185 | } | ||
186 | |||
182 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); | 187 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); |
183 | 188 | ||
184 | iwe.cmd = IWEVCUSTOM; | 189 | iwe.cmd = IWEVCUSTOM; |
@@ -188,33 +193,21 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
188 | if (iwe.u.data.length) | 193 | if (iwe.u.data.length) |
189 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 194 | start = iwe_stream_add_point(start, stop, &iwe, custom); |
190 | 195 | ||
196 | memset(&iwe, 0, sizeof(iwe)); | ||
191 | if (network->wpa_ie_len) { | 197 | if (network->wpa_ie_len) { |
192 | char buf[MAX_WPA_IE_LEN * 2 + 30]; | 198 | char buf[MAX_WPA_IE_LEN]; |
193 | 199 | memcpy(buf, network->wpa_ie, network->wpa_ie_len); | |
194 | u8 *p = buf; | 200 | iwe.cmd = IWEVGENIE; |
195 | p += sprintf(p, "wpa_ie="); | 201 | iwe.u.data.length = network->wpa_ie_len; |
196 | for (i = 0; i < network->wpa_ie_len; i++) { | ||
197 | p += sprintf(p, "%02x", network->wpa_ie[i]); | ||
198 | } | ||
199 | |||
200 | memset(&iwe, 0, sizeof(iwe)); | ||
201 | iwe.cmd = IWEVCUSTOM; | ||
202 | iwe.u.data.length = strlen(buf); | ||
203 | start = iwe_stream_add_point(start, stop, &iwe, buf); | 202 | start = iwe_stream_add_point(start, stop, &iwe, buf); |
204 | } | 203 | } |
205 | 204 | ||
205 | memset(&iwe, 0, sizeof(iwe)); | ||
206 | if (network->rsn_ie_len) { | 206 | if (network->rsn_ie_len) { |
207 | char buf[MAX_WPA_IE_LEN * 2 + 30]; | 207 | char buf[MAX_WPA_IE_LEN]; |
208 | 208 | memcpy(buf, network->rsn_ie, network->rsn_ie_len); | |
209 | u8 *p = buf; | 209 | iwe.cmd = IWEVGENIE; |
210 | p += sprintf(p, "rsn_ie="); | 210 | iwe.u.data.length = network->rsn_ie_len; |
211 | for (i = 0; i < network->rsn_ie_len; i++) { | ||
212 | p += sprintf(p, "%02x", network->rsn_ie[i]); | ||
213 | } | ||
214 | |||
215 | memset(&iwe, 0, sizeof(iwe)); | ||
216 | iwe.cmd = IWEVCUSTOM; | ||
217 | iwe.u.data.length = strlen(buf); | ||
218 | start = iwe_stream_add_point(start, stop, &iwe, buf); | 211 | start = iwe_stream_add_point(start, stop, &iwe, buf); |
219 | } | 212 | } |
220 | 213 | ||
@@ -229,6 +222,28 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
229 | if (iwe.u.data.length) | 222 | if (iwe.u.data.length) |
230 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 223 | start = iwe_stream_add_point(start, stop, &iwe, custom); |
231 | 224 | ||
225 | /* Add spectrum management information */ | ||
226 | iwe.cmd = -1; | ||
227 | p = custom; | ||
228 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: "); | ||
229 | |||
230 | if (ieee80211_get_channel_flags(ieee, network->channel) & | ||
231 | IEEE80211_CH_INVALID) { | ||
232 | iwe.cmd = IWEVCUSTOM; | ||
233 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID "); | ||
234 | } | ||
235 | |||
236 | if (ieee80211_get_channel_flags(ieee, network->channel) & | ||
237 | IEEE80211_CH_RADAR_DETECT) { | ||
238 | iwe.cmd = IWEVCUSTOM; | ||
239 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS "); | ||
240 | } | ||
241 | |||
242 | if (iwe.cmd == IWEVCUSTOM) { | ||
243 | iwe.u.data.length = p - custom; | ||
244 | start = iwe_stream_add_point(start, stop, &iwe, custom); | ||
245 | } | ||
246 | |||
232 | return start; | 247 | return start; |
233 | } | 248 | } |
234 | 249 | ||
@@ -734,9 +749,98 @@ int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, | |||
734 | return 0; | 749 | return 0; |
735 | } | 750 | } |
736 | 751 | ||
752 | int ieee80211_wx_set_auth(struct net_device *dev, | ||
753 | struct iw_request_info *info, | ||
754 | union iwreq_data *wrqu, | ||
755 | char *extra) | ||
756 | { | ||
757 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
758 | unsigned long flags; | ||
759 | int err = 0; | ||
760 | |||
761 | spin_lock_irqsave(&ieee->lock, flags); | ||
762 | |||
763 | switch (wrqu->param.flags & IW_AUTH_INDEX) { | ||
764 | case IW_AUTH_WPA_VERSION: | ||
765 | case IW_AUTH_CIPHER_PAIRWISE: | ||
766 | case IW_AUTH_CIPHER_GROUP: | ||
767 | case IW_AUTH_KEY_MGMT: | ||
768 | /* | ||
769 | * Host AP driver does not use these parameters and allows | ||
770 | * wpa_supplicant to control them internally. | ||
771 | */ | ||
772 | break; | ||
773 | case IW_AUTH_TKIP_COUNTERMEASURES: | ||
774 | break; /* FIXME */ | ||
775 | case IW_AUTH_DROP_UNENCRYPTED: | ||
776 | ieee->drop_unencrypted = !!wrqu->param.value; | ||
777 | break; | ||
778 | case IW_AUTH_80211_AUTH_ALG: | ||
779 | break; /* FIXME */ | ||
780 | case IW_AUTH_WPA_ENABLED: | ||
781 | ieee->privacy_invoked = ieee->wpa_enabled = !!wrqu->param.value; | ||
782 | break; | ||
783 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
784 | ieee->ieee802_1x = !!wrqu->param.value; | ||
785 | break; | ||
786 | case IW_AUTH_PRIVACY_INVOKED: | ||
787 | ieee->privacy_invoked = !!wrqu->param.value; | ||
788 | break; | ||
789 | default: | ||
790 | err = -EOPNOTSUPP; | ||
791 | break; | ||
792 | } | ||
793 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
794 | return err; | ||
795 | } | ||
796 | |||
797 | int ieee80211_wx_get_auth(struct net_device *dev, | ||
798 | struct iw_request_info *info, | ||
799 | union iwreq_data *wrqu, | ||
800 | char *extra) | ||
801 | { | ||
802 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
803 | unsigned long flags; | ||
804 | int err = 0; | ||
805 | |||
806 | spin_lock_irqsave(&ieee->lock, flags); | ||
807 | |||
808 | switch (wrqu->param.flags & IW_AUTH_INDEX) { | ||
809 | case IW_AUTH_WPA_VERSION: | ||
810 | case IW_AUTH_CIPHER_PAIRWISE: | ||
811 | case IW_AUTH_CIPHER_GROUP: | ||
812 | case IW_AUTH_KEY_MGMT: | ||
813 | case IW_AUTH_TKIP_COUNTERMEASURES: /* FIXME */ | ||
814 | case IW_AUTH_80211_AUTH_ALG: /* FIXME */ | ||
815 | /* | ||
816 | * Host AP driver does not use these parameters and allows | ||
817 | * wpa_supplicant to control them internally. | ||
818 | */ | ||
819 | err = -EOPNOTSUPP; | ||
820 | break; | ||
821 | case IW_AUTH_DROP_UNENCRYPTED: | ||
822 | wrqu->param.value = ieee->drop_unencrypted; | ||
823 | break; | ||
824 | case IW_AUTH_WPA_ENABLED: | ||
825 | wrqu->param.value = ieee->wpa_enabled; | ||
826 | break; | ||
827 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
828 | wrqu->param.value = ieee->ieee802_1x; | ||
829 | break; | ||
830 | default: | ||
831 | err = -EOPNOTSUPP; | ||
832 | break; | ||
833 | } | ||
834 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
835 | return err; | ||
836 | } | ||
837 | |||
737 | EXPORT_SYMBOL(ieee80211_wx_set_encodeext); | 838 | EXPORT_SYMBOL(ieee80211_wx_set_encodeext); |
738 | EXPORT_SYMBOL(ieee80211_wx_get_encodeext); | 839 | EXPORT_SYMBOL(ieee80211_wx_get_encodeext); |
739 | 840 | ||
740 | EXPORT_SYMBOL(ieee80211_wx_get_scan); | 841 | EXPORT_SYMBOL(ieee80211_wx_get_scan); |
741 | EXPORT_SYMBOL(ieee80211_wx_set_encode); | 842 | EXPORT_SYMBOL(ieee80211_wx_set_encode); |
742 | EXPORT_SYMBOL(ieee80211_wx_get_encode); | 843 | EXPORT_SYMBOL(ieee80211_wx_get_encode); |
844 | |||
845 | EXPORT_SYMBOL_GPL(ieee80211_wx_set_auth); | ||
846 | EXPORT_SYMBOL_GPL(ieee80211_wx_get_auth); | ||
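The new set/get auth handlers already use the iw_handler calling convention, and both are exported GPL-only above, so a driver built on this stack can drop them straight into its wireless-extensions handler table. A sketch under the assumption that their prototypes are made visible through net/ieee80211.h; the mydrv_* names are hypothetical:

    #include <linux/kernel.h>
    #include <net/ieee80211.h>      /* assumed home of the wx_*_auth prototypes */
    #include <net/iw_handler.h>

    static const iw_handler mydrv_wx_handlers[] = {
        [SIOCSIWAUTH - SIOCIWFIRST] = ieee80211_wx_set_auth,
        [SIOCGIWAUTH - SIOCIWFIRST] = ieee80211_wx_get_auth,
    };

    static struct iw_handler_def mydrv_wx_handler_def = {
        .standard     = mydrv_wx_handlers,
        .num_standard = ARRAY_SIZE(mydrv_wx_handlers),
    };

    /* in the driver's init path:
     *     dev->wireless_handlers = &mydrv_wx_handler_def;
     */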
diff --git a/net/socket.c b/net/socket.c index a00851f981db..7e1bdef8b09e 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -84,10 +84,7 @@ | |||
84 | #include <linux/compat.h> | 84 | #include <linux/compat.h> |
85 | #include <linux/kmod.h> | 85 | #include <linux/kmod.h> |
86 | #include <linux/audit.h> | 86 | #include <linux/audit.h> |
87 | 87 | #include <linux/wireless.h> | |
88 | #ifdef CONFIG_NET_RADIO | ||
89 | #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */ | ||
90 | #endif /* CONFIG_NET_RADIO */ | ||
91 | 88 | ||
92 | #include <asm/uaccess.h> | 89 | #include <asm/uaccess.h> |
93 | #include <asm/unistd.h> | 90 | #include <asm/unistd.h> |
@@ -840,11 +837,11 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
840 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { | 837 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { |
841 | err = dev_ioctl(cmd, argp); | 838 | err = dev_ioctl(cmd, argp); |
842 | } else | 839 | } else |
843 | #ifdef WIRELESS_EXT | 840 | #ifdef CONFIG_WIRELESS_EXT |
844 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { | 841 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { |
845 | err = dev_ioctl(cmd, argp); | 842 | err = dev_ioctl(cmd, argp); |
846 | } else | 843 | } else |
847 | #endif /* WIRELESS_EXT */ | 844 | #endif /* CONFIG_WIRELESS_EXT */ |
848 | switch (cmd) { | 845 | switch (cmd) { |
849 | case FIOSETOWN: | 846 | case FIOSETOWN: |
850 | case SIOCSPGRP: | 847 | case SIOCSPGRP: |