diff options
author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-03-21 12:08:21 -0500 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-03-21 12:08:21 -0500 |
commit | ac58c9059da8886b5e8cde012a80266b18ca146e (patch) | |
tree | 40bf486843a2cace6c3a959d73423e50e6aa0c00 | |
parent | df6db302cb236ac3a683d535a3e2073d9f4b2833 (diff) | |
parent | c4a1745aa09fc110afdefea0e5d025043e348bae (diff) |
Merge branch 'linus'
318 files changed, 20896 insertions, 9982 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile index 1c955883cf58..2975291e296a 100644 --- a/Documentation/DocBook/Makefile +++ b/Documentation/DocBook/Makefile | |||
@@ -9,7 +9,7 @@ | |||
9 | DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \ | 9 | DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \ |
10 | kernel-hacking.xml kernel-locking.xml deviceiobook.xml \ | 10 | kernel-hacking.xml kernel-locking.xml deviceiobook.xml \ |
11 | procfs-guide.xml writing_usb_driver.xml \ | 11 | procfs-guide.xml writing_usb_driver.xml \ |
12 | sis900.xml kernel-api.xml journal-api.xml lsm.xml usb.xml \ | 12 | kernel-api.xml journal-api.xml lsm.xml usb.xml \ |
13 | gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml | 13 | gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml |
14 | 14 | ||
15 | ### | 15 | ### |
diff --git a/Documentation/DocBook/sis900.tmpl b/Documentation/DocBook/sis900.tmpl deleted file mode 100644 index 6c2cbac93c3f..000000000000 --- a/Documentation/DocBook/sis900.tmpl +++ /dev/null | |||
@@ -1,585 +0,0 @@ | |||
1 | <?xml version="1.0" encoding="UTF-8"?> | ||
2 | <!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" | ||
3 | "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []> | ||
4 | |||
5 | <book id="SiS900Guide"> | ||
6 | |||
7 | <bookinfo> | ||
8 | |||
9 | <title>SiS 900/7016 Fast Ethernet Device Driver</title> | ||
10 | |||
11 | <authorgroup> | ||
12 | <author> | ||
13 | <firstname>Ollie</firstname> | ||
14 | <surname>Lho</surname> | ||
15 | </author> | ||
16 | |||
17 | <author> | ||
18 | <firstname>Lei Chun</firstname> | ||
19 | <surname>Chang</surname> | ||
20 | </author> | ||
21 | </authorgroup> | ||
22 | |||
23 | <edition>Document Revision: 0.3 for SiS900 driver v1.06 & v1.07</edition> | ||
24 | <pubdate>November 16, 2000</pubdate> | ||
25 | |||
26 | <copyright> | ||
27 | <year>1999</year> | ||
28 | <holder>Silicon Integrated System Corp.</holder> | ||
29 | </copyright> | ||
30 | |||
31 | <legalnotice> | ||
32 | <para> | ||
33 | This program is free software; you can redistribute it and/or modify | ||
34 | it under the terms of the GNU General Public License as published by | ||
35 | the Free Software Foundation; either version 2 of the License, or | ||
36 | (at your option) any later version. | ||
37 | </para> | ||
38 | |||
39 | <para> | ||
40 | This program is distributed in the hope that it will be useful, | ||
41 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
42 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
43 | GNU General Public License for more details. | ||
44 | </para> | ||
45 | |||
46 | <para> | ||
47 | You should have received a copy of the GNU General Public License | ||
48 | along with this program; if not, write to the Free Software | ||
49 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
50 | </para> | ||
51 | </legalnotice> | ||
52 | |||
53 | <abstract> | ||
54 | <para> | ||
55 | This document gives some information on installation and usage of SiS 900/7016 | ||
56 | device driver under Linux. | ||
57 | </para> | ||
58 | </abstract> | ||
59 | |||
60 | </bookinfo> | ||
61 | |||
62 | <toc></toc> | ||
63 | |||
64 | <chapter id="intro"> | ||
65 | <title>Introduction</title> | ||
66 | |||
67 | <para> | ||
68 | This document describes the revision 1.06 and 1.07 of SiS 900/7016 Fast Ethernet | ||
69 | device driver under Linux. The driver is developed by Silicon Integrated | ||
70 | System Corp. and distributed freely under the GNU General Public License (GPL). | ||
71 | The driver can be compiled as a loadable module and used under Linux kernel | ||
72 | version 2.2.x. (rev. 1.06) | ||
73 | With minimal changes, the driver can also be used under 2.3.x and 2.4.x kernel | ||
74 | (rev. 1.07), please see | ||
75 | <xref linkend="install"/>. If you are intended to | ||
76 | use the driver for earlier kernels, you are on your own. | ||
77 | </para> | ||
78 | |||
79 | <para> | ||
80 | The driver is tested with usual TCP/IP applications including | ||
81 | FTP, Telnet, Netscape etc. and is used constantly by the developers. | ||
82 | </para> | ||
83 | |||
84 | <para> | ||
85 | Please send all comments/fixes/questions to | ||
86 | <ulink url="mailto:lcchang@sis.com.tw">Lei-Chun Chang</ulink>. | ||
87 | </para> | ||
88 | </chapter> | ||
89 | |||
90 | <chapter id="changes"> | ||
91 | <title>Changes</title> | ||
92 | |||
93 | <para> | ||
94 | Changes made in Revision 1.07 | ||
95 | |||
96 | <orderedlist> | ||
97 | <listitem> | ||
98 | <para> | ||
99 | Separation of sis900.c and sis900.h in order to move most | ||
100 | constant definition to sis900.h (many of those constants were | ||
101 | corrected) | ||
102 | </para> | ||
103 | </listitem> | ||
104 | |||
105 | <listitem> | ||
106 | <para> | ||
107 | Clean up PCI detection, the pci-scan from Donald Becker were not used, | ||
108 | just simple pci_find_*. | ||
109 | </para> | ||
110 | </listitem> | ||
111 | |||
112 | <listitem> | ||
113 | <para> | ||
114 | MII detection is modified to support multiple mii transceiver. | ||
115 | </para> | ||
116 | </listitem> | ||
117 | |||
118 | <listitem> | ||
119 | <para> | ||
120 | Bugs in read_eeprom, mdio_* were removed. | ||
121 | </para> | ||
122 | </listitem> | ||
123 | |||
124 | <listitem> | ||
125 | <para> | ||
126 | Lot of sis900 irrelevant comments were removed/changed and | ||
127 | more comments were added to reflect the real situation. | ||
128 | </para> | ||
129 | </listitem> | ||
130 | |||
131 | <listitem> | ||
132 | <para> | ||
133 | Clean up of physical/virtual address space mess in buffer | ||
134 | descriptors. | ||
135 | </para> | ||
136 | </listitem> | ||
137 | |||
138 | <listitem> | ||
139 | <para> | ||
140 | Better transmit/receive error handling. | ||
141 | </para> | ||
142 | </listitem> | ||
143 | |||
144 | <listitem> | ||
145 | <para> | ||
146 | The driver now uses zero-copy single buffer management | ||
147 | scheme to improve performance. | ||
148 | </para> | ||
149 | </listitem> | ||
150 | |||
151 | <listitem> | ||
152 | <para> | ||
153 | Names of variables were changed to be more consistent. | ||
154 | </para> | ||
155 | </listitem> | ||
156 | |||
157 | <listitem> | ||
158 | <para> | ||
159 | Clean up of auo-negotiation and timer code. | ||
160 | </para> | ||
161 | </listitem> | ||
162 | |||
163 | <listitem> | ||
164 | <para> | ||
165 | Automatic detection and change of PHY on the fly. | ||
166 | </para> | ||
167 | </listitem> | ||
168 | |||
169 | <listitem> | ||
170 | <para> | ||
171 | Bug in mac probing fixed. | ||
172 | </para> | ||
173 | </listitem> | ||
174 | |||
175 | <listitem> | ||
176 | <para> | ||
177 | Fix 630E equalier problem by modifying the equalizer workaround rule. | ||
178 | </para> | ||
179 | </listitem> | ||
180 | |||
181 | <listitem> | ||
182 | <para> | ||
183 | Support for ICS1893 10/100 Interated PHYceiver. | ||
184 | </para> | ||
185 | </listitem> | ||
186 | |||
187 | <listitem> | ||
188 | <para> | ||
189 | Support for media select by ifconfig. | ||
190 | </para> | ||
191 | </listitem> | ||
192 | |||
193 | <listitem> | ||
194 | <para> | ||
195 | Added kernel-doc extratable documentation. | ||
196 | </para> | ||
197 | </listitem> | ||
198 | |||
199 | </orderedlist> | ||
200 | </para> | ||
201 | </chapter> | ||
202 | |||
203 | <chapter id="tested"> | ||
204 | <title>Tested Environment</title> | ||
205 | |||
206 | <para> | ||
207 | This driver is developed on the following hardware | ||
208 | |||
209 | <itemizedlist> | ||
210 | <listitem> | ||
211 | |||
212 | <para> | ||
213 | Intel Celeron 500 with SiS 630 (rev 02) chipset | ||
214 | </para> | ||
215 | </listitem> | ||
216 | <listitem> | ||
217 | |||
218 | <para> | ||
219 | SiS 900 (rev 01) and SiS 7016/7014 Fast Ethernet Card | ||
220 | </para> | ||
221 | </listitem> | ||
222 | |||
223 | </itemizedlist> | ||
224 | |||
225 | and tested with these software environments | ||
226 | |||
227 | <itemizedlist> | ||
228 | <listitem> | ||
229 | |||
230 | <para> | ||
231 | Red Hat Linux version 6.2 | ||
232 | </para> | ||
233 | </listitem> | ||
234 | <listitem> | ||
235 | |||
236 | <para> | ||
237 | Linux kernel version 2.4.0 | ||
238 | </para> | ||
239 | </listitem> | ||
240 | <listitem> | ||
241 | |||
242 | <para> | ||
243 | Netscape version 4.6 | ||
244 | </para> | ||
245 | </listitem> | ||
246 | <listitem> | ||
247 | |||
248 | <para> | ||
249 | NcFTP 3.0.0 beta 18 | ||
250 | </para> | ||
251 | </listitem> | ||
252 | <listitem> | ||
253 | |||
254 | <para> | ||
255 | Samba version 2.0.3 | ||
256 | </para> | ||
257 | </listitem> | ||
258 | |||
259 | </itemizedlist> | ||
260 | |||
261 | </para> | ||
262 | |||
263 | </chapter> | ||
264 | |||
265 | <chapter id="files"> | ||
266 | <title>Files in This Package</title> | ||
267 | |||
268 | <para> | ||
269 | In the package you can find these files: | ||
270 | </para> | ||
271 | |||
272 | <para> | ||
273 | <variablelist> | ||
274 | |||
275 | <varlistentry> | ||
276 | <term>sis900.c</term> | ||
277 | <listitem> | ||
278 | <para> | ||
279 | Driver source file in C | ||
280 | </para> | ||
281 | </listitem> | ||
282 | </varlistentry> | ||
283 | |||
284 | <varlistentry> | ||
285 | <term>sis900.h</term> | ||
286 | <listitem> | ||
287 | <para> | ||
288 | Header file for sis900.c | ||
289 | </para> | ||
290 | </listitem> | ||
291 | </varlistentry> | ||
292 | |||
293 | <varlistentry> | ||
294 | <term>sis900.sgml</term> | ||
295 | <listitem> | ||
296 | <para> | ||
297 | DocBook SGML source of the document | ||
298 | </para> | ||
299 | </listitem> | ||
300 | </varlistentry> | ||
301 | |||
302 | <varlistentry> | ||
303 | <term>sis900.txt</term> | ||
304 | <listitem> | ||
305 | <para> | ||
306 | Driver document in plain text | ||
307 | </para> | ||
308 | </listitem> | ||
309 | </varlistentry> | ||
310 | |||
311 | </variablelist> | ||
312 | </para> | ||
313 | </chapter> | ||
314 | |||
315 | <chapter id="install"> | ||
316 | <title>Installation</title> | ||
317 | |||
318 | <para> | ||
319 | Silicon Integrated System Corp. is cooperating closely with core Linux Kernel | ||
320 | developers. The revisions of SiS 900 driver are distributed by the usuall channels | ||
321 | for kernel tar files and patches. Those kernel tar files for official kernel and | ||
322 | patches for kernel pre-release can be download at | ||
323 | <ulink url="http://ftp.kernel.org/pub/linux/kernel/">official kernel ftp site</ulink> | ||
324 | and its mirrors. | ||
325 | The 1.06 revision can be found in kernel version later than 2.3.15 and pre-2.2.14, | ||
326 | and 1.07 revision can be found in kernel version 2.4.0. | ||
327 | If you have no prior experience in networking under Linux, please read | ||
328 | <ulink url="http://www.tldp.org/">Ethernet HOWTO</ulink> and | ||
329 | <ulink url="http://www.tldp.org/">Networking HOWTO</ulink> available from | ||
330 | Linux Documentation Project (LDP). | ||
331 | </para> | ||
332 | |||
333 | <para> | ||
334 | The driver is bundled in release later than 2.2.11 and 2.3.15 so this | ||
335 | is the most easy case. | ||
336 | Be sure you have the appropriate packages for compiling kernel source. | ||
337 | Those packages are listed in Document/Changes in kernel source | ||
338 | distribution. If you have to install the driver other than those bundled | ||
339 | in kernel release, you should have your driver file | ||
340 | <filename>sis900.c</filename> and <filename>sis900.h</filename> | ||
341 | copied into <filename class="directory">/usr/src/linux/drivers/net/</filename> first. | ||
342 | There are two alternative ways to install the driver | ||
343 | </para> | ||
344 | |||
345 | <sect1> | ||
346 | <title>Building the driver as loadable module</title> | ||
347 | |||
348 | <para> | ||
349 | To build the driver as a loadable kernel module you have to reconfigure | ||
350 | the kernel to activate network support by | ||
351 | </para> | ||
352 | |||
353 | <para><screen> | ||
354 | make menuconfig | ||
355 | </screen></para> | ||
356 | |||
357 | <para> | ||
358 | Choose <quote>Loadable module support ---></quote>, | ||
359 | then select <quote>Enable loadable module support</quote>. | ||
360 | </para> | ||
361 | |||
362 | <para> | ||
363 | Choose <quote>Network Device Support ---></quote>, select | ||
364 | <quote>Ethernet (10 or 100Mbit)</quote>. | ||
365 | Then select <quote>EISA, VLB, PCI and on board controllers</quote>, | ||
366 | and choose <quote>SiS 900/7016 PCI Fast Ethernet Adapter support</quote> | ||
367 | to <quote>M</quote>. | ||
368 | </para> | ||
369 | |||
370 | <para> | ||
371 | After reconfiguring the kernel, you can make the driver module by | ||
372 | </para> | ||
373 | |||
374 | <para><screen> | ||
375 | make modules | ||
376 | </screen></para> | ||
377 | |||
378 | <para> | ||
379 | The driver should be compiled with no errors. After compiling the driver, | ||
380 | the driver can be installed to proper place by | ||
381 | </para> | ||
382 | |||
383 | <para><screen> | ||
384 | make modules_install | ||
385 | </screen></para> | ||
386 | |||
387 | <para> | ||
388 | Load the driver into kernel by | ||
389 | </para> | ||
390 | |||
391 | <para><screen> | ||
392 | insmod sis900 | ||
393 | </screen></para> | ||
394 | |||
395 | <para> | ||
396 | When loading the driver into memory, some information message can be view by | ||
397 | </para> | ||
398 | |||
399 | <para> | ||
400 | <screen> | ||
401 | dmesg | ||
402 | </screen> | ||
403 | |||
404 | or | ||
405 | |||
406 | <screen> | ||
407 | cat /var/log/message | ||
408 | </screen> | ||
409 | </para> | ||
410 | |||
411 | <para> | ||
412 | If the driver is loaded properly you will have messages similar to this: | ||
413 | </para> | ||
414 | |||
415 | <para><screen> | ||
416 | sis900.c: v1.07.06 11/07/2000 | ||
417 | eth0: SiS 900 PCI Fast Ethernet at 0xd000, IRQ 10, 00:00:e8:83:7f:a4. | ||
418 | eth0: SiS 900 Internal MII PHY transceiver found at address 1. | ||
419 | eth0: Using SiS 900 Internal MII PHY as default | ||
420 | </screen></para> | ||
421 | |||
422 | <para> | ||
423 | showing the version of the driver and the results of probing routine. | ||
424 | </para> | ||
425 | |||
426 | <para> | ||
427 | Once the driver is loaded, network can be brought up by | ||
428 | </para> | ||
429 | |||
430 | <para><screen> | ||
431 | /sbin/ifconfig eth0 IPADDR broadcast BROADCAST netmask NETMASK media TYPE | ||
432 | </screen></para> | ||
433 | |||
434 | <para> | ||
435 | where IPADDR, BROADCAST, NETMASK are your IP address, broadcast address and | ||
436 | netmask respectively. TYPE is used to set medium type used by the device. | ||
437 | Typical values are "10baseT"(twisted-pair 10Mbps Ethernet) or "100baseT" | ||
438 | (twisted-pair 100Mbps Ethernet). For more information on how to configure | ||
439 | network interface, please refer to | ||
440 | <ulink url="http://www.tldp.org/">Networking HOWTO</ulink>. | ||
441 | </para> | ||
442 | |||
443 | <para> | ||
444 | The link status is also shown by kernel messages. For example, after the | ||
445 | network interface is activated, you may have the message: | ||
446 | </para> | ||
447 | |||
448 | <para><screen> | ||
449 | eth0: Media Link On 100mbps full-duplex | ||
450 | </screen></para> | ||
451 | |||
452 | <para> | ||
453 | If you try to unplug the twist pair (TP) cable you will get | ||
454 | </para> | ||
455 | |||
456 | <para><screen> | ||
457 | eth0: Media Link Off | ||
458 | </screen></para> | ||
459 | |||
460 | <para> | ||
461 | indicating that the link is failed. | ||
462 | </para> | ||
463 | </sect1> | ||
464 | |||
465 | <sect1> | ||
466 | <title>Building the driver into kernel</title> | ||
467 | |||
468 | <para> | ||
469 | If you want to make the driver into kernel, choose <quote>Y</quote> | ||
470 | rather than <quote>M</quote> on | ||
471 | <quote>SiS 900/7016 PCI Fast Ethernet Adapter support</quote> | ||
472 | when configuring the kernel. Build the kernel image in the usual way | ||
473 | </para> | ||
474 | |||
475 | <para><screen> | ||
476 | make clean | ||
477 | |||
478 | make bzlilo | ||
479 | </screen></para> | ||
480 | |||
481 | <para> | ||
482 | Next time the system reboot, you have the driver in memory. | ||
483 | </para> | ||
484 | |||
485 | </sect1> | ||
486 | </chapter> | ||
487 | |||
488 | <chapter id="problems"> | ||
489 | <title>Known Problems and Bugs</title> | ||
490 | |||
491 | <para> | ||
492 | There are some known problems and bugs. If you find any other bugs please | ||
493 | mail to <ulink url="mailto:lcchang@sis.com.tw">lcchang@sis.com.tw</ulink> | ||
494 | |||
495 | <orderedlist> | ||
496 | |||
497 | <listitem> | ||
498 | <para> | ||
499 | AM79C901 HomePNA PHY is not thoroughly tested, there may be some | ||
500 | bugs in the <quote>on the fly</quote> change of transceiver. | ||
501 | </para> | ||
502 | </listitem> | ||
503 | |||
504 | <listitem> | ||
505 | <para> | ||
506 | A bug is hidden somewhere in the receive buffer management code, | ||
507 | the bug causes NULL pointer reference in the kernel. This fault is | ||
508 | caught before bad things happen and reported with the message: | ||
509 | |||
510 | <computeroutput> | ||
511 | eth0: NULL pointer encountered in Rx ring, skipping | ||
512 | </computeroutput> | ||
513 | |||
514 | which can be viewed with <literal remap="tt">dmesg</literal> or | ||
515 | <literal remap="tt">cat /var/log/message</literal>. | ||
516 | </para> | ||
517 | </listitem> | ||
518 | |||
519 | <listitem> | ||
520 | <para> | ||
521 | The media type change from 10Mbps to 100Mbps twisted-pair ethernet | ||
522 | by ifconfig causes the media link down. | ||
523 | </para> | ||
524 | </listitem> | ||
525 | |||
526 | </orderedlist> | ||
527 | </para> | ||
528 | </chapter> | ||
529 | |||
530 | <chapter id="RHistory"> | ||
531 | <title>Revision History</title> | ||
532 | |||
533 | <para> | ||
534 | <itemizedlist> | ||
535 | |||
536 | <listitem> | ||
537 | <para> | ||
538 | November 13, 2000, Revision 1.07, seventh release, 630E problem fixed | ||
539 | and further clean up. | ||
540 | </para> | ||
541 | </listitem> | ||
542 | |||
543 | <listitem> | ||
544 | <para> | ||
545 | November 4, 1999, Revision 1.06, Second release, lots of clean up | ||
546 | and optimization. | ||
547 | </para> | ||
548 | </listitem> | ||
549 | |||
550 | <listitem> | ||
551 | <para> | ||
552 | August 8, 1999, Revision 1.05, Initial Public Release | ||
553 | </para> | ||
554 | </listitem> | ||
555 | |||
556 | </itemizedlist> | ||
557 | </para> | ||
558 | </chapter> | ||
559 | |||
560 | <chapter id="acknowledgements"> | ||
561 | <title>Acknowledgements</title> | ||
562 | |||
563 | <para> | ||
564 | This driver was originally derived form | ||
565 | <ulink url="mailto:becker@cesdis1.gsfc.nasa.gov">Donald Becker</ulink>'s | ||
566 | <ulink url="ftp://cesdis.gsfc.nasa.gov/pub/linux/drivers/kern-2.3/pci-skeleton.c" | ||
567 | >pci-skeleton</ulink> and | ||
568 | <ulink url="ftp://cesdis.gsfc.nasa.gov/pub/linux/drivers/kern-2.3/rtl8139.c" | ||
569 | >rtl8139</ulink> drivers. Donald also provided various suggestion | ||
570 | regarded with improvements made in revision 1.06. | ||
571 | </para> | ||
572 | |||
573 | <para> | ||
574 | The 1.05 revision was created by | ||
575 | <ulink url="mailto:cmhuang@sis.com.tw">Jim Huang</ulink>, AMD 79c901 | ||
576 | support was added by <ulink url="mailto:lcs@sis.com.tw">Chin-Shan Li</ulink>. | ||
577 | </para> | ||
578 | </chapter> | ||
579 | |||
580 | <chapter id="functions"> | ||
581 | <title>List of Functions</title> | ||
582 | !Idrivers/net/sis900.c | ||
583 | </chapter> | ||
584 | |||
585 | </book> | ||
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 81bc51369f59..28a31c5e2289 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -151,6 +151,13 @@ Who: Ralf Baechle <ralf@linux-mips.org> | |||
151 | 151 | ||
152 | --------------------------- | 152 | --------------------------- |
153 | 153 | ||
154 | What: eepro100 network driver | ||
155 | When: January 2007 | ||
156 | Why: replaced by the e100 driver | ||
157 | Who: Adrian Bunk <bunk@stusta.de> | ||
158 | |||
159 | --------------------------- | ||
160 | |||
154 | What: Legacy /proc/pci interface (PCI_LEGACY_PROC) | 161 | What: Legacy /proc/pci interface (PCI_LEGACY_PROC) |
155 | When: March 2006 | 162 | When: March 2006 |
156 | Why: deprecated since 2.5.53 in favor of lspci(8) | 163 | Why: deprecated since 2.5.53 in favor of lspci(8) |
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 5b01d5cc4e95..b1181ce232d9 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX | |||
@@ -92,8 +92,6 @@ routing.txt | |||
92 | - the new routing mechanism | 92 | - the new routing mechanism |
93 | shaper.txt | 93 | shaper.txt |
94 | - info on the module that can shape/limit transmitted traffic. | 94 | - info on the module that can shape/limit transmitted traffic. |
95 | sis900.txt | ||
96 | - SiS 900/7016 Fast Ethernet device driver info. | ||
97 | sk98lin.txt | 95 | sk98lin.txt |
98 | - Marvell Yukon Chipset / SysKonnect SK-98xx compliant Gigabit | 96 | - Marvell Yukon Chipset / SysKonnect SK-98xx compliant Gigabit |
99 | Ethernet Adapter family driver info | 97 | Ethernet Adapter family driver info |
diff --git a/Documentation/networking/README.ipw2100 b/Documentation/networking/README.ipw2100 index 3ab40379d1cf..f3fcaa41f774 100644 --- a/Documentation/networking/README.ipw2100 +++ b/Documentation/networking/README.ipw2100 | |||
@@ -3,18 +3,18 @@ Intel(R) PRO/Wireless 2100 Driver for Linux in support of: | |||
3 | 3 | ||
4 | Intel(R) PRO/Wireless 2100 Network Connection | 4 | Intel(R) PRO/Wireless 2100 Network Connection |
5 | 5 | ||
6 | Copyright (C) 2003-2005, Intel Corporation | 6 | Copyright (C) 2003-2006, Intel Corporation |
7 | 7 | ||
8 | README.ipw2100 | 8 | README.ipw2100 |
9 | 9 | ||
10 | Version: 1.1.3 | 10 | Version: git-1.1.5 |
11 | Date : October 17, 2005 | 11 | Date : January 25, 2006 |
12 | 12 | ||
13 | Index | 13 | Index |
14 | ----------------------------------------------- | 14 | ----------------------------------------------- |
15 | 0. IMPORTANT INFORMATION BEFORE USING THIS DRIVER | 15 | 0. IMPORTANT INFORMATION BEFORE USING THIS DRIVER |
16 | 1. Introduction | 16 | 1. Introduction |
17 | 2. Release 1.1.3 Current Features | 17 | 2. Release git-1.1.5 Current Features |
18 | 3. Command Line Parameters | 18 | 3. Command Line Parameters |
19 | 4. Sysfs Helper Files | 19 | 4. Sysfs Helper Files |
20 | 5. Radio Kill Switch | 20 | 5. Radio Kill Switch |
@@ -89,7 +89,7 @@ potential fixes and patches, as well as links to the development mailing list | |||
89 | for the driver project. | 89 | for the driver project. |
90 | 90 | ||
91 | 91 | ||
92 | 2. Release 1.1.3 Current Supported Features | 92 | 2. Release git-1.1.5 Current Supported Features |
93 | ----------------------------------------------- | 93 | ----------------------------------------------- |
94 | - Managed (BSS) and Ad-Hoc (IBSS) | 94 | - Managed (BSS) and Ad-Hoc (IBSS) |
95 | - WEP (shared key and open) | 95 | - WEP (shared key and open) |
@@ -270,7 +270,7 @@ For installation support on the ipw2100 1.1.0 driver on Linux kernels | |||
270 | 9. License | 270 | 9. License |
271 | ----------------------------------------------- | 271 | ----------------------------------------------- |
272 | 272 | ||
273 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 273 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
274 | 274 | ||
275 | This program is free software; you can redistribute it and/or modify it | 275 | This program is free software; you can redistribute it and/or modify it |
276 | under the terms of the GNU General Public License (version 2) as | 276 | under the terms of the GNU General Public License (version 2) as |
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200 index c6492d3839fa..acb30c5dcff3 100644 --- a/Documentation/networking/README.ipw2200 +++ b/Documentation/networking/README.ipw2200 | |||
@@ -10,7 +10,7 @@ both hardware adapters listed above. In this document the Intel(R) | |||
10 | PRO/Wireless 2915ABG Driver for Linux will be used to reference the | 10 | PRO/Wireless 2915ABG Driver for Linux will be used to reference the |
11 | unified driver. | 11 | unified driver. |
12 | 12 | ||
13 | Copyright (C) 2004-2005, Intel Corporation | 13 | Copyright (C) 2004-2006, Intel Corporation |
14 | 14 | ||
15 | README.ipw2200 | 15 | README.ipw2200 |
16 | 16 | ||
@@ -26,9 +26,11 @@ Index | |||
26 | 1.2. Module parameters | 26 | 1.2. Module parameters |
27 | 1.3. Wireless Extension Private Methods | 27 | 1.3. Wireless Extension Private Methods |
28 | 1.4. Sysfs Helper Files | 28 | 1.4. Sysfs Helper Files |
29 | 1.5. Supported channels | ||
29 | 2. Ad-Hoc Networking | 30 | 2. Ad-Hoc Networking |
30 | 3. Interacting with Wireless Tools | 31 | 3. Interacting with Wireless Tools |
31 | 3.1. iwconfig mode | 32 | 3.1. iwconfig mode |
33 | 3.2. iwconfig sens | ||
32 | 4. About the Version Numbers | 34 | 4. About the Version Numbers |
33 | 5. Firmware installation | 35 | 5. Firmware installation |
34 | 6. Support | 36 | 6. Support |
@@ -314,6 +316,35 @@ For the device level files, see /sys/bus/pci/drivers/ipw2200: | |||
314 | running ifconfig and is therefore disabled by default. | 316 | running ifconfig and is therefore disabled by default. |
315 | 317 | ||
316 | 318 | ||
319 | 1.5. Supported channels | ||
320 | ----------------------------------------------- | ||
321 | |||
322 | Upon loading the Intel(R) PRO/Wireless 2915ABG Driver for Linux, a | ||
323 | message stating the detected geography code and the number of 802.11 | ||
324 | channels supported by the card will be displayed in the log. | ||
325 | |||
326 | The geography code corresponds to a regulatory domain as shown in the | ||
327 | table below. | ||
328 | |||
329 | Supported channels | ||
330 | Code Geography 802.11bg 802.11a | ||
331 | |||
332 | --- Restricted 11 0 | ||
333 | ZZF Custom US/Canada 11 8 | ||
334 | ZZD Rest of World 13 0 | ||
335 | ZZA Custom USA & Europe & High 11 13 | ||
336 | ZZB Custom NA & Europe 11 13 | ||
337 | ZZC Custom Japan 11 4 | ||
338 | ZZM Custom 11 0 | ||
339 | ZZE Europe 13 19 | ||
340 | ZZJ Custom Japan 14 4 | ||
341 | ZZR Rest of World 14 0 | ||
342 | ZZH High Band 13 4 | ||
343 | ZZG Custom Europe 13 4 | ||
344 | ZZK Europe 13 24 | ||
345 | ZZL Europe 11 13 | ||
346 | |||
347 | |||
317 | 2. Ad-Hoc Networking | 348 | 2. Ad-Hoc Networking |
318 | ----------------------------------------------- | 349 | ----------------------------------------------- |
319 | 350 | ||
@@ -353,6 +384,15 @@ When configuring the mode of the adapter, all run-time configured parameters | |||
353 | are reset to the value used when the module was loaded. This includes | 384 | are reset to the value used when the module was loaded. This includes |
354 | channels, rates, ESSID, etc. | 385 | channels, rates, ESSID, etc. |
355 | 386 | ||
387 | 3.2 iwconfig sens | ||
388 | ----------------------------------------------- | ||
389 | |||
390 | The 'iwconfig ethX sens XX' command will not set the signal sensitivity | ||
391 | threshold, as described in iwconfig documentation, but rather the number | ||
392 | of consecutive missed beacons that will trigger handover, i.e. roaming | ||
393 | to another access point. At the same time, it will set the disassociation | ||
394 | threshold to 3 times the given value. | ||
395 | |||
356 | 396 | ||
357 | 4. About the Version Numbers | 397 | 4. About the Version Numbers |
358 | ----------------------------------------------- | 398 | ----------------------------------------------- |
@@ -408,7 +448,7 @@ For general information and support, go to: | |||
408 | 7. License | 448 | 7. License |
409 | ----------------------------------------------- | 449 | ----------------------------------------------- |
410 | 450 | ||
411 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 451 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
412 | 452 | ||
413 | This program is free software; you can redistribute it and/or modify it | 453 | This program is free software; you can redistribute it and/or modify it |
414 | under the terms of the GNU General Public License version 2 as | 454 | under the terms of the GNU General Public License version 2 as |
diff --git a/Documentation/networking/sis900.txt b/Documentation/networking/sis900.txt deleted file mode 100644 index bddffd7385ae..000000000000 --- a/Documentation/networking/sis900.txt +++ /dev/null | |||
@@ -1,257 +0,0 @@ | |||
1 | |||
2 | SiS 900/7016 Fast Ethernet Device Driver | ||
3 | |||
4 | Ollie Lho | ||
5 | |||
6 | Lei Chun Chang | ||
7 | |||
8 | Copyright © 1999 by Silicon Integrated System Corp. | ||
9 | |||
10 | This document gives some information on installation and usage of SiS | ||
11 | 900/7016 device driver under Linux. | ||
12 | |||
13 | This program is free software; you can redistribute it and/or modify | ||
14 | it under the terms of the GNU General Public License as published by | ||
15 | the Free Software Foundation; either version 2 of the License, or (at | ||
16 | your option) any later version. | ||
17 | |||
18 | This program is distributed in the hope that it will be useful, but | ||
19 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
21 | General Public License for more details. | ||
22 | |||
23 | You should have received a copy of the GNU General Public License | ||
24 | along with this program; if not, write to the Free Software | ||
25 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
26 | USA | ||
27 | _________________________________________________________________ | ||
28 | |||
29 | Table of Contents | ||
30 | 1. Introduction | ||
31 | 2. Changes | ||
32 | 3. Tested Environment | ||
33 | 4. Files in This Package | ||
34 | 5. Installation | ||
35 | |||
36 | Building the driver as loadable module | ||
37 | Building the driver into kernel | ||
38 | |||
39 | 6. Known Problems and Bugs | ||
40 | 7. Revision History | ||
41 | 8. Acknowledgements | ||
42 | _________________________________________________________________ | ||
43 | |||
44 | Chapter 1. Introduction | ||
45 | |||
46 | This document describes the revision 1.06 and 1.07 of SiS 900/7016 | ||
47 | Fast Ethernet device driver under Linux. The driver is developed by | ||
48 | Silicon Integrated System Corp. and distributed freely under the GNU | ||
49 | General Public License (GPL). The driver can be compiled as a loadable | ||
50 | module and used under Linux kernel version 2.2.x. (rev. 1.06) With | ||
51 | minimal changes, the driver can also be used under 2.3.x and 2.4.x | ||
52 | kernel (rev. 1.07), please see Chapter 5. If you are intended to use | ||
53 | the driver for earlier kernels, you are on your own. | ||
54 | |||
55 | The driver is tested with usual TCP/IP applications including FTP, | ||
56 | Telnet, Netscape etc. and is used constantly by the developers. | ||
57 | |||
58 | Please send all comments/fixes/questions to Lei-Chun Chang. | ||
59 | _________________________________________________________________ | ||
60 | |||
61 | Chapter 2. Changes | ||
62 | |||
63 | Changes made in Revision 1.07 | ||
64 | |||
65 | 1. Separation of sis900.c and sis900.h in order to move most constant | ||
66 | definition to sis900.h (many of those constants were corrected) | ||
67 | 2. Clean up PCI detection, the pci-scan from Donald Becker were not | ||
68 | used, just simple pci_find_*. | ||
69 | 3. MII detection is modified to support multiple mii transceiver. | ||
70 | 4. Bugs in read_eeprom, mdio_* were removed. | ||
71 | 5. Lot of sis900 irrelevant comments were removed/changed and more | ||
72 | comments were added to reflect the real situation. | ||
73 | 6. Clean up of physical/virtual address space mess in buffer | ||
74 | descriptors. | ||
75 | 7. Better transmit/receive error handling. | ||
76 | 8. The driver now uses zero-copy single buffer management scheme to | ||
77 | improve performance. | ||
78 | 9. Names of variables were changed to be more consistent. | ||
79 | 10. Clean up of auo-negotiation and timer code. | ||
80 | 11. Automatic detection and change of PHY on the fly. | ||
81 | 12. Bug in mac probing fixed. | ||
82 | 13. Fix 630E equalier problem by modifying the equalizer workaround | ||
83 | rule. | ||
84 | 14. Support for ICS1893 10/100 Interated PHYceiver. | ||
85 | 15. Support for media select by ifconfig. | ||
86 | 16. Added kernel-doc extratable documentation. | ||
87 | _________________________________________________________________ | ||
88 | |||
89 | Chapter 3. Tested Environment | ||
90 | |||
91 | This driver is developed on the following hardware | ||
92 | |||
93 | * Intel Celeron 500 with SiS 630 (rev 02) chipset | ||
94 | * SiS 900 (rev 01) and SiS 7016/7014 Fast Ethernet Card | ||
95 | |||
96 | and tested with these software environments | ||
97 | |||
98 | * Red Hat Linux version 6.2 | ||
99 | * Linux kernel version 2.4.0 | ||
100 | * Netscape version 4.6 | ||
101 | * NcFTP 3.0.0 beta 18 | ||
102 | * Samba version 2.0.3 | ||
103 | _________________________________________________________________ | ||
104 | |||
105 | Chapter 4. Files in This Package | ||
106 | |||
107 | In the package you can find these files: | ||
108 | |||
109 | sis900.c | ||
110 | Driver source file in C | ||
111 | |||
112 | sis900.h | ||
113 | Header file for sis900.c | ||
114 | |||
115 | sis900.sgml | ||
116 | DocBook SGML source of the document | ||
117 | |||
118 | sis900.txt | ||
119 | Driver document in plain text | ||
120 | _________________________________________________________________ | ||
121 | |||
122 | Chapter 5. Installation | ||
123 | |||
124 | Silicon Integrated System Corp. is cooperating closely with core Linux | ||
125 | Kernel developers. The revisions of SiS 900 driver are distributed by | ||
126 | the usuall channels for kernel tar files and patches. Those kernel tar | ||
127 | files for official kernel and patches for kernel pre-release can be | ||
128 | download at official kernel ftp site and its mirrors. The 1.06 | ||
129 | revision can be found in kernel version later than 2.3.15 and | ||
130 | pre-2.2.14, and 1.07 revision can be found in kernel version 2.4.0. If | ||
131 | you have no prior experience in networking under Linux, please read | ||
132 | Ethernet HOWTO and Networking HOWTO available from Linux Documentation | ||
133 | Project (LDP). | ||
134 | |||
135 | The driver is bundled in release later than 2.2.11 and 2.3.15 so this | ||
136 | is the most easy case. Be sure you have the appropriate packages for | ||
137 | compiling kernel source. Those packages are listed in Document/Changes | ||
138 | in kernel source distribution. If you have to install the driver other | ||
139 | than those bundled in kernel release, you should have your driver file | ||
140 | sis900.c and sis900.h copied into /usr/src/linux/drivers/net/ first. | ||
141 | There are two alternative ways to install the driver | ||
142 | _________________________________________________________________ | ||
143 | |||
144 | Building the driver as loadable module | ||
145 | |||
146 | To build the driver as a loadable kernel module you have to | ||
147 | reconfigure the kernel to activate network support by | ||
148 | |||
149 | make menuconfig | ||
150 | |||
151 | Choose "Loadable module support --->", then select "Enable loadable | ||
152 | module support". | ||
153 | |||
154 | Choose "Network Device Support --->", select "Ethernet (10 or | ||
155 | 100Mbit)". Then select "EISA, VLB, PCI and on board controllers", and | ||
156 | choose "SiS 900/7016 PCI Fast Ethernet Adapter support" to "M". | ||
157 | |||
158 | After reconfiguring the kernel, you can make the driver module by | ||
159 | |||
160 | make modules | ||
161 | |||
162 | The driver should be compiled with no errors. After compiling the | ||
163 | driver, the driver can be installed to proper place by | ||
164 | |||
165 | make modules_install | ||
166 | |||
167 | Load the driver into kernel by | ||
168 | |||
169 | insmod sis900 | ||
170 | |||
171 | When loading the driver into memory, some information message can be | ||
172 | view by | ||
173 | |||
174 | dmesg | ||
175 | |||
176 | or | ||
177 | cat /var/log/message | ||
178 | |||
179 | If the driver is loaded properly you will have messages similar to | ||
180 | this: | ||
181 | |||
182 | sis900.c: v1.07.06 11/07/2000 | ||
183 | eth0: SiS 900 PCI Fast Ethernet at 0xd000, IRQ 10, 00:00:e8:83:7f:a4. | ||
184 | eth0: SiS 900 Internal MII PHY transceiver found at address 1. | ||
185 | eth0: Using SiS 900 Internal MII PHY as default | ||
186 | |||
187 | showing the version of the driver and the results of probing routine. | ||
188 | |||
189 | Once the driver is loaded, network can be brought up by | ||
190 | |||
191 | /sbin/ifconfig eth0 IPADDR broadcast BROADCAST netmask NETMASK media TYPE | ||
192 | |||
193 | where IPADDR, BROADCAST, NETMASK are your IP address, broadcast | ||
194 | address and netmask respectively. TYPE is used to set medium type used | ||
195 | by the device. Typical values are "10baseT"(twisted-pair 10Mbps | ||
196 | Ethernet) or "100baseT" (twisted-pair 100Mbps Ethernet). For more | ||
197 | information on how to configure network interface, please refer to | ||
198 | Networking HOWTO. | ||
199 | |||
200 | The link status is also shown by kernel messages. For example, after | ||
201 | the network interface is activated, you may have the message: | ||
202 | |||
203 | eth0: Media Link On 100mbps full-duplex | ||
204 | |||
205 | If you try to unplug the twist pair (TP) cable you will get | ||
206 | |||
207 | eth0: Media Link Off | ||
208 | |||
209 | indicating that the link is failed. | ||
210 | _________________________________________________________________ | ||
211 | |||
212 | Building the driver into kernel | ||
213 | |||
214 | If you want to make the driver into kernel, choose "Y" rather than "M" | ||
215 | on "SiS 900/7016 PCI Fast Ethernet Adapter support" when configuring | ||
216 | the kernel. Build the kernel image in the usual way | ||
217 | |||
218 | make clean | ||
219 | |||
220 | make bzlilo | ||
221 | |||
222 | Next time the system reboot, you have the driver in memory. | ||
223 | _________________________________________________________________ | ||
224 | |||
225 | Chapter 6. Known Problems and Bugs | ||
226 | |||
227 | There are some known problems and bugs. If you find any other bugs | ||
228 | please mail to lcchang@sis.com.tw | ||
229 | |||
230 | 1. AM79C901 HomePNA PHY is not thoroughly tested, there may be some | ||
231 | bugs in the "on the fly" change of transceiver. | ||
232 | 2. A bug is hidden somewhere in the receive buffer management code, | ||
233 | the bug causes NULL pointer reference in the kernel. This fault is | ||
234 | caught before bad things happen and reported with the message: | ||
235 | eth0: NULL pointer encountered in Rx ring, skipping which can be | ||
236 | viewed with dmesg or cat /var/log/message. | ||
237 | 3. The media type change from 10Mbps to 100Mbps twisted-pair ethernet | ||
238 | by ifconfig causes the media link down. | ||
239 | _________________________________________________________________ | ||
240 | |||
241 | Chapter 7. Revision History | ||
242 | |||
243 | * November 13, 2000, Revision 1.07, seventh release, 630E problem | ||
244 | fixed and further clean up. | ||
245 | * November 4, 1999, Revision 1.06, Second release, lots of clean up | ||
246 | and optimization. | ||
247 | * August 8, 1999, Revision 1.05, Initial Public Release | ||
248 | _________________________________________________________________ | ||
249 | |||
250 | Chapter 8. Acknowledgements | ||
251 | |||
252 | This driver was originally derived form Donald Becker's pci-skeleton | ||
253 | and rtl8139 drivers. Donald also provided various suggestion regarded | ||
254 | with improvements made in revision 1.06. | ||
255 | |||
256 | The 1.05 revision was created by Jim Huang, AMD 79c901 support was | ||
257 | added by Chin-Shan Li. | ||
diff --git a/arch/ppc/platforms/hdpu.c b/arch/ppc/platforms/hdpu.c index 50039a204c24..f945416960e9 100644 --- a/arch/ppc/platforms/hdpu.c +++ b/arch/ppc/platforms/hdpu.c | |||
@@ -319,11 +319,10 @@ static void __init hdpu_fixup_eth_pdata(struct platform_device *pd) | |||
319 | struct mv643xx_eth_platform_data *eth_pd; | 319 | struct mv643xx_eth_platform_data *eth_pd; |
320 | eth_pd = pd->dev.platform_data; | 320 | eth_pd = pd->dev.platform_data; |
321 | 321 | ||
322 | eth_pd->port_serial_control = | ||
323 | mv64x60_read(&bh, MV643XX_ETH_PORT_SERIAL_CONTROL_REG(pd->id) & ~1); | ||
324 | |||
325 | eth_pd->force_phy_addr = 1; | 322 | eth_pd->force_phy_addr = 1; |
326 | eth_pd->phy_addr = pd->id; | 323 | eth_pd->phy_addr = pd->id; |
324 | eth_pd->speed = SPEED_100; | ||
325 | eth_pd->duplex = DUPLEX_FULL; | ||
327 | eth_pd->tx_queue_size = 400; | 326 | eth_pd->tx_queue_size = 400; |
328 | eth_pd->rx_queue_size = 800; | 327 | eth_pd->rx_queue_size = 800; |
329 | } | 328 | } |
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index d39c9f206271..460f72e640e6 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -217,7 +217,7 @@ static void _sparc_free_io(struct resource *res) | |||
217 | unsigned long plen; | 217 | unsigned long plen; |
218 | 218 | ||
219 | plen = res->end - res->start + 1; | 219 | plen = res->end - res->start + 1; |
220 | if ((plen & (PAGE_SIZE-1)) != 0) BUG(); | 220 | BUG_ON((plen & (PAGE_SIZE-1)) != 0); |
221 | sparc_unmapiorange(res->start, plen); | 221 | sparc_unmapiorange(res->start, plen); |
222 | release_resource(res); | 222 | release_resource(res); |
223 | } | 223 | } |
@@ -512,8 +512,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) | |||
512 | dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | 512 | dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, |
513 | int direction) | 513 | int direction) |
514 | { | 514 | { |
515 | if (direction == PCI_DMA_NONE) | 515 | BUG_ON(direction == PCI_DMA_NONE); |
516 | BUG(); | ||
517 | /* IIep is write-through, not flushing. */ | 516 | /* IIep is write-through, not flushing. */ |
518 | return virt_to_phys(ptr); | 517 | return virt_to_phys(ptr); |
519 | } | 518 | } |
@@ -528,8 +527,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | |||
528 | void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, | 527 | void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, |
529 | int direction) | 528 | int direction) |
530 | { | 529 | { |
531 | if (direction == PCI_DMA_NONE) | 530 | BUG_ON(direction == PCI_DMA_NONE); |
532 | BUG(); | ||
533 | if (direction != PCI_DMA_TODEVICE) { | 531 | if (direction != PCI_DMA_TODEVICE) { |
534 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 532 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
535 | (size + PAGE_SIZE-1) & PAGE_MASK); | 533 | (size + PAGE_SIZE-1) & PAGE_MASK); |
@@ -542,8 +540,7 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, | |||
542 | dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | 540 | dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, |
543 | unsigned long offset, size_t size, int direction) | 541 | unsigned long offset, size_t size, int direction) |
544 | { | 542 | { |
545 | if (direction == PCI_DMA_NONE) | 543 | BUG_ON(direction == PCI_DMA_NONE); |
546 | BUG(); | ||
547 | /* IIep is write-through, not flushing. */ | 544 | /* IIep is write-through, not flushing. */ |
548 | return page_to_phys(page) + offset; | 545 | return page_to_phys(page) + offset; |
549 | } | 546 | } |
@@ -551,8 +548,7 @@ dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | |||
551 | void pci_unmap_page(struct pci_dev *hwdev, | 548 | void pci_unmap_page(struct pci_dev *hwdev, |
552 | dma_addr_t dma_address, size_t size, int direction) | 549 | dma_addr_t dma_address, size_t size, int direction) |
553 | { | 550 | { |
554 | if (direction == PCI_DMA_NONE) | 551 | BUG_ON(direction == PCI_DMA_NONE); |
555 | BUG(); | ||
556 | /* mmu_inval_dma_area XXX */ | 552 | /* mmu_inval_dma_area XXX */ |
557 | } | 553 | } |
558 | 554 | ||
@@ -576,11 +572,10 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, | |||
576 | { | 572 | { |
577 | int n; | 573 | int n; |
578 | 574 | ||
579 | if (direction == PCI_DMA_NONE) | 575 | BUG_ON(direction == PCI_DMA_NONE); |
580 | BUG(); | ||
581 | /* IIep is write-through, not flushing. */ | 576 | /* IIep is write-through, not flushing. */ |
582 | for (n = 0; n < nents; n++) { | 577 | for (n = 0; n < nents; n++) { |
583 | if (page_address(sg->page) == NULL) BUG(); | 578 | BUG_ON(page_address(sg->page) == NULL); |
584 | sg->dvma_address = virt_to_phys(page_address(sg->page)); | 579 | sg->dvma_address = virt_to_phys(page_address(sg->page)); |
585 | sg->dvma_length = sg->length; | 580 | sg->dvma_length = sg->length; |
586 | sg++; | 581 | sg++; |
@@ -597,11 +592,10 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, | |||
597 | { | 592 | { |
598 | int n; | 593 | int n; |
599 | 594 | ||
600 | if (direction == PCI_DMA_NONE) | 595 | BUG_ON(direction == PCI_DMA_NONE); |
601 | BUG(); | ||
602 | if (direction != PCI_DMA_TODEVICE) { | 596 | if (direction != PCI_DMA_TODEVICE) { |
603 | for (n = 0; n < nents; n++) { | 597 | for (n = 0; n < nents; n++) { |
604 | if (page_address(sg->page) == NULL) BUG(); | 598 | BUG_ON(page_address(sg->page) == NULL); |
605 | mmu_inval_dma_area( | 599 | mmu_inval_dma_area( |
606 | (unsigned long) page_address(sg->page), | 600 | (unsigned long) page_address(sg->page), |
607 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); | 601 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); |
@@ -622,8 +616,7 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, | |||
622 | */ | 616 | */ |
623 | void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | 617 | void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) |
624 | { | 618 | { |
625 | if (direction == PCI_DMA_NONE) | 619 | BUG_ON(direction == PCI_DMA_NONE); |
626 | BUG(); | ||
627 | if (direction != PCI_DMA_TODEVICE) { | 620 | if (direction != PCI_DMA_TODEVICE) { |
628 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 621 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
629 | (size + PAGE_SIZE-1) & PAGE_MASK); | 622 | (size + PAGE_SIZE-1) & PAGE_MASK); |
@@ -632,8 +625,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t si | |||
632 | 625 | ||
633 | void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | 626 | void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) |
634 | { | 627 | { |
635 | if (direction == PCI_DMA_NONE) | 628 | BUG_ON(direction == PCI_DMA_NONE); |
636 | BUG(); | ||
637 | if (direction != PCI_DMA_TODEVICE) { | 629 | if (direction != PCI_DMA_TODEVICE) { |
638 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | 630 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), |
639 | (size + PAGE_SIZE-1) & PAGE_MASK); | 631 | (size + PAGE_SIZE-1) & PAGE_MASK); |
@@ -650,11 +642,10 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int | |||
650 | { | 642 | { |
651 | int n; | 643 | int n; |
652 | 644 | ||
653 | if (direction == PCI_DMA_NONE) | 645 | BUG_ON(direction == PCI_DMA_NONE); |
654 | BUG(); | ||
655 | if (direction != PCI_DMA_TODEVICE) { | 646 | if (direction != PCI_DMA_TODEVICE) { |
656 | for (n = 0; n < nents; n++) { | 647 | for (n = 0; n < nents; n++) { |
657 | if (page_address(sg->page) == NULL) BUG(); | 648 | BUG_ON(page_address(sg->page) == NULL); |
658 | mmu_inval_dma_area( | 649 | mmu_inval_dma_area( |
659 | (unsigned long) page_address(sg->page), | 650 | (unsigned long) page_address(sg->page), |
660 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); | 651 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); |
@@ -667,11 +658,10 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, i | |||
667 | { | 658 | { |
668 | int n; | 659 | int n; |
669 | 660 | ||
670 | if (direction == PCI_DMA_NONE) | 661 | BUG_ON(direction == PCI_DMA_NONE); |
671 | BUG(); | ||
672 | if (direction != PCI_DMA_TODEVICE) { | 662 | if (direction != PCI_DMA_TODEVICE) { |
673 | for (n = 0; n < nents; n++) { | 663 | for (n = 0; n < nents; n++) { |
674 | if (page_address(sg->page) == NULL) BUG(); | 664 | BUG_ON(page_address(sg->page) == NULL); |
675 | mmu_inval_dma_area( | 665 | mmu_inval_dma_area( |
676 | (unsigned long) page_address(sg->page), | 666 | (unsigned long) page_address(sg->page), |
677 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); | 667 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); |
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 4c0a50a76554..c3685b314d71 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig | |||
@@ -186,6 +186,15 @@ endchoice | |||
186 | 186 | ||
187 | endmenu | 187 | endmenu |
188 | 188 | ||
189 | config ARCH_SPARSEMEM_ENABLE | ||
190 | def_bool y | ||
191 | |||
192 | config ARCH_SPARSEMEM_DEFAULT | ||
193 | def_bool y | ||
194 | |||
195 | config LARGE_ALLOCS | ||
196 | def_bool y | ||
197 | |||
189 | source "mm/Kconfig" | 198 | source "mm/Kconfig" |
190 | 199 | ||
191 | config GENERIC_ISA_DMA | 200 | config GENERIC_ISA_DMA |
@@ -350,6 +359,15 @@ config SOLARIS_EMUL | |||
350 | 359 | ||
351 | endmenu | 360 | endmenu |
352 | 361 | ||
362 | config SCHED_SMT | ||
363 | bool "SMT (Hyperthreading) scheduler support" | ||
364 | depends on SMP | ||
365 | default y | ||
366 | help | ||
367 | SMT scheduler support improves the CPU scheduler's decision making | ||
368 | when dealing with UltraSPARC cpus at a cost of slightly increased | ||
369 | overhead in some places. If unsure say N here. | ||
370 | |||
353 | config CMDLINE_BOOL | 371 | config CMDLINE_BOOL |
354 | bool "Default bootloader kernel arguments" | 372 | bool "Default bootloader kernel arguments" |
355 | 373 | ||
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig index 069d49777b2a..f819a9663a8d 100644 --- a/arch/sparc64/defconfig +++ b/arch/sparc64/defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.16-rc2 | 3 | # Linux kernel version: 2.6.16 |
4 | # Tue Feb 7 17:47:18 2006 | 4 | # Mon Mar 20 01:23:21 2006 |
5 | # | 5 | # |
6 | CONFIG_SPARC=y | 6 | CONFIG_SPARC=y |
7 | CONFIG_SPARC64=y | 7 | CONFIG_SPARC64=y |
@@ -115,14 +115,20 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y | |||
115 | CONFIG_HUGETLB_PAGE_SIZE_4MB=y | 115 | CONFIG_HUGETLB_PAGE_SIZE_4MB=y |
116 | # CONFIG_HUGETLB_PAGE_SIZE_512K is not set | 116 | # CONFIG_HUGETLB_PAGE_SIZE_512K is not set |
117 | # CONFIG_HUGETLB_PAGE_SIZE_64K is not set | 117 | # CONFIG_HUGETLB_PAGE_SIZE_64K is not set |
118 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | ||
119 | CONFIG_ARCH_SPARSEMEM_DEFAULT=y | ||
120 | CONFIG_LARGE_ALLOCS=y | ||
118 | CONFIG_SELECT_MEMORY_MODEL=y | 121 | CONFIG_SELECT_MEMORY_MODEL=y |
119 | CONFIG_FLATMEM_MANUAL=y | 122 | # CONFIG_FLATMEM_MANUAL is not set |
120 | # CONFIG_DISCONTIGMEM_MANUAL is not set | 123 | # CONFIG_DISCONTIGMEM_MANUAL is not set |
121 | # CONFIG_SPARSEMEM_MANUAL is not set | 124 | CONFIG_SPARSEMEM_MANUAL=y |
122 | CONFIG_FLATMEM=y | 125 | CONFIG_SPARSEMEM=y |
123 | CONFIG_FLAT_NODE_MEM_MAP=y | 126 | CONFIG_HAVE_MEMORY_PRESENT=y |
124 | # CONFIG_SPARSEMEM_STATIC is not set | 127 | # CONFIG_SPARSEMEM_STATIC is not set |
128 | CONFIG_SPARSEMEM_EXTREME=y | ||
129 | CONFIG_MEMORY_HOTPLUG=y | ||
125 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 130 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
131 | CONFIG_MIGRATION=y | ||
126 | CONFIG_GENERIC_ISA_DMA=y | 132 | CONFIG_GENERIC_ISA_DMA=y |
127 | CONFIG_SBUS=y | 133 | CONFIG_SBUS=y |
128 | CONFIG_SBUSCHAR=y | 134 | CONFIG_SBUSCHAR=y |
@@ -655,6 +661,7 @@ CONFIG_SERIAL_SUNCORE=y | |||
655 | CONFIG_SERIAL_SUNSU=y | 661 | CONFIG_SERIAL_SUNSU=y |
656 | CONFIG_SERIAL_SUNSU_CONSOLE=y | 662 | CONFIG_SERIAL_SUNSU_CONSOLE=y |
657 | CONFIG_SERIAL_SUNSAB=m | 663 | CONFIG_SERIAL_SUNSAB=m |
664 | CONFIG_SERIAL_SUNHV=y | ||
658 | CONFIG_SERIAL_CORE=y | 665 | CONFIG_SERIAL_CORE=y |
659 | CONFIG_SERIAL_CORE_CONSOLE=y | 666 | CONFIG_SERIAL_CORE_CONSOLE=y |
660 | # CONFIG_SERIAL_JSM is not set | 667 | # CONFIG_SERIAL_JSM is not set |
@@ -1116,11 +1123,7 @@ CONFIG_USB_HIDDEV=y | |||
1116 | # CONFIG_INFINIBAND is not set | 1123 | # CONFIG_INFINIBAND is not set |
1117 | 1124 | ||
1118 | # | 1125 | # |
1119 | # SN Devices | 1126 | # EDAC - error detection and reporting (RAS) (EXPERIMENTAL) |
1120 | # | ||
1121 | |||
1122 | # | ||
1123 | # EDAC - error detection and reporting (RAS) | ||
1124 | # | 1127 | # |
1125 | 1128 | ||
1126 | # | 1129 | # |
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index 83d67eb18895..6f6816488b04 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile | |||
@@ -11,10 +11,12 @@ obj-y := process.o setup.o cpu.o idprom.o \ | |||
11 | traps.o devices.o auxio.o una_asm.o \ | 11 | traps.o devices.o auxio.o una_asm.o \ |
12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ | 12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ |
13 | unaligned.o central.o pci.o starfire.o semaphore.o \ | 13 | unaligned.o central.o pci.o starfire.o semaphore.o \ |
14 | power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o | 14 | power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ |
15 | visemul.o | ||
15 | 16 | ||
16 | obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ | 17 | obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ |
17 | pci_psycho.o pci_sabre.o pci_schizo.o | 18 | pci_psycho.o pci_sabre.o pci_schizo.o \ |
19 | pci_sun4v.o pci_sun4v_asm.o | ||
18 | obj-$(CONFIG_SMP) += smp.o trampoline.o | 20 | obj-$(CONFIG_SMP) += smp.o trampoline.o |
19 | obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o | 21 | obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o |
20 | obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o | 22 | obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o |
@@ -38,5 +40,5 @@ else | |||
38 | CMODEL_CFLAG := -m64 -mcmodel=medlow | 40 | CMODEL_CFLAG := -m64 -mcmodel=medlow |
39 | endif | 41 | endif |
40 | 42 | ||
41 | head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \ | 43 | head.o: head.S ttable.S itlb_miss.S dtlb_miss.S ktlb.S tsb.S \ |
42 | etrap.S rtrap.S winfixup.S entry.S | 44 | etrap.S rtrap.S winfixup.S entry.S |
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c index 202a80c24b6f..d7caa60a0074 100644 --- a/arch/sparc64/kernel/binfmt_aout32.c +++ b/arch/sparc64/kernel/binfmt_aout32.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/system.h> | 31 | #include <asm/system.h> |
32 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
33 | #include <asm/pgalloc.h> | 33 | #include <asm/pgalloc.h> |
34 | #include <asm/mmu_context.h> | ||
34 | 35 | ||
35 | static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs); | 36 | static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs); |
36 | static int load_aout32_library(struct file*); | 37 | static int load_aout32_library(struct file*); |
@@ -238,6 +239,8 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
238 | (current->mm->start_data = N_DATADDR(ex)); | 239 | (current->mm->start_data = N_DATADDR(ex)); |
239 | current->mm->brk = ex.a_bss + | 240 | current->mm->brk = ex.a_bss + |
240 | (current->mm->start_brk = N_BSSADDR(ex)); | 241 | (current->mm->start_brk = N_BSSADDR(ex)); |
242 | current->mm->free_area_cache = current->mm->mmap_base; | ||
243 | current->mm->cached_hole_size = 0; | ||
241 | 244 | ||
242 | current->mm->mmap = NULL; | 245 | current->mm->mmap = NULL; |
243 | compute_creds(bprm); | 246 | compute_creds(bprm); |
@@ -329,15 +332,8 @@ beyond_if: | |||
329 | 332 | ||
330 | current->mm->start_stack = | 333 | current->mm->start_stack = |
331 | (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm); | 334 | (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm); |
332 | if (!(orig_thr_flags & _TIF_32BIT)) { | 335 | tsb_context_switch(current->mm); |
333 | unsigned long pgd_cache = get_pgd_cache(current->mm->pgd); | 336 | |
334 | |||
335 | __asm__ __volatile__("stxa\t%0, [%1] %2\n\t" | ||
336 | "membar #Sync" | ||
337 | : /* no outputs */ | ||
338 | : "r" (pgd_cache), | ||
339 | "r" (TSB_REG), "i" (ASI_DMMU)); | ||
340 | } | ||
341 | start_thread32(regs, ex.a_entry, current->mm->start_stack); | 337 | start_thread32(regs, ex.a_entry, current->mm->start_stack); |
342 | if (current->ptrace & PT_PTRACED) | 338 | if (current->ptrace & PT_PTRACED) |
343 | send_sig(SIGTRAP, current, 0); | 339 | send_sig(SIGTRAP, current, 0); |
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c index a1a12d2aa353..8a2abcce2737 100644 --- a/arch/sparc64/kernel/binfmt_elf32.c +++ b/arch/sparc64/kernel/binfmt_elf32.c | |||
@@ -153,7 +153,9 @@ MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek"); | |||
153 | #undef MODULE_DESCRIPTION | 153 | #undef MODULE_DESCRIPTION |
154 | #undef MODULE_AUTHOR | 154 | #undef MODULE_AUTHOR |
155 | 155 | ||
156 | #include <asm/a.out.h> | ||
157 | |||
156 | #undef TASK_SIZE | 158 | #undef TASK_SIZE |
157 | #define TASK_SIZE 0xf0000000 | 159 | #define TASK_SIZE STACK_TOP32 |
158 | 160 | ||
159 | #include "../../../fs/binfmt_elf.c" | 161 | #include "../../../fs/binfmt_elf.c" |
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c index 00eed88ef2e8..11cc0caef592 100644 --- a/arch/sparc64/kernel/cpu.c +++ b/arch/sparc64/kernel/cpu.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/system.h> | 13 | #include <asm/system.h> |
14 | #include <asm/fpumacro.h> | 14 | #include <asm/fpumacro.h> |
15 | #include <asm/cpudata.h> | 15 | #include <asm/cpudata.h> |
16 | #include <asm/spitfire.h> | ||
16 | 17 | ||
17 | DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 }; | 18 | DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 }; |
18 | 19 | ||
@@ -71,6 +72,12 @@ void __init cpu_probe(void) | |||
71 | unsigned long ver, fpu_vers, manuf, impl, fprs; | 72 | unsigned long ver, fpu_vers, manuf, impl, fprs; |
72 | int i; | 73 | int i; |
73 | 74 | ||
75 | if (tlb_type == hypervisor) { | ||
76 | sparc_cpu_type = "UltraSparc T1 (Niagara)"; | ||
77 | sparc_fpu_type = "UltraSparc T1 integrated FPU"; | ||
78 | return; | ||
79 | } | ||
80 | |||
74 | fprs = fprs_read(); | 81 | fprs = fprs_read(); |
75 | fprs_write(FPRS_FEF); | 82 | fprs_write(FPRS_FEF); |
76 | __asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" | 83 | __asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" |
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c index df9a1ca8fd77..007e8922cd16 100644 --- a/arch/sparc64/kernel/devices.c +++ b/arch/sparc64/kernel/devices.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/bootmem.h> | ||
15 | 16 | ||
16 | #include <asm/page.h> | 17 | #include <asm/page.h> |
17 | #include <asm/oplib.h> | 18 | #include <asm/oplib.h> |
@@ -20,6 +21,8 @@ | |||
20 | #include <asm/spitfire.h> | 21 | #include <asm/spitfire.h> |
21 | #include <asm/timer.h> | 22 | #include <asm/timer.h> |
22 | #include <asm/cpudata.h> | 23 | #include <asm/cpudata.h> |
24 | #include <asm/vdev.h> | ||
25 | #include <asm/irq.h> | ||
23 | 26 | ||
24 | /* Used to synchronize acceses to NatSemi SUPER I/O chip configure | 27 | /* Used to synchronize acceses to NatSemi SUPER I/O chip configure |
25 | * operations in asm/ns87303.h | 28 | * operations in asm/ns87303.h |
@@ -29,13 +32,158 @@ DEFINE_SPINLOCK(ns87303_lock); | |||
29 | extern void cpu_probe(void); | 32 | extern void cpu_probe(void); |
30 | extern void central_probe(void); | 33 | extern void central_probe(void); |
31 | 34 | ||
32 | static char *cpu_mid_prop(void) | 35 | u32 sun4v_vdev_devhandle; |
36 | int sun4v_vdev_root; | ||
37 | |||
38 | struct vdev_intmap { | ||
39 | unsigned int phys; | ||
40 | unsigned int irq; | ||
41 | unsigned int cnode; | ||
42 | unsigned int cinterrupt; | ||
43 | }; | ||
44 | |||
45 | struct vdev_intmask { | ||
46 | unsigned int phys; | ||
47 | unsigned int interrupt; | ||
48 | unsigned int __unused; | ||
49 | }; | ||
50 | |||
51 | static struct vdev_intmap *vdev_intmap; | ||
52 | static int vdev_num_intmap; | ||
53 | static struct vdev_intmask vdev_intmask; | ||
54 | |||
55 | static void __init sun4v_virtual_device_probe(void) | ||
56 | { | ||
57 | struct linux_prom64_registers regs; | ||
58 | struct vdev_intmap *ip; | ||
59 | int node, sz, err; | ||
60 | |||
61 | if (tlb_type != hypervisor) | ||
62 | return; | ||
63 | |||
64 | node = prom_getchild(prom_root_node); | ||
65 | node = prom_searchsiblings(node, "virtual-devices"); | ||
66 | if (!node) { | ||
67 | prom_printf("SUN4V: Fatal error, no virtual-devices node.\n"); | ||
68 | prom_halt(); | ||
69 | } | ||
70 | |||
71 | sun4v_vdev_root = node; | ||
72 | |||
73 | prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); | ||
74 | sun4v_vdev_devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; | ||
75 | |||
76 | sz = prom_getproplen(node, "interrupt-map"); | ||
77 | if (sz <= 0) { | ||
78 | prom_printf("SUN4V: Error, no vdev interrupt-map.\n"); | ||
79 | prom_halt(); | ||
80 | } | ||
81 | |||
82 | if ((sz % sizeof(*ip)) != 0) { | ||
83 | prom_printf("SUN4V: Bogus interrupt-map property size %d\n", | ||
84 | sz); | ||
85 | prom_halt(); | ||
86 | } | ||
87 | |||
88 | vdev_intmap = ip = alloc_bootmem_low_pages(sz); | ||
89 | if (!vdev_intmap) { | ||
90 | prom_printf("SUN4V: Error, cannot allocate vdev_intmap.\n"); | ||
91 | prom_halt(); | ||
92 | } | ||
93 | |||
94 | err = prom_getproperty(node, "interrupt-map", (char *) ip, sz); | ||
95 | if (err == -1) { | ||
96 | prom_printf("SUN4V: Fatal error, no vdev interrupt-map.\n"); | ||
97 | prom_halt(); | ||
98 | } | ||
99 | if (err != sz) { | ||
100 | prom_printf("SUN4V: Inconsistent interrupt-map size, " | ||
101 | "proplen(%d) vs getprop(%d).\n", sz,err); | ||
102 | prom_halt(); | ||
103 | } | ||
104 | |||
105 | vdev_num_intmap = err / sizeof(*ip); | ||
106 | |||
107 | err = prom_getproperty(node, "interrupt-map-mask", | ||
108 | (char *) &vdev_intmask, | ||
109 | sizeof(vdev_intmask)); | ||
110 | if (err <= 0) { | ||
111 | prom_printf("SUN4V: Fatal error, no vdev " | ||
112 | "interrupt-map-mask.\n"); | ||
113 | prom_halt(); | ||
114 | } | ||
115 | if (err % sizeof(vdev_intmask)) { | ||
116 | prom_printf("SUN4V: Bogus interrupt-map-mask " | ||
117 | "property size %d\n", err); | ||
118 | prom_halt(); | ||
119 | } | ||
120 | |||
121 | printk("SUN4V: virtual-devices devhandle[%x]\n", | ||
122 | sun4v_vdev_devhandle); | ||
123 | } | ||
124 | |||
125 | unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node) | ||
126 | { | ||
127 | unsigned int irq, reg; | ||
128 | int err, i; | ||
129 | |||
130 | err = prom_getproperty(dev_node, "interrupts", | ||
131 | (char *) &irq, sizeof(irq)); | ||
132 | if (err <= 0) { | ||
133 | printk("VDEV: Cannot get \"interrupts\" " | ||
134 | "property for OBP node %x\n", dev_node); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | err = prom_getproperty(dev_node, "reg", | ||
139 | (char *) ®, sizeof(reg)); | ||
140 | if (err <= 0) { | ||
141 | printk("VDEV: Cannot get \"reg\" " | ||
142 | "property for OBP node %x\n", dev_node); | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | for (i = 0; i < vdev_num_intmap; i++) { | ||
147 | if (vdev_intmap[i].phys == (reg & vdev_intmask.phys) && | ||
148 | vdev_intmap[i].irq == (irq & vdev_intmask.interrupt)) { | ||
149 | irq = vdev_intmap[i].cinterrupt; | ||
150 | break; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | if (i == vdev_num_intmap) { | ||
155 | printk("VDEV: No matching interrupt map entry " | ||
156 | "for OBP node %x\n", dev_node); | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | return sun4v_build_irq(sun4v_vdev_devhandle, irq, 5, 0); | ||
161 | } | ||
162 | |||
163 | static const char *cpu_mid_prop(void) | ||
33 | { | 164 | { |
34 | if (tlb_type == spitfire) | 165 | if (tlb_type == spitfire) |
35 | return "upa-portid"; | 166 | return "upa-portid"; |
36 | return "portid"; | 167 | return "portid"; |
37 | } | 168 | } |
38 | 169 | ||
170 | static int get_cpu_mid(int prom_node) | ||
171 | { | ||
172 | if (tlb_type == hypervisor) { | ||
173 | struct linux_prom64_registers reg; | ||
174 | |||
175 | if (prom_getproplen(prom_node, "cpuid") == 4) | ||
176 | return prom_getintdefault(prom_node, "cpuid", 0); | ||
177 | |||
178 | prom_getproperty(prom_node, "reg", (char *) ®, sizeof(reg)); | ||
179 | return (reg.phys_addr >> 32) & 0x0fffffffUL; | ||
180 | } else { | ||
181 | const char *prop_name = cpu_mid_prop(); | ||
182 | |||
183 | return prom_getintdefault(prom_node, prop_name, 0); | ||
184 | } | ||
185 | } | ||
186 | |||
39 | static int check_cpu_node(int nd, int *cur_inst, | 187 | static int check_cpu_node(int nd, int *cur_inst, |
40 | int (*compare)(int, int, void *), void *compare_arg, | 188 | int (*compare)(int, int, void *), void *compare_arg, |
41 | int *prom_node, int *mid) | 189 | int *prom_node, int *mid) |
@@ -50,7 +198,7 @@ static int check_cpu_node(int nd, int *cur_inst, | |||
50 | if (prom_node) | 198 | if (prom_node) |
51 | *prom_node = nd; | 199 | *prom_node = nd; |
52 | if (mid) | 200 | if (mid) |
53 | *mid = prom_getintdefault(nd, cpu_mid_prop(), 0); | 201 | *mid = get_cpu_mid(nd); |
54 | return 0; | 202 | return 0; |
55 | } | 203 | } |
56 | 204 | ||
@@ -105,7 +253,7 @@ static int cpu_mid_compare(int nd, int instance, void *_arg) | |||
105 | int desired_mid = (int) (long) _arg; | 253 | int desired_mid = (int) (long) _arg; |
106 | int this_mid; | 254 | int this_mid; |
107 | 255 | ||
108 | this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0); | 256 | this_mid = get_cpu_mid(nd); |
109 | if (this_mid == desired_mid) | 257 | if (this_mid == desired_mid) |
110 | return 0; | 258 | return 0; |
111 | return -ENODEV; | 259 | return -ENODEV; |
@@ -126,7 +274,8 @@ void __init device_scan(void) | |||
126 | 274 | ||
127 | #ifndef CONFIG_SMP | 275 | #ifndef CONFIG_SMP |
128 | { | 276 | { |
129 | int err, cpu_node; | 277 | int err, cpu_node, def; |
278 | |||
130 | err = cpu_find_by_instance(0, &cpu_node, NULL); | 279 | err = cpu_find_by_instance(0, &cpu_node, NULL); |
131 | if (err) { | 280 | if (err) { |
132 | prom_printf("No cpu nodes, cannot continue\n"); | 281 | prom_printf("No cpu nodes, cannot continue\n"); |
@@ -135,21 +284,40 @@ void __init device_scan(void) | |||
135 | cpu_data(0).clock_tick = prom_getintdefault(cpu_node, | 284 | cpu_data(0).clock_tick = prom_getintdefault(cpu_node, |
136 | "clock-frequency", | 285 | "clock-frequency", |
137 | 0); | 286 | 0); |
287 | |||
288 | def = ((tlb_type == hypervisor) ? | ||
289 | (8 * 1024) : | ||
290 | (16 * 1024)); | ||
138 | cpu_data(0).dcache_size = prom_getintdefault(cpu_node, | 291 | cpu_data(0).dcache_size = prom_getintdefault(cpu_node, |
139 | "dcache-size", | 292 | "dcache-size", |
140 | 16 * 1024); | 293 | def); |
294 | |||
295 | def = 32; | ||
141 | cpu_data(0).dcache_line_size = | 296 | cpu_data(0).dcache_line_size = |
142 | prom_getintdefault(cpu_node, "dcache-line-size", 32); | 297 | prom_getintdefault(cpu_node, "dcache-line-size", |
298 | def); | ||
299 | |||
300 | def = 16 * 1024; | ||
143 | cpu_data(0).icache_size = prom_getintdefault(cpu_node, | 301 | cpu_data(0).icache_size = prom_getintdefault(cpu_node, |
144 | "icache-size", | 302 | "icache-size", |
145 | 16 * 1024); | 303 | def); |
304 | |||
305 | def = 32; | ||
146 | cpu_data(0).icache_line_size = | 306 | cpu_data(0).icache_line_size = |
147 | prom_getintdefault(cpu_node, "icache-line-size", 32); | 307 | prom_getintdefault(cpu_node, "icache-line-size", |
308 | def); | ||
309 | |||
310 | def = ((tlb_type == hypervisor) ? | ||
311 | (3 * 1024 * 1024) : | ||
312 | (4 * 1024 * 1024)); | ||
148 | cpu_data(0).ecache_size = prom_getintdefault(cpu_node, | 313 | cpu_data(0).ecache_size = prom_getintdefault(cpu_node, |
149 | "ecache-size", | 314 | "ecache-size", |
150 | 4 * 1024 * 1024); | 315 | def); |
316 | |||
317 | def = 64; | ||
151 | cpu_data(0).ecache_line_size = | 318 | cpu_data(0).ecache_line_size = |
152 | prom_getintdefault(cpu_node, "ecache-line-size", 64); | 319 | prom_getintdefault(cpu_node, "ecache-line-size", |
320 | def); | ||
153 | printk("CPU[0]: Caches " | 321 | printk("CPU[0]: Caches " |
154 | "D[sz(%d):line_sz(%d)] " | 322 | "D[sz(%d):line_sz(%d)] " |
155 | "I[sz(%d):line_sz(%d)] " | 323 | "I[sz(%d):line_sz(%d)] " |
@@ -160,6 +328,7 @@ void __init device_scan(void) | |||
160 | } | 328 | } |
161 | #endif | 329 | #endif |
162 | 330 | ||
331 | sun4v_virtual_device_probe(); | ||
163 | central_probe(); | 332 | central_probe(); |
164 | 333 | ||
165 | cpu_probe(); | 334 | cpu_probe(); |
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S deleted file mode 100644 index acc889a7f9c1..000000000000 --- a/arch/sparc64/kernel/dtlb_backend.S +++ /dev/null | |||
@@ -1,170 +0,0 @@ | |||
1 | /* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $ | ||
2 | * dtlb_backend.S: Back end to DTLB miss replacement strategy. | ||
3 | * This is included directly into the trap table. | ||
4 | * | ||
5 | * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com) | ||
6 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
7 | */ | ||
8 | |||
9 | #include <asm/pgtable.h> | ||
10 | #include <asm/mmu.h> | ||
11 | |||
12 | #define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS) | ||
13 | |||
14 | #define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P ) | ||
15 | #define VPTE_SHIFT (PAGE_SHIFT - 3) | ||
16 | |||
17 | /* Ways we can get here: | ||
18 | * | ||
19 | * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1. | ||
20 | * 2) Nucleus loads and stores to/from user/kernel window save areas. | ||
21 | * 3) VPTE misses from dtlb_base and itlb_base. | ||
22 | * | ||
23 | * We need to extract out the PMD and PGDIR indexes from the | ||
24 | * linear virtual page table access address. The PTE index | ||
25 | * is at the bottom, but we are not concerned with it. Bits | ||
26 | * 0 to 2 are clear since each PTE is 8 bytes in size. Each | ||
27 | * PMD and PGDIR entry are 4 bytes in size. Thus, this | ||
28 | * address looks something like: | ||
29 | * | ||
30 | * |---------------------------------------------------------------| | ||
31 | * | ... | PGDIR index | PMD index | PTE index | | | ||
32 | * |---------------------------------------------------------------| | ||
33 | * 63 F E D C B A 3 2 0 <- bit nr | ||
34 | * | ||
35 | * The variable bits above are defined as: | ||
36 | * A --> 3 + (PAGE_SHIFT - log2(8)) | ||
37 | * --> 3 + (PAGE_SHIFT - 3) - 1 | ||
38 | * (ie. this is "bit 3" + PAGE_SIZE - size of PTE entry in bits - 1) | ||
39 | * B --> A + 1 | ||
40 | * C --> B + (PAGE_SHIFT - log2(4)) | ||
41 | * --> B + (PAGE_SHIFT - 2) - 1 | ||
42 | * (ie. this is "bit B" + PAGE_SIZE - size of PMD entry in bits - 1) | ||
43 | * D --> C + 1 | ||
44 | * E --> D + (PAGE_SHIFT - log2(4)) | ||
45 | * --> D + (PAGE_SHIFT - 2) - 1 | ||
46 | * (ie. this is "bit D" + PAGE_SIZE - size of PGDIR entry in bits - 1) | ||
47 | * F --> E + 1 | ||
48 | * | ||
49 | * (Note how "B" always evalutes to PAGE_SHIFT, all the other constants | ||
50 | * cancel out.) | ||
51 | * | ||
52 | * For 8K PAGE_SIZE (thus, PAGE_SHIFT of 13) the bit numbers are: | ||
53 | * A --> 12 | ||
54 | * B --> 13 | ||
55 | * C --> 23 | ||
56 | * D --> 24 | ||
57 | * E --> 34 | ||
58 | * F --> 35 | ||
59 | * | ||
60 | * For 64K PAGE_SIZE (thus, PAGE_SHIFT of 16) the bit numbers are: | ||
61 | * A --> 15 | ||
62 | * B --> 16 | ||
63 | * C --> 29 | ||
64 | * D --> 30 | ||
65 | * E --> 43 | ||
66 | * F --> 44 | ||
67 | * | ||
68 | * Because bits both above and below each PGDIR and PMD index need to | ||
69 | * be masked out, and the index can be as long as 14 bits (when using a | ||
70 | * 64K PAGE_SIZE, and thus a PAGE_SHIFT of 16), we need 3 instructions | ||
71 | * to extract each index out. | ||
72 | * | ||
73 | * Shifts do not pair very well on UltraSPARC-I, II, IIi, and IIe, so | ||
74 | * we try to avoid using them for the entire operation. We could setup | ||
75 | * a mask anywhere from bit 31 down to bit 10 using the sethi instruction. | ||
76 | * | ||
77 | * We need a mask covering bits B --> C and one covering D --> E. | ||
78 | * For 8K PAGE_SIZE these masks are 0x00ffe000 and 0x7ff000000. | ||
79 | * For 64K PAGE_SIZE these masks are 0x3fff0000 and 0xfffc0000000. | ||
80 | * The second in each set cannot be loaded with a single sethi | ||
81 | * instruction, because the upper bits are past bit 32. We would | ||
82 | * need to use a sethi + a shift. | ||
83 | * | ||
84 | * For the time being, we use 2 shifts and a simple "and" mask. | ||
85 | * We shift left to clear the bits above the index, we shift down | ||
86 | * to clear the bits below the index (sans the log2(4 or 8) bits) | ||
87 | * and a mask to clear the log2(4 or 8) bits. We need therefore | ||
88 | * define 4 shift counts, all of which are relative to PAGE_SHIFT. | ||
89 | * | ||
90 | * Although unsupportable for other reasons, this does mean that | ||
91 | * 512K and 4MB page sizes would be generaally supported by the | ||
92 | * kernel. (ELF binaries would break with > 64K PAGE_SIZE since | ||
93 | * the sections are only aligned that strongly). | ||
94 | * | ||
95 | * The operations performed for extraction are thus: | ||
96 | * | ||
97 | * ((X << FOO_SHIFT_LEFT) >> FOO_SHIFT_RIGHT) & ~0x3 | ||
98 | * | ||
99 | */ | ||
100 | |||
101 | #define A (3 + (PAGE_SHIFT - 3) - 1) | ||
102 | #define B (A + 1) | ||
103 | #define C (B + (PAGE_SHIFT - 2) - 1) | ||
104 | #define D (C + 1) | ||
105 | #define E (D + (PAGE_SHIFT - 2) - 1) | ||
106 | #define F (E + 1) | ||
107 | |||
108 | #define PMD_SHIFT_LEFT (64 - D) | ||
109 | #define PMD_SHIFT_RIGHT (64 - (D - B) - 2) | ||
110 | #define PGDIR_SHIFT_LEFT (64 - F) | ||
111 | #define PGDIR_SHIFT_RIGHT (64 - (F - D) - 2) | ||
112 | #define LOW_MASK_BITS 0x3 | ||
113 | |||
114 | /* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss */ | ||
115 | ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS | ||
116 | add %g3, %g3, %g5 ! Compute VPTE base | ||
117 | cmp %g4, %g5 ! VPTE miss? | ||
118 | bgeu,pt %xcc, 1f ! Continue here | ||
119 | andcc %g4, TAG_CONTEXT_BITS, %g5 ! tl0 miss Nucleus test | ||
120 | ba,a,pt %xcc, from_tl1_trap ! Fall to tl0 miss | ||
121 | 1: sllx %g6, VPTE_SHIFT, %g4 ! Position TAG_ACCESS | ||
122 | or %g4, %g5, %g4 ! Prepare TAG_ACCESS | ||
123 | |||
124 | /* TLB1 ** ICACHE line 2: Quick VPTE miss */ | ||
125 | mov TSB_REG, %g1 ! Grab TSB reg | ||
126 | ldxa [%g1] ASI_DMMU, %g5 ! Doing PGD caching? | ||
127 | sllx %g6, PMD_SHIFT_LEFT, %g1 ! Position PMD offset | ||
128 | be,pn %xcc, sparc64_vpte_nucleus ! Is it from Nucleus? | ||
129 | srlx %g1, PMD_SHIFT_RIGHT, %g1 ! Mask PMD offset bits | ||
130 | brnz,pt %g5, sparc64_vpte_continue ! Yep, go like smoke | ||
131 | andn %g1, LOW_MASK_BITS, %g1 ! Final PMD mask | ||
132 | sllx %g6, PGDIR_SHIFT_LEFT, %g5 ! Position PGD offset | ||
133 | |||
134 | /* TLB1 ** ICACHE line 3: Quick VPTE miss */ | ||
135 | srlx %g5, PGDIR_SHIFT_RIGHT, %g5 ! Mask PGD offset bits | ||
136 | andn %g5, LOW_MASK_BITS, %g5 ! Final PGD mask | ||
137 | lduwa [%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD | ||
138 | brz,pn %g5, vpte_noent ! Valid? | ||
139 | sparc64_kpte_continue: | ||
140 | sllx %g5, 11, %g5 ! Shift into place | ||
141 | sparc64_vpte_continue: | ||
142 | lduwa [%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD | ||
143 | sllx %g5, 11, %g5 ! Shift into place | ||
144 | brz,pn %g5, vpte_noent ! Valid? | ||
145 | |||
146 | /* TLB1 ** ICACHE line 4: Quick VPTE miss */ | ||
147 | mov (VALID_SZ_BITS >> 61), %g1 ! upper vpte into %g1 | ||
148 | sllx %g1, 61, %g1 ! finish calc | ||
149 | or %g5, VPTE_BITS, %g5 ! Prepare VPTE data | ||
150 | or %g5, %g1, %g5 ! ... | ||
151 | mov TLB_SFSR, %g1 ! Restore %g1 value | ||
152 | stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load VPTE into TLB | ||
153 | stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS | ||
154 | retry ! Load PTE once again | ||
155 | |||
156 | #undef VALID_SZ_BITS | ||
157 | #undef VPTE_SHIFT | ||
158 | #undef VPTE_BITS | ||
159 | #undef A | ||
160 | #undef B | ||
161 | #undef C | ||
162 | #undef D | ||
163 | #undef E | ||
164 | #undef F | ||
165 | #undef PMD_SHIFT_LEFT | ||
166 | #undef PMD_SHIFT_RIGHT | ||
167 | #undef PGDIR_SHIFT_LEFT | ||
168 | #undef PGDIR_SHIFT_RIGHT | ||
169 | #undef LOW_MASK_BITS | ||
170 | |||
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S deleted file mode 100644 index 6528786840c0..000000000000 --- a/arch/sparc64/kernel/dtlb_base.S +++ /dev/null | |||
@@ -1,109 +0,0 @@ | |||
1 | /* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $ | ||
2 | * dtlb_base.S: Front end to DTLB miss replacement strategy. | ||
3 | * This is included directly into the trap table. | ||
4 | * | ||
5 | * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com) | ||
6 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
7 | */ | ||
8 | |||
9 | #include <asm/pgtable.h> | ||
10 | #include <asm/mmu.h> | ||
11 | |||
12 | /* %g1 TLB_SFSR (%g1 + %g1 == TLB_TAG_ACCESS) | ||
13 | * %g2 (KERN_HIGHBITS | KERN_LOWBITS) | ||
14 | * %g3 VPTE base (0xfffffffe00000000) Spitfire/Blackbird (44-bit VA space) | ||
15 | * (0xffe0000000000000) Cheetah (64-bit VA space) | ||
16 | * %g7 __pa(current->mm->pgd) | ||
17 | * | ||
18 | * The VPTE base value is completely magic, but note that | ||
19 | * few places in the kernel other than these TLB miss | ||
20 | * handlers know anything about the VPTE mechanism or | ||
21 | * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD). | ||
22 | * Consider the 44-bit VADDR Ultra-I/II case as an example: | ||
23 | * | ||
24 | * VA[0 : (1<<43)] produce VPTE index [%g3 : 0] | ||
25 | * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3] | ||
26 | * | ||
27 | * For Cheetah's 64-bit VADDR space this is: | ||
28 | * | ||
29 | * VA[0 : (1<<63)] produce VPTE index [%g3 : 0] | ||
30 | * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3] | ||
31 | * | ||
32 | * If you're paying attention you'll notice that this means half of | ||
33 | * the VPTE table is above %g3 and half is below, low VA addresses | ||
34 | * map progressively upwards from %g3, and high VA addresses map | ||
35 | * progressively upwards towards %g3. This trick was needed to make | ||
36 | * the same 8 instruction handler work both for Spitfire/Blackbird's | ||
37 | * peculiar VA space hole configuration and the full 64-bit VA space | ||
38 | * one of Cheetah at the same time. | ||
39 | */ | ||
40 | |||
41 | /* Ways we can get here: | ||
42 | * | ||
43 | * 1) Nucleus loads and stores to/from PA-->VA direct mappings. | ||
44 | * 2) Nucleus loads and stores to/from vmalloc() areas. | ||
45 | * 3) User loads and stores. | ||
46 | * 4) User space accesses by nucleus at tl0 | ||
47 | */ | ||
48 | |||
49 | #if PAGE_SHIFT == 13 | ||
50 | /* | ||
51 | * To compute vpte offset, we need to do ((addr >> 13) << 3), | ||
52 | * which can be optimized to (addr >> 10) if bits 10/11/12 can | ||
53 | * be guaranteed to be 0 ... mmu_context.h does guarantee this | ||
54 | * by only using 10 bits in the hwcontext value. | ||
55 | */ | ||
56 | #define CREATE_VPTE_OFFSET1(r1, r2) nop | ||
57 | #define CREATE_VPTE_OFFSET2(r1, r2) \ | ||
58 | srax r1, 10, r2 | ||
59 | #else | ||
60 | #define CREATE_VPTE_OFFSET1(r1, r2) \ | ||
61 | srax r1, PAGE_SHIFT, r2 | ||
62 | #define CREATE_VPTE_OFFSET2(r1, r2) \ | ||
63 | sllx r2, 3, r2 | ||
64 | #endif | ||
65 | |||
66 | /* DTLB ** ICACHE line 1: Quick user TLB misses */ | ||
67 | mov TLB_SFSR, %g1 | ||
68 | ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS | ||
69 | andcc %g4, TAG_CONTEXT_BITS, %g0 ! From Nucleus? | ||
70 | from_tl1_trap: | ||
71 | rdpr %tl, %g5 ! For TL==3 test | ||
72 | CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset | ||
73 | be,pn %xcc, kvmap ! Yep, special processing | ||
74 | CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset | ||
75 | cmp %g5, 4 ! Last trap level? | ||
76 | |||
77 | /* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */ | ||
78 | be,pn %xcc, longpath ! Yep, cannot risk VPTE miss | ||
79 | nop ! delay slot | ||
80 | ldxa [%g3 + %g6] ASI_S, %g5 ! Load VPTE | ||
81 | 1: brgez,pn %g5, longpath ! Invalid, branch out | ||
82 | nop ! Delay-slot | ||
83 | 9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB | ||
84 | retry ! Trap return | ||
85 | nop | ||
86 | |||
87 | /* DTLB ** ICACHE line 3: winfixups+real_faults */ | ||
88 | longpath: | ||
89 | rdpr %pstate, %g5 ! Move into alternate globals | ||
90 | wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate | ||
91 | rdpr %tl, %g4 ! See where we came from. | ||
92 | cmp %g4, 1 ! Is etrap/rtrap window fault? | ||
93 | mov TLB_TAG_ACCESS, %g4 ! Prepare for fault processing | ||
94 | ldxa [%g4] ASI_DMMU, %g5 ! Load faulting VA page | ||
95 | be,pt %xcc, sparc64_realfault_common ! Jump to normal fault handling | ||
96 | mov FAULT_CODE_DTLB, %g4 ! It was read from DTLB | ||
97 | |||
98 | /* DTLB ** ICACHE line 4: Unused... */ | ||
99 | ba,a,pt %xcc, winfix_trampoline ! Call window fixup code | ||
100 | nop | ||
101 | nop | ||
102 | nop | ||
103 | nop | ||
104 | nop | ||
105 | nop | ||
106 | nop | ||
107 | |||
108 | #undef CREATE_VPTE_OFFSET1 | ||
109 | #undef CREATE_VPTE_OFFSET2 | ||
diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S new file mode 100644 index 000000000000..09a6a15a7105 --- /dev/null +++ b/arch/sparc64/kernel/dtlb_miss.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* DTLB ** ICACHE line 1: Context 0 check and TSB load */ | ||
2 | ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer | ||
3 | ldxa [%g0] ASI_DMMU, %g6 ! Get TAG TARGET | ||
4 | srlx %g6, 48, %g5 ! Get context | ||
5 | sllx %g6, 22, %g6 ! Zero out context | ||
6 | brz,pn %g5, kvmap_dtlb ! Context 0 processing | ||
7 | srlx %g6, 22, %g6 ! Delay slot | ||
8 | TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry | ||
9 | cmp %g4, %g6 ! Compare TAG | ||
10 | |||
11 | /* DTLB ** ICACHE line 2: TSB compare and TLB load */ | ||
12 | bne,pn %xcc, tsb_miss_dtlb ! Miss | ||
13 | mov FAULT_CODE_DTLB, %g3 | ||
14 | stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load TLB | ||
15 | retry ! Trap done | ||
16 | nop | ||
17 | nop | ||
18 | nop | ||
19 | nop | ||
20 | |||
21 | /* DTLB ** ICACHE line 3: */ | ||
22 | nop | ||
23 | nop | ||
24 | nop | ||
25 | nop | ||
26 | nop | ||
27 | nop | ||
28 | nop | ||
29 | nop | ||
30 | |||
31 | /* DTLB ** ICACHE line 4: */ | ||
32 | nop | ||
33 | nop | ||
34 | nop | ||
35 | nop | ||
36 | nop | ||
37 | nop | ||
38 | nop | ||
39 | nop | ||
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c index 7991e919d8ab..c69504aa638f 100644 --- a/arch/sparc64/kernel/ebus.c +++ b/arch/sparc64/kernel/ebus.c | |||
@@ -277,10 +277,9 @@ static inline void *ebus_alloc(size_t size) | |||
277 | { | 277 | { |
278 | void *mem; | 278 | void *mem; |
279 | 279 | ||
280 | mem = kmalloc(size, GFP_ATOMIC); | 280 | mem = kzalloc(size, GFP_ATOMIC); |
281 | if (!mem) | 281 | if (!mem) |
282 | panic("ebus_alloc: out of memory"); | 282 | panic("ebus_alloc: out of memory"); |
283 | memset((char *)mem, 0, size); | ||
284 | return mem; | 283 | return mem; |
285 | } | 284 | } |
286 | 285 | ||
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index a73553ae7e53..6d0b3ed77a02 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S | |||
@@ -50,7 +50,8 @@ do_fpdis: | |||
50 | add %g0, %g0, %g0 | 50 | add %g0, %g0, %g0 |
51 | ba,a,pt %xcc, rtrap_clr_l6 | 51 | ba,a,pt %xcc, rtrap_clr_l6 |
52 | 52 | ||
53 | 1: ldub [%g6 + TI_FPSAVED], %g5 | 53 | 1: TRAP_LOAD_THREAD_REG(%g6, %g1) |
54 | ldub [%g6 + TI_FPSAVED], %g5 | ||
54 | wr %g0, FPRS_FEF, %fprs | 55 | wr %g0, FPRS_FEF, %fprs |
55 | andcc %g5, FPRS_FEF, %g0 | 56 | andcc %g5, FPRS_FEF, %g0 |
56 | be,a,pt %icc, 1f | 57 | be,a,pt %icc, 1f |
@@ -96,10 +97,22 @@ do_fpdis: | |||
96 | add %g6, TI_FPREGS + 0x80, %g1 | 97 | add %g6, TI_FPREGS + 0x80, %g1 |
97 | faddd %f0, %f2, %f4 | 98 | faddd %f0, %f2, %f4 |
98 | fmuld %f0, %f2, %f6 | 99 | fmuld %f0, %f2, %f6 |
99 | ldxa [%g3] ASI_DMMU, %g5 | 100 | |
101 | 661: ldxa [%g3] ASI_DMMU, %g5 | ||
102 | .section .sun4v_1insn_patch, "ax" | ||
103 | .word 661b | ||
104 | ldxa [%g3] ASI_MMU, %g5 | ||
105 | .previous | ||
106 | |||
100 | sethi %hi(sparc64_kern_sec_context), %g2 | 107 | sethi %hi(sparc64_kern_sec_context), %g2 |
101 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 | 108 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 |
102 | stxa %g2, [%g3] ASI_DMMU | 109 | |
110 | 661: stxa %g2, [%g3] ASI_DMMU | ||
111 | .section .sun4v_1insn_patch, "ax" | ||
112 | .word 661b | ||
113 | stxa %g2, [%g3] ASI_MMU | ||
114 | .previous | ||
115 | |||
103 | membar #Sync | 116 | membar #Sync |
104 | add %g6, TI_FPREGS + 0xc0, %g2 | 117 | add %g6, TI_FPREGS + 0xc0, %g2 |
105 | faddd %f0, %f2, %f8 | 118 | faddd %f0, %f2, %f8 |
@@ -125,11 +138,23 @@ do_fpdis: | |||
125 | fzero %f32 | 138 | fzero %f32 |
126 | mov SECONDARY_CONTEXT, %g3 | 139 | mov SECONDARY_CONTEXT, %g3 |
127 | fzero %f34 | 140 | fzero %f34 |
128 | ldxa [%g3] ASI_DMMU, %g5 | 141 | |
142 | 661: ldxa [%g3] ASI_DMMU, %g5 | ||
143 | .section .sun4v_1insn_patch, "ax" | ||
144 | .word 661b | ||
145 | ldxa [%g3] ASI_MMU, %g5 | ||
146 | .previous | ||
147 | |||
129 | add %g6, TI_FPREGS, %g1 | 148 | add %g6, TI_FPREGS, %g1 |
130 | sethi %hi(sparc64_kern_sec_context), %g2 | 149 | sethi %hi(sparc64_kern_sec_context), %g2 |
131 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 | 150 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 |
132 | stxa %g2, [%g3] ASI_DMMU | 151 | |
152 | 661: stxa %g2, [%g3] ASI_DMMU | ||
153 | .section .sun4v_1insn_patch, "ax" | ||
154 | .word 661b | ||
155 | stxa %g2, [%g3] ASI_MMU | ||
156 | .previous | ||
157 | |||
133 | membar #Sync | 158 | membar #Sync |
134 | add %g6, TI_FPREGS + 0x40, %g2 | 159 | add %g6, TI_FPREGS + 0x40, %g2 |
135 | faddd %f32, %f34, %f36 | 160 | faddd %f32, %f34, %f36 |
@@ -154,10 +179,22 @@ do_fpdis: | |||
154 | nop | 179 | nop |
155 | 3: mov SECONDARY_CONTEXT, %g3 | 180 | 3: mov SECONDARY_CONTEXT, %g3 |
156 | add %g6, TI_FPREGS, %g1 | 181 | add %g6, TI_FPREGS, %g1 |
157 | ldxa [%g3] ASI_DMMU, %g5 | 182 | |
183 | 661: ldxa [%g3] ASI_DMMU, %g5 | ||
184 | .section .sun4v_1insn_patch, "ax" | ||
185 | .word 661b | ||
186 | ldxa [%g3] ASI_MMU, %g5 | ||
187 | .previous | ||
188 | |||
158 | sethi %hi(sparc64_kern_sec_context), %g2 | 189 | sethi %hi(sparc64_kern_sec_context), %g2 |
159 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 | 190 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 |
160 | stxa %g2, [%g3] ASI_DMMU | 191 | |
192 | 661: stxa %g2, [%g3] ASI_DMMU | ||
193 | .section .sun4v_1insn_patch, "ax" | ||
194 | .word 661b | ||
195 | stxa %g2, [%g3] ASI_MMU | ||
196 | .previous | ||
197 | |||
161 | membar #Sync | 198 | membar #Sync |
162 | mov 0x40, %g2 | 199 | mov 0x40, %g2 |
163 | membar #Sync | 200 | membar #Sync |
@@ -168,7 +205,13 @@ do_fpdis: | |||
168 | ldda [%g1 + %g2] ASI_BLK_S, %f48 | 205 | ldda [%g1 + %g2] ASI_BLK_S, %f48 |
169 | membar #Sync | 206 | membar #Sync |
170 | fpdis_exit: | 207 | fpdis_exit: |
171 | stxa %g5, [%g3] ASI_DMMU | 208 | |
209 | 661: stxa %g5, [%g3] ASI_DMMU | ||
210 | .section .sun4v_1insn_patch, "ax" | ||
211 | .word 661b | ||
212 | stxa %g5, [%g3] ASI_MMU | ||
213 | .previous | ||
214 | |||
172 | membar #Sync | 215 | membar #Sync |
173 | fpdis_exit2: | 216 | fpdis_exit2: |
174 | wr %g7, 0, %gsr | 217 | wr %g7, 0, %gsr |
@@ -189,6 +232,7 @@ fp_other_bounce: | |||
189 | .globl do_fpother_check_fitos | 232 | .globl do_fpother_check_fitos |
190 | .align 32 | 233 | .align 32 |
191 | do_fpother_check_fitos: | 234 | do_fpother_check_fitos: |
235 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
192 | sethi %hi(fp_other_bounce - 4), %g7 | 236 | sethi %hi(fp_other_bounce - 4), %g7 |
193 | or %g7, %lo(fp_other_bounce - 4), %g7 | 237 | or %g7, %lo(fp_other_bounce - 4), %g7 |
194 | 238 | ||
@@ -312,6 +356,7 @@ fitos_emul_fini: | |||
312 | .globl do_fptrap | 356 | .globl do_fptrap |
313 | .align 32 | 357 | .align 32 |
314 | do_fptrap: | 358 | do_fptrap: |
359 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
315 | stx %fsr, [%g6 + TI_XFSR] | 360 | stx %fsr, [%g6 + TI_XFSR] |
316 | do_fptrap_after_fsr: | 361 | do_fptrap_after_fsr: |
317 | ldub [%g6 + TI_FPSAVED], %g3 | 362 | ldub [%g6 + TI_FPSAVED], %g3 |
@@ -321,10 +366,22 @@ do_fptrap_after_fsr: | |||
321 | rd %gsr, %g3 | 366 | rd %gsr, %g3 |
322 | stx %g3, [%g6 + TI_GSR] | 367 | stx %g3, [%g6 + TI_GSR] |
323 | mov SECONDARY_CONTEXT, %g3 | 368 | mov SECONDARY_CONTEXT, %g3 |
324 | ldxa [%g3] ASI_DMMU, %g5 | 369 | |
370 | 661: ldxa [%g3] ASI_DMMU, %g5 | ||
371 | .section .sun4v_1insn_patch, "ax" | ||
372 | .word 661b | ||
373 | ldxa [%g3] ASI_MMU, %g5 | ||
374 | .previous | ||
375 | |||
325 | sethi %hi(sparc64_kern_sec_context), %g2 | 376 | sethi %hi(sparc64_kern_sec_context), %g2 |
326 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 | 377 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 |
327 | stxa %g2, [%g3] ASI_DMMU | 378 | |
379 | 661: stxa %g2, [%g3] ASI_DMMU | ||
380 | .section .sun4v_1insn_patch, "ax" | ||
381 | .word 661b | ||
382 | stxa %g2, [%g3] ASI_MMU | ||
383 | .previous | ||
384 | |||
328 | membar #Sync | 385 | membar #Sync |
329 | add %g6, TI_FPREGS, %g2 | 386 | add %g6, TI_FPREGS, %g2 |
330 | andcc %g1, FPRS_DL, %g0 | 387 | andcc %g1, FPRS_DL, %g0 |
@@ -339,7 +396,13 @@ do_fptrap_after_fsr: | |||
339 | stda %f48, [%g2 + %g3] ASI_BLK_S | 396 | stda %f48, [%g2 + %g3] ASI_BLK_S |
340 | 5: mov SECONDARY_CONTEXT, %g1 | 397 | 5: mov SECONDARY_CONTEXT, %g1 |
341 | membar #Sync | 398 | membar #Sync |
342 | stxa %g5, [%g1] ASI_DMMU | 399 | |
400 | 661: stxa %g5, [%g1] ASI_DMMU | ||
401 | .section .sun4v_1insn_patch, "ax" | ||
402 | .word 661b | ||
403 | stxa %g5, [%g1] ASI_MMU | ||
404 | .previous | ||
405 | |||
343 | membar #Sync | 406 | membar #Sync |
344 | ba,pt %xcc, etrap | 407 | ba,pt %xcc, etrap |
345 | wr %g0, 0, %fprs | 408 | wr %g0, 0, %fprs |
@@ -353,8 +416,6 @@ do_fptrap_after_fsr: | |||
353 | * | 416 | * |
354 | * With this method we can do most of the cross-call tlb/cache | 417 | * With this method we can do most of the cross-call tlb/cache |
355 | * flushing very quickly. | 418 | * flushing very quickly. |
356 | * | ||
357 | * Current CPU's IRQ worklist table is locked into %g6, don't touch. | ||
358 | */ | 419 | */ |
359 | .text | 420 | .text |
360 | .align 32 | 421 | .align 32 |
@@ -378,6 +439,8 @@ do_ivec: | |||
378 | sllx %g2, %g4, %g2 | 439 | sllx %g2, %g4, %g2 |
379 | sllx %g4, 2, %g4 | 440 | sllx %g4, 2, %g4 |
380 | 441 | ||
442 | TRAP_LOAD_IRQ_WORK(%g6, %g1) | ||
443 | |||
381 | lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ | 444 | lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ |
382 | stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ | 445 | stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ |
383 | stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ | 446 | stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ |
@@ -399,76 +462,6 @@ do_ivec_xcall: | |||
399 | 1: jmpl %g3, %g0 | 462 | 1: jmpl %g3, %g0 |
400 | nop | 463 | nop |
401 | 464 | ||
402 | .globl save_alternate_globals | ||
403 | save_alternate_globals: /* %o0 = save_area */ | ||
404 | rdpr %pstate, %o5 | ||
405 | andn %o5, PSTATE_IE, %o1 | ||
406 | wrpr %o1, PSTATE_AG, %pstate | ||
407 | stx %g0, [%o0 + 0x00] | ||
408 | stx %g1, [%o0 + 0x08] | ||
409 | stx %g2, [%o0 + 0x10] | ||
410 | stx %g3, [%o0 + 0x18] | ||
411 | stx %g4, [%o0 + 0x20] | ||
412 | stx %g5, [%o0 + 0x28] | ||
413 | stx %g6, [%o0 + 0x30] | ||
414 | stx %g7, [%o0 + 0x38] | ||
415 | wrpr %o1, PSTATE_IG, %pstate | ||
416 | stx %g0, [%o0 + 0x40] | ||
417 | stx %g1, [%o0 + 0x48] | ||
418 | stx %g2, [%o0 + 0x50] | ||
419 | stx %g3, [%o0 + 0x58] | ||
420 | stx %g4, [%o0 + 0x60] | ||
421 | stx %g5, [%o0 + 0x68] | ||
422 | stx %g6, [%o0 + 0x70] | ||
423 | stx %g7, [%o0 + 0x78] | ||
424 | wrpr %o1, PSTATE_MG, %pstate | ||
425 | stx %g0, [%o0 + 0x80] | ||
426 | stx %g1, [%o0 + 0x88] | ||
427 | stx %g2, [%o0 + 0x90] | ||
428 | stx %g3, [%o0 + 0x98] | ||
429 | stx %g4, [%o0 + 0xa0] | ||
430 | stx %g5, [%o0 + 0xa8] | ||
431 | stx %g6, [%o0 + 0xb0] | ||
432 | stx %g7, [%o0 + 0xb8] | ||
433 | wrpr %o5, 0x0, %pstate | ||
434 | retl | ||
435 | nop | ||
436 | |||
437 | .globl restore_alternate_globals | ||
438 | restore_alternate_globals: /* %o0 = save_area */ | ||
439 | rdpr %pstate, %o5 | ||
440 | andn %o5, PSTATE_IE, %o1 | ||
441 | wrpr %o1, PSTATE_AG, %pstate | ||
442 | ldx [%o0 + 0x00], %g0 | ||
443 | ldx [%o0 + 0x08], %g1 | ||
444 | ldx [%o0 + 0x10], %g2 | ||
445 | ldx [%o0 + 0x18], %g3 | ||
446 | ldx [%o0 + 0x20], %g4 | ||
447 | ldx [%o0 + 0x28], %g5 | ||
448 | ldx [%o0 + 0x30], %g6 | ||
449 | ldx [%o0 + 0x38], %g7 | ||
450 | wrpr %o1, PSTATE_IG, %pstate | ||
451 | ldx [%o0 + 0x40], %g0 | ||
452 | ldx [%o0 + 0x48], %g1 | ||
453 | ldx [%o0 + 0x50], %g2 | ||
454 | ldx [%o0 + 0x58], %g3 | ||
455 | ldx [%o0 + 0x60], %g4 | ||
456 | ldx [%o0 + 0x68], %g5 | ||
457 | ldx [%o0 + 0x70], %g6 | ||
458 | ldx [%o0 + 0x78], %g7 | ||
459 | wrpr %o1, PSTATE_MG, %pstate | ||
460 | ldx [%o0 + 0x80], %g0 | ||
461 | ldx [%o0 + 0x88], %g1 | ||
462 | ldx [%o0 + 0x90], %g2 | ||
463 | ldx [%o0 + 0x98], %g3 | ||
464 | ldx [%o0 + 0xa0], %g4 | ||
465 | ldx [%o0 + 0xa8], %g5 | ||
466 | ldx [%o0 + 0xb0], %g6 | ||
467 | ldx [%o0 + 0xb8], %g7 | ||
468 | wrpr %o5, 0x0, %pstate | ||
469 | retl | ||
470 | nop | ||
471 | |||
472 | .globl getcc, setcc | 465 | .globl getcc, setcc |
473 | getcc: | 466 | getcc: |
474 | ldx [%o0 + PT_V9_TSTATE], %o1 | 467 | ldx [%o0 + PT_V9_TSTATE], %o1 |
@@ -488,9 +481,24 @@ setcc: | |||
488 | retl | 481 | retl |
489 | stx %o1, [%o0 + PT_V9_TSTATE] | 482 | stx %o1, [%o0 + PT_V9_TSTATE] |
490 | 483 | ||
491 | .globl utrap, utrap_ill | 484 | .globl utrap_trap |
492 | utrap: brz,pn %g1, etrap | 485 | utrap_trap: /* %g3=handler,%g4=level */ |
486 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
487 | ldx [%g6 + TI_UTRAPS], %g1 | ||
488 | brnz,pt %g1, invoke_utrap | ||
493 | nop | 489 | nop |
490 | |||
491 | ba,pt %xcc, etrap | ||
492 | rd %pc, %g7 | ||
493 | mov %l4, %o1 | ||
494 | call bad_trap | ||
495 | add %sp, PTREGS_OFF, %o0 | ||
496 | ba,pt %xcc, rtrap | ||
497 | clr %l6 | ||
498 | |||
499 | invoke_utrap: | ||
500 | sllx %g3, 3, %g3 | ||
501 | ldx [%g1 + %g3], %g1 | ||
494 | save %sp, -128, %sp | 502 | save %sp, -128, %sp |
495 | rdpr %tstate, %l6 | 503 | rdpr %tstate, %l6 |
496 | rdpr %cwp, %l7 | 504 | rdpr %cwp, %l7 |
@@ -500,17 +508,6 @@ utrap: brz,pn %g1, etrap | |||
500 | rdpr %tnpc, %l7 | 508 | rdpr %tnpc, %l7 |
501 | wrpr %g1, 0, %tnpc | 509 | wrpr %g1, 0, %tnpc |
502 | done | 510 | done |
503 | utrap_ill: | ||
504 | call bad_trap | ||
505 | add %sp, PTREGS_OFF, %o0 | ||
506 | ba,pt %xcc, rtrap | ||
507 | clr %l6 | ||
508 | |||
509 | /* XXX Here is stuff we still need to write... -DaveM XXX */ | ||
510 | .globl netbsd_syscall | ||
511 | netbsd_syscall: | ||
512 | retl | ||
513 | nop | ||
514 | 511 | ||
515 | /* We need to carefully read the error status, ACK | 512 | /* We need to carefully read the error status, ACK |
516 | * the errors, prevent recursive traps, and pass the | 513 | * the errors, prevent recursive traps, and pass the |
@@ -1001,7 +998,7 @@ dcpe_icpe_tl1_common: | |||
1001 | * %g3: scratch | 998 | * %g3: scratch |
1002 | * %g4: AFSR | 999 | * %g4: AFSR |
1003 | * %g5: AFAR | 1000 | * %g5: AFAR |
1004 | * %g6: current thread ptr | 1001 | * %g6: unused, will have current thread ptr after etrap |
1005 | * %g7: scratch | 1002 | * %g7: scratch |
1006 | */ | 1003 | */ |
1007 | __cheetah_log_error: | 1004 | __cheetah_log_error: |
@@ -1539,13 +1536,14 @@ ret_from_syscall: | |||
1539 | 1536 | ||
1540 | 1: b,pt %xcc, ret_sys_call | 1537 | 1: b,pt %xcc, ret_sys_call |
1541 | ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 | 1538 | ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 |
1542 | sparc_exit: wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate | 1539 | sparc_exit: rdpr %pstate, %g2 |
1540 | wrpr %g2, PSTATE_IE, %pstate | ||
1543 | rdpr %otherwin, %g1 | 1541 | rdpr %otherwin, %g1 |
1544 | rdpr %cansave, %g3 | 1542 | rdpr %cansave, %g3 |
1545 | add %g3, %g1, %g3 | 1543 | add %g3, %g1, %g3 |
1546 | wrpr %g3, 0x0, %cansave | 1544 | wrpr %g3, 0x0, %cansave |
1547 | wrpr %g0, 0x0, %otherwin | 1545 | wrpr %g0, 0x0, %otherwin |
1548 | wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate | 1546 | wrpr %g2, 0x0, %pstate |
1549 | ba,pt %xcc, sys_exit | 1547 | ba,pt %xcc, sys_exit |
1550 | stb %g0, [%g6 + TI_WSAVED] | 1548 | stb %g0, [%g6 + TI_WSAVED] |
1551 | 1549 | ||
@@ -1690,3 +1688,138 @@ __flushw_user: | |||
1690 | restore %g0, %g0, %g0 | 1688 | restore %g0, %g0, %g0 |
1691 | 2: retl | 1689 | 2: retl |
1692 | nop | 1690 | nop |
1691 | |||
1692 | #ifdef CONFIG_SMP | ||
1693 | .globl hard_smp_processor_id | ||
1694 | hard_smp_processor_id: | ||
1695 | #endif | ||
1696 | .globl real_hard_smp_processor_id | ||
1697 | real_hard_smp_processor_id: | ||
1698 | __GET_CPUID(%o0) | ||
1699 | retl | ||
1700 | nop | ||
1701 | |||
1702 | /* %o0: devhandle | ||
1703 | * %o1: devino | ||
1704 | * | ||
1705 | * returns %o0: sysino | ||
1706 | */ | ||
1707 | .globl sun4v_devino_to_sysino | ||
1708 | sun4v_devino_to_sysino: | ||
1709 | mov HV_FAST_INTR_DEVINO2SYSINO, %o5 | ||
1710 | ta HV_FAST_TRAP | ||
1711 | retl | ||
1712 | mov %o1, %o0 | ||
1713 | |||
1714 | /* %o0: sysino | ||
1715 | * | ||
1716 | * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1717 | */ | ||
1718 | .globl sun4v_intr_getenabled | ||
1719 | sun4v_intr_getenabled: | ||
1720 | mov HV_FAST_INTR_GETENABLED, %o5 | ||
1721 | ta HV_FAST_TRAP | ||
1722 | retl | ||
1723 | mov %o1, %o0 | ||
1724 | |||
1725 | /* %o0: sysino | ||
1726 | * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1727 | */ | ||
1728 | .globl sun4v_intr_setenabled | ||
1729 | sun4v_intr_setenabled: | ||
1730 | mov HV_FAST_INTR_SETENABLED, %o5 | ||
1731 | ta HV_FAST_TRAP | ||
1732 | retl | ||
1733 | nop | ||
1734 | |||
1735 | /* %o0: sysino | ||
1736 | * | ||
1737 | * returns %o0: intr_state (HV_INTR_STATE_*) | ||
1738 | */ | ||
1739 | .globl sun4v_intr_getstate | ||
1740 | sun4v_intr_getstate: | ||
1741 | mov HV_FAST_INTR_GETSTATE, %o5 | ||
1742 | ta HV_FAST_TRAP | ||
1743 | retl | ||
1744 | mov %o1, %o0 | ||
1745 | |||
1746 | /* %o0: sysino | ||
1747 | * %o1: intr_state (HV_INTR_STATE_*) | ||
1748 | */ | ||
1749 | .globl sun4v_intr_setstate | ||
1750 | sun4v_intr_setstate: | ||
1751 | mov HV_FAST_INTR_SETSTATE, %o5 | ||
1752 | ta HV_FAST_TRAP | ||
1753 | retl | ||
1754 | nop | ||
1755 | |||
1756 | /* %o0: sysino | ||
1757 | * | ||
1758 | * returns %o0: cpuid | ||
1759 | */ | ||
1760 | .globl sun4v_intr_gettarget | ||
1761 | sun4v_intr_gettarget: | ||
1762 | mov HV_FAST_INTR_GETTARGET, %o5 | ||
1763 | ta HV_FAST_TRAP | ||
1764 | retl | ||
1765 | mov %o1, %o0 | ||
1766 | |||
1767 | /* %o0: sysino | ||
1768 | * %o1: cpuid | ||
1769 | */ | ||
1770 | .globl sun4v_intr_settarget | ||
1771 | sun4v_intr_settarget: | ||
1772 | mov HV_FAST_INTR_SETTARGET, %o5 | ||
1773 | ta HV_FAST_TRAP | ||
1774 | retl | ||
1775 | nop | ||
1776 | |||
1777 | /* %o0: type | ||
1778 | * %o1: queue paddr | ||
1779 | * %o2: num queue entries | ||
1780 | * | ||
1781 | * returns %o0: status | ||
1782 | */ | ||
1783 | .globl sun4v_cpu_qconf | ||
1784 | sun4v_cpu_qconf: | ||
1785 | mov HV_FAST_CPU_QCONF, %o5 | ||
1786 | ta HV_FAST_TRAP | ||
1787 | retl | ||
1788 | nop | ||
1789 | |||
1790 | /* returns %o0: status | ||
1791 | */ | ||
1792 | .globl sun4v_cpu_yield | ||
1793 | sun4v_cpu_yield: | ||
1794 | mov HV_FAST_CPU_YIELD, %o5 | ||
1795 | ta HV_FAST_TRAP | ||
1796 | retl | ||
1797 | nop | ||
1798 | |||
1799 | /* %o0: num cpus in cpu list | ||
1800 | * %o1: cpu list paddr | ||
1801 | * %o2: mondo block paddr | ||
1802 | * | ||
1803 | * returns %o0: status | ||
1804 | */ | ||
1805 | .globl sun4v_cpu_mondo_send | ||
1806 | sun4v_cpu_mondo_send: | ||
1807 | mov HV_FAST_CPU_MONDO_SEND, %o5 | ||
1808 | ta HV_FAST_TRAP | ||
1809 | retl | ||
1810 | nop | ||
1811 | |||
1812 | /* %o0: CPU ID | ||
1813 | * | ||
1814 | * returns %o0: -status if status non-zero, else | ||
1815 | * %o0: cpu state as HV_CPU_STATE_* | ||
1816 | */ | ||
1817 | .globl sun4v_cpu_state | ||
1818 | sun4v_cpu_state: | ||
1819 | mov HV_FAST_CPU_STATE, %o5 | ||
1820 | ta HV_FAST_TRAP | ||
1821 | brnz,pn %o0, 1f | ||
1822 | sub %g0, %o0, %o0 | ||
1823 | mov %o1, %o0 | ||
1824 | 1: retl | ||
1825 | nop | ||
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index 0d8eba21111b..149383835c25 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S | |||
@@ -31,6 +31,7 @@ | |||
31 | .globl etrap, etrap_irq, etraptl1 | 31 | .globl etrap, etrap_irq, etraptl1 |
32 | etrap: rdpr %pil, %g2 | 32 | etrap: rdpr %pil, %g2 |
33 | etrap_irq: | 33 | etrap_irq: |
34 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
34 | rdpr %tstate, %g1 | 35 | rdpr %tstate, %g1 |
35 | sllx %g2, 20, %g3 | 36 | sllx %g2, 20, %g3 |
36 | andcc %g1, TSTATE_PRIV, %g0 | 37 | andcc %g1, TSTATE_PRIV, %g0 |
@@ -54,7 +55,31 @@ etrap_irq: | |||
54 | rd %y, %g3 | 55 | rd %y, %g3 |
55 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] | 56 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] |
56 | st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] | 57 | st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] |
57 | save %g2, -STACK_BIAS, %sp ! Ordering here is critical | 58 | |
59 | rdpr %cansave, %g1 | ||
60 | brnz,pt %g1, etrap_save | ||
61 | nop | ||
62 | |||
63 | rdpr %cwp, %g1 | ||
64 | add %g1, 2, %g1 | ||
65 | wrpr %g1, %cwp | ||
66 | be,pt %xcc, etrap_user_spill | ||
67 | mov ASI_AIUP, %g3 | ||
68 | |||
69 | rdpr %otherwin, %g3 | ||
70 | brz %g3, etrap_kernel_spill | ||
71 | mov ASI_AIUS, %g3 | ||
72 | |||
73 | etrap_user_spill: | ||
74 | |||
75 | wr %g3, 0x0, %asi | ||
76 | ldx [%g6 + TI_FLAGS], %g3 | ||
77 | and %g3, _TIF_32BIT, %g3 | ||
78 | brnz,pt %g3, etrap_user_spill_32bit | ||
79 | nop | ||
80 | ba,a,pt %xcc, etrap_user_spill_64bit | ||
81 | |||
82 | etrap_save: save %g2, -STACK_BIAS, %sp | ||
58 | mov %g6, %l6 | 83 | mov %g6, %l6 |
59 | 84 | ||
60 | bne,pn %xcc, 3f | 85 | bne,pn %xcc, 3f |
@@ -70,42 +95,56 @@ etrap_irq: | |||
70 | wrpr %g2, 0, %wstate | 95 | wrpr %g2, 0, %wstate |
71 | sethi %hi(sparc64_kern_pri_context), %g2 | 96 | sethi %hi(sparc64_kern_pri_context), %g2 |
72 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 | 97 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 |
73 | stxa %g3, [%l4] ASI_DMMU | 98 | |
74 | flush %l6 | 99 | 661: stxa %g3, [%l4] ASI_DMMU |
75 | wr %g0, ASI_AIUS, %asi | 100 | .section .sun4v_1insn_patch, "ax" |
76 | 2: wrpr %g0, 0x0, %tl | 101 | .word 661b |
77 | mov %g4, %l4 | 102 | stxa %g3, [%l4] ASI_MMU |
103 | .previous | ||
104 | |||
105 | sethi %hi(KERNBASE), %l4 | ||
106 | flush %l4 | ||
107 | mov ASI_AIUS, %l7 | ||
108 | 2: mov %g4, %l4 | ||
78 | mov %g5, %l5 | 109 | mov %g5, %l5 |
110 | add %g7, 4, %l2 | ||
111 | |||
112 | /* Go to trap time globals so we can save them. */ | ||
113 | 661: wrpr %g0, ETRAP_PSTATE1, %pstate | ||
114 | .section .sun4v_1insn_patch, "ax" | ||
115 | .word 661b | ||
116 | SET_GL(0) | ||
117 | .previous | ||
79 | 118 | ||
80 | mov %g7, %l2 | ||
81 | wrpr %g0, ETRAP_PSTATE1, %pstate | ||
82 | stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] | 119 | stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] |
83 | stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] | 120 | stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] |
121 | sllx %l7, 24, %l7 | ||
84 | stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] | 122 | stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] |
123 | rdpr %cwp, %l0 | ||
85 | stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] | 124 | stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] |
86 | stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] | 125 | stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] |
87 | stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] | 126 | stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] |
88 | |||
89 | stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] | 127 | stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] |
128 | or %l7, %l0, %l7 | ||
129 | sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0 | ||
130 | or %l7, %l0, %l7 | ||
131 | wrpr %l2, %tnpc | ||
132 | wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate | ||
90 | stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] | 133 | stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] |
91 | stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] | 134 | stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] |
92 | stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] | 135 | stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] |
93 | stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] | 136 | stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] |
94 | stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] | 137 | stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] |
95 | stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] | 138 | stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] |
96 | |||
97 | stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] | 139 | stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] |
98 | stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] | ||
99 | wrpr %g0, ETRAP_PSTATE2, %pstate | ||
100 | mov %l6, %g6 | 140 | mov %l6, %g6 |
101 | #ifdef CONFIG_SMP | 141 | stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] |
102 | mov TSB_REG, %g3 | 142 | LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1) |
103 | ldxa [%g3] ASI_IMMU, %g5 | 143 | ldx [%g6 + TI_TASK], %g4 |
104 | #endif | 144 | done |
105 | jmpl %l2 + 0x4, %g0 | ||
106 | ldx [%g6 + TI_TASK], %g4 | ||
107 | 145 | ||
108 | 3: ldub [%l6 + TI_FPDEPTH], %l5 | 146 | 3: mov ASI_P, %l7 |
147 | ldub [%l6 + TI_FPDEPTH], %l5 | ||
109 | add %l6, TI_FPSAVED + 1, %l4 | 148 | add %l6, TI_FPSAVED + 1, %l4 |
110 | srl %l5, 1, %l3 | 149 | srl %l5, 1, %l3 |
111 | add %l5, 2, %l5 | 150 | add %l5, 2, %l5 |
@@ -125,6 +164,7 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. | |||
125 | * 0x58 TL4's TT | 164 | * 0x58 TL4's TT |
126 | * 0x60 TL | 165 | * 0x60 TL |
127 | */ | 166 | */ |
167 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
128 | sub %sp, ((4 * 8) * 4) + 8, %g2 | 168 | sub %sp, ((4 * 8) * 4) + 8, %g2 |
129 | rdpr %tl, %g1 | 169 | rdpr %tl, %g1 |
130 | 170 | ||
@@ -148,6 +188,11 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. | |||
148 | rdpr %tt, %g3 | 188 | rdpr %tt, %g3 |
149 | stx %g3, [%g2 + STACK_BIAS + 0x38] | 189 | stx %g3, [%g2 + STACK_BIAS + 0x38] |
150 | 190 | ||
191 | sethi %hi(is_sun4v), %g3 | ||
192 | lduw [%g3 + %lo(is_sun4v)], %g3 | ||
193 | brnz,pn %g3, finish_tl1_capture | ||
194 | nop | ||
195 | |||
151 | wrpr %g0, 3, %tl | 196 | wrpr %g0, 3, %tl |
152 | rdpr %tstate, %g3 | 197 | rdpr %tstate, %g3 |
153 | stx %g3, [%g2 + STACK_BIAS + 0x40] | 198 | stx %g3, [%g2 + STACK_BIAS + 0x40] |
@@ -168,91 +213,20 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. | |||
168 | rdpr %tt, %g3 | 213 | rdpr %tt, %g3 |
169 | stx %g3, [%g2 + STACK_BIAS + 0x78] | 214 | stx %g3, [%g2 + STACK_BIAS + 0x78] |
170 | 215 | ||
171 | wrpr %g1, %tl | ||
172 | stx %g1, [%g2 + STACK_BIAS + 0x80] | 216 | stx %g1, [%g2 + STACK_BIAS + 0x80] |
173 | 217 | ||
218 | finish_tl1_capture: | ||
219 | wrpr %g0, 1, %tl | ||
220 | 661: nop | ||
221 | .section .sun4v_1insn_patch, "ax" | ||
222 | .word 661b | ||
223 | SET_GL(1) | ||
224 | .previous | ||
225 | |||
174 | rdpr %tstate, %g1 | 226 | rdpr %tstate, %g1 |
175 | sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 | 227 | sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 |
176 | ba,pt %xcc, 1b | 228 | ba,pt %xcc, 1b |
177 | andcc %g1, TSTATE_PRIV, %g0 | 229 | andcc %g1, TSTATE_PRIV, %g0 |
178 | 230 | ||
179 | .align 64 | ||
180 | .globl scetrap | ||
181 | scetrap: rdpr %pil, %g2 | ||
182 | rdpr %tstate, %g1 | ||
183 | sllx %g2, 20, %g3 | ||
184 | andcc %g1, TSTATE_PRIV, %g0 | ||
185 | or %g1, %g3, %g1 | ||
186 | bne,pn %xcc, 1f | ||
187 | sub %sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2 | ||
188 | wrpr %g0, 7, %cleanwin | ||
189 | |||
190 | sllx %g1, 51, %g3 | ||
191 | sethi %hi(TASK_REGOFF), %g2 | ||
192 | or %g2, %lo(TASK_REGOFF), %g2 | ||
193 | brlz,pn %g3, 1f | ||
194 | add %g6, %g2, %g2 | ||
195 | wr %g0, 0, %fprs | ||
196 | 1: rdpr %tpc, %g3 | ||
197 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE] | ||
198 | |||
199 | rdpr %tnpc, %g1 | ||
200 | stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC] | ||
201 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] | ||
202 | save %g2, -STACK_BIAS, %sp ! Ordering here is critical | ||
203 | mov %g6, %l6 | ||
204 | bne,pn %xcc, 2f | ||
205 | mov ASI_P, %l7 | ||
206 | rdpr %canrestore, %g3 | ||
207 | |||
208 | rdpr %wstate, %g2 | ||
209 | wrpr %g0, 0, %canrestore | ||
210 | sll %g2, 3, %g2 | ||
211 | mov PRIMARY_CONTEXT, %l4 | ||
212 | wrpr %g3, 0, %otherwin | ||
213 | wrpr %g2, 0, %wstate | ||
214 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
215 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 | ||
216 | stxa %g3, [%l4] ASI_DMMU | ||
217 | flush %l6 | ||
218 | |||
219 | mov ASI_AIUS, %l7 | ||
220 | 2: mov %g4, %l4 | ||
221 | mov %g5, %l5 | ||
222 | add %g7, 0x4, %l2 | ||
223 | wrpr %g0, ETRAP_PSTATE1, %pstate | ||
224 | stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] | ||
225 | stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] | ||
226 | sllx %l7, 24, %l7 | ||
227 | |||
228 | stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] | ||
229 | rdpr %cwp, %l0 | ||
230 | stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] | ||
231 | stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] | ||
232 | stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] | ||
233 | stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] | ||
234 | or %l7, %l0, %l7 | ||
235 | sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0 | ||
236 | |||
237 | or %l7, %l0, %l7 | ||
238 | wrpr %l2, %tnpc | ||
239 | wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate | ||
240 | stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] | ||
241 | stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] | ||
242 | stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] | ||
243 | stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] | ||
244 | stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] | ||
245 | |||
246 | stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] | ||
247 | stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] | ||
248 | mov %l6, %g6 | ||
249 | stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] | ||
250 | #ifdef CONFIG_SMP | ||
251 | mov TSB_REG, %g3 | ||
252 | ldxa [%g3] ASI_IMMU, %g5 | ||
253 | #endif | ||
254 | ldx [%g6 + TI_TASK], %g4 | ||
255 | done | ||
256 | |||
257 | #undef TASK_REGOFF | 231 | #undef TASK_REGOFF |
258 | #undef ETRAP_PSTATE1 | 232 | #undef ETRAP_PSTATE1 |
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index b49dcd4504b0..3eadac5e171e 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/head.h> | 26 | #include <asm/head.h> |
27 | #include <asm/ttable.h> | 27 | #include <asm/ttable.h> |
28 | #include <asm/mmu.h> | 28 | #include <asm/mmu.h> |
29 | #include <asm/cpudata.h> | ||
29 | 30 | ||
30 | /* This section from from _start to sparc64_boot_end should fit into | 31 | /* This section from from _start to sparc64_boot_end should fit into |
31 | * 0x0000000000404000 to 0x0000000000408000. | 32 | * 0x0000000000404000 to 0x0000000000408000. |
@@ -94,12 +95,17 @@ sparc64_boot: | |||
94 | wrpr %g1, 0x0, %pstate | 95 | wrpr %g1, 0x0, %pstate |
95 | ba,a,pt %xcc, 1f | 96 | ba,a,pt %xcc, 1f |
96 | 97 | ||
97 | .globl prom_finddev_name, prom_chosen_path | 98 | .globl prom_finddev_name, prom_chosen_path, prom_root_node |
98 | .globl prom_getprop_name, prom_mmu_name | 99 | .globl prom_getprop_name, prom_mmu_name, prom_peer_name |
99 | .globl prom_callmethod_name, prom_translate_name | 100 | .globl prom_callmethod_name, prom_translate_name, prom_root_compatible |
100 | .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache | 101 | .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache |
101 | .globl prom_boot_mapped_pc, prom_boot_mapping_mode | 102 | .globl prom_boot_mapped_pc, prom_boot_mapping_mode |
102 | .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low | 103 | .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low |
104 | .globl is_sun4v | ||
105 | prom_peer_name: | ||
106 | .asciz "peer" | ||
107 | prom_compatible_name: | ||
108 | .asciz "compatible" | ||
103 | prom_finddev_name: | 109 | prom_finddev_name: |
104 | .asciz "finddevice" | 110 | .asciz "finddevice" |
105 | prom_chosen_path: | 111 | prom_chosen_path: |
@@ -116,7 +122,13 @@ prom_map_name: | |||
116 | .asciz "map" | 122 | .asciz "map" |
117 | prom_unmap_name: | 123 | prom_unmap_name: |
118 | .asciz "unmap" | 124 | .asciz "unmap" |
125 | prom_sun4v_name: | ||
126 | .asciz "sun4v" | ||
119 | .align 4 | 127 | .align 4 |
128 | prom_root_compatible: | ||
129 | .skip 64 | ||
130 | prom_root_node: | ||
131 | .word 0 | ||
120 | prom_mmu_ihandle_cache: | 132 | prom_mmu_ihandle_cache: |
121 | .word 0 | 133 | .word 0 |
122 | prom_boot_mapped_pc: | 134 | prom_boot_mapped_pc: |
@@ -128,8 +140,54 @@ prom_boot_mapping_phys_high: | |||
128 | .xword 0 | 140 | .xword 0 |
129 | prom_boot_mapping_phys_low: | 141 | prom_boot_mapping_phys_low: |
130 | .xword 0 | 142 | .xword 0 |
143 | is_sun4v: | ||
144 | .word 0 | ||
131 | 1: | 145 | 1: |
132 | rd %pc, %l0 | 146 | rd %pc, %l0 |
147 | |||
148 | mov (1b - prom_peer_name), %l1 | ||
149 | sub %l0, %l1, %l1 | ||
150 | mov 0, %l2 | ||
151 | |||
152 | /* prom_root_node = prom_peer(0) */ | ||
153 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer" | ||
154 | mov 1, %l3 | ||
155 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 | ||
156 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
157 | stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0 | ||
158 | stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 | ||
159 | call %l7 | ||
160 | add %sp, (2047 + 128), %o0 ! argument array | ||
161 | |||
162 | ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node | ||
163 | mov (1b - prom_root_node), %l1 | ||
164 | sub %l0, %l1, %l1 | ||
165 | stw %l4, [%l1] | ||
166 | |||
167 | mov (1b - prom_getprop_name), %l1 | ||
168 | mov (1b - prom_compatible_name), %l2 | ||
169 | mov (1b - prom_root_compatible), %l5 | ||
170 | sub %l0, %l1, %l1 | ||
171 | sub %l0, %l2, %l2 | ||
172 | sub %l0, %l5, %l5 | ||
173 | |||
174 | /* prom_getproperty(prom_root_node, "compatible", | ||
175 | * &prom_root_compatible, 64) | ||
176 | */ | ||
177 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" | ||
178 | mov 4, %l3 | ||
179 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 | ||
180 | mov 1, %l3 | ||
181 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
182 | stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node | ||
183 | stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" | ||
184 | stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible | ||
185 | mov 64, %l3 | ||
186 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size | ||
187 | stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 | ||
188 | call %l7 | ||
189 | add %sp, (2047 + 128), %o0 ! argument array | ||
190 | |||
133 | mov (1b - prom_finddev_name), %l1 | 191 | mov (1b - prom_finddev_name), %l1 |
134 | mov (1b - prom_chosen_path), %l2 | 192 | mov (1b - prom_chosen_path), %l2 |
135 | mov (1b - prom_boot_mapped_pc), %l3 | 193 | mov (1b - prom_boot_mapped_pc), %l3 |
@@ -238,6 +296,27 @@ prom_boot_mapping_phys_low: | |||
238 | add %sp, (192 + 128), %sp | 296 | add %sp, (192 + 128), %sp |
239 | 297 | ||
240 | sparc64_boot_after_remap: | 298 | sparc64_boot_after_remap: |
299 | sethi %hi(prom_root_compatible), %g1 | ||
300 | or %g1, %lo(prom_root_compatible), %g1 | ||
301 | sethi %hi(prom_sun4v_name), %g7 | ||
302 | or %g7, %lo(prom_sun4v_name), %g7 | ||
303 | mov 5, %g3 | ||
304 | 1: ldub [%g7], %g2 | ||
305 | ldub [%g1], %g4 | ||
306 | cmp %g2, %g4 | ||
307 | bne,pn %icc, 2f | ||
308 | add %g7, 1, %g7 | ||
309 | subcc %g3, 1, %g3 | ||
310 | bne,pt %xcc, 1b | ||
311 | add %g1, 1, %g1 | ||
312 | |||
313 | sethi %hi(is_sun4v), %g1 | ||
314 | or %g1, %lo(is_sun4v), %g1 | ||
315 | mov 1, %g7 | ||
316 | stw %g7, [%g1] | ||
317 | |||
318 | 2: | ||
319 | BRANCH_IF_SUN4V(g1, jump_to_sun4u_init) | ||
241 | BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) | 320 | BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) |
242 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) | 321 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) |
243 | ba,pt %xcc, spitfire_boot | 322 | ba,pt %xcc, spitfire_boot |
@@ -301,20 +380,58 @@ jump_to_sun4u_init: | |||
301 | nop | 380 | nop |
302 | 381 | ||
303 | sun4u_init: | 382 | sun4u_init: |
383 | BRANCH_IF_SUN4V(g1, sun4v_init) | ||
384 | |||
304 | /* Set ctx 0 */ | 385 | /* Set ctx 0 */ |
305 | mov PRIMARY_CONTEXT, %g7 | 386 | mov PRIMARY_CONTEXT, %g7 |
306 | stxa %g0, [%g7] ASI_DMMU | 387 | stxa %g0, [%g7] ASI_DMMU |
307 | membar #Sync | 388 | membar #Sync |
308 | 389 | ||
309 | mov SECONDARY_CONTEXT, %g7 | 390 | mov SECONDARY_CONTEXT, %g7 |
310 | stxa %g0, [%g7] ASI_DMMU | 391 | stxa %g0, [%g7] ASI_DMMU |
311 | membar #Sync | 392 | membar #Sync |
312 | 393 | ||
313 | BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) | 394 | ba,pt %xcc, sun4u_continue |
395 | nop | ||
396 | |||
397 | sun4v_init: | ||
398 | /* Set ctx 0 */ | ||
399 | mov PRIMARY_CONTEXT, %g7 | ||
400 | stxa %g0, [%g7] ASI_MMU | ||
401 | membar #Sync | ||
402 | |||
403 | mov SECONDARY_CONTEXT, %g7 | ||
404 | stxa %g0, [%g7] ASI_MMU | ||
405 | membar #Sync | ||
406 | ba,pt %xcc, niagara_tlb_fixup | ||
407 | nop | ||
408 | |||
409 | sun4u_continue: | ||
410 | BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) | ||
314 | 411 | ||
315 | ba,pt %xcc, spitfire_tlb_fixup | 412 | ba,pt %xcc, spitfire_tlb_fixup |
316 | nop | 413 | nop |
317 | 414 | ||
415 | niagara_tlb_fixup: | ||
416 | mov 3, %g2 /* Set TLB type to hypervisor. */ | ||
417 | sethi %hi(tlb_type), %g1 | ||
418 | stw %g2, [%g1 + %lo(tlb_type)] | ||
419 | |||
420 | /* Patch copy/clear ops. */ | ||
421 | call niagara_patch_copyops | ||
422 | nop | ||
423 | call niagara_patch_bzero | ||
424 | nop | ||
425 | call niagara_patch_pageops | ||
426 | nop | ||
427 | |||
428 | /* Patch TLB/cache ops. */ | ||
429 | call hypervisor_patch_cachetlbops | ||
430 | nop | ||
431 | |||
432 | ba,pt %xcc, tlb_fixup_done | ||
433 | nop | ||
434 | |||
318 | cheetah_tlb_fixup: | 435 | cheetah_tlb_fixup: |
319 | mov 2, %g2 /* Set TLB type to cheetah+. */ | 436 | mov 2, %g2 /* Set TLB type to cheetah+. */ |
320 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) | 437 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) |
@@ -411,85 +528,55 @@ setup_trap_table: | |||
411 | wrpr %g0, 15, %pil | 528 | wrpr %g0, 15, %pil |
412 | 529 | ||
413 | /* Make the firmware call to jump over to the Linux trap table. */ | 530 | /* Make the firmware call to jump over to the Linux trap table. */ |
414 | call prom_set_trap_table | 531 | sethi %hi(is_sun4v), %o0 |
415 | sethi %hi(sparc64_ttable_tl0), %o0 | 532 | lduw [%o0 + %lo(is_sun4v)], %o0 |
533 | brz,pt %o0, 1f | ||
534 | nop | ||
416 | 535 | ||
417 | /* Start using proper page size encodings in ctx register. */ | 536 | TRAP_LOAD_TRAP_BLOCK(%g2, %g3) |
418 | sethi %hi(sparc64_kern_pri_context), %g3 | 537 | add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 |
419 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 | 538 | stxa %g2, [%g0] ASI_SCRATCHPAD |
420 | mov PRIMARY_CONTEXT, %g1 | ||
421 | stxa %g2, [%g1] ASI_DMMU | ||
422 | membar #Sync | ||
423 | 539 | ||
424 | /* The Linux trap handlers expect various trap global registers | 540 | /* Compute physical address: |
425 | * to be setup with some fixed values. So here we set these | ||
426 | * up very carefully. These globals are: | ||
427 | * | ||
428 | * Alternate Globals (PSTATE_AG): | ||
429 | * | ||
430 | * %g6 --> current_thread_info() | ||
431 | * | ||
432 | * MMU Globals (PSTATE_MG): | ||
433 | * | ||
434 | * %g1 --> TLB_SFSR | ||
435 | * %g2 --> ((_PAGE_VALID | _PAGE_SZ4MB | | ||
436 | * _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) | ||
437 | * ^ 0xfffff80000000000) | ||
438 | * (this %g2 value is used for computing the PAGE_OFFSET kernel | ||
439 | * TLB entries quickly, the virtual address of the fault XOR'd | ||
440 | * with this %g2 value is the PTE to load into the TLB) | ||
441 | * %g3 --> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE | ||
442 | * | 541 | * |
443 | * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()): | 542 | * paddr = kern_base + (mmfsa_vaddr - KERNBASE) |
444 | * | ||
445 | * %g6 --> __irq_work[smp_processor_id()] | ||
446 | */ | 543 | */ |
544 | sethi %hi(KERNBASE), %g3 | ||
545 | sub %g2, %g3, %g2 | ||
546 | sethi %hi(kern_base), %g3 | ||
547 | ldx [%g3 + %lo(kern_base)], %g3 | ||
548 | add %g2, %g3, %o1 | ||
447 | 549 | ||
448 | rdpr %pstate, %o1 | 550 | call prom_set_trap_table_sun4v |
449 | mov %g6, %o2 | 551 | sethi %hi(sparc64_ttable_tl0), %o0 |
450 | wrpr %o1, PSTATE_AG, %pstate | 552 | |
451 | mov %o2, %g6 | 553 | ba,pt %xcc, 2f |
452 | |||
453 | #define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) | ||
454 | #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) | ||
455 | wrpr %o1, PSTATE_MG, %pstate | ||
456 | mov TSB_REG, %g1 | ||
457 | stxa %g0, [%g1] ASI_DMMU | ||
458 | membar #Sync | ||
459 | stxa %g0, [%g1] ASI_IMMU | ||
460 | membar #Sync | ||
461 | mov TLB_SFSR, %g1 | ||
462 | sethi %uhi(KERN_HIGHBITS), %g2 | ||
463 | or %g2, %ulo(KERN_HIGHBITS), %g2 | ||
464 | sllx %g2, 32, %g2 | ||
465 | or %g2, KERN_LOWBITS, %g2 | ||
466 | |||
467 | BRANCH_IF_ANY_CHEETAH(g3,g7,8f) | ||
468 | ba,pt %xcc, 9f | ||
469 | nop | 554 | nop |
470 | 555 | ||
471 | 8: | 556 | 1: call prom_set_trap_table |
472 | sethi %uhi(VPTE_BASE_CHEETAH), %g3 | 557 | sethi %hi(sparc64_ttable_tl0), %o0 |
473 | or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 | ||
474 | ba,pt %xcc, 2f | ||
475 | sllx %g3, 32, %g3 | ||
476 | 558 | ||
477 | 9: | 559 | /* Start using proper page size encodings in ctx register. */ |
478 | sethi %uhi(VPTE_BASE_SPITFIRE), %g3 | 560 | 2: sethi %hi(sparc64_kern_pri_context), %g3 |
479 | or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 | 561 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 |
480 | sllx %g3, 32, %g3 | ||
481 | 562 | ||
482 | 2: | 563 | mov PRIMARY_CONTEXT, %g1 |
483 | clr %g7 | 564 | |
484 | #undef KERN_HIGHBITS | 565 | 661: stxa %g2, [%g1] ASI_DMMU |
485 | #undef KERN_LOWBITS | 566 | .section .sun4v_1insn_patch, "ax" |
567 | .word 661b | ||
568 | stxa %g2, [%g1] ASI_MMU | ||
569 | .previous | ||
570 | |||
571 | membar #Sync | ||
486 | 572 | ||
487 | /* Kill PROM timer */ | 573 | /* Kill PROM timer */ |
488 | sethi %hi(0x80000000), %o2 | 574 | sethi %hi(0x80000000), %o2 |
489 | sllx %o2, 32, %o2 | 575 | sllx %o2, 32, %o2 |
490 | wr %o2, 0, %tick_cmpr | 576 | wr %o2, 0, %tick_cmpr |
491 | 577 | ||
492 | BRANCH_IF_ANY_CHEETAH(o2,o3,1f) | 578 | BRANCH_IF_SUN4V(o2, 1f) |
579 | BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) | ||
493 | 580 | ||
494 | ba,pt %xcc, 2f | 581 | ba,pt %xcc, 2f |
495 | nop | 582 | nop |
@@ -502,7 +589,6 @@ setup_trap_table: | |||
502 | 589 | ||
503 | 2: | 590 | 2: |
504 | wrpr %g0, %g0, %wstate | 591 | wrpr %g0, %g0, %wstate |
505 | wrpr %o1, 0x0, %pstate | ||
506 | 592 | ||
507 | call init_irqwork_curcpu | 593 | call init_irqwork_curcpu |
508 | nop | 594 | nop |
@@ -517,7 +603,7 @@ setup_trap_table: | |||
517 | restore | 603 | restore |
518 | 604 | ||
519 | .globl setup_tba | 605 | .globl setup_tba |
520 | setup_tba: /* i0 = is_starfire */ | 606 | setup_tba: |
521 | save %sp, -192, %sp | 607 | save %sp, -192, %sp |
522 | 608 | ||
523 | /* The boot processor is the only cpu which invokes this | 609 | /* The boot processor is the only cpu which invokes this |
@@ -536,31 +622,35 @@ setup_tba: /* i0 = is_starfire */ | |||
536 | restore | 622 | restore |
537 | sparc64_boot_end: | 623 | sparc64_boot_end: |
538 | 624 | ||
539 | #include "systbls.S" | ||
540 | #include "ktlb.S" | 625 | #include "ktlb.S" |
626 | #include "tsb.S" | ||
541 | #include "etrap.S" | 627 | #include "etrap.S" |
542 | #include "rtrap.S" | 628 | #include "rtrap.S" |
543 | #include "winfixup.S" | 629 | #include "winfixup.S" |
544 | #include "entry.S" | 630 | #include "entry.S" |
631 | #include "sun4v_tlb_miss.S" | ||
632 | #include "sun4v_ivec.S" | ||
545 | 633 | ||
546 | /* | 634 | /* |
547 | * The following skip makes sure the trap table in ttable.S is aligned | 635 | * The following skip makes sure the trap table in ttable.S is aligned |
548 | * on a 32K boundary as required by the v9 specs for TBA register. | 636 | * on a 32K boundary as required by the v9 specs for TBA register. |
637 | * | ||
638 | * We align to a 32K boundary, then we have the 32K kernel TSB, | ||
639 | * then the 32K aligned trap table. | ||
549 | */ | 640 | */ |
550 | 1: | 641 | 1: |
551 | .skip 0x4000 + _start - 1b | 642 | .skip 0x4000 + _start - 1b |
552 | 643 | ||
553 | #ifdef CONFIG_SBUS | 644 | .globl swapper_tsb |
554 | /* This is just a hack to fool make depend config.h discovering | 645 | swapper_tsb: |
555 | strategy: As the .S files below need config.h, but | 646 | .skip (32 * 1024) |
556 | make depend does not find it for them, we include config.h | ||
557 | in head.S */ | ||
558 | #endif | ||
559 | 647 | ||
560 | ! 0x0000000000408000 | 648 | ! 0x0000000000408000 |
561 | 649 | ||
562 | #include "ttable.S" | 650 | #include "ttable.S" |
563 | 651 | ||
652 | #include "systbls.S" | ||
653 | |||
564 | .data | 654 | .data |
565 | .align 8 | 655 | .align 8 |
566 | .globl prom_tba, tlb_type | 656 | .globl prom_tba, tlb_type |
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 233526ba3abe..8c93ba655b33 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/proc_fs.h> | 22 | #include <linux/proc_fs.h> |
23 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
24 | #include <linux/bootmem.h> | ||
24 | 25 | ||
25 | #include <asm/ptrace.h> | 26 | #include <asm/ptrace.h> |
26 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
@@ -39,6 +40,7 @@ | |||
39 | #include <asm/cache.h> | 40 | #include <asm/cache.h> |
40 | #include <asm/cpudata.h> | 41 | #include <asm/cpudata.h> |
41 | #include <asm/auxio.h> | 42 | #include <asm/auxio.h> |
43 | #include <asm/head.h> | ||
42 | 44 | ||
43 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
44 | static void distribute_irqs(void); | 46 | static void distribute_irqs(void); |
@@ -136,12 +138,48 @@ out_unlock: | |||
136 | return 0; | 138 | return 0; |
137 | } | 139 | } |
138 | 140 | ||
141 | extern unsigned long real_hard_smp_processor_id(void); | ||
142 | |||
143 | static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) | ||
144 | { | ||
145 | unsigned int tid; | ||
146 | |||
147 | if (this_is_starfire) { | ||
148 | tid = starfire_translate(imap, cpuid); | ||
149 | tid <<= IMAP_TID_SHIFT; | ||
150 | tid &= IMAP_TID_UPA; | ||
151 | } else { | ||
152 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
153 | unsigned long ver; | ||
154 | |||
155 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | ||
156 | if ((ver >> 32UL) == __JALAPENO_ID || | ||
157 | (ver >> 32UL) == __SERRANO_ID) { | ||
158 | tid = cpuid << IMAP_TID_SHIFT; | ||
159 | tid &= IMAP_TID_JBUS; | ||
160 | } else { | ||
161 | unsigned int a = cpuid & 0x1f; | ||
162 | unsigned int n = (cpuid >> 5) & 0x1f; | ||
163 | |||
164 | tid = ((a << IMAP_AID_SHIFT) | | ||
165 | (n << IMAP_NID_SHIFT)); | ||
166 | tid &= (IMAP_AID_SAFARI | | ||
167 | IMAP_NID_SAFARI);; | ||
168 | } | ||
169 | } else { | ||
170 | tid = cpuid << IMAP_TID_SHIFT; | ||
171 | tid &= IMAP_TID_UPA; | ||
172 | } | ||
173 | } | ||
174 | |||
175 | return tid; | ||
176 | } | ||
177 | |||
139 | /* Now these are always passed a true fully specified sun4u INO. */ | 178 | /* Now these are always passed a true fully specified sun4u INO. */ |
140 | void enable_irq(unsigned int irq) | 179 | void enable_irq(unsigned int irq) |
141 | { | 180 | { |
142 | struct ino_bucket *bucket = __bucket(irq); | 181 | struct ino_bucket *bucket = __bucket(irq); |
143 | unsigned long imap; | 182 | unsigned long imap, cpuid; |
144 | unsigned long tid; | ||
145 | 183 | ||
146 | imap = bucket->imap; | 184 | imap = bucket->imap; |
147 | if (imap == 0UL) | 185 | if (imap == 0UL) |
@@ -149,47 +187,38 @@ void enable_irq(unsigned int irq) | |||
149 | 187 | ||
150 | preempt_disable(); | 188 | preempt_disable(); |
151 | 189 | ||
152 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 190 | /* This gets the physical processor ID, even on uniprocessor, |
153 | unsigned long ver; | 191 | * so we can always program the interrupt target correctly. |
154 | 192 | */ | |
155 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | 193 | cpuid = real_hard_smp_processor_id(); |
156 | if ((ver >> 32) == 0x003e0016) { | 194 | |
157 | /* We set it to our JBUS ID. */ | 195 | if (tlb_type == hypervisor) { |
158 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | 196 | unsigned int ino = __irq_ino(irq); |
159 | : "=r" (tid) | 197 | int err; |
160 | : "i" (ASI_JBUS_CONFIG)); | 198 | |
161 | tid = ((tid & (0x1fUL<<17)) << 9); | 199 | err = sun4v_intr_settarget(ino, cpuid); |
162 | tid &= IMAP_TID_JBUS; | 200 | if (err != HV_EOK) |
163 | } else { | 201 | printk("sun4v_intr_settarget(%x,%lu): err(%d)\n", |
164 | /* We set it to our Safari AID. */ | 202 | ino, cpuid, err); |
165 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | 203 | err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); |
166 | : "=r" (tid) | 204 | if (err != HV_EOK) |
167 | : "i" (ASI_SAFARI_CONFIG)); | 205 | printk("sun4v_intr_setenabled(%x): err(%d)\n", |
168 | tid = ((tid & (0x3ffUL<<17)) << 9); | 206 | ino, err); |
169 | tid &= IMAP_AID_SAFARI; | ||
170 | } | ||
171 | } else if (this_is_starfire == 0) { | ||
172 | /* We set it to our UPA MID. */ | ||
173 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | ||
174 | : "=r" (tid) | ||
175 | : "i" (ASI_UPA_CONFIG)); | ||
176 | tid = ((tid & UPA_CONFIG_MID) << 9); | ||
177 | tid &= IMAP_TID_UPA; | ||
178 | } else { | 207 | } else { |
179 | tid = (starfire_translate(imap, smp_processor_id()) << 26); | 208 | unsigned int tid = sun4u_compute_tid(imap, cpuid); |
180 | tid &= IMAP_TID_UPA; | 209 | |
210 | /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product | ||
211 | * of this SYSIO's preconfigured IGN in the SYSIO Control | ||
212 | * Register, the hardware just mirrors that value here. | ||
213 | * However for Graphics and UPA Slave devices the full | ||
214 | * IMAP_INR field can be set by the programmer here. | ||
215 | * | ||
216 | * Things like FFB can now be handled via the new IRQ | ||
217 | * mechanism. | ||
218 | */ | ||
219 | upa_writel(tid | IMAP_VALID, imap); | ||
181 | } | 220 | } |
182 | 221 | ||
183 | /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product | ||
184 | * of this SYSIO's preconfigured IGN in the SYSIO Control | ||
185 | * Register, the hardware just mirrors that value here. | ||
186 | * However for Graphics and UPA Slave devices the full | ||
187 | * IMAP_INR field can be set by the programmer here. | ||
188 | * | ||
189 | * Things like FFB can now be handled via the new IRQ mechanism. | ||
190 | */ | ||
191 | upa_writel(tid | IMAP_VALID, imap); | ||
192 | |||
193 | preempt_enable(); | 222 | preempt_enable(); |
194 | } | 223 | } |
195 | 224 | ||
@@ -201,16 +230,26 @@ void disable_irq(unsigned int irq) | |||
201 | 230 | ||
202 | imap = bucket->imap; | 231 | imap = bucket->imap; |
203 | if (imap != 0UL) { | 232 | if (imap != 0UL) { |
204 | u32 tmp; | 233 | if (tlb_type == hypervisor) { |
234 | unsigned int ino = __irq_ino(irq); | ||
235 | int err; | ||
236 | |||
237 | err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); | ||
238 | if (err != HV_EOK) | ||
239 | printk("sun4v_intr_setenabled(%x): " | ||
240 | "err(%d)\n", ino, err); | ||
241 | } else { | ||
242 | u32 tmp; | ||
205 | 243 | ||
206 | /* NOTE: We do not want to futz with the IRQ clear registers | 244 | /* NOTE: We do not want to futz with the IRQ clear registers |
207 | * and move the state to IDLE, the SCSI code does call | 245 | * and move the state to IDLE, the SCSI code does call |
208 | * disable_irq() to assure atomicity in the queue cmd | 246 | * disable_irq() to assure atomicity in the queue cmd |
209 | * SCSI adapter driver code. Thus we'd lose interrupts. | 247 | * SCSI adapter driver code. Thus we'd lose interrupts. |
210 | */ | 248 | */ |
211 | tmp = upa_readl(imap); | 249 | tmp = upa_readl(imap); |
212 | tmp &= ~IMAP_VALID; | 250 | tmp &= ~IMAP_VALID; |
213 | upa_writel(tmp, imap); | 251 | upa_writel(tmp, imap); |
252 | } | ||
214 | } | 253 | } |
215 | } | 254 | } |
216 | 255 | ||
@@ -248,6 +287,8 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long | |||
248 | return __irq(&pil0_dummy_bucket); | 287 | return __irq(&pil0_dummy_bucket); |
249 | } | 288 | } |
250 | 289 | ||
290 | BUG_ON(tlb_type == hypervisor); | ||
291 | |||
251 | /* RULE: Both must be specified in all other cases. */ | 292 | /* RULE: Both must be specified in all other cases. */ |
252 | if (iclr == 0UL || imap == 0UL) { | 293 | if (iclr == 0UL || imap == 0UL) { |
253 | prom_printf("Invalid build_irq %d %d %016lx %016lx\n", | 294 | prom_printf("Invalid build_irq %d %d %016lx %016lx\n", |
@@ -275,12 +316,11 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long | |||
275 | goto out; | 316 | goto out; |
276 | } | 317 | } |
277 | 318 | ||
278 | bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); | 319 | bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC); |
279 | if (!bucket->irq_info) { | 320 | if (!bucket->irq_info) { |
280 | prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); | 321 | prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); |
281 | prom_halt(); | 322 | prom_halt(); |
282 | } | 323 | } |
283 | memset(bucket->irq_info, 0, sizeof(struct irq_desc)); | ||
284 | 324 | ||
285 | /* Ok, looks good, set it up. Don't touch the irq_chain or | 325 | /* Ok, looks good, set it up. Don't touch the irq_chain or |
286 | * the pending flag. | 326 | * the pending flag. |
@@ -294,6 +334,37 @@ out: | |||
294 | return __irq(bucket); | 334 | return __irq(bucket); |
295 | } | 335 | } |
296 | 336 | ||
337 | unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags) | ||
338 | { | ||
339 | struct ino_bucket *bucket; | ||
340 | unsigned long sysino; | ||
341 | |||
342 | sysino = sun4v_devino_to_sysino(devhandle, devino); | ||
343 | |||
344 | bucket = &ivector_table[sysino]; | ||
345 | |||
346 | /* Catch accidental accesses to these things. IMAP/ICLR handling | ||
347 | * is done by hypervisor calls on sun4v platforms, not by direct | ||
348 | * register accesses. | ||
349 | * | ||
350 | * But we need to make them look unique for the disable_irq() logic | ||
351 | * in free_irq(). | ||
352 | */ | ||
353 | bucket->imap = ~0UL - sysino; | ||
354 | bucket->iclr = ~0UL - sysino; | ||
355 | |||
356 | bucket->pil = pil; | ||
357 | bucket->flags = flags; | ||
358 | |||
359 | bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC); | ||
360 | if (!bucket->irq_info) { | ||
361 | prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); | ||
362 | prom_halt(); | ||
363 | } | ||
364 | |||
365 | return __irq(bucket); | ||
366 | } | ||
367 | |||
297 | static void atomic_bucket_insert(struct ino_bucket *bucket) | 368 | static void atomic_bucket_insert(struct ino_bucket *bucket) |
298 | { | 369 | { |
299 | unsigned long pstate; | 370 | unsigned long pstate; |
@@ -482,7 +553,6 @@ void free_irq(unsigned int irq, void *dev_id) | |||
482 | bucket = __bucket(irq); | 553 | bucket = __bucket(irq); |
483 | if (bucket != &pil0_dummy_bucket) { | 554 | if (bucket != &pil0_dummy_bucket) { |
484 | struct irq_desc *desc = bucket->irq_info; | 555 | struct irq_desc *desc = bucket->irq_info; |
485 | unsigned long imap = bucket->imap; | ||
486 | int ent, i; | 556 | int ent, i; |
487 | 557 | ||
488 | for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { | 558 | for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { |
@@ -495,6 +565,8 @@ void free_irq(unsigned int irq, void *dev_id) | |||
495 | } | 565 | } |
496 | 566 | ||
497 | if (!desc->action_active_mask) { | 567 | if (!desc->action_active_mask) { |
568 | unsigned long imap = bucket->imap; | ||
569 | |||
498 | /* This unique interrupt source is now inactive. */ | 570 | /* This unique interrupt source is now inactive. */ |
499 | bucket->flags &= ~IBF_ACTIVE; | 571 | bucket->flags &= ~IBF_ACTIVE; |
500 | 572 | ||
@@ -592,7 +664,18 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) | |||
592 | break; | 664 | break; |
593 | } | 665 | } |
594 | if (bp->pil != 0) { | 666 | if (bp->pil != 0) { |
595 | upa_writel(ICLR_IDLE, bp->iclr); | 667 | if (tlb_type == hypervisor) { |
668 | unsigned int ino = __irq_ino(bp); | ||
669 | int err; | ||
670 | |||
671 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); | ||
672 | if (err != HV_EOK) | ||
673 | printk("sun4v_intr_setstate(%x): " | ||
674 | "err(%d)\n", ino, err); | ||
675 | } else { | ||
676 | upa_writel(ICLR_IDLE, bp->iclr); | ||
677 | } | ||
678 | |||
596 | /* Test and add entropy */ | 679 | /* Test and add entropy */ |
597 | if (random & SA_SAMPLE_RANDOM) | 680 | if (random & SA_SAMPLE_RANDOM) |
598 | add_interrupt_randomness(irq); | 681 | add_interrupt_randomness(irq); |
@@ -694,7 +777,7 @@ irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs) | |||
694 | val = readb(auxio_register); | 777 | val = readb(auxio_register); |
695 | val |= AUXIO_AUX1_FTCNT; | 778 | val |= AUXIO_AUX1_FTCNT; |
696 | writeb(val, auxio_register); | 779 | writeb(val, auxio_register); |
697 | val &= AUXIO_AUX1_FTCNT; | 780 | val &= ~AUXIO_AUX1_FTCNT; |
698 | writeb(val, auxio_register); | 781 | writeb(val, auxio_register); |
699 | 782 | ||
700 | doing_pdma = 0; | 783 | doing_pdma = 0; |
@@ -727,25 +810,23 @@ EXPORT_SYMBOL(probe_irq_off); | |||
727 | static int retarget_one_irq(struct irqaction *p, int goal_cpu) | 810 | static int retarget_one_irq(struct irqaction *p, int goal_cpu) |
728 | { | 811 | { |
729 | struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; | 812 | struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; |
730 | unsigned long imap = bucket->imap; | ||
731 | unsigned int tid; | ||
732 | 813 | ||
733 | while (!cpu_online(goal_cpu)) { | 814 | while (!cpu_online(goal_cpu)) { |
734 | if (++goal_cpu >= NR_CPUS) | 815 | if (++goal_cpu >= NR_CPUS) |
735 | goal_cpu = 0; | 816 | goal_cpu = 0; |
736 | } | 817 | } |
737 | 818 | ||
738 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 819 | if (tlb_type == hypervisor) { |
739 | tid = goal_cpu << 26; | 820 | unsigned int ino = __irq_ino(bucket); |
740 | tid &= IMAP_AID_SAFARI; | 821 | |
741 | } else if (this_is_starfire == 0) { | 822 | sun4v_intr_settarget(ino, goal_cpu); |
742 | tid = goal_cpu << 26; | 823 | sun4v_intr_setenabled(ino, HV_INTR_ENABLED); |
743 | tid &= IMAP_TID_UPA; | ||
744 | } else { | 824 | } else { |
745 | tid = (starfire_translate(imap, goal_cpu) << 26); | 825 | unsigned long imap = bucket->imap; |
746 | tid &= IMAP_TID_UPA; | 826 | unsigned int tid = sun4u_compute_tid(imap, goal_cpu); |
827 | |||
828 | upa_writel(tid | IMAP_VALID, imap); | ||
747 | } | 829 | } |
748 | upa_writel(tid | IMAP_VALID, imap); | ||
749 | 830 | ||
750 | do { | 831 | do { |
751 | if (++goal_cpu >= NR_CPUS) | 832 | if (++goal_cpu >= NR_CPUS) |
@@ -848,33 +929,114 @@ static void kill_prom_timer(void) | |||
848 | 929 | ||
849 | void init_irqwork_curcpu(void) | 930 | void init_irqwork_curcpu(void) |
850 | { | 931 | { |
851 | register struct irq_work_struct *workp asm("o2"); | ||
852 | register unsigned long tmp asm("o3"); | ||
853 | int cpu = hard_smp_processor_id(); | 932 | int cpu = hard_smp_processor_id(); |
854 | 933 | ||
855 | memset(__irq_work + cpu, 0, sizeof(*workp)); | 934 | memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct)); |
856 | 935 | } | |
857 | /* Make sure we are called with PSTATE_IE disabled. */ | 936 | |
858 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | 937 | static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) |
859 | : "=r" (tmp)); | 938 | { |
860 | if (tmp & PSTATE_IE) { | 939 | unsigned long num_entries = 128; |
861 | prom_printf("BUG: init_irqwork_curcpu() called with " | 940 | unsigned long status; |
862 | "PSTATE_IE enabled, bailing.\n"); | 941 | |
863 | __asm__ __volatile__("mov %%i7, %0\n\t" | 942 | status = sun4v_cpu_qconf(type, paddr, num_entries); |
864 | : "=r" (tmp)); | 943 | if (status != HV_EOK) { |
865 | prom_printf("BUG: Called from %lx\n", tmp); | 944 | prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, " |
945 | "err %lu\n", type, paddr, num_entries, status); | ||
866 | prom_halt(); | 946 | prom_halt(); |
867 | } | 947 | } |
948 | } | ||
868 | 949 | ||
869 | /* Set interrupt globals. */ | 950 | static void __cpuinit sun4v_register_mondo_queues(int this_cpu) |
870 | workp = &__irq_work[cpu]; | 951 | { |
871 | __asm__ __volatile__( | 952 | struct trap_per_cpu *tb = &trap_block[this_cpu]; |
872 | "rdpr %%pstate, %0\n\t" | 953 | |
873 | "wrpr %0, %1, %%pstate\n\t" | 954 | register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); |
874 | "mov %2, %%g6\n\t" | 955 | register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); |
875 | "wrpr %0, 0x0, %%pstate\n\t" | 956 | register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); |
876 | : "=&r" (tmp) | 957 | register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); |
877 | : "i" (PSTATE_IG), "r" (workp)); | 958 | } |
959 | |||
960 | static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem) | ||
961 | { | ||
962 | void *page; | ||
963 | |||
964 | if (use_bootmem) | ||
965 | page = alloc_bootmem_low_pages(PAGE_SIZE); | ||
966 | else | ||
967 | page = (void *) get_zeroed_page(GFP_ATOMIC); | ||
968 | |||
969 | if (!page) { | ||
970 | prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); | ||
971 | prom_halt(); | ||
972 | } | ||
973 | |||
974 | *pa_ptr = __pa(page); | ||
975 | } | ||
976 | |||
977 | static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem) | ||
978 | { | ||
979 | void *page; | ||
980 | |||
981 | if (use_bootmem) | ||
982 | page = alloc_bootmem_low_pages(PAGE_SIZE); | ||
983 | else | ||
984 | page = (void *) get_zeroed_page(GFP_ATOMIC); | ||
985 | |||
986 | if (!page) { | ||
987 | prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); | ||
988 | prom_halt(); | ||
989 | } | ||
990 | |||
991 | *pa_ptr = __pa(page); | ||
992 | } | ||
993 | |||
994 | static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) | ||
995 | { | ||
996 | #ifdef CONFIG_SMP | ||
997 | void *page; | ||
998 | |||
999 | BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); | ||
1000 | |||
1001 | if (use_bootmem) | ||
1002 | page = alloc_bootmem_low_pages(PAGE_SIZE); | ||
1003 | else | ||
1004 | page = (void *) get_zeroed_page(GFP_ATOMIC); | ||
1005 | |||
1006 | if (!page) { | ||
1007 | prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); | ||
1008 | prom_halt(); | ||
1009 | } | ||
1010 | |||
1011 | tb->cpu_mondo_block_pa = __pa(page); | ||
1012 | tb->cpu_list_pa = __pa(page + 64); | ||
1013 | #endif | ||
1014 | } | ||
1015 | |||
1016 | /* Allocate and register the mondo and error queues for this cpu. */ | ||
1017 | void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load) | ||
1018 | { | ||
1019 | struct trap_per_cpu *tb = &trap_block[cpu]; | ||
1020 | |||
1021 | if (alloc) { | ||
1022 | alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); | ||
1023 | alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); | ||
1024 | alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); | ||
1025 | alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); | ||
1026 | alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); | ||
1027 | alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); | ||
1028 | |||
1029 | init_cpu_send_mondo_info(tb, use_bootmem); | ||
1030 | } | ||
1031 | |||
1032 | if (load) { | ||
1033 | if (cpu != hard_smp_processor_id()) { | ||
1034 | prom_printf("SUN4V: init mondo on cpu %d not %d\n", | ||
1035 | cpu, hard_smp_processor_id()); | ||
1036 | prom_halt(); | ||
1037 | } | ||
1038 | sun4v_register_mondo_queues(cpu); | ||
1039 | } | ||
878 | } | 1040 | } |
879 | 1041 | ||
880 | /* Only invoked on boot processor. */ | 1042 | /* Only invoked on boot processor. */ |
@@ -884,6 +1046,9 @@ void __init init_IRQ(void) | |||
884 | kill_prom_timer(); | 1046 | kill_prom_timer(); |
885 | memset(&ivector_table[0], 0, sizeof(ivector_table)); | 1047 | memset(&ivector_table[0], 0, sizeof(ivector_table)); |
886 | 1048 | ||
1049 | if (tlb_type == hypervisor) | ||
1050 | sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1); | ||
1051 | |||
887 | /* We need to clear any IRQ's pending in the soft interrupt | 1052 | /* We need to clear any IRQ's pending in the soft interrupt |
888 | * registers, a spurious one could be left around from the | 1053 | * registers, a spurious one could be left around from the |
889 | * PROM timer which we just disabled. | 1054 | * PROM timer which we just disabled. |
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S deleted file mode 100644 index 4951ff8f6877..000000000000 --- a/arch/sparc64/kernel/itlb_base.S +++ /dev/null | |||
@@ -1,79 +0,0 @@ | |||
1 | /* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $ | ||
2 | * itlb_base.S: Front end to ITLB miss replacement strategy. | ||
3 | * This is included directly into the trap table. | ||
4 | * | ||
5 | * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com) | ||
6 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
7 | */ | ||
8 | |||
9 | #if PAGE_SHIFT == 13 | ||
10 | /* | ||
11 | * To compute vpte offset, we need to do ((addr >> 13) << 3), | ||
12 | * which can be optimized to (addr >> 10) if bits 10/11/12 can | ||
13 | * be guaranteed to be 0 ... mmu_context.h does guarantee this | ||
14 | * by only using 10 bits in the hwcontext value. | ||
15 | */ | ||
16 | #define CREATE_VPTE_OFFSET1(r1, r2) \ | ||
17 | srax r1, 10, r2 | ||
18 | #define CREATE_VPTE_OFFSET2(r1, r2) nop | ||
19 | #else /* PAGE_SHIFT */ | ||
20 | #define CREATE_VPTE_OFFSET1(r1, r2) \ | ||
21 | srax r1, PAGE_SHIFT, r2 | ||
22 | #define CREATE_VPTE_OFFSET2(r1, r2) \ | ||
23 | sllx r2, 3, r2 | ||
24 | #endif /* PAGE_SHIFT */ | ||
25 | |||
26 | |||
27 | /* Ways we can get here: | ||
28 | * | ||
29 | * 1) Nucleus instruction misses from module code. | ||
30 | * 2) All user instruction misses. | ||
31 | * | ||
32 | * All real page faults merge their code paths to the | ||
33 | * sparc64_realfault_common label below. | ||
34 | */ | ||
35 | |||
36 | /* ITLB ** ICACHE line 1: Quick user TLB misses */ | ||
37 | mov TLB_SFSR, %g1 | ||
38 | ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS | ||
39 | CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset | ||
40 | CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset | ||
41 | ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE | ||
42 | 1: brgez,pn %g5, 3f ! Not valid, branch out | ||
43 | sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot | ||
44 | andcc %g5, %g4, %g0 ! Executable? | ||
45 | |||
46 | /* ITLB ** ICACHE line 2: Real faults */ | ||
47 | be,pn %xcc, 3f ! Nope, branch. | ||
48 | nop ! Delay-slot | ||
49 | 2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB | ||
50 | retry ! Trap return | ||
51 | 3: rdpr %pstate, %g4 ! Move into alt-globals | ||
52 | wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate | ||
53 | rdpr %tpc, %g5 ! And load faulting VA | ||
54 | mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB | ||
55 | |||
56 | /* ITLB ** ICACHE line 3: Finish faults */ | ||
57 | sparc64_realfault_common: ! Called by dtlb_miss | ||
58 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
59 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
60 | ba,pt %xcc, etrap ! Save state | ||
61 | 1: rd %pc, %g7 ! ... | ||
62 | call do_sparc64_fault ! Call fault handler | ||
63 | add %sp, PTREGS_OFF, %o0! Compute pt_regs arg | ||
64 | ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state | ||
65 | nop | ||
66 | |||
67 | /* ITLB ** ICACHE line 4: Window fixups */ | ||
68 | winfix_trampoline: | ||
69 | rdpr %tpc, %g3 ! Prepare winfixup TNPC | ||
70 | or %g3, 0x7c, %g3 ! Compute branch offset | ||
71 | wrpr %g3, %tnpc ! Write it into TNPC | ||
72 | done ! Do it to it | ||
73 | nop | ||
74 | nop | ||
75 | nop | ||
76 | nop | ||
77 | |||
78 | #undef CREATE_VPTE_OFFSET1 | ||
79 | #undef CREATE_VPTE_OFFSET2 | ||
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S new file mode 100644 index 000000000000..ad46e2024f4b --- /dev/null +++ b/arch/sparc64/kernel/itlb_miss.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* ITLB ** ICACHE line 1: Context 0 check and TSB load */ | ||
2 | ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer | ||
3 | ldxa [%g0] ASI_IMMU, %g6 ! Get TAG TARGET | ||
4 | srlx %g6, 48, %g5 ! Get context | ||
5 | sllx %g6, 22, %g6 ! Zero out context | ||
6 | brz,pn %g5, kvmap_itlb ! Context 0 processing | ||
7 | srlx %g6, 22, %g6 ! Delay slot | ||
8 | TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry | ||
9 | cmp %g4, %g6 ! Compare TAG | ||
10 | |||
11 | /* ITLB ** ICACHE line 2: TSB compare and TLB load */ | ||
12 | bne,pn %xcc, tsb_miss_itlb ! Miss | ||
13 | mov FAULT_CODE_ITLB, %g3 | ||
14 | andcc %g5, _PAGE_EXEC_4U, %g0 ! Executable? | ||
15 | be,pn %xcc, tsb_do_fault | ||
16 | nop ! Delay slot, fill me | ||
17 | stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB | ||
18 | retry ! Trap done | ||
19 | nop | ||
20 | |||
21 | /* ITLB ** ICACHE line 3: */ | ||
22 | nop | ||
23 | nop | ||
24 | nop | ||
25 | nop | ||
26 | nop | ||
27 | nop | ||
28 | nop | ||
29 | nop | ||
30 | |||
31 | /* ITLB ** ICACHE line 4: */ | ||
32 | nop | ||
33 | nop | ||
34 | nop | ||
35 | nop | ||
36 | nop | ||
37 | nop | ||
38 | nop | ||
39 | nop | ||
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index d9244d3c9f73..31da1e564c95 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S | |||
@@ -4,191 +4,276 @@ | |||
4 | * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) | 4 | * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) |
5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) | 5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) |
6 | * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 6 | * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/config.h> | 9 | #include <linux/config.h> |
10 | #include <asm/head.h> | 10 | #include <asm/head.h> |
11 | #include <asm/asi.h> | 11 | #include <asm/asi.h> |
12 | #include <asm/page.h> | 12 | #include <asm/page.h> |
13 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
14 | #include <asm/tsb.h> | ||
14 | 15 | ||
15 | .text | 16 | .text |
16 | .align 32 | 17 | .align 32 |
17 | 18 | ||
18 | /* | 19 | kvmap_itlb: |
19 | * On a second level vpte miss, check whether the original fault is to the OBP | 20 | /* g6: TAG TARGET */ |
20 | * range (note that this is only possible for instruction miss, data misses to | 21 | mov TLB_TAG_ACCESS, %g4 |
21 | * obp range do not use vpte). If so, go back directly to the faulting address. | 22 | ldxa [%g4] ASI_IMMU, %g4 |
22 | * This is because we want to read the tpc, otherwise we have no way of knowing | 23 | |
23 | * the 8k aligned faulting address if we are using >8k kernel pagesize. This | 24 | /* sun4v_itlb_miss branches here with the missing virtual |
24 | * also ensures no vpte range addresses are dropped into tlb while obp is | 25 | * address already loaded into %g4 |
25 | * executing (see inherit_locked_prom_mappings() rant). | ||
26 | */ | ||
27 | sparc64_vpte_nucleus: | ||
28 | /* Note that kvmap below has verified that the address is | ||
29 | * in the range MODULES_VADDR --> VMALLOC_END already. So | ||
30 | * here we need only check if it is an OBP address or not. | ||
31 | */ | 26 | */ |
27 | kvmap_itlb_4v: | ||
28 | |||
29 | kvmap_itlb_nonlinear: | ||
30 | /* Catch kernel NULL pointer calls. */ | ||
31 | sethi %hi(PAGE_SIZE), %g5 | ||
32 | cmp %g4, %g5 | ||
33 | bleu,pn %xcc, kvmap_dtlb_longpath | ||
34 | nop | ||
35 | |||
36 | KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load) | ||
37 | |||
38 | kvmap_itlb_tsb_miss: | ||
32 | sethi %hi(LOW_OBP_ADDRESS), %g5 | 39 | sethi %hi(LOW_OBP_ADDRESS), %g5 |
33 | cmp %g4, %g5 | 40 | cmp %g4, %g5 |
34 | blu,pn %xcc, kern_vpte | 41 | blu,pn %xcc, kvmap_itlb_vmalloc_addr |
35 | mov 0x1, %g5 | 42 | mov 0x1, %g5 |
36 | sllx %g5, 32, %g5 | 43 | sllx %g5, 32, %g5 |
37 | cmp %g4, %g5 | 44 | cmp %g4, %g5 |
38 | blu,pn %xcc, vpte_insn_obp | 45 | blu,pn %xcc, kvmap_itlb_obp |
39 | nop | 46 | nop |
40 | 47 | ||
41 | /* These two instructions are patched by paginig_init(). */ | 48 | kvmap_itlb_vmalloc_addr: |
42 | kern_vpte: | 49 | KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) |
43 | sethi %hi(swapper_pgd_zero), %g5 | ||
44 | lduw [%g5 + %lo(swapper_pgd_zero)], %g5 | ||
45 | 50 | ||
46 | /* With kernel PGD in %g5, branch back into dtlb_backend. */ | 51 | KTSB_LOCK_TAG(%g1, %g2, %g7) |
47 | ba,pt %xcc, sparc64_kpte_continue | ||
48 | andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */ | ||
49 | 52 | ||
50 | vpte_noent: | 53 | /* Load and check PTE. */ |
51 | /* Restore previous TAG_ACCESS, %g5 is zero, and we will | 54 | ldxa [%g5] ASI_PHYS_USE_EC, %g5 |
52 | * skip over the trap instruction so that the top level | 55 | mov 1, %g7 |
53 | * TLB miss handler will thing this %g5 value is just an | 56 | sllx %g7, TSB_TAG_INVALID_BIT, %g7 |
54 | * invalid PTE, thus branching to full fault processing. | 57 | brgez,a,pn %g5, kvmap_itlb_longpath |
55 | */ | 58 | KTSB_STORE(%g1, %g7) |
56 | mov TLB_SFSR, %g1 | 59 | |
57 | stxa %g4, [%g1 + %g1] ASI_DMMU | 60 | KTSB_WRITE(%g1, %g5, %g6) |
58 | done | 61 | |
59 | 62 | /* fallthrough to TLB load */ | |
60 | vpte_insn_obp: | ||
61 | /* Behave as if we are at TL0. */ | ||
62 | wrpr %g0, 1, %tl | ||
63 | rdpr %tpc, %g4 /* Find original faulting iaddr */ | ||
64 | srlx %g4, 13, %g4 /* Throw out context bits */ | ||
65 | sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */ | ||
66 | |||
67 | /* Restore previous TAG_ACCESS. */ | ||
68 | mov TLB_SFSR, %g1 | ||
69 | stxa %g4, [%g1 + %g1] ASI_IMMU | ||
70 | |||
71 | sethi %hi(prom_trans), %g5 | ||
72 | or %g5, %lo(prom_trans), %g5 | ||
73 | |||
74 | 1: ldx [%g5 + 0x00], %g6 ! base | ||
75 | brz,a,pn %g6, longpath ! no more entries, fail | ||
76 | mov TLB_SFSR, %g1 ! and restore %g1 | ||
77 | ldx [%g5 + 0x08], %g1 ! len | ||
78 | add %g6, %g1, %g1 ! end | ||
79 | cmp %g6, %g4 | ||
80 | bgu,pt %xcc, 2f | ||
81 | cmp %g4, %g1 | ||
82 | bgeu,pt %xcc, 2f | ||
83 | ldx [%g5 + 0x10], %g1 ! PTE | ||
84 | |||
85 | /* TLB load, restore %g1, and return from trap. */ | ||
86 | sub %g4, %g6, %g6 | ||
87 | add %g1, %g6, %g5 | ||
88 | mov TLB_SFSR, %g1 | ||
89 | stxa %g5, [%g0] ASI_ITLB_DATA_IN | ||
90 | retry | ||
91 | 63 | ||
92 | 2: ba,pt %xcc, 1b | 64 | kvmap_itlb_load: |
93 | add %g5, (3 * 8), %g5 ! next entry | 65 | |
94 | 66 | 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN | |
95 | kvmap_do_obp: | ||
96 | sethi %hi(prom_trans), %g5 | ||
97 | or %g5, %lo(prom_trans), %g5 | ||
98 | srlx %g4, 13, %g4 | ||
99 | sllx %g4, 13, %g4 | ||
100 | |||
101 | 1: ldx [%g5 + 0x00], %g6 ! base | ||
102 | brz,a,pn %g6, longpath ! no more entries, fail | ||
103 | mov TLB_SFSR, %g1 ! and restore %g1 | ||
104 | ldx [%g5 + 0x08], %g1 ! len | ||
105 | add %g6, %g1, %g1 ! end | ||
106 | cmp %g6, %g4 | ||
107 | bgu,pt %xcc, 2f | ||
108 | cmp %g4, %g1 | ||
109 | bgeu,pt %xcc, 2f | ||
110 | ldx [%g5 + 0x10], %g1 ! PTE | ||
111 | |||
112 | /* TLB load, restore %g1, and return from trap. */ | ||
113 | sub %g4, %g6, %g6 | ||
114 | add %g1, %g6, %g5 | ||
115 | mov TLB_SFSR, %g1 | ||
116 | stxa %g5, [%g0] ASI_DTLB_DATA_IN | ||
117 | retry | 67 | retry |
68 | .section .sun4v_2insn_patch, "ax" | ||
69 | .word 661b | ||
70 | nop | ||
71 | nop | ||
72 | .previous | ||
73 | |||
74 | /* For sun4v the ASI_ITLB_DATA_IN store and the retry | ||
75 | * instruction get nop'd out and we get here to branch | ||
76 | * to the sun4v tlb load code. The registers are setup | ||
77 | * as follows: | ||
78 | * | ||
79 | * %g4: vaddr | ||
80 | * %g5: PTE | ||
81 | * %g6: TAG | ||
82 | * | ||
83 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
84 | * up here. | ||
85 | */ | ||
86 | ba,pt %xcc, sun4v_itlb_load | ||
87 | mov %g5, %g3 | ||
118 | 88 | ||
119 | 2: ba,pt %xcc, 1b | 89 | kvmap_itlb_longpath: |
120 | add %g5, (3 * 8), %g5 ! next entry | 90 | |
91 | 661: rdpr %pstate, %g5 | ||
92 | wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate | ||
93 | .section .sun4v_2insn_patch, "ax" | ||
94 | .word 661b | ||
95 | SET_GL(1) | ||
96 | nop | ||
97 | .previous | ||
98 | |||
99 | rdpr %tpc, %g5 | ||
100 | ba,pt %xcc, sparc64_realfault_common | ||
101 | mov FAULT_CODE_ITLB, %g4 | ||
102 | |||
103 | kvmap_itlb_obp: | ||
104 | OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath) | ||
105 | |||
106 | KTSB_LOCK_TAG(%g1, %g2, %g7) | ||
107 | |||
108 | KTSB_WRITE(%g1, %g5, %g6) | ||
109 | |||
110 | ba,pt %xcc, kvmap_itlb_load | ||
111 | nop | ||
112 | |||
113 | kvmap_dtlb_obp: | ||
114 | OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath) | ||
115 | |||
116 | KTSB_LOCK_TAG(%g1, %g2, %g7) | ||
117 | |||
118 | KTSB_WRITE(%g1, %g5, %g6) | ||
119 | |||
120 | ba,pt %xcc, kvmap_dtlb_load | ||
121 | nop | ||
121 | 122 | ||
122 | /* | ||
123 | * On a first level data miss, check whether this is to the OBP range (note | ||
124 | * that such accesses can be made by prom, as well as by kernel using | ||
125 | * prom_getproperty on "address"), and if so, do not use vpte access ... | ||
126 | * rather, use information saved during inherit_prom_mappings() using 8k | ||
127 | * pagesize. | ||
128 | */ | ||
129 | .align 32 | 123 | .align 32 |
130 | kvmap: | 124 | kvmap_dtlb_tsb4m_load: |
131 | brgez,pn %g4, kvmap_nonlinear | 125 | KTSB_LOCK_TAG(%g1, %g2, %g7) |
126 | KTSB_WRITE(%g1, %g5, %g6) | ||
127 | ba,pt %xcc, kvmap_dtlb_load | ||
132 | nop | 128 | nop |
133 | 129 | ||
134 | #ifdef CONFIG_DEBUG_PAGEALLOC | 130 | kvmap_dtlb: |
131 | /* %g6: TAG TARGET */ | ||
132 | mov TLB_TAG_ACCESS, %g4 | ||
133 | ldxa [%g4] ASI_DMMU, %g4 | ||
134 | |||
135 | /* sun4v_dtlb_miss branches here with the missing virtual | ||
136 | * address already loaded into %g4 | ||
137 | */ | ||
138 | kvmap_dtlb_4v: | ||
139 | brgez,pn %g4, kvmap_dtlb_nonlinear | ||
140 | nop | ||
141 | |||
142 | /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */ | ||
143 | KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) | ||
144 | |||
145 | /* TSB entry address left in %g1, lookup linear PTE. | ||
146 | * Must preserve %g1 and %g6 (TAG). | ||
147 | */ | ||
148 | kvmap_dtlb_tsb4m_miss: | ||
149 | sethi %hi(kpte_linear_bitmap), %g2 | ||
150 | or %g2, %lo(kpte_linear_bitmap), %g2 | ||
151 | |||
152 | /* Clear the PAGE_OFFSET top virtual bits, then shift | ||
153 | * down to get a 256MB physical address index. | ||
154 | */ | ||
155 | sllx %g4, 21, %g5 | ||
156 | mov 1, %g7 | ||
157 | srlx %g5, 21 + 28, %g5 | ||
158 | |||
159 | /* Don't try this at home kids... this depends upon srlx | ||
160 | * only taking the low 6 bits of the shift count in %g5. | ||
161 | */ | ||
162 | sllx %g7, %g5, %g7 | ||
163 | |||
164 | /* Divide by 64 to get the offset into the bitmask. */ | ||
165 | srlx %g5, 6, %g5 | ||
166 | sllx %g5, 3, %g5 | ||
167 | |||
168 | /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */ | ||
169 | ldx [%g2 + %g5], %g2 | ||
170 | andcc %g2, %g7, %g0 | ||
171 | sethi %hi(kern_linear_pte_xor), %g5 | ||
172 | or %g5, %lo(kern_linear_pte_xor), %g5 | ||
173 | bne,a,pt %xcc, 1f | ||
174 | add %g5, 8, %g5 | ||
175 | |||
176 | 1: ldx [%g5], %g2 | ||
177 | |||
135 | .globl kvmap_linear_patch | 178 | .globl kvmap_linear_patch |
136 | kvmap_linear_patch: | 179 | kvmap_linear_patch: |
137 | #endif | 180 | ba,pt %xcc, kvmap_dtlb_tsb4m_load |
138 | ba,pt %xcc, kvmap_load | ||
139 | xor %g2, %g4, %g5 | 181 | xor %g2, %g4, %g5 |
140 | 182 | ||
141 | #ifdef CONFIG_DEBUG_PAGEALLOC | 183 | kvmap_dtlb_vmalloc_addr: |
142 | sethi %hi(swapper_pg_dir), %g5 | 184 | KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) |
143 | or %g5, %lo(swapper_pg_dir), %g5 | 185 | |
144 | sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6 | 186 | KTSB_LOCK_TAG(%g1, %g2, %g7) |
145 | srlx %g6, 64 - PAGE_SHIFT, %g6 | 187 | |
146 | andn %g6, 0x3, %g6 | 188 | /* Load and check PTE. */ |
147 | lduw [%g5 + %g6], %g5 | 189 | ldxa [%g5] ASI_PHYS_USE_EC, %g5 |
148 | brz,pn %g5, longpath | 190 | mov 1, %g7 |
149 | sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6 | 191 | sllx %g7, TSB_TAG_INVALID_BIT, %g7 |
150 | srlx %g6, 64 - PAGE_SHIFT, %g6 | 192 | brgez,a,pn %g5, kvmap_dtlb_longpath |
151 | sllx %g5, 11, %g5 | 193 | KTSB_STORE(%g1, %g7) |
152 | andn %g6, 0x3, %g6 | 194 | |
153 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | 195 | KTSB_WRITE(%g1, %g5, %g6) |
154 | brz,pn %g5, longpath | 196 | |
155 | sllx %g4, 64 - PMD_SHIFT, %g6 | 197 | /* fallthrough to TLB load */ |
156 | srlx %g6, 64 - PAGE_SHIFT, %g6 | 198 | |
157 | sllx %g5, 11, %g5 | 199 | kvmap_dtlb_load: |
158 | andn %g6, 0x7, %g6 | 200 | |
159 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | 201 | 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB |
160 | brz,pn %g5, longpath | 202 | retry |
203 | .section .sun4v_2insn_patch, "ax" | ||
204 | .word 661b | ||
205 | nop | ||
206 | nop | ||
207 | .previous | ||
208 | |||
209 | /* For sun4v the ASI_DTLB_DATA_IN store and the retry | ||
210 | * instruction get nop'd out and we get here to branch | ||
211 | * to the sun4v tlb load code. The registers are setup | ||
212 | * as follows: | ||
213 | * | ||
214 | * %g4: vaddr | ||
215 | * %g5: PTE | ||
216 | * %g6: TAG | ||
217 | * | ||
218 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
219 | * up here. | ||
220 | */ | ||
221 | ba,pt %xcc, sun4v_dtlb_load | ||
222 | mov %g5, %g3 | ||
223 | |||
224 | kvmap_dtlb_nonlinear: | ||
225 | /* Catch kernel NULL pointer derefs. */ | ||
226 | sethi %hi(PAGE_SIZE), %g5 | ||
227 | cmp %g4, %g5 | ||
228 | bleu,pn %xcc, kvmap_dtlb_longpath | ||
161 | nop | 229 | nop |
162 | ba,a,pt %xcc, kvmap_load | ||
163 | #endif | ||
164 | 230 | ||
165 | kvmap_nonlinear: | 231 | KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) |
232 | |||
233 | kvmap_dtlb_tsbmiss: | ||
166 | sethi %hi(MODULES_VADDR), %g5 | 234 | sethi %hi(MODULES_VADDR), %g5 |
167 | cmp %g4, %g5 | 235 | cmp %g4, %g5 |
168 | blu,pn %xcc, longpath | 236 | blu,pn %xcc, kvmap_dtlb_longpath |
169 | mov (VMALLOC_END >> 24), %g5 | 237 | mov (VMALLOC_END >> 24), %g5 |
170 | sllx %g5, 24, %g5 | 238 | sllx %g5, 24, %g5 |
171 | cmp %g4, %g5 | 239 | cmp %g4, %g5 |
172 | bgeu,pn %xcc, longpath | 240 | bgeu,pn %xcc, kvmap_dtlb_longpath |
173 | nop | 241 | nop |
174 | 242 | ||
175 | kvmap_check_obp: | 243 | kvmap_check_obp: |
176 | sethi %hi(LOW_OBP_ADDRESS), %g5 | 244 | sethi %hi(LOW_OBP_ADDRESS), %g5 |
177 | cmp %g4, %g5 | 245 | cmp %g4, %g5 |
178 | blu,pn %xcc, kvmap_vmalloc_addr | 246 | blu,pn %xcc, kvmap_dtlb_vmalloc_addr |
179 | mov 0x1, %g5 | 247 | mov 0x1, %g5 |
180 | sllx %g5, 32, %g5 | 248 | sllx %g5, 32, %g5 |
181 | cmp %g4, %g5 | 249 | cmp %g4, %g5 |
182 | blu,pn %xcc, kvmap_do_obp | 250 | blu,pn %xcc, kvmap_dtlb_obp |
183 | nop | 251 | nop |
184 | 252 | ba,pt %xcc, kvmap_dtlb_vmalloc_addr | |
185 | kvmap_vmalloc_addr: | ||
186 | /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */ | ||
187 | ldxa [%g3 + %g6] ASI_N, %g5 | ||
188 | brgez,pn %g5, longpath | ||
189 | nop | 253 | nop |
190 | 254 | ||
191 | kvmap_load: | 255 | kvmap_dtlb_longpath: |
192 | /* PTE is valid, load into TLB and return from trap. */ | 256 | |
193 | stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB | 257 | 661: rdpr %pstate, %g5 |
194 | retry | 258 | wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate |
259 | .section .sun4v_2insn_patch, "ax" | ||
260 | .word 661b | ||
261 | SET_GL(1) | ||
262 | ldxa [%g0] ASI_SCRATCHPAD, %g5 | ||
263 | .previous | ||
264 | |||
265 | rdpr %tl, %g3 | ||
266 | cmp %g3, 1 | ||
267 | |||
268 | 661: mov TLB_TAG_ACCESS, %g4 | ||
269 | ldxa [%g4] ASI_DMMU, %g5 | ||
270 | .section .sun4v_2insn_patch, "ax" | ||
271 | .word 661b | ||
272 | ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
273 | nop | ||
274 | .previous | ||
275 | |||
276 | be,pt %xcc, sparc64_realfault_common | ||
277 | mov FAULT_CODE_DTLB, %g4 | ||
278 | ba,pt %xcc, winfix_trampoline | ||
279 | nop | ||
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index 2ff7c32ab0ce..95ffa9418620 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c | |||
@@ -188,6 +188,7 @@ extern void psycho_init(int, char *); | |||
188 | extern void schizo_init(int, char *); | 188 | extern void schizo_init(int, char *); |
189 | extern void schizo_plus_init(int, char *); | 189 | extern void schizo_plus_init(int, char *); |
190 | extern void tomatillo_init(int, char *); | 190 | extern void tomatillo_init(int, char *); |
191 | extern void sun4v_pci_init(int, char *); | ||
191 | 192 | ||
192 | static struct { | 193 | static struct { |
193 | char *model_name; | 194 | char *model_name; |
@@ -204,6 +205,7 @@ static struct { | |||
204 | { "pci108e,8002", schizo_plus_init }, | 205 | { "pci108e,8002", schizo_plus_init }, |
205 | { "SUNW,tomatillo", tomatillo_init }, | 206 | { "SUNW,tomatillo", tomatillo_init }, |
206 | { "pci108e,a801", tomatillo_init }, | 207 | { "pci108e,a801", tomatillo_init }, |
208 | { "SUNW,sun4v-pci", sun4v_pci_init }, | ||
207 | }; | 209 | }; |
208 | #define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \ | 210 | #define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \ |
209 | sizeof(pci_controller_table[0])) | 211 | sizeof(pci_controller_table[0])) |
@@ -283,6 +285,12 @@ int __init pcic_present(void) | |||
283 | return pci_controller_scan(pci_is_controller); | 285 | return pci_controller_scan(pci_is_controller); |
284 | } | 286 | } |
285 | 287 | ||
288 | struct pci_iommu_ops *pci_iommu_ops; | ||
289 | EXPORT_SYMBOL(pci_iommu_ops); | ||
290 | |||
291 | extern struct pci_iommu_ops pci_sun4u_iommu_ops, | ||
292 | pci_sun4v_iommu_ops; | ||
293 | |||
286 | /* Find each controller in the system, attach and initialize | 294 | /* Find each controller in the system, attach and initialize |
287 | * software state structure for each and link into the | 295 | * software state structure for each and link into the |
288 | * pci_controller_root. Setup the controller enough such | 296 | * pci_controller_root. Setup the controller enough such |
@@ -290,6 +298,11 @@ int __init pcic_present(void) | |||
290 | */ | 298 | */ |
291 | static void __init pci_controller_probe(void) | 299 | static void __init pci_controller_probe(void) |
292 | { | 300 | { |
301 | if (tlb_type == hypervisor) | ||
302 | pci_iommu_ops = &pci_sun4v_iommu_ops; | ||
303 | else | ||
304 | pci_iommu_ops = &pci_sun4u_iommu_ops; | ||
305 | |||
293 | printk("PCI: Probing for controllers.\n"); | 306 | printk("PCI: Probing for controllers.\n"); |
294 | 307 | ||
295 | pci_controller_scan(pci_controller_init); | 308 | pci_controller_scan(pci_controller_init); |
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index 58310aacea28..33dedb1aacd4 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c | |||
@@ -39,6 +39,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm, | |||
39 | { | 39 | { |
40 | int node; | 40 | int node; |
41 | 41 | ||
42 | *nregs = 0; | ||
43 | |||
42 | /* | 44 | /* |
43 | * Return the PBM's PROM node in case we are it's PCI device, | 45 | * Return the PBM's PROM node in case we are it's PCI device, |
44 | * as the PBM's reg property is different to standard PCI reg | 46 | * as the PBM's reg property is different to standard PCI reg |
@@ -51,10 +53,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm, | |||
51 | pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || | 53 | pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || |
52 | pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || | 54 | pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || |
53 | pdev->device == PCI_DEVICE_ID_SUN_SABRE || | 55 | pdev->device == PCI_DEVICE_ID_SUN_SABRE || |
54 | pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) { | 56 | pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) |
55 | *nregs = 0; | ||
56 | return bus_prom_node; | 57 | return bus_prom_node; |
57 | } | ||
58 | 58 | ||
59 | node = prom_getchild(bus_prom_node); | 59 | node = prom_getchild(bus_prom_node); |
60 | while (node != 0) { | 60 | while (node != 0) { |
@@ -541,135 +541,183 @@ void __init pci_assign_unassigned(struct pci_pbm_info *pbm, | |||
541 | pci_assign_unassigned(pbm, bus); | 541 | pci_assign_unassigned(pbm, bus); |
542 | } | 542 | } |
543 | 543 | ||
544 | static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt) | 544 | static inline unsigned int pci_slot_swivel(struct pci_pbm_info *pbm, |
545 | struct pci_dev *toplevel_pdev, | ||
546 | struct pci_dev *pdev, | ||
547 | unsigned int interrupt) | ||
545 | { | 548 | { |
546 | struct linux_prom_pci_intmap bridge_local_intmap[PROM_PCIIMAP_MAX], *intmap; | 549 | unsigned int ret; |
547 | struct linux_prom_pci_intmask bridge_local_intmask, *intmask; | ||
548 | struct pcidev_cookie *dev_pcp = pdev->sysdata; | ||
549 | struct pci_pbm_info *pbm = dev_pcp->pbm; | ||
550 | struct linux_prom_pci_registers *pregs = dev_pcp->prom_regs; | ||
551 | unsigned int hi, mid, lo, irq; | ||
552 | int i, num_intmap, map_slot; | ||
553 | 550 | ||
554 | intmap = &pbm->pbm_intmap[0]; | 551 | if (unlikely(interrupt < 1 || interrupt > 4)) { |
555 | intmask = &pbm->pbm_intmask; | 552 | printk("%s: Device %s interrupt value of %u is strange.\n", |
556 | num_intmap = pbm->num_pbm_intmap; | 553 | pbm->name, pci_name(pdev), interrupt); |
557 | map_slot = 0; | 554 | return interrupt; |
555 | } | ||
558 | 556 | ||
559 | /* If we are underneath a PCI bridge, use PROM register | 557 | ret = ((interrupt - 1 + (PCI_SLOT(pdev->devfn) & 3)) & 3) + 1; |
560 | * property of the parent bridge which is closest to | 558 | |
561 | * the PBM. | 559 | printk("%s: %s IRQ Swivel %s [%x:%x] -> [%x]\n", |
562 | * | 560 | pbm->name, pci_name(toplevel_pdev), pci_name(pdev), |
563 | * However if that parent bridge has interrupt map/mask | 561 | interrupt, PCI_SLOT(pdev->devfn), ret); |
564 | * properties of its own we use the PROM register property | 562 | |
565 | * of the next child device on the path to PDEV. | 563 | return ret; |
566 | * | 564 | } |
567 | * In detail the two cases are (note that the 'X' below is the | 565 | |
568 | * 'next child on the path to PDEV' mentioned above): | 566 | static inline unsigned int pci_apply_intmap(struct pci_pbm_info *pbm, |
569 | * | 567 | struct pci_dev *toplevel_pdev, |
570 | * 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV | 568 | struct pci_dev *pbus, |
571 | * | 569 | struct pci_dev *pdev, |
572 | * Here we use regs of 'PCI bus' device. | 570 | unsigned int interrupt, |
573 | * | 571 | unsigned int *cnode) |
574 | * 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV | 572 | { |
575 | * | 573 | struct linux_prom_pci_intmap imap[PROM_PCIIMAP_MAX]; |
576 | * Here we use regs of 'X'. Note that X can be PDEV. | 574 | struct linux_prom_pci_intmask imask; |
577 | */ | 575 | struct pcidev_cookie *pbus_pcp = pbus->sysdata; |
578 | if (pdev->bus->number != pbm->pci_first_busno) { | 576 | struct pcidev_cookie *pdev_pcp = pdev->sysdata; |
579 | struct pcidev_cookie *bus_pcp, *regs_pcp; | 577 | struct linux_prom_pci_registers *pregs = pdev_pcp->prom_regs; |
580 | struct pci_dev *bus_dev, *regs_dev; | 578 | int plen, num_imap, i; |
581 | int plen; | 579 | unsigned int hi, mid, lo, irq, orig_interrupt; |
580 | |||
581 | *cnode = pbus_pcp->prom_node; | ||
582 | |||
583 | plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map", | ||
584 | (char *) &imap[0], sizeof(imap)); | ||
585 | if (plen <= 0 || | ||
586 | (plen % sizeof(struct linux_prom_pci_intmap)) != 0) { | ||
587 | printk("%s: Device %s interrupt-map has bad len %d\n", | ||
588 | pbm->name, pci_name(pbus), plen); | ||
589 | goto no_intmap; | ||
590 | } | ||
591 | num_imap = plen / sizeof(struct linux_prom_pci_intmap); | ||
592 | |||
593 | plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map-mask", | ||
594 | (char *) &imask, sizeof(imask)); | ||
595 | if (plen <= 0 || | ||
596 | (plen % sizeof(struct linux_prom_pci_intmask)) != 0) { | ||
597 | printk("%s: Device %s interrupt-map-mask has bad len %d\n", | ||
598 | pbm->name, pci_name(pbus), plen); | ||
599 | goto no_intmap; | ||
600 | } | ||
601 | |||
602 | orig_interrupt = interrupt; | ||
582 | 603 | ||
583 | bus_dev = pdev->bus->self; | 604 | hi = pregs->phys_hi & imask.phys_hi; |
584 | regs_dev = pdev; | 605 | mid = pregs->phys_mid & imask.phys_mid; |
606 | lo = pregs->phys_lo & imask.phys_lo; | ||
607 | irq = interrupt & imask.interrupt; | ||
585 | 608 | ||
586 | while (bus_dev->bus && | 609 | for (i = 0; i < num_imap; i++) { |
587 | bus_dev->bus->number != pbm->pci_first_busno) { | 610 | if (imap[i].phys_hi == hi && |
588 | regs_dev = bus_dev; | 611 | imap[i].phys_mid == mid && |
589 | bus_dev = bus_dev->bus->self; | 612 | imap[i].phys_lo == lo && |
613 | imap[i].interrupt == irq) { | ||
614 | *cnode = imap[i].cnode; | ||
615 | interrupt = imap[i].cinterrupt; | ||
590 | } | 616 | } |
617 | } | ||
591 | 618 | ||
592 | regs_pcp = regs_dev->sysdata; | 619 | printk("%s: %s MAP BUS %s DEV %s [%x] -> [%x]\n", |
593 | pregs = regs_pcp->prom_regs; | 620 | pbm->name, pci_name(toplevel_pdev), |
621 | pci_name(pbus), pci_name(pdev), | ||
622 | orig_interrupt, interrupt); | ||
594 | 623 | ||
595 | bus_pcp = bus_dev->sysdata; | 624 | no_intmap: |
625 | return interrupt; | ||
626 | } | ||
596 | 627 | ||
597 | /* But if the PCI bridge has it's own interrupt map | 628 | /* For each PCI bus on the way to the root: |
598 | * and mask properties, use that and the regs of the | 629 | * 1) If it has an interrupt-map property, apply it. |
599 | * PCI entity at the next level down on the path to the | 630 | * 2) Else, swivel the interrupt number based upon the PCI device number. |
600 | * device. | 631 | * |
601 | */ | 632 | * Return the "IRQ controller" node. If this is the PBM's device node, |
602 | plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map", | 633 | * all interrupt translations are complete, else we should use that node's |
603 | (char *) &bridge_local_intmap[0], | 634 | * "reg" property to apply the PBM's "interrupt-{map,mask}" to the interrupt. |
604 | sizeof(bridge_local_intmap)); | 635 | */ |
605 | if (plen != -1) { | 636 | static unsigned int __init pci_intmap_match_to_root(struct pci_pbm_info *pbm, |
606 | intmap = &bridge_local_intmap[0]; | 637 | struct pci_dev *pdev, |
607 | num_intmap = plen / sizeof(struct linux_prom_pci_intmap); | 638 | unsigned int *interrupt) |
608 | plen = prom_getproperty(bus_pcp->prom_node, | 639 | { |
609 | "interrupt-map-mask", | 640 | struct pci_dev *toplevel_pdev = pdev; |
610 | (char *) &bridge_local_intmask, | 641 | struct pcidev_cookie *toplevel_pcp = toplevel_pdev->sysdata; |
611 | sizeof(bridge_local_intmask)); | 642 | unsigned int cnode = toplevel_pcp->prom_node; |
612 | if (plen == -1) { | 643 | |
613 | printk("pci_intmap_match: Warning! Bridge has intmap " | 644 | while (pdev->bus->number != pbm->pci_first_busno) { |
614 | "but no intmask.\n"); | 645 | struct pci_dev *pbus = pdev->bus->self; |
615 | printk("pci_intmap_match: Trying to recover.\n"); | 646 | struct pcidev_cookie *pcp = pbus->sysdata; |
616 | return 0; | 647 | int plen; |
617 | } | ||
618 | 648 | ||
619 | if (pdev->bus->self != bus_dev) | 649 | plen = prom_getproplen(pcp->prom_node, "interrupt-map"); |
620 | map_slot = 1; | 650 | if (plen <= 0) { |
651 | *interrupt = pci_slot_swivel(pbm, toplevel_pdev, | ||
652 | pdev, *interrupt); | ||
653 | cnode = pcp->prom_node; | ||
621 | } else { | 654 | } else { |
622 | pregs = bus_pcp->prom_regs; | 655 | *interrupt = pci_apply_intmap(pbm, toplevel_pdev, |
623 | map_slot = 1; | 656 | pbus, pdev, |
657 | *interrupt, &cnode); | ||
658 | |||
659 | while (pcp->prom_node != cnode && | ||
660 | pbus->bus->number != pbm->pci_first_busno) { | ||
661 | pbus = pbus->bus->self; | ||
662 | pcp = pbus->sysdata; | ||
663 | } | ||
624 | } | 664 | } |
625 | } | 665 | pdev = pbus; |
626 | 666 | ||
627 | if (map_slot) { | 667 | if (cnode == pbm->prom_node) |
628 | *interrupt = ((*interrupt | 668 | break; |
629 | - 1 | ||
630 | + PCI_SLOT(pdev->devfn)) & 0x3) + 1; | ||
631 | } | 669 | } |
632 | 670 | ||
633 | hi = pregs->phys_hi & intmask->phys_hi; | 671 | return cnode; |
634 | mid = pregs->phys_mid & intmask->phys_mid; | 672 | } |
635 | lo = pregs->phys_lo & intmask->phys_lo; | 673 | |
636 | irq = *interrupt & intmask->interrupt; | 674 | static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt) |
637 | 675 | { | |
638 | for (i = 0; i < num_intmap; i++) { | 676 | struct pcidev_cookie *dev_pcp = pdev->sysdata; |
639 | if (intmap[i].phys_hi == hi && | 677 | struct pci_pbm_info *pbm = dev_pcp->pbm; |
640 | intmap[i].phys_mid == mid && | 678 | struct linux_prom_pci_registers reg[PROMREG_MAX]; |
641 | intmap[i].phys_lo == lo && | 679 | unsigned int hi, mid, lo, irq; |
642 | intmap[i].interrupt == irq) { | 680 | int i, cnode, plen; |
643 | *interrupt = intmap[i].cinterrupt; | 681 | |
644 | printk("PCI-IRQ: Routing bus[%2x] slot[%2x] map[%d] to INO[%02x]\n", | 682 | cnode = pci_intmap_match_to_root(pbm, pdev, interrupt); |
645 | pdev->bus->number, PCI_SLOT(pdev->devfn), | 683 | if (cnode == pbm->prom_node) |
646 | map_slot, *interrupt); | 684 | goto success; |
647 | return 1; | 685 | |
648 | } | 686 | plen = prom_getproperty(cnode, "reg", (char *) reg, sizeof(reg)); |
687 | if (plen <= 0 || | ||
688 | (plen % sizeof(struct linux_prom_pci_registers)) != 0) { | ||
689 | printk("%s: OBP node %x reg property has bad len %d\n", | ||
690 | pbm->name, cnode, plen); | ||
691 | goto fail; | ||
649 | } | 692 | } |
650 | 693 | ||
651 | /* We will run this code even if pbm->num_pbm_intmap is zero, just so | 694 | hi = reg[0].phys_hi & pbm->pbm_intmask.phys_hi; |
652 | * we can apply the slot mapping to the PROM interrupt property value. | 695 | mid = reg[0].phys_mid & pbm->pbm_intmask.phys_mid; |
653 | * So do not spit out these warnings in that case. | 696 | lo = reg[0].phys_lo & pbm->pbm_intmask.phys_lo; |
654 | */ | 697 | irq = *interrupt & pbm->pbm_intmask.interrupt; |
655 | if (num_intmap != 0) { | 698 | |
656 | /* Print it both to OBP console and kernel one so that if bootup | 699 | for (i = 0; i < pbm->num_pbm_intmap; i++) { |
657 | * hangs here the user has the information to report. | 700 | struct linux_prom_pci_intmap *intmap; |
658 | */ | 701 | |
659 | prom_printf("pci_intmap_match: bus %02x, devfn %02x: ", | 702 | intmap = &pbm->pbm_intmap[i]; |
660 | pdev->bus->number, pdev->devfn); | 703 | |
661 | prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n", | 704 | if (intmap->phys_hi == hi && |
662 | pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt); | 705 | intmap->phys_mid == mid && |
663 | prom_printf("Please email this information to davem@redhat.com\n"); | 706 | intmap->phys_lo == lo && |
664 | 707 | intmap->interrupt == irq) { | |
665 | printk("pci_intmap_match: bus %02x, devfn %02x: ", | 708 | *interrupt = intmap->cinterrupt; |
666 | pdev->bus->number, pdev->devfn); | 709 | goto success; |
667 | printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n", | 710 | } |
668 | pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt); | ||
669 | printk("Please email this information to davem@redhat.com\n"); | ||
670 | } | 711 | } |
671 | 712 | ||
713 | fail: | ||
672 | return 0; | 714 | return 0; |
715 | |||
716 | success: | ||
717 | printk("PCI-IRQ: Routing bus[%2x] slot[%2x] to INO[%02x]\n", | ||
718 | pdev->bus->number, PCI_SLOT(pdev->devfn), | ||
719 | *interrupt); | ||
720 | return 1; | ||
673 | } | 721 | } |
674 | 722 | ||
675 | static void __init pdev_fixup_irq(struct pci_dev *pdev) | 723 | static void __init pdev_fixup_irq(struct pci_dev *pdev) |
@@ -703,16 +751,18 @@ static void __init pdev_fixup_irq(struct pci_dev *pdev) | |||
703 | return; | 751 | return; |
704 | } | 752 | } |
705 | 753 | ||
706 | /* Fully specified already? */ | 754 | if (tlb_type != hypervisor) { |
707 | if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) { | 755 | /* Fully specified already? */ |
708 | pdev->irq = p->irq_build(pbm, pdev, prom_irq); | 756 | if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) { |
709 | goto have_irq; | 757 | pdev->irq = p->irq_build(pbm, pdev, prom_irq); |
710 | } | 758 | goto have_irq; |
759 | } | ||
711 | 760 | ||
712 | /* An onboard device? (bit 5 set) */ | 761 | /* An onboard device? (bit 5 set) */ |
713 | if ((prom_irq & PCI_IRQ_INO) & 0x20) { | 762 | if ((prom_irq & PCI_IRQ_INO) & 0x20) { |
714 | pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq)); | 763 | pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq)); |
715 | goto have_irq; | 764 | goto have_irq; |
765 | } | ||
716 | } | 766 | } |
717 | 767 | ||
718 | /* Can we find a matching entry in the interrupt-map? */ | 768 | /* Can we find a matching entry in the interrupt-map? */ |
@@ -927,33 +977,30 @@ void pci_register_legacy_regions(struct resource *io_res, | |||
927 | struct resource *p; | 977 | struct resource *p; |
928 | 978 | ||
929 | /* VGA Video RAM. */ | 979 | /* VGA Video RAM. */ |
930 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 980 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
931 | if (!p) | 981 | if (!p) |
932 | return; | 982 | return; |
933 | 983 | ||
934 | memset(p, 0, sizeof(*p)); | ||
935 | p->name = "Video RAM area"; | 984 | p->name = "Video RAM area"; |
936 | p->start = mem_res->start + 0xa0000UL; | 985 | p->start = mem_res->start + 0xa0000UL; |
937 | p->end = p->start + 0x1ffffUL; | 986 | p->end = p->start + 0x1ffffUL; |
938 | p->flags = IORESOURCE_BUSY; | 987 | p->flags = IORESOURCE_BUSY; |
939 | request_resource(mem_res, p); | 988 | request_resource(mem_res, p); |
940 | 989 | ||
941 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 990 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
942 | if (!p) | 991 | if (!p) |
943 | return; | 992 | return; |
944 | 993 | ||
945 | memset(p, 0, sizeof(*p)); | ||
946 | p->name = "System ROM"; | 994 | p->name = "System ROM"; |
947 | p->start = mem_res->start + 0xf0000UL; | 995 | p->start = mem_res->start + 0xf0000UL; |
948 | p->end = p->start + 0xffffUL; | 996 | p->end = p->start + 0xffffUL; |
949 | p->flags = IORESOURCE_BUSY; | 997 | p->flags = IORESOURCE_BUSY; |
950 | request_resource(mem_res, p); | 998 | request_resource(mem_res, p); |
951 | 999 | ||
952 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 1000 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
953 | if (!p) | 1001 | if (!p) |
954 | return; | 1002 | return; |
955 | 1003 | ||
956 | memset(p, 0, sizeof(*p)); | ||
957 | p->name = "Video ROM"; | 1004 | p->name = "Video ROM"; |
958 | p->start = mem_res->start + 0xc0000UL; | 1005 | p->start = mem_res->start + 0xc0000UL; |
959 | p->end = p->start + 0x7fffUL; | 1006 | p->end = p->start + 0x7fffUL; |
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index a11910be1013..8efbc139769d 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c | |||
@@ -139,12 +139,11 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, | |||
139 | /* Allocate and initialize the free area map. */ | 139 | /* Allocate and initialize the free area map. */ |
140 | sz = num_tsb_entries / 8; | 140 | sz = num_tsb_entries / 8; |
141 | sz = (sz + 7UL) & ~7UL; | 141 | sz = (sz + 7UL) & ~7UL; |
142 | iommu->arena.map = kmalloc(sz, GFP_KERNEL); | 142 | iommu->arena.map = kzalloc(sz, GFP_KERNEL); |
143 | if (!iommu->arena.map) { | 143 | if (!iommu->arena.map) { |
144 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); | 144 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); |
145 | prom_halt(); | 145 | prom_halt(); |
146 | } | 146 | } |
147 | memset(iommu->arena.map, 0, sz); | ||
148 | iommu->arena.limit = num_tsb_entries; | 147 | iommu->arena.limit = num_tsb_entries; |
149 | 148 | ||
150 | /* Allocate and initialize the dummy page which we | 149 | /* Allocate and initialize the dummy page which we |
@@ -219,7 +218,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) | |||
219 | * DMA for PCI device PDEV. Return non-NULL cpu-side address if | 218 | * DMA for PCI device PDEV. Return non-NULL cpu-side address if |
220 | * successful and set *DMA_ADDRP to the PCI side dma address. | 219 | * successful and set *DMA_ADDRP to the PCI side dma address. |
221 | */ | 220 | */ |
222 | void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) | 221 | static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) |
223 | { | 222 | { |
224 | struct pcidev_cookie *pcp; | 223 | struct pcidev_cookie *pcp; |
225 | struct pci_iommu *iommu; | 224 | struct pci_iommu *iommu; |
@@ -267,7 +266,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad | |||
267 | } | 266 | } |
268 | 267 | ||
269 | /* Free and unmap a consistent DMA translation. */ | 268 | /* Free and unmap a consistent DMA translation. */ |
270 | void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) | 269 | static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) |
271 | { | 270 | { |
272 | struct pcidev_cookie *pcp; | 271 | struct pcidev_cookie *pcp; |
273 | struct pci_iommu *iommu; | 272 | struct pci_iommu *iommu; |
@@ -294,7 +293,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_ | |||
294 | /* Map a single buffer at PTR of SZ bytes for PCI DMA | 293 | /* Map a single buffer at PTR of SZ bytes for PCI DMA |
295 | * in streaming mode. | 294 | * in streaming mode. |
296 | */ | 295 | */ |
297 | dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) | 296 | static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) |
298 | { | 297 | { |
299 | struct pcidev_cookie *pcp; | 298 | struct pcidev_cookie *pcp; |
300 | struct pci_iommu *iommu; | 299 | struct pci_iommu *iommu; |
@@ -415,7 +414,7 @@ do_flush_sync: | |||
415 | } | 414 | } |
416 | 415 | ||
417 | /* Unmap a single streaming mode DMA translation. */ | 416 | /* Unmap a single streaming mode DMA translation. */ |
418 | void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | 417 | static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) |
419 | { | 418 | { |
420 | struct pcidev_cookie *pcp; | 419 | struct pcidev_cookie *pcp; |
421 | struct pci_iommu *iommu; | 420 | struct pci_iommu *iommu; |
@@ -548,7 +547,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, | |||
548 | * When making changes here, inspect the assembly output. I was having | 547 | * When making changes here, inspect the assembly output. I was having |
549 | * hard time to kepp this routine out of using stack slots for holding variables. | 548 | * hard time to kepp this routine out of using stack slots for holding variables. |
550 | */ | 549 | */ |
551 | int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 550 | static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
552 | { | 551 | { |
553 | struct pcidev_cookie *pcp; | 552 | struct pcidev_cookie *pcp; |
554 | struct pci_iommu *iommu; | 553 | struct pci_iommu *iommu; |
@@ -562,9 +561,9 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int | |||
562 | /* Fast path single entry scatterlists. */ | 561 | /* Fast path single entry scatterlists. */ |
563 | if (nelems == 1) { | 562 | if (nelems == 1) { |
564 | sglist->dma_address = | 563 | sglist->dma_address = |
565 | pci_map_single(pdev, | 564 | pci_4u_map_single(pdev, |
566 | (page_address(sglist->page) + sglist->offset), | 565 | (page_address(sglist->page) + sglist->offset), |
567 | sglist->length, direction); | 566 | sglist->length, direction); |
568 | if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) | 567 | if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) |
569 | return 0; | 568 | return 0; |
570 | sglist->dma_length = sglist->length; | 569 | sglist->dma_length = sglist->length; |
@@ -635,7 +634,7 @@ bad_no_ctx: | |||
635 | } | 634 | } |
636 | 635 | ||
637 | /* Unmap a set of streaming mode DMA translations. */ | 636 | /* Unmap a set of streaming mode DMA translations. */ |
638 | void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 637 | static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
639 | { | 638 | { |
640 | struct pcidev_cookie *pcp; | 639 | struct pcidev_cookie *pcp; |
641 | struct pci_iommu *iommu; | 640 | struct pci_iommu *iommu; |
@@ -695,7 +694,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, | |||
695 | /* Make physical memory consistent for a single | 694 | /* Make physical memory consistent for a single |
696 | * streaming mode DMA translation after a transfer. | 695 | * streaming mode DMA translation after a transfer. |
697 | */ | 696 | */ |
698 | void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | 697 | static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) |
699 | { | 698 | { |
700 | struct pcidev_cookie *pcp; | 699 | struct pcidev_cookie *pcp; |
701 | struct pci_iommu *iommu; | 700 | struct pci_iommu *iommu; |
@@ -735,7 +734,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size | |||
735 | /* Make physical memory consistent for a set of streaming | 734 | /* Make physical memory consistent for a set of streaming |
736 | * mode DMA translations after a transfer. | 735 | * mode DMA translations after a transfer. |
737 | */ | 736 | */ |
738 | void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 737 | static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
739 | { | 738 | { |
740 | struct pcidev_cookie *pcp; | 739 | struct pcidev_cookie *pcp; |
741 | struct pci_iommu *iommu; | 740 | struct pci_iommu *iommu; |
@@ -776,6 +775,17 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i | |||
776 | spin_unlock_irqrestore(&iommu->lock, flags); | 775 | spin_unlock_irqrestore(&iommu->lock, flags); |
777 | } | 776 | } |
778 | 777 | ||
778 | struct pci_iommu_ops pci_sun4u_iommu_ops = { | ||
779 | .alloc_consistent = pci_4u_alloc_consistent, | ||
780 | .free_consistent = pci_4u_free_consistent, | ||
781 | .map_single = pci_4u_map_single, | ||
782 | .unmap_single = pci_4u_unmap_single, | ||
783 | .map_sg = pci_4u_map_sg, | ||
784 | .unmap_sg = pci_4u_unmap_sg, | ||
785 | .dma_sync_single_for_cpu = pci_4u_dma_sync_single_for_cpu, | ||
786 | .dma_sync_sg_for_cpu = pci_4u_dma_sync_sg_for_cpu, | ||
787 | }; | ||
788 | |||
779 | static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) | 789 | static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) |
780 | { | 790 | { |
781 | struct pci_dev *ali_isa_bridge; | 791 | struct pci_dev *ali_isa_bridge; |
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index c03ed5f49d31..d17878b145c2 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c | |||
@@ -286,17 +286,17 @@ static unsigned char psycho_pil_table[] = { | |||
286 | /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ | 286 | /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ |
287 | /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ | 287 | /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ |
288 | /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ | 288 | /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ |
289 | /*0x20*/4, /* SCSI */ | 289 | /*0x20*/5, /* SCSI */ |
290 | /*0x21*/5, /* Ethernet */ | 290 | /*0x21*/5, /* Ethernet */ |
291 | /*0x22*/8, /* Parallel Port */ | 291 | /*0x22*/8, /* Parallel Port */ |
292 | /*0x23*/13, /* Audio Record */ | 292 | /*0x23*/13, /* Audio Record */ |
293 | /*0x24*/14, /* Audio Playback */ | 293 | /*0x24*/14, /* Audio Playback */ |
294 | /*0x25*/15, /* PowerFail */ | 294 | /*0x25*/15, /* PowerFail */ |
295 | /*0x26*/4, /* second SCSI */ | 295 | /*0x26*/5, /* second SCSI */ |
296 | /*0x27*/11, /* Floppy */ | 296 | /*0x27*/11, /* Floppy */ |
297 | /*0x28*/4, /* Spare Hardware */ | 297 | /*0x28*/5, /* Spare Hardware */ |
298 | /*0x29*/9, /* Keyboard */ | 298 | /*0x29*/9, /* Keyboard */ |
299 | /*0x2a*/4, /* Mouse */ | 299 | /*0x2a*/5, /* Mouse */ |
300 | /*0x2b*/12, /* Serial */ | 300 | /*0x2b*/12, /* Serial */ |
301 | /*0x2c*/10, /* Timer 0 */ | 301 | /*0x2c*/10, /* Timer 0 */ |
302 | /*0x2d*/11, /* Timer 1 */ | 302 | /*0x2d*/11, /* Timer 1 */ |
@@ -313,11 +313,11 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
313 | 313 | ||
314 | ret = psycho_pil_table[ino]; | 314 | ret = psycho_pil_table[ino]; |
315 | if (ret == 0 && pdev == NULL) { | 315 | if (ret == 0 && pdev == NULL) { |
316 | ret = 4; | 316 | ret = 5; |
317 | } else if (ret == 0) { | 317 | } else if (ret == 0) { |
318 | switch ((pdev->class >> 16) & 0xff) { | 318 | switch ((pdev->class >> 16) & 0xff) { |
319 | case PCI_BASE_CLASS_STORAGE: | 319 | case PCI_BASE_CLASS_STORAGE: |
320 | ret = 4; | 320 | ret = 5; |
321 | break; | 321 | break; |
322 | 322 | ||
323 | case PCI_BASE_CLASS_NETWORK: | 323 | case PCI_BASE_CLASS_NETWORK: |
@@ -336,7 +336,7 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
336 | break; | 336 | break; |
337 | 337 | ||
338 | default: | 338 | default: |
339 | ret = 4; | 339 | ret = 5; |
340 | break; | 340 | break; |
341 | }; | 341 | }; |
342 | } | 342 | } |
@@ -1164,7 +1164,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) | |||
1164 | static void pbm_scan_bus(struct pci_controller_info *p, | 1164 | static void pbm_scan_bus(struct pci_controller_info *p, |
1165 | struct pci_pbm_info *pbm) | 1165 | struct pci_pbm_info *pbm) |
1166 | { | 1166 | { |
1167 | struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); | 1167 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
1168 | 1168 | ||
1169 | if (!cookie) { | 1169 | if (!cookie) { |
1170 | prom_printf("PSYCHO: Critical allocation failure.\n"); | 1170 | prom_printf("PSYCHO: Critical allocation failure.\n"); |
@@ -1172,7 +1172,6 @@ static void pbm_scan_bus(struct pci_controller_info *p, | |||
1172 | } | 1172 | } |
1173 | 1173 | ||
1174 | /* All we care about is the PBM. */ | 1174 | /* All we care about is the PBM. */ |
1175 | memset(cookie, 0, sizeof(*cookie)); | ||
1176 | cookie->pbm = pbm; | 1175 | cookie->pbm = pbm; |
1177 | 1176 | ||
1178 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, | 1177 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, |
@@ -1465,18 +1464,16 @@ void psycho_init(int node, char *model_name) | |||
1465 | } | 1464 | } |
1466 | } | 1465 | } |
1467 | 1466 | ||
1468 | p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | 1467 | p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); |
1469 | if (!p) { | 1468 | if (!p) { |
1470 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); | 1469 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); |
1471 | prom_halt(); | 1470 | prom_halt(); |
1472 | } | 1471 | } |
1473 | memset(p, 0, sizeof(*p)); | 1472 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
1474 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | ||
1475 | if (!iommu) { | 1473 | if (!iommu) { |
1476 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); | 1474 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); |
1477 | prom_halt(); | 1475 | prom_halt(); |
1478 | } | 1476 | } |
1479 | memset(iommu, 0, sizeof(*iommu)); | ||
1480 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; | 1477 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; |
1481 | 1478 | ||
1482 | p->next = pci_controller_root; | 1479 | p->next = pci_controller_root; |
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index da8e1364194f..f67bb7f078cf 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c | |||
@@ -533,17 +533,17 @@ static unsigned char sabre_pil_table[] = { | |||
533 | /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ | 533 | /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ |
534 | /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ | 534 | /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ |
535 | /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ | 535 | /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ |
536 | /*0x20*/4, /* SCSI */ | 536 | /*0x20*/5, /* SCSI */ |
537 | /*0x21*/5, /* Ethernet */ | 537 | /*0x21*/5, /* Ethernet */ |
538 | /*0x22*/8, /* Parallel Port */ | 538 | /*0x22*/8, /* Parallel Port */ |
539 | /*0x23*/13, /* Audio Record */ | 539 | /*0x23*/13, /* Audio Record */ |
540 | /*0x24*/14, /* Audio Playback */ | 540 | /*0x24*/14, /* Audio Playback */ |
541 | /*0x25*/15, /* PowerFail */ | 541 | /*0x25*/15, /* PowerFail */ |
542 | /*0x26*/4, /* second SCSI */ | 542 | /*0x26*/5, /* second SCSI */ |
543 | /*0x27*/11, /* Floppy */ | 543 | /*0x27*/11, /* Floppy */ |
544 | /*0x28*/4, /* Spare Hardware */ | 544 | /*0x28*/5, /* Spare Hardware */ |
545 | /*0x29*/9, /* Keyboard */ | 545 | /*0x29*/9, /* Keyboard */ |
546 | /*0x2a*/4, /* Mouse */ | 546 | /*0x2a*/5, /* Mouse */ |
547 | /*0x2b*/12, /* Serial */ | 547 | /*0x2b*/12, /* Serial */ |
548 | /*0x2c*/10, /* Timer 0 */ | 548 | /*0x2c*/10, /* Timer 0 */ |
549 | /*0x2d*/11, /* Timer 1 */ | 549 | /*0x2d*/11, /* Timer 1 */ |
@@ -565,11 +565,11 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
565 | 565 | ||
566 | ret = sabre_pil_table[ino]; | 566 | ret = sabre_pil_table[ino]; |
567 | if (ret == 0 && pdev == NULL) { | 567 | if (ret == 0 && pdev == NULL) { |
568 | ret = 4; | 568 | ret = 5; |
569 | } else if (ret == 0) { | 569 | } else if (ret == 0) { |
570 | switch ((pdev->class >> 16) & 0xff) { | 570 | switch ((pdev->class >> 16) & 0xff) { |
571 | case PCI_BASE_CLASS_STORAGE: | 571 | case PCI_BASE_CLASS_STORAGE: |
572 | ret = 4; | 572 | ret = 5; |
573 | break; | 573 | break; |
574 | 574 | ||
575 | case PCI_BASE_CLASS_NETWORK: | 575 | case PCI_BASE_CLASS_NETWORK: |
@@ -588,7 +588,7 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
588 | break; | 588 | break; |
589 | 589 | ||
590 | default: | 590 | default: |
591 | ret = 4; | 591 | ret = 5; |
592 | break; | 592 | break; |
593 | }; | 593 | }; |
594 | } | 594 | } |
@@ -1167,7 +1167,7 @@ static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) | |||
1167 | 1167 | ||
1168 | static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) | 1168 | static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) |
1169 | { | 1169 | { |
1170 | struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); | 1170 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
1171 | 1171 | ||
1172 | if (!cookie) { | 1172 | if (!cookie) { |
1173 | prom_printf("SABRE: Critical allocation failure.\n"); | 1173 | prom_printf("SABRE: Critical allocation failure.\n"); |
@@ -1175,7 +1175,6 @@ static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) | |||
1175 | } | 1175 | } |
1176 | 1176 | ||
1177 | /* All we care about is the PBM. */ | 1177 | /* All we care about is the PBM. */ |
1178 | memset(cookie, 0, sizeof(*cookie)); | ||
1179 | cookie->pbm = pbm; | 1178 | cookie->pbm = pbm; |
1180 | 1179 | ||
1181 | return cookie; | 1180 | return cookie; |
@@ -1556,19 +1555,17 @@ void sabre_init(int pnode, char *model_name) | |||
1556 | } | 1555 | } |
1557 | } | 1556 | } |
1558 | 1557 | ||
1559 | p = kmalloc(sizeof(*p), GFP_ATOMIC); | 1558 | p = kzalloc(sizeof(*p), GFP_ATOMIC); |
1560 | if (!p) { | 1559 | if (!p) { |
1561 | prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n"); | 1560 | prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n"); |
1562 | prom_halt(); | 1561 | prom_halt(); |
1563 | } | 1562 | } |
1564 | memset(p, 0, sizeof(*p)); | ||
1565 | 1563 | ||
1566 | iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC); | 1564 | iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); |
1567 | if (!iommu) { | 1565 | if (!iommu) { |
1568 | prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); | 1566 | prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); |
1569 | prom_halt(); | 1567 | prom_halt(); |
1570 | } | 1568 | } |
1571 | memset(iommu, 0, sizeof(*iommu)); | ||
1572 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; | 1569 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; |
1573 | 1570 | ||
1574 | upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff); | 1571 | upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff); |
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index d8c4e0919b4e..7fe4de03ac2e 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c | |||
@@ -243,8 +243,8 @@ static unsigned char schizo_pil_table[] = { | |||
243 | /*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */ | 243 | /*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */ |
244 | /*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */ | 244 | /*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */ |
245 | /*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */ | 245 | /*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */ |
246 | /*0x18*/4, /* SCSI */ | 246 | /*0x18*/5, /* SCSI */ |
247 | /*0x19*/4, /* second SCSI */ | 247 | /*0x19*/5, /* second SCSI */ |
248 | /*0x1a*/0, /* UNKNOWN */ | 248 | /*0x1a*/0, /* UNKNOWN */ |
249 | /*0x1b*/0, /* UNKNOWN */ | 249 | /*0x1b*/0, /* UNKNOWN */ |
250 | /*0x1c*/8, /* Parallel */ | 250 | /*0x1c*/8, /* Parallel */ |
@@ -254,7 +254,7 @@ static unsigned char schizo_pil_table[] = { | |||
254 | /*0x20*/13, /* Audio Record */ | 254 | /*0x20*/13, /* Audio Record */ |
255 | /*0x21*/14, /* Audio Playback */ | 255 | /*0x21*/14, /* Audio Playback */ |
256 | /*0x22*/12, /* Serial */ | 256 | /*0x22*/12, /* Serial */ |
257 | /*0x23*/4, /* EBUS I2C */ | 257 | /*0x23*/5, /* EBUS I2C */ |
258 | /*0x24*/10, /* RTC Clock */ | 258 | /*0x24*/10, /* RTC Clock */ |
259 | /*0x25*/11, /* Floppy */ | 259 | /*0x25*/11, /* Floppy */ |
260 | /*0x26*/0, /* UNKNOWN */ | 260 | /*0x26*/0, /* UNKNOWN */ |
@@ -296,11 +296,11 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
296 | 296 | ||
297 | ret = schizo_pil_table[ino]; | 297 | ret = schizo_pil_table[ino]; |
298 | if (ret == 0 && pdev == NULL) { | 298 | if (ret == 0 && pdev == NULL) { |
299 | ret = 4; | 299 | ret = 5; |
300 | } else if (ret == 0) { | 300 | } else if (ret == 0) { |
301 | switch ((pdev->class >> 16) & 0xff) { | 301 | switch ((pdev->class >> 16) & 0xff) { |
302 | case PCI_BASE_CLASS_STORAGE: | 302 | case PCI_BASE_CLASS_STORAGE: |
303 | ret = 4; | 303 | ret = 5; |
304 | break; | 304 | break; |
305 | 305 | ||
306 | case PCI_BASE_CLASS_NETWORK: | 306 | case PCI_BASE_CLASS_NETWORK: |
@@ -319,7 +319,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
319 | break; | 319 | break; |
320 | 320 | ||
321 | default: | 321 | default: |
322 | ret = 4; | 322 | ret = 5; |
323 | break; | 323 | break; |
324 | }; | 324 | }; |
325 | } | 325 | } |
@@ -1525,7 +1525,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) | |||
1525 | static void pbm_scan_bus(struct pci_controller_info *p, | 1525 | static void pbm_scan_bus(struct pci_controller_info *p, |
1526 | struct pci_pbm_info *pbm) | 1526 | struct pci_pbm_info *pbm) |
1527 | { | 1527 | { |
1528 | struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); | 1528 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
1529 | 1529 | ||
1530 | if (!cookie) { | 1530 | if (!cookie) { |
1531 | prom_printf("%s: Critical allocation failure.\n", pbm->name); | 1531 | prom_printf("%s: Critical allocation failure.\n", pbm->name); |
@@ -1533,7 +1533,6 @@ static void pbm_scan_bus(struct pci_controller_info *p, | |||
1533 | } | 1533 | } |
1534 | 1534 | ||
1535 | /* All we care about is the PBM. */ | 1535 | /* All we care about is the PBM. */ |
1536 | memset(cookie, 0, sizeof(*cookie)); | ||
1537 | cookie->pbm = pbm; | 1536 | cookie->pbm = pbm; |
1538 | 1537 | ||
1539 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, | 1538 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, |
@@ -2120,27 +2119,24 @@ static void __schizo_init(int node, char *model_name, int chip_type) | |||
2120 | } | 2119 | } |
2121 | } | 2120 | } |
2122 | 2121 | ||
2123 | p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | 2122 | p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); |
2124 | if (!p) { | 2123 | if (!p) { |
2125 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 2124 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); |
2126 | prom_halt(); | 2125 | prom_halt(); |
2127 | } | 2126 | } |
2128 | memset(p, 0, sizeof(*p)); | ||
2129 | 2127 | ||
2130 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 2128 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
2131 | if (!iommu) { | 2129 | if (!iommu) { |
2132 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 2130 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); |
2133 | prom_halt(); | 2131 | prom_halt(); |
2134 | } | 2132 | } |
2135 | memset(iommu, 0, sizeof(*iommu)); | ||
2136 | p->pbm_A.iommu = iommu; | 2133 | p->pbm_A.iommu = iommu; |
2137 | 2134 | ||
2138 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 2135 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
2139 | if (!iommu) { | 2136 | if (!iommu) { |
2140 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 2137 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); |
2141 | prom_halt(); | 2138 | prom_halt(); |
2142 | } | 2139 | } |
2143 | memset(iommu, 0, sizeof(*iommu)); | ||
2144 | p->pbm_B.iommu = iommu; | 2140 | p->pbm_B.iommu = iommu; |
2145 | 2141 | ||
2146 | p->next = pci_controller_root; | 2142 | p->next = pci_controller_root; |
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c new file mode 100644 index 000000000000..9372d4f376d5 --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v.c | |||
@@ -0,0 +1,1147 @@ | |||
1 | /* pci_sun4v.c: SUN4V specific PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/pci.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/percpu.h> | ||
13 | |||
14 | #include <asm/pbm.h> | ||
15 | #include <asm/iommu.h> | ||
16 | #include <asm/irq.h> | ||
17 | #include <asm/upa.h> | ||
18 | #include <asm/pstate.h> | ||
19 | #include <asm/oplib.h> | ||
20 | #include <asm/hypervisor.h> | ||
21 | |||
22 | #include "pci_impl.h" | ||
23 | #include "iommu_common.h" | ||
24 | |||
25 | #include "pci_sun4v.h" | ||
26 | |||
27 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) | ||
28 | |||
29 | struct pci_iommu_batch { | ||
30 | struct pci_dev *pdev; /* Device mapping is for. */ | ||
31 | unsigned long prot; /* IOMMU page protections */ | ||
32 | unsigned long entry; /* Index into IOTSB. */ | ||
33 | u64 *pglist; /* List of physical pages */ | ||
34 | unsigned long npages; /* Number of pages in list. */ | ||
35 | }; | ||
36 | |||
37 | static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); | ||
38 | |||
39 | /* Interrupts must be disabled. */ | ||
40 | static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) | ||
41 | { | ||
42 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | ||
43 | |||
44 | p->pdev = pdev; | ||
45 | p->prot = prot; | ||
46 | p->entry = entry; | ||
47 | p->npages = 0; | ||
48 | } | ||
49 | |||
50 | /* Interrupts must be disabled. */ | ||
51 | static long pci_iommu_batch_flush(struct pci_iommu_batch *p) | ||
52 | { | ||
53 | struct pcidev_cookie *pcp = p->pdev->sysdata; | ||
54 | unsigned long devhandle = pcp->pbm->devhandle; | ||
55 | unsigned long prot = p->prot; | ||
56 | unsigned long entry = p->entry; | ||
57 | u64 *pglist = p->pglist; | ||
58 | unsigned long npages = p->npages; | ||
59 | |||
60 | while (npages != 0) { | ||
61 | long num; | ||
62 | |||
63 | num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), | ||
64 | npages, prot, __pa(pglist)); | ||
65 | if (unlikely(num < 0)) { | ||
66 | if (printk_ratelimit()) | ||
67 | printk("pci_iommu_batch_flush: IOMMU map of " | ||
68 | "[%08lx:%08lx:%lx:%lx:%lx] failed with " | ||
69 | "status %ld\n", | ||
70 | devhandle, HV_PCI_TSBID(0, entry), | ||
71 | npages, prot, __pa(pglist), num); | ||
72 | return -1; | ||
73 | } | ||
74 | |||
75 | entry += num; | ||
76 | npages -= num; | ||
77 | pglist += num; | ||
78 | } | ||
79 | |||
80 | p->entry = entry; | ||
81 | p->npages = 0; | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | /* Interrupts must be disabled. */ | ||
87 | static inline long pci_iommu_batch_add(u64 phys_page) | ||
88 | { | ||
89 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | ||
90 | |||
91 | BUG_ON(p->npages >= PGLIST_NENTS); | ||
92 | |||
93 | p->pglist[p->npages++] = phys_page; | ||
94 | if (p->npages == PGLIST_NENTS) | ||
95 | return pci_iommu_batch_flush(p); | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | /* Interrupts must be disabled. */ | ||
101 | static inline long pci_iommu_batch_end(void) | ||
102 | { | ||
103 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | ||
104 | |||
105 | BUG_ON(p->npages >= PGLIST_NENTS); | ||
106 | |||
107 | return pci_iommu_batch_flush(p); | ||
108 | } | ||
109 | |||
110 | static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages) | ||
111 | { | ||
112 | unsigned long n, i, start, end, limit; | ||
113 | int pass; | ||
114 | |||
115 | limit = arena->limit; | ||
116 | start = arena->hint; | ||
117 | pass = 0; | ||
118 | |||
119 | again: | ||
120 | n = find_next_zero_bit(arena->map, limit, start); | ||
121 | end = n + npages; | ||
122 | if (unlikely(end >= limit)) { | ||
123 | if (likely(pass < 1)) { | ||
124 | limit = start; | ||
125 | start = 0; | ||
126 | pass++; | ||
127 | goto again; | ||
128 | } else { | ||
129 | /* Scanned the whole thing, give up. */ | ||
130 | return -1; | ||
131 | } | ||
132 | } | ||
133 | |||
134 | for (i = n; i < end; i++) { | ||
135 | if (test_bit(i, arena->map)) { | ||
136 | start = i + 1; | ||
137 | goto again; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | for (i = n; i < end; i++) | ||
142 | __set_bit(i, arena->map); | ||
143 | |||
144 | arena->hint = end; | ||
145 | |||
146 | return n; | ||
147 | } | ||
148 | |||
149 | static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) | ||
150 | { | ||
151 | unsigned long i; | ||
152 | |||
153 | for (i = base; i < (base + npages); i++) | ||
154 | __clear_bit(i, arena->map); | ||
155 | } | ||
156 | |||
157 | static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) | ||
158 | { | ||
159 | struct pcidev_cookie *pcp; | ||
160 | struct pci_iommu *iommu; | ||
161 | unsigned long flags, order, first_page, npages, n; | ||
162 | void *ret; | ||
163 | long entry; | ||
164 | |||
165 | size = IO_PAGE_ALIGN(size); | ||
166 | order = get_order(size); | ||
167 | if (unlikely(order >= MAX_ORDER)) | ||
168 | return NULL; | ||
169 | |||
170 | npages = size >> IO_PAGE_SHIFT; | ||
171 | |||
172 | first_page = __get_free_pages(GFP_ATOMIC, order); | ||
173 | if (unlikely(first_page == 0UL)) | ||
174 | return NULL; | ||
175 | |||
176 | memset((char *)first_page, 0, PAGE_SIZE << order); | ||
177 | |||
178 | pcp = pdev->sysdata; | ||
179 | iommu = pcp->pbm->iommu; | ||
180 | |||
181 | spin_lock_irqsave(&iommu->lock, flags); | ||
182 | entry = pci_arena_alloc(&iommu->arena, npages); | ||
183 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
184 | |||
185 | if (unlikely(entry < 0L)) | ||
186 | goto arena_alloc_fail; | ||
187 | |||
188 | *dma_addrp = (iommu->page_table_map_base + | ||
189 | (entry << IO_PAGE_SHIFT)); | ||
190 | ret = (void *) first_page; | ||
191 | first_page = __pa(first_page); | ||
192 | |||
193 | local_irq_save(flags); | ||
194 | |||
195 | pci_iommu_batch_start(pdev, | ||
196 | (HV_PCI_MAP_ATTR_READ | | ||
197 | HV_PCI_MAP_ATTR_WRITE), | ||
198 | entry); | ||
199 | |||
200 | for (n = 0; n < npages; n++) { | ||
201 | long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE)); | ||
202 | if (unlikely(err < 0L)) | ||
203 | goto iommu_map_fail; | ||
204 | } | ||
205 | |||
206 | if (unlikely(pci_iommu_batch_end() < 0L)) | ||
207 | goto iommu_map_fail; | ||
208 | |||
209 | local_irq_restore(flags); | ||
210 | |||
211 | return ret; | ||
212 | |||
213 | iommu_map_fail: | ||
214 | /* Interrupts are disabled. */ | ||
215 | spin_lock(&iommu->lock); | ||
216 | pci_arena_free(&iommu->arena, entry, npages); | ||
217 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
218 | |||
219 | arena_alloc_fail: | ||
220 | free_pages(first_page, order); | ||
221 | return NULL; | ||
222 | } | ||
223 | |||
224 | static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) | ||
225 | { | ||
226 | struct pcidev_cookie *pcp; | ||
227 | struct pci_iommu *iommu; | ||
228 | unsigned long flags, order, npages, entry; | ||
229 | u32 devhandle; | ||
230 | |||
231 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | ||
232 | pcp = pdev->sysdata; | ||
233 | iommu = pcp->pbm->iommu; | ||
234 | devhandle = pcp->pbm->devhandle; | ||
235 | entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
236 | |||
237 | spin_lock_irqsave(&iommu->lock, flags); | ||
238 | |||
239 | pci_arena_free(&iommu->arena, entry, npages); | ||
240 | |||
241 | do { | ||
242 | unsigned long num; | ||
243 | |||
244 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | ||
245 | npages); | ||
246 | entry += num; | ||
247 | npages -= num; | ||
248 | } while (npages != 0); | ||
249 | |||
250 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
251 | |||
252 | order = get_order(size); | ||
253 | if (order < 10) | ||
254 | free_pages((unsigned long)cpu, order); | ||
255 | } | ||
256 | |||
/* pci_4v_map_single() - map a single CPU buffer for streaming DMA.
 * @pdev:      device the mapping is for
 * @ptr:       kernel virtual address of the buffer
 * @sz:        buffer length in bytes
 * @direction: PCI_DMA_* transfer direction (must not be PCI_DMA_NONE)
 *
 * Returns the DMA (bus) address, or PCI_DMA_ERROR_CODE on failure.
 */
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Number of IO pages spanned by [ptr, ptr + sz). */
	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	/* Preserve the sub-page offset of the original buffer. */
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	/* Batch the hypervisor map calls with local interrupts off;
	 * 'flags' is reused here and restored by the error path's
	 * spin_unlock_irqrestore() or by local_irq_restore() below.
	 */
	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}
321 | |||
/* pci_4v_unmap_single() - tear down a mapping made by pci_4v_map_single().
 * @pdev:      device the mapping belongs to
 * @bus_addr:  DMA address returned by the map call
 * @sz:        original mapping length in bytes
 * @direction: PCI_DMA_* direction (must not be PCI_DMA_NONE)
 */
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	/* Recompute the IO page span from the (possibly unaligned)
	 * bus address, then page-align the address itself.
	 */
	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	/* The hypervisor may demap fewer entries than requested per
	 * call, so loop until the whole range is gone.
	 */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
360 | |||
/* Physical address of a scatterlist entry's data. */
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

/* fill_sg() - program the IOMMU for a coalesced scatterlist.
 * @entry:  first TSB index reserved for this mapping
 * @pdev:   device being mapped
 * @sg:     scatterlist whose dma_address/dma_length were set up earlier
 * @nused:  number of coalesced DMA segments to emit
 * @nelems: total number of scatterlist entries
 * @prot:   HV_PCI_MAP_ATTR_* protection bits
 *
 * Walks each coalesced DMA segment and emits one hypervisor mapping
 * per IO page via the per-cpu batching machinery, with local
 * interrupts disabled for the duration.  Returns 0 on success or
 * -1 if a batched hypervisor call fails.
 */
static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		/* pteval == ~0UL means "no current page"; it is also
		 * reset below when a value would alias IO page zero.
		 */
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		/* Number of IO pages covered by this DMA segment. */
		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					/* Entry starts in a new IO page. */
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					/* Entry crosses an IO page boundary. */
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			/* Emit one mapping per IO page in this run. */
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}
457 | |||
/* pci_4v_map_sg() - map a scatterlist for streaming DMA.
 * @pdev:      device the mappings are for
 * @sglist:    scatterlist entries to map
 * @nelems:    number of entries in @sglist
 * @direction: PCI_DMA_* transfer direction (must not be PCI_DMA_NONE)
 *
 * Returns the number of coalesced DMA segments (each with
 * dma_address/dma_length filled in), or 0 on failure.
 */
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses.  prepare_sg() left
	 * IOMMU-relative offsets; rebase onto the allocated range.
	 * A zero dma_length terminates the coalesced list.
	 */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
534 | |||
/* pci_4v_unmap_sg() - tear down mappings made by pci_4v_map_sg().
 * @pdev:      device the mappings belong to
 * @sglist:    the scatterlist that was mapped
 * @nelems:    number of entries originally passed to map_sg
 * @direction: PCI_DMA_* direction
 */
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		/* NOTE(review): unlike pci_4v_unmap_single(), this does
		 * not return here and still performs the unmap below —
		 * presumably deliberate best-effort; confirm.
		 */
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	/* Find the last used segment (a zero dma_length terminates
	 * the coalesced list) to compute the full IO page span.
	 */
	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	/* Hypervisor may demap fewer entries than requested per call. */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
578 | |||
/* DMA-API sync hook for a single mapping; empty by design on this
 * platform ("Nothing to do").
 */
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}
583 | |||
/* DMA-API sync hook for a scatterlist; empty by design on this
 * platform ("Nothing to do").
 */
static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}
588 | |||
/* DMA operation vector for SUN4V (hypervisor-mediated) PCI
 * controllers, wiring the pci_4v_* implementations above.
 */
struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
599 | |||
600 | /* SUN4V PCI configuration space accessors. */ | ||
601 | |||
602 | static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) | ||
603 | { | ||
604 | if (bus == pbm->pci_first_busno) { | ||
605 | if (device == 0 && func == 0) | ||
606 | return 0; | ||
607 | return 1; | ||
608 | } | ||
609 | |||
610 | if (bus < pbm->pci_first_busno || | ||
611 | bus > pbm->pci_last_busno) | ||
612 | return 1; | ||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | ||
617 | int where, int size, u32 *value) | ||
618 | { | ||
619 | struct pci_pbm_info *pbm = bus_dev->sysdata; | ||
620 | u32 devhandle = pbm->devhandle; | ||
621 | unsigned int bus = bus_dev->number; | ||
622 | unsigned int device = PCI_SLOT(devfn); | ||
623 | unsigned int func = PCI_FUNC(devfn); | ||
624 | unsigned long ret; | ||
625 | |||
626 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { | ||
627 | ret = ~0UL; | ||
628 | } else { | ||
629 | ret = pci_sun4v_config_get(devhandle, | ||
630 | HV_PCI_DEVICE_BUILD(bus, device, func), | ||
631 | where, size); | ||
632 | #if 0 | ||
633 | printk("rcfg: [%x:%x:%x:%d]=[%lx]\n", | ||
634 | devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), | ||
635 | where, size, ret); | ||
636 | #endif | ||
637 | } | ||
638 | switch (size) { | ||
639 | case 1: | ||
640 | *value = ret & 0xff; | ||
641 | break; | ||
642 | case 2: | ||
643 | *value = ret & 0xffff; | ||
644 | break; | ||
645 | case 4: | ||
646 | *value = ret & 0xffffffff; | ||
647 | break; | ||
648 | }; | ||
649 | |||
650 | |||
651 | return PCIBIOS_SUCCESSFUL; | ||
652 | } | ||
653 | |||
654 | static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | ||
655 | int where, int size, u32 value) | ||
656 | { | ||
657 | struct pci_pbm_info *pbm = bus_dev->sysdata; | ||
658 | u32 devhandle = pbm->devhandle; | ||
659 | unsigned int bus = bus_dev->number; | ||
660 | unsigned int device = PCI_SLOT(devfn); | ||
661 | unsigned int func = PCI_FUNC(devfn); | ||
662 | unsigned long ret; | ||
663 | |||
664 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { | ||
665 | /* Do nothing. */ | ||
666 | } else { | ||
667 | ret = pci_sun4v_config_put(devhandle, | ||
668 | HV_PCI_DEVICE_BUILD(bus, device, func), | ||
669 | where, size, value); | ||
670 | #if 0 | ||
671 | printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n", | ||
672 | devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), | ||
673 | where, size, value, ret); | ||
674 | #endif | ||
675 | } | ||
676 | return PCIBIOS_SUCCESSFUL; | ||
677 | } | ||
678 | |||
/* Config-space accessors installed for buses under a SUN4V PBM. */
static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
683 | |||
684 | |||
685 | static void pbm_scan_bus(struct pci_controller_info *p, | ||
686 | struct pci_pbm_info *pbm) | ||
687 | { | ||
688 | struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); | ||
689 | |||
690 | if (!cookie) { | ||
691 | prom_printf("%s: Critical allocation failure.\n", pbm->name); | ||
692 | prom_halt(); | ||
693 | } | ||
694 | |||
695 | /* All we care about is the PBM. */ | ||
696 | memset(cookie, 0, sizeof(*cookie)); | ||
697 | cookie->pbm = pbm; | ||
698 | |||
699 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm); | ||
700 | #if 0 | ||
701 | pci_fixup_host_bridge_self(pbm->pci_bus); | ||
702 | pbm->pci_bus->self->sysdata = cookie; | ||
703 | #endif | ||
704 | pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, | ||
705 | pbm->prom_node); | ||
706 | pci_record_assignments(pbm, pbm->pci_bus); | ||
707 | pci_assign_unassigned(pbm, pbm->pci_bus); | ||
708 | pci_fixup_irq(pbm, pbm->pci_bus); | ||
709 | pci_determine_66mhz_disposition(pbm, pbm->pci_bus); | ||
710 | pci_setup_busmastering(pbm, pbm->pci_bus); | ||
711 | } | ||
712 | |||
713 | static void pci_sun4v_scan_bus(struct pci_controller_info *p) | ||
714 | { | ||
715 | if (p->pbm_A.prom_node) { | ||
716 | p->pbm_A.is_66mhz_capable = | ||
717 | prom_getbool(p->pbm_A.prom_node, "66mhz-capable"); | ||
718 | |||
719 | pbm_scan_bus(p, &p->pbm_A); | ||
720 | } | ||
721 | if (p->pbm_B.prom_node) { | ||
722 | p->pbm_B.is_66mhz_capable = | ||
723 | prom_getbool(p->pbm_B.prom_node, "66mhz-capable"); | ||
724 | |||
725 | pbm_scan_bus(p, &p->pbm_B); | ||
726 | } | ||
727 | |||
728 | /* XXX register error interrupt handlers XXX */ | ||
729 | } | ||
730 | |||
731 | static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, | ||
732 | struct pci_dev *pdev, | ||
733 | unsigned int devino) | ||
734 | { | ||
735 | u32 devhandle = pbm->devhandle; | ||
736 | int pil; | ||
737 | |||
738 | pil = 5; | ||
739 | if (pdev) { | ||
740 | switch ((pdev->class >> 16) & 0xff) { | ||
741 | case PCI_BASE_CLASS_STORAGE: | ||
742 | pil = 5; | ||
743 | break; | ||
744 | |||
745 | case PCI_BASE_CLASS_NETWORK: | ||
746 | pil = 6; | ||
747 | break; | ||
748 | |||
749 | case PCI_BASE_CLASS_DISPLAY: | ||
750 | pil = 9; | ||
751 | break; | ||
752 | |||
753 | case PCI_BASE_CLASS_MULTIMEDIA: | ||
754 | case PCI_BASE_CLASS_MEMORY: | ||
755 | case PCI_BASE_CLASS_BRIDGE: | ||
756 | case PCI_BASE_CLASS_SERIAL: | ||
757 | pil = 10; | ||
758 | break; | ||
759 | |||
760 | default: | ||
761 | pil = 5; | ||
762 | break; | ||
763 | }; | ||
764 | } | ||
765 | BUG_ON(PIL_RESERVED(pil)); | ||
766 | |||
767 | return sun4v_build_irq(devhandle, devino, pil, IBF_PCI); | ||
768 | } | ||
769 | |||
/* Write a device BAR (or expansion ROM register) back to hardware
 * after resource (re)assignment.
 * @pdev:     device whose register is being updated
 * @resource: index into pdev->resource[]
 */
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not %100 correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	/* 'size' doubles as the BAR's low-bit mask: keep the current
	 * register's read-only low bits and splice in the new
	 * PBM-relative base address.
	 */
	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
815 | |||
816 | static void pci_sun4v_resource_adjust(struct pci_dev *pdev, | ||
817 | struct resource *res, | ||
818 | struct resource *root) | ||
819 | { | ||
820 | res->start += root->start; | ||
821 | res->end += root->start; | ||
822 | } | ||
823 | |||
/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 *
 * Halts via PROM if either the IO or MEM range is missing.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		/* Bits 24-25 of child_phys_hi select the address space. */
		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo << 0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */
			/* fall through */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}
878 | |||
/* Claim the PBM's IO and MEM apertures in the global resource trees
 * and register the legacy regions beneath them.
 * NOTE(review): request_resource() return values are ignored here,
 * so a conflicting claim would go unreported — confirm acceptable.
 */
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
889 | |||
890 | static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, | ||
891 | struct pci_iommu *iommu) | ||
892 | { | ||
893 | struct pci_iommu_arena *arena = &iommu->arena; | ||
894 | unsigned long i, cnt = 0; | ||
895 | u32 devhandle; | ||
896 | |||
897 | devhandle = pbm->devhandle; | ||
898 | for (i = 0; i < arena->limit; i++) { | ||
899 | unsigned long ret, io_attrs, ra; | ||
900 | |||
901 | ret = pci_sun4v_iommu_getmap(devhandle, | ||
902 | HV_PCI_TSBID(0, i), | ||
903 | &io_attrs, &ra); | ||
904 | if (ret == HV_EOK) { | ||
905 | cnt++; | ||
906 | __set_bit(i, arena->map); | ||
907 | } | ||
908 | } | ||
909 | |||
910 | return cnt; | ||
911 | } | ||
912 | |||
913 | static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) | ||
914 | { | ||
915 | struct pci_iommu *iommu = pbm->iommu; | ||
916 | unsigned long num_tsb_entries, sz; | ||
917 | u32 vdma[2], dma_mask, dma_offset; | ||
918 | int err, tsbsize; | ||
919 | |||
920 | err = prom_getproperty(pbm->prom_node, "virtual-dma", | ||
921 | (char *)&vdma[0], sizeof(vdma)); | ||
922 | if (err == 0 || err == -1) { | ||
923 | /* No property, use default values. */ | ||
924 | vdma[0] = 0x80000000; | ||
925 | vdma[1] = 0x80000000; | ||
926 | } | ||
927 | |||
928 | dma_mask = vdma[0]; | ||
929 | switch (vdma[1]) { | ||
930 | case 0x20000000: | ||
931 | dma_mask |= 0x1fffffff; | ||
932 | tsbsize = 64; | ||
933 | break; | ||
934 | |||
935 | case 0x40000000: | ||
936 | dma_mask |= 0x3fffffff; | ||
937 | tsbsize = 128; | ||
938 | break; | ||
939 | |||
940 | case 0x80000000: | ||
941 | dma_mask |= 0x7fffffff; | ||
942 | tsbsize = 256; | ||
943 | break; | ||
944 | |||
945 | default: | ||
946 | prom_printf("PCI-SUN4V: strange virtual-dma size.\n"); | ||
947 | prom_halt(); | ||
948 | }; | ||
949 | |||
950 | tsbsize *= (8 * 1024); | ||
951 | |||
952 | num_tsb_entries = tsbsize / sizeof(iopte_t); | ||
953 | |||
954 | dma_offset = vdma[0]; | ||
955 | |||
956 | /* Setup initial software IOMMU state. */ | ||
957 | spin_lock_init(&iommu->lock); | ||
958 | iommu->ctx_lowest_free = 1; | ||
959 | iommu->page_table_map_base = dma_offset; | ||
960 | iommu->dma_addr_mask = dma_mask; | ||
961 | |||
962 | /* Allocate and initialize the free area map. */ | ||
963 | sz = num_tsb_entries / 8; | ||
964 | sz = (sz + 7UL) & ~7UL; | ||
965 | iommu->arena.map = kmalloc(sz, GFP_KERNEL); | ||
966 | if (!iommu->arena.map) { | ||
967 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); | ||
968 | prom_halt(); | ||
969 | } | ||
970 | memset(iommu->arena.map, 0, sz); | ||
971 | iommu->arena.limit = num_tsb_entries; | ||
972 | |||
973 | sz = probe_existing_entries(pbm, iommu); | ||
974 | |||
975 | printk("%s: TSB entries [%lu], existing mapings [%lu]\n", | ||
976 | pbm->name, num_tsb_entries, sz); | ||
977 | } | ||
978 | |||
979 | static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) | ||
980 | { | ||
981 | unsigned int busrange[2]; | ||
982 | int prom_node = pbm->prom_node; | ||
983 | int err; | ||
984 | |||
985 | err = prom_getproperty(prom_node, "bus-range", | ||
986 | (char *)&busrange[0], | ||
987 | sizeof(busrange)); | ||
988 | if (err == 0 || err == -1) { | ||
989 | prom_printf("%s: Fatal error, no bus-range.\n", pbm->name); | ||
990 | prom_halt(); | ||
991 | } | ||
992 | |||
993 | pbm->pci_first_busno = busrange[0]; | ||
994 | pbm->pci_last_busno = busrange[1]; | ||
995 | |||
996 | } | ||
997 | |||
/* Initialize one PBM of a SUN4V PCI controller from its OBP node:
 * name, ranges, interrupt maps, bus range and IOMMU state.
 * Halts via PROM if a required property is missing.
 * @p:         the owning controller
 * @prom_node: OBP node for this PBM
 * @devhandle: hypervisor device handle (bit 0x40 selects PBM B)
 */
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	/* Bit 0x40 of the devhandle distinguishes the two PBMs. */
	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x] prom_node[%x:%x]\n",
	       pbm->name, pbm->devhandle,
	       pbm->prom_node, prom_getchild(pbm->prom_node));

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
	err = prom_getproperty(prom_node, "interrupt-map-mask",
			       (char *)&pbm->pbm_intmask,
			       sizeof(pbm->pbm_intmask));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
			    pbm->name);
		prom_halt();
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
}
1067 | |||
1068 | void sun4v_pci_init(int node, char *model_name) | ||
1069 | { | ||
1070 | struct pci_controller_info *p; | ||
1071 | struct pci_iommu *iommu; | ||
1072 | struct linux_prom64_registers regs; | ||
1073 | u32 devhandle; | ||
1074 | int i; | ||
1075 | |||
1076 | prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); | ||
1077 | devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; | ||
1078 | |||
1079 | for (p = pci_controller_root; p; p = p->next) { | ||
1080 | struct pci_pbm_info *pbm; | ||
1081 | |||
1082 | if (p->pbm_A.prom_node && p->pbm_B.prom_node) | ||
1083 | continue; | ||
1084 | |||
1085 | pbm = (p->pbm_A.prom_node ? | ||
1086 | &p->pbm_A : | ||
1087 | &p->pbm_B); | ||
1088 | |||
1089 | if (pbm->devhandle == (devhandle ^ 0x40)) { | ||
1090 | pci_sun4v_pbm_init(p, node, devhandle); | ||
1091 | return; | ||
1092 | } | ||
1093 | } | ||
1094 | |||
1095 | for_each_cpu(i) { | ||
1096 | unsigned long page = get_zeroed_page(GFP_ATOMIC); | ||
1097 | |||
1098 | if (!page) | ||
1099 | goto fatal_memory_error; | ||
1100 | |||
1101 | per_cpu(pci_iommu_batch, i).pglist = (u64 *) page; | ||
1102 | } | ||
1103 | |||
1104 | p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | ||
1105 | if (!p) | ||
1106 | goto fatal_memory_error; | ||
1107 | |||
1108 | memset(p, 0, sizeof(*p)); | ||
1109 | |||
1110 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | ||
1111 | if (!iommu) | ||
1112 | goto fatal_memory_error; | ||
1113 | |||
1114 | memset(iommu, 0, sizeof(*iommu)); | ||
1115 | p->pbm_A.iommu = iommu; | ||
1116 | |||
1117 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | ||
1118 | if (!iommu) | ||
1119 | goto fatal_memory_error; | ||
1120 | |||
1121 | memset(iommu, 0, sizeof(*iommu)); | ||
1122 | p->pbm_B.iommu = iommu; | ||
1123 | |||
1124 | p->next = pci_controller_root; | ||
1125 | pci_controller_root = p; | ||
1126 | |||
1127 | p->index = pci_num_controllers++; | ||
1128 | p->pbms_same_domain = 0; | ||
1129 | |||
1130 | p->scan_bus = pci_sun4v_scan_bus; | ||
1131 | p->irq_build = pci_sun4v_irq_build; | ||
1132 | p->base_address_update = pci_sun4v_base_address_update; | ||
1133 | p->resource_adjust = pci_sun4v_resource_adjust; | ||
1134 | p->pci_ops = &pci_sun4v_ops; | ||
1135 | |||
1136 | /* Like PSYCHO and SCHIZO we have a 2GB aligned area | ||
1137 | * for memory space. | ||
1138 | */ | ||
1139 | pci_memspace_mask = 0x7fffffffUL; | ||
1140 | |||
1141 | pci_sun4v_pbm_init(p, node, devhandle); | ||
1142 | return; | ||
1143 | |||
1144 | fatal_memory_error: | ||
1145 | prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); | ||
1146 | prom_halt(); | ||
1147 | } | ||
diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h new file mode 100644 index 000000000000..884d25f6158d --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v.h | |||
@@ -0,0 +1,31 @@ | |||
/* pci_sun4v.h: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#ifndef _PCI_SUN4V_H
#define _PCI_SUN4V_H

/* Map @num_ttes IOMMU entries starting at @tsbid from the physical
 * page list at @io_page_list_pa.  Returns the number of pages mapped,
 * or a negated hypervisor status on error.
 */
extern long pci_sun4v_iommu_map(unsigned long devhandle,
				unsigned long tsbid,
				unsigned long num_ttes,
				unsigned long io_attributes,
				unsigned long io_page_list_pa);
/* Unmap up to @num_ttes entries starting at @tsbid; returns the
 * number actually demapped (may be fewer than requested).
 */
extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
					   unsigned long tsbid,
					   unsigned long num_ttes);
/* Look up the mapping at @tsbid, filling *io_attributes and
 * *real_address.  Returns the hypervisor status word.
 */
extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
					    unsigned long tsbid,
					    unsigned long *io_attributes,
					    unsigned long *real_address);
/* Read @size bytes of config space; returns all 1's on error. */
extern unsigned long pci_sun4v_config_get(unsigned long devhandle,
					  unsigned long pci_device,
					  unsigned long config_offset,
					  unsigned long size);
/* Write @data to config space; return value is ignored by current
 * callers.
 */
extern int pci_sun4v_config_put(unsigned long devhandle,
				unsigned long pci_device,
				unsigned long config_offset,
				unsigned long size,
				unsigned long data);

#endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S new file mode 100644 index 000000000000..6604fdbf746c --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v_asm.S | |||
@@ -0,0 +1,95 @@ | |||
1 | /* pci_sun4v_asm: Hypervisor calls for PCI support. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <asm/hypervisor.h> | ||
7 | |||
8 | /* %o0: devhandle | ||
9 | * %o1: tsbid | ||
10 | * %o2: num ttes | ||
11 | * %o3: io_attributes | ||
12 | * %o4: io_page_list phys address | ||
13 | * | ||
14 | * returns %o0: -status if status was non-zero, else | ||
15 | * %o0: num pages mapped | ||
16 | */ | ||
17 | .globl pci_sun4v_iommu_map | ||
18 | pci_sun4v_iommu_map: | ||
19 | mov %o5, %g1 | ||
20 | mov HV_FAST_PCI_IOMMU_MAP, %o5 | ||
21 | ta HV_FAST_TRAP | ||
22 | brnz,pn %o0, 1f | ||
23 | sub %g0, %o0, %o0 | ||
24 | mov %o1, %o0 | ||
25 | 1: retl | ||
26 | nop | ||
27 | |||
28 | /* %o0: devhandle | ||
29 | * %o1: tsbid | ||
30 | * %o2: num ttes | ||
31 | * | ||
32 | * returns %o0: num ttes demapped | ||
33 | */ | ||
34 | .globl pci_sun4v_iommu_demap | ||
35 | pci_sun4v_iommu_demap: | ||
36 | mov HV_FAST_PCI_IOMMU_DEMAP, %o5 | ||
37 | ta HV_FAST_TRAP | ||
38 | retl | ||
39 | mov %o1, %o0 | ||
40 | |||
41 | /* %o0: devhandle | ||
42 | * %o1: tsbid | ||
43 | * %o2: &io_attributes | ||
44 | * %o3: &real_address | ||
45 | * | ||
46 | * returns %o0: status | ||
47 | */ | ||
48 | .globl pci_sun4v_iommu_getmap | ||
49 | pci_sun4v_iommu_getmap: | ||
50 | mov %o2, %o4 | ||
51 | mov HV_FAST_PCI_IOMMU_GETMAP, %o5 | ||
52 | ta HV_FAST_TRAP | ||
53 | stx %o1, [%o4] | ||
54 | stx %o2, [%o3] | ||
55 | retl | ||
56 | mov %o0, %o0 | ||
57 | |||
58 | /* %o0: devhandle | ||
59 | * %o1: pci_device | ||
60 | * %o2: pci_config_offset | ||
61 | * %o3: size | ||
62 | * | ||
63 | * returns %o0: data | ||
64 | * | ||
65 | * If there is an error, the data will be returned | ||
66 | * as all 1's. | ||
67 | */ | ||
68 | .globl pci_sun4v_config_get | ||
69 | pci_sun4v_config_get: | ||
70 | mov HV_FAST_PCI_CONFIG_GET, %o5 | ||
71 | ta HV_FAST_TRAP | ||
72 | brnz,a,pn %o1, 1f | ||
73 | mov -1, %o2 | ||
74 | 1: retl | ||
75 | mov %o2, %o0 | ||
76 | |||
77 | /* %o0: devhandle | ||
78 | * %o1: pci_device | ||
79 | * %o2: pci_config_offset | ||
80 | * %o3: size | ||
81 | * %o4: data | ||
82 | * | ||
83 | * returns %o0: status | ||
84 | * | ||
85 | * status will be zero if the operation completed | ||
86 | * successfully, else -1 if not | ||
87 | */ | ||
88 | .globl pci_sun4v_config_put | ||
89 | pci_sun4v_config_put: | ||
90 | mov HV_FAST_PCI_CONFIG_PUT, %o5 | ||
91 | ta HV_FAST_TRAP | ||
92 | brnz,a,pn %o1, 1f | ||
93 | mov -1, %o1 | ||
94 | 1: retl | ||
95 | mov %o1, %o0 | ||
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 059b0d025224..1c7ca2f712d9 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c | |||
@@ -44,83 +44,61 @@ | |||
44 | #include <asm/fpumacro.h> | 44 | #include <asm/fpumacro.h> |
45 | #include <asm/head.h> | 45 | #include <asm/head.h> |
46 | #include <asm/cpudata.h> | 46 | #include <asm/cpudata.h> |
47 | #include <asm/mmu_context.h> | ||
47 | #include <asm/unistd.h> | 48 | #include <asm/unistd.h> |
49 | #include <asm/hypervisor.h> | ||
48 | 50 | ||
49 | /* #define VERBOSE_SHOWREGS */ | 51 | /* #define VERBOSE_SHOWREGS */ |
50 | 52 | ||
51 | /* | 53 | static void sparc64_yield(void) |
52 | * Nothing special yet... | ||
53 | */ | ||
54 | void default_idle(void) | ||
55 | { | ||
56 | } | ||
57 | |||
58 | #ifndef CONFIG_SMP | ||
59 | |||
60 | /* | ||
61 | * the idle loop on a Sparc... ;) | ||
62 | */ | ||
63 | void cpu_idle(void) | ||
64 | { | 54 | { |
65 | /* endless idle loop with no priority at all */ | 55 | if (tlb_type != hypervisor) |
66 | for (;;) { | 56 | return; |
67 | /* If current->work.need_resched is zero we should really | ||
68 | * setup for a system wakup event and execute a shutdown | ||
69 | * instruction. | ||
70 | * | ||
71 | * But this requires writing back the contents of the | ||
72 | * L2 cache etc. so implement this later. -DaveM | ||
73 | */ | ||
74 | while (!need_resched()) | ||
75 | barrier(); | ||
76 | 57 | ||
77 | preempt_enable_no_resched(); | 58 | clear_thread_flag(TIF_POLLING_NRFLAG); |
78 | schedule(); | 59 | smp_mb__after_clear_bit(); |
79 | preempt_disable(); | 60 | |
80 | check_pgt_cache(); | 61 | while (!need_resched()) { |
62 | unsigned long pstate; | ||
63 | |||
64 | /* Disable interrupts. */ | ||
65 | __asm__ __volatile__( | ||
66 | "rdpr %%pstate, %0\n\t" | ||
67 | "andn %0, %1, %0\n\t" | ||
68 | "wrpr %0, %%g0, %%pstate" | ||
69 | : "=&r" (pstate) | ||
70 | : "i" (PSTATE_IE)); | ||
71 | |||
72 | if (!need_resched()) | ||
73 | sun4v_cpu_yield(); | ||
74 | |||
75 | /* Re-enable interrupts. */ | ||
76 | __asm__ __volatile__( | ||
77 | "rdpr %%pstate, %0\n\t" | ||
78 | "or %0, %1, %0\n\t" | ||
79 | "wrpr %0, %%g0, %%pstate" | ||
80 | : "=&r" (pstate) | ||
81 | : "i" (PSTATE_IE)); | ||
81 | } | 82 | } |
82 | } | ||
83 | 83 | ||
84 | #else | 84 | set_thread_flag(TIF_POLLING_NRFLAG); |
85 | } | ||
85 | 86 | ||
86 | /* | 87 | /* The idle loop on sparc64. */ |
87 | * the idle loop on a UltraMultiPenguin... | ||
88 | * | ||
89 | * TIF_POLLING_NRFLAG is set because we do not sleep the cpu | ||
90 | * inside of the idler task, so an interrupt is not needed | ||
91 | * to get a clean fast response. | ||
92 | * | ||
93 | * XXX Reverify this assumption... -DaveM | ||
94 | * | ||
95 | * Addendum: We do want it to do something for the signal | ||
96 | * delivery case, we detect that by just seeing | ||
97 | * if we are trying to send this to an idler or not. | ||
98 | */ | ||
99 | void cpu_idle(void) | 88 | void cpu_idle(void) |
100 | { | 89 | { |
101 | cpuinfo_sparc *cpuinfo = &local_cpu_data(); | ||
102 | set_thread_flag(TIF_POLLING_NRFLAG); | 90 | set_thread_flag(TIF_POLLING_NRFLAG); |
103 | 91 | ||
104 | while(1) { | 92 | while(1) { |
105 | if (need_resched()) { | 93 | if (need_resched()) { |
106 | cpuinfo->idle_volume = 0; | ||
107 | preempt_enable_no_resched(); | 94 | preempt_enable_no_resched(); |
108 | schedule(); | 95 | schedule(); |
109 | preempt_disable(); | 96 | preempt_disable(); |
110 | check_pgt_cache(); | ||
111 | } | 97 | } |
112 | cpuinfo->idle_volume++; | 98 | sparc64_yield(); |
113 | |||
114 | /* The store ordering is so that IRQ handlers on | ||
115 | * other cpus see our increasing idleness for the buddy | ||
116 | * redistribution algorithm. -DaveM | ||
117 | */ | ||
118 | membar_storeload_storestore(); | ||
119 | } | 99 | } |
120 | } | 100 | } |
121 | 101 | ||
122 | #endif | ||
123 | |||
124 | extern char reboot_command []; | 102 | extern char reboot_command []; |
125 | 103 | ||
126 | extern void (*prom_palette)(int); | 104 | extern void (*prom_palette)(int); |
@@ -354,6 +332,7 @@ void show_regs(struct pt_regs *regs) | |||
354 | extern long etrap, etraptl1; | 332 | extern long etrap, etraptl1; |
355 | #endif | 333 | #endif |
356 | __show_regs(regs); | 334 | __show_regs(regs); |
335 | #if 0 | ||
357 | #ifdef CONFIG_SMP | 336 | #ifdef CONFIG_SMP |
358 | { | 337 | { |
359 | extern void smp_report_regs(void); | 338 | extern void smp_report_regs(void); |
@@ -361,6 +340,7 @@ void show_regs(struct pt_regs *regs) | |||
361 | smp_report_regs(); | 340 | smp_report_regs(); |
362 | } | 341 | } |
363 | #endif | 342 | #endif |
343 | #endif | ||
364 | 344 | ||
365 | #ifdef VERBOSE_SHOWREGS | 345 | #ifdef VERBOSE_SHOWREGS |
366 | if (regs->tpc >= &etrap && regs->tpc < &etraptl1 && | 346 | if (regs->tpc >= &etrap && regs->tpc < &etraptl1 && |
@@ -433,30 +413,15 @@ void exit_thread(void) | |||
433 | void flush_thread(void) | 413 | void flush_thread(void) |
434 | { | 414 | { |
435 | struct thread_info *t = current_thread_info(); | 415 | struct thread_info *t = current_thread_info(); |
416 | struct mm_struct *mm; | ||
436 | 417 | ||
437 | if (t->flags & _TIF_ABI_PENDING) | 418 | if (t->flags & _TIF_ABI_PENDING) |
438 | t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); | 419 | t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); |
439 | 420 | ||
440 | if (t->task->mm) { | 421 | mm = t->task->mm; |
441 | unsigned long pgd_cache = 0UL; | 422 | if (mm) |
442 | if (test_thread_flag(TIF_32BIT)) { | 423 | tsb_context_switch(mm); |
443 | struct mm_struct *mm = t->task->mm; | ||
444 | pgd_t *pgd0 = &mm->pgd[0]; | ||
445 | pud_t *pud0 = pud_offset(pgd0, 0); | ||
446 | 424 | ||
447 | if (pud_none(*pud0)) { | ||
448 | pmd_t *page = pmd_alloc_one(mm, 0); | ||
449 | pud_set(pud0, page); | ||
450 | } | ||
451 | pgd_cache = get_pgd_cache(pgd0); | ||
452 | } | ||
453 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
454 | "membar #Sync" | ||
455 | : /* no outputs */ | ||
456 | : "r" (pgd_cache), | ||
457 | "r" (TSB_REG), | ||
458 | "i" (ASI_DMMU)); | ||
459 | } | ||
460 | set_thread_wsaved(0); | 425 | set_thread_wsaved(0); |
461 | 426 | ||
462 | /* Turn off performance counters if on. */ | 427 | /* Turn off performance counters if on. */ |
@@ -555,6 +520,18 @@ void synchronize_user_stack(void) | |||
555 | } | 520 | } |
556 | } | 521 | } |
557 | 522 | ||
523 | static void stack_unaligned(unsigned long sp) | ||
524 | { | ||
525 | siginfo_t info; | ||
526 | |||
527 | info.si_signo = SIGBUS; | ||
528 | info.si_errno = 0; | ||
529 | info.si_code = BUS_ADRALN; | ||
530 | info.si_addr = (void __user *) sp; | ||
531 | info.si_trapno = 0; | ||
532 | force_sig_info(SIGBUS, &info, current); | ||
533 | } | ||
534 | |||
558 | void fault_in_user_windows(void) | 535 | void fault_in_user_windows(void) |
559 | { | 536 | { |
560 | struct thread_info *t = current_thread_info(); | 537 | struct thread_info *t = current_thread_info(); |
@@ -570,13 +547,17 @@ void fault_in_user_windows(void) | |||
570 | flush_user_windows(); | 547 | flush_user_windows(); |
571 | window = get_thread_wsaved(); | 548 | window = get_thread_wsaved(); |
572 | 549 | ||
573 | if (window != 0) { | 550 | if (likely(window != 0)) { |
574 | window -= 1; | 551 | window -= 1; |
575 | do { | 552 | do { |
576 | unsigned long sp = (t->rwbuf_stkptrs[window] + bias); | 553 | unsigned long sp = (t->rwbuf_stkptrs[window] + bias); |
577 | struct reg_window *rwin = &t->reg_window[window]; | 554 | struct reg_window *rwin = &t->reg_window[window]; |
578 | 555 | ||
579 | if (copy_to_user((char __user *)sp, rwin, winsize)) | 556 | if (unlikely(sp & 0x7UL)) |
557 | stack_unaligned(sp); | ||
558 | |||
559 | if (unlikely(copy_to_user((char __user *)sp, | ||
560 | rwin, winsize))) | ||
580 | goto barf; | 561 | goto barf; |
581 | } while (window--); | 562 | } while (window--); |
582 | } | 563 | } |
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c index 3f9746f856d2..eb93e9c52846 100644 --- a/arch/sparc64/kernel/ptrace.c +++ b/arch/sparc64/kernel/ptrace.c | |||
@@ -124,6 +124,9 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |||
124 | { | 124 | { |
125 | BUG_ON(len > PAGE_SIZE); | 125 | BUG_ON(len > PAGE_SIZE); |
126 | 126 | ||
127 | if (tlb_type == hypervisor) | ||
128 | return; | ||
129 | |||
127 | #ifdef DCACHE_ALIASING_POSSIBLE | 130 | #ifdef DCACHE_ALIASING_POSSIBLE |
128 | /* If bit 13 of the kernel address we used to access the | 131 | /* If bit 13 of the kernel address we used to access the |
129 | * user page is the same as the virtual address that page | 132 | * user page is the same as the virtual address that page |
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index b80eba0081ca..7130e866f935 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S | |||
@@ -223,12 +223,26 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 | |||
223 | ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3 | 223 | ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3 |
224 | ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4 | 224 | ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4 |
225 | ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5 | 225 | ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5 |
226 | mov TSB_REG, %g6 | 226 | brz,pt %l3, 1f |
227 | brnz,a,pn %l3, 1f | 227 | mov %g6, %l2 |
228 | ldxa [%g6] ASI_IMMU, %g5 | 228 | |
229 | 1: ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 | 229 | /* Must do this before thread reg is clobbered below. */ |
230 | LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2) | ||
231 | 1: | ||
232 | ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 | ||
230 | ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 | 233 | ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 |
231 | wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate | 234 | |
235 | /* Normal globals are restored, go to trap globals. */ | ||
236 | 661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate | ||
237 | nop | ||
238 | .section .sun4v_2insn_patch, "ax" | ||
239 | .word 661b | ||
240 | wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate | ||
241 | SET_GL(1) | ||
242 | .previous | ||
243 | |||
244 | mov %l2, %g6 | ||
245 | |||
232 | ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 | 246 | ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 |
233 | ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 | 247 | ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 |
234 | 248 | ||
@@ -252,27 +266,108 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 | |||
252 | 266 | ||
253 | brnz,pn %l3, kern_rtt | 267 | brnz,pn %l3, kern_rtt |
254 | mov PRIMARY_CONTEXT, %l7 | 268 | mov PRIMARY_CONTEXT, %l7 |
255 | ldxa [%l7 + %l7] ASI_DMMU, %l0 | 269 | |
270 | 661: ldxa [%l7 + %l7] ASI_DMMU, %l0 | ||
271 | .section .sun4v_1insn_patch, "ax" | ||
272 | .word 661b | ||
273 | ldxa [%l7 + %l7] ASI_MMU, %l0 | ||
274 | .previous | ||
275 | |||
256 | sethi %hi(sparc64_kern_pri_nuc_bits), %l1 | 276 | sethi %hi(sparc64_kern_pri_nuc_bits), %l1 |
257 | ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 | 277 | ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 |
258 | or %l0, %l1, %l0 | 278 | or %l0, %l1, %l0 |
259 | stxa %l0, [%l7] ASI_DMMU | 279 | |
260 | flush %g6 | 280 | 661: stxa %l0, [%l7] ASI_DMMU |
281 | .section .sun4v_1insn_patch, "ax" | ||
282 | .word 661b | ||
283 | stxa %l0, [%l7] ASI_MMU | ||
284 | .previous | ||
285 | |||
286 | sethi %hi(KERNBASE), %l7 | ||
287 | flush %l7 | ||
261 | rdpr %wstate, %l1 | 288 | rdpr %wstate, %l1 |
262 | rdpr %otherwin, %l2 | 289 | rdpr %otherwin, %l2 |
263 | srl %l1, 3, %l1 | 290 | srl %l1, 3, %l1 |
264 | 291 | ||
265 | wrpr %l2, %g0, %canrestore | 292 | wrpr %l2, %g0, %canrestore |
266 | wrpr %l1, %g0, %wstate | 293 | wrpr %l1, %g0, %wstate |
267 | wrpr %g0, %g0, %otherwin | 294 | brnz,pt %l2, user_rtt_restore |
295 | wrpr %g0, %g0, %otherwin | ||
296 | |||
297 | ldx [%g6 + TI_FLAGS], %g3 | ||
298 | wr %g0, ASI_AIUP, %asi | ||
299 | rdpr %cwp, %g1 | ||
300 | andcc %g3, _TIF_32BIT, %g0 | ||
301 | sub %g1, 1, %g1 | ||
302 | bne,pt %xcc, user_rtt_fill_32bit | ||
303 | wrpr %g1, %cwp | ||
304 | ba,a,pt %xcc, user_rtt_fill_64bit | ||
305 | |||
306 | user_rtt_fill_fixup: | ||
307 | rdpr %cwp, %g1 | ||
308 | add %g1, 1, %g1 | ||
309 | wrpr %g1, 0x0, %cwp | ||
310 | |||
311 | rdpr %wstate, %g2 | ||
312 | sll %g2, 3, %g2 | ||
313 | wrpr %g2, 0x0, %wstate | ||
314 | |||
315 | /* We know %canrestore and %otherwin are both zero. */ | ||
316 | |||
317 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
318 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 | ||
319 | mov PRIMARY_CONTEXT, %g1 | ||
320 | |||
321 | 661: stxa %g2, [%g1] ASI_DMMU | ||
322 | .section .sun4v_1insn_patch, "ax" | ||
323 | .word 661b | ||
324 | stxa %g2, [%g1] ASI_MMU | ||
325 | .previous | ||
326 | |||
327 | sethi %hi(KERNBASE), %g1 | ||
328 | flush %g1 | ||
329 | |||
330 | or %g4, FAULT_CODE_WINFIXUP, %g4 | ||
331 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
332 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
333 | |||
334 | mov %g6, %l1 | ||
335 | wrpr %g0, 0x0, %tl | ||
336 | |||
337 | 661: nop | ||
338 | .section .sun4v_1insn_patch, "ax" | ||
339 | .word 661b | ||
340 | SET_GL(0) | ||
341 | .previous | ||
342 | |||
343 | wrpr %g0, RTRAP_PSTATE, %pstate | ||
344 | |||
345 | mov %l1, %g6 | ||
346 | ldx [%g6 + TI_TASK], %g4 | ||
347 | LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) | ||
348 | call do_sparc64_fault | ||
349 | add %sp, PTREGS_OFF, %o0 | ||
350 | ba,pt %xcc, rtrap | ||
351 | nop | ||
352 | |||
353 | user_rtt_pre_restore: | ||
354 | add %g1, 1, %g1 | ||
355 | wrpr %g1, 0x0, %cwp | ||
356 | |||
357 | user_rtt_restore: | ||
268 | restore | 358 | restore |
269 | rdpr %canrestore, %g1 | 359 | rdpr %canrestore, %g1 |
270 | wrpr %g1, 0x0, %cleanwin | 360 | wrpr %g1, 0x0, %cleanwin |
271 | retry | 361 | retry |
272 | nop | 362 | nop |
273 | 363 | ||
274 | kern_rtt: restore | 364 | kern_rtt: rdpr %canrestore, %g1 |
365 | brz,pn %g1, kern_rtt_fill | ||
366 | nop | ||
367 | kern_rtt_restore: | ||
368 | restore | ||
275 | retry | 369 | retry |
370 | |||
276 | to_kernel: | 371 | to_kernel: |
277 | #ifdef CONFIG_PREEMPT | 372 | #ifdef CONFIG_PREEMPT |
278 | ldsw [%g6 + TI_PRE_COUNT], %l5 | 373 | ldsw [%g6 + TI_PRE_COUNT], %l5 |
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c index d95a1bcf163d..1d6ffdeabd4c 100644 --- a/arch/sparc64/kernel/sbus.c +++ b/arch/sparc64/kernel/sbus.c | |||
@@ -693,11 +693,11 @@ void sbus_set_sbus64(struct sbus_dev *sdev, int bursts) | |||
693 | 693 | ||
694 | /* SBUS SYSIO INO number to Sparc PIL level. */ | 694 | /* SBUS SYSIO INO number to Sparc PIL level. */ |
695 | static unsigned char sysio_ino_to_pil[] = { | 695 | static unsigned char sysio_ino_to_pil[] = { |
696 | 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 0 */ | 696 | 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 0 */ |
697 | 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 1 */ | 697 | 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 1 */ |
698 | 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 2 */ | 698 | 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 2 */ |
699 | 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 3 */ | 699 | 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 3 */ |
700 | 4, /* Onboard SCSI */ | 700 | 5, /* Onboard SCSI */ |
701 | 5, /* Onboard Ethernet */ | 701 | 5, /* Onboard Ethernet */ |
702 | /*XXX*/ 8, /* Onboard BPP */ | 702 | /*XXX*/ 8, /* Onboard BPP */ |
703 | 0, /* Bogon */ | 703 | 0, /* Bogon */ |
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 158bd31e15b7..7d0e67c1ce50 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c | |||
@@ -64,12 +64,6 @@ struct screen_info screen_info = { | |||
64 | 16 /* orig-video-points */ | 64 | 16 /* orig-video-points */ |
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* Typing sync at the prom prompt calls the function pointed to by | ||
68 | * the sync callback which I set to the following function. | ||
69 | * This should sync all filesystems and return, for now it just | ||
70 | * prints out pretty messages and returns. | ||
71 | */ | ||
72 | |||
73 | void (*prom_palette)(int); | 67 | void (*prom_palette)(int); |
74 | void (*prom_keyboard)(void); | 68 | void (*prom_keyboard)(void); |
75 | 69 | ||
@@ -79,259 +73,6 @@ prom_console_write(struct console *con, const char *s, unsigned n) | |||
79 | prom_write(s, n); | 73 | prom_write(s, n); |
80 | } | 74 | } |
81 | 75 | ||
82 | static struct console prom_console = { | ||
83 | .name = "prom", | ||
84 | .write = prom_console_write, | ||
85 | .flags = CON_CONSDEV | CON_ENABLED, | ||
86 | .index = -1, | ||
87 | }; | ||
88 | |||
89 | #define PROM_TRUE -1 | ||
90 | #define PROM_FALSE 0 | ||
91 | |||
92 | /* Pretty sick eh? */ | ||
93 | int prom_callback(long *args) | ||
94 | { | ||
95 | struct console *cons, *saved_console = NULL; | ||
96 | unsigned long flags; | ||
97 | char *cmd; | ||
98 | extern spinlock_t prom_entry_lock; | ||
99 | |||
100 | if (!args) | ||
101 | return -1; | ||
102 | if (!(cmd = (char *)args[0])) | ||
103 | return -1; | ||
104 | |||
105 | /* | ||
106 | * The callback can be invoked on the cpu that first dropped | ||
107 | * into prom_cmdline after taking the serial interrupt, or on | ||
108 | * a slave processor that was smp_captured() if the | ||
109 | * administrator has done a switch-cpu inside obp. In either | ||
110 | * case, the cpu is marked as in-interrupt. Drop IRQ locks. | ||
111 | */ | ||
112 | irq_exit(); | ||
113 | |||
114 | /* XXX Revisit the locking here someday. This is a debugging | ||
115 | * XXX feature so it isnt all that critical. -DaveM | ||
116 | */ | ||
117 | local_irq_save(flags); | ||
118 | |||
119 | spin_unlock(&prom_entry_lock); | ||
120 | cons = console_drivers; | ||
121 | while (cons) { | ||
122 | unregister_console(cons); | ||
123 | cons->flags &= ~(CON_PRINTBUFFER); | ||
124 | cons->next = saved_console; | ||
125 | saved_console = cons; | ||
126 | cons = console_drivers; | ||
127 | } | ||
128 | register_console(&prom_console); | ||
129 | if (!strcmp(cmd, "sync")) { | ||
130 | prom_printf("PROM `%s' command...\n", cmd); | ||
131 | show_free_areas(); | ||
132 | if (current->pid != 0) { | ||
133 | local_irq_enable(); | ||
134 | sys_sync(); | ||
135 | local_irq_disable(); | ||
136 | } | ||
137 | args[2] = 0; | ||
138 | args[args[1] + 3] = -1; | ||
139 | prom_printf("Returning to PROM\n"); | ||
140 | } else if (!strcmp(cmd, "va>tte-data")) { | ||
141 | unsigned long ctx, va; | ||
142 | unsigned long tte = 0; | ||
143 | long res = PROM_FALSE; | ||
144 | |||
145 | ctx = args[3]; | ||
146 | va = args[4]; | ||
147 | if (ctx) { | ||
148 | /* | ||
149 | * Find process owning ctx, lookup mapping. | ||
150 | */ | ||
151 | struct task_struct *p; | ||
152 | struct mm_struct *mm = NULL; | ||
153 | pgd_t *pgdp; | ||
154 | pud_t *pudp; | ||
155 | pmd_t *pmdp; | ||
156 | pte_t *ptep; | ||
157 | pte_t pte; | ||
158 | |||
159 | for_each_process(p) { | ||
160 | mm = p->mm; | ||
161 | if (CTX_NRBITS(mm->context) == ctx) | ||
162 | break; | ||
163 | } | ||
164 | if (!mm || | ||
165 | CTX_NRBITS(mm->context) != ctx) | ||
166 | goto done; | ||
167 | |||
168 | pgdp = pgd_offset(mm, va); | ||
169 | if (pgd_none(*pgdp)) | ||
170 | goto done; | ||
171 | pudp = pud_offset(pgdp, va); | ||
172 | if (pud_none(*pudp)) | ||
173 | goto done; | ||
174 | pmdp = pmd_offset(pudp, va); | ||
175 | if (pmd_none(*pmdp)) | ||
176 | goto done; | ||
177 | |||
178 | /* Preemption implicitly disabled by virtue of | ||
179 | * being called from inside OBP. | ||
180 | */ | ||
181 | ptep = pte_offset_map(pmdp, va); | ||
182 | pte = *ptep; | ||
183 | if (pte_present(pte)) { | ||
184 | tte = pte_val(pte); | ||
185 | res = PROM_TRUE; | ||
186 | } | ||
187 | pte_unmap(ptep); | ||
188 | goto done; | ||
189 | } | ||
190 | |||
191 | if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { | ||
192 | extern unsigned long sparc64_kern_pri_context; | ||
193 | |||
194 | /* Spitfire Errata #32 workaround */ | ||
195 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
196 | "flush %%g6" | ||
197 | : /* No outputs */ | ||
198 | : "r" (sparc64_kern_pri_context), | ||
199 | "r" (PRIMARY_CONTEXT), | ||
200 | "i" (ASI_DMMU)); | ||
201 | |||
202 | /* | ||
203 | * Locked down tlb entry. | ||
204 | */ | ||
205 | |||
206 | if (tlb_type == spitfire) | ||
207 | tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT); | ||
208 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
209 | tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT); | ||
210 | |||
211 | res = PROM_TRUE; | ||
212 | goto done; | ||
213 | } | ||
214 | |||
215 | if (va < PGDIR_SIZE) { | ||
216 | /* | ||
217 | * vmalloc or prom_inherited mapping. | ||
218 | */ | ||
219 | pgd_t *pgdp; | ||
220 | pud_t *pudp; | ||
221 | pmd_t *pmdp; | ||
222 | pte_t *ptep; | ||
223 | pte_t pte; | ||
224 | int error; | ||
225 | |||
226 | if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) { | ||
227 | tte = prom_virt_to_phys(va, &error); | ||
228 | if (!error) | ||
229 | res = PROM_TRUE; | ||
230 | goto done; | ||
231 | } | ||
232 | pgdp = pgd_offset_k(va); | ||
233 | if (pgd_none(*pgdp)) | ||
234 | goto done; | ||
235 | pudp = pud_offset(pgdp, va); | ||
236 | if (pud_none(*pudp)) | ||
237 | goto done; | ||
238 | pmdp = pmd_offset(pudp, va); | ||
239 | if (pmd_none(*pmdp)) | ||
240 | goto done; | ||
241 | |||
242 | /* Preemption implicitly disabled by virtue of | ||
243 | * being called from inside OBP. | ||
244 | */ | ||
245 | ptep = pte_offset_kernel(pmdp, va); | ||
246 | pte = *ptep; | ||
247 | if (pte_present(pte)) { | ||
248 | tte = pte_val(pte); | ||
249 | res = PROM_TRUE; | ||
250 | } | ||
251 | goto done; | ||
252 | } | ||
253 | |||
254 | if (va < PAGE_OFFSET) { | ||
255 | /* | ||
256 | * No mappings here. | ||
257 | */ | ||
258 | goto done; | ||
259 | } | ||
260 | |||
261 | if (va & (1UL << 40)) { | ||
262 | /* | ||
263 | * I/O page. | ||
264 | */ | ||
265 | |||
266 | tte = (__pa(va) & _PAGE_PADDR) | | ||
267 | _PAGE_VALID | _PAGE_SZ4MB | | ||
268 | _PAGE_E | _PAGE_P | _PAGE_W; | ||
269 | res = PROM_TRUE; | ||
270 | goto done; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Normal page. | ||
275 | */ | ||
276 | tte = (__pa(va) & _PAGE_PADDR) | | ||
277 | _PAGE_VALID | _PAGE_SZ4MB | | ||
278 | _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W; | ||
279 | res = PROM_TRUE; | ||
280 | |||
281 | done: | ||
282 | if (res == PROM_TRUE) { | ||
283 | args[2] = 3; | ||
284 | args[args[1] + 3] = 0; | ||
285 | args[args[1] + 4] = res; | ||
286 | args[args[1] + 5] = tte; | ||
287 | } else { | ||
288 | args[2] = 2; | ||
289 | args[args[1] + 3] = 0; | ||
290 | args[args[1] + 4] = res; | ||
291 | } | ||
292 | } else if (!strcmp(cmd, ".soft1")) { | ||
293 | unsigned long tte; | ||
294 | |||
295 | tte = args[3]; | ||
296 | prom_printf("%lx:\"%s%s%s%s%s\" ", | ||
297 | (tte & _PAGE_SOFT) >> 7, | ||
298 | tte & _PAGE_MODIFIED ? "M" : "-", | ||
299 | tte & _PAGE_ACCESSED ? "A" : "-", | ||
300 | tte & _PAGE_READ ? "W" : "-", | ||
301 | tte & _PAGE_WRITE ? "R" : "-", | ||
302 | tte & _PAGE_PRESENT ? "P" : "-"); | ||
303 | |||
304 | args[2] = 2; | ||
305 | args[args[1] + 3] = 0; | ||
306 | args[args[1] + 4] = PROM_TRUE; | ||
307 | } else if (!strcmp(cmd, ".soft2")) { | ||
308 | unsigned long tte; | ||
309 | |||
310 | tte = args[3]; | ||
311 | prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50); | ||
312 | |||
313 | args[2] = 2; | ||
314 | args[args[1] + 3] = 0; | ||
315 | args[args[1] + 4] = PROM_TRUE; | ||
316 | } else { | ||
317 | prom_printf("unknown PROM `%s' command...\n", cmd); | ||
318 | } | ||
319 | unregister_console(&prom_console); | ||
320 | while (saved_console) { | ||
321 | cons = saved_console; | ||
322 | saved_console = cons->next; | ||
323 | register_console(cons); | ||
324 | } | ||
325 | spin_lock(&prom_entry_lock); | ||
326 | local_irq_restore(flags); | ||
327 | |||
328 | /* | ||
329 | * Restore in-interrupt status for a resume from obp. | ||
330 | */ | ||
331 | irq_enter(); | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | unsigned int boot_flags = 0; | 76 | unsigned int boot_flags = 0; |
336 | #define BOOTME_DEBUG 0x1 | 77 | #define BOOTME_DEBUG 0x1 |
337 | #define BOOTME_SINGLE 0x2 | 78 | #define BOOTME_SINGLE 0x2 |
@@ -479,15 +220,99 @@ char reboot_command[COMMAND_LINE_SIZE]; | |||
479 | 220 | ||
480 | static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; | 221 | static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; |
481 | 222 | ||
482 | void register_prom_callbacks(void) | 223 | static void __init per_cpu_patch(void) |
483 | { | 224 | { |
484 | prom_setcallback(prom_callback); | 225 | struct cpuid_patch_entry *p; |
485 | prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; " | 226 | unsigned long ver; |
486 | "' linux-va>tte-data to va>tte-data"); | 227 | int is_jbus; |
487 | prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; " | 228 | |
488 | "' linux-.soft1 to .soft1"); | 229 | if (tlb_type == spitfire && !this_is_starfire) |
489 | prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; " | 230 | return; |
490 | "' linux-.soft2 to .soft2"); | 231 | |
232 | is_jbus = 0; | ||
233 | if (tlb_type != hypervisor) { | ||
234 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | ||
235 | is_jbus = ((ver >> 32UL) == __JALAPENO_ID || | ||
236 | (ver >> 32UL) == __SERRANO_ID); | ||
237 | } | ||
238 | |||
239 | p = &__cpuid_patch; | ||
240 | while (p < &__cpuid_patch_end) { | ||
241 | unsigned long addr = p->addr; | ||
242 | unsigned int *insns; | ||
243 | |||
244 | switch (tlb_type) { | ||
245 | case spitfire: | ||
246 | insns = &p->starfire[0]; | ||
247 | break; | ||
248 | case cheetah: | ||
249 | case cheetah_plus: | ||
250 | if (is_jbus) | ||
251 | insns = &p->cheetah_jbus[0]; | ||
252 | else | ||
253 | insns = &p->cheetah_safari[0]; | ||
254 | break; | ||
255 | case hypervisor: | ||
256 | insns = &p->sun4v[0]; | ||
257 | break; | ||
258 | default: | ||
259 | prom_printf("Unknown cpu type, halting.\n"); | ||
260 | prom_halt(); | ||
261 | }; | ||
262 | |||
263 | *(unsigned int *) (addr + 0) = insns[0]; | ||
264 | wmb(); | ||
265 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
266 | |||
267 | *(unsigned int *) (addr + 4) = insns[1]; | ||
268 | wmb(); | ||
269 | __asm__ __volatile__("flush %0" : : "r" (addr + 4)); | ||
270 | |||
271 | *(unsigned int *) (addr + 8) = insns[2]; | ||
272 | wmb(); | ||
273 | __asm__ __volatile__("flush %0" : : "r" (addr + 8)); | ||
274 | |||
275 | *(unsigned int *) (addr + 12) = insns[3]; | ||
276 | wmb(); | ||
277 | __asm__ __volatile__("flush %0" : : "r" (addr + 12)); | ||
278 | |||
279 | p++; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | static void __init sun4v_patch(void) | ||
284 | { | ||
285 | struct sun4v_1insn_patch_entry *p1; | ||
286 | struct sun4v_2insn_patch_entry *p2; | ||
287 | |||
288 | if (tlb_type != hypervisor) | ||
289 | return; | ||
290 | |||
291 | p1 = &__sun4v_1insn_patch; | ||
292 | while (p1 < &__sun4v_1insn_patch_end) { | ||
293 | unsigned long addr = p1->addr; | ||
294 | |||
295 | *(unsigned int *) (addr + 0) = p1->insn; | ||
296 | wmb(); | ||
297 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
298 | |||
299 | p1++; | ||
300 | } | ||
301 | |||
302 | p2 = &__sun4v_2insn_patch; | ||
303 | while (p2 < &__sun4v_2insn_patch_end) { | ||
304 | unsigned long addr = p2->addr; | ||
305 | |||
306 | *(unsigned int *) (addr + 0) = p2->insns[0]; | ||
307 | wmb(); | ||
308 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
309 | |||
310 | *(unsigned int *) (addr + 4) = p2->insns[1]; | ||
311 | wmb(); | ||
312 | __asm__ __volatile__("flush %0" : : "r" (addr + 4)); | ||
313 | |||
314 | p2++; | ||
315 | } | ||
491 | } | 316 | } |
492 | 317 | ||
493 | void __init setup_arch(char **cmdline_p) | 318 | void __init setup_arch(char **cmdline_p) |
@@ -496,7 +321,10 @@ void __init setup_arch(char **cmdline_p) | |||
496 | *cmdline_p = prom_getbootargs(); | 321 | *cmdline_p = prom_getbootargs(); |
497 | strcpy(saved_command_line, *cmdline_p); | 322 | strcpy(saved_command_line, *cmdline_p); |
498 | 323 | ||
499 | printk("ARCH: SUN4U\n"); | 324 | if (tlb_type == hypervisor) |
325 | printk("ARCH: SUN4V\n"); | ||
326 | else | ||
327 | printk("ARCH: SUN4U\n"); | ||
500 | 328 | ||
501 | #ifdef CONFIG_DUMMY_CONSOLE | 329 | #ifdef CONFIG_DUMMY_CONSOLE |
502 | conswitchp = &dummy_con; | 330 | conswitchp = &dummy_con; |
@@ -507,6 +335,13 @@ void __init setup_arch(char **cmdline_p) | |||
507 | /* Work out if we are starfire early on */ | 335 | /* Work out if we are starfire early on */ |
508 | check_if_starfire(); | 336 | check_if_starfire(); |
509 | 337 | ||
338 | /* Now we know enough to patch the get_cpuid sequences | ||
339 | * used by trap code. | ||
340 | */ | ||
341 | per_cpu_patch(); | ||
342 | |||
343 | sun4v_patch(); | ||
344 | |||
510 | boot_flags_init(*cmdline_p); | 345 | boot_flags_init(*cmdline_p); |
511 | 346 | ||
512 | idprom_init(); | 347 | idprom_init(); |
@@ -514,7 +349,7 @@ void __init setup_arch(char **cmdline_p) | |||
514 | if (!root_flags) | 349 | if (!root_flags) |
515 | root_mountflags &= ~MS_RDONLY; | 350 | root_mountflags &= ~MS_RDONLY; |
516 | ROOT_DEV = old_decode_dev(root_dev); | 351 | ROOT_DEV = old_decode_dev(root_dev); |
517 | #ifdef CONFIG_BLK_DEV_INITRD | 352 | #ifdef CONFIG_BLK_DEV_RAM |
518 | rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; | 353 | rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; |
519 | rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); | 354 | rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); |
520 | rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); | 355 | rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); |
@@ -544,6 +379,9 @@ void __init setup_arch(char **cmdline_p) | |||
544 | 379 | ||
545 | smp_setup_cpu_possible_map(); | 380 | smp_setup_cpu_possible_map(); |
546 | 381 | ||
382 | /* Get boot processor trap_block[] setup. */ | ||
383 | init_cur_cpu_trap(current_thread_info()); | ||
384 | |||
547 | paging_init(); | 385 | paging_init(); |
548 | } | 386 | } |
549 | 387 | ||
@@ -565,6 +403,12 @@ static int __init set_preferred_console(void) | |||
565 | serial_console = 2; | 403 | serial_console = 2; |
566 | } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) { | 404 | } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) { |
567 | serial_console = 3; | 405 | serial_console = 3; |
406 | } else if (idev == PROMDEV_IVCONS && odev == PROMDEV_OVCONS) { | ||
407 | /* sunhv_console_init() doesn't check the serial_console | ||
408 | * value anyways... | ||
409 | */ | ||
410 | serial_console = 4; | ||
411 | return add_preferred_console("ttyHV", 0, NULL); | ||
568 | } else { | 412 | } else { |
569 | prom_printf("Inconsistent console: " | 413 | prom_printf("Inconsistent console: " |
570 | "input %d, output %d\n", | 414 | "input %d, output %d\n", |
@@ -598,9 +442,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) | |||
598 | seq_printf(m, | 442 | seq_printf(m, |
599 | "cpu\t\t: %s\n" | 443 | "cpu\t\t: %s\n" |
600 | "fpu\t\t: %s\n" | 444 | "fpu\t\t: %s\n" |
601 | "promlib\t\t: Version 3 Revision %d\n" | 445 | "prom\t\t: %s\n" |
602 | "prom\t\t: %d.%d.%d\n" | 446 | "type\t\t: %s\n" |
603 | "type\t\t: sun4u\n" | ||
604 | "ncpus probed\t: %d\n" | 447 | "ncpus probed\t: %d\n" |
605 | "ncpus active\t: %d\n" | 448 | "ncpus active\t: %d\n" |
606 | "D$ parity tl1\t: %u\n" | 449 | "D$ parity tl1\t: %u\n" |
@@ -612,10 +455,10 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) | |||
612 | , | 455 | , |
613 | sparc_cpu_type, | 456 | sparc_cpu_type, |
614 | sparc_fpu_type, | 457 | sparc_fpu_type, |
615 | prom_rev, | 458 | prom_version, |
616 | prom_prev >> 16, | 459 | ((tlb_type == hypervisor) ? |
617 | (prom_prev >> 8) & 0xff, | 460 | "sun4v" : |
618 | prom_prev & 0xff, | 461 | "sun4u"), |
619 | ncpus_probed, | 462 | ncpus_probed, |
620 | num_online_cpus(), | 463 | num_online_cpus(), |
621 | dcache_parity_tl1_occurred, | 464 | dcache_parity_tl1_occurred, |
@@ -692,15 +535,11 @@ static int __init topology_init(void) | |||
692 | while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) | 535 | while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) |
693 | ncpus_probed++; | 536 | ncpus_probed++; |
694 | 537 | ||
695 | for (i = 0; i < NR_CPUS; i++) { | 538 | for_each_cpu(i) { |
696 | if (cpu_possible(i)) { | 539 | struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); |
697 | struct cpu *p = kmalloc(sizeof(*p), GFP_KERNEL); | 540 | if (p) { |
698 | 541 | register_cpu(p, i, NULL); | |
699 | if (p) { | 542 | err = 0; |
700 | memset(p, 0, sizeof(*p)); | ||
701 | register_cpu(p, i, NULL); | ||
702 | err = 0; | ||
703 | } | ||
704 | } | 543 | } |
705 | } | 544 | } |
706 | 545 | ||
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 1f7ad8a69052..373a701c90a5 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/timer.h> | 38 | #include <asm/timer.h> |
39 | #include <asm/starfire.h> | 39 | #include <asm/starfire.h> |
40 | #include <asm/tlb.h> | 40 | #include <asm/tlb.h> |
41 | #include <asm/sections.h> | ||
41 | 42 | ||
42 | extern void calibrate_delay(void); | 43 | extern void calibrate_delay(void); |
43 | 44 | ||
@@ -46,6 +47,8 @@ static unsigned char boot_cpu_id; | |||
46 | 47 | ||
47 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; | 48 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; |
48 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; | 49 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; |
50 | cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly = | ||
51 | { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; | ||
49 | static cpumask_t smp_commenced_mask; | 52 | static cpumask_t smp_commenced_mask; |
50 | static cpumask_t cpu_callout_map; | 53 | static cpumask_t cpu_callout_map; |
51 | 54 | ||
@@ -77,7 +80,7 @@ void smp_bogo(struct seq_file *m) | |||
77 | 80 | ||
78 | void __init smp_store_cpu_info(int id) | 81 | void __init smp_store_cpu_info(int id) |
79 | { | 82 | { |
80 | int cpu_node; | 83 | int cpu_node, def; |
81 | 84 | ||
82 | /* multiplier and counter set by | 85 | /* multiplier and counter set by |
83 | smp_setup_percpu_timer() */ | 86 | smp_setup_percpu_timer() */ |
@@ -87,24 +90,32 @@ void __init smp_store_cpu_info(int id) | |||
87 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, | 90 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, |
88 | "clock-frequency", 0); | 91 | "clock-frequency", 0); |
89 | 92 | ||
90 | cpu_data(id).pgcache_size = 0; | 93 | def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024)); |
91 | cpu_data(id).pte_cache[0] = NULL; | ||
92 | cpu_data(id).pte_cache[1] = NULL; | ||
93 | cpu_data(id).pgd_cache = NULL; | ||
94 | cpu_data(id).idle_volume = 1; | ||
95 | |||
96 | cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", | 94 | cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", |
97 | 16 * 1024); | 95 | def); |
96 | |||
97 | def = 32; | ||
98 | cpu_data(id).dcache_line_size = | 98 | cpu_data(id).dcache_line_size = |
99 | prom_getintdefault(cpu_node, "dcache-line-size", 32); | 99 | prom_getintdefault(cpu_node, "dcache-line-size", def); |
100 | |||
101 | def = 16 * 1024; | ||
100 | cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", | 102 | cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", |
101 | 16 * 1024); | 103 | def); |
104 | |||
105 | def = 32; | ||
102 | cpu_data(id).icache_line_size = | 106 | cpu_data(id).icache_line_size = |
103 | prom_getintdefault(cpu_node, "icache-line-size", 32); | 107 | prom_getintdefault(cpu_node, "icache-line-size", def); |
108 | |||
109 | def = ((tlb_type == hypervisor) ? | ||
110 | (3 * 1024 * 1024) : | ||
111 | (4 * 1024 * 1024)); | ||
104 | cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", | 112 | cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", |
105 | 4 * 1024 * 1024); | 113 | def); |
114 | |||
115 | def = 64; | ||
106 | cpu_data(id).ecache_line_size = | 116 | cpu_data(id).ecache_line_size = |
107 | prom_getintdefault(cpu_node, "ecache-line-size", 64); | 117 | prom_getintdefault(cpu_node, "ecache-line-size", def); |
118 | |||
108 | printk("CPU[%d]: Caches " | 119 | printk("CPU[%d]: Caches " |
109 | "D[sz(%d):line_sz(%d)] " | 120 | "D[sz(%d):line_sz(%d)] " |
110 | "I[sz(%d):line_sz(%d)] " | 121 | "I[sz(%d):line_sz(%d)] " |
@@ -119,27 +130,16 @@ static void smp_setup_percpu_timer(void); | |||
119 | 130 | ||
120 | static volatile unsigned long callin_flag = 0; | 131 | static volatile unsigned long callin_flag = 0; |
121 | 132 | ||
122 | extern void inherit_locked_prom_mappings(int save_p); | ||
123 | |||
124 | static inline void cpu_setup_percpu_base(unsigned long cpu_id) | ||
125 | { | ||
126 | __asm__ __volatile__("mov %0, %%g5\n\t" | ||
127 | "stxa %0, [%1] %2\n\t" | ||
128 | "membar #Sync" | ||
129 | : /* no outputs */ | ||
130 | : "r" (__per_cpu_offset(cpu_id)), | ||
131 | "r" (TSB_REG), "i" (ASI_IMMU)); | ||
132 | } | ||
133 | |||
134 | void __init smp_callin(void) | 133 | void __init smp_callin(void) |
135 | { | 134 | { |
136 | int cpuid = hard_smp_processor_id(); | 135 | int cpuid = hard_smp_processor_id(); |
137 | 136 | ||
138 | inherit_locked_prom_mappings(0); | 137 | __local_per_cpu_offset = __per_cpu_offset(cpuid); |
139 | 138 | ||
140 | __flush_tlb_all(); | 139 | if (tlb_type == hypervisor) |
140 | sun4v_ktsb_register(); | ||
141 | 141 | ||
142 | cpu_setup_percpu_base(cpuid); | 142 | __flush_tlb_all(); |
143 | 143 | ||
144 | smp_setup_percpu_timer(); | 144 | smp_setup_percpu_timer(); |
145 | 145 | ||
@@ -316,6 +316,8 @@ static void smp_synchronize_one_tick(int cpu) | |||
316 | spin_unlock_irqrestore(&itc_sync_lock, flags); | 316 | spin_unlock_irqrestore(&itc_sync_lock, flags); |
317 | } | 317 | } |
318 | 318 | ||
319 | extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load); | ||
320 | |||
319 | extern unsigned long sparc64_cpu_startup; | 321 | extern unsigned long sparc64_cpu_startup; |
320 | 322 | ||
321 | /* The OBP cpu startup callback truncates the 3rd arg cookie to | 323 | /* The OBP cpu startup callback truncates the 3rd arg cookie to |
@@ -331,21 +333,31 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) | |||
331 | unsigned long cookie = | 333 | unsigned long cookie = |
332 | (unsigned long)(&cpu_new_thread); | 334 | (unsigned long)(&cpu_new_thread); |
333 | struct task_struct *p; | 335 | struct task_struct *p; |
334 | int timeout, ret, cpu_node; | 336 | int timeout, ret; |
335 | 337 | ||
336 | p = fork_idle(cpu); | 338 | p = fork_idle(cpu); |
337 | callin_flag = 0; | 339 | callin_flag = 0; |
338 | cpu_new_thread = task_thread_info(p); | 340 | cpu_new_thread = task_thread_info(p); |
339 | cpu_set(cpu, cpu_callout_map); | 341 | cpu_set(cpu, cpu_callout_map); |
340 | 342 | ||
341 | cpu_find_by_mid(cpu, &cpu_node); | 343 | if (tlb_type == hypervisor) { |
342 | prom_startcpu(cpu_node, entry, cookie); | 344 | /* Alloc the mondo queues, cpu will load them. */ |
345 | sun4v_init_mondo_queues(0, cpu, 1, 0); | ||
346 | |||
347 | prom_startcpu_cpuid(cpu, entry, cookie); | ||
348 | } else { | ||
349 | int cpu_node; | ||
350 | |||
351 | cpu_find_by_mid(cpu, &cpu_node); | ||
352 | prom_startcpu(cpu_node, entry, cookie); | ||
353 | } | ||
343 | 354 | ||
344 | for (timeout = 0; timeout < 5000000; timeout++) { | 355 | for (timeout = 0; timeout < 5000000; timeout++) { |
345 | if (callin_flag) | 356 | if (callin_flag) |
346 | break; | 357 | break; |
347 | udelay(100); | 358 | udelay(100); |
348 | } | 359 | } |
360 | |||
349 | if (callin_flag) { | 361 | if (callin_flag) { |
350 | ret = 0; | 362 | ret = 0; |
351 | } else { | 363 | } else { |
@@ -441,7 +453,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c | |||
441 | static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | 453 | static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) |
442 | { | 454 | { |
443 | u64 pstate, ver; | 455 | u64 pstate, ver; |
444 | int nack_busy_id, is_jalapeno; | 456 | int nack_busy_id, is_jbus; |
445 | 457 | ||
446 | if (cpus_empty(mask)) | 458 | if (cpus_empty(mask)) |
447 | return; | 459 | return; |
@@ -451,7 +463,8 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas | |||
451 | * derivative processor. | 463 | * derivative processor. |
452 | */ | 464 | */ |
453 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | 465 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); |
454 | is_jalapeno = ((ver >> 32) == 0x003e0016); | 466 | is_jbus = ((ver >> 32) == __JALAPENO_ID || |
467 | (ver >> 32) == __SERRANO_ID); | ||
455 | 468 | ||
456 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | 469 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); |
457 | 470 | ||
@@ -476,7 +489,7 @@ retry: | |||
476 | for_each_cpu_mask(i, mask) { | 489 | for_each_cpu_mask(i, mask) { |
477 | u64 target = (i << 14) | 0x70; | 490 | u64 target = (i << 14) | 0x70; |
478 | 491 | ||
479 | if (!is_jalapeno) | 492 | if (!is_jbus) |
480 | target |= (nack_busy_id << 24); | 493 | target |= (nack_busy_id << 24); |
481 | __asm__ __volatile__( | 494 | __asm__ __volatile__( |
482 | "stxa %%g0, [%0] %1\n\t" | 495 | "stxa %%g0, [%0] %1\n\t" |
@@ -529,7 +542,7 @@ retry: | |||
529 | for_each_cpu_mask(i, mask) { | 542 | for_each_cpu_mask(i, mask) { |
530 | u64 check_mask; | 543 | u64 check_mask; |
531 | 544 | ||
532 | if (is_jalapeno) | 545 | if (is_jbus) |
533 | check_mask = (0x2UL << (2*i)); | 546 | check_mask = (0x2UL << (2*i)); |
534 | else | 547 | else |
535 | check_mask = (0x2UL << | 548 | check_mask = (0x2UL << |
@@ -544,6 +557,155 @@ retry: | |||
544 | } | 557 | } |
545 | } | 558 | } |
546 | 559 | ||
560 | /* Multi-cpu list version. */ | ||
561 | static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | ||
562 | { | ||
563 | struct trap_per_cpu *tb; | ||
564 | u16 *cpu_list; | ||
565 | u64 *mondo; | ||
566 | cpumask_t error_mask; | ||
567 | unsigned long flags, status; | ||
568 | int cnt, retries, this_cpu, prev_sent, i; | ||
569 | |||
570 | /* We have to do this whole thing with interrupts fully disabled. | ||
571 | * Otherwise if we send an xcall from interrupt context it will | ||
572 | * corrupt both our mondo block and cpu list state. | ||
573 | * | ||
574 | * One consequence of this is that we cannot use timeout mechanisms | ||
575 | * that depend upon interrupts being delivered locally. So, for | ||
576 | * example, we cannot sample jiffies and expect it to advance. | ||
577 | * | ||
578 | * Fortunately, udelay() uses %stick/%tick so we can use that. | ||
579 | */ | ||
580 | local_irq_save(flags); | ||
581 | |||
582 | this_cpu = smp_processor_id(); | ||
583 | tb = &trap_block[this_cpu]; | ||
584 | |||
585 | mondo = __va(tb->cpu_mondo_block_pa); | ||
586 | mondo[0] = data0; | ||
587 | mondo[1] = data1; | ||
588 | mondo[2] = data2; | ||
589 | wmb(); | ||
590 | |||
591 | cpu_list = __va(tb->cpu_list_pa); | ||
592 | |||
593 | /* Setup the initial cpu list. */ | ||
594 | cnt = 0; | ||
595 | for_each_cpu_mask(i, mask) | ||
596 | cpu_list[cnt++] = i; | ||
597 | |||
598 | cpus_clear(error_mask); | ||
599 | retries = 0; | ||
600 | prev_sent = 0; | ||
601 | do { | ||
602 | int forward_progress, n_sent; | ||
603 | |||
604 | status = sun4v_cpu_mondo_send(cnt, | ||
605 | tb->cpu_list_pa, | ||
606 | tb->cpu_mondo_block_pa); | ||
607 | |||
608 | /* HV_EOK means all cpus received the xcall, we're done. */ | ||
609 | if (likely(status == HV_EOK)) | ||
610 | break; | ||
611 | |||
612 | /* First, see if we made any forward progress. | ||
613 | * | ||
614 | * The hypervisor indicates successful sends by setting | ||
615 | * cpu list entries to the value 0xffff. | ||
616 | */ | ||
617 | n_sent = 0; | ||
618 | for (i = 0; i < cnt; i++) { | ||
619 | if (likely(cpu_list[i] == 0xffff)) | ||
620 | n_sent++; | ||
621 | } | ||
622 | |||
623 | forward_progress = 0; | ||
624 | if (n_sent > prev_sent) | ||
625 | forward_progress = 1; | ||
626 | |||
627 | prev_sent = n_sent; | ||
628 | |||
629 | /* If we get a HV_ECPUERROR, then one or more of the cpus | ||
630 | * in the list are in error state. Use the cpu_state() | ||
631 | * hypervisor call to find out which cpus are in error state. | ||
632 | */ | ||
633 | if (unlikely(status == HV_ECPUERROR)) { | ||
634 | for (i = 0; i < cnt; i++) { | ||
635 | long err; | ||
636 | u16 cpu; | ||
637 | |||
638 | cpu = cpu_list[i]; | ||
639 | if (cpu == 0xffff) | ||
640 | continue; | ||
641 | |||
642 | err = sun4v_cpu_state(cpu); | ||
643 | if (err >= 0 && | ||
644 | err == HV_CPU_STATE_ERROR) { | ||
645 | cpu_list[i] = 0xffff; | ||
646 | cpu_set(cpu, error_mask); | ||
647 | } | ||
648 | } | ||
649 | } else if (unlikely(status != HV_EWOULDBLOCK)) | ||
650 | goto fatal_mondo_error; | ||
651 | |||
652 | /* Don't bother rewriting the CPU list, just leave the | ||
653 | * 0xffff and non-0xffff entries in there and the | ||
654 | * hypervisor will do the right thing. | ||
655 | * | ||
656 | * Only advance timeout state if we didn't make any | ||
657 | * forward progress. | ||
658 | */ | ||
659 | if (unlikely(!forward_progress)) { | ||
660 | if (unlikely(++retries > 10000)) | ||
661 | goto fatal_mondo_timeout; | ||
662 | |||
663 | /* Delay a little bit to let other cpus catch up | ||
664 | * on their cpu mondo queue work. | ||
665 | */ | ||
666 | udelay(2 * cnt); | ||
667 | } | ||
668 | } while (1); | ||
669 | |||
670 | local_irq_restore(flags); | ||
671 | |||
672 | if (unlikely(!cpus_empty(error_mask))) | ||
673 | goto fatal_mondo_cpu_error; | ||
674 | |||
675 | return; | ||
676 | |||
677 | fatal_mondo_cpu_error: | ||
678 | printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " | ||
679 | "were in error state\n", | ||
680 | this_cpu); | ||
681 | printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu); | ||
682 | for_each_cpu_mask(i, error_mask) | ||
683 | printk("%d ", i); | ||
684 | printk("]\n"); | ||
685 | return; | ||
686 | |||
687 | fatal_mondo_timeout: | ||
688 | local_irq_restore(flags); | ||
689 | printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " | ||
690 | " progress after %d retries.\n", | ||
691 | this_cpu, retries); | ||
692 | goto dump_cpu_list_and_out; | ||
693 | |||
694 | fatal_mondo_error: | ||
695 | local_irq_restore(flags); | ||
696 | printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", | ||
697 | this_cpu, status); | ||
698 | printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " | ||
699 | "mondo_block_pa(%lx)\n", | ||
700 | this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); | ||
701 | |||
702 | dump_cpu_list_and_out: | ||
703 | printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); | ||
704 | for (i = 0; i < cnt; i++) | ||
705 | printk("%u ", cpu_list[i]); | ||
706 | printk("]\n"); | ||
707 | } | ||
708 | |||
547 | /* Send cross call to all processors mentioned in MASK | 709 | /* Send cross call to all processors mentioned in MASK |
548 | * except self. | 710 | * except self. |
549 | */ | 711 | */ |
@@ -557,8 +719,10 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d | |||
557 | 719 | ||
558 | if (tlb_type == spitfire) | 720 | if (tlb_type == spitfire) |
559 | spitfire_xcall_deliver(data0, data1, data2, mask); | 721 | spitfire_xcall_deliver(data0, data1, data2, mask); |
560 | else | 722 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) |
561 | cheetah_xcall_deliver(data0, data1, data2, mask); | 723 | cheetah_xcall_deliver(data0, data1, data2, mask); |
724 | else | ||
725 | hypervisor_xcall_deliver(data0, data1, data2, mask); | ||
562 | /* NOTE: Caller runs local copy on master. */ | 726 | /* NOTE: Caller runs local copy on master. */ |
563 | 727 | ||
564 | put_cpu(); | 728 | put_cpu(); |
@@ -594,16 +758,13 @@ extern unsigned long xcall_call_function; | |||
594 | * You must not call this function with disabled interrupts or from a | 758 | * You must not call this function with disabled interrupts or from a |
595 | * hardware interrupt handler or from a bottom half handler. | 759 | * hardware interrupt handler or from a bottom half handler. |
596 | */ | 760 | */ |
597 | int smp_call_function(void (*func)(void *info), void *info, | 761 | static int smp_call_function_mask(void (*func)(void *info), void *info, |
598 | int nonatomic, int wait) | 762 | int nonatomic, int wait, cpumask_t mask) |
599 | { | 763 | { |
600 | struct call_data_struct data; | 764 | struct call_data_struct data; |
601 | int cpus = num_online_cpus() - 1; | 765 | int cpus; |
602 | long timeout; | 766 | long timeout; |
603 | 767 | ||
604 | if (!cpus) | ||
605 | return 0; | ||
606 | |||
607 | /* Can deadlock when called with interrupts disabled */ | 768 | /* Can deadlock when called with interrupts disabled */ |
608 | WARN_ON(irqs_disabled()); | 769 | WARN_ON(irqs_disabled()); |
609 | 770 | ||
@@ -614,9 +775,14 @@ int smp_call_function(void (*func)(void *info), void *info, | |||
614 | 775 | ||
615 | spin_lock(&call_lock); | 776 | spin_lock(&call_lock); |
616 | 777 | ||
778 | cpu_clear(smp_processor_id(), mask); | ||
779 | cpus = cpus_weight(mask); | ||
780 | if (!cpus) | ||
781 | goto out_unlock; | ||
782 | |||
617 | call_data = &data; | 783 | call_data = &data; |
618 | 784 | ||
619 | smp_cross_call(&xcall_call_function, 0, 0, 0); | 785 | smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask); |
620 | 786 | ||
621 | /* | 787 | /* |
622 | * Wait for other cpus to complete function or at | 788 | * Wait for other cpus to complete function or at |
@@ -630,18 +796,25 @@ int smp_call_function(void (*func)(void *info), void *info, | |||
630 | udelay(1); | 796 | udelay(1); |
631 | } | 797 | } |
632 | 798 | ||
799 | out_unlock: | ||
633 | spin_unlock(&call_lock); | 800 | spin_unlock(&call_lock); |
634 | 801 | ||
635 | return 0; | 802 | return 0; |
636 | 803 | ||
637 | out_timeout: | 804 | out_timeout: |
638 | spin_unlock(&call_lock); | 805 | spin_unlock(&call_lock); |
639 | printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n", | 806 | printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n", |
640 | (long) num_online_cpus() - 1L, | 807 | cpus, atomic_read(&data.finished)); |
641 | (long) atomic_read(&data.finished)); | ||
642 | return 0; | 808 | return 0; |
643 | } | 809 | } |
644 | 810 | ||
811 | int smp_call_function(void (*func)(void *info), void *info, | ||
812 | int nonatomic, int wait) | ||
813 | { | ||
814 | return smp_call_function_mask(func, info, nonatomic, wait, | ||
815 | cpu_online_map); | ||
816 | } | ||
817 | |||
645 | void smp_call_function_client(int irq, struct pt_regs *regs) | 818 | void smp_call_function_client(int irq, struct pt_regs *regs) |
646 | { | 819 | { |
647 | void (*func) (void *info) = call_data->func; | 820 | void (*func) (void *info) = call_data->func; |
@@ -659,13 +832,25 @@ void smp_call_function_client(int irq, struct pt_regs *regs) | |||
659 | } | 832 | } |
660 | } | 833 | } |
661 | 834 | ||
835 | static void tsb_sync(void *info) | ||
836 | { | ||
837 | struct mm_struct *mm = info; | ||
838 | |||
839 | if (current->active_mm == mm) | ||
840 | tsb_context_switch(mm); | ||
841 | } | ||
842 | |||
843 | void smp_tsb_sync(struct mm_struct *mm) | ||
844 | { | ||
845 | smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); | ||
846 | } | ||
847 | |||
662 | extern unsigned long xcall_flush_tlb_mm; | 848 | extern unsigned long xcall_flush_tlb_mm; |
663 | extern unsigned long xcall_flush_tlb_pending; | 849 | extern unsigned long xcall_flush_tlb_pending; |
664 | extern unsigned long xcall_flush_tlb_kernel_range; | 850 | extern unsigned long xcall_flush_tlb_kernel_range; |
665 | extern unsigned long xcall_flush_tlb_all_spitfire; | ||
666 | extern unsigned long xcall_flush_tlb_all_cheetah; | ||
667 | extern unsigned long xcall_report_regs; | 851 | extern unsigned long xcall_report_regs; |
668 | extern unsigned long xcall_receive_signal; | 852 | extern unsigned long xcall_receive_signal; |
853 | extern unsigned long xcall_new_mmu_context_version; | ||
669 | 854 | ||
670 | #ifdef DCACHE_ALIASING_POSSIBLE | 855 | #ifdef DCACHE_ALIASING_POSSIBLE |
671 | extern unsigned long xcall_flush_dcache_page_cheetah; | 856 | extern unsigned long xcall_flush_dcache_page_cheetah; |
@@ -693,11 +878,17 @@ static __inline__ void __local_flush_dcache_page(struct page *page) | |||
693 | void smp_flush_dcache_page_impl(struct page *page, int cpu) | 878 | void smp_flush_dcache_page_impl(struct page *page, int cpu) |
694 | { | 879 | { |
695 | cpumask_t mask = cpumask_of_cpu(cpu); | 880 | cpumask_t mask = cpumask_of_cpu(cpu); |
696 | int this_cpu = get_cpu(); | 881 | int this_cpu; |
882 | |||
883 | if (tlb_type == hypervisor) | ||
884 | return; | ||
697 | 885 | ||
698 | #ifdef CONFIG_DEBUG_DCFLUSH | 886 | #ifdef CONFIG_DEBUG_DCFLUSH |
699 | atomic_inc(&dcpage_flushes); | 887 | atomic_inc(&dcpage_flushes); |
700 | #endif | 888 | #endif |
889 | |||
890 | this_cpu = get_cpu(); | ||
891 | |||
701 | if (cpu == this_cpu) { | 892 | if (cpu == this_cpu) { |
702 | __local_flush_dcache_page(page); | 893 | __local_flush_dcache_page(page); |
703 | } else if (cpu_online(cpu)) { | 894 | } else if (cpu_online(cpu)) { |
@@ -713,7 +904,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) | |||
713 | __pa(pg_addr), | 904 | __pa(pg_addr), |
714 | (u64) pg_addr, | 905 | (u64) pg_addr, |
715 | mask); | 906 | mask); |
716 | } else { | 907 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
717 | #ifdef DCACHE_ALIASING_POSSIBLE | 908 | #ifdef DCACHE_ALIASING_POSSIBLE |
718 | data0 = | 909 | data0 = |
719 | ((u64)&xcall_flush_dcache_page_cheetah); | 910 | ((u64)&xcall_flush_dcache_page_cheetah); |
@@ -735,7 +926,12 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
735 | void *pg_addr = page_address(page); | 926 | void *pg_addr = page_address(page); |
736 | cpumask_t mask = cpu_online_map; | 927 | cpumask_t mask = cpu_online_map; |
737 | u64 data0; | 928 | u64 data0; |
738 | int this_cpu = get_cpu(); | 929 | int this_cpu; |
930 | |||
931 | if (tlb_type == hypervisor) | ||
932 | return; | ||
933 | |||
934 | this_cpu = get_cpu(); | ||
739 | 935 | ||
740 | cpu_clear(this_cpu, mask); | 936 | cpu_clear(this_cpu, mask); |
741 | 937 | ||
@@ -752,7 +948,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
752 | __pa(pg_addr), | 948 | __pa(pg_addr), |
753 | (u64) pg_addr, | 949 | (u64) pg_addr, |
754 | mask); | 950 | mask); |
755 | } else { | 951 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
756 | #ifdef DCACHE_ALIASING_POSSIBLE | 952 | #ifdef DCACHE_ALIASING_POSSIBLE |
757 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); | 953 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); |
758 | cheetah_xcall_deliver(data0, | 954 | cheetah_xcall_deliver(data0, |
@@ -769,38 +965,58 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
769 | put_cpu(); | 965 | put_cpu(); |
770 | } | 966 | } |
771 | 967 | ||
968 | static void __smp_receive_signal_mask(cpumask_t mask) | ||
969 | { | ||
970 | smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask); | ||
971 | } | ||
972 | |||
772 | void smp_receive_signal(int cpu) | 973 | void smp_receive_signal(int cpu) |
773 | { | 974 | { |
774 | cpumask_t mask = cpumask_of_cpu(cpu); | 975 | cpumask_t mask = cpumask_of_cpu(cpu); |
775 | 976 | ||
776 | if (cpu_online(cpu)) { | 977 | if (cpu_online(cpu)) |
777 | u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff); | 978 | __smp_receive_signal_mask(mask); |
778 | |||
779 | if (tlb_type == spitfire) | ||
780 | spitfire_xcall_deliver(data0, 0, 0, mask); | ||
781 | else | ||
782 | cheetah_xcall_deliver(data0, 0, 0, mask); | ||
783 | } | ||
784 | } | 979 | } |
785 | 980 | ||
786 | void smp_receive_signal_client(int irq, struct pt_regs *regs) | 981 | void smp_receive_signal_client(int irq, struct pt_regs *regs) |
787 | { | 982 | { |
788 | /* Just return, rtrap takes care of the rest. */ | ||
789 | clear_softint(1 << irq); | 983 | clear_softint(1 << irq); |
790 | } | 984 | } |
791 | 985 | ||
792 | void smp_report_regs(void) | 986 | void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) |
793 | { | 987 | { |
794 | smp_cross_call(&xcall_report_regs, 0, 0, 0); | 988 | struct mm_struct *mm; |
989 | unsigned long flags; | ||
990 | |||
991 | clear_softint(1 << irq); | ||
992 | |||
993 | /* See if we need to allocate a new TLB context because | ||
994 | * the version of the one we are using is now out of date. | ||
995 | */ | ||
996 | mm = current->active_mm; | ||
997 | if (unlikely(!mm || (mm == &init_mm))) | ||
998 | return; | ||
999 | |||
1000 | spin_lock_irqsave(&mm->context.lock, flags); | ||
1001 | |||
1002 | if (unlikely(!CTX_VALID(mm->context))) | ||
1003 | get_new_mmu_context(mm); | ||
1004 | |||
1005 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
1006 | |||
1007 | load_secondary_context(mm); | ||
1008 | __flush_tlb_mm(CTX_HWBITS(mm->context), | ||
1009 | SECONDARY_CONTEXT); | ||
795 | } | 1010 | } |
796 | 1011 | ||
797 | void smp_flush_tlb_all(void) | 1012 | void smp_new_mmu_context_version(void) |
798 | { | 1013 | { |
799 | if (tlb_type == spitfire) | 1014 | smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); |
800 | smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0); | 1015 | } |
801 | else | 1016 | |
802 | smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0); | 1017 | void smp_report_regs(void) |
803 | __flush_tlb_all(); | 1018 | { |
1019 | smp_cross_call(&xcall_report_regs, 0, 0, 0); | ||
804 | } | 1020 | } |
805 | 1021 | ||
806 | /* We know that the window frames of the user have been flushed | 1022 | /* We know that the window frames of the user have been flushed |
@@ -944,24 +1160,19 @@ void smp_release(void) | |||
944 | * can service tlb flush xcalls... | 1160 | * can service tlb flush xcalls... |
945 | */ | 1161 | */ |
946 | extern void prom_world(int); | 1162 | extern void prom_world(int); |
947 | extern void save_alternate_globals(unsigned long *); | 1163 | |
948 | extern void restore_alternate_globals(unsigned long *); | ||
949 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) | 1164 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) |
950 | { | 1165 | { |
951 | unsigned long global_save[24]; | ||
952 | |||
953 | clear_softint(1 << irq); | 1166 | clear_softint(1 << irq); |
954 | 1167 | ||
955 | preempt_disable(); | 1168 | preempt_disable(); |
956 | 1169 | ||
957 | __asm__ __volatile__("flushw"); | 1170 | __asm__ __volatile__("flushw"); |
958 | save_alternate_globals(global_save); | ||
959 | prom_world(1); | 1171 | prom_world(1); |
960 | atomic_inc(&smp_capture_registry); | 1172 | atomic_inc(&smp_capture_registry); |
961 | membar_storeload_storestore(); | 1173 | membar_storeload_storestore(); |
962 | while (penguins_are_doing_time) | 1174 | while (penguins_are_doing_time) |
963 | rmb(); | 1175 | rmb(); |
964 | restore_alternate_globals(global_save); | ||
965 | atomic_dec(&smp_capture_registry); | 1176 | atomic_dec(&smp_capture_registry); |
966 | prom_world(0); | 1177 | prom_world(0); |
967 | 1178 | ||
@@ -1082,6 +1293,8 @@ int setup_profiling_timer(unsigned int multiplier) | |||
1082 | /* Constrain the number of cpus to max_cpus. */ | 1293 | /* Constrain the number of cpus to max_cpus. */ |
1083 | void __init smp_prepare_cpus(unsigned int max_cpus) | 1294 | void __init smp_prepare_cpus(unsigned int max_cpus) |
1084 | { | 1295 | { |
1296 | int i; | ||
1297 | |||
1085 | if (num_possible_cpus() > max_cpus) { | 1298 | if (num_possible_cpus() > max_cpus) { |
1086 | int instance, mid; | 1299 | int instance, mid; |
1087 | 1300 | ||
@@ -1096,6 +1309,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
1096 | } | 1309 | } |
1097 | } | 1310 | } |
1098 | 1311 | ||
1312 | for_each_cpu(i) { | ||
1313 | if (tlb_type == hypervisor) { | ||
1314 | int j; | ||
1315 | |||
1316 | /* XXX get this mapping from machine description */ | ||
1317 | for_each_cpu(j) { | ||
1318 | if ((j >> 2) == (i >> 2)) | ||
1319 | cpu_set(j, cpu_sibling_map[i]); | ||
1320 | } | ||
1321 | } else { | ||
1322 | cpu_set(i, cpu_sibling_map[i]); | ||
1323 | } | ||
1324 | } | ||
1325 | |||
1099 | smp_store_cpu_info(boot_cpu_id); | 1326 | smp_store_cpu_info(boot_cpu_id); |
1100 | } | 1327 | } |
1101 | 1328 | ||
@@ -1117,12 +1344,15 @@ void __init smp_setup_cpu_possible_map(void) | |||
1117 | 1344 | ||
1118 | void __devinit smp_prepare_boot_cpu(void) | 1345 | void __devinit smp_prepare_boot_cpu(void) |
1119 | { | 1346 | { |
1120 | if (hard_smp_processor_id() >= NR_CPUS) { | 1347 | int cpu = hard_smp_processor_id(); |
1348 | |||
1349 | if (cpu >= NR_CPUS) { | ||
1121 | prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); | 1350 | prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); |
1122 | prom_halt(); | 1351 | prom_halt(); |
1123 | } | 1352 | } |
1124 | 1353 | ||
1125 | current_thread_info()->cpu = hard_smp_processor_id(); | 1354 | current_thread_info()->cpu = cpu; |
1355 | __local_per_cpu_offset = __per_cpu_offset(cpu); | ||
1126 | 1356 | ||
1127 | cpu_set(smp_processor_id(), cpu_online_map); | 1357 | cpu_set(smp_processor_id(), cpu_online_map); |
1128 | cpu_set(smp_processor_id(), phys_cpu_present_map); | 1358 | cpu_set(smp_processor_id(), phys_cpu_present_map); |
@@ -1139,7 +1369,11 @@ int __devinit __cpu_up(unsigned int cpu) | |||
1139 | if (!cpu_isset(cpu, cpu_online_map)) { | 1369 | if (!cpu_isset(cpu, cpu_online_map)) { |
1140 | ret = -ENODEV; | 1370 | ret = -ENODEV; |
1141 | } else { | 1371 | } else { |
1142 | smp_synchronize_one_tick(cpu); | 1372 | /* On SUN4V, writes to %tick and %stick are |
1373 | * not allowed. | ||
1374 | */ | ||
1375 | if (tlb_type != hypervisor) | ||
1376 | smp_synchronize_one_tick(cpu); | ||
1143 | } | 1377 | } |
1144 | } | 1378 | } |
1145 | return ret; | 1379 | return ret; |
@@ -1183,12 +1417,9 @@ void __init setup_per_cpu_areas(void) | |||
1183 | { | 1417 | { |
1184 | unsigned long goal, size, i; | 1418 | unsigned long goal, size, i; |
1185 | char *ptr; | 1419 | char *ptr; |
1186 | /* Created by linker magic */ | ||
1187 | extern char __per_cpu_start[], __per_cpu_end[]; | ||
1188 | 1420 | ||
1189 | /* Copy section for each CPU (we discard the original) */ | 1421 | /* Copy section for each CPU (we discard the original) */ |
1190 | goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE); | 1422 | goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); |
1191 | |||
1192 | #ifdef CONFIG_MODULES | 1423 | #ifdef CONFIG_MODULES |
1193 | if (goal < PERCPU_ENOUGH_ROOM) | 1424 | if (goal < PERCPU_ENOUGH_ROOM) |
1194 | goal = PERCPU_ENOUGH_ROOM; | 1425 | goal = PERCPU_ENOUGH_ROOM; |
@@ -1197,31 +1428,10 @@ void __init setup_per_cpu_areas(void) | |||
1197 | for (size = 1UL; size < goal; size <<= 1UL) | 1428 | for (size = 1UL; size < goal; size <<= 1UL) |
1198 | __per_cpu_shift++; | 1429 | __per_cpu_shift++; |
1199 | 1430 | ||
1200 | /* Make sure the resulting __per_cpu_base value | 1431 | ptr = alloc_bootmem(size * NR_CPUS); |
1201 | * will fit in the 43-bit sign extended IMMU | ||
1202 | * TSB register. | ||
1203 | */ | ||
1204 | ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE, | ||
1205 | (unsigned long) __per_cpu_start); | ||
1206 | 1432 | ||
1207 | __per_cpu_base = ptr - __per_cpu_start; | 1433 | __per_cpu_base = ptr - __per_cpu_start; |
1208 | 1434 | ||
1209 | if ((__per_cpu_shift < PAGE_SHIFT) || | ||
1210 | (__per_cpu_base & ~PAGE_MASK) || | ||
1211 | (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) { | ||
1212 | prom_printf("PER_CPU: Invalid layout, " | ||
1213 | "ptr[%p] shift[%lx] base[%lx]\n", | ||
1214 | ptr, __per_cpu_shift, __per_cpu_base); | ||
1215 | prom_halt(); | ||
1216 | } | ||
1217 | |||
1218 | for (i = 0; i < NR_CPUS; i++, ptr += size) | 1435 | for (i = 0; i < NR_CPUS; i++, ptr += size) |
1219 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 1436 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); |
1220 | |||
1221 | /* Finally, load in the boot cpu's base value. | ||
1222 | * We abuse the IMMU TSB register for trap handler | ||
1223 | * entry and exit loading of %g5. That is why it | ||
1224 | * has to be page aligned. | ||
1225 | */ | ||
1226 | cpu_setup_percpu_base(hard_smp_processor_id()); | ||
1227 | } | 1437 | } |
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 3c06bfb92a8c..9914a17651b4 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -95,9 +95,6 @@ extern int __ashrdi3(int, int); | |||
95 | 95 | ||
96 | extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); | 96 | extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); |
97 | 97 | ||
98 | extern unsigned long phys_base; | ||
99 | extern unsigned long pfn_base; | ||
100 | |||
101 | extern unsigned int sys_call_table[]; | 98 | extern unsigned int sys_call_table[]; |
102 | 99 | ||
103 | extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); | 100 | extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); |
@@ -108,6 +105,14 @@ extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, | |||
108 | extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, | 105 | extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, |
109 | unsigned long *, unsigned long *, unsigned long *); | 106 | unsigned long *, unsigned long *, unsigned long *); |
110 | 107 | ||
108 | extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); | ||
109 | extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, | ||
110 | unsigned long *); | ||
111 | extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, | ||
112 | unsigned long *, unsigned long *); | ||
113 | extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, | ||
114 | unsigned long *, unsigned long *, unsigned long *); | ||
115 | |||
111 | /* Per-CPU information table */ | 116 | /* Per-CPU information table */ |
112 | EXPORT_PER_CPU_SYMBOL(__cpu_data); | 117 | EXPORT_PER_CPU_SYMBOL(__cpu_data); |
113 | 118 | ||
@@ -241,10 +246,6 @@ EXPORT_SYMBOL(verify_compat_iovec); | |||
241 | #endif | 246 | #endif |
242 | 247 | ||
243 | EXPORT_SYMBOL(dump_fpu); | 248 | EXPORT_SYMBOL(dump_fpu); |
244 | EXPORT_SYMBOL(pte_alloc_one_kernel); | ||
245 | #ifndef CONFIG_SMP | ||
246 | EXPORT_SYMBOL(pgt_quicklists); | ||
247 | #endif | ||
248 | EXPORT_SYMBOL(put_fs_struct); | 249 | EXPORT_SYMBOL(put_fs_struct); |
249 | 250 | ||
250 | /* math-emu wants this */ | 251 | /* math-emu wants this */ |
@@ -339,14 +340,10 @@ EXPORT_SYMBOL(copy_to_user_fixup); | |||
339 | EXPORT_SYMBOL(copy_from_user_fixup); | 340 | EXPORT_SYMBOL(copy_from_user_fixup); |
340 | EXPORT_SYMBOL(copy_in_user_fixup); | 341 | EXPORT_SYMBOL(copy_in_user_fixup); |
341 | EXPORT_SYMBOL(__strncpy_from_user); | 342 | EXPORT_SYMBOL(__strncpy_from_user); |
342 | EXPORT_SYMBOL(__bzero_noasi); | 343 | EXPORT_SYMBOL(__clear_user); |
343 | 344 | ||
344 | /* Various address conversion macros use this. */ | 345 | /* Various address conversion macros use this. */ |
345 | EXPORT_SYMBOL(phys_base); | ||
346 | EXPORT_SYMBOL(pfn_base); | ||
347 | EXPORT_SYMBOL(sparc64_valid_addr_bitmap); | 346 | EXPORT_SYMBOL(sparc64_valid_addr_bitmap); |
348 | EXPORT_SYMBOL(page_to_pfn); | ||
349 | EXPORT_SYMBOL(pfn_to_page); | ||
350 | 347 | ||
351 | /* No version information on this, heavily used in inline asm, | 348 | /* No version information on this, heavily used in inline asm, |
352 | * and will always be 'void __ret_efault(void)'. | 349 | * and will always be 'void __ret_efault(void)'. |
@@ -392,4 +389,9 @@ EXPORT_SYMBOL(xor_vis_3); | |||
392 | EXPORT_SYMBOL(xor_vis_4); | 389 | EXPORT_SYMBOL(xor_vis_4); |
393 | EXPORT_SYMBOL(xor_vis_5); | 390 | EXPORT_SYMBOL(xor_vis_5); |
394 | 391 | ||
392 | EXPORT_SYMBOL(xor_niagara_2); | ||
393 | EXPORT_SYMBOL(xor_niagara_3); | ||
394 | EXPORT_SYMBOL(xor_niagara_4); | ||
395 | EXPORT_SYMBOL(xor_niagara_5); | ||
396 | |||
395 | EXPORT_SYMBOL(prom_palette); | 397 | EXPORT_SYMBOL(prom_palette); |
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S new file mode 100644 index 000000000000..b49a68bdda43 --- /dev/null +++ b/arch/sparc64/kernel/sun4v_ivec.S | |||
@@ -0,0 +1,334 @@ | |||
1 | /* sun4v_ivec.S: Sun4v interrupt vector handling. | ||
2 | * | ||
3 | * Copyright (C) 2006 <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <asm/cpudata.h> | ||
7 | #include <asm/intr_queue.h> | ||
8 | |||
9 | .text | ||
10 | .align 32 | ||
11 | |||
12 | sun4v_cpu_mondo: | ||
13 | /* Head offset in %g2, tail offset in %g4. | ||
14 | * If they are the same, no work. | ||
15 | */ | ||
16 | mov INTRQ_CPU_MONDO_HEAD, %g2 | ||
17 | ldxa [%g2] ASI_QUEUE, %g2 | ||
18 | mov INTRQ_CPU_MONDO_TAIL, %g4 | ||
19 | ldxa [%g4] ASI_QUEUE, %g4 | ||
20 | cmp %g2, %g4 | ||
21 | be,pn %xcc, sun4v_cpu_mondo_queue_empty | ||
22 | nop | ||
23 | |||
24 | /* Get &trap_block[smp_processor_id()] into %g3. */ | ||
25 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | ||
26 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | ||
27 | |||
28 | /* Get CPU mondo queue base phys address into %g7. */ | ||
29 | ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 | ||
30 | |||
31 | /* Now get the cross-call arguments and handler PC, same | ||
32 | * layout as sun4u: | ||
33 | * | ||
34 | * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it | ||
35 | * high half is context arg to MMU flushes, into %g5 | ||
36 | * 2nd 64-bit word: 64-bit arg, load into %g1 | ||
37 | * 3rd 64-bit word: 64-bit arg, load into %g7 | ||
38 | */ | ||
39 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3 | ||
40 | add %g2, 0x8, %g2 | ||
41 | srlx %g3, 32, %g5 | ||
42 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 | ||
43 | add %g2, 0x8, %g2 | ||
44 | srl %g3, 0, %g3 | ||
45 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7 | ||
46 | add %g2, 0x40 - 0x8 - 0x8, %g2 | ||
47 | |||
48 | /* Update queue head pointer. */ | ||
49 | sethi %hi(8192 - 1), %g4 | ||
50 | or %g4, %lo(8192 - 1), %g4 | ||
51 | and %g2, %g4, %g2 | ||
52 | |||
53 | mov INTRQ_CPU_MONDO_HEAD, %g4 | ||
54 | stxa %g2, [%g4] ASI_QUEUE | ||
55 | membar #Sync | ||
56 | |||
57 | jmpl %g3, %g0 | ||
58 | nop | ||
59 | |||
60 | sun4v_cpu_mondo_queue_empty: | ||
61 | retry | ||
62 | |||
63 | sun4v_dev_mondo: | ||
64 | /* Head offset in %g2, tail offset in %g4. */ | ||
65 | mov INTRQ_DEVICE_MONDO_HEAD, %g2 | ||
66 | ldxa [%g2] ASI_QUEUE, %g2 | ||
67 | mov INTRQ_DEVICE_MONDO_TAIL, %g4 | ||
68 | ldxa [%g4] ASI_QUEUE, %g4 | ||
69 | cmp %g2, %g4 | ||
70 | be,pn %xcc, sun4v_dev_mondo_queue_empty | ||
71 | nop | ||
72 | |||
73 | /* Get &trap_block[smp_processor_id()] into %g3. */ | ||
74 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | ||
75 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | ||
76 | |||
77 | /* Get DEV mondo queue base phys address into %g5. */ | ||
78 | ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 | ||
79 | |||
80 | /* Load IVEC into %g3. */ | ||
81 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
82 | add %g2, 0x40, %g2 | ||
83 | |||
84 | /* XXX There can be a full 64-byte block of data here. | ||
85 | * XXX This is how we can get at MSI vector data. | ||
86 | * XXX Current we do not capture this, but when we do we'll | ||
87 | * XXX need to add a 64-byte storage area in the struct ino_bucket | ||
88 | * XXX or the struct irq_desc. | ||
89 | */ | ||
90 | |||
91 | /* Update queue head pointer, this frees up some registers. */ | ||
92 | sethi %hi(8192 - 1), %g4 | ||
93 | or %g4, %lo(8192 - 1), %g4 | ||
94 | and %g2, %g4, %g2 | ||
95 | |||
96 | mov INTRQ_DEVICE_MONDO_HEAD, %g4 | ||
97 | stxa %g2, [%g4] ASI_QUEUE | ||
98 | membar #Sync | ||
99 | |||
100 | /* Get &__irq_work[smp_processor_id()] into %g1. */ | ||
101 | TRAP_LOAD_IRQ_WORK(%g1, %g4) | ||
102 | |||
103 | /* Get &ivector_table[IVEC] into %g4. */ | ||
104 | sethi %hi(ivector_table), %g4 | ||
105 | sllx %g3, 5, %g3 | ||
106 | or %g4, %lo(ivector_table), %g4 | ||
107 | add %g4, %g3, %g4 | ||
108 | |||
109 | /* Load IRQ %pil into %g5. */ | ||
110 | ldub [%g4 + 0x04], %g5 | ||
111 | |||
112 | /* Insert ivector_table[] entry into __irq_work[] queue. */ | ||
113 | sllx %g5, 2, %g3 | ||
114 | lduw [%g1 + %g3], %g2 /* g2 = irq_work(cpu, pil) */ | ||
115 | stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */ | ||
116 | stw %g4, [%g1 + %g3] /* irq_work(cpu, pil) = bucket */ | ||
117 | |||
118 | /* Signal the interrupt by setting (1 << pil) in %softint. */ | ||
119 | mov 1, %g2 | ||
120 | sllx %g2, %g5, %g2 | ||
121 | wr %g2, 0x0, %set_softint | ||
122 | |||
123 | sun4v_dev_mondo_queue_empty: | ||
124 | retry | ||
125 | |||
126 | sun4v_res_mondo: | ||
127 | /* Head offset in %g2, tail offset in %g4. */ | ||
128 | mov INTRQ_RESUM_MONDO_HEAD, %g2 | ||
129 | ldxa [%g2] ASI_QUEUE, %g2 | ||
130 | mov INTRQ_RESUM_MONDO_TAIL, %g4 | ||
131 | ldxa [%g4] ASI_QUEUE, %g4 | ||
132 | cmp %g2, %g4 | ||
133 | be,pn %xcc, sun4v_res_mondo_queue_empty | ||
134 | nop | ||
135 | |||
136 | /* Get &trap_block[smp_processor_id()] into %g3. */ | ||
137 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | ||
138 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | ||
139 | |||
140 | /* Get RES mondo queue base phys address into %g5. */ | ||
141 | ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5 | ||
142 | |||
143 | /* Get RES kernel buffer base phys address into %g7. */ | ||
144 | ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7 | ||
145 | |||
146 | /* If the first word is non-zero, queue is full. */ | ||
147 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 | ||
148 | brnz,pn %g1, sun4v_res_mondo_queue_full | ||
149 | nop | ||
150 | |||
151 | /* Remember this entry's offset in %g1. */ | ||
152 | mov %g2, %g1 | ||
153 | |||
154 | /* Copy 64-byte queue entry into kernel buffer. */ | ||
155 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
156 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
157 | add %g2, 0x08, %g2 | ||
158 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
159 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
160 | add %g2, 0x08, %g2 | ||
161 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
162 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
163 | add %g2, 0x08, %g2 | ||
164 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
165 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
166 | add %g2, 0x08, %g2 | ||
167 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
168 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
169 | add %g2, 0x08, %g2 | ||
170 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
171 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
172 | add %g2, 0x08, %g2 | ||
173 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
174 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
175 | add %g2, 0x08, %g2 | ||
176 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
177 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
178 | add %g2, 0x08, %g2 | ||
179 | |||
180 | /* Update queue head pointer. */ | ||
181 | sethi %hi(8192 - 1), %g4 | ||
182 | or %g4, %lo(8192 - 1), %g4 | ||
183 | and %g2, %g4, %g2 | ||
184 | |||
185 | mov INTRQ_RESUM_MONDO_HEAD, %g4 | ||
186 | stxa %g2, [%g4] ASI_QUEUE | ||
187 | membar #Sync | ||
188 | |||
189 | /* Disable interrupts and save register state so we can call | ||
190 | * C code. The etrap handling will leave %g4 in %l4 for us | ||
191 | * when it's done. | ||
192 | */ | ||
193 | rdpr %pil, %g2 | ||
194 | wrpr %g0, 15, %pil | ||
195 | mov %g1, %g4 | ||
196 | ba,pt %xcc, etrap_irq | ||
197 | rd %pc, %g7 | ||
198 | |||
199 | /* Log the event. */ | ||
200 | add %sp, PTREGS_OFF, %o0 | ||
201 | call sun4v_resum_error | ||
202 | mov %l4, %o1 | ||
203 | |||
204 | /* Return from trap. */ | ||
205 | ba,pt %xcc, rtrap_irq | ||
206 | nop | ||
207 | |||
208 | sun4v_res_mondo_queue_empty: | ||
209 | retry | ||
210 | |||
211 | sun4v_res_mondo_queue_full: | ||
212 | /* The queue is full, consolidate our damage by setting | ||
213 | * the head equal to the tail. We'll just trap again otherwise. | ||
214 | * Call C code to log the event. | ||
215 | */ | ||
216 | mov INTRQ_RESUM_MONDO_HEAD, %g2 | ||
217 | stxa %g4, [%g2] ASI_QUEUE | ||
218 | membar #Sync | ||
219 | |||
220 | rdpr %pil, %g2 | ||
221 | wrpr %g0, 15, %pil | ||
222 | ba,pt %xcc, etrap_irq | ||
223 | rd %pc, %g7 | ||
224 | |||
225 | call sun4v_resum_overflow | ||
226 | add %sp, PTREGS_OFF, %o0 | ||
227 | |||
228 | ba,pt %xcc, rtrap_irq | ||
229 | nop | ||
230 | |||
231 | sun4v_nonres_mondo: | ||
232 | /* Head offset in %g2, tail offset in %g4. */ | ||
233 | mov INTRQ_NONRESUM_MONDO_HEAD, %g2 | ||
234 | ldxa [%g2] ASI_QUEUE, %g2 | ||
235 | mov INTRQ_NONRESUM_MONDO_TAIL, %g4 | ||
236 | ldxa [%g4] ASI_QUEUE, %g4 | ||
237 | cmp %g2, %g4 | ||
238 | be,pn %xcc, sun4v_nonres_mondo_queue_empty | ||
239 | nop | ||
240 | |||
241 | /* Get &trap_block[smp_processor_id()] into %g3. */ | ||
242 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | ||
243 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | ||
244 | |||
245 | /* Get RES mondo queue base phys address into %g5. */ | ||
246 | ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5 | ||
247 | |||
248 | /* Get RES kernel buffer base phys address into %g7. */ | ||
249 | ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7 | ||
250 | |||
251 | /* If the first word is non-zero, queue is full. */ | ||
252 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 | ||
253 | brnz,pn %g1, sun4v_nonres_mondo_queue_full | ||
254 | nop | ||
255 | |||
256 | /* Remember this entry's offset in %g1. */ | ||
257 | mov %g2, %g1 | ||
258 | |||
259 | /* Copy 64-byte queue entry into kernel buffer. */ | ||
260 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
261 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
262 | add %g2, 0x08, %g2 | ||
263 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
264 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
265 | add %g2, 0x08, %g2 | ||
266 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
267 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
268 | add %g2, 0x08, %g2 | ||
269 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
270 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
271 | add %g2, 0x08, %g2 | ||
272 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
273 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
274 | add %g2, 0x08, %g2 | ||
275 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
276 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
277 | add %g2, 0x08, %g2 | ||
278 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
279 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
280 | add %g2, 0x08, %g2 | ||
281 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
282 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
283 | add %g2, 0x08, %g2 | ||
284 | |||
285 | /* Update queue head pointer. */ | ||
286 | sethi %hi(8192 - 1), %g4 | ||
287 | or %g4, %lo(8192 - 1), %g4 | ||
288 | and %g2, %g4, %g2 | ||
289 | |||
290 | mov INTRQ_NONRESUM_MONDO_HEAD, %g4 | ||
291 | stxa %g2, [%g4] ASI_QUEUE | ||
292 | membar #Sync | ||
293 | |||
294 | /* Disable interrupts and save register state so we can call | ||
295 | * C code. The etrap handling will leave %g4 in %l4 for us | ||
296 | * when it's done. | ||
297 | */ | ||
298 | rdpr %pil, %g2 | ||
299 | wrpr %g0, 15, %pil | ||
300 | mov %g1, %g4 | ||
301 | ba,pt %xcc, etrap_irq | ||
302 | rd %pc, %g7 | ||
303 | |||
304 | /* Log the event. */ | ||
305 | add %sp, PTREGS_OFF, %o0 | ||
306 | call sun4v_nonresum_error | ||
307 | mov %l4, %o1 | ||
308 | |||
309 | /* Return from trap. */ | ||
310 | ba,pt %xcc, rtrap_irq | ||
311 | nop | ||
312 | |||
313 | sun4v_nonres_mondo_queue_empty: | ||
314 | retry | ||
315 | |||
316 | sun4v_nonres_mondo_queue_full: | ||
317 | /* The queue is full, consolidate our damage by setting | ||
318 | * the head equal to the tail. We'll just trap again otherwise. | ||
319 | * Call C code to log the event. | ||
320 | */ | ||
321 | mov INTRQ_NONRESUM_MONDO_HEAD, %g2 | ||
322 | stxa %g4, [%g2] ASI_QUEUE | ||
323 | membar #Sync | ||
324 | |||
325 | rdpr %pil, %g2 | ||
326 | wrpr %g0, 15, %pil | ||
327 | ba,pt %xcc, etrap_irq | ||
328 | rd %pc, %g7 | ||
329 | |||
330 | call sun4v_nonresum_overflow | ||
331 | add %sp, PTREGS_OFF, %o0 | ||
332 | |||
333 | ba,pt %xcc, rtrap_irq | ||
334 | nop | ||
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S new file mode 100644 index 000000000000..ab23ddb7116e --- /dev/null +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S | |||
@@ -0,0 +1,421 @@ | |||
1 | /* sun4v_tlb_miss.S: Sun4v TLB miss handlers. | ||
2 | * | ||
3 | * Copyright (C) 2006 <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | .text | ||
7 | .align 32 | ||
8 | |||
9 | /* Load ITLB fault information into VADDR and CTX, using BASE. */ | ||
10 | #define LOAD_ITLB_INFO(BASE, VADDR, CTX) \ | ||
11 | ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \ | ||
12 | ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX; | ||
13 | |||
14 | /* Load DTLB fault information into VADDR and CTX, using BASE. */ | ||
15 | #define LOAD_DTLB_INFO(BASE, VADDR, CTX) \ | ||
16 | ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \ | ||
17 | ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX; | ||
18 | |||
19 | /* DEST = (VADDR >> 22) | ||
20 | * | ||
21 | * Branch to ZERO_CTX_LABEL if context is zero. | ||
22 | */ | ||
23 | #define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \ | ||
24 | srlx VADDR, 22, DEST; \ | ||
25 | brz,pn CTX, ZERO_CTX_LABEL; \ | ||
26 | nop; | ||
27 | |||
28 | /* Create TSB pointer. This is something like: | ||
29 | * | ||
30 | * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; | ||
31 | * tsb_base = tsb_reg & ~0x7UL; | ||
32 | * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); | ||
33 | * tsb_ptr = tsb_base + (tsb_index * 16); | ||
34 | */ | ||
35 | #define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \ | ||
36 | and TSB_PTR, 0x7, TMP1; \ | ||
37 | mov 512, TMP2; \ | ||
38 | andn TSB_PTR, 0x7, TSB_PTR; \ | ||
39 | sllx TMP2, TMP1, TMP2; \ | ||
40 | srlx VADDR, PAGE_SHIFT, TMP1; \ | ||
41 | sub TMP2, 1, TMP2; \ | ||
42 | and TMP1, TMP2, TMP1; \ | ||
43 | sllx TMP1, 4, TMP1; \ | ||
44 | add TSB_PTR, TMP1, TSB_PTR; | ||
45 | |||
46 | sun4v_itlb_miss: | ||
47 | /* Load MMU Miss base into %g2. */ | ||
48 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
49 | |||
50 | /* Load UTSB reg into %g1. */ | ||
51 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
52 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
53 | |||
54 | LOAD_ITLB_INFO(%g2, %g4, %g5) | ||
55 | COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v) | ||
56 | COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) | ||
57 | |||
58 | /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ | ||
59 | ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 | ||
60 | cmp %g2, %g6 | ||
61 | bne,a,pn %xcc, tsb_miss_page_table_walk | ||
62 | mov FAULT_CODE_ITLB, %g3 | ||
63 | andcc %g3, _PAGE_EXEC_4V, %g0 | ||
64 | be,a,pn %xcc, tsb_do_fault | ||
65 | mov FAULT_CODE_ITLB, %g3 | ||
66 | |||
67 | /* We have a valid entry, make hypervisor call to load | ||
68 | * I-TLB and return from trap. | ||
69 | * | ||
70 | * %g3: PTE | ||
71 | * %g4: vaddr | ||
72 | */ | ||
73 | sun4v_itlb_load: | ||
74 | ldxa [%g0] ASI_SCRATCHPAD, %g6 | ||
75 | mov %o0, %g1 ! save %o0 | ||
76 | mov %o1, %g2 ! save %o1 | ||
77 | mov %o2, %g5 ! save %o2 | ||
78 | mov %o3, %g7 ! save %o3 | ||
79 | mov %g4, %o0 ! vaddr | ||
80 | ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 ! ctx | ||
81 | mov %g3, %o2 ! PTE | ||
82 | mov HV_MMU_IMMU, %o3 ! flags | ||
83 | ta HV_MMU_MAP_ADDR_TRAP | ||
84 | brnz,pn %o0, sun4v_itlb_error | ||
85 | mov %g2, %o1 ! restore %o1 | ||
86 | mov %g1, %o0 ! restore %o0 | ||
87 | mov %g5, %o2 ! restore %o2 | ||
88 | mov %g7, %o3 ! restore %o3 | ||
89 | |||
90 | retry | ||
91 | |||
92 | sun4v_dtlb_miss: | ||
93 | /* Load MMU Miss base into %g2. */ | ||
94 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
95 | |||
96 | /* Load UTSB reg into %g1. */ | ||
97 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
98 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
99 | |||
100 | LOAD_DTLB_INFO(%g2, %g4, %g5) | ||
101 | COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v) | ||
102 | COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) | ||
103 | |||
104 | /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ | ||
105 | ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 | ||
106 | cmp %g2, %g6 | ||
107 | bne,a,pn %xcc, tsb_miss_page_table_walk | ||
108 | mov FAULT_CODE_DTLB, %g3 | ||
109 | |||
110 | /* We have a valid entry, make hypervisor call to load | ||
111 | * D-TLB and return from trap. | ||
112 | * | ||
113 | * %g3: PTE | ||
114 | * %g4: vaddr | ||
115 | */ | ||
116 | sun4v_dtlb_load: | ||
117 | ldxa [%g0] ASI_SCRATCHPAD, %g6 | ||
118 | mov %o0, %g1 ! save %o0 | ||
119 | mov %o1, %g2 ! save %o1 | ||
120 | mov %o2, %g5 ! save %o2 | ||
121 | mov %o3, %g7 ! save %o3 | ||
122 | mov %g4, %o0 ! vaddr | ||
123 | ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 ! ctx | ||
124 | mov %g3, %o2 ! PTE | ||
125 | mov HV_MMU_DMMU, %o3 ! flags | ||
126 | ta HV_MMU_MAP_ADDR_TRAP | ||
127 | brnz,pn %o0, sun4v_dtlb_error | ||
128 | mov %g2, %o1 ! restore %o1 | ||
129 | mov %g1, %o0 ! restore %o0 | ||
130 | mov %g5, %o2 ! restore %o2 | ||
131 | mov %g7, %o3 ! restore %o3 | ||
132 | |||
133 | retry | ||
134 | |||
135 | sun4v_dtlb_prot: | ||
136 | SET_GL(1) | ||
137 | |||
138 | /* Load MMU Miss base into %g5. */ | ||
139 | ldxa [%g0] ASI_SCRATCHPAD, %g5 | ||
140 | |||
141 | ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
142 | rdpr %tl, %g1 | ||
143 | cmp %g1, 1 | ||
144 | bgu,pn %xcc, winfix_trampoline | ||
145 | nop | ||
146 | ba,pt %xcc, sparc64_realfault_common | ||
147 | mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 | ||
148 | |||
149 | /* Called from trap table: | ||
150 | * %g4: vaddr | ||
151 | * %g5: context | ||
152 | * %g6: TAG TARGET | ||
153 | */ | ||
154 | sun4v_itsb_miss: | ||
155 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
156 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
157 | brz,pn %g5, kvmap_itlb_4v | ||
158 | mov FAULT_CODE_ITLB, %g3 | ||
159 | ba,a,pt %xcc, sun4v_tsb_miss_common | ||
160 | |||
161 | /* Called from trap table: | ||
162 | * %g4: vaddr | ||
163 | * %g5: context | ||
164 | * %g6: TAG TARGET | ||
165 | */ | ||
166 | sun4v_dtsb_miss: | ||
167 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
168 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
169 | brz,pn %g5, kvmap_dtlb_4v | ||
170 | mov FAULT_CODE_DTLB, %g3 | ||
171 | |||
172 | /* fallthrough */ | ||
173 | |||
174 | /* Create TSB pointer into %g1. This is something like: | ||
175 | * | ||
176 | * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; | ||
177 | * tsb_base = tsb_reg & ~0x7UL; | ||
178 | * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); | ||
179 | * tsb_ptr = tsb_base + (tsb_index * 16); | ||
180 | */ | ||
181 | sun4v_tsb_miss_common: | ||
182 | COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7) | ||
183 | |||
184 | /* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS | ||
185 | * still in %g2, so it's quite trivial to get at the PGD PHYS value | ||
186 | * so we can preload it into %g7. | ||
187 | */ | ||
188 | sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2 | ||
189 | ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath | ||
190 | ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 | ||
191 | |||
192 | sun4v_itlb_error: | ||
193 | sethi %hi(sun4v_err_itlb_vaddr), %g1 | ||
194 | stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)] | ||
195 | sethi %hi(sun4v_err_itlb_ctx), %g1 | ||
196 | ldxa [%g0] ASI_SCRATCHPAD, %g6 | ||
197 | ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 | ||
198 | stx %o1, [%g1 + %lo(sun4v_err_itlb_ctx)] | ||
199 | sethi %hi(sun4v_err_itlb_pte), %g1 | ||
200 | stx %g3, [%g1 + %lo(sun4v_err_itlb_pte)] | ||
201 | sethi %hi(sun4v_err_itlb_error), %g1 | ||
202 | stx %o0, [%g1 + %lo(sun4v_err_itlb_error)] | ||
203 | |||
204 | rdpr %tl, %g4 | ||
205 | cmp %g4, 1 | ||
206 | ble,pt %icc, 1f | ||
207 | sethi %hi(2f), %g7 | ||
208 | ba,pt %xcc, etraptl1 | ||
209 | or %g7, %lo(2f), %g7 | ||
210 | |||
211 | 1: ba,pt %xcc, etrap | ||
212 | 2: or %g7, %lo(2b), %g7 | ||
213 | call sun4v_itlb_error_report | ||
214 | add %sp, PTREGS_OFF, %o0 | ||
215 | |||
216 | /* NOTREACHED */ | ||
217 | |||
218 | sun4v_dtlb_error: | ||
219 | sethi %hi(sun4v_err_dtlb_vaddr), %g1 | ||
220 | stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)] | ||
221 | sethi %hi(sun4v_err_dtlb_ctx), %g1 | ||
222 | ldxa [%g0] ASI_SCRATCHPAD, %g6 | ||
223 | ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 | ||
224 | stx %o1, [%g1 + %lo(sun4v_err_dtlb_ctx)] | ||
225 | sethi %hi(sun4v_err_dtlb_pte), %g1 | ||
226 | stx %g3, [%g1 + %lo(sun4v_err_dtlb_pte)] | ||
227 | sethi %hi(sun4v_err_dtlb_error), %g1 | ||
228 | stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)] | ||
229 | |||
230 | rdpr %tl, %g4 | ||
231 | cmp %g4, 1 | ||
232 | ble,pt %icc, 1f | ||
233 | sethi %hi(2f), %g7 | ||
234 | ba,pt %xcc, etraptl1 | ||
235 | or %g7, %lo(2f), %g7 | ||
236 | |||
237 | 1: ba,pt %xcc, etrap | ||
238 | 2: or %g7, %lo(2b), %g7 | ||
239 | call sun4v_dtlb_error_report | ||
240 | add %sp, PTREGS_OFF, %o0 | ||
241 | |||
242 | /* NOTREACHED */ | ||
243 | |||
244 | /* Instruction Access Exception, tl0. */ | ||
245 | sun4v_iacc: | ||
246 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
247 | ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3 | ||
248 | ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 | ||
249 | ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 | ||
250 | sllx %g3, 16, %g3 | ||
251 | or %g5, %g3, %g5 | ||
252 | ba,pt %xcc, etrap | ||
253 | rd %pc, %g7 | ||
254 | mov %l4, %o1 | ||
255 | mov %l5, %o2 | ||
256 | call sun4v_insn_access_exception | ||
257 | add %sp, PTREGS_OFF, %o0 | ||
258 | ba,a,pt %xcc, rtrap_clr_l6 | ||
259 | |||
260 | /* Instruction Access Exception, tl1. */ | ||
261 | sun4v_iacc_tl1: | ||
262 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
263 | ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3 | ||
264 | ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 | ||
265 | ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 | ||
266 | sllx %g3, 16, %g3 | ||
267 | or %g5, %g3, %g5 | ||
268 | ba,pt %xcc, etraptl1 | ||
269 | rd %pc, %g7 | ||
270 | mov %l4, %o1 | ||
271 | mov %l5, %o2 | ||
272 | call sun4v_insn_access_exception_tl1 | ||
273 | add %sp, PTREGS_OFF, %o0 | ||
274 | ba,a,pt %xcc, rtrap_clr_l6 | ||
275 | |||
276 | /* Data Access Exception, tl0. */ | ||
277 | sun4v_dacc: | ||
278 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
279 | ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 | ||
280 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
281 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
282 | sllx %g3, 16, %g3 | ||
283 | or %g5, %g3, %g5 | ||
284 | ba,pt %xcc, etrap | ||
285 | rd %pc, %g7 | ||
286 | mov %l4, %o1 | ||
287 | mov %l5, %o2 | ||
288 | call sun4v_data_access_exception | ||
289 | add %sp, PTREGS_OFF, %o0 | ||
290 | ba,a,pt %xcc, rtrap_clr_l6 | ||
291 | |||
292 | /* Data Access Exception, tl1. */ | ||
293 | sun4v_dacc_tl1: | ||
294 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
295 | ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 | ||
296 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
297 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
298 | sllx %g3, 16, %g3 | ||
299 | or %g5, %g3, %g5 | ||
300 | ba,pt %xcc, etraptl1 | ||
301 | rd %pc, %g7 | ||
302 | mov %l4, %o1 | ||
303 | mov %l5, %o2 | ||
304 | call sun4v_data_access_exception_tl1 | ||
305 | add %sp, PTREGS_OFF, %o0 | ||
306 | ba,a,pt %xcc, rtrap_clr_l6 | ||
307 | |||
308 | /* Memory Address Unaligned. */ | ||
309 | sun4v_mna: | ||
310 | /* Window fixup? */ | ||
311 | rdpr %tl, %g2 | ||
312 | cmp %g2, 1 | ||
313 | ble,pt %icc, 1f | ||
314 | nop | ||
315 | |||
316 | SET_GL(1) | ||
317 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
318 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
319 | mov HV_FAULT_TYPE_UNALIGNED, %g3 | ||
320 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g4 | ||
321 | sllx %g3, 16, %g3 | ||
322 | or %g4, %g3, %g4 | ||
323 | ba,pt %xcc, winfix_mna | ||
324 | rdpr %tpc, %g3 | ||
325 | /* not reached */ | ||
326 | |||
327 | 1: ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
328 | mov HV_FAULT_TYPE_UNALIGNED, %g3 | ||
329 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
330 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
331 | sllx %g3, 16, %g3 | ||
332 | or %g5, %g3, %g5 | ||
333 | |||
334 | ba,pt %xcc, etrap | ||
335 | rd %pc, %g7 | ||
336 | mov %l4, %o1 | ||
337 | mov %l5, %o2 | ||
338 | call sun4v_do_mna | ||
339 | add %sp, PTREGS_OFF, %o0 | ||
340 | ba,a,pt %xcc, rtrap_clr_l6 | ||
341 | |||
342 | /* Privileged Action. */ | ||
343 | sun4v_privact: | ||
344 | ba,pt %xcc, etrap | ||
345 | rd %pc, %g7 | ||
346 | call do_privact | ||
347 | add %sp, PTREGS_OFF, %o0 | ||
348 | ba,a,pt %xcc, rtrap_clr_l6 | ||
349 | |||
350 | /* Unaligned ldd float, tl0. */ | ||
351 | sun4v_lddfmna: | ||
352 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
353 | ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 | ||
354 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
355 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
356 | sllx %g3, 16, %g3 | ||
357 | or %g5, %g3, %g5 | ||
358 | ba,pt %xcc, etrap | ||
359 | rd %pc, %g7 | ||
360 | mov %l4, %o1 | ||
361 | mov %l5, %o2 | ||
362 | call handle_lddfmna | ||
363 | add %sp, PTREGS_OFF, %o0 | ||
364 | ba,a,pt %xcc, rtrap_clr_l6 | ||
365 | |||
366 | /* Unaligned std float, tl0. */ | ||
367 | sun4v_stdfmna: | ||
368 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
369 | ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 | ||
370 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
371 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
372 | sllx %g3, 16, %g3 | ||
373 | or %g5, %g3, %g5 | ||
374 | ba,pt %xcc, etrap | ||
375 | rd %pc, %g7 | ||
376 | mov %l4, %o1 | ||
377 | mov %l5, %o2 | ||
378 | call handle_stdfmna | ||
379 | add %sp, PTREGS_OFF, %o0 | ||
380 | ba,a,pt %xcc, rtrap_clr_l6 | ||
381 | |||
382 | #define BRANCH_ALWAYS 0x10680000 | ||
383 | #define NOP 0x01000000 | ||
384 | #define SUN4V_DO_PATCH(OLD, NEW) \ | ||
385 | sethi %hi(NEW), %g1; \ | ||
386 | or %g1, %lo(NEW), %g1; \ | ||
387 | sethi %hi(OLD), %g2; \ | ||
388 | or %g2, %lo(OLD), %g2; \ | ||
389 | sub %g1, %g2, %g1; \ | ||
390 | sethi %hi(BRANCH_ALWAYS), %g3; \ | ||
391 | sll %g1, 11, %g1; \ | ||
392 | srl %g1, 11 + 2, %g1; \ | ||
393 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | ||
394 | or %g3, %g1, %g3; \ | ||
395 | stw %g3, [%g2]; \ | ||
396 | sethi %hi(NOP), %g3; \ | ||
397 | or %g3, %lo(NOP), %g3; \ | ||
398 | stw %g3, [%g2 + 0x4]; \ | ||
399 | flush %g2; | ||
400 | |||
401 | .globl sun4v_patch_tlb_handlers | ||
402 | .type sun4v_patch_tlb_handlers,#function | ||
403 | sun4v_patch_tlb_handlers: | ||
404 | SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss) | ||
405 | SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss) | ||
406 | SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss) | ||
407 | SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss) | ||
408 | SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot) | ||
409 | SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot) | ||
410 | SUN4V_DO_PATCH(tl0_iax, sun4v_iacc) | ||
411 | SUN4V_DO_PATCH(tl1_iax, sun4v_iacc_tl1) | ||
412 | SUN4V_DO_PATCH(tl0_dax, sun4v_dacc) | ||
413 | SUN4V_DO_PATCH(tl1_dax, sun4v_dacc_tl1) | ||
414 | SUN4V_DO_PATCH(tl0_mna, sun4v_mna) | ||
415 | SUN4V_DO_PATCH(tl1_mna, sun4v_mna) | ||
416 | SUN4V_DO_PATCH(tl0_lddfmna, sun4v_lddfmna) | ||
417 | SUN4V_DO_PATCH(tl0_stdfmna, sun4v_stdfmna) | ||
418 | SUN4V_DO_PATCH(tl0_privact, sun4v_privact) | ||
419 | retl | ||
420 | nop | ||
421 | .size sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers | ||
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index 5f8c822a2b4a..7a869138c37f 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c | |||
@@ -25,25 +25,93 @@ | |||
25 | #include <linux/syscalls.h> | 25 | #include <linux/syscalls.h> |
26 | #include <linux/ipc.h> | 26 | #include <linux/ipc.h> |
27 | #include <linux/personality.h> | 27 | #include <linux/personality.h> |
28 | #include <linux/random.h> | ||
28 | 29 | ||
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #include <asm/ipc.h> | 31 | #include <asm/ipc.h> |
31 | #include <asm/utrap.h> | 32 | #include <asm/utrap.h> |
32 | #include <asm/perfctr.h> | 33 | #include <asm/perfctr.h> |
34 | #include <asm/a.out.h> | ||
33 | 35 | ||
34 | /* #define DEBUG_UNIMP_SYSCALL */ | 36 | /* #define DEBUG_UNIMP_SYSCALL */ |
35 | 37 | ||
36 | /* XXX Make this per-binary type, this way we can detect the type of | ||
37 | * XXX a binary. Every Sparc executable calls this very early on. | ||
38 | */ | ||
39 | asmlinkage unsigned long sys_getpagesize(void) | 38 | asmlinkage unsigned long sys_getpagesize(void) |
40 | { | 39 | { |
41 | return PAGE_SIZE; | 40 | return PAGE_SIZE; |
42 | } | 41 | } |
43 | 42 | ||
44 | #define COLOUR_ALIGN(addr,pgoff) \ | 43 | #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) |
45 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ | 44 | #define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) |
46 | (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) | 45 | |
46 | /* Does addr --> addr+len fall within 4GB of the VA-space hole or | ||
47 | * overflow past the end of the 64-bit address space? | ||
48 | */ | ||
49 | static inline int invalid_64bit_range(unsigned long addr, unsigned long len) | ||
50 | { | ||
51 | unsigned long va_exclude_start, va_exclude_end; | ||
52 | |||
53 | va_exclude_start = VA_EXCLUDE_START; | ||
54 | va_exclude_end = VA_EXCLUDE_END; | ||
55 | |||
56 | if (unlikely(len >= va_exclude_start)) | ||
57 | return 1; | ||
58 | |||
59 | if (unlikely((addr + len) < addr)) | ||
60 | return 1; | ||
61 | |||
62 | if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) || | ||
63 | ((addr + len) >= va_exclude_start && | ||
64 | (addr + len) < va_exclude_end))) | ||
65 | return 1; | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | /* Does start,end straddle the VA-space hole? */ | ||
71 | static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end) | ||
72 | { | ||
73 | unsigned long va_exclude_start, va_exclude_end; | ||
74 | |||
75 | va_exclude_start = VA_EXCLUDE_START; | ||
76 | va_exclude_end = VA_EXCLUDE_END; | ||
77 | |||
78 | if (likely(start < va_exclude_start && end < va_exclude_start)) | ||
79 | return 0; | ||
80 | |||
81 | if (likely(start >= va_exclude_end && end >= va_exclude_end)) | ||
82 | return 0; | ||
83 | |||
84 | return 1; | ||
85 | } | ||
86 | |||
87 | /* These functions differ from the default implementations in | ||
88 | * mm/mmap.c in two ways: | ||
89 | * | ||
90 | * 1) For file backed MAP_SHARED mmap()'s we D-cache color align, | ||
91 | * for fixed such mappings we just validate what the user gave us. | ||
92 | * 2) For 64-bit tasks we avoid mapping anything within 4GB of | ||
93 | * the spitfire/niagara VA-hole. | ||
94 | */ | ||
95 | |||
96 | static inline unsigned long COLOUR_ALIGN(unsigned long addr, | ||
97 | unsigned long pgoff) | ||
98 | { | ||
99 | unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1); | ||
100 | unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); | ||
101 | |||
102 | return base + off; | ||
103 | } | ||
104 | |||
105 | static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, | ||
106 | unsigned long pgoff) | ||
107 | { | ||
108 | unsigned long base = addr & ~(SHMLBA-1); | ||
109 | unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); | ||
110 | |||
111 | if (base + off <= addr) | ||
112 | return base + off; | ||
113 | return base - off; | ||
114 | } | ||
47 | 115 | ||
48 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) | 116 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) |
49 | { | 117 | { |
@@ -64,8 +132,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |||
64 | } | 132 | } |
65 | 133 | ||
66 | if (test_thread_flag(TIF_32BIT)) | 134 | if (test_thread_flag(TIF_32BIT)) |
67 | task_size = 0xf0000000UL; | 135 | task_size = STACK_TOP32; |
68 | if (len > task_size || len > -PAGE_OFFSET) | 136 | if (unlikely(len > task_size || len >= VA_EXCLUDE_START)) |
69 | return -ENOMEM; | 137 | return -ENOMEM; |
70 | 138 | ||
71 | do_color_align = 0; | 139 | do_color_align = 0; |
@@ -84,11 +152,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |||
84 | return addr; | 152 | return addr; |
85 | } | 153 | } |
86 | 154 | ||
87 | if (len <= mm->cached_hole_size) { | 155 | if (len > mm->cached_hole_size) { |
156 | start_addr = addr = mm->free_area_cache; | ||
157 | } else { | ||
158 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
88 | mm->cached_hole_size = 0; | 159 | mm->cached_hole_size = 0; |
89 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
90 | } | 160 | } |
91 | start_addr = addr = mm->free_area_cache; | ||
92 | 161 | ||
93 | task_size -= len; | 162 | task_size -= len; |
94 | 163 | ||
@@ -100,11 +169,12 @@ full_search: | |||
100 | 169 | ||
101 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { | 170 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { |
102 | /* At this point: (!vma || addr < vma->vm_end). */ | 171 | /* At this point: (!vma || addr < vma->vm_end). */ |
103 | if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) { | 172 | if (addr < VA_EXCLUDE_START && |
104 | addr = PAGE_OFFSET; | 173 | (addr + len) >= VA_EXCLUDE_START) { |
105 | vma = find_vma(mm, PAGE_OFFSET); | 174 | addr = VA_EXCLUDE_END; |
175 | vma = find_vma(mm, VA_EXCLUDE_END); | ||
106 | } | 176 | } |
107 | if (task_size < addr) { | 177 | if (unlikely(task_size < addr)) { |
108 | if (start_addr != TASK_UNMAPPED_BASE) { | 178 | if (start_addr != TASK_UNMAPPED_BASE) { |
109 | start_addr = addr = TASK_UNMAPPED_BASE; | 179 | start_addr = addr = TASK_UNMAPPED_BASE; |
110 | mm->cached_hole_size = 0; | 180 | mm->cached_hole_size = 0; |
@@ -112,7 +182,7 @@ full_search: | |||
112 | } | 182 | } |
113 | return -ENOMEM; | 183 | return -ENOMEM; |
114 | } | 184 | } |
115 | if (!vma || addr + len <= vma->vm_start) { | 185 | if (likely(!vma || addr + len <= vma->vm_start)) { |
116 | /* | 186 | /* |
117 | * Remember the place where we stopped the search: | 187 | * Remember the place where we stopped the search: |
118 | */ | 188 | */ |
@@ -128,6 +198,121 @@ full_search: | |||
128 | } | 198 | } |
129 | } | 199 | } |
130 | 200 | ||
201 | unsigned long | ||
202 | arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | ||
203 | const unsigned long len, const unsigned long pgoff, | ||
204 | const unsigned long flags) | ||
205 | { | ||
206 | struct vm_area_struct *vma; | ||
207 | struct mm_struct *mm = current->mm; | ||
208 | unsigned long task_size = STACK_TOP32; | ||
209 | unsigned long addr = addr0; | ||
210 | int do_color_align; | ||
211 | |||
212 | /* This should only ever run for 32-bit processes. */ | ||
213 | BUG_ON(!test_thread_flag(TIF_32BIT)); | ||
214 | |||
215 | if (flags & MAP_FIXED) { | ||
216 | /* We do not accept a shared mapping if it would violate | ||
217 | * cache aliasing constraints. | ||
218 | */ | ||
219 | if ((flags & MAP_SHARED) && | ||
220 | ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) | ||
221 | return -EINVAL; | ||
222 | return addr; | ||
223 | } | ||
224 | |||
225 | if (unlikely(len > task_size)) | ||
226 | return -ENOMEM; | ||
227 | |||
228 | do_color_align = 0; | ||
229 | if (filp || (flags & MAP_SHARED)) | ||
230 | do_color_align = 1; | ||
231 | |||
232 | /* requesting a specific address */ | ||
233 | if (addr) { | ||
234 | if (do_color_align) | ||
235 | addr = COLOUR_ALIGN(addr, pgoff); | ||
236 | else | ||
237 | addr = PAGE_ALIGN(addr); | ||
238 | |||
239 | vma = find_vma(mm, addr); | ||
240 | if (task_size - len >= addr && | ||
241 | (!vma || addr + len <= vma->vm_start)) | ||
242 | return addr; | ||
243 | } | ||
244 | |||
245 | /* check if free_area_cache is useful for us */ | ||
246 | if (len <= mm->cached_hole_size) { | ||
247 | mm->cached_hole_size = 0; | ||
248 | mm->free_area_cache = mm->mmap_base; | ||
249 | } | ||
250 | |||
251 | /* either no address requested or can't fit in requested address hole */ | ||
252 | addr = mm->free_area_cache; | ||
253 | if (do_color_align) { | ||
254 | unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff); | ||
255 | |||
256 | addr = base + len; | ||
257 | } | ||
258 | |||
259 | /* make sure it can fit in the remaining address space */ | ||
260 | if (likely(addr > len)) { | ||
261 | vma = find_vma(mm, addr-len); | ||
262 | if (!vma || addr <= vma->vm_start) { | ||
263 | /* remember the address as a hint for next time */ | ||
264 | return (mm->free_area_cache = addr-len); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | if (unlikely(mm->mmap_base < len)) | ||
269 | goto bottomup; | ||
270 | |||
271 | addr = mm->mmap_base-len; | ||
272 | if (do_color_align) | ||
273 | addr = COLOUR_ALIGN_DOWN(addr, pgoff); | ||
274 | |||
275 | do { | ||
276 | /* | ||
277 | * Lookup failure means no vma is above this address, | ||
278 | * else if new region fits below vma->vm_start, | ||
279 | * return with success: | ||
280 | */ | ||
281 | vma = find_vma(mm, addr); | ||
282 | if (likely(!vma || addr+len <= vma->vm_start)) { | ||
283 | /* remember the address as a hint for next time */ | ||
284 | return (mm->free_area_cache = addr); | ||
285 | } | ||
286 | |||
287 | /* remember the largest hole we saw so far */ | ||
288 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
289 | mm->cached_hole_size = vma->vm_start - addr; | ||
290 | |||
291 | /* try just below the current vma->vm_start */ | ||
292 | addr = vma->vm_start-len; | ||
293 | if (do_color_align) | ||
294 | addr = COLOUR_ALIGN_DOWN(addr, pgoff); | ||
295 | } while (likely(len < vma->vm_start)); | ||
296 | |||
297 | bottomup: | ||
298 | /* | ||
299 | * A failed mmap() very likely causes application failure, | ||
300 | * so fall back to the bottom-up function here. This scenario | ||
301 | * can happen with large stack limits and large mmap() | ||
302 | * allocations. | ||
303 | */ | ||
304 | mm->cached_hole_size = ~0UL; | ||
305 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
306 | addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); | ||
307 | /* | ||
308 | * Restore the topdown base: | ||
309 | */ | ||
310 | mm->free_area_cache = mm->mmap_base; | ||
311 | mm->cached_hole_size = ~0UL; | ||
312 | |||
313 | return addr; | ||
314 | } | ||
315 | |||
131 | /* Try to align mapping such that we align it as much as possible. */ | 316 | /* Try to align mapping such that we align it as much as possible. */ |
132 | unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) | 317 | unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) |
133 | { | 318 | { |
@@ -171,15 +356,57 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u | |||
171 | return addr; | 356 | return addr; |
172 | } | 357 | } |
173 | 358 | ||
359 | /* Essentially the same as PowerPC... */ | ||
360 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
361 | { | ||
362 | unsigned long random_factor = 0UL; | ||
363 | |||
364 | if (current->flags & PF_RANDOMIZE) { | ||
365 | random_factor = get_random_int(); | ||
366 | if (test_thread_flag(TIF_32BIT)) | ||
367 | random_factor &= ((1 * 1024 * 1024) - 1); | ||
368 | else | ||
369 | random_factor = ((random_factor << PAGE_SHIFT) & | ||
370 | 0xffffffffUL); | ||
371 | } | ||
372 | |||
373 | /* | ||
374 | * Fall back to the standard layout if the personality | ||
375 | * bit is set, or if the expected stack growth is unlimited: | ||
376 | */ | ||
377 | if (!test_thread_flag(TIF_32BIT) || | ||
378 | (current->personality & ADDR_COMPAT_LAYOUT) || | ||
379 | current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || | ||
380 | sysctl_legacy_va_layout) { | ||
381 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | ||
382 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
383 | mm->unmap_area = arch_unmap_area; | ||
384 | } else { | ||
385 | /* We know it's 32-bit */ | ||
386 | unsigned long task_size = STACK_TOP32; | ||
387 | unsigned long gap; | ||
388 | |||
389 | gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; | ||
390 | if (gap < 128 * 1024 * 1024) | ||
391 | gap = 128 * 1024 * 1024; | ||
392 | if (gap > (task_size / 6 * 5)) | ||
393 | gap = (task_size / 6 * 5); | ||
394 | |||
395 | mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); | ||
396 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | ||
397 | mm->unmap_area = arch_unmap_area_topdown; | ||
398 | } | ||
399 | } | ||
400 | |||
174 | asmlinkage unsigned long sparc_brk(unsigned long brk) | 401 | asmlinkage unsigned long sparc_brk(unsigned long brk) |
175 | { | 402 | { |
176 | /* People could try to be nasty and use ta 0x6d in 32bit programs */ | 403 | /* People could try to be nasty and use ta 0x6d in 32bit programs */ |
177 | if (test_thread_flag(TIF_32BIT) && | 404 | if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32) |
178 | brk >= 0xf0000000UL) | ||
179 | return current->mm->brk; | 405 | return current->mm->brk; |
180 | 406 | ||
181 | if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET)) | 407 | if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk))) |
182 | return current->mm->brk; | 408 | return current->mm->brk; |
409 | |||
183 | return sys_brk(brk); | 410 | return sys_brk(brk); |
184 | } | 411 | } |
185 | 412 | ||
@@ -340,13 +567,16 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, | |||
340 | retval = -EINVAL; | 567 | retval = -EINVAL; |
341 | 568 | ||
342 | if (test_thread_flag(TIF_32BIT)) { | 569 | if (test_thread_flag(TIF_32BIT)) { |
343 | if (len > 0xf0000000UL || | 570 | if (len >= STACK_TOP32) |
344 | ((flags & MAP_FIXED) && addr > 0xf0000000UL - len)) | 571 | goto out_putf; |
572 | |||
573 | if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len) | ||
345 | goto out_putf; | 574 | goto out_putf; |
346 | } else { | 575 | } else { |
347 | if (len > -PAGE_OFFSET || | 576 | if (len >= VA_EXCLUDE_START) |
348 | ((flags & MAP_FIXED) && | 577 | goto out_putf; |
349 | addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET)) | 578 | |
579 | if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len)) | ||
350 | goto out_putf; | 580 | goto out_putf; |
351 | } | 581 | } |
352 | 582 | ||
@@ -365,9 +595,9 @@ asmlinkage long sys64_munmap(unsigned long addr, size_t len) | |||
365 | { | 595 | { |
366 | long ret; | 596 | long ret; |
367 | 597 | ||
368 | if (len > -PAGE_OFFSET || | 598 | if (invalid_64bit_range(addr, len)) |
369 | (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET)) | ||
370 | return -EINVAL; | 599 | return -EINVAL; |
600 | |||
371 | down_write(¤t->mm->mmap_sem); | 601 | down_write(¤t->mm->mmap_sem); |
372 | ret = do_munmap(current->mm, addr, len); | 602 | ret = do_munmap(current->mm, addr, len); |
373 | up_write(¤t->mm->mmap_sem); | 603 | up_write(¤t->mm->mmap_sem); |
@@ -384,18 +614,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr, | |||
384 | { | 614 | { |
385 | struct vm_area_struct *vma; | 615 | struct vm_area_struct *vma; |
386 | unsigned long ret = -EINVAL; | 616 | unsigned long ret = -EINVAL; |
617 | |||
387 | if (test_thread_flag(TIF_32BIT)) | 618 | if (test_thread_flag(TIF_32BIT)) |
388 | goto out; | 619 | goto out; |
389 | if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET) | 620 | if (unlikely(new_len >= VA_EXCLUDE_START)) |
390 | goto out; | 621 | goto out; |
391 | if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET) | 622 | if (unlikely(invalid_64bit_range(addr, old_len))) |
392 | goto out; | 623 | goto out; |
624 | |||
393 | down_write(¤t->mm->mmap_sem); | 625 | down_write(¤t->mm->mmap_sem); |
394 | if (flags & MREMAP_FIXED) { | 626 | if (flags & MREMAP_FIXED) { |
395 | if (new_addr < PAGE_OFFSET && | 627 | if (invalid_64bit_range(new_addr, new_len)) |
396 | new_addr + new_len > -PAGE_OFFSET) | ||
397 | goto out_sem; | 628 | goto out_sem; |
398 | } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) { | 629 | } else if (invalid_64bit_range(addr, new_len)) { |
399 | unsigned long map_flags = 0; | 630 | unsigned long map_flags = 0; |
400 | struct file *file = NULL; | 631 | struct file *file = NULL; |
401 | 632 | ||
@@ -554,12 +785,10 @@ asmlinkage long sys_utrap_install(utrap_entry_t type, | |||
554 | } | 785 | } |
555 | if (!current_thread_info()->utraps) { | 786 | if (!current_thread_info()->utraps) { |
556 | current_thread_info()->utraps = | 787 | current_thread_info()->utraps = |
557 | kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL); | 788 | kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL); |
558 | if (!current_thread_info()->utraps) | 789 | if (!current_thread_info()->utraps) |
559 | return -ENOMEM; | 790 | return -ENOMEM; |
560 | current_thread_info()->utraps[0] = 1; | 791 | current_thread_info()->utraps[0] = 1; |
561 | memset(current_thread_info()->utraps+1, 0, | ||
562 | UT_TRAP_INSTRUCTION_31*sizeof(long)); | ||
563 | } else { | 792 | } else { |
564 | if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p && | 793 | if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p && |
565 | current_thread_info()->utraps[0] > 1) { | 794 | current_thread_info()->utraps[0] > 1) { |
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 417727bd87ba..0e41df024489 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c | |||
@@ -62,6 +62,7 @@ | |||
62 | #include <asm/fpumacro.h> | 62 | #include <asm/fpumacro.h> |
63 | #include <asm/semaphore.h> | 63 | #include <asm/semaphore.h> |
64 | #include <asm/mmu_context.h> | 64 | #include <asm/mmu_context.h> |
65 | #include <asm/a.out.h> | ||
65 | 66 | ||
66 | asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group) | 67 | asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group) |
67 | { | 68 | { |
@@ -1039,15 +1040,15 @@ asmlinkage unsigned long sys32_mremap(unsigned long addr, | |||
1039 | unsigned long ret = -EINVAL; | 1040 | unsigned long ret = -EINVAL; |
1040 | unsigned long new_addr = __new_addr; | 1041 | unsigned long new_addr = __new_addr; |
1041 | 1042 | ||
1042 | if (old_len > 0xf0000000UL || new_len > 0xf0000000UL) | 1043 | if (old_len > STACK_TOP32 || new_len > STACK_TOP32) |
1043 | goto out; | 1044 | goto out; |
1044 | if (addr > 0xf0000000UL - old_len) | 1045 | if (addr > STACK_TOP32 - old_len) |
1045 | goto out; | 1046 | goto out; |
1046 | down_write(¤t->mm->mmap_sem); | 1047 | down_write(¤t->mm->mmap_sem); |
1047 | if (flags & MREMAP_FIXED) { | 1048 | if (flags & MREMAP_FIXED) { |
1048 | if (new_addr > 0xf0000000UL - new_len) | 1049 | if (new_addr > STACK_TOP32 - new_len) |
1049 | goto out_sem; | 1050 | goto out_sem; |
1050 | } else if (addr > 0xf0000000UL - new_len) { | 1051 | } else if (addr > STACK_TOP32 - new_len) { |
1051 | unsigned long map_flags = 0; | 1052 | unsigned long map_flags = 0; |
1052 | struct file *file = NULL; | 1053 | struct file *file = NULL; |
1053 | 1054 | ||
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index a22930d62adf..7d61f1bfd3d3 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/cpufreq.h> | 30 | #include <linux/cpufreq.h> |
31 | #include <linux/percpu.h> | 31 | #include <linux/percpu.h> |
32 | #include <linux/profile.h> | 32 | #include <linux/profile.h> |
33 | #include <linux/miscdevice.h> | ||
34 | #include <linux/rtc.h> | ||
33 | 35 | ||
34 | #include <asm/oplib.h> | 36 | #include <asm/oplib.h> |
35 | #include <asm/mostek.h> | 37 | #include <asm/mostek.h> |
@@ -45,6 +47,7 @@ | |||
45 | #include <asm/smp.h> | 47 | #include <asm/smp.h> |
46 | #include <asm/sections.h> | 48 | #include <asm/sections.h> |
47 | #include <asm/cpudata.h> | 49 | #include <asm/cpudata.h> |
50 | #include <asm/uaccess.h> | ||
48 | 51 | ||
49 | DEFINE_SPINLOCK(mostek_lock); | 52 | DEFINE_SPINLOCK(mostek_lock); |
50 | DEFINE_SPINLOCK(rtc_lock); | 53 | DEFINE_SPINLOCK(rtc_lock); |
@@ -193,16 +196,22 @@ struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; | |||
193 | 196 | ||
194 | static void stick_init_tick(unsigned long offset) | 197 | static void stick_init_tick(unsigned long offset) |
195 | { | 198 | { |
196 | tick_disable_protection(); | 199 | /* Writes to the %tick and %stick register are not |
197 | 200 | * allowed on sun4v. The Hypervisor controls that | |
198 | /* Let the user get at STICK too. */ | 201 | * bit, per-strand. |
199 | __asm__ __volatile__( | 202 | */ |
200 | " rd %%asr24, %%g2\n" | 203 | if (tlb_type != hypervisor) { |
201 | " andn %%g2, %0, %%g2\n" | 204 | tick_disable_protection(); |
202 | " wr %%g2, 0, %%asr24" | 205 | |
203 | : /* no outputs */ | 206 | /* Let the user get at STICK too. */ |
204 | : "r" (TICK_PRIV_BIT) | 207 | __asm__ __volatile__( |
205 | : "g1", "g2"); | 208 | " rd %%asr24, %%g2\n" |
209 | " andn %%g2, %0, %%g2\n" | ||
210 | " wr %%g2, 0, %%asr24" | ||
211 | : /* no outputs */ | ||
212 | : "r" (TICK_PRIV_BIT) | ||
213 | : "g1", "g2"); | ||
214 | } | ||
206 | 215 | ||
207 | __asm__ __volatile__( | 216 | __asm__ __volatile__( |
208 | " rd %%asr24, %%g1\n" | 217 | " rd %%asr24, %%g1\n" |
@@ -683,6 +692,83 @@ static void __init set_system_time(void) | |||
683 | } | 692 | } |
684 | } | 693 | } |
685 | 694 | ||
695 | /* davem suggests we keep this within the 4M locked kernel image */ | ||
696 | static u32 starfire_get_time(void) | ||
697 | { | ||
698 | static char obp_gettod[32]; | ||
699 | static u32 unix_tod; | ||
700 | |||
701 | sprintf(obp_gettod, "h# %08x unix-gettod", | ||
702 | (unsigned int) (long) &unix_tod); | ||
703 | prom_feval(obp_gettod); | ||
704 | |||
705 | return unix_tod; | ||
706 | } | ||
707 | |||
708 | static int starfire_set_time(u32 val) | ||
709 | { | ||
710 | /* Do nothing, time is set using the service processor | ||
711 | * console on this platform. | ||
712 | */ | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static u32 hypervisor_get_time(void) | ||
717 | { | ||
718 | register unsigned long func asm("%o5"); | ||
719 | register unsigned long arg0 asm("%o0"); | ||
720 | register unsigned long arg1 asm("%o1"); | ||
721 | int retries = 10000; | ||
722 | |||
723 | retry: | ||
724 | func = HV_FAST_TOD_GET; | ||
725 | arg0 = 0; | ||
726 | arg1 = 0; | ||
727 | __asm__ __volatile__("ta %6" | ||
728 | : "=&r" (func), "=&r" (arg0), "=&r" (arg1) | ||
729 | : "0" (func), "1" (arg0), "2" (arg1), | ||
730 | "i" (HV_FAST_TRAP)); | ||
731 | if (arg0 == HV_EOK) | ||
732 | return arg1; | ||
733 | if (arg0 == HV_EWOULDBLOCK) { | ||
734 | if (--retries > 0) { | ||
735 | udelay(100); | ||
736 | goto retry; | ||
737 | } | ||
738 | printk(KERN_WARNING "SUN4V: tod_get() timed out.\n"); | ||
739 | return 0; | ||
740 | } | ||
741 | printk(KERN_WARNING "SUN4V: tod_get() not supported.\n"); | ||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | static int hypervisor_set_time(u32 secs) | ||
746 | { | ||
747 | register unsigned long func asm("%o5"); | ||
748 | register unsigned long arg0 asm("%o0"); | ||
749 | int retries = 10000; | ||
750 | |||
751 | retry: | ||
752 | func = HV_FAST_TOD_SET; | ||
753 | arg0 = secs; | ||
754 | __asm__ __volatile__("ta %4" | ||
755 | : "=&r" (func), "=&r" (arg0) | ||
756 | : "0" (func), "1" (arg0), | ||
757 | "i" (HV_FAST_TRAP)); | ||
758 | if (arg0 == HV_EOK) | ||
759 | return 0; | ||
760 | if (arg0 == HV_EWOULDBLOCK) { | ||
761 | if (--retries > 0) { | ||
762 | udelay(100); | ||
763 | goto retry; | ||
764 | } | ||
765 | printk(KERN_WARNING "SUN4V: tod_set() timed out.\n"); | ||
766 | return -EAGAIN; | ||
767 | } | ||
768 | printk(KERN_WARNING "SUN4V: tod_set() not supported.\n"); | ||
769 | return -EOPNOTSUPP; | ||
770 | } | ||
771 | |||
686 | void __init clock_probe(void) | 772 | void __init clock_probe(void) |
687 | { | 773 | { |
688 | struct linux_prom_registers clk_reg[2]; | 774 | struct linux_prom_registers clk_reg[2]; |
@@ -702,14 +788,14 @@ void __init clock_probe(void) | |||
702 | 788 | ||
703 | 789 | ||
704 | if (this_is_starfire) { | 790 | if (this_is_starfire) { |
705 | /* davem suggests we keep this within the 4M locked kernel image */ | 791 | xtime.tv_sec = starfire_get_time(); |
706 | static char obp_gettod[256]; | 792 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); |
707 | static u32 unix_tod; | 793 | set_normalized_timespec(&wall_to_monotonic, |
708 | 794 | -xtime.tv_sec, -xtime.tv_nsec); | |
709 | sprintf(obp_gettod, "h# %08x unix-gettod", | 795 | return; |
710 | (unsigned int) (long) &unix_tod); | 796 | } |
711 | prom_feval(obp_gettod); | 797 | if (tlb_type == hypervisor) { |
712 | xtime.tv_sec = unix_tod; | 798 | xtime.tv_sec = hypervisor_get_time(); |
713 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); | 799 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); |
714 | set_normalized_timespec(&wall_to_monotonic, | 800 | set_normalized_timespec(&wall_to_monotonic, |
715 | -xtime.tv_sec, -xtime.tv_nsec); | 801 | -xtime.tv_sec, -xtime.tv_nsec); |
@@ -981,11 +1067,10 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg | |||
981 | } | 1067 | } |
982 | 1068 | ||
983 | struct freq_table { | 1069 | struct freq_table { |
984 | unsigned long udelay_val_ref; | ||
985 | unsigned long clock_tick_ref; | 1070 | unsigned long clock_tick_ref; |
986 | unsigned int ref_freq; | 1071 | unsigned int ref_freq; |
987 | }; | 1072 | }; |
988 | static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 }; | 1073 | static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 }; |
989 | 1074 | ||
990 | unsigned long sparc64_get_clock_tick(unsigned int cpu) | 1075 | unsigned long sparc64_get_clock_tick(unsigned int cpu) |
991 | { | 1076 | { |
@@ -1007,16 +1092,11 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val | |||
1007 | 1092 | ||
1008 | if (!ft->ref_freq) { | 1093 | if (!ft->ref_freq) { |
1009 | ft->ref_freq = freq->old; | 1094 | ft->ref_freq = freq->old; |
1010 | ft->udelay_val_ref = cpu_data(cpu).udelay_val; | ||
1011 | ft->clock_tick_ref = cpu_data(cpu).clock_tick; | 1095 | ft->clock_tick_ref = cpu_data(cpu).clock_tick; |
1012 | } | 1096 | } |
1013 | if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || | 1097 | if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || |
1014 | (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || | 1098 | (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || |
1015 | (val == CPUFREQ_RESUMECHANGE)) { | 1099 | (val == CPUFREQ_RESUMECHANGE)) { |
1016 | cpu_data(cpu).udelay_val = | ||
1017 | cpufreq_scale(ft->udelay_val_ref, | ||
1018 | ft->ref_freq, | ||
1019 | freq->new); | ||
1020 | cpu_data(cpu).clock_tick = | 1100 | cpu_data(cpu).clock_tick = |
1021 | cpufreq_scale(ft->clock_tick_ref, | 1101 | cpufreq_scale(ft->clock_tick_ref, |
1022 | ft->ref_freq, | 1102 | ft->ref_freq, |
@@ -1179,3 +1259,246 @@ static int set_rtc_mmss(unsigned long nowtime) | |||
1179 | return retval; | 1259 | return retval; |
1180 | } | 1260 | } |
1181 | } | 1261 | } |
1262 | |||
1263 | #define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ | ||
1264 | static unsigned char mini_rtc_status; /* bitmapped status byte. */ | ||
1265 | |||
1266 | /* months start at 0 now */ | ||
1267 | static unsigned char days_in_mo[] = | ||
1268 | {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; | ||
1269 | |||
1270 | #define FEBRUARY 2 | ||
1271 | #define STARTOFTIME 1970 | ||
1272 | #define SECDAY 86400L | ||
1273 | #define SECYR (SECDAY * 365) | ||
1274 | #define leapyear(year) ((year) % 4 == 0 && \ | ||
1275 | ((year) % 100 != 0 || (year) % 400 == 0)) | ||
1276 | #define days_in_year(a) (leapyear(a) ? 366 : 365) | ||
1277 | #define days_in_month(a) (month_days[(a) - 1]) | ||
1278 | |||
1279 | static int month_days[12] = { | ||
1280 | 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 | ||
1281 | }; | ||
1282 | |||
1283 | /* | ||
1284 | * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) | ||
1285 | */ | ||
1286 | static void GregorianDay(struct rtc_time * tm) | ||
1287 | { | ||
1288 | int leapsToDate; | ||
1289 | int lastYear; | ||
1290 | int day; | ||
1291 | int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; | ||
1292 | |||
1293 | lastYear = tm->tm_year - 1; | ||
1294 | |||
1295 | /* | ||
1296 | * Number of leap corrections to apply up to end of last year | ||
1297 | */ | ||
1298 | leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; | ||
1299 | |||
1300 | /* | ||
1301 | * This year is a leap year if it is divisible by 4 except when it is | ||
1302 | * divisible by 100 unless it is divisible by 400 | ||
1303 | * | ||
1304 | * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was | ||
1305 | */ | ||
1306 | day = tm->tm_mon > 2 && leapyear(tm->tm_year); | ||
1307 | |||
1308 | day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + | ||
1309 | tm->tm_mday; | ||
1310 | |||
1311 | tm->tm_wday = day % 7; | ||
1312 | } | ||
1313 | |||
1314 | static void to_tm(int tim, struct rtc_time *tm) | ||
1315 | { | ||
1316 | register int i; | ||
1317 | register long hms, day; | ||
1318 | |||
1319 | day = tim / SECDAY; | ||
1320 | hms = tim % SECDAY; | ||
1321 | |||
1322 | /* Hours, minutes, seconds are easy */ | ||
1323 | tm->tm_hour = hms / 3600; | ||
1324 | tm->tm_min = (hms % 3600) / 60; | ||
1325 | tm->tm_sec = (hms % 3600) % 60; | ||
1326 | |||
1327 | /* Number of years in days */ | ||
1328 | for (i = STARTOFTIME; day >= days_in_year(i); i++) | ||
1329 | day -= days_in_year(i); | ||
1330 | tm->tm_year = i; | ||
1331 | |||
1332 | /* Number of months in days left */ | ||
1333 | if (leapyear(tm->tm_year)) | ||
1334 | days_in_month(FEBRUARY) = 29; | ||
1335 | for (i = 1; day >= days_in_month(i); i++) | ||
1336 | day -= days_in_month(i); | ||
1337 | days_in_month(FEBRUARY) = 28; | ||
1338 | tm->tm_mon = i; | ||
1339 | |||
1340 | /* Days are what is left over (+1) from all that. */ | ||
1341 | tm->tm_mday = day + 1; | ||
1342 | |||
1343 | /* | ||
1344 | * Determine the day of week | ||
1345 | */ | ||
1346 | GregorianDay(tm); | ||
1347 | } | ||
1348 | |||
1349 | /* Both Starfire and SUN4V give us seconds since Jan 1st, 1970, | ||
1350 | * aka Unix time. So we have to convert to/from rtc_time. | ||
1351 | */ | ||
1352 | static inline void mini_get_rtc_time(struct rtc_time *time) | ||
1353 | { | ||
1354 | unsigned long flags; | ||
1355 | u32 seconds; | ||
1356 | |||
1357 | spin_lock_irqsave(&rtc_lock, flags); | ||
1358 | seconds = 0; | ||
1359 | if (this_is_starfire) | ||
1360 | seconds = starfire_get_time(); | ||
1361 | else if (tlb_type == hypervisor) | ||
1362 | seconds = hypervisor_get_time(); | ||
1363 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
1364 | |||
1365 | to_tm(seconds, time); | ||
1366 | time->tm_year -= 1900; | ||
1367 | time->tm_mon -= 1; | ||
1368 | } | ||
1369 | |||
1370 | static inline int mini_set_rtc_time(struct rtc_time *time) | ||
1371 | { | ||
1372 | u32 seconds = mktime(time->tm_year + 1900, time->tm_mon + 1, | ||
1373 | time->tm_mday, time->tm_hour, | ||
1374 | time->tm_min, time->tm_sec); | ||
1375 | unsigned long flags; | ||
1376 | int err; | ||
1377 | |||
1378 | spin_lock_irqsave(&rtc_lock, flags); | ||
1379 | err = -ENODEV; | ||
1380 | if (this_is_starfire) | ||
1381 | err = starfire_set_time(seconds); | ||
1382 | else if (tlb_type == hypervisor) | ||
1383 | err = hypervisor_set_time(seconds); | ||
1384 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
1385 | |||
1386 | return err; | ||
1387 | } | ||
1388 | |||
1389 | static int mini_rtc_ioctl(struct inode *inode, struct file *file, | ||
1390 | unsigned int cmd, unsigned long arg) | ||
1391 | { | ||
1392 | struct rtc_time wtime; | ||
1393 | void __user *argp = (void __user *)arg; | ||
1394 | |||
1395 | switch (cmd) { | ||
1396 | |||
1397 | case RTC_PLL_GET: | ||
1398 | return -EINVAL; | ||
1399 | |||
1400 | case RTC_PLL_SET: | ||
1401 | return -EINVAL; | ||
1402 | |||
1403 | case RTC_UIE_OFF: /* disable ints from RTC updates. */ | ||
1404 | return 0; | ||
1405 | |||
1406 | case RTC_UIE_ON: /* enable ints for RTC updates. */ | ||
1407 | return -EINVAL; | ||
1408 | |||
1409 | case RTC_RD_TIME: /* Read the time/date from RTC */ | ||
1410 | /* this doesn't get week-day, who cares */ | ||
1411 | memset(&wtime, 0, sizeof(wtime)); | ||
1412 | mini_get_rtc_time(&wtime); | ||
1413 | |||
1414 | return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0; | ||
1415 | |||
1416 | case RTC_SET_TIME: /* Set the RTC */ | ||
1417 | { | ||
1418 | int year; | ||
1419 | unsigned char leap_yr; | ||
1420 | |||
1421 | if (!capable(CAP_SYS_TIME)) | ||
1422 | return -EACCES; | ||
1423 | |||
1424 | if (copy_from_user(&wtime, argp, sizeof(wtime))) | ||
1425 | return -EFAULT; | ||
1426 | |||
1427 | year = wtime.tm_year + 1900; | ||
1428 | leap_yr = ((!(year % 4) && (year % 100)) || | ||
1429 | !(year % 400)); | ||
1430 | |||
1431 | if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1)) | ||
1432 | return -EINVAL; | ||
1433 | |||
1434 | if (wtime.tm_mday < 0 || wtime.tm_mday > | ||
1435 | (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr))) | ||
1436 | return -EINVAL; | ||
1437 | |||
1438 | if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || | ||
1439 | wtime.tm_min < 0 || wtime.tm_min >= 60 || | ||
1440 | wtime.tm_sec < 0 || wtime.tm_sec >= 60) | ||
1441 | return -EINVAL; | ||
1442 | |||
1443 | return mini_set_rtc_time(&wtime); | ||
1444 | } | ||
1445 | } | ||
1446 | |||
1447 | return -EINVAL; | ||
1448 | } | ||
1449 | |||
1450 | static int mini_rtc_open(struct inode *inode, struct file *file) | ||
1451 | { | ||
1452 | if (mini_rtc_status & RTC_IS_OPEN) | ||
1453 | return -EBUSY; | ||
1454 | |||
1455 | mini_rtc_status |= RTC_IS_OPEN; | ||
1456 | |||
1457 | return 0; | ||
1458 | } | ||
1459 | |||
1460 | static int mini_rtc_release(struct inode *inode, struct file *file) | ||
1461 | { | ||
1462 | mini_rtc_status &= ~RTC_IS_OPEN; | ||
1463 | return 0; | ||
1464 | } | ||
1465 | |||
1466 | |||
1467 | static struct file_operations mini_rtc_fops = { | ||
1468 | .owner = THIS_MODULE, | ||
1469 | .ioctl = mini_rtc_ioctl, | ||
1470 | .open = mini_rtc_open, | ||
1471 | .release = mini_rtc_release, | ||
1472 | }; | ||
1473 | |||
1474 | static struct miscdevice rtc_mini_dev = | ||
1475 | { | ||
1476 | .minor = RTC_MINOR, | ||
1477 | .name = "rtc", | ||
1478 | .fops = &mini_rtc_fops, | ||
1479 | }; | ||
1480 | |||
1481 | static int __init rtc_mini_init(void) | ||
1482 | { | ||
1483 | int retval; | ||
1484 | |||
1485 | if (tlb_type != hypervisor && !this_is_starfire) | ||
1486 | return -ENODEV; | ||
1487 | |||
1488 | printk(KERN_INFO "Mini RTC Driver\n"); | ||
1489 | |||
1490 | retval = misc_register(&rtc_mini_dev); | ||
1491 | if (retval < 0) | ||
1492 | return retval; | ||
1493 | |||
1494 | return 0; | ||
1495 | } | ||
1496 | |||
1497 | static void __exit rtc_mini_exit(void) | ||
1498 | { | ||
1499 | misc_deregister(&rtc_mini_dev); | ||
1500 | } | ||
1501 | |||
1502 | |||
1503 | module_init(rtc_mini_init); | ||
1504 | module_exit(rtc_mini_exit); | ||
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 9478551cb020..a4dc01a3d238 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
17 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
18 | #include <asm/mmu.h> | 18 | #include <asm/mmu.h> |
19 | #include <asm/hypervisor.h> | ||
20 | #include <asm/cpudata.h> | ||
19 | 21 | ||
20 | .data | 22 | .data |
21 | .align 8 | 23 | .align 8 |
@@ -28,14 +30,19 @@ itlb_load: | |||
28 | dtlb_load: | 30 | dtlb_load: |
29 | .asciz "SUNW,dtlb-load" | 31 | .asciz "SUNW,dtlb-load" |
30 | 32 | ||
33 | /* XXX __cpuinit this thing XXX */ | ||
34 | #define TRAMP_STACK_SIZE 1024 | ||
35 | .align 16 | ||
36 | tramp_stack: | ||
37 | .skip TRAMP_STACK_SIZE | ||
38 | |||
31 | .text | 39 | .text |
32 | .align 8 | 40 | .align 8 |
33 | .globl sparc64_cpu_startup, sparc64_cpu_startup_end | 41 | .globl sparc64_cpu_startup, sparc64_cpu_startup_end |
34 | sparc64_cpu_startup: | 42 | sparc64_cpu_startup: |
35 | flushw | 43 | BRANCH_IF_SUN4V(g1, niagara_startup) |
36 | 44 | BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup) | |
37 | BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup) | 45 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup) |
38 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup) | ||
39 | 46 | ||
40 | ba,pt %xcc, spitfire_startup | 47 | ba,pt %xcc, spitfire_startup |
41 | nop | 48 | nop |
@@ -55,6 +62,7 @@ cheetah_startup: | |||
55 | or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5 | 62 | or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5 |
56 | stxa %g5, [%g0] ASI_DCU_CONTROL_REG | 63 | stxa %g5, [%g0] ASI_DCU_CONTROL_REG |
57 | membar #Sync | 64 | membar #Sync |
65 | /* fallthru */ | ||
58 | 66 | ||
59 | cheetah_generic_startup: | 67 | cheetah_generic_startup: |
60 | mov TSB_EXTENSION_P, %g3 | 68 | mov TSB_EXTENSION_P, %g3 |
@@ -70,7 +78,9 @@ cheetah_generic_startup: | |||
70 | stxa %g0, [%g3] ASI_DMMU | 78 | stxa %g0, [%g3] ASI_DMMU |
71 | stxa %g0, [%g3] ASI_IMMU | 79 | stxa %g0, [%g3] ASI_IMMU |
72 | membar #Sync | 80 | membar #Sync |
81 | /* fallthru */ | ||
73 | 82 | ||
83 | niagara_startup: | ||
74 | /* Disable STICK_INT interrupts. */ | 84 | /* Disable STICK_INT interrupts. */ |
75 | sethi %hi(0x80000000), %g5 | 85 | sethi %hi(0x80000000), %g5 |
76 | sllx %g5, 32, %g5 | 86 | sllx %g5, 32, %g5 |
@@ -85,17 +95,17 @@ spitfire_startup: | |||
85 | membar #Sync | 95 | membar #Sync |
86 | 96 | ||
87 | startup_continue: | 97 | startup_continue: |
88 | wrpr %g0, 15, %pil | ||
89 | |||
90 | sethi %hi(0x80000000), %g2 | 98 | sethi %hi(0x80000000), %g2 |
91 | sllx %g2, 32, %g2 | 99 | sllx %g2, 32, %g2 |
92 | wr %g2, 0, %tick_cmpr | 100 | wr %g2, 0, %tick_cmpr |
93 | 101 | ||
102 | mov %o0, %l0 | ||
103 | |||
104 | BRANCH_IF_SUN4V(g1, niagara_lock_tlb) | ||
105 | |||
94 | /* Call OBP by hand to lock KERNBASE into i/d tlbs. | 106 | /* Call OBP by hand to lock KERNBASE into i/d tlbs. |
95 | * We lock 2 consequetive entries if we are 'bigkernel'. | 107 | * We lock 2 consequetive entries if we are 'bigkernel'. |
96 | */ | 108 | */ |
97 | mov %o0, %l0 | ||
98 | |||
99 | sethi %hi(prom_entry_lock), %g2 | 109 | sethi %hi(prom_entry_lock), %g2 |
100 | 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 | 110 | 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 |
101 | membar #StoreLoad | #StoreStore | 111 | membar #StoreLoad | #StoreStore |
@@ -105,7 +115,6 @@ startup_continue: | |||
105 | sethi %hi(p1275buf), %g2 | 115 | sethi %hi(p1275buf), %g2 |
106 | or %g2, %lo(p1275buf), %g2 | 116 | or %g2, %lo(p1275buf), %g2 |
107 | ldx [%g2 + 0x10], %l2 | 117 | ldx [%g2 + 0x10], %l2 |
108 | mov %sp, %l1 | ||
109 | add %l2, -(192 + 128), %sp | 118 | add %l2, -(192 + 128), %sp |
110 | flushw | 119 | flushw |
111 | 120 | ||
@@ -142,8 +151,7 @@ startup_continue: | |||
142 | 151 | ||
143 | sethi %hi(bigkernel), %g2 | 152 | sethi %hi(bigkernel), %g2 |
144 | lduw [%g2 + %lo(bigkernel)], %g2 | 153 | lduw [%g2 + %lo(bigkernel)], %g2 |
145 | cmp %g2, 0 | 154 | brz,pt %g2, do_dtlb |
146 | be,pt %icc, do_dtlb | ||
147 | nop | 155 | nop |
148 | 156 | ||
149 | sethi %hi(call_method), %g2 | 157 | sethi %hi(call_method), %g2 |
@@ -214,8 +222,7 @@ do_dtlb: | |||
214 | 222 | ||
215 | sethi %hi(bigkernel), %g2 | 223 | sethi %hi(bigkernel), %g2 |
216 | lduw [%g2 + %lo(bigkernel)], %g2 | 224 | lduw [%g2 + %lo(bigkernel)], %g2 |
217 | cmp %g2, 0 | 225 | brz,pt %g2, do_unlock |
218 | be,pt %icc, do_unlock | ||
219 | nop | 226 | nop |
220 | 227 | ||
221 | sethi %hi(call_method), %g2 | 228 | sethi %hi(call_method), %g2 |
@@ -257,99 +264,180 @@ do_unlock: | |||
257 | stb %g0, [%g2 + %lo(prom_entry_lock)] | 264 | stb %g0, [%g2 + %lo(prom_entry_lock)] |
258 | membar #StoreStore | #StoreLoad | 265 | membar #StoreStore | #StoreLoad |
259 | 266 | ||
260 | mov %l1, %sp | 267 | ba,pt %xcc, after_lock_tlb |
261 | flushw | 268 | nop |
269 | |||
270 | niagara_lock_tlb: | ||
271 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
272 | sethi %hi(KERNBASE), %o0 | ||
273 | clr %o1 | ||
274 | sethi %hi(kern_locked_tte_data), %o2 | ||
275 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
276 | mov HV_MMU_IMMU, %o3 | ||
277 | ta HV_FAST_TRAP | ||
278 | |||
279 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
280 | sethi %hi(KERNBASE), %o0 | ||
281 | clr %o1 | ||
282 | sethi %hi(kern_locked_tte_data), %o2 | ||
283 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
284 | mov HV_MMU_DMMU, %o3 | ||
285 | ta HV_FAST_TRAP | ||
262 | 286 | ||
263 | mov %l0, %o0 | 287 | sethi %hi(bigkernel), %g2 |
288 | lduw [%g2 + %lo(bigkernel)], %g2 | ||
289 | brz,pt %g2, after_lock_tlb | ||
290 | nop | ||
264 | 291 | ||
292 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
293 | sethi %hi(KERNBASE + 0x400000), %o0 | ||
294 | clr %o1 | ||
295 | sethi %hi(kern_locked_tte_data), %o2 | ||
296 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
297 | sethi %hi(0x400000), %o3 | ||
298 | add %o2, %o3, %o2 | ||
299 | mov HV_MMU_IMMU, %o3 | ||
300 | ta HV_FAST_TRAP | ||
301 | |||
302 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
303 | sethi %hi(KERNBASE + 0x400000), %o0 | ||
304 | clr %o1 | ||
305 | sethi %hi(kern_locked_tte_data), %o2 | ||
306 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
307 | sethi %hi(0x400000), %o3 | ||
308 | add %o2, %o3, %o2 | ||
309 | mov HV_MMU_DMMU, %o3 | ||
310 | ta HV_FAST_TRAP | ||
311 | |||
312 | after_lock_tlb: | ||
265 | wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate | 313 | wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate |
266 | wr %g0, 0, %fprs | 314 | wr %g0, 0, %fprs |
267 | 315 | ||
268 | /* XXX Buggy PROM... */ | ||
269 | srl %o0, 0, %o0 | ||
270 | ldx [%o0], %g6 | ||
271 | |||
272 | wr %g0, ASI_P, %asi | 316 | wr %g0, ASI_P, %asi |
273 | 317 | ||
274 | mov PRIMARY_CONTEXT, %g7 | 318 | mov PRIMARY_CONTEXT, %g7 |
275 | stxa %g0, [%g7] ASI_DMMU | 319 | |
320 | 661: stxa %g0, [%g7] ASI_DMMU | ||
321 | .section .sun4v_1insn_patch, "ax" | ||
322 | .word 661b | ||
323 | stxa %g0, [%g7] ASI_MMU | ||
324 | .previous | ||
325 | |||
276 | membar #Sync | 326 | membar #Sync |
277 | mov SECONDARY_CONTEXT, %g7 | 327 | mov SECONDARY_CONTEXT, %g7 |
278 | stxa %g0, [%g7] ASI_DMMU | 328 | |
329 | 661: stxa %g0, [%g7] ASI_DMMU | ||
330 | .section .sun4v_1insn_patch, "ax" | ||
331 | .word 661b | ||
332 | stxa %g0, [%g7] ASI_MMU | ||
333 | .previous | ||
334 | |||
279 | membar #Sync | 335 | membar #Sync |
280 | 336 | ||
281 | mov 1, %g5 | 337 | /* Everything we do here, until we properly take over the |
282 | sllx %g5, THREAD_SHIFT, %g5 | 338 | * trap table, must be done with extreme care. We cannot |
283 | sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 | 339 | * make any references to %g6 (current thread pointer), |
284 | add %g6, %g5, %sp | 340 | * %g4 (current task pointer), or %g5 (base of current cpu's |
341 | * per-cpu area) until we properly take over the trap table | ||
342 | * from the firmware and hypervisor. | ||
343 | * | ||
344 | * Get onto temporary stack which is in the locked kernel image. | ||
345 | */ | ||
346 | sethi %hi(tramp_stack), %g1 | ||
347 | or %g1, %lo(tramp_stack), %g1 | ||
348 | add %g1, TRAMP_STACK_SIZE, %g1 | ||
349 | sub %g1, STACKFRAME_SZ + STACK_BIAS, %sp | ||
285 | mov 0, %fp | 350 | mov 0, %fp |
286 | 351 | ||
287 | wrpr %g0, 0, %wstate | 352 | /* Put garbage in these registers to trap any access to them. */ |
288 | wrpr %g0, 0, %tl | 353 | set 0xdeadbeef, %g4 |
354 | set 0xdeadbeef, %g5 | ||
355 | set 0xdeadbeef, %g6 | ||
289 | 356 | ||
290 | /* Setup the trap globals, then we can resurface. */ | 357 | call init_irqwork_curcpu |
291 | rdpr %pstate, %o1 | 358 | nop |
292 | mov %g6, %o2 | ||
293 | wrpr %o1, PSTATE_AG, %pstate | ||
294 | sethi %hi(sparc64_ttable_tl0), %g5 | ||
295 | wrpr %g5, %tba | ||
296 | mov %o2, %g6 | ||
297 | |||
298 | wrpr %o1, PSTATE_MG, %pstate | ||
299 | #define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) | ||
300 | #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) | ||
301 | |||
302 | mov TSB_REG, %g1 | ||
303 | stxa %g0, [%g1] ASI_DMMU | ||
304 | membar #Sync | ||
305 | mov TLB_SFSR, %g1 | ||
306 | sethi %uhi(KERN_HIGHBITS), %g2 | ||
307 | or %g2, %ulo(KERN_HIGHBITS), %g2 | ||
308 | sllx %g2, 32, %g2 | ||
309 | or %g2, KERN_LOWBITS, %g2 | ||
310 | 359 | ||
311 | BRANCH_IF_ANY_CHEETAH(g3,g7,9f) | 360 | sethi %hi(tlb_type), %g3 |
361 | lduw [%g3 + %lo(tlb_type)], %g2 | ||
362 | cmp %g2, 3 | ||
363 | bne,pt %icc, 1f | ||
364 | nop | ||
312 | 365 | ||
313 | ba,pt %xcc, 1f | 366 | call hard_smp_processor_id |
314 | nop | 367 | nop |
368 | |||
369 | mov %o0, %o1 | ||
370 | mov 0, %o0 | ||
371 | mov 0, %o2 | ||
372 | call sun4v_init_mondo_queues | ||
373 | mov 1, %o3 | ||
315 | 374 | ||
316 | 9: | 375 | 1: call init_cur_cpu_trap |
317 | sethi %uhi(VPTE_BASE_CHEETAH), %g3 | 376 | ldx [%l0], %o0 |
318 | or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 | 377 | |
319 | ba,pt %xcc, 2f | 378 | /* Start using proper page size encodings in ctx register. */ |
320 | sllx %g3, 32, %g3 | 379 | sethi %hi(sparc64_kern_pri_context), %g3 |
321 | 1: | 380 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 |
322 | sethi %uhi(VPTE_BASE_SPITFIRE), %g3 | 381 | mov PRIMARY_CONTEXT, %g1 |
323 | or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 | ||
324 | sllx %g3, 32, %g3 | ||
325 | 382 | ||
326 | 2: | 383 | 661: stxa %g2, [%g1] ASI_DMMU |
327 | clr %g7 | 384 | .section .sun4v_1insn_patch, "ax" |
328 | #undef KERN_HIGHBITS | 385 | .word 661b |
329 | #undef KERN_LOWBITS | 386 | stxa %g2, [%g1] ASI_MMU |
387 | .previous | ||
330 | 388 | ||
331 | wrpr %o1, 0x0, %pstate | 389 | membar #Sync |
332 | ldx [%g6 + TI_TASK], %g4 | ||
333 | 390 | ||
334 | wrpr %g0, 0, %wstate | 391 | wrpr %g0, 0, %wstate |
335 | 392 | ||
336 | call init_irqwork_curcpu | 393 | /* As a hack, put &init_thread_union into %g6. |
394 | * prom_world() loads from here to restore the %asi | ||
395 | * register. | ||
396 | */ | ||
397 | sethi %hi(init_thread_union), %g6 | ||
398 | or %g6, %lo(init_thread_union), %g6 | ||
399 | |||
400 | sethi %hi(is_sun4v), %o0 | ||
401 | lduw [%o0 + %lo(is_sun4v)], %o0 | ||
402 | brz,pt %o0, 1f | ||
337 | nop | 403 | nop |
338 | 404 | ||
339 | /* Start using proper page size encodings in ctx register. */ | 405 | TRAP_LOAD_TRAP_BLOCK(%g2, %g3) |
340 | sethi %hi(sparc64_kern_pri_context), %g3 | 406 | add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 |
341 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 | 407 | stxa %g2, [%g0] ASI_SCRATCHPAD |
342 | mov PRIMARY_CONTEXT, %g1 | 408 | |
343 | stxa %g2, [%g1] ASI_DMMU | 409 | /* Compute physical address: |
344 | membar #Sync | 410 | * |
411 | * paddr = kern_base + (mmfsa_vaddr - KERNBASE) | ||
412 | */ | ||
413 | sethi %hi(KERNBASE), %g3 | ||
414 | sub %g2, %g3, %g2 | ||
415 | sethi %hi(kern_base), %g3 | ||
416 | ldx [%g3 + %lo(kern_base)], %g3 | ||
417 | add %g2, %g3, %o1 | ||
418 | |||
419 | call prom_set_trap_table_sun4v | ||
420 | sethi %hi(sparc64_ttable_tl0), %o0 | ||
421 | |||
422 | ba,pt %xcc, 2f | ||
423 | nop | ||
424 | |||
425 | 1: call prom_set_trap_table | ||
426 | sethi %hi(sparc64_ttable_tl0), %o0 | ||
427 | |||
428 | 2: ldx [%l0], %g6 | ||
429 | ldx [%g6 + TI_TASK], %g4 | ||
430 | |||
431 | mov 1, %g5 | ||
432 | sllx %g5, THREAD_SHIFT, %g5 | ||
433 | sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 | ||
434 | add %g6, %g5, %sp | ||
435 | mov 0, %fp | ||
345 | 436 | ||
346 | rdpr %pstate, %o1 | 437 | rdpr %pstate, %o1 |
347 | or %o1, PSTATE_IE, %o1 | 438 | or %o1, PSTATE_IE, %o1 |
348 | wrpr %o1, 0, %pstate | 439 | wrpr %o1, 0, %pstate |
349 | 440 | ||
350 | call prom_set_trap_table | ||
351 | sethi %hi(sparc64_ttable_tl0), %o0 | ||
352 | |||
353 | call smp_callin | 441 | call smp_callin |
354 | nop | 442 | nop |
355 | call cpu_idle | 443 | call cpu_idle |
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 8d44ae5a15e3..7f7dba0ca96a 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
39 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
40 | #include <asm/kdebug.h> | 40 | #include <asm/kdebug.h> |
41 | #include <asm/head.h> | ||
41 | #ifdef CONFIG_KMOD | 42 | #ifdef CONFIG_KMOD |
42 | #include <linux/kmod.h> | 43 | #include <linux/kmod.h> |
43 | #endif | 44 | #endif |
@@ -72,12 +73,14 @@ struct tl1_traplog { | |||
72 | 73 | ||
73 | static void dump_tl1_traplog(struct tl1_traplog *p) | 74 | static void dump_tl1_traplog(struct tl1_traplog *p) |
74 | { | 75 | { |
75 | int i; | 76 | int i, limit; |
77 | |||
78 | printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, " | ||
79 | "dumping track stack.\n", p->tl); | ||
76 | 80 | ||
77 | printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n", | 81 | limit = (tlb_type == hypervisor) ? 2 : 4; |
78 | p->tl); | 82 | for (i = 0; i < limit; i++) { |
79 | for (i = 0; i < 4; i++) { | 83 | printk(KERN_EMERG |
80 | printk(KERN_CRIT | ||
81 | "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " | 84 | "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " |
82 | "TNPC[%016lx] TT[%lx]\n", | 85 | "TNPC[%016lx] TT[%lx]\n", |
83 | i + 1, | 86 | i + 1, |
@@ -179,6 +182,45 @@ void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr | |||
179 | spitfire_insn_access_exception(regs, sfsr, sfar); | 182 | spitfire_insn_access_exception(regs, sfsr, sfar); |
180 | } | 183 | } |
181 | 184 | ||
185 | void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
186 | { | ||
187 | unsigned short type = (type_ctx >> 16); | ||
188 | unsigned short ctx = (type_ctx & 0xffff); | ||
189 | siginfo_t info; | ||
190 | |||
191 | if (notify_die(DIE_TRAP, "instruction access exception", regs, | ||
192 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
193 | return; | ||
194 | |||
195 | if (regs->tstate & TSTATE_PRIV) { | ||
196 | printk("sun4v_insn_access_exception: ADDR[%016lx] " | ||
197 | "CTX[%04x] TYPE[%04x], going.\n", | ||
198 | addr, ctx, type); | ||
199 | die_if_kernel("Iax", regs); | ||
200 | } | ||
201 | |||
202 | if (test_thread_flag(TIF_32BIT)) { | ||
203 | regs->tpc &= 0xffffffff; | ||
204 | regs->tnpc &= 0xffffffff; | ||
205 | } | ||
206 | info.si_signo = SIGSEGV; | ||
207 | info.si_errno = 0; | ||
208 | info.si_code = SEGV_MAPERR; | ||
209 | info.si_addr = (void __user *) addr; | ||
210 | info.si_trapno = 0; | ||
211 | force_sig_info(SIGSEGV, &info, current); | ||
212 | } | ||
213 | |||
214 | void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
215 | { | ||
216 | if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, | ||
217 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
218 | return; | ||
219 | |||
220 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
221 | sun4v_insn_access_exception(regs, addr, type_ctx); | ||
222 | } | ||
223 | |||
182 | void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) | 224 | void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) |
183 | { | 225 | { |
184 | siginfo_t info; | 226 | siginfo_t info; |
@@ -227,6 +269,45 @@ void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr | |||
227 | spitfire_data_access_exception(regs, sfsr, sfar); | 269 | spitfire_data_access_exception(regs, sfsr, sfar); |
228 | } | 270 | } |
229 | 271 | ||
272 | void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
273 | { | ||
274 | unsigned short type = (type_ctx >> 16); | ||
275 | unsigned short ctx = (type_ctx & 0xffff); | ||
276 | siginfo_t info; | ||
277 | |||
278 | if (notify_die(DIE_TRAP, "data access exception", regs, | ||
279 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
280 | return; | ||
281 | |||
282 | if (regs->tstate & TSTATE_PRIV) { | ||
283 | printk("sun4v_data_access_exception: ADDR[%016lx] " | ||
284 | "CTX[%04x] TYPE[%04x], going.\n", | ||
285 | addr, ctx, type); | ||
286 | die_if_kernel("Dax", regs); | ||
287 | } | ||
288 | |||
289 | if (test_thread_flag(TIF_32BIT)) { | ||
290 | regs->tpc &= 0xffffffff; | ||
291 | regs->tnpc &= 0xffffffff; | ||
292 | } | ||
293 | info.si_signo = SIGSEGV; | ||
294 | info.si_errno = 0; | ||
295 | info.si_code = SEGV_MAPERR; | ||
296 | info.si_addr = (void __user *) addr; | ||
297 | info.si_trapno = 0; | ||
298 | force_sig_info(SIGSEGV, &info, current); | ||
299 | } | ||
300 | |||
301 | void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
302 | { | ||
303 | if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, | ||
304 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
305 | return; | ||
306 | |||
307 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
308 | sun4v_data_access_exception(regs, addr, type_ctx); | ||
309 | } | ||
310 | |||
230 | #ifdef CONFIG_PCI | 311 | #ifdef CONFIG_PCI |
231 | /* This is really pathetic... */ | 312 | /* This is really pathetic... */ |
232 | extern volatile int pci_poke_in_progress; | 313 | extern volatile int pci_poke_in_progress; |
@@ -788,7 +869,8 @@ void __init cheetah_ecache_flush_init(void) | |||
788 | cheetah_error_log[i].afsr = CHAFSR_INVALID; | 869 | cheetah_error_log[i].afsr = CHAFSR_INVALID; |
789 | 870 | ||
790 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | 871 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); |
791 | if ((ver >> 32) == 0x003e0016) { | 872 | if ((ver >> 32) == __JALAPENO_ID || |
873 | (ver >> 32) == __SERRANO_ID) { | ||
792 | cheetah_error_table = &__jalapeno_error_table[0]; | 874 | cheetah_error_table = &__jalapeno_error_table[0]; |
793 | cheetah_afsr_errors = JPAFSR_ERRORS; | 875 | cheetah_afsr_errors = JPAFSR_ERRORS; |
794 | } else if ((ver >> 32) == 0x003e0015) { | 876 | } else if ((ver >> 32) == 0x003e0015) { |
@@ -1666,6 +1748,238 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) | |||
1666 | regs->tpc); | 1748 | regs->tpc); |
1667 | } | 1749 | } |
1668 | 1750 | ||
1751 | struct sun4v_error_entry { | ||
1752 | u64 err_handle; | ||
1753 | u64 err_stick; | ||
1754 | |||
1755 | u32 err_type; | ||
1756 | #define SUN4V_ERR_TYPE_UNDEFINED 0 | ||
1757 | #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1 | ||
1758 | #define SUN4V_ERR_TYPE_PRECISE_NONRES 2 | ||
1759 | #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3 | ||
1760 | #define SUN4V_ERR_TYPE_WARNING_RES 4 | ||
1761 | |||
1762 | u32 err_attrs; | ||
1763 | #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001 | ||
1764 | #define SUN4V_ERR_ATTRS_MEMORY 0x00000002 | ||
1765 | #define SUN4V_ERR_ATTRS_PIO 0x00000004 | ||
1766 | #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008 | ||
1767 | #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010 | ||
1768 | #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000 | ||
1769 | #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000 | ||
1770 | #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000 | ||
1771 | |||
1772 | u64 err_raddr; | ||
1773 | u32 err_size; | ||
1774 | u16 err_cpu; | ||
1775 | u16 err_pad; | ||
1776 | }; | ||
1777 | |||
1778 | static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); | ||
1779 | static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); | ||
1780 | |||
1781 | static const char *sun4v_err_type_to_str(u32 type) | ||
1782 | { | ||
1783 | switch (type) { | ||
1784 | case SUN4V_ERR_TYPE_UNDEFINED: | ||
1785 | return "undefined"; | ||
1786 | case SUN4V_ERR_TYPE_UNCORRECTED_RES: | ||
1787 | return "uncorrected resumable"; | ||
1788 | case SUN4V_ERR_TYPE_PRECISE_NONRES: | ||
1789 | return "precise nonresumable"; | ||
1790 | case SUN4V_ERR_TYPE_DEFERRED_NONRES: | ||
1791 | return "deferred nonresumable"; | ||
1792 | case SUN4V_ERR_TYPE_WARNING_RES: | ||
1793 | return "warning resumable"; | ||
1794 | default: | ||
1795 | return "unknown"; | ||
1796 | }; | ||
1797 | } | ||
1798 | |||
1799 | static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) | ||
1800 | { | ||
1801 | int cnt; | ||
1802 | |||
1803 | printk("%s: Reporting on cpu %d\n", pfx, cpu); | ||
1804 | printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n", | ||
1805 | pfx, | ||
1806 | ent->err_handle, ent->err_stick, | ||
1807 | ent->err_type, | ||
1808 | sun4v_err_type_to_str(ent->err_type)); | ||
1809 | printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n", | ||
1810 | pfx, | ||
1811 | ent->err_attrs, | ||
1812 | ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ? | ||
1813 | "processor" : ""), | ||
1814 | ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ? | ||
1815 | "memory" : ""), | ||
1816 | ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ? | ||
1817 | "pio" : ""), | ||
1818 | ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ? | ||
1819 | "integer-regs" : ""), | ||
1820 | ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ? | ||
1821 | "fpu-regs" : ""), | ||
1822 | ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ? | ||
1823 | "user" : ""), | ||
1824 | ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ? | ||
1825 | "privileged" : ""), | ||
1826 | ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ? | ||
1827 | "queue-full" : "")); | ||
1828 | printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n", | ||
1829 | pfx, | ||
1830 | ent->err_raddr, ent->err_size, ent->err_cpu); | ||
1831 | |||
1832 | if ((cnt = atomic_read(ocnt)) != 0) { | ||
1833 | atomic_set(ocnt, 0); | ||
1834 | wmb(); | ||
1835 | printk("%s: Queue overflowed %d times.\n", | ||
1836 | pfx, cnt); | ||
1837 | } | ||
1838 | } | ||
1839 | |||
1840 | /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate. | ||
1841 | * Log the event and clear the first word of the entry. | ||
1842 | */ | ||
1843 | void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) | ||
1844 | { | ||
1845 | struct sun4v_error_entry *ent, local_copy; | ||
1846 | struct trap_per_cpu *tb; | ||
1847 | unsigned long paddr; | ||
1848 | int cpu; | ||
1849 | |||
1850 | cpu = get_cpu(); | ||
1851 | |||
1852 | tb = &trap_block[cpu]; | ||
1853 | paddr = tb->resum_kernel_buf_pa + offset; | ||
1854 | ent = __va(paddr); | ||
1855 | |||
1856 | memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); | ||
1857 | |||
1858 | /* We have a local copy now, so release the entry. */ | ||
1859 | ent->err_handle = 0; | ||
1860 | wmb(); | ||
1861 | |||
1862 | put_cpu(); | ||
1863 | |||
1864 | sun4v_log_error(&local_copy, cpu, | ||
1865 | KERN_ERR "RESUMABLE ERROR", | ||
1866 | &sun4v_resum_oflow_cnt); | ||
1867 | } | ||
1868 | |||
1869 | /* If we try to printk() we'll probably make matters worse, by trying | ||
1870 | * to retake locks this cpu already holds or causing more errors. So | ||
1871 | * just bump a counter, and we'll report these counter bumps above. | ||
1872 | */ | ||
1873 | void sun4v_resum_overflow(struct pt_regs *regs) | ||
1874 | { | ||
1875 | atomic_inc(&sun4v_resum_oflow_cnt); | ||
1876 | } | ||
1877 | |||
1878 | /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate. | ||
1879 | * Log the event, clear the first word of the entry, and die. | ||
1880 | */ | ||
1881 | void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) | ||
1882 | { | ||
1883 | struct sun4v_error_entry *ent, local_copy; | ||
1884 | struct trap_per_cpu *tb; | ||
1885 | unsigned long paddr; | ||
1886 | int cpu; | ||
1887 | |||
1888 | cpu = get_cpu(); | ||
1889 | |||
1890 | tb = &trap_block[cpu]; | ||
1891 | paddr = tb->nonresum_kernel_buf_pa + offset; | ||
1892 | ent = __va(paddr); | ||
1893 | |||
1894 | memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); | ||
1895 | |||
1896 | /* We have a local copy now, so release the entry. */ | ||
1897 | ent->err_handle = 0; | ||
1898 | wmb(); | ||
1899 | |||
1900 | put_cpu(); | ||
1901 | |||
1902 | #ifdef CONFIG_PCI | ||
1903 | /* Check for the special PCI poke sequence. */ | ||
1904 | if (pci_poke_in_progress && pci_poke_cpu == cpu) { | ||
1905 | pci_poke_faulted = 1; | ||
1906 | regs->tpc += 4; | ||
1907 | regs->tnpc = regs->tpc + 4; | ||
1908 | return; | ||
1909 | } | ||
1910 | #endif | ||
1911 | |||
1912 | sun4v_log_error(&local_copy, cpu, | ||
1913 | KERN_EMERG "NON-RESUMABLE ERROR", | ||
1914 | &sun4v_nonresum_oflow_cnt); | ||
1915 | |||
1916 | panic("Non-resumable error."); | ||
1917 | } | ||
1918 | |||
1919 | /* If we try to printk() we'll probably make matters worse, by trying | ||
1920 | * to retake locks this cpu already holds or causing more errors. So | ||
1921 | * just bump a counter, and we'll report these counter bumps above. | ||
1922 | */ | ||
1923 | void sun4v_nonresum_overflow(struct pt_regs *regs) | ||
1924 | { | ||
1925 | /* XXX Actually even this can make not that much sense. Perhaps | ||
1926 | * XXX we should just pull the plug and panic directly from here? | ||
1927 | */ | ||
1928 | atomic_inc(&sun4v_nonresum_oflow_cnt); | ||
1929 | } | ||
1930 | |||
1931 | unsigned long sun4v_err_itlb_vaddr; | ||
1932 | unsigned long sun4v_err_itlb_ctx; | ||
1933 | unsigned long sun4v_err_itlb_pte; | ||
1934 | unsigned long sun4v_err_itlb_error; | ||
1935 | |||
1936 | void sun4v_itlb_error_report(struct pt_regs *regs, int tl) | ||
1937 | { | ||
1938 | if (tl > 1) | ||
1939 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
1940 | |||
1941 | printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", | ||
1942 | regs->tpc, tl); | ||
1943 | printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " | ||
1944 | "pte[%lx] error[%lx]\n", | ||
1945 | sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, | ||
1946 | sun4v_err_itlb_pte, sun4v_err_itlb_error); | ||
1947 | |||
1948 | prom_halt(); | ||
1949 | } | ||
1950 | |||
1951 | unsigned long sun4v_err_dtlb_vaddr; | ||
1952 | unsigned long sun4v_err_dtlb_ctx; | ||
1953 | unsigned long sun4v_err_dtlb_pte; | ||
1954 | unsigned long sun4v_err_dtlb_error; | ||
1955 | |||
1956 | void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) | ||
1957 | { | ||
1958 | if (tl > 1) | ||
1959 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
1960 | |||
1961 | printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", | ||
1962 | regs->tpc, tl); | ||
1963 | printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " | ||
1964 | "pte[%lx] error[%lx]\n", | ||
1965 | sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, | ||
1966 | sun4v_err_dtlb_pte, sun4v_err_dtlb_error); | ||
1967 | |||
1968 | prom_halt(); | ||
1969 | } | ||
1970 | |||
1971 | void hypervisor_tlbop_error(unsigned long err, unsigned long op) | ||
1972 | { | ||
1973 | printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n", | ||
1974 | err, op); | ||
1975 | } | ||
1976 | |||
1977 | void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) | ||
1978 | { | ||
1979 | printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n", | ||
1980 | err, op); | ||
1981 | } | ||
1982 | |||
1669 | void do_fpe_common(struct pt_regs *regs) | 1983 | void do_fpe_common(struct pt_regs *regs) |
1670 | { | 1984 | { |
1671 | if (regs->tstate & TSTATE_PRIV) { | 1985 | if (regs->tstate & TSTATE_PRIV) { |
@@ -1924,10 +2238,11 @@ void die_if_kernel(char *str, struct pt_regs *regs) | |||
1924 | } | 2238 | } |
1925 | user_instruction_dump ((unsigned int __user *) regs->tpc); | 2239 | user_instruction_dump ((unsigned int __user *) regs->tpc); |
1926 | } | 2240 | } |
2241 | #if 0 | ||
1927 | #ifdef CONFIG_SMP | 2242 | #ifdef CONFIG_SMP |
1928 | smp_report_regs(); | 2243 | smp_report_regs(); |
1929 | #endif | 2244 | #endif |
1930 | 2245 | #endif | |
1931 | if (regs->tstate & TSTATE_PRIV) | 2246 | if (regs->tstate & TSTATE_PRIV) |
1932 | do_exit(SIGKILL); | 2247 | do_exit(SIGKILL); |
1933 | do_exit(SIGSEGV); | 2248 | do_exit(SIGSEGV); |
@@ -1958,6 +2273,11 @@ void do_illegal_instruction(struct pt_regs *regs) | |||
1958 | } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { | 2273 | } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { |
1959 | if (handle_ldf_stq(insn, regs)) | 2274 | if (handle_ldf_stq(insn, regs)) |
1960 | return; | 2275 | return; |
2276 | } else if (tlb_type == hypervisor) { | ||
2277 | extern int vis_emul(struct pt_regs *, unsigned int); | ||
2278 | |||
2279 | if (!vis_emul(regs, insn)) | ||
2280 | return; | ||
1961 | } | 2281 | } |
1962 | } | 2282 | } |
1963 | info.si_signo = SIGILL; | 2283 | info.si_signo = SIGILL; |
@@ -1968,6 +2288,8 @@ void do_illegal_instruction(struct pt_regs *regs) | |||
1968 | force_sig_info(SIGILL, &info, current); | 2288 | force_sig_info(SIGILL, &info, current); |
1969 | } | 2289 | } |
1970 | 2290 | ||
2291 | extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); | ||
2292 | |||
1971 | void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) | 2293 | void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) |
1972 | { | 2294 | { |
1973 | siginfo_t info; | 2295 | siginfo_t info; |
@@ -1977,13 +2299,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo | |||
1977 | return; | 2299 | return; |
1978 | 2300 | ||
1979 | if (regs->tstate & TSTATE_PRIV) { | 2301 | if (regs->tstate & TSTATE_PRIV) { |
1980 | extern void kernel_unaligned_trap(struct pt_regs *regs, | 2302 | kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); |
1981 | unsigned int insn, | ||
1982 | unsigned long sfar, | ||
1983 | unsigned long sfsr); | ||
1984 | |||
1985 | kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc), | ||
1986 | sfar, sfsr); | ||
1987 | return; | 2303 | return; |
1988 | } | 2304 | } |
1989 | info.si_signo = SIGBUS; | 2305 | info.si_signo = SIGBUS; |
@@ -1994,6 +2310,26 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo | |||
1994 | force_sig_info(SIGBUS, &info, current); | 2310 | force_sig_info(SIGBUS, &info, current); |
1995 | } | 2311 | } |
1996 | 2312 | ||
2313 | void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
2314 | { | ||
2315 | siginfo_t info; | ||
2316 | |||
2317 | if (notify_die(DIE_TRAP, "memory address unaligned", regs, | ||
2318 | 0, 0x34, SIGSEGV) == NOTIFY_STOP) | ||
2319 | return; | ||
2320 | |||
2321 | if (regs->tstate & TSTATE_PRIV) { | ||
2322 | kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); | ||
2323 | return; | ||
2324 | } | ||
2325 | info.si_signo = SIGBUS; | ||
2326 | info.si_errno = 0; | ||
2327 | info.si_code = BUS_ADRALN; | ||
2328 | info.si_addr = (void __user *) addr; | ||
2329 | info.si_trapno = 0; | ||
2330 | force_sig_info(SIGBUS, &info, current); | ||
2331 | } | ||
2332 | |||
1997 | void do_privop(struct pt_regs *regs) | 2333 | void do_privop(struct pt_regs *regs) |
1998 | { | 2334 | { |
1999 | siginfo_t info; | 2335 | siginfo_t info; |
@@ -2130,7 +2466,22 @@ void do_getpsr(struct pt_regs *regs) | |||
2130 | } | 2466 | } |
2131 | } | 2467 | } |
2132 | 2468 | ||
2469 | struct trap_per_cpu trap_block[NR_CPUS]; | ||
2470 | |||
2471 | /* This can get invoked before sched_init() so play it super safe | ||
2472 | * and use hard_smp_processor_id(). | ||
2473 | */ | ||
2474 | void init_cur_cpu_trap(struct thread_info *t) | ||
2475 | { | ||
2476 | int cpu = hard_smp_processor_id(); | ||
2477 | struct trap_per_cpu *p = &trap_block[cpu]; | ||
2478 | |||
2479 | p->thread = t; | ||
2480 | p->pgd_paddr = 0; | ||
2481 | } | ||
2482 | |||
2133 | extern void thread_info_offsets_are_bolixed_dave(void); | 2483 | extern void thread_info_offsets_are_bolixed_dave(void); |
2484 | extern void trap_per_cpu_offsets_are_bolixed_dave(void); | ||
2134 | 2485 | ||
2135 | /* Only invoked on boot processor. */ | 2486 | /* Only invoked on boot processor. */ |
2136 | void __init trap_init(void) | 2487 | void __init trap_init(void) |
@@ -2154,7 +2505,6 @@ void __init trap_init(void) | |||
2154 | TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) || | 2505 | TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) || |
2155 | TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || | 2506 | TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || |
2156 | TI_PCR != offsetof(struct thread_info, pcr_reg) || | 2507 | TI_PCR != offsetof(struct thread_info, pcr_reg) || |
2157 | TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) || | ||
2158 | TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || | 2508 | TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || |
2159 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || | 2509 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || |
2160 | TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || | 2510 | TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || |
@@ -2165,6 +2515,29 @@ void __init trap_init(void) | |||
2165 | (TI_FPREGS & (64 - 1))) | 2515 | (TI_FPREGS & (64 - 1))) |
2166 | thread_info_offsets_are_bolixed_dave(); | 2516 | thread_info_offsets_are_bolixed_dave(); |
2167 | 2517 | ||
2518 | if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) || | ||
2519 | (TRAP_PER_CPU_PGD_PADDR != | ||
2520 | offsetof(struct trap_per_cpu, pgd_paddr)) || | ||
2521 | (TRAP_PER_CPU_CPU_MONDO_PA != | ||
2522 | offsetof(struct trap_per_cpu, cpu_mondo_pa)) || | ||
2523 | (TRAP_PER_CPU_DEV_MONDO_PA != | ||
2524 | offsetof(struct trap_per_cpu, dev_mondo_pa)) || | ||
2525 | (TRAP_PER_CPU_RESUM_MONDO_PA != | ||
2526 | offsetof(struct trap_per_cpu, resum_mondo_pa)) || | ||
2527 | (TRAP_PER_CPU_RESUM_KBUF_PA != | ||
2528 | offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || | ||
2529 | (TRAP_PER_CPU_NONRESUM_MONDO_PA != | ||
2530 | offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || | ||
2531 | (TRAP_PER_CPU_NONRESUM_KBUF_PA != | ||
2532 | offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || | ||
2533 | (TRAP_PER_CPU_FAULT_INFO != | ||
2534 | offsetof(struct trap_per_cpu, fault_info)) || | ||
2535 | (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != | ||
2536 | offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || | ||
2537 | (TRAP_PER_CPU_CPU_LIST_PA != | ||
2538 | offsetof(struct trap_per_cpu, cpu_list_pa))) | ||
2539 | trap_per_cpu_offsets_are_bolixed_dave(); | ||
2540 | |||
2168 | /* Attach to the address space of init_task. On SMP we | 2541 | /* Attach to the address space of init_task. On SMP we |
2169 | * do this in smp.c:smp_callin for other cpus. | 2542 | * do this in smp.c:smp_callin for other cpus. |
2170 | */ | 2543 | */ |
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S new file mode 100644 index 000000000000..118baea44f69 --- /dev/null +++ b/arch/sparc64/kernel/tsb.S | |||
@@ -0,0 +1,442 @@ | |||
1 | /* tsb.S: Sparc64 TSB table handling. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <asm/tsb.h> | ||
7 | #include <asm/hypervisor.h> | ||
8 | |||
9 | .text | ||
10 | .align 32 | ||
11 | |||
12 | /* Invoked from TLB miss handler, we are in the | ||
13 | * MMU global registers and they are setup like | ||
14 | * this: | ||
15 | * | ||
16 | * %g1: TSB entry pointer | ||
17 | * %g2: available temporary | ||
18 | * %g3: FAULT_CODE_{D,I}TLB | ||
19 | * %g4: available temporary | ||
20 | * %g5: available temporary | ||
21 | * %g6: TAG TARGET | ||
22 | * %g7: available temporary, will be loaded by us with | ||
23 | * the physical address base of the linux page | ||
24 | * tables for the current address space | ||
25 | */ | ||
26 | tsb_miss_dtlb: | ||
27 | mov TLB_TAG_ACCESS, %g4 | ||
28 | ba,pt %xcc, tsb_miss_page_table_walk | ||
29 | ldxa [%g4] ASI_DMMU, %g4 | ||
30 | |||
31 | tsb_miss_itlb: | ||
32 | mov TLB_TAG_ACCESS, %g4 | ||
33 | ba,pt %xcc, tsb_miss_page_table_walk | ||
34 | ldxa [%g4] ASI_IMMU, %g4 | ||
35 | |||
36 | /* At this point we have: | ||
37 | * %g1 -- TSB entry address | ||
38 | * %g3 -- FAULT_CODE_{D,I}TLB | ||
39 | * %g4 -- missing virtual address | ||
40 | * %g6 -- TAG TARGET (vaddr >> 22) | ||
41 | */ | ||
42 | tsb_miss_page_table_walk: | ||
43 | TRAP_LOAD_PGD_PHYS(%g7, %g5) | ||
44 | |||
45 | /* And now we have the PGD base physical address in %g7. */ | ||
46 | tsb_miss_page_table_walk_sun4v_fastpath: | ||
47 | USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) | ||
48 | |||
49 | /* At this point we have: | ||
50 | * %g1 -- TSB entry address | ||
51 | * %g3 -- FAULT_CODE_{D,I}TLB | ||
52 | * %g5 -- physical address of PTE in Linux page tables | ||
53 | * %g6 -- TAG TARGET (vaddr >> 22) | ||
54 | */ | ||
55 | tsb_reload: | ||
56 | TSB_LOCK_TAG(%g1, %g2, %g7) | ||
57 | |||
58 | /* Load and check PTE. */ | ||
59 | ldxa [%g5] ASI_PHYS_USE_EC, %g5 | ||
60 | mov 1, %g7 | ||
61 | sllx %g7, TSB_TAG_INVALID_BIT, %g7 | ||
62 | brgez,a,pn %g5, tsb_do_fault | ||
63 | TSB_STORE(%g1, %g7) | ||
64 | |||
65 | TSB_WRITE(%g1, %g5, %g6) | ||
66 | |||
67 | /* Finally, load TLB and return from trap. */ | ||
68 | tsb_tlb_reload: | ||
69 | cmp %g3, FAULT_CODE_DTLB | ||
70 | bne,pn %xcc, tsb_itlb_load | ||
71 | nop | ||
72 | |||
73 | tsb_dtlb_load: | ||
74 | |||
75 | 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN | ||
76 | retry | ||
77 | .section .sun4v_2insn_patch, "ax" | ||
78 | .word 661b | ||
79 | nop | ||
80 | nop | ||
81 | .previous | ||
82 | |||
83 | /* For sun4v the ASI_DTLB_DATA_IN store and the retry | ||
84 | * instruction get nop'd out and we get here to branch | ||
85 | * to the sun4v tlb load code. The registers are setup | ||
86 | * as follows: | ||
87 | * | ||
88 | * %g4: vaddr | ||
89 | * %g5: PTE | ||
90 | * %g6: TAG | ||
91 | * | ||
92 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
93 | * up here. | ||
94 | */ | ||
95 | ba,pt %xcc, sun4v_dtlb_load | ||
96 | mov %g5, %g3 | ||
97 | |||
98 | tsb_itlb_load: | ||
99 | /* Executable bit must be set. */ | ||
100 | 661: andcc %g5, _PAGE_EXEC_4U, %g0 | ||
101 | .section .sun4v_1insn_patch, "ax" | ||
102 | .word 661b | ||
103 | andcc %g5, _PAGE_EXEC_4V, %g0 | ||
104 | .previous | ||
105 | |||
106 | be,pn %xcc, tsb_do_fault | ||
107 | nop | ||
108 | |||
109 | 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN | ||
110 | retry | ||
111 | .section .sun4v_2insn_patch, "ax" | ||
112 | .word 661b | ||
113 | nop | ||
114 | nop | ||
115 | .previous | ||
116 | |||
117 | /* For sun4v the ASI_ITLB_DATA_IN store and the retry | ||
118 | * instruction get nop'd out and we get here to branch | ||
119 | * to the sun4v tlb load code. The registers are setup | ||
120 | * as follows: | ||
121 | * | ||
122 | * %g4: vaddr | ||
123 | * %g5: PTE | ||
124 | * %g6: TAG | ||
125 | * | ||
126 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
127 | * up here. | ||
128 | */ | ||
129 | ba,pt %xcc, sun4v_itlb_load | ||
130 | mov %g5, %g3 | ||
131 | |||
132 | /* No valid entry in the page tables, do full fault | ||
133 | * processing. | ||
134 | */ | ||
135 | |||
136 | .globl tsb_do_fault | ||
137 | tsb_do_fault: | ||
138 | cmp %g3, FAULT_CODE_DTLB | ||
139 | |||
140 | 661: rdpr %pstate, %g5 | ||
141 | wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate | ||
142 | .section .sun4v_2insn_patch, "ax" | ||
143 | .word 661b | ||
144 | SET_GL(1) | ||
145 | ldxa [%g0] ASI_SCRATCHPAD, %g4 | ||
146 | .previous | ||
147 | |||
148 | bne,pn %xcc, tsb_do_itlb_fault | ||
149 | nop | ||
150 | |||
151 | tsb_do_dtlb_fault: | ||
152 | rdpr %tl, %g3 | ||
153 | cmp %g3, 1 | ||
154 | |||
155 | 661: mov TLB_TAG_ACCESS, %g4 | ||
156 | ldxa [%g4] ASI_DMMU, %g5 | ||
157 | .section .sun4v_2insn_patch, "ax" | ||
158 | .word 661b | ||
159 | ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
160 | nop | ||
161 | .previous | ||
162 | |||
163 | be,pt %xcc, sparc64_realfault_common | ||
164 | mov FAULT_CODE_DTLB, %g4 | ||
165 | ba,pt %xcc, winfix_trampoline | ||
166 | nop | ||
167 | |||
168 | tsb_do_itlb_fault: | ||
169 | rdpr %tpc, %g5 | ||
170 | ba,pt %xcc, sparc64_realfault_common | ||
171 | mov FAULT_CODE_ITLB, %g4 | ||
172 | |||
173 | .globl sparc64_realfault_common | ||
174 | sparc64_realfault_common: | ||
175 | /* fault code in %g4, fault address in %g5, etrap will | ||
176 | * preserve these two values in %l4 and %l5 respectively | ||
177 | */ | ||
178 | ba,pt %xcc, etrap ! Save trap state | ||
179 | 1: rd %pc, %g7 ! ... | ||
180 | stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code | ||
181 | stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address | ||
182 | call do_sparc64_fault ! Call fault handler | ||
183 | add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg | ||
184 | ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state | ||
185 | nop ! Delay slot (fill me) | ||
186 | |||
187 | winfix_trampoline: | ||
188 | rdpr %tpc, %g3 ! Prepare winfixup TNPC | ||
189 | or %g3, 0x7c, %g3 ! Compute branch offset | ||
190 | wrpr %g3, %tnpc ! Write it into TNPC | ||
191 | done ! Trap return | ||
192 | |||
193 | /* Insert an entry into the TSB. | ||
194 | * | ||
195 | * %o0: TSB entry pointer (virt or phys address) | ||
196 | * %o1: tag | ||
197 | * %o2: pte | ||
198 | */ | ||
199 | .align 32 | ||
200 | .globl __tsb_insert | ||
201 | __tsb_insert: | ||
202 | rdpr %pstate, %o5 | ||
203 | wrpr %o5, PSTATE_IE, %pstate | ||
204 | TSB_LOCK_TAG(%o0, %g2, %g3) | ||
205 | TSB_WRITE(%o0, %o2, %o1) | ||
206 | wrpr %o5, %pstate | ||
207 | retl | ||
208 | nop | ||
209 | .size __tsb_insert, .-__tsb_insert | ||
210 | |||
211 | /* Flush the given TSB entry if it has the matching | ||
212 | * tag. | ||
213 | * | ||
214 | * %o0: TSB entry pointer (virt or phys address) | ||
215 | * %o1: tag | ||
216 | */ | ||
217 | .align 32 | ||
218 | .globl tsb_flush | ||
219 | .type tsb_flush,#function | ||
220 | tsb_flush: | ||
221 | sethi %hi(TSB_TAG_LOCK_HIGH), %g2 | ||
222 | 1: TSB_LOAD_TAG(%o0, %g1) | ||
223 | srlx %g1, 32, %o3 | ||
224 | andcc %o3, %g2, %g0 | ||
225 | bne,pn %icc, 1b | ||
226 | membar #LoadLoad | ||
227 | cmp %g1, %o1 | ||
228 | mov 1, %o3 | ||
229 | bne,pt %xcc, 2f | ||
230 | sllx %o3, TSB_TAG_INVALID_BIT, %o3 | ||
231 | TSB_CAS_TAG(%o0, %g1, %o3) | ||
232 | cmp %g1, %o3 | ||
233 | bne,pn %xcc, 1b | ||
234 | nop | ||
235 | 2: retl | ||
236 | TSB_MEMBAR | ||
237 | .size tsb_flush, .-tsb_flush | ||
238 | |||
239 | /* Reload MMU related context switch state at | ||
240 | * schedule() time. | ||
241 | * | ||
242 | * %o0: page table physical address | ||
243 | * %o1: TSB register value | ||
244 | * %o2: TSB virtual address | ||
245 | * %o3: TSB mapping locked PTE | ||
246 | * %o4: Hypervisor TSB descriptor physical address | ||
247 | * | ||
248 | * We have to run this whole thing with interrupts | ||
249 | * disabled so that the current cpu doesn't change | ||
250 | * due to preemption. | ||
251 | */ | ||
252 | .align 32 | ||
253 | .globl __tsb_context_switch | ||
254 | .type __tsb_context_switch,#function | ||
255 | __tsb_context_switch: | ||
256 | rdpr %pstate, %o5 | ||
257 | wrpr %o5, PSTATE_IE, %pstate | ||
258 | |||
259 | ldub [%g6 + TI_CPU], %g1 | ||
260 | sethi %hi(trap_block), %g2 | ||
261 | sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1 | ||
262 | or %g2, %lo(trap_block), %g2 | ||
263 | add %g2, %g1, %g2 | ||
264 | stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] | ||
265 | |||
266 | sethi %hi(tlb_type), %g1 | ||
267 | lduw [%g1 + %lo(tlb_type)], %g1 | ||
268 | cmp %g1, 3 | ||
269 | bne,pt %icc, 1f | ||
270 | nop | ||
271 | |||
272 | /* Hypervisor TSB switch. */ | ||
273 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
274 | stxa %o1, [%g1] ASI_SCRATCHPAD | ||
275 | mov -1, %g2 | ||
276 | mov SCRATCHPAD_UTSBREG2, %g1 | ||
277 | stxa %g2, [%g1] ASI_SCRATCHPAD | ||
278 | |||
279 | /* Save away %o5's %pstate, we have to use %o5 for | ||
280 | * the hypervisor call. | ||
281 | */ | ||
282 | mov %o5, %g1 | ||
283 | |||
284 | mov HV_FAST_MMU_TSB_CTXNON0, %o5 | ||
285 | mov 1, %o0 | ||
286 | mov %o4, %o1 | ||
287 | ta HV_FAST_TRAP | ||
288 | |||
289 | /* Finish up and restore %o5. */ | ||
290 | ba,pt %xcc, 9f | ||
291 | mov %g1, %o5 | ||
292 | |||
293 | /* SUN4U TSB switch. */ | ||
294 | 1: mov TSB_REG, %g1 | ||
295 | stxa %o1, [%g1] ASI_DMMU | ||
296 | membar #Sync | ||
297 | stxa %o1, [%g1] ASI_IMMU | ||
298 | membar #Sync | ||
299 | |||
300 | 2: brz %o2, 9f | ||
301 | nop | ||
302 | |||
303 | sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2 | ||
304 | mov TLB_TAG_ACCESS, %g1 | ||
305 | lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2 | ||
306 | stxa %o2, [%g1] ASI_DMMU | ||
307 | membar #Sync | ||
308 | sllx %g2, 3, %g2 | ||
309 | stxa %o3, [%g2] ASI_DTLB_DATA_ACCESS | ||
310 | membar #Sync | ||
311 | 9: | ||
312 | wrpr %o5, %pstate | ||
313 | |||
314 | retl | ||
315 | nop | ||
316 | .size __tsb_context_switch, .-__tsb_context_switch | ||
317 | |||
318 | #define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \ | ||
319 | (1 << TSB_TAG_INVALID_BIT)) | ||
320 | |||
321 | .align 32 | ||
322 | .globl copy_tsb | ||
323 | .type copy_tsb,#function | ||
324 | copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size | ||
325 | * %o2=new_tsb_base, %o3=new_tsb_size | ||
326 | */ | ||
327 | sethi %uhi(TSB_PASS_BITS), %g7 | ||
328 | srlx %o3, 4, %o3 | ||
329 | add %o0, %o1, %g1 /* end of old tsb */ | ||
330 | sllx %g7, 32, %g7 | ||
331 | sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ | ||
332 | |||
333 | 661: prefetcha [%o0] ASI_N, #one_read | ||
334 | .section .tsb_phys_patch, "ax" | ||
335 | .word 661b | ||
336 | prefetcha [%o0] ASI_PHYS_USE_EC, #one_read | ||
337 | .previous | ||
338 | |||
339 | 90: andcc %o0, (64 - 1), %g0 | ||
340 | bne 1f | ||
341 | add %o0, 64, %o5 | ||
342 | |||
343 | 661: prefetcha [%o5] ASI_N, #one_read | ||
344 | .section .tsb_phys_patch, "ax" | ||
345 | .word 661b | ||
346 | prefetcha [%o5] ASI_PHYS_USE_EC, #one_read | ||
347 | .previous | ||
348 | |||
349 | 1: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */ | ||
350 | andcc %g2, %g7, %g0 /* LOCK or INVALID set? */ | ||
351 | bne,pn %xcc, 80f /* Skip it */ | ||
352 | sllx %g2, 22, %o4 /* TAG --> VADDR */ | ||
353 | |||
354 | /* This can definitely be computed faster... */ | ||
355 | srlx %o0, 4, %o5 /* Build index */ | ||
356 | and %o5, 511, %o5 /* Mask index */ | ||
357 | sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ | ||
358 | or %o4, %o5, %o4 /* Full VADDR. */ | ||
359 | srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ | ||
360 | and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ | ||
361 | sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ | ||
362 | TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ | ||
363 | add %o4, 0x8, %o4 /* Advance to TTE */ | ||
364 | TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ | ||
365 | |||
366 | 80: add %o0, 16, %o0 | ||
367 | cmp %o0, %g1 | ||
368 | bne,pt %xcc, 90b | ||
369 | nop | ||
370 | |||
371 | retl | ||
372 | TSB_MEMBAR | ||
373 | .size copy_tsb, .-copy_tsb | ||
374 | |||
375 | /* Set the invalid bit in all TSB entries. */ | ||
376 | .align 32 | ||
377 | .globl tsb_init | ||
378 | .type tsb_init,#function | ||
379 | tsb_init: /* %o0 = TSB vaddr, %o1 = size in bytes */ | ||
380 | prefetch [%o0 + 0x000], #n_writes | ||
381 | mov 1, %g1 | ||
382 | prefetch [%o0 + 0x040], #n_writes | ||
383 | sllx %g1, TSB_TAG_INVALID_BIT, %g1 | ||
384 | prefetch [%o0 + 0x080], #n_writes | ||
385 | 1: prefetch [%o0 + 0x0c0], #n_writes | ||
386 | stx %g1, [%o0 + 0x00] | ||
387 | stx %g1, [%o0 + 0x10] | ||
388 | stx %g1, [%o0 + 0x20] | ||
389 | stx %g1, [%o0 + 0x30] | ||
390 | prefetch [%o0 + 0x100], #n_writes | ||
391 | stx %g1, [%o0 + 0x40] | ||
392 | stx %g1, [%o0 + 0x50] | ||
393 | stx %g1, [%o0 + 0x60] | ||
394 | stx %g1, [%o0 + 0x70] | ||
395 | prefetch [%o0 + 0x140], #n_writes | ||
396 | stx %g1, [%o0 + 0x80] | ||
397 | stx %g1, [%o0 + 0x90] | ||
398 | stx %g1, [%o0 + 0xa0] | ||
399 | stx %g1, [%o0 + 0xb0] | ||
400 | prefetch [%o0 + 0x180], #n_writes | ||
401 | stx %g1, [%o0 + 0xc0] | ||
402 | stx %g1, [%o0 + 0xd0] | ||
403 | stx %g1, [%o0 + 0xe0] | ||
404 | stx %g1, [%o0 + 0xf0] | ||
405 | subcc %o1, 0x100, %o1 | ||
406 | bne,pt %xcc, 1b | ||
407 | add %o0, 0x100, %o0 | ||
408 | retl | ||
409 | nop | ||
410 | nop | ||
411 | nop | ||
412 | .size tsb_init, .-tsb_init | ||
413 | |||
414 | .globl NGtsb_init | ||
415 | .type NGtsb_init,#function | ||
416 | NGtsb_init: | ||
417 | rd %asi, %g2 | ||
418 | mov 1, %g1 | ||
419 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
420 | sllx %g1, TSB_TAG_INVALID_BIT, %g1 | ||
421 | 1: stxa %g1, [%o0 + 0x00] %asi | ||
422 | stxa %g1, [%o0 + 0x10] %asi | ||
423 | stxa %g1, [%o0 + 0x20] %asi | ||
424 | stxa %g1, [%o0 + 0x30] %asi | ||
425 | stxa %g1, [%o0 + 0x40] %asi | ||
426 | stxa %g1, [%o0 + 0x50] %asi | ||
427 | stxa %g1, [%o0 + 0x60] %asi | ||
428 | stxa %g1, [%o0 + 0x70] %asi | ||
429 | stxa %g1, [%o0 + 0x80] %asi | ||
430 | stxa %g1, [%o0 + 0x90] %asi | ||
431 | stxa %g1, [%o0 + 0xa0] %asi | ||
432 | stxa %g1, [%o0 + 0xb0] %asi | ||
433 | stxa %g1, [%o0 + 0xc0] %asi | ||
434 | stxa %g1, [%o0 + 0xd0] %asi | ||
435 | stxa %g1, [%o0 + 0xe0] %asi | ||
436 | stxa %g1, [%o0 + 0xf0] %asi | ||
437 | subcc %o1, 0x100, %o1 | ||
438 | bne,pt %xcc, 1b | ||
439 | add %o0, 0x100, %o0 | ||
440 | retl | ||
441 | wr %g2, 0x0, %asi | ||
442 | .size NGtsb_init, .-NGtsb_init | ||
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index 8365bc1f81f3..5d901519db55 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $ | 1 | /* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions. |
2 | * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1996, 2001, 2006 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #include <linux/config.h> | 6 | #include <linux/config.h> |
@@ -19,7 +18,7 @@ tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) | |||
19 | tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) | 18 | tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) |
20 | tl0_iax: membar #Sync | 19 | tl0_iax: membar #Sync |
21 | TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) | 20 | TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) |
22 | tl0_resv009: BTRAP(0x9) | 21 | tl0_itsb_4v: SUN4V_ITSB_MISS |
23 | tl0_iae: membar #Sync | 22 | tl0_iae: membar #Sync |
24 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | 23 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) |
25 | tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) | 24 | tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) |
@@ -38,7 +37,7 @@ tl0_div0: TRAP(do_div0) | |||
38 | tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) | 37 | tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) |
39 | tl0_resv02f: BTRAP(0x2f) | 38 | tl0_resv02f: BTRAP(0x2f) |
40 | tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) | 39 | tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) |
41 | tl0_resv031: BTRAP(0x31) | 40 | tl0_dtsb_4v: SUN4V_DTSB_MISS |
42 | tl0_dae: membar #Sync | 41 | tl0_dae: membar #Sync |
43 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | 42 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) |
44 | tl0_resv033: BTRAP(0x33) | 43 | tl0_resv033: BTRAP(0x33) |
@@ -52,12 +51,13 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) | |||
52 | tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) | 51 | tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) |
53 | tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) | 52 | tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) |
54 | tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) | 53 | tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) |
54 | tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) | ||
55 | #else | 55 | #else |
56 | tl0_irq1: BTRAP(0x41) | 56 | tl0_irq1: BTRAP(0x41) |
57 | tl0_irq2: BTRAP(0x42) | 57 | tl0_irq2: BTRAP(0x42) |
58 | tl0_irq3: BTRAP(0x43) | 58 | tl0_irq3: BTRAP(0x43) |
59 | tl0_irq4: BTRAP(0x44) | ||
59 | #endif | 60 | #endif |
60 | tl0_irq4: TRAP_IRQ(handler_irq, 4) | ||
61 | tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6) | 61 | tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6) |
62 | tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8) | 62 | tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8) |
63 | tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10) | 63 | tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10) |
@@ -78,9 +78,9 @@ tl0_vaw: TRAP(do_vaw) | |||
78 | tl0_cee: membar #Sync | 78 | tl0_cee: membar #Sync |
79 | TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) | 79 | TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) |
80 | tl0_iamiss: | 80 | tl0_iamiss: |
81 | #include "itlb_base.S" | 81 | #include "itlb_miss.S" |
82 | tl0_damiss: | 82 | tl0_damiss: |
83 | #include "dtlb_base.S" | 83 | #include "dtlb_miss.S" |
84 | tl0_daprot: | 84 | tl0_daprot: |
85 | #include "dtlb_prot.S" | 85 | #include "dtlb_prot.S" |
86 | tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */ | 86 | tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */ |
@@ -88,15 +88,18 @@ tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */ | |||
88 | tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ | 88 | tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ |
89 | tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) | 89 | tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) |
90 | tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) | 90 | tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) |
91 | tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f) | 91 | tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo) |
92 | tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo) | ||
93 | tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo) | ||
94 | tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo) | ||
92 | tl0_s0n: SPILL_0_NORMAL | 95 | tl0_s0n: SPILL_0_NORMAL |
93 | tl0_s1n: SPILL_1_NORMAL | 96 | tl0_s1n: SPILL_1_NORMAL |
94 | tl0_s2n: SPILL_2_NORMAL | 97 | tl0_s2n: SPILL_2_NORMAL |
95 | tl0_s3n: SPILL_3_NORMAL | 98 | tl0_s3n: SPILL_0_NORMAL_ETRAP |
96 | tl0_s4n: SPILL_4_NORMAL | 99 | tl0_s4n: SPILL_1_GENERIC_ETRAP |
97 | tl0_s5n: SPILL_5_NORMAL | 100 | tl0_s5n: SPILL_1_GENERIC_ETRAP_FIXUP |
98 | tl0_s6n: SPILL_6_NORMAL | 101 | tl0_s6n: SPILL_2_GENERIC_ETRAP |
99 | tl0_s7n: SPILL_7_NORMAL | 102 | tl0_s7n: SPILL_2_GENERIC_ETRAP_FIXUP |
100 | tl0_s0o: SPILL_0_OTHER | 103 | tl0_s0o: SPILL_0_OTHER |
101 | tl0_s1o: SPILL_1_OTHER | 104 | tl0_s1o: SPILL_1_OTHER |
102 | tl0_s2o: SPILL_2_OTHER | 105 | tl0_s2o: SPILL_2_OTHER |
@@ -110,9 +113,9 @@ tl0_f1n: FILL_1_NORMAL | |||
110 | tl0_f2n: FILL_2_NORMAL | 113 | tl0_f2n: FILL_2_NORMAL |
111 | tl0_f3n: FILL_3_NORMAL | 114 | tl0_f3n: FILL_3_NORMAL |
112 | tl0_f4n: FILL_4_NORMAL | 115 | tl0_f4n: FILL_4_NORMAL |
113 | tl0_f5n: FILL_5_NORMAL | 116 | tl0_f5n: FILL_0_NORMAL_RTRAP |
114 | tl0_f6n: FILL_6_NORMAL | 117 | tl0_f6n: FILL_1_GENERIC_RTRAP |
115 | tl0_f7n: FILL_7_NORMAL | 118 | tl0_f7n: FILL_2_GENERIC_RTRAP |
116 | tl0_f0o: FILL_0_OTHER | 119 | tl0_f0o: FILL_0_OTHER |
117 | tl0_f1o: FILL_1_OTHER | 120 | tl0_f1o: FILL_1_OTHER |
118 | tl0_f2o: FILL_2_OTHER | 121 | tl0_f2o: FILL_2_OTHER |
@@ -128,7 +131,7 @@ tl0_flushw: FLUSH_WINDOW_TRAP | |||
128 | tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107) | 131 | tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107) |
129 | .globl tl0_solaris | 132 | .globl tl0_solaris |
130 | tl0_solaris: SOLARIS_SYSCALL_TRAP | 133 | tl0_solaris: SOLARIS_SYSCALL_TRAP |
131 | tl0_netbsd: NETBSD_SYSCALL_TRAP | 134 | tl0_resv109: BTRAP(0x109) |
132 | tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e) | 135 | tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e) |
133 | tl0_resv10f: BTRAP(0x10f) | 136 | tl0_resv10f: BTRAP(0x10f) |
134 | tl0_linux32: LINUX_32BIT_SYSCALL_TRAP | 137 | tl0_linux32: LINUX_32BIT_SYSCALL_TRAP |
@@ -179,7 +182,7 @@ sparc64_ttable_tl1: | |||
179 | tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) | 182 | tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) |
180 | tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) | 183 | tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) |
181 | tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) | 184 | tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) |
182 | tl1_resv009: BTRAPTL1(0x9) | 185 | tl1_itsb_4v: SUN4V_ITSB_MISS |
183 | tl1_iae: membar #Sync | 186 | tl1_iae: membar #Sync |
184 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | 187 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) |
185 | tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) | 188 | tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) |
@@ -198,7 +201,7 @@ tl1_div0: TRAPTL1(do_div0_tl1) | |||
198 | tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) | 201 | tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) |
199 | tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) | 202 | tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) |
200 | tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) | 203 | tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) |
201 | tl1_resv031: BTRAPTL1(0x31) | 204 | tl1_dtsb_4v: SUN4V_DTSB_MISS |
202 | tl1_dae: membar #Sync | 205 | tl1_dae: membar #Sync |
203 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | 206 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) |
204 | tl1_resv033: BTRAPTL1(0x33) | 207 | tl1_resv033: BTRAPTL1(0x33) |
@@ -222,26 +225,10 @@ tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f) | |||
222 | tl1_ivec: TRAP_IVEC | 225 | tl1_ivec: TRAP_IVEC |
223 | tl1_paw: TRAPTL1(do_paw_tl1) | 226 | tl1_paw: TRAPTL1(do_paw_tl1) |
224 | tl1_vaw: TRAPTL1(do_vaw_tl1) | 227 | tl1_vaw: TRAPTL1(do_vaw_tl1) |
225 | 228 | tl1_cee: BTRAPTL1(0x63) | |
226 | /* The grotty trick to save %g1 into current->thread.cee_stuff | ||
227 | * is because when we take this trap we could be interrupting | ||
228 | * trap code already using the trap alternate global registers. | ||
229 | * | ||
230 | * We cross our fingers and pray that this store/load does | ||
231 | * not cause yet another CEE trap. | ||
232 | */ | ||
233 | tl1_cee: membar #Sync | ||
234 | stx %g1, [%g6 + TI_CEE_STUFF] | ||
235 | ldxa [%g0] ASI_AFSR, %g1 | ||
236 | membar #Sync | ||
237 | stxa %g1, [%g0] ASI_AFSR | ||
238 | membar #Sync | ||
239 | ldx [%g6 + TI_CEE_STUFF], %g1 | ||
240 | retry | ||
241 | |||
242 | tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) | 229 | tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) |
243 | tl1_damiss: | 230 | tl1_damiss: |
244 | #include "dtlb_backend.S" | 231 | #include "dtlb_miss.S" |
245 | tl1_daprot: | 232 | tl1_daprot: |
246 | #include "dtlb_prot.S" | 233 | #include "dtlb_prot.S" |
247 | tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */ | 234 | tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */ |
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c index 70faf630603b..001e8518331f 100644 --- a/arch/sparc64/kernel/unaligned.c +++ b/arch/sparc64/kernel/unaligned.c | |||
@@ -277,7 +277,7 @@ static void kernel_mna_trap_fault(void) | |||
277 | regs->tstate |= (ASI_AIUS << 24UL); | 277 | regs->tstate |= (ASI_AIUS << 24UL); |
278 | } | 278 | } |
279 | 279 | ||
280 | asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr) | 280 | asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) |
281 | { | 281 | { |
282 | enum direction dir = decode_direction(insn); | 282 | enum direction dir = decode_direction(insn); |
283 | int size = decode_access_size(insn); | 283 | int size = decode_access_size(insn); |
@@ -405,6 +405,9 @@ extern void do_privact(struct pt_regs *regs); | |||
405 | extern void spitfire_data_access_exception(struct pt_regs *regs, | 405 | extern void spitfire_data_access_exception(struct pt_regs *regs, |
406 | unsigned long sfsr, | 406 | unsigned long sfsr, |
407 | unsigned long sfar); | 407 | unsigned long sfar); |
408 | extern void sun4v_data_access_exception(struct pt_regs *regs, | ||
409 | unsigned long addr, | ||
410 | unsigned long type_ctx); | ||
408 | 411 | ||
409 | int handle_ldf_stq(u32 insn, struct pt_regs *regs) | 412 | int handle_ldf_stq(u32 insn, struct pt_regs *regs) |
410 | { | 413 | { |
@@ -447,14 +450,20 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
447 | break; | 450 | break; |
448 | } | 451 | } |
449 | default: | 452 | default: |
450 | spitfire_data_access_exception(regs, 0, addr); | 453 | if (tlb_type == hypervisor) |
454 | sun4v_data_access_exception(regs, addr, 0); | ||
455 | else | ||
456 | spitfire_data_access_exception(regs, 0, addr); | ||
451 | return 1; | 457 | return 1; |
452 | } | 458 | } |
453 | if (put_user (first >> 32, (u32 __user *)addr) || | 459 | if (put_user (first >> 32, (u32 __user *)addr) || |
454 | __put_user ((u32)first, (u32 __user *)(addr + 4)) || | 460 | __put_user ((u32)first, (u32 __user *)(addr + 4)) || |
455 | __put_user (second >> 32, (u32 __user *)(addr + 8)) || | 461 | __put_user (second >> 32, (u32 __user *)(addr + 8)) || |
456 | __put_user ((u32)second, (u32 __user *)(addr + 12))) { | 462 | __put_user ((u32)second, (u32 __user *)(addr + 12))) { |
457 | spitfire_data_access_exception(regs, 0, addr); | 463 | if (tlb_type == hypervisor) |
464 | sun4v_data_access_exception(regs, addr, 0); | ||
465 | else | ||
466 | spitfire_data_access_exception(regs, 0, addr); | ||
458 | return 1; | 467 | return 1; |
459 | } | 468 | } |
460 | } else { | 469 | } else { |
@@ -467,7 +476,10 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
467 | do_privact(regs); | 476 | do_privact(regs); |
468 | return 1; | 477 | return 1; |
469 | } else if (asi > ASI_SNFL) { | 478 | } else if (asi > ASI_SNFL) { |
470 | spitfire_data_access_exception(regs, 0, addr); | 479 | if (tlb_type == hypervisor) |
480 | sun4v_data_access_exception(regs, addr, 0); | ||
481 | else | ||
482 | spitfire_data_access_exception(regs, 0, addr); | ||
471 | return 1; | 483 | return 1; |
472 | } | 484 | } |
473 | switch (insn & 0x180000) { | 485 | switch (insn & 0x180000) { |
@@ -484,7 +496,10 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
484 | err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); | 496 | err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); |
485 | } | 497 | } |
486 | if (err && !(asi & 0x2 /* NF */)) { | 498 | if (err && !(asi & 0x2 /* NF */)) { |
487 | spitfire_data_access_exception(regs, 0, addr); | 499 | if (tlb_type == hypervisor) |
500 | sun4v_data_access_exception(regs, addr, 0); | ||
501 | else | ||
502 | spitfire_data_access_exception(regs, 0, addr); | ||
488 | return 1; | 503 | return 1; |
489 | } | 504 | } |
490 | if (asi & 0x8) /* Little */ { | 505 | if (asi & 0x8) /* Little */ { |
@@ -548,7 +563,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
548 | u32 insn; | 563 | u32 insn; |
549 | u32 first, second; | 564 | u32 first, second; |
550 | u64 value; | 565 | u64 value; |
551 | u8 asi, freg; | 566 | u8 freg; |
552 | int flag; | 567 | int flag; |
553 | struct fpustate *f = FPUSTATE; | 568 | struct fpustate *f = FPUSTATE; |
554 | 569 | ||
@@ -557,7 +572,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
557 | if (test_thread_flag(TIF_32BIT)) | 572 | if (test_thread_flag(TIF_32BIT)) |
558 | pc = (u32)pc; | 573 | pc = (u32)pc; |
559 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { | 574 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
560 | asi = sfsr >> 16; | 575 | int asi = decode_asi(insn, regs); |
561 | if ((asi > ASI_SNFL) || | 576 | if ((asi > ASI_SNFL) || |
562 | (asi < ASI_P)) | 577 | (asi < ASI_P)) |
563 | goto daex; | 578 | goto daex; |
@@ -587,7 +602,11 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
587 | *(u64 *)(f->regs + freg) = value; | 602 | *(u64 *)(f->regs + freg) = value; |
588 | current_thread_info()->fpsaved[0] |= flag; | 603 | current_thread_info()->fpsaved[0] |= flag; |
589 | } else { | 604 | } else { |
590 | daex: spitfire_data_access_exception(regs, sfsr, sfar); | 605 | daex: |
606 | if (tlb_type == hypervisor) | ||
607 | sun4v_data_access_exception(regs, sfar, sfsr); | ||
608 | else | ||
609 | spitfire_data_access_exception(regs, sfsr, sfar); | ||
591 | return; | 610 | return; |
592 | } | 611 | } |
593 | advance(regs); | 612 | advance(regs); |
@@ -600,7 +619,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
600 | unsigned long tstate = regs->tstate; | 619 | unsigned long tstate = regs->tstate; |
601 | u32 insn; | 620 | u32 insn; |
602 | u64 value; | 621 | u64 value; |
603 | u8 asi, freg; | 622 | u8 freg; |
604 | int flag; | 623 | int flag; |
605 | struct fpustate *f = FPUSTATE; | 624 | struct fpustate *f = FPUSTATE; |
606 | 625 | ||
@@ -609,8 +628,8 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
609 | if (test_thread_flag(TIF_32BIT)) | 628 | if (test_thread_flag(TIF_32BIT)) |
610 | pc = (u32)pc; | 629 | pc = (u32)pc; |
611 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { | 630 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
631 | int asi = decode_asi(insn, regs); | ||
612 | freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); | 632 | freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); |
613 | asi = sfsr >> 16; | ||
614 | value = 0; | 633 | value = 0; |
615 | flag = (freg < 32) ? FPRS_DL : FPRS_DU; | 634 | flag = (freg < 32) ? FPRS_DL : FPRS_DU; |
616 | if ((asi > ASI_SNFL) || | 635 | if ((asi > ASI_SNFL) || |
@@ -631,7 +650,11 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
631 | __put_user ((u32)value, (u32 __user *)(sfar + 4))) | 650 | __put_user ((u32)value, (u32 __user *)(sfar + 4))) |
632 | goto daex; | 651 | goto daex; |
633 | } else { | 652 | } else { |
634 | daex: spitfire_data_access_exception(regs, sfsr, sfar); | 653 | daex: |
654 | if (tlb_type == hypervisor) | ||
655 | sun4v_data_access_exception(regs, sfar, sfsr); | ||
656 | else | ||
657 | spitfire_data_access_exception(regs, sfsr, sfar); | ||
635 | return; | 658 | return; |
636 | } | 659 | } |
637 | advance(regs); | 660 | advance(regs); |
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c index b35dc8dc995a..1f83fe6a82d6 100644 --- a/arch/sparc64/kernel/us2e_cpufreq.c +++ b/arch/sparc64/kernel/us2e_cpufreq.c | |||
@@ -346,6 +346,9 @@ static int __init us2e_freq_init(void) | |||
346 | unsigned long manuf, impl, ver; | 346 | unsigned long manuf, impl, ver; |
347 | int ret; | 347 | int ret; |
348 | 348 | ||
349 | if (tlb_type != spitfire) | ||
350 | return -ENODEV; | ||
351 | |||
349 | __asm__("rdpr %%ver, %0" : "=r" (ver)); | 352 | __asm__("rdpr %%ver, %0" : "=r" (ver)); |
350 | manuf = ((ver >> 48) & 0xffff); | 353 | manuf = ((ver >> 48) & 0xffff); |
351 | impl = ((ver >> 32) & 0xffff); | 354 | impl = ((ver >> 32) & 0xffff); |
@@ -354,20 +357,16 @@ static int __init us2e_freq_init(void) | |||
354 | struct cpufreq_driver *driver; | 357 | struct cpufreq_driver *driver; |
355 | 358 | ||
356 | ret = -ENOMEM; | 359 | ret = -ENOMEM; |
357 | driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); | 360 | driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); |
358 | if (!driver) | 361 | if (!driver) |
359 | goto err_out; | 362 | goto err_out; |
360 | memset(driver, 0, sizeof(*driver)); | ||
361 | 363 | ||
362 | us2e_freq_table = kmalloc( | 364 | us2e_freq_table = kzalloc( |
363 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), | 365 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), |
364 | GFP_KERNEL); | 366 | GFP_KERNEL); |
365 | if (!us2e_freq_table) | 367 | if (!us2e_freq_table) |
366 | goto err_out; | 368 | goto err_out; |
367 | 369 | ||
368 | memset(us2e_freq_table, 0, | ||
369 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info))); | ||
370 | |||
371 | driver->init = us2e_freq_cpu_init; | 370 | driver->init = us2e_freq_cpu_init; |
372 | driver->verify = us2e_freq_verify; | 371 | driver->verify = us2e_freq_verify; |
373 | driver->target = us2e_freq_target; | 372 | driver->target = us2e_freq_target; |
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index 6d1f9a3c464f..47e3acafb5be 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c | |||
@@ -203,6 +203,9 @@ static int __init us3_freq_init(void) | |||
203 | unsigned long manuf, impl, ver; | 203 | unsigned long manuf, impl, ver; |
204 | int ret; | 204 | int ret; |
205 | 205 | ||
206 | if (tlb_type != cheetah && tlb_type != cheetah_plus) | ||
207 | return -ENODEV; | ||
208 | |||
206 | __asm__("rdpr %%ver, %0" : "=r" (ver)); | 209 | __asm__("rdpr %%ver, %0" : "=r" (ver)); |
207 | manuf = ((ver >> 48) & 0xffff); | 210 | manuf = ((ver >> 48) & 0xffff); |
208 | impl = ((ver >> 32) & 0xffff); | 211 | impl = ((ver >> 32) & 0xffff); |
@@ -215,20 +218,16 @@ static int __init us3_freq_init(void) | |||
215 | struct cpufreq_driver *driver; | 218 | struct cpufreq_driver *driver; |
216 | 219 | ||
217 | ret = -ENOMEM; | 220 | ret = -ENOMEM; |
218 | driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); | 221 | driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); |
219 | if (!driver) | 222 | if (!driver) |
220 | goto err_out; | 223 | goto err_out; |
221 | memset(driver, 0, sizeof(*driver)); | ||
222 | 224 | ||
223 | us3_freq_table = kmalloc( | 225 | us3_freq_table = kzalloc( |
224 | (NR_CPUS * sizeof(struct us3_freq_percpu_info)), | 226 | (NR_CPUS * sizeof(struct us3_freq_percpu_info)), |
225 | GFP_KERNEL); | 227 | GFP_KERNEL); |
226 | if (!us3_freq_table) | 228 | if (!us3_freq_table) |
227 | goto err_out; | 229 | goto err_out; |
228 | 230 | ||
229 | memset(us3_freq_table, 0, | ||
230 | (NR_CPUS * sizeof(struct us3_freq_percpu_info))); | ||
231 | |||
232 | driver->init = us3_freq_cpu_init; | 231 | driver->init = us3_freq_cpu_init; |
233 | driver->verify = us3_freq_verify; | 232 | driver->verify = us3_freq_verify; |
234 | driver->target = us3_freq_target; | 233 | driver->target = us3_freq_target; |
diff --git a/arch/sparc64/kernel/visemul.c b/arch/sparc64/kernel/visemul.c new file mode 100644 index 000000000000..84fedaa38aae --- /dev/null +++ b/arch/sparc64/kernel/visemul.c | |||
@@ -0,0 +1,894 @@ | |||
1 | /* visemul.c: Emulation of VIS instructions. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/errno.h> | ||
7 | #include <linux/thread_info.h> | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/pstate.h> | ||
11 | #include <asm/system.h> | ||
12 | #include <asm/fpumacro.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | |||
15 | /* OPF field of various VIS instructions. */ | ||
16 | |||
17 | /* 000111011 - four 16-bit packs */ | ||
18 | #define FPACK16_OPF 0x03b | ||
19 | |||
20 | /* 000111010 - two 32-bit packs */ | ||
21 | #define FPACK32_OPF 0x03a | ||
22 | |||
23 | /* 000111101 - four 16-bit packs */ | ||
24 | #define FPACKFIX_OPF 0x03d | ||
25 | |||
26 | /* 001001101 - four 16-bit expands */ | ||
27 | #define FEXPAND_OPF 0x04d | ||
28 | |||
29 | /* 001001011 - two 32-bit merges */ | ||
30 | #define FPMERGE_OPF 0x04b | ||
31 | |||
32 | /* 000110001 - 8-by-16-bit partitoned product */ | ||
33 | #define FMUL8x16_OPF 0x031 | ||
34 | |||
35 | /* 000110011 - 8-by-16-bit upper alpha partitioned product */ | ||
36 | #define FMUL8x16AU_OPF 0x033 | ||
37 | |||
38 | /* 000110101 - 8-by-16-bit lower alpha partitioned product */ | ||
39 | #define FMUL8x16AL_OPF 0x035 | ||
40 | |||
41 | /* 000110110 - upper 8-by-16-bit partitioned product */ | ||
42 | #define FMUL8SUx16_OPF 0x036 | ||
43 | |||
44 | /* 000110111 - lower 8-by-16-bit partitioned product */ | ||
45 | #define FMUL8ULx16_OPF 0x037 | ||
46 | |||
47 | /* 000111000 - upper 8-by-16-bit partitioned product */ | ||
48 | #define FMULD8SUx16_OPF 0x038 | ||
49 | |||
50 | /* 000111001 - lower unsigned 8-by-16-bit partitioned product */ | ||
51 | #define FMULD8ULx16_OPF 0x039 | ||
52 | |||
53 | /* 000101000 - four 16-bit compare; set rd if src1 > src2 */ | ||
54 | #define FCMPGT16_OPF 0x028 | ||
55 | |||
56 | /* 000101100 - two 32-bit compare; set rd if src1 > src2 */ | ||
57 | #define FCMPGT32_OPF 0x02c | ||
58 | |||
59 | /* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ | ||
60 | #define FCMPLE16_OPF 0x020 | ||
61 | |||
62 | /* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ | ||
63 | #define FCMPLE32_OPF 0x024 | ||
64 | |||
65 | /* 000100010 - four 16-bit compare; set rd if src1 != src2 */ | ||
66 | #define FCMPNE16_OPF 0x022 | ||
67 | |||
68 | /* 000100110 - two 32-bit compare; set rd if src1 != src2 */ | ||
69 | #define FCMPNE32_OPF 0x026 | ||
70 | |||
71 | /* 000101010 - four 16-bit compare; set rd if src1 == src2 */ | ||
72 | #define FCMPEQ16_OPF 0x02a | ||
73 | |||
74 | /* 000101110 - two 32-bit compare; set rd if src1 == src2 */ | ||
75 | #define FCMPEQ32_OPF 0x02e | ||
76 | |||
77 | /* 000000000 - Eight 8-bit edge boundary processing */ | ||
78 | #define EDGE8_OPF 0x000 | ||
79 | |||
80 | /* 000000001 - Eight 8-bit edge boundary processing, no CC */ | ||
81 | #define EDGE8N_OPF 0x001 | ||
82 | |||
83 | /* 000000010 - Eight 8-bit edge boundary processing, little-endian */ | ||
84 | #define EDGE8L_OPF 0x002 | ||
85 | |||
86 | /* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */ | ||
87 | #define EDGE8LN_OPF 0x003 | ||
88 | |||
89 | /* 000000100 - Four 16-bit edge boundary processing */ | ||
90 | #define EDGE16_OPF 0x004 | ||
91 | |||
92 | /* 000000101 - Four 16-bit edge boundary processing, no CC */ | ||
93 | #define EDGE16N_OPF 0x005 | ||
94 | |||
95 | /* 000000110 - Four 16-bit edge boundary processing, little-endian */ | ||
96 | #define EDGE16L_OPF 0x006 | ||
97 | |||
98 | /* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */ | ||
99 | #define EDGE16LN_OPF 0x007 | ||
100 | |||
101 | /* 000001000 - Two 32-bit edge boundary processing */ | ||
102 | #define EDGE32_OPF 0x008 | ||
103 | |||
104 | /* 000001001 - Two 32-bit edge boundary processing, no CC */ | ||
105 | #define EDGE32N_OPF 0x009 | ||
106 | |||
107 | /* 000001010 - Two 32-bit edge boundary processing, little-endian */ | ||
108 | #define EDGE32L_OPF 0x00a | ||
109 | |||
110 | /* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */ | ||
111 | #define EDGE32LN_OPF 0x00b | ||
112 | |||
113 | /* 000111110 - distance between 8 8-bit components */ | ||
114 | #define PDIST_OPF 0x03e | ||
115 | |||
116 | /* 000010000 - convert 8-bit 3-D address to blocked byte address */ | ||
117 | #define ARRAY8_OPF 0x010 | ||
118 | |||
119 | /* 000010010 - convert 16-bit 3-D address to blocked byte address */ | ||
120 | #define ARRAY16_OPF 0x012 | ||
121 | |||
122 | /* 000010100 - convert 32-bit 3-D address to blocked byte address */ | ||
123 | #define ARRAY32_OPF 0x014 | ||
124 | |||
125 | /* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */ | ||
126 | #define BMASK_OPF 0x019 | ||
127 | |||
128 | /* 001001100 - Permute bytes as specified by GSR.MASK */ | ||
129 | #define BSHUFFLE_OPF 0x04c | ||
130 | |||
131 | #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) | ||
132 | #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) | ||
133 | |||
134 | #define VIS_OPF_SHIFT 5 | ||
135 | #define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) | ||
136 | |||
137 | #define RS1(INSN) (((INSN) >> 24) & 0x1f) | ||
138 | #define RS2(INSN) (((INSN) >> 0) & 0x1f) | ||
139 | #define RD(INSN) (((INSN) >> 25) & 0x1f) | ||
140 | |||
141 | static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, | ||
142 | unsigned int rd, int from_kernel) | ||
143 | { | ||
144 | if (rs2 >= 16 || rs1 >= 16 || rd >= 16) { | ||
145 | if (from_kernel != 0) | ||
146 | __asm__ __volatile__("flushw"); | ||
147 | else | ||
148 | flushw_user(); | ||
149 | } | ||
150 | } | ||
151 | |||
152 | static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) | ||
153 | { | ||
154 | unsigned long value; | ||
155 | |||
156 | if (reg < 16) | ||
157 | return (!reg ? 0 : regs->u_regs[reg]); | ||
158 | if (regs->tstate & TSTATE_PRIV) { | ||
159 | struct reg_window *win; | ||
160 | win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); | ||
161 | value = win->locals[reg - 16]; | ||
162 | } else if (test_thread_flag(TIF_32BIT)) { | ||
163 | struct reg_window32 __user *win32; | ||
164 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | ||
165 | get_user(value, &win32->locals[reg - 16]); | ||
166 | } else { | ||
167 | struct reg_window __user *win; | ||
168 | win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); | ||
169 | get_user(value, &win->locals[reg - 16]); | ||
170 | } | ||
171 | return value; | ||
172 | } | ||
173 | |||
174 | static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, | ||
175 | struct pt_regs *regs) | ||
176 | { | ||
177 | BUG_ON(reg < 16); | ||
178 | BUG_ON(regs->tstate & TSTATE_PRIV); | ||
179 | |||
180 | if (test_thread_flag(TIF_32BIT)) { | ||
181 | struct reg_window32 __user *win32; | ||
182 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | ||
183 | return (unsigned long __user *)&win32->locals[reg - 16]; | ||
184 | } else { | ||
185 | struct reg_window __user *win; | ||
186 | win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); | ||
187 | return &win->locals[reg - 16]; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg, | ||
192 | struct pt_regs *regs) | ||
193 | { | ||
194 | BUG_ON(reg >= 16); | ||
195 | BUG_ON(regs->tstate & TSTATE_PRIV); | ||
196 | |||
197 | return ®s->u_regs[reg]; | ||
198 | } | ||
199 | |||
200 | static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) | ||
201 | { | ||
202 | if (rd < 16) { | ||
203 | unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs); | ||
204 | |||
205 | *rd_kern = val; | ||
206 | } else { | ||
207 | unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); | ||
208 | |||
209 | if (test_thread_flag(TIF_32BIT)) | ||
210 | __put_user((u32)val, (u32 __user *)rd_user); | ||
211 | else | ||
212 | __put_user(val, rd_user); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static inline unsigned long fpd_regval(struct fpustate *f, | ||
217 | unsigned int insn_regnum) | ||
218 | { | ||
219 | insn_regnum = (((insn_regnum & 1) << 5) | | ||
220 | (insn_regnum & 0x1e)); | ||
221 | |||
222 | return *(unsigned long *) &f->regs[insn_regnum]; | ||
223 | } | ||
224 | |||
225 | static inline unsigned long *fpd_regaddr(struct fpustate *f, | ||
226 | unsigned int insn_regnum) | ||
227 | { | ||
228 | insn_regnum = (((insn_regnum & 1) << 5) | | ||
229 | (insn_regnum & 0x1e)); | ||
230 | |||
231 | return (unsigned long *) &f->regs[insn_regnum]; | ||
232 | } | ||
233 | |||
234 | static inline unsigned int fps_regval(struct fpustate *f, | ||
235 | unsigned int insn_regnum) | ||
236 | { | ||
237 | return f->regs[insn_regnum]; | ||
238 | } | ||
239 | |||
240 | static inline unsigned int *fps_regaddr(struct fpustate *f, | ||
241 | unsigned int insn_regnum) | ||
242 | { | ||
243 | return &f->regs[insn_regnum]; | ||
244 | } | ||
245 | |||
246 | struct edge_tab { | ||
247 | u16 left, right; | ||
248 | }; | ||
249 | struct edge_tab edge8_tab[8] = { | ||
250 | { 0xff, 0x80 }, | ||
251 | { 0x7f, 0xc0 }, | ||
252 | { 0x3f, 0xe0 }, | ||
253 | { 0x1f, 0xf0 }, | ||
254 | { 0x0f, 0xf8 }, | ||
255 | { 0x07, 0xfc }, | ||
256 | { 0x03, 0xfe }, | ||
257 | { 0x01, 0xff }, | ||
258 | }; | ||
259 | struct edge_tab edge8_tab_l[8] = { | ||
260 | { 0xff, 0x01 }, | ||
261 | { 0xfe, 0x03 }, | ||
262 | { 0xfc, 0x07 }, | ||
263 | { 0xf8, 0x0f }, | ||
264 | { 0xf0, 0x1f }, | ||
265 | { 0xe0, 0x3f }, | ||
266 | { 0xc0, 0x7f }, | ||
267 | { 0x80, 0xff }, | ||
268 | }; | ||
269 | struct edge_tab edge16_tab[4] = { | ||
270 | { 0xf, 0x8 }, | ||
271 | { 0x7, 0xc }, | ||
272 | { 0x3, 0xe }, | ||
273 | { 0x1, 0xf }, | ||
274 | }; | ||
275 | struct edge_tab edge16_tab_l[4] = { | ||
276 | { 0xf, 0x1 }, | ||
277 | { 0xe, 0x3 }, | ||
278 | { 0xc, 0x7 }, | ||
279 | { 0x8, 0xf }, | ||
280 | }; | ||
281 | struct edge_tab edge32_tab[2] = { | ||
282 | { 0x3, 0x2 }, | ||
283 | { 0x1, 0x3 }, | ||
284 | }; | ||
285 | struct edge_tab edge32_tab_l[2] = { | ||
286 | { 0x3, 0x1 }, | ||
287 | { 0x2, 0x3 }, | ||
288 | }; | ||
289 | |||
290 | static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
291 | { | ||
292 | unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val; | ||
293 | u16 left, right; | ||
294 | |||
295 | maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); | ||
296 | orig_rs1 = rs1 = fetch_reg(RS1(insn), regs); | ||
297 | orig_rs2 = rs2 = fetch_reg(RS2(insn), regs); | ||
298 | |||
299 | if (test_thread_flag(TIF_32BIT)) { | ||
300 | rs1 = rs1 & 0xffffffff; | ||
301 | rs2 = rs2 & 0xffffffff; | ||
302 | } | ||
303 | switch (opf) { | ||
304 | default: | ||
305 | case EDGE8_OPF: | ||
306 | case EDGE8N_OPF: | ||
307 | left = edge8_tab[rs1 & 0x7].left; | ||
308 | right = edge8_tab[rs2 & 0x7].right; | ||
309 | break; | ||
310 | case EDGE8L_OPF: | ||
311 | case EDGE8LN_OPF: | ||
312 | left = edge8_tab_l[rs1 & 0x7].left; | ||
313 | right = edge8_tab_l[rs2 & 0x7].right; | ||
314 | break; | ||
315 | |||
316 | case EDGE16_OPF: | ||
317 | case EDGE16N_OPF: | ||
318 | left = edge16_tab[(rs1 >> 1) & 0x3].left; | ||
319 | right = edge16_tab[(rs2 >> 1) & 0x3].right; | ||
320 | break; | ||
321 | |||
322 | case EDGE16L_OPF: | ||
323 | case EDGE16LN_OPF: | ||
324 | left = edge16_tab_l[(rs1 >> 1) & 0x3].left; | ||
325 | right = edge16_tab_l[(rs2 >> 1) & 0x3].right; | ||
326 | break; | ||
327 | |||
328 | case EDGE32_OPF: | ||
329 | case EDGE32N_OPF: | ||
330 | left = edge32_tab[(rs1 >> 2) & 0x1].left; | ||
331 | right = edge32_tab[(rs2 >> 2) & 0x1].right; | ||
332 | break; | ||
333 | |||
334 | case EDGE32L_OPF: | ||
335 | case EDGE32LN_OPF: | ||
336 | left = edge32_tab_l[(rs1 >> 2) & 0x1].left; | ||
337 | right = edge32_tab_l[(rs2 >> 2) & 0x1].right; | ||
338 | break; | ||
339 | }; | ||
340 | |||
341 | if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) | ||
342 | rd_val = right & left; | ||
343 | else | ||
344 | rd_val = left; | ||
345 | |||
346 | store_reg(regs, rd_val, RD(insn)); | ||
347 | |||
348 | switch (opf) { | ||
349 | case EDGE8_OPF: | ||
350 | case EDGE8L_OPF: | ||
351 | case EDGE16_OPF: | ||
352 | case EDGE16L_OPF: | ||
353 | case EDGE32_OPF: | ||
354 | case EDGE32L_OPF: { | ||
355 | unsigned long ccr, tstate; | ||
356 | |||
357 | __asm__ __volatile__("subcc %1, %2, %%g0\n\t" | ||
358 | "rd %%ccr, %0" | ||
359 | : "=r" (ccr) | ||
360 | : "r" (orig_rs1), "r" (orig_rs2) | ||
361 | : "cc"); | ||
362 | tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); | ||
363 | regs->tstate = tstate | (ccr << 32UL); | ||
364 | } | ||
365 | }; | ||
366 | } | ||
367 | |||
368 | static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
369 | { | ||
370 | unsigned long rs1, rs2, rd_val; | ||
371 | unsigned int bits, bits_mask; | ||
372 | |||
373 | maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); | ||
374 | rs1 = fetch_reg(RS1(insn), regs); | ||
375 | rs2 = fetch_reg(RS2(insn), regs); | ||
376 | |||
377 | bits = (rs2 > 5 ? 5 : rs2); | ||
378 | bits_mask = (1UL << bits) - 1UL; | ||
379 | |||
380 | rd_val = ((((rs1 >> 11) & 0x3) << 0) | | ||
381 | (((rs1 >> 33) & 0x3) << 2) | | ||
382 | (((rs1 >> 55) & 0x1) << 4) | | ||
383 | (((rs1 >> 13) & 0xf) << 5) | | ||
384 | (((rs1 >> 35) & 0xf) << 9) | | ||
385 | (((rs1 >> 56) & 0xf) << 13) | | ||
386 | (((rs1 >> 17) & bits_mask) << 17) | | ||
387 | (((rs1 >> 39) & bits_mask) << (17 + bits)) | | ||
388 | (((rs1 >> 60) & 0xf) << (17 + (2*bits)))); | ||
389 | |||
390 | switch (opf) { | ||
391 | case ARRAY16_OPF: | ||
392 | rd_val <<= 1; | ||
393 | break; | ||
394 | |||
395 | case ARRAY32_OPF: | ||
396 | rd_val <<= 2; | ||
397 | }; | ||
398 | |||
399 | store_reg(regs, rd_val, RD(insn)); | ||
400 | } | ||
401 | |||
402 | static void bmask(struct pt_regs *regs, unsigned int insn) | ||
403 | { | ||
404 | unsigned long rs1, rs2, rd_val, gsr; | ||
405 | |||
406 | maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); | ||
407 | rs1 = fetch_reg(RS1(insn), regs); | ||
408 | rs2 = fetch_reg(RS2(insn), regs); | ||
409 | rd_val = rs1 + rs2; | ||
410 | |||
411 | store_reg(regs, rd_val, RD(insn)); | ||
412 | |||
413 | gsr = current_thread_info()->gsr[0] & 0xffffffff; | ||
414 | gsr |= rd_val << 32UL; | ||
415 | current_thread_info()->gsr[0] = gsr; | ||
416 | } | ||
417 | |||
418 | static void bshuffle(struct pt_regs *regs, unsigned int insn) | ||
419 | { | ||
420 | struct fpustate *f = FPUSTATE; | ||
421 | unsigned long rs1, rs2, rd_val; | ||
422 | unsigned long bmask, i; | ||
423 | |||
424 | bmask = current_thread_info()->gsr[0] >> 32UL; | ||
425 | |||
426 | rs1 = fpd_regval(f, RS1(insn)); | ||
427 | rs2 = fpd_regval(f, RS2(insn)); | ||
428 | |||
429 | rd_val = 0UL; | ||
430 | for (i = 0; i < 8; i++) { | ||
431 | unsigned long which = (bmask >> (i * 4)) & 0xf; | ||
432 | unsigned long byte; | ||
433 | |||
434 | if (which < 8) | ||
435 | byte = (rs1 >> (which * 8)) & 0xff; | ||
436 | else | ||
437 | byte = (rs2 >> ((which-8)*8)) & 0xff; | ||
438 | rd_val |= (byte << (i * 8)); | ||
439 | } | ||
440 | |||
441 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
442 | } | ||
443 | |||
444 | static void pdist(struct pt_regs *regs, unsigned int insn) | ||
445 | { | ||
446 | struct fpustate *f = FPUSTATE; | ||
447 | unsigned long rs1, rs2, *rd, rd_val; | ||
448 | unsigned long i; | ||
449 | |||
450 | rs1 = fpd_regval(f, RS1(insn)); | ||
451 | rs2 = fpd_regval(f, RS1(insn)); | ||
452 | rd = fpd_regaddr(f, RD(insn)); | ||
453 | |||
454 | rd_val = *rd; | ||
455 | |||
456 | for (i = 0; i < 8; i++) { | ||
457 | s16 s1, s2; | ||
458 | |||
459 | s1 = (rs1 >> (56 - (i * 8))) & 0xff; | ||
460 | s2 = (rs2 >> (56 - (i * 8))) & 0xff; | ||
461 | |||
462 | /* Absolute value of difference. */ | ||
463 | s1 -= s2; | ||
464 | if (s1 < 0) | ||
465 | s1 = ~s1 + 1; | ||
466 | |||
467 | rd_val += s1; | ||
468 | } | ||
469 | |||
470 | *rd = rd_val; | ||
471 | } | ||
472 | |||
473 | static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
474 | { | ||
475 | struct fpustate *f = FPUSTATE; | ||
476 | unsigned long rs1, rs2, gsr, scale, rd_val; | ||
477 | |||
478 | gsr = current_thread_info()->gsr[0]; | ||
479 | scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f); | ||
480 | switch (opf) { | ||
481 | case FPACK16_OPF: { | ||
482 | unsigned long byte; | ||
483 | |||
484 | rs2 = fpd_regval(f, RS2(insn)); | ||
485 | rd_val = 0; | ||
486 | for (byte = 0; byte < 4; byte++) { | ||
487 | unsigned int val; | ||
488 | s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL; | ||
489 | int scaled = src << scale; | ||
490 | int from_fixed = scaled >> 7; | ||
491 | |||
492 | val = ((from_fixed < 0) ? | ||
493 | 0 : | ||
494 | (from_fixed > 255) ? | ||
495 | 255 : from_fixed); | ||
496 | |||
497 | rd_val |= (val << (8 * byte)); | ||
498 | } | ||
499 | *fps_regaddr(f, RD(insn)) = rd_val; | ||
500 | break; | ||
501 | } | ||
502 | |||
503 | case FPACK32_OPF: { | ||
504 | unsigned long word; | ||
505 | |||
506 | rs1 = fpd_regval(f, RS1(insn)); | ||
507 | rs2 = fpd_regval(f, RS2(insn)); | ||
508 | rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL); | ||
509 | for (word = 0; word < 2; word++) { | ||
510 | unsigned long val; | ||
511 | s32 src = (rs2 >> (word * 32UL)); | ||
512 | s64 scaled = src << scale; | ||
513 | s64 from_fixed = scaled >> 23; | ||
514 | |||
515 | val = ((from_fixed < 0) ? | ||
516 | 0 : | ||
517 | (from_fixed > 255) ? | ||
518 | 255 : from_fixed); | ||
519 | |||
520 | rd_val |= (val << (32 * word)); | ||
521 | } | ||
522 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
523 | break; | ||
524 | } | ||
525 | |||
526 | case FPACKFIX_OPF: { | ||
527 | unsigned long word; | ||
528 | |||
529 | rs2 = fpd_regval(f, RS2(insn)); | ||
530 | |||
531 | rd_val = 0; | ||
532 | for (word = 0; word < 2; word++) { | ||
533 | long val; | ||
534 | s32 src = (rs2 >> (word * 32UL)); | ||
535 | s64 scaled = src << scale; | ||
536 | s64 from_fixed = scaled >> 16; | ||
537 | |||
538 | val = ((from_fixed < -32768) ? | ||
539 | -32768 : | ||
540 | (from_fixed > 32767) ? | ||
541 | 32767 : from_fixed); | ||
542 | |||
543 | rd_val |= ((val & 0xffff) << (word * 16)); | ||
544 | } | ||
545 | *fps_regaddr(f, RD(insn)) = rd_val; | ||
546 | break; | ||
547 | } | ||
548 | |||
549 | case FEXPAND_OPF: { | ||
550 | unsigned long byte; | ||
551 | |||
552 | rs2 = fps_regval(f, RS2(insn)); | ||
553 | |||
554 | rd_val = 0; | ||
555 | for (byte = 0; byte < 4; byte++) { | ||
556 | unsigned long val; | ||
557 | u8 src = (rs2 >> (byte * 8)) & 0xff; | ||
558 | |||
559 | val = src << 4; | ||
560 | |||
561 | rd_val |= (val << (byte * 16)); | ||
562 | } | ||
563 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
564 | break; | ||
565 | } | ||
566 | |||
567 | case FPMERGE_OPF: { | ||
568 | rs1 = fps_regval(f, RS1(insn)); | ||
569 | rs2 = fps_regval(f, RS2(insn)); | ||
570 | |||
571 | rd_val = (((rs2 & 0x000000ff) << 0) | | ||
572 | ((rs1 & 0x000000ff) << 8) | | ||
573 | ((rs2 & 0x0000ff00) << 8) | | ||
574 | ((rs1 & 0x0000ff00) << 16) | | ||
575 | ((rs2 & 0x00ff0000) << 16) | | ||
576 | ((rs1 & 0x00ff0000) << 24) | | ||
577 | ((rs2 & 0xff000000) << 24) | | ||
578 | ((rs1 & 0xff000000) << 32)); | ||
579 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
580 | break; | ||
581 | } | ||
582 | }; | ||
583 | } | ||
584 | |||
585 | static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
586 | { | ||
587 | struct fpustate *f = FPUSTATE; | ||
588 | unsigned long rs1, rs2, rd_val; | ||
589 | |||
590 | switch (opf) { | ||
591 | case FMUL8x16_OPF: { | ||
592 | unsigned long byte; | ||
593 | |||
594 | rs1 = fps_regval(f, RS1(insn)); | ||
595 | rs2 = fpd_regval(f, RS2(insn)); | ||
596 | |||
597 | rd_val = 0; | ||
598 | for (byte = 0; byte < 4; byte++) { | ||
599 | u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; | ||
600 | s16 src2 = (rs2 >> (byte * 16)) & 0xffff; | ||
601 | u32 prod = src1 * src2; | ||
602 | u16 scaled = ((prod & 0x00ffff00) >> 8); | ||
603 | |||
604 | /* Round up. */ | ||
605 | if (prod & 0x80) | ||
606 | scaled++; | ||
607 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
608 | } | ||
609 | |||
610 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
611 | break; | ||
612 | } | ||
613 | |||
614 | case FMUL8x16AU_OPF: | ||
615 | case FMUL8x16AL_OPF: { | ||
616 | unsigned long byte; | ||
617 | s16 src2; | ||
618 | |||
619 | rs1 = fps_regval(f, RS1(insn)); | ||
620 | rs2 = fps_regval(f, RS2(insn)); | ||
621 | |||
622 | rd_val = 0; | ||
623 | src2 = (rs2 >> (opf == FMUL8x16AU_OPF) ? 16 : 0); | ||
624 | for (byte = 0; byte < 4; byte++) { | ||
625 | u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; | ||
626 | u32 prod = src1 * src2; | ||
627 | u16 scaled = ((prod & 0x00ffff00) >> 8); | ||
628 | |||
629 | /* Round up. */ | ||
630 | if (prod & 0x80) | ||
631 | scaled++; | ||
632 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
633 | } | ||
634 | |||
635 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
636 | break; | ||
637 | } | ||
638 | |||
639 | case FMUL8SUx16_OPF: | ||
640 | case FMUL8ULx16_OPF: { | ||
641 | unsigned long byte, ushift; | ||
642 | |||
643 | rs1 = fpd_regval(f, RS1(insn)); | ||
644 | rs2 = fpd_regval(f, RS2(insn)); | ||
645 | |||
646 | rd_val = 0; | ||
647 | ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0; | ||
648 | for (byte = 0; byte < 4; byte++) { | ||
649 | u16 src1; | ||
650 | s16 src2; | ||
651 | u32 prod; | ||
652 | u16 scaled; | ||
653 | |||
654 | src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); | ||
655 | src2 = ((rs2 >> (16 * byte)) & 0xffff); | ||
656 | prod = src1 * src2; | ||
657 | scaled = ((prod & 0x00ffff00) >> 8); | ||
658 | |||
659 | /* Round up. */ | ||
660 | if (prod & 0x80) | ||
661 | scaled++; | ||
662 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
663 | } | ||
664 | |||
665 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
666 | break; | ||
667 | } | ||
668 | |||
669 | case FMULD8SUx16_OPF: | ||
670 | case FMULD8ULx16_OPF: { | ||
671 | unsigned long byte, ushift; | ||
672 | |||
673 | rs1 = fps_regval(f, RS1(insn)); | ||
674 | rs2 = fps_regval(f, RS2(insn)); | ||
675 | |||
676 | rd_val = 0; | ||
677 | ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0; | ||
678 | for (byte = 0; byte < 2; byte++) { | ||
679 | u16 src1; | ||
680 | s16 src2; | ||
681 | u32 prod; | ||
682 | u16 scaled; | ||
683 | |||
684 | src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); | ||
685 | src2 = ((rs2 >> (16 * byte)) & 0xffff); | ||
686 | prod = src1 * src2; | ||
687 | scaled = ((prod & 0x00ffff00) >> 8); | ||
688 | |||
689 | /* Round up. */ | ||
690 | if (prod & 0x80) | ||
691 | scaled++; | ||
692 | rd_val |= ((scaled & 0xffffUL) << | ||
693 | ((byte * 32UL) + 7UL)); | ||
694 | } | ||
695 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
696 | break; | ||
697 | } | ||
698 | }; | ||
699 | } | ||
700 | |||
701 | static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
702 | { | ||
703 | struct fpustate *f = FPUSTATE; | ||
704 | unsigned long rs1, rs2, rd_val, i; | ||
705 | |||
706 | rs1 = fpd_regval(f, RS1(insn)); | ||
707 | rs2 = fpd_regval(f, RS2(insn)); | ||
708 | |||
709 | rd_val = 0; | ||
710 | |||
711 | switch (opf) { | ||
712 | case FCMPGT16_OPF: | ||
713 | for (i = 0; i < 4; i++) { | ||
714 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
715 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
716 | |||
717 | if (a > b) | ||
718 | rd_val |= 1 << i; | ||
719 | } | ||
720 | break; | ||
721 | |||
722 | case FCMPGT32_OPF: | ||
723 | for (i = 0; i < 2; i++) { | ||
724 | s32 a = (rs1 >> (i * 32)) & 0xffff; | ||
725 | s32 b = (rs2 >> (i * 32)) & 0xffff; | ||
726 | |||
727 | if (a > b) | ||
728 | rd_val |= 1 << i; | ||
729 | } | ||
730 | break; | ||
731 | |||
732 | case FCMPLE16_OPF: | ||
733 | for (i = 0; i < 4; i++) { | ||
734 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
735 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
736 | |||
737 | if (a <= b) | ||
738 | rd_val |= 1 << i; | ||
739 | } | ||
740 | break; | ||
741 | |||
742 | case FCMPLE32_OPF: | ||
743 | for (i = 0; i < 2; i++) { | ||
744 | s32 a = (rs1 >> (i * 32)) & 0xffff; | ||
745 | s32 b = (rs2 >> (i * 32)) & 0xffff; | ||
746 | |||
747 | if (a <= b) | ||
748 | rd_val |= 1 << i; | ||
749 | } | ||
750 | break; | ||
751 | |||
752 | case FCMPNE16_OPF: | ||
753 | for (i = 0; i < 4; i++) { | ||
754 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
755 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
756 | |||
757 | if (a != b) | ||
758 | rd_val |= 1 << i; | ||
759 | } | ||
760 | break; | ||
761 | |||
762 | case FCMPNE32_OPF: | ||
763 | for (i = 0; i < 2; i++) { | ||
764 | s32 a = (rs1 >> (i * 32)) & 0xffff; | ||
765 | s32 b = (rs2 >> (i * 32)) & 0xffff; | ||
766 | |||
767 | if (a != b) | ||
768 | rd_val |= 1 << i; | ||
769 | } | ||
770 | break; | ||
771 | |||
772 | case FCMPEQ16_OPF: | ||
773 | for (i = 0; i < 4; i++) { | ||
774 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
775 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
776 | |||
777 | if (a == b) | ||
778 | rd_val |= 1 << i; | ||
779 | } | ||
780 | break; | ||
781 | |||
782 | case FCMPEQ32_OPF: | ||
783 | for (i = 0; i < 2; i++) { | ||
784 | s32 a = (rs1 >> (i * 32)) & 0xffff; | ||
785 | s32 b = (rs2 >> (i * 32)) & 0xffff; | ||
786 | |||
787 | if (a == b) | ||
788 | rd_val |= 1 << i; | ||
789 | } | ||
790 | break; | ||
791 | }; | ||
792 | |||
793 | maybe_flush_windows(0, 0, RD(insn), 0); | ||
794 | store_reg(regs, rd_val, RD(insn)); | ||
795 | } | ||
796 | |||
797 | /* Emulate the VIS instructions which are not implemented in | ||
798 | * hardware on Niagara. | ||
799 | */ | ||
800 | int vis_emul(struct pt_regs *regs, unsigned int insn) | ||
801 | { | ||
802 | unsigned long pc = regs->tpc; | ||
803 | unsigned int opf; | ||
804 | |||
805 | BUG_ON(regs->tstate & TSTATE_PRIV); | ||
806 | |||
807 | if (test_thread_flag(TIF_32BIT)) | ||
808 | pc = (u32)pc; | ||
809 | |||
810 | if (get_user(insn, (u32 __user *) pc)) | ||
811 | return -EFAULT; | ||
812 | |||
813 | if ((insn & VIS_OPCODE_MASK) != VIS_OPCODE_VAL) | ||
814 | return -EINVAL; | ||
815 | |||
816 | opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT; | ||
817 | switch (opf) { | ||
818 | default: | ||
819 | return -EINVAL; | ||
820 | |||
821 | /* Pixel Formatting Instructions. */ | ||
822 | case FPACK16_OPF: | ||
823 | case FPACK32_OPF: | ||
824 | case FPACKFIX_OPF: | ||
825 | case FEXPAND_OPF: | ||
826 | case FPMERGE_OPF: | ||
827 | pformat(regs, insn, opf); | ||
828 | break; | ||
829 | |||
830 | /* Partitioned Multiply Instructions */ | ||
831 | case FMUL8x16_OPF: | ||
832 | case FMUL8x16AU_OPF: | ||
833 | case FMUL8x16AL_OPF: | ||
834 | case FMUL8SUx16_OPF: | ||
835 | case FMUL8ULx16_OPF: | ||
836 | case FMULD8SUx16_OPF: | ||
837 | case FMULD8ULx16_OPF: | ||
838 | pmul(regs, insn, opf); | ||
839 | break; | ||
840 | |||
841 | /* Pixel Compare Instructions */ | ||
842 | case FCMPGT16_OPF: | ||
843 | case FCMPGT32_OPF: | ||
844 | case FCMPLE16_OPF: | ||
845 | case FCMPLE32_OPF: | ||
846 | case FCMPNE16_OPF: | ||
847 | case FCMPNE32_OPF: | ||
848 | case FCMPEQ16_OPF: | ||
849 | case FCMPEQ32_OPF: | ||
850 | pcmp(regs, insn, opf); | ||
851 | break; | ||
852 | |||
853 | /* Edge Handling Instructions */ | ||
854 | case EDGE8_OPF: | ||
855 | case EDGE8N_OPF: | ||
856 | case EDGE8L_OPF: | ||
857 | case EDGE8LN_OPF: | ||
858 | case EDGE16_OPF: | ||
859 | case EDGE16N_OPF: | ||
860 | case EDGE16L_OPF: | ||
861 | case EDGE16LN_OPF: | ||
862 | case EDGE32_OPF: | ||
863 | case EDGE32N_OPF: | ||
864 | case EDGE32L_OPF: | ||
865 | case EDGE32LN_OPF: | ||
866 | edge(regs, insn, opf); | ||
867 | break; | ||
868 | |||
869 | /* Pixel Component Distance */ | ||
870 | case PDIST_OPF: | ||
871 | pdist(regs, insn); | ||
872 | break; | ||
873 | |||
874 | /* Three-Dimensional Array Addressing Instructions */ | ||
875 | case ARRAY8_OPF: | ||
876 | case ARRAY16_OPF: | ||
877 | case ARRAY32_OPF: | ||
878 | array(regs, insn, opf); | ||
879 | break; | ||
880 | |||
881 | /* Byte Mask and Shuffle Instructions */ | ||
882 | case BMASK_OPF: | ||
883 | bmask(regs, insn); | ||
884 | break; | ||
885 | |||
886 | case BSHUFFLE_OPF: | ||
887 | bshuffle(regs, insn); | ||
888 | break; | ||
889 | }; | ||
890 | |||
891 | regs->tpc = regs->tnpc; | ||
892 | regs->tnpc += 4; | ||
893 | return 0; | ||
894 | } | ||
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 467d13a0d5c1..b097379a49a8 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S | |||
@@ -70,6 +70,22 @@ SECTIONS | |||
70 | .con_initcall.init : { *(.con_initcall.init) } | 70 | .con_initcall.init : { *(.con_initcall.init) } |
71 | __con_initcall_end = .; | 71 | __con_initcall_end = .; |
72 | SECURITY_INIT | 72 | SECURITY_INIT |
73 | . = ALIGN(4); | ||
74 | __tsb_ldquad_phys_patch = .; | ||
75 | .tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) } | ||
76 | __tsb_ldquad_phys_patch_end = .; | ||
77 | __tsb_phys_patch = .; | ||
78 | .tsb_phys_patch : { *(.tsb_phys_patch) } | ||
79 | __tsb_phys_patch_end = .; | ||
80 | __cpuid_patch = .; | ||
81 | .cpuid_patch : { *(.cpuid_patch) } | ||
82 | __cpuid_patch_end = .; | ||
83 | __sun4v_1insn_patch = .; | ||
84 | .sun4v_1insn_patch : { *(.sun4v_1insn_patch) } | ||
85 | __sun4v_1insn_patch_end = .; | ||
86 | __sun4v_2insn_patch = .; | ||
87 | .sun4v_2insn_patch : { *(.sun4v_2insn_patch) } | ||
88 | __sun4v_2insn_patch_end = .; | ||
73 | . = ALIGN(8192); | 89 | . = ALIGN(8192); |
74 | __initramfs_start = .; | 90 | __initramfs_start = .; |
75 | .init.ramfs : { *(.init.ramfs) } | 91 | .init.ramfs : { *(.init.ramfs) } |
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 39160926267b..c4aa110a10e5 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S | |||
@@ -1,8 +1,6 @@ | |||
1 | /* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $ | 1 | /* winfixup.S: Handle cases where user stack pointer is found to be bogus. |
2 | * | 2 | * |
3 | * winfixup.S: Handle cases where user stack pointer is found to be bogus. | 3 | * Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net) |
4 | * | ||
5 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | 4 | */ |
7 | 5 | ||
8 | #include <asm/asi.h> | 6 | #include <asm/asi.h> |
@@ -15,374 +13,144 @@ | |||
15 | 13 | ||
16 | .text | 14 | .text |
17 | 15 | ||
18 | set_pcontext: | 16 | /* It used to be the case that these register window fault |
19 | sethi %hi(sparc64_kern_pri_context), %l1 | 17 | * handlers could run via the save and restore instructions |
20 | ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1 | 18 | * done by the trap entry and exit code. They now do the |
21 | mov PRIMARY_CONTEXT, %g1 | 19 | * window spill/fill by hand, so that case no longer can occur. |
22 | stxa %l1, [%g1] ASI_DMMU | 20 | */ |
23 | flush %g6 | ||
24 | retl | ||
25 | nop | ||
26 | 21 | ||
27 | .align 32 | 22 | .align 32 |
28 | |||
29 | /* Here are the rules, pay attention. | ||
30 | * | ||
31 | * The kernel is disallowed from touching user space while | ||
32 | * the trap level is greater than zero, except for from within | ||
33 | * the window spill/fill handlers. This must be followed | ||
34 | * so that we can easily detect the case where we tried to | ||
35 | * spill/fill with a bogus (or unmapped) user stack pointer. | ||
36 | * | ||
37 | * These are layed out in a special way for cache reasons, | ||
38 | * don't touch... | ||
39 | */ | ||
40 | .globl fill_fixup, spill_fixup | ||
41 | fill_fixup: | 23 | fill_fixup: |
42 | rdpr %tstate, %g1 | 24 | TRAP_LOAD_THREAD_REG(%g6, %g1) |
43 | andcc %g1, TSTATE_PRIV, %g0 | 25 | rdpr %tstate, %g1 |
44 | or %g4, FAULT_CODE_WINFIXUP, %g4 | 26 | and %g1, TSTATE_CWP, %g1 |
45 | be,pt %xcc, window_scheisse_from_user_common | 27 | or %g4, FAULT_CODE_WINFIXUP, %g4 |
46 | and %g1, TSTATE_CWP, %g1 | 28 | stb %g4, [%g6 + TI_FAULT_CODE] |
47 | 29 | stx %g5, [%g6 + TI_FAULT_ADDR] | |
48 | /* This is the extremely complex case, but it does happen from | 30 | wrpr %g1, %cwp |
49 | * time to time if things are just right. Essentially the restore | 31 | ba,pt %xcc, etrap |
50 | * done in rtrap right before going back to user mode, with tl=1 | 32 | rd %pc, %g7 |
51 | * and that levels trap stack registers all setup, took a fill trap, | 33 | call do_sparc64_fault |
52 | * the user stack was not mapped in the tlb, and tlb miss occurred, | 34 | add %sp, PTREGS_OFF, %o0 |
53 | * the pte found was not valid, and a simple ref bit watch update | 35 | ba,pt %xcc, rtrap_clr_l6 |
54 | * could not satisfy the miss, so we got here. | ||
55 | * | ||
56 | * We must carefully unwind the state so we get back to tl=0, preserve | ||
57 | * all the register values we were going to give to the user. Luckily | ||
58 | * most things are where they need to be, we also have the address | ||
59 | * which triggered the fault handy as well. | ||
60 | * | ||
61 | * Also note that we must preserve %l5 and %l6. If the user was | ||
62 | * returning from a system call, we must make it look this way | ||
63 | * after we process the fill fault on the users stack. | ||
64 | * | ||
65 | * First, get into the window where the original restore was executed. | ||
66 | */ | ||
67 | |||
68 | rdpr %wstate, %g2 ! Grab user mode wstate. | ||
69 | wrpr %g1, %cwp ! Get into the right window. | ||
70 | sll %g2, 3, %g2 ! NORMAL-->OTHER | ||
71 | |||
72 | wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. | ||
73 | wrpr %g2, 0x0, %wstate ! This must be consistent. | ||
74 | wrpr %g0, 0x0, %otherwin ! We know this. | ||
75 | call set_pcontext ! Change contexts... | ||
76 | nop | 36 | nop |
77 | rdpr %pstate, %l1 ! Prepare to change globals. | ||
78 | mov %g6, %o7 ! Get current. | ||
79 | |||
80 | andn %l1, PSTATE_MM, %l1 ! We want to be in RMO | ||
81 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
82 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
83 | wrpr %g0, 0x0, %tl ! Out of trap levels. | ||
84 | wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate | ||
85 | mov %o7, %g6 | ||
86 | ldx [%g6 + TI_TASK], %g4 | ||
87 | #ifdef CONFIG_SMP | ||
88 | mov TSB_REG, %g1 | ||
89 | ldxa [%g1] ASI_IMMU, %g5 | ||
90 | #endif | ||
91 | 37 | ||
92 | /* This is the same as below, except we handle this a bit special | 38 | /* Be very careful about usage of the trap globals here. |
93 | * since we must preserve %l5 and %l6, see comment above. | 39 | * You cannot touch %g5 as that has the fault information. |
94 | */ | ||
95 | call do_sparc64_fault | ||
96 | add %sp, PTREGS_OFF, %o0 | ||
97 | ba,pt %xcc, rtrap | ||
98 | nop ! yes, nop is correct | ||
99 | |||
100 | /* Be very careful about usage of the alternate globals here. | ||
101 | * You cannot touch %g4/%g5 as that has the fault information | ||
102 | * should this be from usermode. Also be careful for the case | ||
103 | * where we get here from the save instruction in etrap.S when | ||
104 | * coming from either user or kernel (does not matter which, it | ||
105 | * is the same problem in both cases). Essentially this means | ||
106 | * do not touch %g7 or %g2 so we handle the two cases fine. | ||
107 | */ | 40 | */ |
108 | spill_fixup: | 41 | spill_fixup: |
109 | ldx [%g6 + TI_FLAGS], %g1 | 42 | spill_fixup_mna: |
110 | andcc %g1, _TIF_32BIT, %g0 | 43 | spill_fixup_dax: |
111 | ldub [%g6 + TI_WSAVED], %g1 | 44 | TRAP_LOAD_THREAD_REG(%g6, %g1) |
112 | 45 | ldx [%g6 + TI_FLAGS], %g1 | |
113 | sll %g1, 3, %g3 | 46 | andcc %g1, _TIF_32BIT, %g0 |
114 | add %g6, %g3, %g3 | 47 | ldub [%g6 + TI_WSAVED], %g1 |
115 | stx %sp, [%g3 + TI_RWIN_SPTRS] | 48 | sll %g1, 3, %g3 |
116 | sll %g1, 7, %g3 | 49 | add %g6, %g3, %g3 |
117 | bne,pt %xcc, 1f | 50 | stx %sp, [%g3 + TI_RWIN_SPTRS] |
118 | add %g6, %g3, %g3 | 51 | sll %g1, 7, %g3 |
119 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00] | 52 | bne,pt %xcc, 1f |
120 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08] | 53 | add %g6, %g3, %g3 |
121 | 54 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00] | |
122 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10] | 55 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08] |
123 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18] | 56 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10] |
124 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20] | 57 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18] |
125 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28] | 58 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20] |
126 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30] | 59 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28] |
127 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38] | 60 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30] |
128 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40] | 61 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38] |
129 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48] | 62 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40] |
130 | 63 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48] | |
131 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50] | 64 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50] |
132 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58] | 65 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58] |
133 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60] | 66 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60] |
134 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68] | 67 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68] |
135 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70] | 68 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70] |
136 | b,pt %xcc, 2f | 69 | ba,pt %xcc, 2f |
137 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78] | 70 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78] |
138 | 1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00] | 71 | 1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00] |
139 | 72 | stw %l1, [%g3 + TI_REG_WINDOW + 0x04] | |
140 | stw %l1, [%g3 + TI_REG_WINDOW + 0x04] | 73 | stw %l2, [%g3 + TI_REG_WINDOW + 0x08] |
141 | stw %l2, [%g3 + TI_REG_WINDOW + 0x08] | 74 | stw %l3, [%g3 + TI_REG_WINDOW + 0x0c] |
142 | stw %l3, [%g3 + TI_REG_WINDOW + 0x0c] | 75 | stw %l4, [%g3 + TI_REG_WINDOW + 0x10] |
143 | stw %l4, [%g3 + TI_REG_WINDOW + 0x10] | 76 | stw %l5, [%g3 + TI_REG_WINDOW + 0x14] |
144 | stw %l5, [%g3 + TI_REG_WINDOW + 0x14] | 77 | stw %l6, [%g3 + TI_REG_WINDOW + 0x18] |
145 | stw %l6, [%g3 + TI_REG_WINDOW + 0x18] | 78 | stw %l7, [%g3 + TI_REG_WINDOW + 0x1c] |
146 | stw %l7, [%g3 + TI_REG_WINDOW + 0x1c] | 79 | stw %i0, [%g3 + TI_REG_WINDOW + 0x20] |
147 | stw %i0, [%g3 + TI_REG_WINDOW + 0x20] | 80 | stw %i1, [%g3 + TI_REG_WINDOW + 0x24] |
148 | 81 | stw %i2, [%g3 + TI_REG_WINDOW + 0x28] | |
149 | stw %i1, [%g3 + TI_REG_WINDOW + 0x24] | 82 | stw %i3, [%g3 + TI_REG_WINDOW + 0x2c] |
150 | stw %i2, [%g3 + TI_REG_WINDOW + 0x28] | 83 | stw %i4, [%g3 + TI_REG_WINDOW + 0x30] |
151 | stw %i3, [%g3 + TI_REG_WINDOW + 0x2c] | 84 | stw %i5, [%g3 + TI_REG_WINDOW + 0x34] |
152 | stw %i4, [%g3 + TI_REG_WINDOW + 0x30] | 85 | stw %i6, [%g3 + TI_REG_WINDOW + 0x38] |
153 | stw %i5, [%g3 + TI_REG_WINDOW + 0x34] | 86 | stw %i7, [%g3 + TI_REG_WINDOW + 0x3c] |
154 | stw %i6, [%g3 + TI_REG_WINDOW + 0x38] | 87 | 2: add %g1, 1, %g1 |
155 | stw %i7, [%g3 + TI_REG_WINDOW + 0x3c] | 88 | stb %g1, [%g6 + TI_WSAVED] |
156 | 2: add %g1, 1, %g1 | 89 | rdpr %tstate, %g1 |
157 | 90 | andcc %g1, TSTATE_PRIV, %g0 | |
158 | stb %g1, [%g6 + TI_WSAVED] | ||
159 | rdpr %tstate, %g1 | ||
160 | andcc %g1, TSTATE_PRIV, %g0 | ||
161 | saved | 91 | saved |
162 | and %g1, TSTATE_CWP, %g1 | 92 | be,pn %xcc, 1f |
163 | be,pn %xcc, window_scheisse_from_user_common | 93 | and %g1, TSTATE_CWP, %g1 |
164 | mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 | ||
165 | retry | 94 | retry |
95 | 1: mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 | ||
96 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
97 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
98 | wrpr %g1, %cwp | ||
99 | ba,pt %xcc, etrap | ||
100 | rd %pc, %g7 | ||
101 | call do_sparc64_fault | ||
102 | add %sp, PTREGS_OFF, %o0 | ||
103 | ba,a,pt %xcc, rtrap_clr_l6 | ||
166 | 104 | ||
167 | window_scheisse_from_user_common: | ||
168 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
169 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
170 | wrpr %g1, %cwp | ||
171 | ba,pt %xcc, etrap | ||
172 | rd %pc, %g7 | ||
173 | call do_sparc64_fault | ||
174 | add %sp, PTREGS_OFF, %o0 | ||
175 | ba,a,pt %xcc, rtrap_clr_l6 | ||
176 | |||
177 | .globl winfix_mna, fill_fixup_mna, spill_fixup_mna | ||
178 | winfix_mna: | 105 | winfix_mna: |
179 | andn %g3, 0x7f, %g3 | 106 | andn %g3, 0x7f, %g3 |
180 | add %g3, 0x78, %g3 | 107 | add %g3, 0x78, %g3 |
181 | wrpr %g3, %tnpc | 108 | wrpr %g3, %tnpc |
182 | done | 109 | done |
183 | fill_fixup_mna: | ||
184 | rdpr %tstate, %g1 | ||
185 | andcc %g1, TSTATE_PRIV, %g0 | ||
186 | be,pt %xcc, window_mna_from_user_common | ||
187 | and %g1, TSTATE_CWP, %g1 | ||
188 | 110 | ||
189 | /* Please, see fill_fixup commentary about why we must preserve | 111 | fill_fixup_mna: |
190 | * %l5 and %l6 to preserve absolute correct semantics. | 112 | rdpr %tstate, %g1 |
191 | */ | 113 | and %g1, TSTATE_CWP, %g1 |
192 | rdpr %wstate, %g2 ! Grab user mode wstate. | 114 | wrpr %g1, %cwp |
193 | wrpr %g1, %cwp ! Get into the right window. | 115 | ba,pt %xcc, etrap |
194 | sll %g2, 3, %g2 ! NORMAL-->OTHER | 116 | rd %pc, %g7 |
195 | wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. | 117 | sethi %hi(tlb_type), %g1 |
196 | 118 | lduw [%g1 + %lo(tlb_type)], %g1 | |
197 | wrpr %g2, 0x0, %wstate ! This must be consistent. | 119 | cmp %g1, 3 |
198 | wrpr %g0, 0x0, %otherwin ! We know this. | 120 | bne,pt %icc, 1f |
199 | call set_pcontext ! Change contexts... | 121 | add %sp, PTREGS_OFF, %o0 |
122 | mov %l4, %o2 | ||
123 | call sun4v_do_mna | ||
124 | mov %l5, %o1 | ||
125 | ba,a,pt %xcc, rtrap_clr_l6 | ||
126 | 1: mov %l4, %o1 | ||
127 | mov %l5, %o2 | ||
128 | call mem_address_unaligned | ||
200 | nop | 129 | nop |
201 | rdpr %pstate, %l1 ! Prepare to change globals. | 130 | ba,a,pt %xcc, rtrap_clr_l6 |
202 | mov %g4, %o2 ! Setup args for | ||
203 | mov %g5, %o1 ! final call to mem_address_unaligned. | ||
204 | andn %l1, PSTATE_MM, %l1 ! We want to be in RMO | ||
205 | 131 | ||
206 | mov %g6, %o7 ! Stash away current. | ||
207 | wrpr %g0, 0x0, %tl ! Out of trap levels. | ||
208 | wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate | ||
209 | mov %o7, %g6 ! Get current back. | ||
210 | ldx [%g6 + TI_TASK], %g4 ! Finish it. | ||
211 | #ifdef CONFIG_SMP | ||
212 | mov TSB_REG, %g1 | ||
213 | ldxa [%g1] ASI_IMMU, %g5 | ||
214 | #endif | ||
215 | call mem_address_unaligned | ||
216 | add %sp, PTREGS_OFF, %o0 | ||
217 | |||
218 | b,pt %xcc, rtrap | ||
219 | nop ! yes, the nop is correct | ||
220 | spill_fixup_mna: | ||
221 | ldx [%g6 + TI_FLAGS], %g1 | ||
222 | andcc %g1, _TIF_32BIT, %g0 | ||
223 | ldub [%g6 + TI_WSAVED], %g1 | ||
224 | sll %g1, 3, %g3 | ||
225 | add %g6, %g3, %g3 | ||
226 | stx %sp, [%g3 + TI_RWIN_SPTRS] | ||
227 | |||
228 | sll %g1, 7, %g3 | ||
229 | bne,pt %xcc, 1f | ||
230 | add %g6, %g3, %g3 | ||
231 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00] | ||
232 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08] | ||
233 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10] | ||
234 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18] | ||
235 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20] | ||
236 | |||
237 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28] | ||
238 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30] | ||
239 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38] | ||
240 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40] | ||
241 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48] | ||
242 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50] | ||
243 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58] | ||
244 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60] | ||
245 | |||
246 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68] | ||
247 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70] | ||
248 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78] | ||
249 | b,pt %xcc, 2f | ||
250 | add %g1, 1, %g1 | ||
251 | 1: std %l0, [%g3 + TI_REG_WINDOW + 0x00] | ||
252 | std %l2, [%g3 + TI_REG_WINDOW + 0x08] | ||
253 | std %l4, [%g3 + TI_REG_WINDOW + 0x10] | ||
254 | |||
255 | std %l6, [%g3 + TI_REG_WINDOW + 0x18] | ||
256 | std %i0, [%g3 + TI_REG_WINDOW + 0x20] | ||
257 | std %i2, [%g3 + TI_REG_WINDOW + 0x28] | ||
258 | std %i4, [%g3 + TI_REG_WINDOW + 0x30] | ||
259 | std %i6, [%g3 + TI_REG_WINDOW + 0x38] | ||
260 | add %g1, 1, %g1 | ||
261 | 2: stb %g1, [%g6 + TI_WSAVED] | ||
262 | rdpr %tstate, %g1 | ||
263 | |||
264 | andcc %g1, TSTATE_PRIV, %g0 | ||
265 | saved | ||
266 | be,pn %xcc, window_mna_from_user_common | ||
267 | and %g1, TSTATE_CWP, %g1 | ||
268 | retry | ||
269 | window_mna_from_user_common: | ||
270 | wrpr %g1, %cwp | ||
271 | sethi %hi(109f), %g7 | ||
272 | ba,pt %xcc, etrap | ||
273 | 109: or %g7, %lo(109b), %g7 | ||
274 | mov %l4, %o2 | ||
275 | mov %l5, %o1 | ||
276 | call mem_address_unaligned | ||
277 | add %sp, PTREGS_OFF, %o0 | ||
278 | ba,pt %xcc, rtrap | ||
279 | clr %l6 | ||
280 | |||
281 | /* These are only needed for 64-bit mode processes which | ||
282 | * put their stack pointer into the VPTE area and there | ||
283 | * happens to be a VPTE tlb entry mapped there during | ||
284 | * a spill/fill trap to that stack frame. | ||
285 | */ | ||
286 | .globl winfix_dax, fill_fixup_dax, spill_fixup_dax | ||
287 | winfix_dax: | 132 | winfix_dax: |
288 | andn %g3, 0x7f, %g3 | 133 | andn %g3, 0x7f, %g3 |
289 | add %g3, 0x74, %g3 | 134 | add %g3, 0x74, %g3 |
290 | wrpr %g3, %tnpc | 135 | wrpr %g3, %tnpc |
291 | done | 136 | done |
292 | fill_fixup_dax: | ||
293 | rdpr %tstate, %g1 | ||
294 | andcc %g1, TSTATE_PRIV, %g0 | ||
295 | be,pt %xcc, window_dax_from_user_common | ||
296 | and %g1, TSTATE_CWP, %g1 | ||
297 | |||
298 | /* Please, see fill_fixup commentary about why we must preserve | ||
299 | * %l5 and %l6 to preserve absolute correct semantics. | ||
300 | */ | ||
301 | rdpr %wstate, %g2 ! Grab user mode wstate. | ||
302 | wrpr %g1, %cwp ! Get into the right window. | ||
303 | sll %g2, 3, %g2 ! NORMAL-->OTHER | ||
304 | wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. | ||
305 | 137 | ||
306 | wrpr %g2, 0x0, %wstate ! This must be consistent. | 138 | fill_fixup_dax: |
307 | wrpr %g0, 0x0, %otherwin ! We know this. | 139 | rdpr %tstate, %g1 |
308 | call set_pcontext ! Change contexts... | 140 | and %g1, TSTATE_CWP, %g1 |
141 | wrpr %g1, %cwp | ||
142 | ba,pt %xcc, etrap | ||
143 | rd %pc, %g7 | ||
144 | sethi %hi(tlb_type), %g1 | ||
145 | mov %l4, %o1 | ||
146 | lduw [%g1 + %lo(tlb_type)], %g1 | ||
147 | mov %l5, %o2 | ||
148 | cmp %g1, 3 | ||
149 | bne,pt %icc, 1f | ||
150 | add %sp, PTREGS_OFF, %o0 | ||
151 | call sun4v_data_access_exception | ||
309 | nop | 152 | nop |
310 | rdpr %pstate, %l1 ! Prepare to change globals. | 153 | ba,a,pt %xcc, rtrap_clr_l6 |
311 | mov %g4, %o1 ! Setup args for | 154 | 1: call spitfire_data_access_exception |
312 | mov %g5, %o2 ! final call to spitfire_data_access_exception. | 155 | nop |
313 | andn %l1, PSTATE_MM, %l1 ! We want to be in RMO | 156 | ba,a,pt %xcc, rtrap_clr_l6 |
314 | |||
315 | mov %g6, %o7 ! Stash away current. | ||
316 | wrpr %g0, 0x0, %tl ! Out of trap levels. | ||
317 | wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate | ||
318 | mov %o7, %g6 ! Get current back. | ||
319 | ldx [%g6 + TI_TASK], %g4 ! Finish it. | ||
320 | #ifdef CONFIG_SMP | ||
321 | mov TSB_REG, %g1 | ||
322 | ldxa [%g1] ASI_IMMU, %g5 | ||
323 | #endif | ||
324 | call spitfire_data_access_exception | ||
325 | add %sp, PTREGS_OFF, %o0 | ||
326 | |||
327 | b,pt %xcc, rtrap | ||
328 | nop ! yes, the nop is correct | ||
329 | spill_fixup_dax: | ||
330 | ldx [%g6 + TI_FLAGS], %g1 | ||
331 | andcc %g1, _TIF_32BIT, %g0 | ||
332 | ldub [%g6 + TI_WSAVED], %g1 | ||
333 | sll %g1, 3, %g3 | ||
334 | add %g6, %g3, %g3 | ||
335 | stx %sp, [%g3 + TI_RWIN_SPTRS] | ||
336 | |||
337 | sll %g1, 7, %g3 | ||
338 | bne,pt %xcc, 1f | ||
339 | add %g6, %g3, %g3 | ||
340 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00] | ||
341 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08] | ||
342 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10] | ||
343 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18] | ||
344 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20] | ||
345 | |||
346 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28] | ||
347 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30] | ||
348 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38] | ||
349 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40] | ||
350 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48] | ||
351 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50] | ||
352 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58] | ||
353 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60] | ||
354 | |||
355 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68] | ||
356 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70] | ||
357 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78] | ||
358 | b,pt %xcc, 2f | ||
359 | add %g1, 1, %g1 | ||
360 | 1: std %l0, [%g3 + TI_REG_WINDOW + 0x00] | ||
361 | std %l2, [%g3 + TI_REG_WINDOW + 0x08] | ||
362 | std %l4, [%g3 + TI_REG_WINDOW + 0x10] | ||
363 | |||
364 | std %l6, [%g3 + TI_REG_WINDOW + 0x18] | ||
365 | std %i0, [%g3 + TI_REG_WINDOW + 0x20] | ||
366 | std %i2, [%g3 + TI_REG_WINDOW + 0x28] | ||
367 | std %i4, [%g3 + TI_REG_WINDOW + 0x30] | ||
368 | std %i6, [%g3 + TI_REG_WINDOW + 0x38] | ||
369 | add %g1, 1, %g1 | ||
370 | 2: stb %g1, [%g6 + TI_WSAVED] | ||
371 | rdpr %tstate, %g1 | ||
372 | |||
373 | andcc %g1, TSTATE_PRIV, %g0 | ||
374 | saved | ||
375 | be,pn %xcc, window_dax_from_user_common | ||
376 | and %g1, TSTATE_CWP, %g1 | ||
377 | retry | ||
378 | window_dax_from_user_common: | ||
379 | wrpr %g1, %cwp | ||
380 | sethi %hi(109f), %g7 | ||
381 | ba,pt %xcc, etrap | ||
382 | 109: or %g7, %lo(109b), %g7 | ||
383 | mov %l4, %o1 | ||
384 | mov %l5, %o2 | ||
385 | call spitfire_data_access_exception | ||
386 | add %sp, PTREGS_OFF, %o0 | ||
387 | ba,pt %xcc, rtrap | ||
388 | clr %l6 | ||
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index c295806500f7..8812ded19f01 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile | |||
@@ -11,6 +11,8 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ | |||
11 | VISsave.o atomic.o bitops.o \ | 11 | VISsave.o atomic.o bitops.o \ |
12 | U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ | 12 | U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ |
13 | U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ | 13 | U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ |
14 | NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ | ||
15 | NGpage.o NGbzero.o \ | ||
14 | copy_in_user.o user_fixup.o memmove.o \ | 16 | copy_in_user.o user_fixup.o memmove.o \ |
15 | mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o | 17 | mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o |
16 | 18 | ||
diff --git a/arch/sparc64/lib/NGbzero.S b/arch/sparc64/lib/NGbzero.S new file mode 100644 index 000000000000..e86baece5cc8 --- /dev/null +++ b/arch/sparc64/lib/NGbzero.S | |||
@@ -0,0 +1,163 @@ | |||
1 | /* NGbzero.S: Niagara optimized memset/clear_user. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | #include <asm/asi.h> | ||
6 | |||
7 | #define EX_ST(x,y) \ | ||
8 | 98: x,y; \ | ||
9 | .section .fixup; \ | ||
10 | .align 4; \ | ||
11 | 99: retl; \ | ||
12 | mov %o1, %o0; \ | ||
13 | .section __ex_table; \ | ||
14 | .align 4; \ | ||
15 | .word 98b, 99b; \ | ||
16 | .text; \ | ||
17 | .align 4; | ||
18 | |||
19 | .text | ||
20 | |||
21 | .globl NGmemset | ||
22 | .type NGmemset, #function | ||
23 | NGmemset: /* %o0=buf, %o1=pat, %o2=len */ | ||
24 | and %o1, 0xff, %o3 | ||
25 | mov %o2, %o1 | ||
26 | sllx %o3, 8, %g1 | ||
27 | or %g1, %o3, %o2 | ||
28 | sllx %o2, 16, %g1 | ||
29 | or %g1, %o2, %o2 | ||
30 | sllx %o2, 32, %g1 | ||
31 | ba,pt %xcc, 1f | ||
32 | or %g1, %o2, %o2 | ||
33 | |||
34 | .globl NGbzero | ||
35 | .type NGbzero, #function | ||
36 | NGbzero: | ||
37 | clr %o2 | ||
38 | 1: brz,pn %o1, NGbzero_return | ||
39 | mov %o0, %o3 | ||
40 | |||
41 | /* %o5: saved %asi, restored at NGbzero_done | ||
42 | * %g7: store-init %asi to use | ||
43 | * %o4: non-store-init %asi to use | ||
44 | */ | ||
45 | rd %asi, %o5 | ||
46 | mov ASI_BLK_INIT_QUAD_LDD_P, %g7 | ||
47 | mov ASI_P, %o4 | ||
48 | wr %o4, 0x0, %asi | ||
49 | |||
50 | NGbzero_from_clear_user: | ||
51 | cmp %o1, 15 | ||
52 | bl,pn %icc, NGbzero_tiny | ||
53 | andcc %o0, 0x7, %g1 | ||
54 | be,pt %xcc, 2f | ||
55 | mov 8, %g2 | ||
56 | sub %g2, %g1, %g1 | ||
57 | sub %o1, %g1, %o1 | ||
58 | 1: EX_ST(stba %o2, [%o0 + 0x00] %asi) | ||
59 | subcc %g1, 1, %g1 | ||
60 | bne,pt %xcc, 1b | ||
61 | add %o0, 1, %o0 | ||
62 | 2: cmp %o1, 128 | ||
63 | bl,pn %icc, NGbzero_medium | ||
64 | andcc %o0, (64 - 1), %g1 | ||
65 | be,pt %xcc, NGbzero_pre_loop | ||
66 | mov 64, %g2 | ||
67 | sub %g2, %g1, %g1 | ||
68 | sub %o1, %g1, %o1 | ||
69 | 1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) | ||
70 | subcc %g1, 8, %g1 | ||
71 | bne,pt %xcc, 1b | ||
72 | add %o0, 8, %o0 | ||
73 | |||
74 | NGbzero_pre_loop: | ||
75 | wr %g7, 0x0, %asi | ||
76 | andn %o1, (64 - 1), %g1 | ||
77 | sub %o1, %g1, %o1 | ||
78 | NGbzero_loop: | ||
79 | EX_ST(stxa %o2, [%o0 + 0x00] %asi) | ||
80 | EX_ST(stxa %o2, [%o0 + 0x08] %asi) | ||
81 | EX_ST(stxa %o2, [%o0 + 0x10] %asi) | ||
82 | EX_ST(stxa %o2, [%o0 + 0x18] %asi) | ||
83 | EX_ST(stxa %o2, [%o0 + 0x20] %asi) | ||
84 | EX_ST(stxa %o2, [%o0 + 0x28] %asi) | ||
85 | EX_ST(stxa %o2, [%o0 + 0x30] %asi) | ||
86 | EX_ST(stxa %o2, [%o0 + 0x38] %asi) | ||
87 | subcc %g1, 64, %g1 | ||
88 | bne,pt %xcc, NGbzero_loop | ||
89 | add %o0, 64, %o0 | ||
90 | |||
91 | wr %o4, 0x0, %asi | ||
92 | brz,pn %o1, NGbzero_done | ||
93 | NGbzero_medium: | ||
94 | andncc %o1, 0x7, %g1 | ||
95 | be,pn %xcc, 2f | ||
96 | sub %o1, %g1, %o1 | ||
97 | 1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) | ||
98 | subcc %g1, 8, %g1 | ||
99 | bne,pt %xcc, 1b | ||
100 | add %o0, 8, %o0 | ||
101 | 2: brz,pt %o1, NGbzero_done | ||
102 | nop | ||
103 | |||
104 | NGbzero_tiny: | ||
105 | 1: EX_ST(stba %o2, [%o0 + 0x00] %asi) | ||
106 | subcc %o1, 1, %o1 | ||
107 | bne,pt %icc, 1b | ||
108 | add %o0, 1, %o0 | ||
109 | |||
110 | /* fallthrough */ | ||
111 | |||
112 | NGbzero_done: | ||
113 | wr %o5, 0x0, %asi | ||
114 | |||
115 | NGbzero_return: | ||
116 | retl | ||
117 | mov %o3, %o0 | ||
118 | .size NGbzero, .-NGbzero | ||
119 | .size NGmemset, .-NGmemset | ||
120 | |||
121 | .globl NGclear_user | ||
122 | .type NGclear_user, #function | ||
123 | NGclear_user: /* %o0=buf, %o1=len */ | ||
124 | rd %asi, %o5 | ||
125 | brz,pn %o1, NGbzero_done | ||
126 | clr %o3 | ||
127 | cmp %o5, ASI_AIUS | ||
128 | bne,pn %icc, NGbzero | ||
129 | clr %o2 | ||
130 | mov ASI_BLK_INIT_QUAD_LDD_AIUS, %g7 | ||
131 | ba,pt %xcc, NGbzero_from_clear_user | ||
132 | mov ASI_AIUS, %o4 | ||
133 | .size NGclear_user, .-NGclear_user | ||
134 | |||
135 | #define BRANCH_ALWAYS 0x10680000 | ||
136 | #define NOP 0x01000000 | ||
137 | #define NG_DO_PATCH(OLD, NEW) \ | ||
138 | sethi %hi(NEW), %g1; \ | ||
139 | or %g1, %lo(NEW), %g1; \ | ||
140 | sethi %hi(OLD), %g2; \ | ||
141 | or %g2, %lo(OLD), %g2; \ | ||
142 | sub %g1, %g2, %g1; \ | ||
143 | sethi %hi(BRANCH_ALWAYS), %g3; \ | ||
144 | sll %g1, 11, %g1; \ | ||
145 | srl %g1, 11 + 2, %g1; \ | ||
146 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | ||
147 | or %g3, %g1, %g3; \ | ||
148 | stw %g3, [%g2]; \ | ||
149 | sethi %hi(NOP), %g3; \ | ||
150 | or %g3, %lo(NOP), %g3; \ | ||
151 | stw %g3, [%g2 + 0x4]; \ | ||
152 | flush %g2; | ||
153 | |||
154 | .globl niagara_patch_bzero | ||
155 | .type niagara_patch_bzero,#function | ||
156 | niagara_patch_bzero: | ||
157 | NG_DO_PATCH(memset, NGmemset) | ||
158 | NG_DO_PATCH(__bzero, NGbzero) | ||
159 | NG_DO_PATCH(__clear_user, NGclear_user) | ||
160 | NG_DO_PATCH(tsb_init, NGtsb_init) | ||
161 | retl | ||
162 | nop | ||
163 | .size niagara_patch_bzero,.-niagara_patch_bzero | ||
diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S new file mode 100644 index 000000000000..2d93456f76dd --- /dev/null +++ b/arch/sparc64/lib/NGcopy_from_user.S | |||
@@ -0,0 +1,37 @@ | |||
1 | /* NGcopy_from_user.S: Niagara optimized copy from userspace. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #define EX_LD(x) \ | ||
7 | 98: x; \ | ||
8 | .section .fixup; \ | ||
9 | .align 4; \ | ||
10 | 99: wr %g0, ASI_AIUS, %asi;\ | ||
11 | retl; \ | ||
12 | mov 1, %o0; \ | ||
13 | .section __ex_table,"a";\ | ||
14 | .align 4; \ | ||
15 | .word 98b, 99b; \ | ||
16 | .text; \ | ||
17 | .align 4; | ||
18 | |||
19 | #ifndef ASI_AIUS | ||
20 | #define ASI_AIUS 0x11 | ||
21 | #endif | ||
22 | |||
23 | #define FUNC_NAME NGcopy_from_user | ||
24 | #define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest | ||
25 | #define LOAD_TWIN(addr_reg,dest0,dest1) \ | ||
26 | ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0 | ||
27 | #define EX_RETVAL(x) 0 | ||
28 | |||
29 | #ifdef __KERNEL__ | ||
30 | #define PREAMBLE \ | ||
31 | rd %asi, %g1; \ | ||
32 | cmp %g1, ASI_AIUS; \ | ||
33 | bne,pn %icc, memcpy_user_stub; \ | ||
34 | nop | ||
35 | #endif | ||
36 | |||
37 | #include "NGmemcpy.S" | ||
diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S new file mode 100644 index 000000000000..34112d5054ef --- /dev/null +++ b/arch/sparc64/lib/NGcopy_to_user.S | |||
@@ -0,0 +1,40 @@ | |||
1 | /* NGcopy_to_user.S: Niagara optimized copy to userspace. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #define EX_ST(x) \ | ||
7 | 98: x; \ | ||
8 | .section .fixup; \ | ||
9 | .align 4; \ | ||
10 | 99: wr %g0, ASI_AIUS, %asi;\ | ||
11 | retl; \ | ||
12 | mov 1, %o0; \ | ||
13 | .section __ex_table,"a";\ | ||
14 | .align 4; \ | ||
15 | .word 98b, 99b; \ | ||
16 | .text; \ | ||
17 | .align 4; | ||
18 | |||
19 | #ifndef ASI_AIUS | ||
20 | #define ASI_AIUS 0x11 | ||
21 | #endif | ||
22 | |||
23 | #define FUNC_NAME NGcopy_to_user | ||
24 | #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS | ||
25 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS | ||
26 | #define EX_RETVAL(x) 0 | ||
27 | |||
28 | #ifdef __KERNEL__ | ||
29 | /* Writing to %asi is _expensive_ so we hardcode it. | ||
30 | * Reading %asi to check for KERNEL_DS is comparatively | ||
31 | * cheap. | ||
32 | */ | ||
33 | #define PREAMBLE \ | ||
34 | rd %asi, %g1; \ | ||
35 | cmp %g1, ASI_AIUS; \ | ||
36 | bne,pn %icc, memcpy_user_stub; \ | ||
37 | nop | ||
38 | #endif | ||
39 | |||
40 | #include "NGmemcpy.S" | ||
diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S new file mode 100644 index 000000000000..8e522b3dc095 --- /dev/null +++ b/arch/sparc64/lib/NGmemcpy.S | |||
@@ -0,0 +1,368 @@ | |||
1 | /* NGmemcpy.S: Niagara optimized memcpy. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | #include <asm/asi.h> | ||
8 | #include <asm/thread_info.h> | ||
9 | #define GLOBAL_SPARE %g7 | ||
10 | #define RESTORE_ASI(TMP) \ | ||
11 | ldub [%g6 + TI_CURRENT_DS], TMP; \ | ||
12 | wr TMP, 0x0, %asi; | ||
13 | #else | ||
14 | #define GLOBAL_SPARE %g5 | ||
15 | #define RESTORE_ASI(TMP) \ | ||
16 | wr %g0, ASI_PNF, %asi | ||
17 | #endif | ||
18 | |||
19 | #ifndef STORE_ASI | ||
20 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P | ||
21 | #endif | ||
22 | |||
23 | #ifndef EX_LD | ||
24 | #define EX_LD(x) x | ||
25 | #endif | ||
26 | |||
27 | #ifndef EX_ST | ||
28 | #define EX_ST(x) x | ||
29 | #endif | ||
30 | |||
31 | #ifndef EX_RETVAL | ||
32 | #define EX_RETVAL(x) x | ||
33 | #endif | ||
34 | |||
35 | #ifndef LOAD | ||
36 | #ifndef MEMCPY_DEBUG | ||
37 | #define LOAD(type,addr,dest) type [addr], dest | ||
38 | #else | ||
39 | #define LOAD(type,addr,dest) type##a [addr] 0x80, dest | ||
40 | #endif | ||
41 | #endif | ||
42 | |||
43 | #ifndef LOAD_TWIN | ||
44 | #define LOAD_TWIN(addr_reg,dest0,dest1) \ | ||
45 | ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_P, dest0 | ||
46 | #endif | ||
47 | |||
48 | #ifndef STORE | ||
49 | #define STORE(type,src,addr) type src, [addr] | ||
50 | #endif | ||
51 | |||
52 | #ifndef STORE_INIT | ||
53 | #define STORE_INIT(src,addr) stxa src, [addr] %asi | ||
54 | #endif | ||
55 | |||
56 | #ifndef FUNC_NAME | ||
57 | #define FUNC_NAME NGmemcpy | ||
58 | #endif | ||
59 | |||
60 | #ifndef PREAMBLE | ||
61 | #define PREAMBLE | ||
62 | #endif | ||
63 | |||
64 | #ifndef XCC | ||
65 | #define XCC xcc | ||
66 | #endif | ||
67 | |||
68 | .register %g2,#scratch | ||
69 | .register %g3,#scratch | ||
70 | |||
71 | .text | ||
72 | .align 64 | ||
73 | |||
74 | .globl FUNC_NAME | ||
75 | .type FUNC_NAME,#function | ||
76 | FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | ||
77 | srlx %o2, 31, %g2 | ||
78 | cmp %g2, 0 | ||
79 | tne %xcc, 5 | ||
80 | PREAMBLE | ||
81 | mov %o0, GLOBAL_SPARE | ||
82 | cmp %o2, 0 | ||
83 | be,pn %XCC, 85f | ||
84 | or %o0, %o1, %o3 | ||
85 | cmp %o2, 16 | ||
86 | blu,a,pn %XCC, 80f | ||
87 | or %o3, %o2, %o3 | ||
88 | |||
89 | /* 2 blocks (128 bytes) is the minimum we can do the block | ||
90 | * copy with. We need to ensure that we'll iterate at least | ||
91 | * once in the block copy loop. At worst we'll need to align | ||
92 | * the destination to a 64-byte boundary which can chew up | ||
93 | * to (64 - 1) bytes from the length before we perform the | ||
94 | * block copy loop. | ||
95 | */ | ||
96 | cmp %o2, (2 * 64) | ||
97 | blu,pt %XCC, 70f | ||
98 | andcc %o3, 0x7, %g0 | ||
99 | |||
100 | /* %o0: dst | ||
101 | * %o1: src | ||
102 | * %o2: len (known to be >= 128) | ||
103 | * | ||
104 | * The block copy loops will use %o4/%o5,%g2/%g3 as | ||
105 | * temporaries while copying the data. | ||
106 | */ | ||
107 | |||
108 | LOAD(prefetch, %o1, #one_read) | ||
109 | wr %g0, STORE_ASI, %asi | ||
110 | |||
111 | /* Align destination on 64-byte boundary. */ | ||
112 | andcc %o0, (64 - 1), %o4 | ||
113 | be,pt %XCC, 2f | ||
114 | sub %o4, 64, %o4 | ||
115 | sub %g0, %o4, %o4 ! bytes to align dst | ||
116 | sub %o2, %o4, %o2 | ||
117 | 1: subcc %o4, 1, %o4 | ||
118 | EX_LD(LOAD(ldub, %o1, %g1)) | ||
119 | EX_ST(STORE(stb, %g1, %o0)) | ||
120 | add %o1, 1, %o1 | ||
121 | bne,pt %XCC, 1b | ||
122 | add %o0, 1, %o0 | ||
123 | |||
124 | /* If the source is on a 16-byte boundary we can do | ||
125 | * the direct block copy loop. If it is 8-byte aligned | ||
126 | * we can do the 16-byte loads offset by -8 bytes and the | ||
127 | * init stores offset by one register. | ||
128 | * | ||
129 | * If the source is not even 8-byte aligned, we need to do | ||
130 | * shifting and masking (basically integer faligndata). | ||
131 | * | ||
132 | * The careful bit with init stores is that if we store | ||
133 | * to any part of the cache line we have to store the whole | ||
134 | * cacheline else we can end up with corrupt L2 cache line | ||
135 | * contents. Since the loop works on 64-bytes of 64-byte | ||
136 | * aligned store data at a time, this is easy to ensure. | ||
137 | */ | ||
138 | 2: | ||
139 | andcc %o1, (16 - 1), %o4 | ||
140 | andn %o2, (64 - 1), %g1 ! block copy loop iterator | ||
141 | sub %o2, %g1, %o2 ! final sub-block copy bytes | ||
142 | be,pt %XCC, 50f | ||
143 | cmp %o4, 8 | ||
144 | be,a,pt %XCC, 10f | ||
145 | sub %o1, 0x8, %o1 | ||
146 | |||
147 | /* Neither 8-byte nor 16-byte aligned, shift and mask. */ | ||
148 | mov %g1, %o4 | ||
149 | and %o1, 0x7, %g1 | ||
150 | sll %g1, 3, %g1 | ||
151 | mov 64, %o3 | ||
152 | andn %o1, 0x7, %o1 | ||
153 | EX_LD(LOAD(ldx, %o1, %g2)) | ||
154 | sub %o3, %g1, %o3 | ||
155 | sllx %g2, %g1, %g2 | ||
156 | |||
157 | #define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\ | ||
158 | EX_LD(LOAD(ldx, SRC, TMP1)); \ | ||
159 | srlx TMP1, PRE_SHIFT, TMP2; \ | ||
160 | or TMP2, PRE_VAL, TMP2; \ | ||
161 | EX_ST(STORE_INIT(TMP2, DST)); \ | ||
162 | sllx TMP1, POST_SHIFT, PRE_VAL; | ||
163 | |||
164 | 1: add %o1, 0x8, %o1 | ||
165 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00) | ||
166 | add %o1, 0x8, %o1 | ||
167 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08) | ||
168 | add %o1, 0x8, %o1 | ||
169 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10) | ||
170 | add %o1, 0x8, %o1 | ||
171 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18) | ||
172 | add %o1, 32, %o1 | ||
173 | LOAD(prefetch, %o1, #one_read) | ||
174 | sub %o1, 32 - 8, %o1 | ||
175 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20) | ||
176 | add %o1, 8, %o1 | ||
177 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28) | ||
178 | add %o1, 8, %o1 | ||
179 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30) | ||
180 | add %o1, 8, %o1 | ||
181 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38) | ||
182 | subcc %o4, 64, %o4 | ||
183 | bne,pt %XCC, 1b | ||
184 | add %o0, 64, %o0 | ||
185 | |||
186 | #undef SWIVEL_ONE_DWORD | ||
187 | |||
188 | srl %g1, 3, %g1 | ||
189 | ba,pt %XCC, 60f | ||
190 | add %o1, %g1, %o1 | ||
191 | |||
192 | 10: /* Destination is 64-byte aligned, source was only 8-byte | ||
193 | * aligned but it has been subtracted by 8 and we perform | ||
194 | * one twin load ahead, then add 8 back into source when | ||
195 | * we finish the loop. | ||
196 | */ | ||
197 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
198 | 1: add %o1, 16, %o1 | ||
199 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | ||
200 | add %o1, 16 + 32, %o1 | ||
201 | LOAD(prefetch, %o1, #one_read) | ||
202 | sub %o1, 32, %o1 | ||
203 | EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line | ||
204 | EX_ST(STORE_INIT(%g2, %o0 + 0x08)) | ||
205 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
206 | add %o1, 16, %o1 | ||
207 | EX_ST(STORE_INIT(%g3, %o0 + 0x10)) | ||
208 | EX_ST(STORE_INIT(%o4, %o0 + 0x18)) | ||
209 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | ||
210 | add %o1, 16, %o1 | ||
211 | EX_ST(STORE_INIT(%o5, %o0 + 0x20)) | ||
212 | EX_ST(STORE_INIT(%g2, %o0 + 0x28)) | ||
213 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
214 | EX_ST(STORE_INIT(%g3, %o0 + 0x30)) | ||
215 | EX_ST(STORE_INIT(%o4, %o0 + 0x38)) | ||
216 | subcc %g1, 64, %g1 | ||
217 | bne,pt %XCC, 1b | ||
218 | add %o0, 64, %o0 | ||
219 | |||
220 | ba,pt %XCC, 60f | ||
221 | add %o1, 0x8, %o1 | ||
222 | |||
223 | 50: /* Destination is 64-byte aligned, and source is 16-byte | ||
224 | * aligned. | ||
225 | */ | ||
226 | 1: EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
227 | add %o1, 16, %o1 | ||
228 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | ||
229 | add %o1, 16 + 32, %o1 | ||
230 | LOAD(prefetch, %o1, #one_read) | ||
231 | sub %o1, 32, %o1 | ||
232 | EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line | ||
233 | EX_ST(STORE_INIT(%o5, %o0 + 0x08)) | ||
234 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
235 | add %o1, 16, %o1 | ||
236 | EX_ST(STORE_INIT(%g2, %o0 + 0x10)) | ||
237 | EX_ST(STORE_INIT(%g3, %o0 + 0x18)) | ||
238 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | ||
239 | add %o1, 16, %o1 | ||
240 | EX_ST(STORE_INIT(%o4, %o0 + 0x20)) | ||
241 | EX_ST(STORE_INIT(%o5, %o0 + 0x28)) | ||
242 | EX_ST(STORE_INIT(%g2, %o0 + 0x30)) | ||
243 | EX_ST(STORE_INIT(%g3, %o0 + 0x38)) | ||
244 | subcc %g1, 64, %g1 | ||
245 | bne,pt %XCC, 1b | ||
246 | add %o0, 64, %o0 | ||
247 | /* fall through */ | ||
248 | |||
249 | 60: | ||
250 | /* %o2 contains any final bytes still needed to be copied | ||
251 | * over. If anything is left, we copy it one byte at a time. | ||
252 | */ | ||
253 | RESTORE_ASI(%o3) | ||
254 | brz,pt %o2, 85f | ||
255 | sub %o0, %o1, %o3 | ||
256 | ba,a,pt %XCC, 90f | ||
257 | |||
258 | .align 64 | ||
259 | 70: /* 16 < len <= 64 */ | ||
260 | bne,pn %XCC, 75f | ||
261 | sub %o0, %o1, %o3 | ||
262 | |||
263 | 72: | ||
264 | andn %o2, 0xf, %o4 | ||
265 | and %o2, 0xf, %o2 | ||
266 | 1: subcc %o4, 0x10, %o4 | ||
267 | EX_LD(LOAD(ldx, %o1, %o5)) | ||
268 | add %o1, 0x08, %o1 | ||
269 | EX_LD(LOAD(ldx, %o1, %g1)) | ||
270 | sub %o1, 0x08, %o1 | ||
271 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | ||
272 | add %o1, 0x8, %o1 | ||
273 | EX_ST(STORE(stx, %g1, %o1 + %o3)) | ||
274 | bgu,pt %XCC, 1b | ||
275 | add %o1, 0x8, %o1 | ||
276 | 73: andcc %o2, 0x8, %g0 | ||
277 | be,pt %XCC, 1f | ||
278 | nop | ||
279 | sub %o2, 0x8, %o2 | ||
280 | EX_LD(LOAD(ldx, %o1, %o5)) | ||
281 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | ||
282 | add %o1, 0x8, %o1 | ||
283 | 1: andcc %o2, 0x4, %g0 | ||
284 | be,pt %XCC, 1f | ||
285 | nop | ||
286 | sub %o2, 0x4, %o2 | ||
287 | EX_LD(LOAD(lduw, %o1, %o5)) | ||
288 | EX_ST(STORE(stw, %o5, %o1 + %o3)) | ||
289 | add %o1, 0x4, %o1 | ||
290 | 1: cmp %o2, 0 | ||
291 | be,pt %XCC, 85f | ||
292 | nop | ||
293 | ba,pt %xcc, 90f | ||
294 | nop | ||
295 | |||
296 | 75: | ||
297 | andcc %o0, 0x7, %g1 | ||
298 | sub %g1, 0x8, %g1 | ||
299 | be,pn %icc, 2f | ||
300 | sub %g0, %g1, %g1 | ||
301 | sub %o2, %g1, %o2 | ||
302 | |||
303 | 1: subcc %g1, 1, %g1 | ||
304 | EX_LD(LOAD(ldub, %o1, %o5)) | ||
305 | EX_ST(STORE(stb, %o5, %o1 + %o3)) | ||
306 | bgu,pt %icc, 1b | ||
307 | add %o1, 1, %o1 | ||
308 | |||
309 | 2: add %o1, %o3, %o0 | ||
310 | andcc %o1, 0x7, %g1 | ||
311 | bne,pt %icc, 8f | ||
312 | sll %g1, 3, %g1 | ||
313 | |||
314 | cmp %o2, 16 | ||
315 | bgeu,pt %icc, 72b | ||
316 | nop | ||
317 | ba,a,pt %xcc, 73b | ||
318 | |||
319 | 8: mov 64, %o3 | ||
320 | andn %o1, 0x7, %o1 | ||
321 | EX_LD(LOAD(ldx, %o1, %g2)) | ||
322 | sub %o3, %g1, %o3 | ||
323 | andn %o2, 0x7, %o4 | ||
324 | sllx %g2, %g1, %g2 | ||
325 | 1: add %o1, 0x8, %o1 | ||
326 | EX_LD(LOAD(ldx, %o1, %g3)) | ||
327 | subcc %o4, 0x8, %o4 | ||
328 | srlx %g3, %o3, %o5 | ||
329 | or %o5, %g2, %o5 | ||
330 | EX_ST(STORE(stx, %o5, %o0)) | ||
331 | add %o0, 0x8, %o0 | ||
332 | bgu,pt %icc, 1b | ||
333 | sllx %g3, %g1, %g2 | ||
334 | |||
335 | srl %g1, 3, %g1 | ||
336 | andcc %o2, 0x7, %o2 | ||
337 | be,pn %icc, 85f | ||
338 | add %o1, %g1, %o1 | ||
339 | ba,pt %xcc, 90f | ||
340 | sub %o0, %o1, %o3 | ||
341 | |||
342 | .align 64 | ||
343 | 80: /* 0 < len <= 16 */ | ||
344 | andcc %o3, 0x3, %g0 | ||
345 | bne,pn %XCC, 90f | ||
346 | sub %o0, %o1, %o3 | ||
347 | |||
348 | 1: | ||
349 | subcc %o2, 4, %o2 | ||
350 | EX_LD(LOAD(lduw, %o1, %g1)) | ||
351 | EX_ST(STORE(stw, %g1, %o1 + %o3)) | ||
352 | bgu,pt %XCC, 1b | ||
353 | add %o1, 4, %o1 | ||
354 | |||
355 | 85: retl | ||
356 | mov EX_RETVAL(GLOBAL_SPARE), %o0 | ||
357 | |||
358 | .align 32 | ||
359 | 90: | ||
360 | subcc %o2, 1, %o2 | ||
361 | EX_LD(LOAD(ldub, %o1, %g1)) | ||
362 | EX_ST(STORE(stb, %g1, %o1 + %o3)) | ||
363 | bgu,pt %XCC, 90b | ||
364 | add %o1, 1, %o1 | ||
365 | retl | ||
366 | mov EX_RETVAL(GLOBAL_SPARE), %o0 | ||
367 | |||
368 | .size FUNC_NAME, .-FUNC_NAME | ||
diff --git a/arch/sparc64/lib/NGpage.S b/arch/sparc64/lib/NGpage.S new file mode 100644 index 000000000000..7d7c3bb8dcbf --- /dev/null +++ b/arch/sparc64/lib/NGpage.S | |||
@@ -0,0 +1,96 @@ | |||
1 | /* NGpage.S: Niagara optimize clear and copy page. | ||
2 | * | ||
3 | * Copyright (C) 2006 (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <asm/asi.h> | ||
7 | #include <asm/page.h> | ||
8 | |||
9 | .text | ||
10 | .align 32 | ||
11 | |||
12 | /* This is heavily simplified from the sun4u variants | ||
13 | * because Niagara does not have any D-cache aliasing issues | ||
14 | * and also we don't need to use the FPU in order to implement | ||
15 | * an optimal page copy/clear. | ||
16 | */ | ||
17 | |||
18 | NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ | ||
19 | prefetch [%o1 + 0x00], #one_read | ||
20 | mov 8, %g1 | ||
21 | mov 16, %g2 | ||
22 | mov 24, %g3 | ||
23 | set PAGE_SIZE, %g7 | ||
24 | |||
25 | 1: ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2 | ||
26 | ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4 | ||
27 | prefetch [%o1 + 0x40], #one_read | ||
28 | add %o1, 32, %o1 | ||
29 | stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P | ||
30 | stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P | ||
31 | ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2 | ||
32 | stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P | ||
33 | stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P | ||
34 | ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4 | ||
35 | add %o1, 32, %o1 | ||
36 | add %o0, 32, %o0 | ||
37 | stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P | ||
38 | stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P | ||
39 | stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P | ||
40 | stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P | ||
41 | subcc %g7, 64, %g7 | ||
42 | bne,pt %xcc, 1b | ||
43 | add %o0, 32, %o0 | ||
44 | retl | ||
45 | nop | ||
46 | |||
47 | NGclear_page: /* %o0=dest */ | ||
48 | NGclear_user_page: /* %o0=dest, %o1=vaddr */ | ||
49 | mov 8, %g1 | ||
50 | mov 16, %g2 | ||
51 | mov 24, %g3 | ||
52 | set PAGE_SIZE, %g7 | ||
53 | |||
54 | 1: stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P | ||
55 | stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P | ||
56 | stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P | ||
57 | stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P | ||
58 | add %o0, 32, %o0 | ||
59 | stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P | ||
60 | stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P | ||
61 | stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P | ||
62 | stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P | ||
63 | subcc %g7, 64, %g7 | ||
64 | bne,pt %xcc, 1b | ||
65 | add %o0, 32, %o0 | ||
66 | retl | ||
67 | nop | ||
68 | |||
69 | #define BRANCH_ALWAYS 0x10680000 | ||
70 | #define NOP 0x01000000 | ||
71 | #define NG_DO_PATCH(OLD, NEW) \ | ||
72 | sethi %hi(NEW), %g1; \ | ||
73 | or %g1, %lo(NEW), %g1; \ | ||
74 | sethi %hi(OLD), %g2; \ | ||
75 | or %g2, %lo(OLD), %g2; \ | ||
76 | sub %g1, %g2, %g1; \ | ||
77 | sethi %hi(BRANCH_ALWAYS), %g3; \ | ||
78 | sll %g1, 11, %g1; \ | ||
79 | srl %g1, 11 + 2, %g1; \ | ||
80 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | ||
81 | or %g3, %g1, %g3; \ | ||
82 | stw %g3, [%g2]; \ | ||
83 | sethi %hi(NOP), %g3; \ | ||
84 | or %g3, %lo(NOP), %g3; \ | ||
85 | stw %g3, [%g2 + 0x4]; \ | ||
86 | flush %g2; | ||
87 | |||
88 | .globl niagara_patch_pageops | ||
89 | .type niagara_patch_pageops,#function | ||
90 | niagara_patch_pageops: | ||
91 | NG_DO_PATCH(copy_user_page, NGcopy_user_page) | ||
92 | NG_DO_PATCH(_clear_page, NGclear_page) | ||
93 | NG_DO_PATCH(clear_user_page, NGclear_user_page) | ||
94 | retl | ||
95 | nop | ||
96 | .size niagara_patch_pageops,.-niagara_patch_pageops | ||
diff --git a/arch/sparc64/lib/NGpatch.S b/arch/sparc64/lib/NGpatch.S new file mode 100644 index 000000000000..3b0674fc3366 --- /dev/null +++ b/arch/sparc64/lib/NGpatch.S | |||
@@ -0,0 +1,33 @@ | |||
1 | /* NGpatch.S: Patch Ultra-I routines with Niagara variant. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #define BRANCH_ALWAYS 0x10680000 | ||
7 | #define NOP 0x01000000 | ||
8 | #define NG_DO_PATCH(OLD, NEW) \ | ||
9 | sethi %hi(NEW), %g1; \ | ||
10 | or %g1, %lo(NEW), %g1; \ | ||
11 | sethi %hi(OLD), %g2; \ | ||
12 | or %g2, %lo(OLD), %g2; \ | ||
13 | sub %g1, %g2, %g1; \ | ||
14 | sethi %hi(BRANCH_ALWAYS), %g3; \ | ||
15 | sll %g1, 11, %g1; \ | ||
16 | srl %g1, 11 + 2, %g1; \ | ||
17 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | ||
18 | or %g3, %g1, %g3; \ | ||
19 | stw %g3, [%g2]; \ | ||
20 | sethi %hi(NOP), %g3; \ | ||
21 | or %g3, %lo(NOP), %g3; \ | ||
22 | stw %g3, [%g2 + 0x4]; \ | ||
23 | flush %g2; | ||
24 | |||
25 | .globl niagara_patch_copyops | ||
26 | .type niagara_patch_copyops,#function | ||
27 | niagara_patch_copyops: | ||
28 | NG_DO_PATCH(memcpy, NGmemcpy) | ||
29 | NG_DO_PATCH(___copy_from_user, NGcopy_from_user) | ||
30 | NG_DO_PATCH(___copy_to_user, NGcopy_to_user) | ||
31 | retl | ||
32 | nop | ||
33 | .size niagara_patch_copyops,.-niagara_patch_copyops | ||
diff --git a/arch/sparc64/lib/U3patch.S b/arch/sparc64/lib/U3patch.S index e2b6c5e4b95a..ecc302619a6e 100644 --- a/arch/sparc64/lib/U3patch.S +++ b/arch/sparc64/lib/U3patch.S | |||
@@ -12,7 +12,8 @@ | |||
12 | or %g2, %lo(OLD), %g2; \ | 12 | or %g2, %lo(OLD), %g2; \ |
13 | sub %g1, %g2, %g1; \ | 13 | sub %g1, %g2, %g1; \ |
14 | sethi %hi(BRANCH_ALWAYS), %g3; \ | 14 | sethi %hi(BRANCH_ALWAYS), %g3; \ |
15 | srl %g1, 2, %g1; \ | 15 | sll %g1, 11, %g1; \ |
16 | srl %g1, 11 + 2, %g1; \ | ||
16 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | 17 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ |
17 | or %g3, %g1, %g3; \ | 18 | or %g3, %g1, %g3; \ |
18 | stw %g3, [%g2]; \ | 19 | stw %g3, [%g2]; \ |
diff --git a/arch/sparc64/lib/bzero.S b/arch/sparc64/lib/bzero.S index 1d2abcfa4e52..c7bbae8c590f 100644 --- a/arch/sparc64/lib/bzero.S +++ b/arch/sparc64/lib/bzero.S | |||
@@ -98,12 +98,12 @@ __bzero_done: | |||
98 | .text; \ | 98 | .text; \ |
99 | .align 4; | 99 | .align 4; |
100 | 100 | ||
101 | .globl __bzero_noasi | 101 | .globl __clear_user |
102 | .type __bzero_noasi, #function | 102 | .type __clear_user, #function |
103 | __bzero_noasi: /* %o0=buf, %o1=len */ | 103 | __clear_user: /* %o0=buf, %o1=len */ |
104 | brz,pn %o1, __bzero_noasi_done | 104 | brz,pn %o1, __clear_user_done |
105 | cmp %o1, 16 | 105 | cmp %o1, 16 |
106 | bl,pn %icc, __bzero_noasi_tiny | 106 | bl,pn %icc, __clear_user_tiny |
107 | EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes) | 107 | EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes) |
108 | andcc %o0, 0x3, %g0 | 108 | andcc %o0, 0x3, %g0 |
109 | be,pt %icc, 2f | 109 | be,pt %icc, 2f |
@@ -145,14 +145,14 @@ __bzero_noasi: /* %o0=buf, %o1=len */ | |||
145 | subcc %g1, 8, %g1 | 145 | subcc %g1, 8, %g1 |
146 | bne,pt %icc, 5b | 146 | bne,pt %icc, 5b |
147 | add %o0, 0x8, %o0 | 147 | add %o0, 0x8, %o0 |
148 | 6: brz,pt %o1, __bzero_noasi_done | 148 | 6: brz,pt %o1, __clear_user_done |
149 | nop | 149 | nop |
150 | __bzero_noasi_tiny: | 150 | __clear_user_tiny: |
151 | 1: EX_ST(stba %g0, [%o0 + 0x00] %asi) | 151 | 1: EX_ST(stba %g0, [%o0 + 0x00] %asi) |
152 | subcc %o1, 1, %o1 | 152 | subcc %o1, 1, %o1 |
153 | bne,pt %icc, 1b | 153 | bne,pt %icc, 1b |
154 | add %o0, 1, %o0 | 154 | add %o0, 1, %o0 |
155 | __bzero_noasi_done: | 155 | __clear_user_done: |
156 | retl | 156 | retl |
157 | clr %o0 | 157 | clr %o0 |
158 | .size __bzero_noasi, .-__bzero_noasi | 158 | .size __clear_user, .-__clear_user |
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S index b59884ef051d..77e531f6c2a7 100644 --- a/arch/sparc64/lib/clear_page.S +++ b/arch/sparc64/lib/clear_page.S | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
10 | #include <asm/pgtable.h> | 10 | #include <asm/pgtable.h> |
11 | #include <asm/spitfire.h> | 11 | #include <asm/spitfire.h> |
12 | #include <asm/head.h> | ||
12 | 13 | ||
13 | /* What we used to do was lock a TLB entry into a specific | 14 | /* What we used to do was lock a TLB entry into a specific |
14 | * TLB slot, clear the page with interrupts disabled, then | 15 | * TLB slot, clear the page with interrupts disabled, then |
@@ -22,9 +23,6 @@ | |||
22 | * disable preemption during the clear. | 23 | * disable preemption during the clear. |
23 | */ | 24 | */ |
24 | 25 | ||
25 | #define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS) | ||
26 | #define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W) | ||
27 | |||
28 | .text | 26 | .text |
29 | 27 | ||
30 | .globl _clear_page | 28 | .globl _clear_page |
@@ -43,12 +41,11 @@ clear_user_page: /* %o0=dest, %o1=vaddr */ | |||
43 | sethi %hi(PAGE_SIZE), %o4 | 41 | sethi %hi(PAGE_SIZE), %o4 |
44 | 42 | ||
45 | sllx %g2, 32, %g2 | 43 | sllx %g2, 32, %g2 |
46 | sethi %uhi(TTE_BITS_TOP), %g3 | 44 | sethi %hi(PAGE_KERNEL_LOCKED), %g3 |
47 | 45 | ||
48 | sllx %g3, 32, %g3 | 46 | ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 |
49 | sub %o0, %g2, %g1 ! paddr | 47 | sub %o0, %g2, %g1 ! paddr |
50 | 48 | ||
51 | or %g3, TTE_BITS_BOTTOM, %g3 | ||
52 | and %o1, %o4, %o0 ! vaddr D-cache alias bit | 49 | and %o1, %o4, %o0 ! vaddr D-cache alias bit |
53 | 50 | ||
54 | or %g1, %g3, %g1 ! TTE data | 51 | or %g1, %g3, %g1 ! TTE data |
@@ -66,7 +63,8 @@ clear_user_page: /* %o0=dest, %o1=vaddr */ | |||
66 | wrpr %o4, PSTATE_IE, %pstate | 63 | wrpr %o4, PSTATE_IE, %pstate |
67 | stxa %o0, [%g3] ASI_DMMU | 64 | stxa %o0, [%g3] ASI_DMMU |
68 | stxa %g1, [%g0] ASI_DTLB_DATA_IN | 65 | stxa %g1, [%g0] ASI_DTLB_DATA_IN |
69 | flush %g6 | 66 | sethi %hi(KERNBASE), %g1 |
67 | flush %g1 | ||
70 | wrpr %o4, 0x0, %pstate | 68 | wrpr %o4, 0x0, %pstate |
71 | 69 | ||
72 | mov 1, %o4 | 70 | mov 1, %o4 |
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S index feebb14fd27a..37460666a5c3 100644 --- a/arch/sparc64/lib/copy_page.S +++ b/arch/sparc64/lib/copy_page.S | |||
@@ -23,8 +23,6 @@ | |||
23 | * disable preemption during the clear. | 23 | * disable preemption during the clear. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS) | ||
27 | #define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W) | ||
28 | #define DCACHE_SIZE (PAGE_SIZE * 2) | 26 | #define DCACHE_SIZE (PAGE_SIZE * 2) |
29 | 27 | ||
30 | #if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19) | 28 | #if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19) |
@@ -52,13 +50,12 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ | |||
52 | sethi %hi(PAGE_SIZE), %o3 | 50 | sethi %hi(PAGE_SIZE), %o3 |
53 | 51 | ||
54 | sllx %g2, 32, %g2 | 52 | sllx %g2, 32, %g2 |
55 | sethi %uhi(TTE_BITS_TOP), %g3 | 53 | sethi %hi(PAGE_KERNEL_LOCKED), %g3 |
56 | 54 | ||
57 | sllx %g3, 32, %g3 | 55 | ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 |
58 | sub %o0, %g2, %g1 ! dest paddr | 56 | sub %o0, %g2, %g1 ! dest paddr |
59 | 57 | ||
60 | sub %o1, %g2, %g2 ! src paddr | 58 | sub %o1, %g2, %g2 ! src paddr |
61 | or %g3, TTE_BITS_BOTTOM, %g3 | ||
62 | 59 | ||
63 | and %o2, %o3, %o0 ! vaddr D-cache alias bit | 60 | and %o2, %o3, %o0 ! vaddr D-cache alias bit |
64 | or %g1, %g3, %g1 ! dest TTE data | 61 | or %g1, %g3, %g1 ! dest TTE data |
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c index e8808727617a..fb27e54a03ee 100644 --- a/arch/sparc64/lib/delay.c +++ b/arch/sparc64/lib/delay.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* delay.c: Delay loops for sparc64 | 1 | /* delay.c: Delay loops for sparc64 |
2 | * | 2 | * |
3 | * Copyright (C) 2004 David S. Miller <davem@redhat.com> | 3 | * Copyright (C) 2004, 2006 David S. Miller <davem@davemloft.net> |
4 | * | 4 | * |
5 | * Based heavily upon x86 variant which is: | 5 | * Based heavily upon x86 variant which is: |
6 | * Copyright (C) 1993 Linus Torvalds | 6 | * Copyright (C) 1993 Linus Torvalds |
@@ -8,19 +8,16 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include <asm/timer.h> | ||
11 | 12 | ||
12 | void __delay(unsigned long loops) | 13 | void __delay(unsigned long loops) |
13 | { | 14 | { |
14 | __asm__ __volatile__( | 15 | unsigned long bclock, now; |
15 | " b,pt %%xcc, 1f\n" | 16 | |
16 | " cmp %0, 0\n" | 17 | bclock = tick_ops->get_tick(); |
17 | " .align 32\n" | 18 | do { |
18 | "1:\n" | 19 | now = tick_ops->get_tick(); |
19 | " bne,pt %%xcc, 1b\n" | 20 | } while ((now-bclock) < loops); |
20 | " subcc %0, 1, %0\n" | ||
21 | : "=&r" (loops) | ||
22 | : "0" (loops) | ||
23 | : "cc"); | ||
24 | } | 21 | } |
25 | 22 | ||
26 | /* We used to multiply by HZ after shifting down by 32 bits | 23 | /* We used to multiply by HZ after shifting down by 32 bits |
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S index 4cd5d2be1ae1..a79c8888170d 100644 --- a/arch/sparc64/lib/xor.S +++ b/arch/sparc64/lib/xor.S | |||
@@ -2,9 +2,10 @@ | |||
2 | * arch/sparc64/lib/xor.S | 2 | * arch/sparc64/lib/xor.S |
3 | * | 3 | * |
4 | * High speed xor_block operation for RAID4/5 utilizing the | 4 | * High speed xor_block operation for RAID4/5 utilizing the |
5 | * UltraSparc Visual Instruction Set. | 5 | * UltraSparc Visual Instruction Set and Niagara store-init/twin-load. |
6 | * | 6 | * |
7 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) | 7 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) |
8 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
8 | */ | 9 | */ |
9 | 10 | ||
10 | #include <asm/visasm.h> | 11 | #include <asm/visasm.h> |
@@ -19,6 +20,8 @@ | |||
19 | */ | 20 | */ |
20 | .text | 21 | .text |
21 | .align 32 | 22 | .align 32 |
23 | |||
24 | /* VIS versions. */ | ||
22 | .globl xor_vis_2 | 25 | .globl xor_vis_2 |
23 | .type xor_vis_2,#function | 26 | .type xor_vis_2,#function |
24 | xor_vis_2: | 27 | xor_vis_2: |
@@ -352,3 +355,298 @@ xor_vis_5: | |||
352 | ret | 355 | ret |
353 | restore | 356 | restore |
354 | .size xor_vis_5, .-xor_vis_5 | 357 | .size xor_vis_5, .-xor_vis_5 |
358 | |||
359 | /* Niagara versions. */ | ||
360 | .globl xor_niagara_2 | ||
361 | .type xor_niagara_2,#function | ||
362 | xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */ | ||
363 | save %sp, -192, %sp | ||
364 | prefetch [%i1], #n_writes | ||
365 | prefetch [%i2], #one_read | ||
366 | rd %asi, %g7 | ||
367 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
368 | srlx %i0, 6, %g1 | ||
369 | mov %i1, %i0 | ||
370 | mov %i2, %i1 | ||
371 | 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src + 0x00 */ | ||
372 | ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src + 0x10 */ | ||
373 | ldda [%i1 + 0x20] %asi, %g2 /* %g2/%g3 = src + 0x20 */ | ||
374 | ldda [%i1 + 0x30] %asi, %l0 /* %l0/%l1 = src + 0x30 */ | ||
375 | prefetch [%i1 + 0x40], #one_read | ||
376 | ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ | ||
377 | ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ | ||
378 | ldda [%i0 + 0x20] %asi, %o4 /* %o4/%o5 = dest + 0x20 */ | ||
379 | ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ | ||
380 | prefetch [%i0 + 0x40], #n_writes | ||
381 | xor %o0, %i2, %o0 | ||
382 | xor %o1, %i3, %o1 | ||
383 | stxa %o0, [%i0 + 0x00] %asi | ||
384 | stxa %o1, [%i0 + 0x08] %asi | ||
385 | xor %o2, %i4, %o2 | ||
386 | xor %o3, %i5, %o3 | ||
387 | stxa %o2, [%i0 + 0x10] %asi | ||
388 | stxa %o3, [%i0 + 0x18] %asi | ||
389 | xor %o4, %g2, %o4 | ||
390 | xor %o5, %g3, %o5 | ||
391 | stxa %o4, [%i0 + 0x20] %asi | ||
392 | stxa %o5, [%i0 + 0x28] %asi | ||
393 | xor %l2, %l0, %l2 | ||
394 | xor %l3, %l1, %l3 | ||
395 | stxa %l2, [%i0 + 0x30] %asi | ||
396 | stxa %l3, [%i0 + 0x38] %asi | ||
397 | add %i0, 0x40, %i0 | ||
398 | subcc %g1, 1, %g1 | ||
399 | bne,pt %xcc, 1b | ||
400 | add %i1, 0x40, %i1 | ||
401 | membar #Sync | ||
402 | wr %g7, 0x0, %asi | ||
403 | ret | ||
404 | restore | ||
405 | .size xor_niagara_2, .-xor_niagara_2 | ||
406 | |||
407 | .globl xor_niagara_3 | ||
408 | .type xor_niagara_3,#function | ||
409 | xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ | ||
410 | save %sp, -192, %sp | ||
411 | prefetch [%i1], #n_writes | ||
412 | prefetch [%i2], #one_read | ||
413 | prefetch [%i3], #one_read | ||
414 | rd %asi, %g7 | ||
415 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
416 | srlx %i0, 6, %g1 | ||
417 | mov %i1, %i0 | ||
418 | mov %i2, %i1 | ||
419 | mov %i3, %l7 | ||
420 | 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ | ||
421 | ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src1 + 0x10 */ | ||
422 | ldda [%l7 + 0x00] %asi, %g2 /* %g2/%g3 = src2 + 0x00 */ | ||
423 | ldda [%l7 + 0x10] %asi, %l0 /* %l0/%l1 = src2 + 0x10 */ | ||
424 | ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ | ||
425 | ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ | ||
426 | xor %g2, %i2, %g2 | ||
427 | xor %g3, %i3, %g3 | ||
428 | xor %o0, %g2, %o0 | ||
429 | xor %o1, %g3, %o1 | ||
430 | stxa %o0, [%i0 + 0x00] %asi | ||
431 | stxa %o1, [%i0 + 0x08] %asi | ||
432 | ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ | ||
433 | ldda [%l7 + 0x20] %asi, %g2 /* %g2/%g3 = src2 + 0x20 */ | ||
434 | ldda [%i0 + 0x20] %asi, %o0 /* %o0/%o1 = dest + 0x20 */ | ||
435 | xor %l0, %i4, %l0 | ||
436 | xor %l1, %i5, %l1 | ||
437 | xor %o2, %l0, %o2 | ||
438 | xor %o3, %l1, %o3 | ||
439 | stxa %o2, [%i0 + 0x10] %asi | ||
440 | stxa %o3, [%i0 + 0x18] %asi | ||
441 | ldda [%i1 + 0x30] %asi, %i4 /* %i4/%i5 = src1 + 0x30 */ | ||
442 | ldda [%l7 + 0x30] %asi, %l0 /* %l0/%l1 = src2 + 0x30 */ | ||
443 | ldda [%i0 + 0x30] %asi, %o2 /* %o2/%o3 = dest + 0x30 */ | ||
444 | prefetch [%i1 + 0x40], #one_read | ||
445 | prefetch [%l7 + 0x40], #one_read | ||
446 | prefetch [%i0 + 0x40], #n_writes | ||
447 | xor %g2, %i2, %g2 | ||
448 | xor %g3, %i3, %g3 | ||
449 | xor %o0, %g2, %o0 | ||
450 | xor %o1, %g3, %o1 | ||
451 | stxa %o0, [%i0 + 0x20] %asi | ||
452 | stxa %o1, [%i0 + 0x28] %asi | ||
453 | xor %l0, %i4, %l0 | ||
454 | xor %l1, %i5, %l1 | ||
455 | xor %o2, %l0, %o2 | ||
456 | xor %o3, %l1, %o3 | ||
457 | stxa %o2, [%i0 + 0x30] %asi | ||
458 | stxa %o3, [%i0 + 0x38] %asi | ||
459 | add %i0, 0x40, %i0 | ||
460 | add %i1, 0x40, %i1 | ||
461 | subcc %g1, 1, %g1 | ||
462 | bne,pt %xcc, 1b | ||
463 | add %l7, 0x40, %l7 | ||
464 | membar #Sync | ||
465 | wr %g7, 0x0, %asi | ||
466 | ret | ||
467 | restore | ||
468 | .size xor_niagara_3, .-xor_niagara_3 | ||
469 | |||
470 | .globl xor_niagara_4 | ||
471 | .type xor_niagara_4,#function | ||
472 | xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ | ||
473 | save %sp, -192, %sp | ||
474 | prefetch [%i1], #n_writes | ||
475 | prefetch [%i2], #one_read | ||
476 | prefetch [%i3], #one_read | ||
477 | prefetch [%i4], #one_read | ||
478 | rd %asi, %g7 | ||
479 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
480 | srlx %i0, 6, %g1 | ||
481 | mov %i1, %i0 | ||
482 | mov %i2, %i1 | ||
483 | mov %i3, %l7 | ||
484 | mov %i4, %l6 | ||
485 | 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ | ||
486 | ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ | ||
487 | ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ | ||
488 | ldda [%i0 + 0x00] %asi, %l0 /* %l0/%l1 = dest + 0x00 */ | ||
489 | xor %i4, %i2, %i4 | ||
490 | xor %i5, %i3, %i5 | ||
491 | ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ | ||
492 | xor %g2, %i4, %g2 | ||
493 | xor %g3, %i5, %g3 | ||
494 | ldda [%i7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ | ||
495 | xor %l0, %g2, %l0 | ||
496 | xor %l1, %g3, %l1 | ||
497 | stxa %l0, [%i0 + 0x00] %asi | ||
498 | stxa %l1, [%i0 + 0x08] %asi | ||
499 | ldda [%i6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ | ||
500 | ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */ | ||
501 | |||
502 | xor %i4, %i2, %i4 | ||
503 | xor %i5, %i3, %i5 | ||
504 | ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ | ||
505 | xor %g2, %i4, %g2 | ||
506 | xor %g3, %i5, %g3 | ||
507 | ldda [%i7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ | ||
508 | xor %l0, %g2, %l0 | ||
509 | xor %l1, %g3, %l1 | ||
510 | stxa %l0, [%i0 + 0x10] %asi | ||
511 | stxa %l1, [%i0 + 0x18] %asi | ||
512 | ldda [%i6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ | ||
513 | ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */ | ||
514 | |||
515 | xor %i4, %i2, %i4 | ||
516 | xor %i5, %i3, %i5 | ||
517 | ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ | ||
518 | xor %g2, %i4, %g2 | ||
519 | xor %g3, %i5, %g3 | ||
520 | ldda [%i7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ | ||
521 | xor %l0, %g2, %l0 | ||
522 | xor %l1, %g3, %l1 | ||
523 | stxa %l0, [%i0 + 0x20] %asi | ||
524 | stxa %l1, [%i0 + 0x28] %asi | ||
525 | ldda [%i6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ | ||
526 | ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */ | ||
527 | |||
528 | prefetch [%i1 + 0x40], #one_read | ||
529 | prefetch [%l7 + 0x40], #one_read | ||
530 | prefetch [%l6 + 0x40], #one_read | ||
531 | prefetch [%i0 + 0x40], #n_writes | ||
532 | |||
533 | xor %i4, %i2, %i4 | ||
534 | xor %i5, %i3, %i5 | ||
535 | xor %g2, %i4, %g2 | ||
536 | xor %g3, %i5, %g3 | ||
537 | xor %l0, %g2, %l0 | ||
538 | xor %l1, %g3, %l1 | ||
539 | stxa %l0, [%i0 + 0x30] %asi | ||
540 | stxa %l1, [%i0 + 0x38] %asi | ||
541 | |||
542 | add %i0, 0x40, %i0 | ||
543 | add %i1, 0x40, %i1 | ||
544 | add %l7, 0x40, %l7 | ||
545 | subcc %g1, 1, %g1 | ||
546 | bne,pt %xcc, 1b | ||
547 | add %l6, 0x40, %l6 | ||
548 | membar #Sync | ||
549 | wr %g7, 0x0, %asi | ||
550 | ret | ||
551 | restore | ||
552 | .size xor_niagara_4, .-xor_niagara_4 | ||
553 | |||
554 | .globl xor_niagara_5 | ||
555 | .type xor_niagara_5,#function | ||
556 | xor_niagara_5: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ | ||
557 | save %sp, -192, %sp | ||
558 | prefetch [%i1], #n_writes | ||
559 | prefetch [%i2], #one_read | ||
560 | prefetch [%i3], #one_read | ||
561 | prefetch [%i4], #one_read | ||
562 | prefetch [%i5], #one_read | ||
563 | rd %asi, %g7 | ||
564 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
565 | srlx %i0, 6, %g1 | ||
566 | mov %i1, %i0 | ||
567 | mov %i2, %i1 | ||
568 | mov %i3, %l7 | ||
569 | mov %i4, %l6 | ||
570 | mov %i5, %l5 | ||
571 | 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ | ||
572 | ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ | ||
573 | ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ | ||
574 | ldda [%l5 + 0x00] %asi, %l0 /* %l0/%l1 = src4 + 0x00 */ | ||
575 | ldda [%i0 + 0x00] %asi, %l2 /* %l2/%l3 = dest + 0x00 */ | ||
576 | xor %i4, %i2, %i4 | ||
577 | xor %i5, %i3, %i5 | ||
578 | ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ | ||
579 | xor %g2, %i4, %g2 | ||
580 | xor %g3, %i5, %g3 | ||
581 | ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ | ||
582 | xor %l0, %g2, %l0 | ||
583 | xor %l1, %g3, %l1 | ||
584 | ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ | ||
585 | xor %l2, %l0, %l2 | ||
586 | xor %l3, %l1, %l3 | ||
587 | stxa %l2, [%i0 + 0x00] %asi | ||
588 | stxa %l3, [%i0 + 0x08] %asi | ||
589 | ldda [%l5 + 0x10] %asi, %l0 /* %l0/%l1 = src4 + 0x10 */ | ||
590 | ldda [%i0 + 0x10] %asi, %l2 /* %l2/%l3 = dest + 0x10 */ | ||
591 | |||
592 | xor %i4, %i2, %i4 | ||
593 | xor %i5, %i3, %i5 | ||
594 | ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ | ||
595 | xor %g2, %i4, %g2 | ||
596 | xor %g3, %i5, %g3 | ||
597 | ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ | ||
598 | xor %l0, %g2, %l0 | ||
599 | xor %l1, %g3, %l1 | ||
600 | ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ | ||
601 | xor %l2, %l0, %l2 | ||
602 | xor %l3, %l1, %l3 | ||
603 | stxa %l2, [%i0 + 0x10] %asi | ||
604 | stxa %l3, [%i0 + 0x18] %asi | ||
605 | ldda [%l5 + 0x20] %asi, %l0 /* %l0/%l1 = src4 + 0x20 */ | ||
606 | ldda [%i0 + 0x20] %asi, %l2 /* %l2/%l3 = dest + 0x20 */ | ||
607 | |||
608 | xor %i4, %i2, %i4 | ||
609 | xor %i5, %i3, %i5 | ||
610 | ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ | ||
611 | xor %g2, %i4, %g2 | ||
612 | xor %g3, %i5, %g3 | ||
613 | ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ | ||
614 | xor %l0, %g2, %l0 | ||
615 | xor %l1, %g3, %l1 | ||
616 | ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ | ||
617 | xor %l2, %l0, %l2 | ||
618 | xor %l3, %l1, %l3 | ||
619 | stxa %l2, [%i0 + 0x20] %asi | ||
620 | stxa %l3, [%i0 + 0x28] %asi | ||
621 | ldda [%l5 + 0x30] %asi, %l0 /* %l0/%l1 = src4 + 0x30 */ | ||
622 | ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ | ||
623 | |||
624 | prefetch [%i1 + 0x40], #one_read | ||
625 | prefetch [%l7 + 0x40], #one_read | ||
626 | prefetch [%l6 + 0x40], #one_read | ||
627 | prefetch [%l5 + 0x40], #one_read | ||
628 | prefetch [%i0 + 0x40], #n_writes | ||
629 | |||
630 | xor %i4, %i2, %i4 | ||
631 | xor %i5, %i3, %i5 | ||
632 | xor %g2, %i4, %g2 | ||
633 | xor %g3, %i5, %g3 | ||
634 | xor %l0, %g2, %l0 | ||
635 | xor %l1, %g3, %l1 | ||
636 | xor %l2, %l0, %l2 | ||
637 | xor %l3, %l1, %l3 | ||
638 | stxa %l2, [%i0 + 0x30] %asi | ||
639 | stxa %l3, [%i0 + 0x38] %asi | ||
640 | |||
641 | add %i0, 0x40, %i0 | ||
642 | add %i1, 0x40, %i1 | ||
643 | add %l7, 0x40, %l7 | ||
644 | add %l6, 0x40, %l6 | ||
645 | subcc %g1, 1, %g1 | ||
646 | bne,pt %xcc, 1b | ||
647 | add %l5, 0x40, %l5 | ||
648 | membar #Sync | ||
649 | wr %g7, 0x0, %asi | ||
650 | ret | ||
651 | restore | ||
652 | .size xor_niagara_5, .-xor_niagara_5 | ||
diff --git a/arch/sparc64/math-emu/math.c b/arch/sparc64/math-emu/math.c index 2ae05cd7b773..6ee496c2864a 100644 --- a/arch/sparc64/math-emu/math.c +++ b/arch/sparc64/math-emu/math.c | |||
@@ -206,9 +206,29 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) | |||
206 | case FSTOQ: TYPE(3,3,1,1,1,0,0); break; | 206 | case FSTOQ: TYPE(3,3,1,1,1,0,0); break; |
207 | case FDTOQ: TYPE(3,3,1,2,1,0,0); break; | 207 | case FDTOQ: TYPE(3,3,1,2,1,0,0); break; |
208 | case FQTOI: TYPE(3,1,0,3,1,0,0); break; | 208 | case FQTOI: TYPE(3,1,0,3,1,0,0); break; |
209 | |||
210 | /* We can get either unimplemented or unfinished | ||
211 | * for these cases. Pre-Niagara systems generate | ||
212 | * unfinished fpop for SUBNORMAL cases, and Niagara | ||
213 | * always gives unimplemented fpop for fsqrt{s,d}. | ||
214 | */ | ||
215 | case FSQRTS: { | ||
216 | unsigned long x = current_thread_info()->xfsr[0]; | ||
217 | |||
218 | x = (x >> 14) & 0xf; | ||
219 | TYPE(x,1,1,1,1,0,0); | ||
220 | break; | ||
221 | } | ||
222 | |||
223 | case FSQRTD: { | ||
224 | unsigned long x = current_thread_info()->xfsr[0]; | ||
225 | |||
226 | x = (x >> 14) & 0xf; | ||
227 | TYPE(x,2,1,2,1,0,0); | ||
228 | break; | ||
229 | } | ||
230 | |||
209 | /* SUBNORMAL - ftt == 2 */ | 231 | /* SUBNORMAL - ftt == 2 */ |
210 | case FSQRTS: TYPE(2,1,1,1,1,0,0); break; | ||
211 | case FSQRTD: TYPE(2,2,1,2,1,0,0); break; | ||
212 | case FADDD: | 232 | case FADDD: |
213 | case FSUBD: | 233 | case FSUBD: |
214 | case FMULD: | 234 | case FMULD: |
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile index 9d0960e69f48..e415bf942bcd 100644 --- a/arch/sparc64/mm/Makefile +++ b/arch/sparc64/mm/Makefile | |||
@@ -5,6 +5,6 @@ | |||
5 | EXTRA_AFLAGS := -ansi | 5 | EXTRA_AFLAGS := -ansi |
6 | EXTRA_CFLAGS := -Werror | 6 | EXTRA_CFLAGS := -Werror |
7 | 7 | ||
8 | obj-y := ultra.o tlb.o fault.o init.o generic.o | 8 | obj-y := ultra.o tlb.o tsb.o fault.o init.o generic.o |
9 | 9 | ||
10 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 10 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 6f0539aa44d0..63b6cc0cd5d5 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/lsu.h> | 29 | #include <asm/lsu.h> |
30 | #include <asm/sections.h> | 30 | #include <asm/sections.h> |
31 | #include <asm/kdebug.h> | 31 | #include <asm/kdebug.h> |
32 | #include <asm/mmu_context.h> | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * To debug kernel to catch accesses to certain virtual/physical addresses. | 35 | * To debug kernel to catch accesses to certain virtual/physical addresses. |
@@ -91,12 +92,13 @@ static void __kprobes unhandled_fault(unsigned long address, | |||
91 | die_if_kernel("Oops", regs); | 92 | die_if_kernel("Oops", regs); |
92 | } | 93 | } |
93 | 94 | ||
94 | static void bad_kernel_pc(struct pt_regs *regs) | 95 | static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) |
95 | { | 96 | { |
96 | unsigned long *ksp; | 97 | unsigned long *ksp; |
97 | 98 | ||
98 | printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", | 99 | printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", |
99 | regs->tpc); | 100 | regs->tpc); |
101 | printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); | ||
100 | __asm__("mov %%sp, %0" : "=r" (ksp)); | 102 | __asm__("mov %%sp, %0" : "=r" (ksp)); |
101 | show_stack(current, ksp); | 103 | show_stack(current, ksp); |
102 | unhandled_fault(regs->tpc, current, regs); | 104 | unhandled_fault(regs->tpc, current, regs); |
@@ -137,7 +139,7 @@ static unsigned int get_user_insn(unsigned long tpc) | |||
137 | if (!pte_present(pte)) | 139 | if (!pte_present(pte)) |
138 | goto out; | 140 | goto out; |
139 | 141 | ||
140 | pa = (pte_val(pte) & _PAGE_PADDR); | 142 | pa = (pte_pfn(pte) << PAGE_SHIFT); |
141 | pa += (tpc & ~PAGE_MASK); | 143 | pa += (tpc & ~PAGE_MASK); |
142 | 144 | ||
143 | /* Use phys bypass so we don't pollute dtlb/dcache. */ | 145 | /* Use phys bypass so we don't pollute dtlb/dcache. */ |
@@ -257,7 +259,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
257 | struct vm_area_struct *vma; | 259 | struct vm_area_struct *vma; |
258 | unsigned int insn = 0; | 260 | unsigned int insn = 0; |
259 | int si_code, fault_code; | 261 | int si_code, fault_code; |
260 | unsigned long address; | 262 | unsigned long address, mm_rss; |
261 | 263 | ||
262 | fault_code = get_thread_fault_code(); | 264 | fault_code = get_thread_fault_code(); |
263 | 265 | ||
@@ -280,7 +282,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
280 | (tpc >= MODULES_VADDR && tpc < MODULES_END)) { | 282 | (tpc >= MODULES_VADDR && tpc < MODULES_END)) { |
281 | /* Valid, no problems... */ | 283 | /* Valid, no problems... */ |
282 | } else { | 284 | } else { |
283 | bad_kernel_pc(regs); | 285 | bad_kernel_pc(regs, address); |
284 | return; | 286 | return; |
285 | } | 287 | } |
286 | } | 288 | } |
@@ -406,6 +408,11 @@ good_area: | |||
406 | } | 408 | } |
407 | 409 | ||
408 | up_read(&mm->mmap_sem); | 410 | up_read(&mm->mmap_sem); |
411 | |||
412 | mm_rss = get_mm_rss(mm); | ||
413 | if (unlikely(mm_rss >= mm->context.tsb_rss_limit)) | ||
414 | tsb_grow(mm, mm_rss); | ||
415 | |||
409 | return; | 416 | return; |
410 | 417 | ||
411 | /* | 418 | /* |
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c index 580b63da836b..5fc5c579e35e 100644 --- a/arch/sparc64/mm/generic.c +++ b/arch/sparc64/mm/generic.c | |||
@@ -15,15 +15,6 @@ | |||
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | #include <asm/tlbflush.h> | 16 | #include <asm/tlbflush.h> |
17 | 17 | ||
18 | static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space) | ||
19 | { | ||
20 | pte_t pte; | ||
21 | pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) & | ||
22 | ~(unsigned long)_PAGE_CACHE); | ||
23 | pte_val(pte) |= (((unsigned long)space) << 32); | ||
24 | return pte; | ||
25 | } | ||
26 | |||
27 | /* Remap IO memory, the same way as remap_pfn_range(), but use | 18 | /* Remap IO memory, the same way as remap_pfn_range(), but use |
28 | * the obio memory space. | 19 | * the obio memory space. |
29 | * | 20 | * |
@@ -48,24 +39,29 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, | |||
48 | pte_t entry; | 39 | pte_t entry; |
49 | unsigned long curend = address + PAGE_SIZE; | 40 | unsigned long curend = address + PAGE_SIZE; |
50 | 41 | ||
51 | entry = mk_pte_io(offset, prot, space); | 42 | entry = mk_pte_io(offset, prot, space, PAGE_SIZE); |
52 | if (!(address & 0xffff)) { | 43 | if (!(address & 0xffff)) { |
53 | if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) { | 44 | if (PAGE_SIZE < (4 * 1024 * 1024) && |
54 | entry = mk_pte_io(offset, | 45 | !(address & 0x3fffff) && |
55 | __pgprot(pgprot_val (prot) | _PAGE_SZ4MB), | 46 | !(offset & 0x3ffffe) && |
56 | space); | 47 | end >= address + 0x400000) { |
48 | entry = mk_pte_io(offset, prot, space, | ||
49 | 4 * 1024 * 1024); | ||
57 | curend = address + 0x400000; | 50 | curend = address + 0x400000; |
58 | offset += 0x400000; | 51 | offset += 0x400000; |
59 | } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) { | 52 | } else if (PAGE_SIZE < (512 * 1024) && |
60 | entry = mk_pte_io(offset, | 53 | !(address & 0x7ffff) && |
61 | __pgprot(pgprot_val (prot) | _PAGE_SZ512K), | 54 | !(offset & 0x7fffe) && |
62 | space); | 55 | end >= address + 0x80000) { |
56 | entry = mk_pte_io(offset, prot, space, | ||
57 | 512 * 1024 * 1024); | ||
63 | curend = address + 0x80000; | 58 | curend = address + 0x80000; |
64 | offset += 0x80000; | 59 | offset += 0x80000; |
65 | } else if (!(offset & 0xfffe) && end >= address + 0x10000) { | 60 | } else if (PAGE_SIZE < (64 * 1024) && |
66 | entry = mk_pte_io(offset, | 61 | !(offset & 0xfffe) && |
67 | __pgprot(pgprot_val (prot) | _PAGE_SZ64K), | 62 | end >= address + 0x10000) { |
68 | space); | 63 | entry = mk_pte_io(offset, prot, space, |
64 | 64 * 1024); | ||
69 | curend = address + 0x10000; | 65 | curend = address + 0x10000; |
70 | offset += 0x10000; | 66 | offset += 0x10000; |
71 | } else | 67 | } else |
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 625cbb336a23..a7a24869d045 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * SPARC64 Huge TLB page support. | 2 | * SPARC64 Huge TLB page support. |
3 | * | 3 | * |
4 | * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) | 4 | * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net) |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
@@ -22,6 +22,175 @@ | |||
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/mmu_context.h> | 23 | #include <asm/mmu_context.h> |
24 | 24 | ||
25 | /* Slightly simplified from the non-hugepage variant because by | ||
26 | * definition we don't have to worry about any page coloring stuff | ||
27 | */ | ||
28 | #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) | ||
29 | #define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) | ||
30 | |||
31 | static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, | ||
32 | unsigned long addr, | ||
33 | unsigned long len, | ||
34 | unsigned long pgoff, | ||
35 | unsigned long flags) | ||
36 | { | ||
37 | struct mm_struct *mm = current->mm; | ||
38 | struct vm_area_struct * vma; | ||
39 | unsigned long task_size = TASK_SIZE; | ||
40 | unsigned long start_addr; | ||
41 | |||
42 | if (test_thread_flag(TIF_32BIT)) | ||
43 | task_size = STACK_TOP32; | ||
44 | if (unlikely(len >= VA_EXCLUDE_START)) | ||
45 | return -ENOMEM; | ||
46 | |||
47 | if (len > mm->cached_hole_size) { | ||
48 | start_addr = addr = mm->free_area_cache; | ||
49 | } else { | ||
50 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
51 | mm->cached_hole_size = 0; | ||
52 | } | ||
53 | |||
54 | task_size -= len; | ||
55 | |||
56 | full_search: | ||
57 | addr = ALIGN(addr, HPAGE_SIZE); | ||
58 | |||
59 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { | ||
60 | /* At this point: (!vma || addr < vma->vm_end). */ | ||
61 | if (addr < VA_EXCLUDE_START && | ||
62 | (addr + len) >= VA_EXCLUDE_START) { | ||
63 | addr = VA_EXCLUDE_END; | ||
64 | vma = find_vma(mm, VA_EXCLUDE_END); | ||
65 | } | ||
66 | if (unlikely(task_size < addr)) { | ||
67 | if (start_addr != TASK_UNMAPPED_BASE) { | ||
68 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
69 | mm->cached_hole_size = 0; | ||
70 | goto full_search; | ||
71 | } | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | if (likely(!vma || addr + len <= vma->vm_start)) { | ||
75 | /* | ||
76 | * Remember the place where we stopped the search: | ||
77 | */ | ||
78 | mm->free_area_cache = addr + len; | ||
79 | return addr; | ||
80 | } | ||
81 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
82 | mm->cached_hole_size = vma->vm_start - addr; | ||
83 | |||
84 | addr = ALIGN(vma->vm_end, HPAGE_SIZE); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | static unsigned long | ||
89 | hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | ||
90 | const unsigned long len, | ||
91 | const unsigned long pgoff, | ||
92 | const unsigned long flags) | ||
93 | { | ||
94 | struct vm_area_struct *vma; | ||
95 | struct mm_struct *mm = current->mm; | ||
96 | unsigned long addr = addr0; | ||
97 | |||
98 | /* This should only ever run for 32-bit processes. */ | ||
99 | BUG_ON(!test_thread_flag(TIF_32BIT)); | ||
100 | |||
101 | /* check if free_area_cache is useful for us */ | ||
102 | if (len <= mm->cached_hole_size) { | ||
103 | mm->cached_hole_size = 0; | ||
104 | mm->free_area_cache = mm->mmap_base; | ||
105 | } | ||
106 | |||
107 | /* either no address requested or can't fit in requested address hole */ | ||
108 | addr = mm->free_area_cache & HPAGE_MASK; | ||
109 | |||
110 | /* make sure it can fit in the remaining address space */ | ||
111 | if (likely(addr > len)) { | ||
112 | vma = find_vma(mm, addr-len); | ||
113 | if (!vma || addr <= vma->vm_start) { | ||
114 | /* remember the address as a hint for next time */ | ||
115 | return (mm->free_area_cache = addr-len); | ||
116 | } | ||
117 | } | ||
118 | |||
119 | if (unlikely(mm->mmap_base < len)) | ||
120 | goto bottomup; | ||
121 | |||
122 | addr = (mm->mmap_base-len) & HPAGE_MASK; | ||
123 | |||
124 | do { | ||
125 | /* | ||
126 | * Lookup failure means no vma is above this address, | ||
127 | * else if new region fits below vma->vm_start, | ||
128 | * return with success: | ||
129 | */ | ||
130 | vma = find_vma(mm, addr); | ||
131 | if (likely(!vma || addr+len <= vma->vm_start)) { | ||
132 | /* remember the address as a hint for next time */ | ||
133 | return (mm->free_area_cache = addr); | ||
134 | } | ||
135 | |||
136 | /* remember the largest hole we saw so far */ | ||
137 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
138 | mm->cached_hole_size = vma->vm_start - addr; | ||
139 | |||
140 | /* try just below the current vma->vm_start */ | ||
141 | addr = (vma->vm_start-len) & HPAGE_MASK; | ||
142 | } while (likely(len < vma->vm_start)); | ||
143 | |||
144 | bottomup: | ||
145 | /* | ||
146 | * A failed mmap() very likely causes application failure, | ||
147 | * so fall back to the bottom-up function here. This scenario | ||
148 | * can happen with large stack limits and large mmap() | ||
149 | * allocations. | ||
150 | */ | ||
151 | mm->cached_hole_size = ~0UL; | ||
152 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
153 | addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); | ||
154 | /* | ||
155 | * Restore the topdown base: | ||
156 | */ | ||
157 | mm->free_area_cache = mm->mmap_base; | ||
158 | mm->cached_hole_size = ~0UL; | ||
159 | |||
160 | return addr; | ||
161 | } | ||
162 | |||
163 | unsigned long | ||
164 | hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | ||
165 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
166 | { | ||
167 | struct mm_struct *mm = current->mm; | ||
168 | struct vm_area_struct *vma; | ||
169 | unsigned long task_size = TASK_SIZE; | ||
170 | |||
171 | if (test_thread_flag(TIF_32BIT)) | ||
172 | task_size = STACK_TOP32; | ||
173 | |||
174 | if (len & ~HPAGE_MASK) | ||
175 | return -EINVAL; | ||
176 | if (len > task_size) | ||
177 | return -ENOMEM; | ||
178 | |||
179 | if (addr) { | ||
180 | addr = ALIGN(addr, HPAGE_SIZE); | ||
181 | vma = find_vma(mm, addr); | ||
182 | if (task_size - len >= addr && | ||
183 | (!vma || addr + len <= vma->vm_start)) | ||
184 | return addr; | ||
185 | } | ||
186 | if (mm->get_unmapped_area == arch_get_unmapped_area) | ||
187 | return hugetlb_get_unmapped_area_bottomup(file, addr, len, | ||
188 | pgoff, flags); | ||
189 | else | ||
190 | return hugetlb_get_unmapped_area_topdown(file, addr, len, | ||
191 | pgoff, flags); | ||
192 | } | ||
193 | |||
25 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | 194 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) |
26 | { | 195 | { |
27 | pgd_t *pgd; | 196 | pgd_t *pgd; |
@@ -48,12 +217,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | |||
48 | pmd_t *pmd; | 217 | pmd_t *pmd; |
49 | pte_t *pte = NULL; | 218 | pte_t *pte = NULL; |
50 | 219 | ||
220 | addr &= HPAGE_MASK; | ||
221 | |||
51 | pgd = pgd_offset(mm, addr); | 222 | pgd = pgd_offset(mm, addr); |
52 | if (pgd) { | 223 | if (!pgd_none(*pgd)) { |
53 | pud = pud_offset(pgd, addr); | 224 | pud = pud_offset(pgd, addr); |
54 | if (pud) { | 225 | if (!pud_none(*pud)) { |
55 | pmd = pmd_offset(pud, addr); | 226 | pmd = pmd_offset(pud, addr); |
56 | if (pmd) | 227 | if (!pmd_none(*pmd)) |
57 | pte = pte_offset_map(pmd, addr); | 228 | pte = pte_offset_map(pmd, addr); |
58 | } | 229 | } |
59 | } | 230 | } |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1e44ee26cee8..c2b556106fc1 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/config.h> | 8 | #include <linux/config.h> |
9 | #include <linux/module.h> | ||
9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
11 | #include <linux/string.h> | 12 | #include <linux/string.h> |
@@ -39,9 +40,27 @@ | |||
39 | #include <asm/tlb.h> | 40 | #include <asm/tlb.h> |
40 | #include <asm/spitfire.h> | 41 | #include <asm/spitfire.h> |
41 | #include <asm/sections.h> | 42 | #include <asm/sections.h> |
43 | #include <asm/tsb.h> | ||
44 | #include <asm/hypervisor.h> | ||
42 | 45 | ||
43 | extern void device_scan(void); | 46 | extern void device_scan(void); |
44 | 47 | ||
48 | #define MAX_PHYS_ADDRESS (1UL << 42UL) | ||
49 | #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) | ||
50 | #define KPTE_BITMAP_BYTES \ | ||
51 | ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) | ||
52 | |||
53 | unsigned long kern_linear_pte_xor[2] __read_mostly; | ||
54 | |||
55 | /* A bitmap, one bit for every 256MB of physical memory. If the bit | ||
56 | * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else | ||
57 | * if set we should use a 256MB page (via kern_linear_pte_xor[1]). | ||
58 | */ | ||
59 | unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; | ||
60 | |||
61 | /* A special kernel TSB for 4MB and 256MB linear mappings. */ | ||
62 | struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; | ||
63 | |||
45 | #define MAX_BANKS 32 | 64 | #define MAX_BANKS 32 |
46 | 65 | ||
47 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; | 66 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; |
@@ -111,11 +130,9 @@ static void __init read_obp_memory(const char *property, | |||
111 | 130 | ||
112 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; | 131 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; |
113 | 132 | ||
114 | /* Ugly, but necessary... -DaveM */ | 133 | /* Kernel physical address base and size in bytes. */ |
115 | unsigned long phys_base __read_mostly; | ||
116 | unsigned long kern_base __read_mostly; | 134 | unsigned long kern_base __read_mostly; |
117 | unsigned long kern_size __read_mostly; | 135 | unsigned long kern_size __read_mostly; |
118 | unsigned long pfn_base __read_mostly; | ||
119 | 136 | ||
120 | /* get_new_mmu_context() uses "cache + 1". */ | 137 | /* get_new_mmu_context() uses "cache + 1". */ |
121 | DEFINE_SPINLOCK(ctx_alloc_lock); | 138 | DEFINE_SPINLOCK(ctx_alloc_lock); |
@@ -141,24 +158,28 @@ unsigned long sparc64_kern_sec_context __read_mostly; | |||
141 | 158 | ||
142 | int bigkernel = 0; | 159 | int bigkernel = 0; |
143 | 160 | ||
144 | /* XXX Tune this... */ | 161 | kmem_cache_t *pgtable_cache __read_mostly; |
145 | #define PGT_CACHE_LOW 25 | 162 | |
146 | #define PGT_CACHE_HIGH 50 | 163 | static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) |
164 | { | ||
165 | clear_page(addr); | ||
166 | } | ||
167 | |||
168 | extern void tsb_cache_init(void); | ||
147 | 169 | ||
148 | void check_pgt_cache(void) | 170 | void pgtable_cache_init(void) |
149 | { | 171 | { |
150 | preempt_disable(); | 172 | pgtable_cache = kmem_cache_create("pgtable_cache", |
151 | if (pgtable_cache_size > PGT_CACHE_HIGH) { | 173 | PAGE_SIZE, PAGE_SIZE, |
152 | do { | 174 | SLAB_HWCACHE_ALIGN | |
153 | if (pgd_quicklist) | 175 | SLAB_MUST_HWCACHE_ALIGN, |
154 | free_pgd_slow(get_pgd_fast()); | 176 | zero_ctor, |
155 | if (pte_quicklist[0]) | 177 | NULL); |
156 | free_pte_slow(pte_alloc_one_fast(NULL, 0)); | 178 | if (!pgtable_cache) { |
157 | if (pte_quicklist[1]) | 179 | prom_printf("Could not create pgtable_cache\n"); |
158 | free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))); | 180 | prom_halt(); |
159 | } while (pgtable_cache_size > PGT_CACHE_LOW); | ||
160 | } | 181 | } |
161 | preempt_enable(); | 182 | tsb_cache_init(); |
162 | } | 183 | } |
163 | 184 | ||
164 | #ifdef CONFIG_DEBUG_DCFLUSH | 185 | #ifdef CONFIG_DEBUG_DCFLUSH |
@@ -168,8 +189,9 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); | |||
168 | #endif | 189 | #endif |
169 | #endif | 190 | #endif |
170 | 191 | ||
171 | __inline__ void flush_dcache_page_impl(struct page *page) | 192 | inline void flush_dcache_page_impl(struct page *page) |
172 | { | 193 | { |
194 | BUG_ON(tlb_type == hypervisor); | ||
173 | #ifdef CONFIG_DEBUG_DCFLUSH | 195 | #ifdef CONFIG_DEBUG_DCFLUSH |
174 | atomic_inc(&dcpage_flushes); | 196 | atomic_inc(&dcpage_flushes); |
175 | #endif | 197 | #endif |
@@ -186,8 +208,8 @@ __inline__ void flush_dcache_page_impl(struct page *page) | |||
186 | } | 208 | } |
187 | 209 | ||
188 | #define PG_dcache_dirty PG_arch_1 | 210 | #define PG_dcache_dirty PG_arch_1 |
189 | #define PG_dcache_cpu_shift 24 | 211 | #define PG_dcache_cpu_shift 24UL |
190 | #define PG_dcache_cpu_mask (256 - 1) | 212 | #define PG_dcache_cpu_mask (256UL - 1UL) |
191 | 213 | ||
192 | #if NR_CPUS > 256 | 214 | #if NR_CPUS > 256 |
193 | #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus | 215 | #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus |
@@ -243,32 +265,61 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c | |||
243 | : "g1", "g7"); | 265 | : "g1", "g7"); |
244 | } | 266 | } |
245 | 267 | ||
268 | static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) | ||
269 | { | ||
270 | unsigned long tsb_addr = (unsigned long) ent; | ||
271 | |||
272 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
273 | tsb_addr = __pa(tsb_addr); | ||
274 | |||
275 | __tsb_insert(tsb_addr, tag, pte); | ||
276 | } | ||
277 | |||
278 | unsigned long _PAGE_ALL_SZ_BITS __read_mostly; | ||
279 | unsigned long _PAGE_SZBITS __read_mostly; | ||
280 | |||
246 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) | 281 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
247 | { | 282 | { |
248 | struct page *page; | 283 | struct mm_struct *mm; |
249 | unsigned long pfn; | 284 | struct tsb *tsb; |
250 | unsigned long pg_flags; | 285 | unsigned long tag, flags; |
251 | 286 | ||
252 | pfn = pte_pfn(pte); | 287 | if (tlb_type != hypervisor) { |
253 | if (pfn_valid(pfn) && | 288 | unsigned long pfn = pte_pfn(pte); |
254 | (page = pfn_to_page(pfn), page_mapping(page)) && | 289 | unsigned long pg_flags; |
255 | ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { | 290 | struct page *page; |
256 | int cpu = ((pg_flags >> PG_dcache_cpu_shift) & | 291 | |
257 | PG_dcache_cpu_mask); | 292 | if (pfn_valid(pfn) && |
258 | int this_cpu = get_cpu(); | 293 | (page = pfn_to_page(pfn), page_mapping(page)) && |
259 | 294 | ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { | |
260 | /* This is just to optimize away some function calls | 295 | int cpu = ((pg_flags >> PG_dcache_cpu_shift) & |
261 | * in the SMP case. | 296 | PG_dcache_cpu_mask); |
262 | */ | 297 | int this_cpu = get_cpu(); |
263 | if (cpu == this_cpu) | 298 | |
264 | flush_dcache_page_impl(page); | 299 | /* This is just to optimize away some function calls |
265 | else | 300 | * in the SMP case. |
266 | smp_flush_dcache_page_impl(page, cpu); | 301 | */ |
302 | if (cpu == this_cpu) | ||
303 | flush_dcache_page_impl(page); | ||
304 | else | ||
305 | smp_flush_dcache_page_impl(page, cpu); | ||
267 | 306 | ||
268 | clear_dcache_dirty_cpu(page, cpu); | 307 | clear_dcache_dirty_cpu(page, cpu); |
269 | 308 | ||
270 | put_cpu(); | 309 | put_cpu(); |
310 | } | ||
271 | } | 311 | } |
312 | |||
313 | mm = vma->vm_mm; | ||
314 | |||
315 | spin_lock_irqsave(&mm->context.lock, flags); | ||
316 | |||
317 | tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & | ||
318 | (mm->context.tsb_nentries - 1UL)]; | ||
319 | tag = (address >> 22UL); | ||
320 | tsb_insert(tsb, tag, pte_val(pte)); | ||
321 | |||
322 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
272 | } | 323 | } |
273 | 324 | ||
274 | void flush_dcache_page(struct page *page) | 325 | void flush_dcache_page(struct page *page) |
@@ -276,6 +327,9 @@ void flush_dcache_page(struct page *page) | |||
276 | struct address_space *mapping; | 327 | struct address_space *mapping; |
277 | int this_cpu; | 328 | int this_cpu; |
278 | 329 | ||
330 | if (tlb_type == hypervisor) | ||
331 | return; | ||
332 | |||
279 | /* Do not bother with the expensive D-cache flush if it | 333 | /* Do not bother with the expensive D-cache flush if it |
280 | * is merely the zero page. The 'bigcore' testcase in GDB | 334 | * is merely the zero page. The 'bigcore' testcase in GDB |
281 | * causes this case to run millions of times. | 335 | * causes this case to run millions of times. |
@@ -311,7 +365,7 @@ out: | |||
311 | 365 | ||
312 | void __kprobes flush_icache_range(unsigned long start, unsigned long end) | 366 | void __kprobes flush_icache_range(unsigned long start, unsigned long end) |
313 | { | 367 | { |
314 | /* Cheetah has coherent I-cache. */ | 368 | /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ |
315 | if (tlb_type == spitfire) { | 369 | if (tlb_type == spitfire) { |
316 | unsigned long kaddr; | 370 | unsigned long kaddr; |
317 | 371 | ||
@@ -320,16 +374,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end) | |||
320 | } | 374 | } |
321 | } | 375 | } |
322 | 376 | ||
323 | unsigned long page_to_pfn(struct page *page) | ||
324 | { | ||
325 | return (unsigned long) ((page - mem_map) + pfn_base); | ||
326 | } | ||
327 | |||
328 | struct page *pfn_to_page(unsigned long pfn) | ||
329 | { | ||
330 | return (mem_map + (pfn - pfn_base)); | ||
331 | } | ||
332 | |||
333 | void show_mem(void) | 377 | void show_mem(void) |
334 | { | 378 | { |
335 | printk("Mem-info:\n"); | 379 | printk("Mem-info:\n"); |
@@ -338,7 +382,6 @@ void show_mem(void) | |||
338 | nr_swap_pages << (PAGE_SHIFT-10)); | 382 | nr_swap_pages << (PAGE_SHIFT-10)); |
339 | printk("%ld pages of RAM\n", num_physpages); | 383 | printk("%ld pages of RAM\n", num_physpages); |
340 | printk("%d free pages\n", nr_free_pages()); | 384 | printk("%d free pages\n", nr_free_pages()); |
341 | printk("%d pages in page table cache\n",pgtable_cache_size); | ||
342 | } | 385 | } |
343 | 386 | ||
344 | void mmu_info(struct seq_file *m) | 387 | void mmu_info(struct seq_file *m) |
@@ -349,6 +392,8 @@ void mmu_info(struct seq_file *m) | |||
349 | seq_printf(m, "MMU Type\t: Cheetah+\n"); | 392 | seq_printf(m, "MMU Type\t: Cheetah+\n"); |
350 | else if (tlb_type == spitfire) | 393 | else if (tlb_type == spitfire) |
351 | seq_printf(m, "MMU Type\t: Spitfire\n"); | 394 | seq_printf(m, "MMU Type\t: Spitfire\n"); |
395 | else if (tlb_type == hypervisor) | ||
396 | seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); | ||
352 | else | 397 | else |
353 | seq_printf(m, "MMU Type\t: ???\n"); | 398 | seq_printf(m, "MMU Type\t: ???\n"); |
354 | 399 | ||
@@ -371,45 +416,13 @@ struct linux_prom_translation { | |||
371 | /* Exported for kernel TLB miss handling in ktlb.S */ | 416 | /* Exported for kernel TLB miss handling in ktlb.S */ |
372 | struct linux_prom_translation prom_trans[512] __read_mostly; | 417 | struct linux_prom_translation prom_trans[512] __read_mostly; |
373 | unsigned int prom_trans_ents __read_mostly; | 418 | unsigned int prom_trans_ents __read_mostly; |
374 | unsigned int swapper_pgd_zero __read_mostly; | ||
375 | |||
376 | extern unsigned long prom_boot_page; | ||
377 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); | ||
378 | extern int prom_get_mmu_ihandle(void); | ||
379 | extern void register_prom_callbacks(void); | ||
380 | 419 | ||
381 | /* Exported for SMP bootup purposes. */ | 420 | /* Exported for SMP bootup purposes. */ |
382 | unsigned long kern_locked_tte_data; | 421 | unsigned long kern_locked_tte_data; |
383 | 422 | ||
384 | /* | ||
385 | * Translate PROM's mapping we capture at boot time into physical address. | ||
386 | * The second parameter is only set from prom_callback() invocations. | ||
387 | */ | ||
388 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | ||
389 | { | ||
390 | int i; | ||
391 | |||
392 | for (i = 0; i < prom_trans_ents; i++) { | ||
393 | struct linux_prom_translation *p = &prom_trans[i]; | ||
394 | |||
395 | if (promva >= p->virt && | ||
396 | promva < (p->virt + p->size)) { | ||
397 | unsigned long base = p->data & _PAGE_PADDR; | ||
398 | |||
399 | if (error) | ||
400 | *error = 0; | ||
401 | return base + (promva & (8192 - 1)); | ||
402 | } | ||
403 | } | ||
404 | if (error) | ||
405 | *error = 1; | ||
406 | return 0UL; | ||
407 | } | ||
408 | |||
409 | /* The obp translations are saved based on 8k pagesize, since obp can | 423 | /* The obp translations are saved based on 8k pagesize, since obp can |
410 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> | 424 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> |
411 | * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte | 425 | * HI_OBP_ADDRESS range are handled in ktlb.S. |
412 | * scheme (also, see rant in inherit_locked_prom_mappings()). | ||
413 | */ | 426 | */ |
414 | static inline int in_obp_range(unsigned long vaddr) | 427 | static inline int in_obp_range(unsigned long vaddr) |
415 | { | 428 | { |
@@ -490,6 +503,36 @@ static void __init read_obp_translations(void) | |||
490 | } | 503 | } |
491 | } | 504 | } |
492 | 505 | ||
506 | static void __init hypervisor_tlb_lock(unsigned long vaddr, | ||
507 | unsigned long pte, | ||
508 | unsigned long mmu) | ||
509 | { | ||
510 | register unsigned long func asm("%o5"); | ||
511 | register unsigned long arg0 asm("%o0"); | ||
512 | register unsigned long arg1 asm("%o1"); | ||
513 | register unsigned long arg2 asm("%o2"); | ||
514 | register unsigned long arg3 asm("%o3"); | ||
515 | |||
516 | func = HV_FAST_MMU_MAP_PERM_ADDR; | ||
517 | arg0 = vaddr; | ||
518 | arg1 = 0; | ||
519 | arg2 = pte; | ||
520 | arg3 = mmu; | ||
521 | __asm__ __volatile__("ta 0x80" | ||
522 | : "=&r" (func), "=&r" (arg0), | ||
523 | "=&r" (arg1), "=&r" (arg2), | ||
524 | "=&r" (arg3) | ||
525 | : "0" (func), "1" (arg0), "2" (arg1), | ||
526 | "3" (arg2), "4" (arg3)); | ||
527 | if (arg0 != 0) { | ||
528 | prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " | ||
529 | "errors with %lx\n", vaddr, 0, pte, mmu, arg0); | ||
530 | prom_halt(); | ||
531 | } | ||
532 | } | ||
533 | |||
534 | static unsigned long kern_large_tte(unsigned long paddr); | ||
535 | |||
493 | static void __init remap_kernel(void) | 536 | static void __init remap_kernel(void) |
494 | { | 537 | { |
495 | unsigned long phys_page, tte_vaddr, tte_data; | 538 | unsigned long phys_page, tte_vaddr, tte_data; |
@@ -497,25 +540,34 @@ static void __init remap_kernel(void) | |||
497 | 540 | ||
498 | tte_vaddr = (unsigned long) KERNBASE; | 541 | tte_vaddr = (unsigned long) KERNBASE; |
499 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | 542 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
500 | tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | | 543 | tte_data = kern_large_tte(phys_page); |
501 | _PAGE_CP | _PAGE_CV | _PAGE_P | | ||
502 | _PAGE_L | _PAGE_W)); | ||
503 | 544 | ||
504 | kern_locked_tte_data = tte_data; | 545 | kern_locked_tte_data = tte_data; |
505 | 546 | ||
506 | /* Now lock us into the TLBs via OBP. */ | 547 | /* Now lock us into the TLBs via Hypervisor or OBP. */ |
507 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); | 548 | if (tlb_type == hypervisor) { |
508 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | 549 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); |
509 | if (bigkernel) { | 550 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); |
510 | tlb_ent -= 1; | 551 | if (bigkernel) { |
511 | prom_dtlb_load(tlb_ent, | 552 | tte_vaddr += 0x400000; |
512 | tte_data + 0x400000, | 553 | tte_data += 0x400000; |
513 | tte_vaddr + 0x400000); | 554 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); |
514 | prom_itlb_load(tlb_ent, | 555 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); |
515 | tte_data + 0x400000, | 556 | } |
516 | tte_vaddr + 0x400000); | 557 | } else { |
558 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); | ||
559 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | ||
560 | if (bigkernel) { | ||
561 | tlb_ent -= 1; | ||
562 | prom_dtlb_load(tlb_ent, | ||
563 | tte_data + 0x400000, | ||
564 | tte_vaddr + 0x400000); | ||
565 | prom_itlb_load(tlb_ent, | ||
566 | tte_data + 0x400000, | ||
567 | tte_vaddr + 0x400000); | ||
568 | } | ||
569 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; | ||
517 | } | 570 | } |
518 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; | ||
519 | if (tlb_type == cheetah_plus) { | 571 | if (tlb_type == cheetah_plus) { |
520 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | | 572 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | |
521 | CTX_CHEETAH_PLUS_NUC); | 573 | CTX_CHEETAH_PLUS_NUC); |
@@ -533,372 +585,14 @@ static void __init inherit_prom_mappings(void) | |||
533 | prom_printf("Remapping the kernel... "); | 585 | prom_printf("Remapping the kernel... "); |
534 | remap_kernel(); | 586 | remap_kernel(); |
535 | prom_printf("done.\n"); | 587 | prom_printf("done.\n"); |
536 | |||
537 | prom_printf("Registering callbacks... "); | ||
538 | register_prom_callbacks(); | ||
539 | prom_printf("done.\n"); | ||
540 | } | ||
541 | |||
542 | /* The OBP specifications for sun4u mark 0xfffffffc00000000 and | ||
543 | * upwards as reserved for use by the firmware (I wonder if this | ||
544 | * will be the same on Cheetah...). We use this virtual address | ||
545 | * range for the VPTE table mappings of the nucleus so we need | ||
546 | * to zap them when we enter the PROM. -DaveM | ||
547 | */ | ||
548 | static void __flush_nucleus_vptes(void) | ||
549 | { | ||
550 | unsigned long prom_reserved_base = 0xfffffffc00000000UL; | ||
551 | int i; | ||
552 | |||
553 | /* Only DTLB must be checked for VPTE entries. */ | ||
554 | if (tlb_type == spitfire) { | ||
555 | for (i = 0; i < 63; i++) { | ||
556 | unsigned long tag; | ||
557 | |||
558 | /* Spitfire Errata #32 workaround */ | ||
559 | /* NOTE: Always runs on spitfire, so no cheetah+ | ||
560 | * page size encodings. | ||
561 | */ | ||
562 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
563 | "flush %%g6" | ||
564 | : /* No outputs */ | ||
565 | : "r" (0), | ||
566 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
567 | |||
568 | tag = spitfire_get_dtlb_tag(i); | ||
569 | if (((tag & ~(PAGE_MASK)) == 0) && | ||
570 | ((tag & (PAGE_MASK)) >= prom_reserved_base)) { | ||
571 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
572 | "membar #Sync" | ||
573 | : /* no outputs */ | ||
574 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
575 | spitfire_put_dtlb_data(i, 0x0UL); | ||
576 | } | ||
577 | } | ||
578 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
579 | for (i = 0; i < 512; i++) { | ||
580 | unsigned long tag = cheetah_get_dtlb_tag(i, 2); | ||
581 | |||
582 | if ((tag & ~PAGE_MASK) == 0 && | ||
583 | (tag & PAGE_MASK) >= prom_reserved_base) { | ||
584 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
585 | "membar #Sync" | ||
586 | : /* no outputs */ | ||
587 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
588 | cheetah_put_dtlb_data(i, 0x0UL, 2); | ||
589 | } | ||
590 | |||
591 | if (tlb_type != cheetah_plus) | ||
592 | continue; | ||
593 | |||
594 | tag = cheetah_get_dtlb_tag(i, 3); | ||
595 | |||
596 | if ((tag & ~PAGE_MASK) == 0 && | ||
597 | (tag & PAGE_MASK) >= prom_reserved_base) { | ||
598 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
599 | "membar #Sync" | ||
600 | : /* no outputs */ | ||
601 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
602 | cheetah_put_dtlb_data(i, 0x0UL, 3); | ||
603 | } | ||
604 | } | ||
605 | } else { | ||
606 | /* Implement me :-) */ | ||
607 | BUG(); | ||
608 | } | ||
609 | } | 588 | } |
610 | 589 | ||
611 | static int prom_ditlb_set; | ||
612 | struct prom_tlb_entry { | ||
613 | int tlb_ent; | ||
614 | unsigned long tlb_tag; | ||
615 | unsigned long tlb_data; | ||
616 | }; | ||
617 | struct prom_tlb_entry prom_itlb[16], prom_dtlb[16]; | ||
618 | |||
619 | void prom_world(int enter) | 590 | void prom_world(int enter) |
620 | { | 591 | { |
621 | unsigned long pstate; | ||
622 | int i; | ||
623 | |||
624 | if (!enter) | 592 | if (!enter) |
625 | set_fs((mm_segment_t) { get_thread_current_ds() }); | 593 | set_fs((mm_segment_t) { get_thread_current_ds() }); |
626 | 594 | ||
627 | if (!prom_ditlb_set) | 595 | __asm__ __volatile__("flushw"); |
628 | return; | ||
629 | |||
630 | /* Make sure the following runs atomically. */ | ||
631 | __asm__ __volatile__("flushw\n\t" | ||
632 | "rdpr %%pstate, %0\n\t" | ||
633 | "wrpr %0, %1, %%pstate" | ||
634 | : "=r" (pstate) | ||
635 | : "i" (PSTATE_IE)); | ||
636 | |||
637 | if (enter) { | ||
638 | /* Kick out nucleus VPTEs. */ | ||
639 | __flush_nucleus_vptes(); | ||
640 | |||
641 | /* Install PROM world. */ | ||
642 | for (i = 0; i < 16; i++) { | ||
643 | if (prom_dtlb[i].tlb_ent != -1) { | ||
644 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
645 | "membar #Sync" | ||
646 | : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), | ||
647 | "i" (ASI_DMMU)); | ||
648 | if (tlb_type == spitfire) | ||
649 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, | ||
650 | prom_dtlb[i].tlb_data); | ||
651 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
652 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, | ||
653 | prom_dtlb[i].tlb_data); | ||
654 | } | ||
655 | if (prom_itlb[i].tlb_ent != -1) { | ||
656 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
657 | "membar #Sync" | ||
658 | : : "r" (prom_itlb[i].tlb_tag), | ||
659 | "r" (TLB_TAG_ACCESS), | ||
660 | "i" (ASI_IMMU)); | ||
661 | if (tlb_type == spitfire) | ||
662 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, | ||
663 | prom_itlb[i].tlb_data); | ||
664 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
665 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, | ||
666 | prom_itlb[i].tlb_data); | ||
667 | } | ||
668 | } | ||
669 | } else { | ||
670 | for (i = 0; i < 16; i++) { | ||
671 | if (prom_dtlb[i].tlb_ent != -1) { | ||
672 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
673 | "membar #Sync" | ||
674 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
675 | if (tlb_type == spitfire) | ||
676 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); | ||
677 | else | ||
678 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); | ||
679 | } | ||
680 | if (prom_itlb[i].tlb_ent != -1) { | ||
681 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
682 | "membar #Sync" | ||
683 | : : "r" (TLB_TAG_ACCESS), | ||
684 | "i" (ASI_IMMU)); | ||
685 | if (tlb_type == spitfire) | ||
686 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL); | ||
687 | else | ||
688 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL); | ||
689 | } | ||
690 | } | ||
691 | } | ||
692 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | ||
693 | : : "r" (pstate)); | ||
694 | } | ||
695 | |||
696 | void inherit_locked_prom_mappings(int save_p) | ||
697 | { | ||
698 | int i; | ||
699 | int dtlb_seen = 0; | ||
700 | int itlb_seen = 0; | ||
701 | |||
702 | /* Fucking losing PROM has more mappings in the TLB, but | ||
703 | * it (conveniently) fails to mention any of these in the | ||
704 | * translations property. The only ones that matter are | ||
705 | * the locked PROM tlb entries, so we impose the following | ||
706 | * irrecovable rule on the PROM, it is allowed 8 locked | ||
707 | * entries in the ITLB and 8 in the DTLB. | ||
708 | * | ||
709 | * Supposedly the upper 16GB of the address space is | ||
710 | * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED | ||
711 | * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface | ||
712 | * used between the client program and the firmware on sun5 | ||
713 | * systems to coordinate mmu mappings is also COMPLETELY | ||
714 | * UNDOCUMENTED!!!!!! Thanks S(t)un! | ||
715 | */ | ||
716 | if (save_p) { | ||
717 | for (i = 0; i < 16; i++) { | ||
718 | prom_itlb[i].tlb_ent = -1; | ||
719 | prom_dtlb[i].tlb_ent = -1; | ||
720 | } | ||
721 | } | ||
722 | if (tlb_type == spitfire) { | ||
723 | int high = sparc64_highest_unlocked_tlb_ent; | ||
724 | for (i = 0; i <= high; i++) { | ||
725 | unsigned long data; | ||
726 | |||
727 | /* Spitfire Errata #32 workaround */ | ||
728 | /* NOTE: Always runs on spitfire, so no cheetah+ | ||
729 | * page size encodings. | ||
730 | */ | ||
731 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
732 | "flush %%g6" | ||
733 | : /* No outputs */ | ||
734 | : "r" (0), | ||
735 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
736 | |||
737 | data = spitfire_get_dtlb_data(i); | ||
738 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | ||
739 | unsigned long tag; | ||
740 | |||
741 | /* Spitfire Errata #32 workaround */ | ||
742 | /* NOTE: Always runs on spitfire, so no | ||
743 | * cheetah+ page size encodings. | ||
744 | */ | ||
745 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
746 | "flush %%g6" | ||
747 | : /* No outputs */ | ||
748 | : "r" (0), | ||
749 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
750 | |||
751 | tag = spitfire_get_dtlb_tag(i); | ||
752 | if (save_p) { | ||
753 | prom_dtlb[dtlb_seen].tlb_ent = i; | ||
754 | prom_dtlb[dtlb_seen].tlb_tag = tag; | ||
755 | prom_dtlb[dtlb_seen].tlb_data = data; | ||
756 | } | ||
757 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
758 | "membar #Sync" | ||
759 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
760 | spitfire_put_dtlb_data(i, 0x0UL); | ||
761 | |||
762 | dtlb_seen++; | ||
763 | if (dtlb_seen > 15) | ||
764 | break; | ||
765 | } | ||
766 | } | ||
767 | |||
768 | for (i = 0; i < high; i++) { | ||
769 | unsigned long data; | ||
770 | |||
771 | /* Spitfire Errata #32 workaround */ | ||
772 | /* NOTE: Always runs on spitfire, so no | ||
773 | * cheetah+ page size encodings. | ||
774 | */ | ||
775 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
776 | "flush %%g6" | ||
777 | : /* No outputs */ | ||
778 | : "r" (0), | ||
779 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
780 | |||
781 | data = spitfire_get_itlb_data(i); | ||
782 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | ||
783 | unsigned long tag; | ||
784 | |||
785 | /* Spitfire Errata #32 workaround */ | ||
786 | /* NOTE: Always runs on spitfire, so no | ||
787 | * cheetah+ page size encodings. | ||
788 | */ | ||
789 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
790 | "flush %%g6" | ||
791 | : /* No outputs */ | ||
792 | : "r" (0), | ||
793 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
794 | |||
795 | tag = spitfire_get_itlb_tag(i); | ||
796 | if (save_p) { | ||
797 | prom_itlb[itlb_seen].tlb_ent = i; | ||
798 | prom_itlb[itlb_seen].tlb_tag = tag; | ||
799 | prom_itlb[itlb_seen].tlb_data = data; | ||
800 | } | ||
801 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
802 | "membar #Sync" | ||
803 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
804 | spitfire_put_itlb_data(i, 0x0UL); | ||
805 | |||
806 | itlb_seen++; | ||
807 | if (itlb_seen > 15) | ||
808 | break; | ||
809 | } | ||
810 | } | ||
811 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
812 | int high = sparc64_highest_unlocked_tlb_ent; | ||
813 | |||
814 | for (i = 0; i <= high; i++) { | ||
815 | unsigned long data; | ||
816 | |||
817 | data = cheetah_get_ldtlb_data(i); | ||
818 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | ||
819 | unsigned long tag; | ||
820 | |||
821 | tag = cheetah_get_ldtlb_tag(i); | ||
822 | if (save_p) { | ||
823 | prom_dtlb[dtlb_seen].tlb_ent = i; | ||
824 | prom_dtlb[dtlb_seen].tlb_tag = tag; | ||
825 | prom_dtlb[dtlb_seen].tlb_data = data; | ||
826 | } | ||
827 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
828 | "membar #Sync" | ||
829 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
830 | cheetah_put_ldtlb_data(i, 0x0UL); | ||
831 | |||
832 | dtlb_seen++; | ||
833 | if (dtlb_seen > 15) | ||
834 | break; | ||
835 | } | ||
836 | } | ||
837 | |||
838 | for (i = 0; i < high; i++) { | ||
839 | unsigned long data; | ||
840 | |||
841 | data = cheetah_get_litlb_data(i); | ||
842 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | ||
843 | unsigned long tag; | ||
844 | |||
845 | tag = cheetah_get_litlb_tag(i); | ||
846 | if (save_p) { | ||
847 | prom_itlb[itlb_seen].tlb_ent = i; | ||
848 | prom_itlb[itlb_seen].tlb_tag = tag; | ||
849 | prom_itlb[itlb_seen].tlb_data = data; | ||
850 | } | ||
851 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
852 | "membar #Sync" | ||
853 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
854 | cheetah_put_litlb_data(i, 0x0UL); | ||
855 | |||
856 | itlb_seen++; | ||
857 | if (itlb_seen > 15) | ||
858 | break; | ||
859 | } | ||
860 | } | ||
861 | } else { | ||
862 | /* Implement me :-) */ | ||
863 | BUG(); | ||
864 | } | ||
865 | if (save_p) | ||
866 | prom_ditlb_set = 1; | ||
867 | } | ||
868 | |||
869 | /* Give PROM back his world, done during reboots... */ | ||
870 | void prom_reload_locked(void) | ||
871 | { | ||
872 | int i; | ||
873 | |||
874 | for (i = 0; i < 16; i++) { | ||
875 | if (prom_dtlb[i].tlb_ent != -1) { | ||
876 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
877 | "membar #Sync" | ||
878 | : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), | ||
879 | "i" (ASI_DMMU)); | ||
880 | if (tlb_type == spitfire) | ||
881 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, | ||
882 | prom_dtlb[i].tlb_data); | ||
883 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
884 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, | ||
885 | prom_dtlb[i].tlb_data); | ||
886 | } | ||
887 | |||
888 | if (prom_itlb[i].tlb_ent != -1) { | ||
889 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
890 | "membar #Sync" | ||
891 | : : "r" (prom_itlb[i].tlb_tag), | ||
892 | "r" (TLB_TAG_ACCESS), | ||
893 | "i" (ASI_IMMU)); | ||
894 | if (tlb_type == spitfire) | ||
895 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, | ||
896 | prom_itlb[i].tlb_data); | ||
897 | else | ||
898 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, | ||
899 | prom_itlb[i].tlb_data); | ||
900 | } | ||
901 | } | ||
902 | } | 596 | } |
903 | 597 | ||
904 | #ifdef DCACHE_ALIASING_POSSIBLE | 598 | #ifdef DCACHE_ALIASING_POSSIBLE |
@@ -914,7 +608,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end) | |||
914 | if (++n >= 512) | 608 | if (++n >= 512) |
915 | break; | 609 | break; |
916 | } | 610 | } |
917 | } else { | 611 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
918 | start = __pa(start); | 612 | start = __pa(start); |
919 | end = __pa(end); | 613 | end = __pa(end); |
920 | for (va = start; va < end; va += 32) | 614 | for (va = start; va < end; va += 32) |
@@ -927,63 +621,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end) | |||
927 | } | 621 | } |
928 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 622 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
929 | 623 | ||
930 | /* If not locked, zap it. */ | ||
931 | void __flush_tlb_all(void) | ||
932 | { | ||
933 | unsigned long pstate; | ||
934 | int i; | ||
935 | |||
936 | __asm__ __volatile__("flushw\n\t" | ||
937 | "rdpr %%pstate, %0\n\t" | ||
938 | "wrpr %0, %1, %%pstate" | ||
939 | : "=r" (pstate) | ||
940 | : "i" (PSTATE_IE)); | ||
941 | if (tlb_type == spitfire) { | ||
942 | for (i = 0; i < 64; i++) { | ||
943 | /* Spitfire Errata #32 workaround */ | ||
944 | /* NOTE: Always runs on spitfire, so no | ||
945 | * cheetah+ page size encodings. | ||
946 | */ | ||
947 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
948 | "flush %%g6" | ||
949 | : /* No outputs */ | ||
950 | : "r" (0), | ||
951 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
952 | |||
953 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) { | ||
954 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
955 | "membar #Sync" | ||
956 | : /* no outputs */ | ||
957 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
958 | spitfire_put_dtlb_data(i, 0x0UL); | ||
959 | } | ||
960 | |||
961 | /* Spitfire Errata #32 workaround */ | ||
962 | /* NOTE: Always runs on spitfire, so no | ||
963 | * cheetah+ page size encodings. | ||
964 | */ | ||
965 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
966 | "flush %%g6" | ||
967 | : /* No outputs */ | ||
968 | : "r" (0), | ||
969 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
970 | |||
971 | if (!(spitfire_get_itlb_data(i) & _PAGE_L)) { | ||
972 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
973 | "membar #Sync" | ||
974 | : /* no outputs */ | ||
975 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
976 | spitfire_put_itlb_data(i, 0x0UL); | ||
977 | } | ||
978 | } | ||
979 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
980 | cheetah_flush_dtlb_all(); | ||
981 | cheetah_flush_itlb_all(); | ||
982 | } | ||
983 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | ||
984 | : : "r" (pstate)); | ||
985 | } | ||
986 | |||
987 | /* Caller does TLB context flushing on local CPU if necessary. | 624 | /* Caller does TLB context flushing on local CPU if necessary. |
988 | * The caller also ensures that CTX_VALID(mm->context) is false. | 625 | * The caller also ensures that CTX_VALID(mm->context) is false. |
989 | * | 626 | * |
@@ -991,17 +628,21 @@ void __flush_tlb_all(void) | |||
991 | * let the user have CTX 0 (nucleus) or we ever use a CTX | 628 | * let the user have CTX 0 (nucleus) or we ever use a CTX |
992 | * version of zero (and thus NO_CONTEXT would not be caught | 629 | * version of zero (and thus NO_CONTEXT would not be caught |
993 | * by version mis-match tests in mmu_context.h). | 630 | * by version mis-match tests in mmu_context.h). |
631 | * | ||
632 | * Always invoked with interrupts disabled. | ||
994 | */ | 633 | */ |
995 | void get_new_mmu_context(struct mm_struct *mm) | 634 | void get_new_mmu_context(struct mm_struct *mm) |
996 | { | 635 | { |
997 | unsigned long ctx, new_ctx; | 636 | unsigned long ctx, new_ctx; |
998 | unsigned long orig_pgsz_bits; | 637 | unsigned long orig_pgsz_bits; |
999 | 638 | unsigned long flags; | |
639 | int new_version; | ||
1000 | 640 | ||
1001 | spin_lock(&ctx_alloc_lock); | 641 | spin_lock_irqsave(&ctx_alloc_lock, flags); |
1002 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); | 642 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); |
1003 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; | 643 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; |
1004 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); | 644 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); |
645 | new_version = 0; | ||
1005 | if (new_ctx >= (1 << CTX_NR_BITS)) { | 646 | if (new_ctx >= (1 << CTX_NR_BITS)) { |
1006 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); | 647 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); |
1007 | if (new_ctx >= ctx) { | 648 | if (new_ctx >= ctx) { |
@@ -1024,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm) | |||
1024 | mmu_context_bmap[i + 2] = 0; | 665 | mmu_context_bmap[i + 2] = 0; |
1025 | mmu_context_bmap[i + 3] = 0; | 666 | mmu_context_bmap[i + 3] = 0; |
1026 | } | 667 | } |
668 | new_version = 1; | ||
1027 | goto out; | 669 | goto out; |
1028 | } | 670 | } |
1029 | } | 671 | } |
@@ -1032,79 +674,10 @@ void get_new_mmu_context(struct mm_struct *mm) | |||
1032 | out: | 674 | out: |
1033 | tlb_context_cache = new_ctx; | 675 | tlb_context_cache = new_ctx; |
1034 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; | 676 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; |
1035 | spin_unlock(&ctx_alloc_lock); | 677 | spin_unlock_irqrestore(&ctx_alloc_lock, flags); |
1036 | } | ||
1037 | |||
1038 | #ifndef CONFIG_SMP | ||
1039 | struct pgtable_cache_struct pgt_quicklists; | ||
1040 | #endif | ||
1041 | |||
1042 | /* OK, we have to color these pages. The page tables are accessed | ||
1043 | * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S | ||
1044 | * code, as well as by PAGE_OFFSET range direct-mapped addresses by | ||
1045 | * other parts of the kernel. By coloring, we make sure that the tlbmiss | ||
1046 | * fast handlers do not get data from old/garbage dcache lines that | ||
1047 | * correspond to an old/stale virtual address (user/kernel) that | ||
1048 | * previously mapped the pagetable page while accessing vpte range | ||
1049 | * addresses. The idea is that if the vpte color and PAGE_OFFSET range | ||
1050 | * color is the same, then when the kernel initializes the pagetable | ||
1051 | * using the later address range, accesses with the first address | ||
1052 | * range will see the newly initialized data rather than the garbage. | ||
1053 | */ | ||
1054 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
1055 | #define DC_ALIAS_SHIFT 1 | ||
1056 | #else | ||
1057 | #define DC_ALIAS_SHIFT 0 | ||
1058 | #endif | ||
1059 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
1060 | { | ||
1061 | struct page *page; | ||
1062 | unsigned long color; | ||
1063 | |||
1064 | { | ||
1065 | pte_t *ptep = pte_alloc_one_fast(mm, address); | ||
1066 | |||
1067 | if (ptep) | ||
1068 | return ptep; | ||
1069 | } | ||
1070 | 678 | ||
1071 | color = VPTE_COLOR(address); | 679 | if (unlikely(new_version)) |
1072 | page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT); | 680 | smp_new_mmu_context_version(); |
1073 | if (page) { | ||
1074 | unsigned long *to_free; | ||
1075 | unsigned long paddr; | ||
1076 | pte_t *pte; | ||
1077 | |||
1078 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
1079 | set_page_count(page, 1); | ||
1080 | ClearPageCompound(page); | ||
1081 | |||
1082 | set_page_count((page + 1), 1); | ||
1083 | ClearPageCompound(page + 1); | ||
1084 | #endif | ||
1085 | paddr = (unsigned long) page_address(page); | ||
1086 | memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT)); | ||
1087 | |||
1088 | if (!color) { | ||
1089 | pte = (pte_t *) paddr; | ||
1090 | to_free = (unsigned long *) (paddr + PAGE_SIZE); | ||
1091 | } else { | ||
1092 | pte = (pte_t *) (paddr + PAGE_SIZE); | ||
1093 | to_free = (unsigned long *) paddr; | ||
1094 | } | ||
1095 | |||
1096 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
1097 | /* Now free the other one up, adjust cache size. */ | ||
1098 | preempt_disable(); | ||
1099 | *to_free = (unsigned long) pte_quicklist[color ^ 0x1]; | ||
1100 | pte_quicklist[color ^ 0x1] = to_free; | ||
1101 | pgtable_cache_size++; | ||
1102 | preempt_enable(); | ||
1103 | #endif | ||
1104 | |||
1105 | return pte; | ||
1106 | } | ||
1107 | return NULL; | ||
1108 | } | 681 | } |
1109 | 682 | ||
1110 | void sparc_ultra_dump_itlb(void) | 683 | void sparc_ultra_dump_itlb(void) |
@@ -1196,9 +769,78 @@ void sparc_ultra_dump_dtlb(void) | |||
1196 | 769 | ||
1197 | extern unsigned long cmdline_memory_size; | 770 | extern unsigned long cmdline_memory_size; |
1198 | 771 | ||
1199 | unsigned long __init bootmem_init(unsigned long *pages_avail) | 772 | /* Find a free area for the bootmem map, avoiding the kernel image |
773 | * and the initial ramdisk. | ||
774 | */ | ||
775 | static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn, | ||
776 | unsigned long end_pfn) | ||
777 | { | ||
778 | unsigned long avoid_start, avoid_end, bootmap_size; | ||
779 | int i; | ||
780 | |||
781 | bootmap_size = ((end_pfn - start_pfn) + 7) / 8; | ||
782 | bootmap_size = ALIGN(bootmap_size, sizeof(long)); | ||
783 | |||
784 | avoid_start = avoid_end = 0; | ||
785 | #ifdef CONFIG_BLK_DEV_INITRD | ||
786 | avoid_start = initrd_start; | ||
787 | avoid_end = PAGE_ALIGN(initrd_end); | ||
788 | #endif | ||
789 | |||
790 | #ifdef CONFIG_DEBUG_BOOTMEM | ||
791 | prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n", | ||
792 | kern_base, PAGE_ALIGN(kern_base + kern_size), | ||
793 | avoid_start, avoid_end); | ||
794 | #endif | ||
795 | for (i = 0; i < pavail_ents; i++) { | ||
796 | unsigned long start, end; | ||
797 | |||
798 | start = pavail[i].phys_addr; | ||
799 | end = start + pavail[i].reg_size; | ||
800 | |||
801 | while (start < end) { | ||
802 | if (start >= kern_base && | ||
803 | start < PAGE_ALIGN(kern_base + kern_size)) { | ||
804 | start = PAGE_ALIGN(kern_base + kern_size); | ||
805 | continue; | ||
806 | } | ||
807 | if (start >= avoid_start && start < avoid_end) { | ||
808 | start = avoid_end; | ||
809 | continue; | ||
810 | } | ||
811 | |||
812 | if ((end - start) < bootmap_size) | ||
813 | break; | ||
814 | |||
815 | if (start < kern_base && | ||
816 | (start + bootmap_size) > kern_base) { | ||
817 | start = PAGE_ALIGN(kern_base + kern_size); | ||
818 | continue; | ||
819 | } | ||
820 | |||
821 | if (start < avoid_start && | ||
822 | (start + bootmap_size) > avoid_start) { | ||
823 | start = avoid_end; | ||
824 | continue; | ||
825 | } | ||
826 | |||
827 | /* OK, it doesn't overlap anything, use it. */ | ||
828 | #ifdef CONFIG_DEBUG_BOOTMEM | ||
829 | prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n", | ||
830 | start >> PAGE_SHIFT, start); | ||
831 | #endif | ||
832 | return start >> PAGE_SHIFT; | ||
833 | } | ||
834 | } | ||
835 | |||
836 | prom_printf("Cannot find free area for bootmap, aborting.\n"); | ||
837 | prom_halt(); | ||
838 | } | ||
839 | |||
840 | static unsigned long __init bootmem_init(unsigned long *pages_avail, | ||
841 | unsigned long phys_base) | ||
1200 | { | 842 | { |
1201 | unsigned long bootmap_size, start_pfn, end_pfn; | 843 | unsigned long bootmap_size, end_pfn; |
1202 | unsigned long end_of_phys_memory = 0UL; | 844 | unsigned long end_of_phys_memory = 0UL; |
1203 | unsigned long bootmap_pfn, bytes_avail, size; | 845 | unsigned long bootmap_pfn, bytes_avail, size; |
1204 | int i; | 846 | int i; |
@@ -1236,14 +878,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1236 | 878 | ||
1237 | *pages_avail = bytes_avail >> PAGE_SHIFT; | 879 | *pages_avail = bytes_avail >> PAGE_SHIFT; |
1238 | 880 | ||
1239 | /* Start with page aligned address of last symbol in kernel | ||
1240 | * image. The kernel is hard mapped below PAGE_OFFSET in a | ||
1241 | * 4MB locked TLB translation. | ||
1242 | */ | ||
1243 | start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT; | ||
1244 | |||
1245 | bootmap_pfn = start_pfn; | ||
1246 | |||
1247 | end_pfn = end_of_phys_memory >> PAGE_SHIFT; | 881 | end_pfn = end_of_phys_memory >> PAGE_SHIFT; |
1248 | 882 | ||
1249 | #ifdef CONFIG_BLK_DEV_INITRD | 883 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -1260,23 +894,22 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1260 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", | 894 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", |
1261 | initrd_end, end_of_phys_memory); | 895 | initrd_end, end_of_phys_memory); |
1262 | initrd_start = 0; | 896 | initrd_start = 0; |
1263 | } | 897 | initrd_end = 0; |
1264 | if (initrd_start) { | ||
1265 | if (initrd_start >= (start_pfn << PAGE_SHIFT) && | ||
1266 | initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) | ||
1267 | bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; | ||
1268 | } | 898 | } |
1269 | } | 899 | } |
1270 | #endif | 900 | #endif |
1271 | /* Initialize the boot-time allocator. */ | 901 | /* Initialize the boot-time allocator. */ |
1272 | max_pfn = max_low_pfn = end_pfn; | 902 | max_pfn = max_low_pfn = end_pfn; |
1273 | min_low_pfn = pfn_base; | 903 | min_low_pfn = (phys_base >> PAGE_SHIFT); |
904 | |||
905 | bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn); | ||
1274 | 906 | ||
1275 | #ifdef CONFIG_DEBUG_BOOTMEM | 907 | #ifdef CONFIG_DEBUG_BOOTMEM |
1276 | prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", | 908 | prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", |
1277 | min_low_pfn, bootmap_pfn, max_low_pfn); | 909 | min_low_pfn, bootmap_pfn, max_low_pfn); |
1278 | #endif | 910 | #endif |
1279 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); | 911 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, |
912 | min_low_pfn, end_pfn); | ||
1280 | 913 | ||
1281 | /* Now register the available physical memory with the | 914 | /* Now register the available physical memory with the |
1282 | * allocator. | 915 | * allocator. |
@@ -1324,9 +957,26 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1324 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); | 957 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); |
1325 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | 958 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; |
1326 | 959 | ||
960 | for (i = 0; i < pavail_ents; i++) { | ||
961 | unsigned long start_pfn, end_pfn; | ||
962 | |||
963 | start_pfn = pavail[i].phys_addr >> PAGE_SHIFT; | ||
964 | end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT)); | ||
965 | #ifdef CONFIG_DEBUG_BOOTMEM | ||
966 | prom_printf("memory_present(0, %lx, %lx)\n", | ||
967 | start_pfn, end_pfn); | ||
968 | #endif | ||
969 | memory_present(0, start_pfn, end_pfn); | ||
970 | } | ||
971 | |||
972 | sparse_init(); | ||
973 | |||
1327 | return end_pfn; | 974 | return end_pfn; |
1328 | } | 975 | } |
1329 | 976 | ||
977 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | ||
978 | static int pall_ents __initdata; | ||
979 | |||
1330 | #ifdef CONFIG_DEBUG_PAGEALLOC | 980 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1331 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) | 981 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) |
1332 | { | 982 | { |
@@ -1382,14 +1032,44 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, | |||
1382 | return alloc_bytes; | 1032 | return alloc_bytes; |
1383 | } | 1033 | } |
1384 | 1034 | ||
1385 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | ||
1386 | static int pall_ents __initdata; | ||
1387 | |||
1388 | extern unsigned int kvmap_linear_patch[1]; | 1035 | extern unsigned int kvmap_linear_patch[1]; |
1036 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | ||
1037 | |||
1038 | static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) | ||
1039 | { | ||
1040 | const unsigned long shift_256MB = 28; | ||
1041 | const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); | ||
1042 | const unsigned long size_256MB = (1UL << shift_256MB); | ||
1043 | |||
1044 | while (start < end) { | ||
1045 | long remains; | ||
1046 | |||
1047 | remains = end - start; | ||
1048 | if (remains < size_256MB) | ||
1049 | break; | ||
1050 | |||
1051 | if (start & mask_256MB) { | ||
1052 | start = (start + size_256MB) & ~mask_256MB; | ||
1053 | continue; | ||
1054 | } | ||
1055 | |||
1056 | while (remains >= size_256MB) { | ||
1057 | unsigned long index = start >> shift_256MB; | ||
1058 | |||
1059 | __set_bit(index, kpte_linear_bitmap); | ||
1060 | |||
1061 | start += size_256MB; | ||
1062 | remains -= size_256MB; | ||
1063 | } | ||
1064 | } | ||
1065 | } | ||
1389 | 1066 | ||
1390 | static void __init kernel_physical_mapping_init(void) | 1067 | static void __init kernel_physical_mapping_init(void) |
1391 | { | 1068 | { |
1392 | unsigned long i, mem_alloced = 0UL; | 1069 | unsigned long i; |
1070 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1071 | unsigned long mem_alloced = 0UL; | ||
1072 | #endif | ||
1393 | 1073 | ||
1394 | read_obp_memory("reg", &pall[0], &pall_ents); | 1074 | read_obp_memory("reg", &pall[0], &pall_ents); |
1395 | 1075 | ||
@@ -1398,10 +1078,16 @@ static void __init kernel_physical_mapping_init(void) | |||
1398 | 1078 | ||
1399 | phys_start = pall[i].phys_addr; | 1079 | phys_start = pall[i].phys_addr; |
1400 | phys_end = phys_start + pall[i].reg_size; | 1080 | phys_end = phys_start + pall[i].reg_size; |
1081 | |||
1082 | mark_kpte_bitmap(phys_start, phys_end); | ||
1083 | |||
1084 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1401 | mem_alloced += kernel_map_range(phys_start, phys_end, | 1085 | mem_alloced += kernel_map_range(phys_start, phys_end, |
1402 | PAGE_KERNEL); | 1086 | PAGE_KERNEL); |
1087 | #endif | ||
1403 | } | 1088 | } |
1404 | 1089 | ||
1090 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1405 | printk("Allocated %ld bytes for kernel page tables.\n", | 1091 | printk("Allocated %ld bytes for kernel page tables.\n", |
1406 | mem_alloced); | 1092 | mem_alloced); |
1407 | 1093 | ||
@@ -1409,8 +1095,10 @@ static void __init kernel_physical_mapping_init(void) | |||
1409 | flushi(&kvmap_linear_patch[0]); | 1095 | flushi(&kvmap_linear_patch[0]); |
1410 | 1096 | ||
1411 | __flush_tlb_all(); | 1097 | __flush_tlb_all(); |
1098 | #endif | ||
1412 | } | 1099 | } |
1413 | 1100 | ||
1101 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1414 | void kernel_map_pages(struct page *page, int numpages, int enable) | 1102 | void kernel_map_pages(struct page *page, int numpages, int enable) |
1415 | { | 1103 | { |
1416 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; | 1104 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; |
@@ -1419,6 +1107,9 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
1419 | kernel_map_range(phys_start, phys_end, | 1107 | kernel_map_range(phys_start, phys_end, |
1420 | (enable ? PAGE_KERNEL : __pgprot(0))); | 1108 | (enable ? PAGE_KERNEL : __pgprot(0))); |
1421 | 1109 | ||
1110 | flush_tsb_kernel_range(PAGE_OFFSET + phys_start, | ||
1111 | PAGE_OFFSET + phys_end); | ||
1112 | |||
1422 | /* we should perform an IPI and flush all tlbs, | 1113 | /* we should perform an IPI and flush all tlbs, |
1423 | * but that can deadlock->flush only current cpu. | 1114 | * but that can deadlock->flush only current cpu. |
1424 | */ | 1115 | */ |
@@ -1439,18 +1130,150 @@ unsigned long __init find_ecache_flush_span(unsigned long size) | |||
1439 | return ~0UL; | 1130 | return ~0UL; |
1440 | } | 1131 | } |
1441 | 1132 | ||
1133 | static void __init tsb_phys_patch(void) | ||
1134 | { | ||
1135 | struct tsb_ldquad_phys_patch_entry *pquad; | ||
1136 | struct tsb_phys_patch_entry *p; | ||
1137 | |||
1138 | pquad = &__tsb_ldquad_phys_patch; | ||
1139 | while (pquad < &__tsb_ldquad_phys_patch_end) { | ||
1140 | unsigned long addr = pquad->addr; | ||
1141 | |||
1142 | if (tlb_type == hypervisor) | ||
1143 | *(unsigned int *) addr = pquad->sun4v_insn; | ||
1144 | else | ||
1145 | *(unsigned int *) addr = pquad->sun4u_insn; | ||
1146 | wmb(); | ||
1147 | __asm__ __volatile__("flush %0" | ||
1148 | : /* no outputs */ | ||
1149 | : "r" (addr)); | ||
1150 | |||
1151 | pquad++; | ||
1152 | } | ||
1153 | |||
1154 | p = &__tsb_phys_patch; | ||
1155 | while (p < &__tsb_phys_patch_end) { | ||
1156 | unsigned long addr = p->addr; | ||
1157 | |||
1158 | *(unsigned int *) addr = p->insn; | ||
1159 | wmb(); | ||
1160 | __asm__ __volatile__("flush %0" | ||
1161 | : /* no outputs */ | ||
1162 | : "r" (addr)); | ||
1163 | |||
1164 | p++; | ||
1165 | } | ||
1166 | } | ||
1167 | |||
1168 | /* Don't mark as init, we give this to the Hypervisor. */ | ||
1169 | static struct hv_tsb_descr ktsb_descr[2]; | ||
1170 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; | ||
1171 | |||
1172 | static void __init sun4v_ktsb_init(void) | ||
1173 | { | ||
1174 | unsigned long ktsb_pa; | ||
1175 | |||
1176 | /* First KTSB for PAGE_SIZE mappings. */ | ||
1177 | ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); | ||
1178 | |||
1179 | switch (PAGE_SIZE) { | ||
1180 | case 8 * 1024: | ||
1181 | default: | ||
1182 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; | ||
1183 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; | ||
1184 | break; | ||
1185 | |||
1186 | case 64 * 1024: | ||
1187 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; | ||
1188 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; | ||
1189 | break; | ||
1190 | |||
1191 | case 512 * 1024: | ||
1192 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; | ||
1193 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; | ||
1194 | break; | ||
1195 | |||
1196 | case 4 * 1024 * 1024: | ||
1197 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; | ||
1198 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; | ||
1199 | break; | ||
1200 | }; | ||
1201 | |||
1202 | ktsb_descr[0].assoc = 1; | ||
1203 | ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; | ||
1204 | ktsb_descr[0].ctx_idx = 0; | ||
1205 | ktsb_descr[0].tsb_base = ktsb_pa; | ||
1206 | ktsb_descr[0].resv = 0; | ||
1207 | |||
1208 | /* Second KTSB for 4MB/256MB mappings. */ | ||
1209 | ktsb_pa = (kern_base + | ||
1210 | ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); | ||
1211 | |||
1212 | ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; | ||
1213 | ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | | ||
1214 | HV_PGSZ_MASK_256MB); | ||
1215 | ktsb_descr[1].assoc = 1; | ||
1216 | ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; | ||
1217 | ktsb_descr[1].ctx_idx = 0; | ||
1218 | ktsb_descr[1].tsb_base = ktsb_pa; | ||
1219 | ktsb_descr[1].resv = 0; | ||
1220 | } | ||
1221 | |||
1222 | void __cpuinit sun4v_ktsb_register(void) | ||
1223 | { | ||
1224 | register unsigned long func asm("%o5"); | ||
1225 | register unsigned long arg0 asm("%o0"); | ||
1226 | register unsigned long arg1 asm("%o1"); | ||
1227 | unsigned long pa; | ||
1228 | |||
1229 | pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); | ||
1230 | |||
1231 | func = HV_FAST_MMU_TSB_CTX0; | ||
1232 | arg0 = 2; | ||
1233 | arg1 = pa; | ||
1234 | __asm__ __volatile__("ta %6" | ||
1235 | : "=&r" (func), "=&r" (arg0), "=&r" (arg1) | ||
1236 | : "0" (func), "1" (arg0), "2" (arg1), | ||
1237 | "i" (HV_FAST_TRAP)); | ||
1238 | } | ||
1239 | |||
1442 | /* paging_init() sets up the page tables */ | 1240 | /* paging_init() sets up the page tables */ |
1443 | 1241 | ||
1444 | extern void cheetah_ecache_flush_init(void); | 1242 | extern void cheetah_ecache_flush_init(void); |
1243 | extern void sun4v_patch_tlb_handlers(void); | ||
1445 | 1244 | ||
1446 | static unsigned long last_valid_pfn; | 1245 | static unsigned long last_valid_pfn; |
1447 | pgd_t swapper_pg_dir[2048]; | 1246 | pgd_t swapper_pg_dir[2048]; |
1448 | 1247 | ||
1248 | static void sun4u_pgprot_init(void); | ||
1249 | static void sun4v_pgprot_init(void); | ||
1250 | |||
1449 | void __init paging_init(void) | 1251 | void __init paging_init(void) |
1450 | { | 1252 | { |
1451 | unsigned long end_pfn, pages_avail, shift; | 1253 | unsigned long end_pfn, pages_avail, shift, phys_base; |
1452 | unsigned long real_end, i; | 1254 | unsigned long real_end, i; |
1453 | 1255 | ||
1256 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | ||
1257 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | ||
1258 | |||
1259 | /* Invalidate both kernel TSBs. */ | ||
1260 | memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); | ||
1261 | memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); | ||
1262 | |||
1263 | if (tlb_type == hypervisor) | ||
1264 | sun4v_pgprot_init(); | ||
1265 | else | ||
1266 | sun4u_pgprot_init(); | ||
1267 | |||
1268 | if (tlb_type == cheetah_plus || | ||
1269 | tlb_type == hypervisor) | ||
1270 | tsb_phys_patch(); | ||
1271 | |||
1272 | if (tlb_type == hypervisor) { | ||
1273 | sun4v_patch_tlb_handlers(); | ||
1274 | sun4v_ktsb_init(); | ||
1275 | } | ||
1276 | |||
1454 | /* Find available physical memory... */ | 1277 | /* Find available physical memory... */ |
1455 | read_obp_memory("available", &pavail[0], &pavail_ents); | 1278 | read_obp_memory("available", &pavail[0], &pavail_ents); |
1456 | 1279 | ||
@@ -1458,11 +1281,6 @@ void __init paging_init(void) | |||
1458 | for (i = 0; i < pavail_ents; i++) | 1281 | for (i = 0; i < pavail_ents; i++) |
1459 | phys_base = min(phys_base, pavail[i].phys_addr); | 1282 | phys_base = min(phys_base, pavail[i].phys_addr); |
1460 | 1283 | ||
1461 | pfn_base = phys_base >> PAGE_SHIFT; | ||
1462 | |||
1463 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | ||
1464 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | ||
1465 | |||
1466 | set_bit(0, mmu_context_bmap); | 1284 | set_bit(0, mmu_context_bmap); |
1467 | 1285 | ||
1468 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); | 1286 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); |
@@ -1486,47 +1304,38 @@ void __init paging_init(void) | |||
1486 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | 1304 | pud_set(pud_offset(&swapper_pg_dir[0], 0), |
1487 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); | 1305 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); |
1488 | 1306 | ||
1489 | swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); | ||
1490 | |||
1491 | inherit_prom_mappings(); | 1307 | inherit_prom_mappings(); |
1492 | 1308 | ||
1493 | /* Ok, we can use our TLB miss and window trap handlers safely. | 1309 | /* Ok, we can use our TLB miss and window trap handlers safely. */ |
1494 | * We need to do a quick peek here to see if we are on StarFire | 1310 | setup_tba(); |
1495 | * or not, so setup_tba can setup the IRQ globals correctly (it | ||
1496 | * needs to get the hard smp processor id correctly). | ||
1497 | */ | ||
1498 | { | ||
1499 | extern void setup_tba(int); | ||
1500 | setup_tba(this_is_starfire); | ||
1501 | } | ||
1502 | |||
1503 | inherit_locked_prom_mappings(1); | ||
1504 | 1311 | ||
1505 | __flush_tlb_all(); | 1312 | __flush_tlb_all(); |
1506 | 1313 | ||
1314 | if (tlb_type == hypervisor) | ||
1315 | sun4v_ktsb_register(); | ||
1316 | |||
1507 | /* Setup bootmem... */ | 1317 | /* Setup bootmem... */ |
1508 | pages_avail = 0; | 1318 | pages_avail = 0; |
1509 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | 1319 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); |
1320 | |||
1321 | max_mapnr = last_valid_pfn; | ||
1510 | 1322 | ||
1511 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1512 | kernel_physical_mapping_init(); | 1323 | kernel_physical_mapping_init(); |
1513 | #endif | ||
1514 | 1324 | ||
1515 | { | 1325 | { |
1516 | unsigned long zones_size[MAX_NR_ZONES]; | 1326 | unsigned long zones_size[MAX_NR_ZONES]; |
1517 | unsigned long zholes_size[MAX_NR_ZONES]; | 1327 | unsigned long zholes_size[MAX_NR_ZONES]; |
1518 | unsigned long npages; | ||
1519 | int znum; | 1328 | int znum; |
1520 | 1329 | ||
1521 | for (znum = 0; znum < MAX_NR_ZONES; znum++) | 1330 | for (znum = 0; znum < MAX_NR_ZONES; znum++) |
1522 | zones_size[znum] = zholes_size[znum] = 0; | 1331 | zones_size[znum] = zholes_size[znum] = 0; |
1523 | 1332 | ||
1524 | npages = end_pfn - pfn_base; | 1333 | zones_size[ZONE_DMA] = end_pfn; |
1525 | zones_size[ZONE_DMA] = npages; | 1334 | zholes_size[ZONE_DMA] = end_pfn - pages_avail; |
1526 | zholes_size[ZONE_DMA] = npages - pages_avail; | ||
1527 | 1335 | ||
1528 | free_area_init_node(0, &contig_page_data, zones_size, | 1336 | free_area_init_node(0, &contig_page_data, zones_size, |
1529 | phys_base >> PAGE_SHIFT, zholes_size); | 1337 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, |
1338 | zholes_size); | ||
1530 | } | 1339 | } |
1531 | 1340 | ||
1532 | device_scan(); | 1341 | device_scan(); |
@@ -1596,7 +1405,6 @@ void __init mem_init(void) | |||
1596 | 1405 | ||
1597 | taint_real_pages(); | 1406 | taint_real_pages(); |
1598 | 1407 | ||
1599 | max_mapnr = last_valid_pfn - pfn_base; | ||
1600 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); | 1408 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); |
1601 | 1409 | ||
1602 | #ifdef CONFIG_DEBUG_BOOTMEM | 1410 | #ifdef CONFIG_DEBUG_BOOTMEM |
@@ -1676,3 +1484,342 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
1676 | } | 1484 | } |
1677 | } | 1485 | } |
1678 | #endif | 1486 | #endif |
1487 | |||
1488 | #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) | ||
1489 | #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) | ||
1490 | #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) | ||
1491 | #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) | ||
1492 | #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) | ||
1493 | #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) | ||
1494 | |||
1495 | pgprot_t PAGE_KERNEL __read_mostly; | ||
1496 | EXPORT_SYMBOL(PAGE_KERNEL); | ||
1497 | |||
1498 | pgprot_t PAGE_KERNEL_LOCKED __read_mostly; | ||
1499 | pgprot_t PAGE_COPY __read_mostly; | ||
1500 | |||
1501 | pgprot_t PAGE_SHARED __read_mostly; | ||
1502 | EXPORT_SYMBOL(PAGE_SHARED); | ||
1503 | |||
1504 | pgprot_t PAGE_EXEC __read_mostly; | ||
1505 | unsigned long pg_iobits __read_mostly; | ||
1506 | |||
1507 | unsigned long _PAGE_IE __read_mostly; | ||
1508 | |||
1509 | unsigned long _PAGE_E __read_mostly; | ||
1510 | EXPORT_SYMBOL(_PAGE_E); | ||
1511 | |||
1512 | unsigned long _PAGE_CACHE __read_mostly; | ||
1513 | EXPORT_SYMBOL(_PAGE_CACHE); | ||
1514 | |||
1515 | static void prot_init_common(unsigned long page_none, | ||
1516 | unsigned long page_shared, | ||
1517 | unsigned long page_copy, | ||
1518 | unsigned long page_readonly, | ||
1519 | unsigned long page_exec_bit) | ||
1520 | { | ||
1521 | PAGE_COPY = __pgprot(page_copy); | ||
1522 | PAGE_SHARED = __pgprot(page_shared); | ||
1523 | |||
1524 | protection_map[0x0] = __pgprot(page_none); | ||
1525 | protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); | ||
1526 | protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); | ||
1527 | protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); | ||
1528 | protection_map[0x4] = __pgprot(page_readonly); | ||
1529 | protection_map[0x5] = __pgprot(page_readonly); | ||
1530 | protection_map[0x6] = __pgprot(page_copy); | ||
1531 | protection_map[0x7] = __pgprot(page_copy); | ||
1532 | protection_map[0x8] = __pgprot(page_none); | ||
1533 | protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); | ||
1534 | protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); | ||
1535 | protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); | ||
1536 | protection_map[0xc] = __pgprot(page_readonly); | ||
1537 | protection_map[0xd] = __pgprot(page_readonly); | ||
1538 | protection_map[0xe] = __pgprot(page_shared); | ||
1539 | protection_map[0xf] = __pgprot(page_shared); | ||
1540 | } | ||
1541 | |||
1542 | static void __init sun4u_pgprot_init(void) | ||
1543 | { | ||
1544 | unsigned long page_none, page_shared, page_copy, page_readonly; | ||
1545 | unsigned long page_exec_bit; | ||
1546 | |||
1547 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | ||
1548 | _PAGE_CACHE_4U | _PAGE_P_4U | | ||
1549 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | ||
1550 | _PAGE_EXEC_4U); | ||
1551 | PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | ||
1552 | _PAGE_CACHE_4U | _PAGE_P_4U | | ||
1553 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | ||
1554 | _PAGE_EXEC_4U | _PAGE_L_4U); | ||
1555 | PAGE_EXEC = __pgprot(_PAGE_EXEC_4U); | ||
1556 | |||
1557 | _PAGE_IE = _PAGE_IE_4U; | ||
1558 | _PAGE_E = _PAGE_E_4U; | ||
1559 | _PAGE_CACHE = _PAGE_CACHE_4U; | ||
1560 | |||
1561 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | | ||
1562 | __ACCESS_BITS_4U | _PAGE_E_4U); | ||
1563 | |||
1564 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ | ||
1565 | 0xfffff80000000000; | ||
1566 | kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | | ||
1567 | _PAGE_P_4U | _PAGE_W_4U); | ||
1568 | |||
1569 | /* XXX Should use 256MB on Panther. XXX */ | ||
1570 | kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; | ||
1571 | |||
1572 | _PAGE_SZBITS = _PAGE_SZBITS_4U; | ||
1573 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | | ||
1574 | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | | ||
1575 | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); | ||
1576 | |||
1577 | |||
1578 | page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; | ||
1579 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
1580 | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); | ||
1581 | page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
1582 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | ||
1583 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
1584 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | ||
1585 | |||
1586 | page_exec_bit = _PAGE_EXEC_4U; | ||
1587 | |||
1588 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | ||
1589 | page_exec_bit); | ||
1590 | } | ||
1591 | |||
1592 | static void __init sun4v_pgprot_init(void) | ||
1593 | { | ||
1594 | unsigned long page_none, page_shared, page_copy, page_readonly; | ||
1595 | unsigned long page_exec_bit; | ||
1596 | |||
1597 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | | ||
1598 | _PAGE_CACHE_4V | _PAGE_P_4V | | ||
1599 | __ACCESS_BITS_4V | __DIRTY_BITS_4V | | ||
1600 | _PAGE_EXEC_4V); | ||
1601 | PAGE_KERNEL_LOCKED = PAGE_KERNEL; | ||
1602 | PAGE_EXEC = __pgprot(_PAGE_EXEC_4V); | ||
1603 | |||
1604 | _PAGE_IE = _PAGE_IE_4V; | ||
1605 | _PAGE_E = _PAGE_E_4V; | ||
1606 | _PAGE_CACHE = _PAGE_CACHE_4V; | ||
1607 | |||
1608 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ | ||
1609 | 0xfffff80000000000; | ||
1610 | kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | | ||
1611 | _PAGE_P_4V | _PAGE_W_4V); | ||
1612 | |||
1613 | kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ | ||
1614 | 0xfffff80000000000; | ||
1615 | kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | | ||
1616 | _PAGE_P_4V | _PAGE_W_4V); | ||
1617 | |||
1618 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | | ||
1619 | __ACCESS_BITS_4V | _PAGE_E_4V); | ||
1620 | |||
1621 | _PAGE_SZBITS = _PAGE_SZBITS_4V; | ||
1622 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | | ||
1623 | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | | ||
1624 | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | | ||
1625 | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); | ||
1626 | |||
1627 | page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; | ||
1628 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
1629 | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); | ||
1630 | page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
1631 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | ||
1632 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
1633 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | ||
1634 | |||
1635 | page_exec_bit = _PAGE_EXEC_4V; | ||
1636 | |||
1637 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | ||
1638 | page_exec_bit); | ||
1639 | } | ||
1640 | |||
1641 | unsigned long pte_sz_bits(unsigned long sz) | ||
1642 | { | ||
1643 | if (tlb_type == hypervisor) { | ||
1644 | switch (sz) { | ||
1645 | case 8 * 1024: | ||
1646 | default: | ||
1647 | return _PAGE_SZ8K_4V; | ||
1648 | case 64 * 1024: | ||
1649 | return _PAGE_SZ64K_4V; | ||
1650 | case 512 * 1024: | ||
1651 | return _PAGE_SZ512K_4V; | ||
1652 | case 4 * 1024 * 1024: | ||
1653 | return _PAGE_SZ4MB_4V; | ||
1654 | }; | ||
1655 | } else { | ||
1656 | switch (sz) { | ||
1657 | case 8 * 1024: | ||
1658 | default: | ||
1659 | return _PAGE_SZ8K_4U; | ||
1660 | case 64 * 1024: | ||
1661 | return _PAGE_SZ64K_4U; | ||
1662 | case 512 * 1024: | ||
1663 | return _PAGE_SZ512K_4U; | ||
1664 | case 4 * 1024 * 1024: | ||
1665 | return _PAGE_SZ4MB_4U; | ||
1666 | }; | ||
1667 | } | ||
1668 | } | ||
1669 | |||
1670 | pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) | ||
1671 | { | ||
1672 | pte_t pte; | ||
1673 | |||
1674 | pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); | ||
1675 | pte_val(pte) |= (((unsigned long)space) << 32); | ||
1676 | pte_val(pte) |= pte_sz_bits(page_size); | ||
1677 | |||
1678 | return pte; | ||
1679 | } | ||
1680 | |||
1681 | static unsigned long kern_large_tte(unsigned long paddr) | ||
1682 | { | ||
1683 | unsigned long val; | ||
1684 | |||
1685 | val = (_PAGE_VALID | _PAGE_SZ4MB_4U | | ||
1686 | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | | ||
1687 | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); | ||
1688 | if (tlb_type == hypervisor) | ||
1689 | val = (_PAGE_VALID | _PAGE_SZ4MB_4V | | ||
1690 | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | | ||
1691 | _PAGE_EXEC_4V | _PAGE_W_4V); | ||
1692 | |||
1693 | return val | paddr; | ||
1694 | } | ||
1695 | |||
1696 | /* | ||
1697 | * Translate PROM's mapping we capture at boot time into physical address. | ||
1698 | * The second parameter is only set from prom_callback() invocations. | ||
1699 | */ | ||
1700 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | ||
1701 | { | ||
1702 | unsigned long mask; | ||
1703 | int i; | ||
1704 | |||
1705 | mask = _PAGE_PADDR_4U; | ||
1706 | if (tlb_type == hypervisor) | ||
1707 | mask = _PAGE_PADDR_4V; | ||
1708 | |||
1709 | for (i = 0; i < prom_trans_ents; i++) { | ||
1710 | struct linux_prom_translation *p = &prom_trans[i]; | ||
1711 | |||
1712 | if (promva >= p->virt && | ||
1713 | promva < (p->virt + p->size)) { | ||
1714 | unsigned long base = p->data & mask; | ||
1715 | |||
1716 | if (error) | ||
1717 | *error = 0; | ||
1718 | return base + (promva & (8192 - 1)); | ||
1719 | } | ||
1720 | } | ||
1721 | if (error) | ||
1722 | *error = 1; | ||
1723 | return 0UL; | ||
1724 | } | ||
1725 | |||
1726 | /* XXX We should kill off this ugly thing at so me point. XXX */ | ||
1727 | unsigned long sun4u_get_pte(unsigned long addr) | ||
1728 | { | ||
1729 | pgd_t *pgdp; | ||
1730 | pud_t *pudp; | ||
1731 | pmd_t *pmdp; | ||
1732 | pte_t *ptep; | ||
1733 | unsigned long mask = _PAGE_PADDR_4U; | ||
1734 | |||
1735 | if (tlb_type == hypervisor) | ||
1736 | mask = _PAGE_PADDR_4V; | ||
1737 | |||
1738 | if (addr >= PAGE_OFFSET) | ||
1739 | return addr & mask; | ||
1740 | |||
1741 | if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) | ||
1742 | return prom_virt_to_phys(addr, NULL); | ||
1743 | |||
1744 | pgdp = pgd_offset_k(addr); | ||
1745 | pudp = pud_offset(pgdp, addr); | ||
1746 | pmdp = pmd_offset(pudp, addr); | ||
1747 | ptep = pte_offset_kernel(pmdp, addr); | ||
1748 | |||
1749 | return pte_val(*ptep) & mask; | ||
1750 | } | ||
1751 | |||
1752 | /* If not locked, zap it. */ | ||
1753 | void __flush_tlb_all(void) | ||
1754 | { | ||
1755 | unsigned long pstate; | ||
1756 | int i; | ||
1757 | |||
1758 | __asm__ __volatile__("flushw\n\t" | ||
1759 | "rdpr %%pstate, %0\n\t" | ||
1760 | "wrpr %0, %1, %%pstate" | ||
1761 | : "=r" (pstate) | ||
1762 | : "i" (PSTATE_IE)); | ||
1763 | if (tlb_type == spitfire) { | ||
1764 | for (i = 0; i < 64; i++) { | ||
1765 | /* Spitfire Errata #32 workaround */ | ||
1766 | /* NOTE: Always runs on spitfire, so no | ||
1767 | * cheetah+ page size encodings. | ||
1768 | */ | ||
1769 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
1770 | "flush %%g6" | ||
1771 | : /* No outputs */ | ||
1772 | : "r" (0), | ||
1773 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
1774 | |||
1775 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { | ||
1776 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
1777 | "membar #Sync" | ||
1778 | : /* no outputs */ | ||
1779 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
1780 | spitfire_put_dtlb_data(i, 0x0UL); | ||
1781 | } | ||
1782 | |||
1783 | /* Spitfire Errata #32 workaround */ | ||
1784 | /* NOTE: Always runs on spitfire, so no | ||
1785 | * cheetah+ page size encodings. | ||
1786 | */ | ||
1787 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
1788 | "flush %%g6" | ||
1789 | : /* No outputs */ | ||
1790 | : "r" (0), | ||
1791 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
1792 | |||
1793 | if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { | ||
1794 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
1795 | "membar #Sync" | ||
1796 | : /* no outputs */ | ||
1797 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
1798 | spitfire_put_itlb_data(i, 0x0UL); | ||
1799 | } | ||
1800 | } | ||
1801 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
1802 | cheetah_flush_dtlb_all(); | ||
1803 | cheetah_flush_itlb_all(); | ||
1804 | } | ||
1805 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | ||
1806 | : : "r" (pstate)); | ||
1807 | } | ||
1808 | |||
1809 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
1810 | |||
1811 | void online_page(struct page *page) | ||
1812 | { | ||
1813 | ClearPageReserved(page); | ||
1814 | set_page_count(page, 0); | ||
1815 | free_cold_page(page); | ||
1816 | totalram_pages++; | ||
1817 | num_physpages++; | ||
1818 | } | ||
1819 | |||
1820 | int remove_memory(u64 start, u64 size) | ||
1821 | { | ||
1822 | return -EINVAL; | ||
1823 | } | ||
1824 | |||
1825 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c index 8b104be4662b..a079cf42505e 100644 --- a/arch/sparc64/mm/tlb.c +++ b/arch/sparc64/mm/tlb.c | |||
@@ -25,6 +25,8 @@ void flush_tlb_pending(void) | |||
25 | struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); | 25 | struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); |
26 | 26 | ||
27 | if (mp->tlb_nr) { | 27 | if (mp->tlb_nr) { |
28 | flush_tsb_user(mp); | ||
29 | |||
28 | if (CTX_VALID(mp->mm->context)) { | 30 | if (CTX_VALID(mp->mm->context)) { |
29 | #ifdef CONFIG_SMP | 31 | #ifdef CONFIG_SMP |
30 | smp_flush_tlb_pending(mp->mm, mp->tlb_nr, | 32 | smp_flush_tlb_pending(mp->mm, mp->tlb_nr, |
@@ -47,7 +49,8 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t | |||
47 | if (pte_exec(orig)) | 49 | if (pte_exec(orig)) |
48 | vaddr |= 0x1UL; | 50 | vaddr |= 0x1UL; |
49 | 51 | ||
50 | if (pte_dirty(orig)) { | 52 | if (tlb_type != hypervisor && |
53 | pte_dirty(orig)) { | ||
51 | unsigned long paddr, pfn = pte_pfn(orig); | 54 | unsigned long paddr, pfn = pte_pfn(orig); |
52 | struct address_space *mapping; | 55 | struct address_space *mapping; |
53 | struct page *page; | 56 | struct page *page; |
@@ -89,62 +92,3 @@ no_cache_flush: | |||
89 | if (nr >= TLB_BATCH_NR) | 92 | if (nr >= TLB_BATCH_NR) |
90 | flush_tlb_pending(); | 93 | flush_tlb_pending(); |
91 | } | 94 | } |
92 | |||
93 | void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) | ||
94 | { | ||
95 | struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); | ||
96 | unsigned long nr = mp->tlb_nr; | ||
97 | long s = start, e = end, vpte_base; | ||
98 | |||
99 | if (mp->fullmm) | ||
100 | return; | ||
101 | |||
102 | /* If start is greater than end, that is a real problem. */ | ||
103 | BUG_ON(start > end); | ||
104 | |||
105 | /* However, straddling the VA space hole is quite normal. */ | ||
106 | s &= PMD_MASK; | ||
107 | e = (e + PMD_SIZE - 1) & PMD_MASK; | ||
108 | |||
109 | vpte_base = (tlb_type == spitfire ? | ||
110 | VPTE_BASE_SPITFIRE : | ||
111 | VPTE_BASE_CHEETAH); | ||
112 | |||
113 | if (unlikely(nr != 0 && mm != mp->mm)) { | ||
114 | flush_tlb_pending(); | ||
115 | nr = 0; | ||
116 | } | ||
117 | |||
118 | if (nr == 0) | ||
119 | mp->mm = mm; | ||
120 | |||
121 | start = vpte_base + (s >> (PAGE_SHIFT - 3)); | ||
122 | end = vpte_base + (e >> (PAGE_SHIFT - 3)); | ||
123 | |||
124 | /* If the request straddles the VA space hole, we | ||
125 | * need to swap start and end. The reason this | ||
126 | * occurs is that "vpte_base" is the center of | ||
127 | * the linear page table mapping area. Thus, | ||
128 | * high addresses with the sign bit set map to | ||
129 | * addresses below vpte_base and non-sign bit | ||
130 | * addresses map to addresses above vpte_base. | ||
131 | */ | ||
132 | if (end < start) { | ||
133 | unsigned long tmp = start; | ||
134 | |||
135 | start = end; | ||
136 | end = tmp; | ||
137 | } | ||
138 | |||
139 | while (start < end) { | ||
140 | mp->vaddrs[nr] = start; | ||
141 | mp->tlb_nr = ++nr; | ||
142 | if (nr >= TLB_BATCH_NR) { | ||
143 | flush_tlb_pending(); | ||
144 | nr = 0; | ||
145 | } | ||
146 | start += PAGE_SIZE; | ||
147 | } | ||
148 | if (nr) | ||
149 | flush_tlb_pending(); | ||
150 | } | ||
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c new file mode 100644 index 000000000000..b2064e2a44d6 --- /dev/null +++ b/arch/sparc64/mm/tsb.c | |||
@@ -0,0 +1,440 @@ | |||
1 | /* arch/sparc64/mm/tsb.c | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <asm/system.h> | ||
8 | #include <asm/page.h> | ||
9 | #include <asm/tlbflush.h> | ||
10 | #include <asm/tlb.h> | ||
11 | #include <asm/mmu_context.h> | ||
12 | #include <asm/pgtable.h> | ||
13 | #include <asm/tsb.h> | ||
14 | #include <asm/oplib.h> | ||
15 | |||
16 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; | ||
17 | |||
18 | static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries) | ||
19 | { | ||
20 | vaddr >>= PAGE_SHIFT; | ||
21 | return vaddr & (nentries - 1); | ||
22 | } | ||
23 | |||
24 | static inline int tag_compare(unsigned long tag, unsigned long vaddr) | ||
25 | { | ||
26 | return (tag == (vaddr >> 22)); | ||
27 | } | ||
28 | |||
29 | /* TSB flushes need only occur on the processor initiating the address | ||
30 | * space modification, not on each cpu the address space has run on. | ||
31 | * Only the TLB flush needs that treatment. | ||
32 | */ | ||
33 | |||
34 | void flush_tsb_kernel_range(unsigned long start, unsigned long end) | ||
35 | { | ||
36 | unsigned long v; | ||
37 | |||
38 | for (v = start; v < end; v += PAGE_SIZE) { | ||
39 | unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES); | ||
40 | struct tsb *ent = &swapper_tsb[hash]; | ||
41 | |||
42 | if (tag_compare(ent->tag, v)) { | ||
43 | ent->tag = (1UL << TSB_TAG_INVALID_BIT); | ||
44 | membar_storeload_storestore(); | ||
45 | } | ||
46 | } | ||
47 | } | ||
48 | |||
49 | void flush_tsb_user(struct mmu_gather *mp) | ||
50 | { | ||
51 | struct mm_struct *mm = mp->mm; | ||
52 | unsigned long nentries, base, flags; | ||
53 | struct tsb *tsb; | ||
54 | int i; | ||
55 | |||
56 | spin_lock_irqsave(&mm->context.lock, flags); | ||
57 | |||
58 | tsb = mm->context.tsb; | ||
59 | nentries = mm->context.tsb_nentries; | ||
60 | |||
61 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
62 | base = __pa(tsb); | ||
63 | else | ||
64 | base = (unsigned long) tsb; | ||
65 | |||
66 | for (i = 0; i < mp->tlb_nr; i++) { | ||
67 | unsigned long v = mp->vaddrs[i]; | ||
68 | unsigned long tag, ent, hash; | ||
69 | |||
70 | v &= ~0x1UL; | ||
71 | |||
72 | hash = tsb_hash(v, nentries); | ||
73 | ent = base + (hash * sizeof(struct tsb)); | ||
74 | tag = (v >> 22UL); | ||
75 | |||
76 | tsb_flush(ent, tag); | ||
77 | } | ||
78 | |||
79 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
80 | } | ||
81 | |||
82 | static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) | ||
83 | { | ||
84 | unsigned long tsb_reg, base, tsb_paddr; | ||
85 | unsigned long page_sz, tte; | ||
86 | |||
87 | mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb); | ||
88 | |||
89 | base = TSBMAP_BASE; | ||
90 | tte = pgprot_val(PAGE_KERNEL_LOCKED); | ||
91 | tsb_paddr = __pa(mm->context.tsb); | ||
92 | BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); | ||
93 | |||
94 | /* Use the smallest page size that can map the whole TSB | ||
95 | * in one TLB entry. | ||
96 | */ | ||
97 | switch (tsb_bytes) { | ||
98 | case 8192 << 0: | ||
99 | tsb_reg = 0x0UL; | ||
100 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
101 | base += (tsb_paddr & 8192); | ||
102 | #endif | ||
103 | page_sz = 8192; | ||
104 | break; | ||
105 | |||
106 | case 8192 << 1: | ||
107 | tsb_reg = 0x1UL; | ||
108 | page_sz = 64 * 1024; | ||
109 | break; | ||
110 | |||
111 | case 8192 << 2: | ||
112 | tsb_reg = 0x2UL; | ||
113 | page_sz = 64 * 1024; | ||
114 | break; | ||
115 | |||
116 | case 8192 << 3: | ||
117 | tsb_reg = 0x3UL; | ||
118 | page_sz = 64 * 1024; | ||
119 | break; | ||
120 | |||
121 | case 8192 << 4: | ||
122 | tsb_reg = 0x4UL; | ||
123 | page_sz = 512 * 1024; | ||
124 | break; | ||
125 | |||
126 | case 8192 << 5: | ||
127 | tsb_reg = 0x5UL; | ||
128 | page_sz = 512 * 1024; | ||
129 | break; | ||
130 | |||
131 | case 8192 << 6: | ||
132 | tsb_reg = 0x6UL; | ||
133 | page_sz = 512 * 1024; | ||
134 | break; | ||
135 | |||
136 | case 8192 << 7: | ||
137 | tsb_reg = 0x7UL; | ||
138 | page_sz = 4 * 1024 * 1024; | ||
139 | break; | ||
140 | |||
141 | default: | ||
142 | BUG(); | ||
143 | }; | ||
144 | tte |= pte_sz_bits(page_sz); | ||
145 | |||
146 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) { | ||
147 | /* Physical mapping, no locked TLB entry for TSB. */ | ||
148 | tsb_reg |= tsb_paddr; | ||
149 | |||
150 | mm->context.tsb_reg_val = tsb_reg; | ||
151 | mm->context.tsb_map_vaddr = 0; | ||
152 | mm->context.tsb_map_pte = 0; | ||
153 | } else { | ||
154 | tsb_reg |= base; | ||
155 | tsb_reg |= (tsb_paddr & (page_sz - 1UL)); | ||
156 | tte |= (tsb_paddr & ~(page_sz - 1UL)); | ||
157 | |||
158 | mm->context.tsb_reg_val = tsb_reg; | ||
159 | mm->context.tsb_map_vaddr = base; | ||
160 | mm->context.tsb_map_pte = tte; | ||
161 | } | ||
162 | |||
163 | /* Setup the Hypervisor TSB descriptor. */ | ||
164 | if (tlb_type == hypervisor) { | ||
165 | struct hv_tsb_descr *hp = &mm->context.tsb_descr; | ||
166 | |||
167 | switch (PAGE_SIZE) { | ||
168 | case 8192: | ||
169 | default: | ||
170 | hp->pgsz_idx = HV_PGSZ_IDX_8K; | ||
171 | break; | ||
172 | |||
173 | case 64 * 1024: | ||
174 | hp->pgsz_idx = HV_PGSZ_IDX_64K; | ||
175 | break; | ||
176 | |||
177 | case 512 * 1024: | ||
178 | hp->pgsz_idx = HV_PGSZ_IDX_512K; | ||
179 | break; | ||
180 | |||
181 | case 4 * 1024 * 1024: | ||
182 | hp->pgsz_idx = HV_PGSZ_IDX_4MB; | ||
183 | break; | ||
184 | }; | ||
185 | hp->assoc = 1; | ||
186 | hp->num_ttes = tsb_bytes / 16; | ||
187 | hp->ctx_idx = 0; | ||
188 | switch (PAGE_SIZE) { | ||
189 | case 8192: | ||
190 | default: | ||
191 | hp->pgsz_mask = HV_PGSZ_MASK_8K; | ||
192 | break; | ||
193 | |||
194 | case 64 * 1024: | ||
195 | hp->pgsz_mask = HV_PGSZ_MASK_64K; | ||
196 | break; | ||
197 | |||
198 | case 512 * 1024: | ||
199 | hp->pgsz_mask = HV_PGSZ_MASK_512K; | ||
200 | break; | ||
201 | |||
202 | case 4 * 1024 * 1024: | ||
203 | hp->pgsz_mask = HV_PGSZ_MASK_4MB; | ||
204 | break; | ||
205 | }; | ||
206 | hp->tsb_base = tsb_paddr; | ||
207 | hp->resv = 0; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | static kmem_cache_t *tsb_caches[8] __read_mostly; | ||
212 | |||
213 | static const char *tsb_cache_names[8] = { | ||
214 | "tsb_8KB", | ||
215 | "tsb_16KB", | ||
216 | "tsb_32KB", | ||
217 | "tsb_64KB", | ||
218 | "tsb_128KB", | ||
219 | "tsb_256KB", | ||
220 | "tsb_512KB", | ||
221 | "tsb_1MB", | ||
222 | }; | ||
223 | |||
224 | void __init tsb_cache_init(void) | ||
225 | { | ||
226 | unsigned long i; | ||
227 | |||
228 | for (i = 0; i < 8; i++) { | ||
229 | unsigned long size = 8192 << i; | ||
230 | const char *name = tsb_cache_names[i]; | ||
231 | |||
232 | tsb_caches[i] = kmem_cache_create(name, | ||
233 | size, size, | ||
234 | SLAB_HWCACHE_ALIGN | | ||
235 | SLAB_MUST_HWCACHE_ALIGN, | ||
236 | NULL, NULL); | ||
237 | if (!tsb_caches[i]) { | ||
238 | prom_printf("Could not create %s cache\n", name); | ||
239 | prom_halt(); | ||
240 | } | ||
241 | } | ||
242 | } | ||
243 | |||
244 | /* When the RSS of an address space exceeds mm->context.tsb_rss_limit, | ||
245 | * do_sparc64_fault() invokes this routine to try and grow the TSB. | ||
246 | * | ||
247 | * When we reach the maximum TSB size supported, we stick ~0UL into | ||
248 | * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache() | ||
249 | * will not trigger any longer. | ||
250 | * | ||
251 | * The TSB can be anywhere from 8K to 1MB in size, in increasing powers | ||
252 | * of two. The TSB must be aligned to it's size, so f.e. a 512K TSB | ||
253 | * must be 512K aligned. It also must be physically contiguous, so we | ||
254 | * cannot use vmalloc(). | ||
255 | * | ||
256 | * The idea here is to grow the TSB when the RSS of the process approaches | ||
257 | * the number of entries that the current TSB can hold at once. Currently, | ||
258 | * we trigger when the RSS hits 3/4 of the TSB capacity. | ||
259 | */ | ||
260 | void tsb_grow(struct mm_struct *mm, unsigned long rss) | ||
261 | { | ||
262 | unsigned long max_tsb_size = 1 * 1024 * 1024; | ||
263 | unsigned long new_size, old_size, flags; | ||
264 | struct tsb *old_tsb, *new_tsb; | ||
265 | unsigned long new_cache_index, old_cache_index; | ||
266 | unsigned long new_rss_limit; | ||
267 | gfp_t gfp_flags; | ||
268 | |||
269 | if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) | ||
270 | max_tsb_size = (PAGE_SIZE << MAX_ORDER); | ||
271 | |||
272 | new_cache_index = 0; | ||
273 | for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) { | ||
274 | unsigned long n_entries = new_size / sizeof(struct tsb); | ||
275 | |||
276 | n_entries = (n_entries * 3) / 4; | ||
277 | if (n_entries > rss) | ||
278 | break; | ||
279 | |||
280 | new_cache_index++; | ||
281 | } | ||
282 | |||
283 | if (new_size == max_tsb_size) | ||
284 | new_rss_limit = ~0UL; | ||
285 | else | ||
286 | new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4; | ||
287 | |||
288 | retry_tsb_alloc: | ||
289 | gfp_flags = GFP_KERNEL; | ||
290 | if (new_size > (PAGE_SIZE * 2)) | ||
291 | gfp_flags = __GFP_NOWARN | __GFP_NORETRY; | ||
292 | |||
293 | new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags); | ||
294 | if (unlikely(!new_tsb)) { | ||
295 | /* Not being able to fork due to a high-order TSB | ||
296 | * allocation failure is very bad behavior. Just back | ||
297 | * down to a 0-order allocation and force no TSB | ||
298 | * growing for this address space. | ||
299 | */ | ||
300 | if (mm->context.tsb == NULL && new_cache_index > 0) { | ||
301 | new_cache_index = 0; | ||
302 | new_size = 8192; | ||
303 | new_rss_limit = ~0UL; | ||
304 | goto retry_tsb_alloc; | ||
305 | } | ||
306 | |||
307 | /* If we failed on a TSB grow, we are under serious | ||
308 | * memory pressure so don't try to grow any more. | ||
309 | */ | ||
310 | if (mm->context.tsb != NULL) | ||
311 | mm->context.tsb_rss_limit = ~0UL; | ||
312 | return; | ||
313 | } | ||
314 | |||
315 | /* Mark all tags as invalid. */ | ||
316 | tsb_init(new_tsb, new_size); | ||
317 | |||
318 | /* Ok, we are about to commit the changes. If we are | ||
319 | * growing an existing TSB the locking is very tricky, | ||
320 | * so WATCH OUT! | ||
321 | * | ||
322 | * We have to hold mm->context.lock while committing to the | ||
323 | * new TSB, this synchronizes us with processors in | ||
324 | * flush_tsb_user() and switch_mm() for this address space. | ||
325 | * | ||
326 | * But even with that lock held, processors run asynchronously | ||
327 | * accessing the old TSB via TLB miss handling. This is OK | ||
328 | * because those actions are just propagating state from the | ||
329 | * Linux page tables into the TSB, page table mappings are not | ||
330 | * being changed. If a real fault occurs, the processor will | ||
331 | * synchronize with us when it hits flush_tsb_user(), this is | ||
332 | * also true for the case where vmscan is modifying the page | ||
333 | * tables. The only thing we need to be careful with is to | ||
334 | * skip any locked TSB entries during copy_tsb(). | ||
335 | * | ||
336 | * When we finish committing to the new TSB, we have to drop | ||
337 | * the lock and ask all other cpus running this address space | ||
338 | * to run tsb_context_switch() to see the new TSB table. | ||
339 | */ | ||
340 | spin_lock_irqsave(&mm->context.lock, flags); | ||
341 | |||
342 | old_tsb = mm->context.tsb; | ||
343 | old_cache_index = (mm->context.tsb_reg_val & 0x7UL); | ||
344 | old_size = mm->context.tsb_nentries * sizeof(struct tsb); | ||
345 | |||
346 | |||
347 | /* Handle multiple threads trying to grow the TSB at the same time. | ||
348 | * One will get in here first, and bump the size and the RSS limit. | ||
349 | * The others will get in here next and hit this check. | ||
350 | */ | ||
351 | if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) { | ||
352 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
353 | |||
354 | kmem_cache_free(tsb_caches[new_cache_index], new_tsb); | ||
355 | return; | ||
356 | } | ||
357 | |||
358 | mm->context.tsb_rss_limit = new_rss_limit; | ||
359 | |||
360 | if (old_tsb) { | ||
361 | extern void copy_tsb(unsigned long old_tsb_base, | ||
362 | unsigned long old_tsb_size, | ||
363 | unsigned long new_tsb_base, | ||
364 | unsigned long new_tsb_size); | ||
365 | unsigned long old_tsb_base = (unsigned long) old_tsb; | ||
366 | unsigned long new_tsb_base = (unsigned long) new_tsb; | ||
367 | |||
368 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) { | ||
369 | old_tsb_base = __pa(old_tsb_base); | ||
370 | new_tsb_base = __pa(new_tsb_base); | ||
371 | } | ||
372 | copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); | ||
373 | } | ||
374 | |||
375 | mm->context.tsb = new_tsb; | ||
376 | setup_tsb_params(mm, new_size); | ||
377 | |||
378 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
379 | |||
380 | /* If old_tsb is NULL, we're being invoked for the first time | ||
381 | * from init_new_context(). | ||
382 | */ | ||
383 | if (old_tsb) { | ||
384 | /* Reload it on the local cpu. */ | ||
385 | tsb_context_switch(mm); | ||
386 | |||
387 | /* Now force other processors to do the same. */ | ||
388 | smp_tsb_sync(mm); | ||
389 | |||
390 | /* Now it is safe to free the old tsb. */ | ||
391 | kmem_cache_free(tsb_caches[old_cache_index], old_tsb); | ||
392 | } | ||
393 | } | ||
394 | |||
395 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
396 | { | ||
397 | spin_lock_init(&mm->context.lock); | ||
398 | |||
399 | mm->context.sparc64_ctx_val = 0UL; | ||
400 | |||
401 | /* copy_mm() copies over the parent's mm_struct before calling | ||
402 | * us, so we need to zero out the TSB pointer or else tsb_grow() | ||
403 | * will be confused and think there is an older TSB to free up. | ||
404 | */ | ||
405 | mm->context.tsb = NULL; | ||
406 | |||
407 | /* If this is fork, inherit the parent's TSB size. We would | ||
408 | * grow it to that size on the first page fault anyways. | ||
409 | */ | ||
410 | tsb_grow(mm, get_mm_rss(mm)); | ||
411 | |||
412 | if (unlikely(!mm->context.tsb)) | ||
413 | return -ENOMEM; | ||
414 | |||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | void destroy_context(struct mm_struct *mm) | ||
419 | { | ||
420 | unsigned long flags, cache_index; | ||
421 | |||
422 | cache_index = (mm->context.tsb_reg_val & 0x7UL); | ||
423 | kmem_cache_free(tsb_caches[cache_index], mm->context.tsb); | ||
424 | |||
425 | /* We can remove these later, but for now it's useful | ||
426 | * to catch any bogus post-destroy_context() references | ||
427 | * to the TSB. | ||
428 | */ | ||
429 | mm->context.tsb = NULL; | ||
430 | mm->context.tsb_reg_val = 0UL; | ||
431 | |||
432 | spin_lock_irqsave(&ctx_alloc_lock, flags); | ||
433 | |||
434 | if (CTX_VALID(mm->context)) { | ||
435 | unsigned long nr = CTX_NRBITS(mm->context); | ||
436 | mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); | ||
437 | } | ||
438 | |||
439 | spin_unlock_irqrestore(&ctx_alloc_lock, flags); | ||
440 | } | ||
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index e4c9151fa116..f8479fad4047 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/head.h> | 15 | #include <asm/head.h> |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/hypervisor.h> | ||
18 | 19 | ||
19 | /* Basically, most of the Spitfire vs. Cheetah madness | 20 | /* Basically, most of the Spitfire vs. Cheetah madness |
20 | * has to do with the fact that Cheetah does not support | 21 | * has to do with the fact that Cheetah does not support |
@@ -29,16 +30,18 @@ | |||
29 | .text | 30 | .text |
30 | .align 32 | 31 | .align 32 |
31 | .globl __flush_tlb_mm | 32 | .globl __flush_tlb_mm |
32 | __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ | 33 | __flush_tlb_mm: /* 18 insns */ |
34 | /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ | ||
33 | ldxa [%o1] ASI_DMMU, %g2 | 35 | ldxa [%o1] ASI_DMMU, %g2 |
34 | cmp %g2, %o0 | 36 | cmp %g2, %o0 |
35 | bne,pn %icc, __spitfire_flush_tlb_mm_slow | 37 | bne,pn %icc, __spitfire_flush_tlb_mm_slow |
36 | mov 0x50, %g3 | 38 | mov 0x50, %g3 |
37 | stxa %g0, [%g3] ASI_DMMU_DEMAP | 39 | stxa %g0, [%g3] ASI_DMMU_DEMAP |
38 | stxa %g0, [%g3] ASI_IMMU_DEMAP | 40 | stxa %g0, [%g3] ASI_IMMU_DEMAP |
41 | sethi %hi(KERNBASE), %g3 | ||
42 | flush %g3 | ||
39 | retl | 43 | retl |
40 | flush %g6 | 44 | nop |
41 | nop | ||
42 | nop | 45 | nop |
43 | nop | 46 | nop |
44 | nop | 47 | nop |
@@ -51,7 +54,7 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ | |||
51 | 54 | ||
52 | .align 32 | 55 | .align 32 |
53 | .globl __flush_tlb_pending | 56 | .globl __flush_tlb_pending |
54 | __flush_tlb_pending: | 57 | __flush_tlb_pending: /* 26 insns */ |
55 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 58 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
56 | rdpr %pstate, %g7 | 59 | rdpr %pstate, %g7 |
57 | sllx %o1, 3, %o1 | 60 | sllx %o1, 3, %o1 |
@@ -72,7 +75,8 @@ __flush_tlb_pending: | |||
72 | brnz,pt %o1, 1b | 75 | brnz,pt %o1, 1b |
73 | nop | 76 | nop |
74 | stxa %g2, [%o4] ASI_DMMU | 77 | stxa %g2, [%o4] ASI_DMMU |
75 | flush %g6 | 78 | sethi %hi(KERNBASE), %o4 |
79 | flush %o4 | ||
76 | retl | 80 | retl |
77 | wrpr %g7, 0x0, %pstate | 81 | wrpr %g7, 0x0, %pstate |
78 | nop | 82 | nop |
@@ -82,7 +86,8 @@ __flush_tlb_pending: | |||
82 | 86 | ||
83 | .align 32 | 87 | .align 32 |
84 | .globl __flush_tlb_kernel_range | 88 | .globl __flush_tlb_kernel_range |
85 | __flush_tlb_kernel_range: /* %o0=start, %o1=end */ | 89 | __flush_tlb_kernel_range: /* 16 insns */ |
90 | /* %o0=start, %o1=end */ | ||
86 | cmp %o0, %o1 | 91 | cmp %o0, %o1 |
87 | be,pn %xcc, 2f | 92 | be,pn %xcc, 2f |
88 | sethi %hi(PAGE_SIZE), %o4 | 93 | sethi %hi(PAGE_SIZE), %o4 |
@@ -94,8 +99,11 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */ | |||
94 | membar #Sync | 99 | membar #Sync |
95 | brnz,pt %o3, 1b | 100 | brnz,pt %o3, 1b |
96 | sub %o3, %o4, %o3 | 101 | sub %o3, %o4, %o3 |
97 | 2: retl | 102 | 2: sethi %hi(KERNBASE), %o3 |
98 | flush %g6 | 103 | flush %o3 |
104 | retl | ||
105 | nop | ||
106 | nop | ||
99 | 107 | ||
100 | __spitfire_flush_tlb_mm_slow: | 108 | __spitfire_flush_tlb_mm_slow: |
101 | rdpr %pstate, %g1 | 109 | rdpr %pstate, %g1 |
@@ -105,7 +113,8 @@ __spitfire_flush_tlb_mm_slow: | |||
105 | stxa %g0, [%g3] ASI_IMMU_DEMAP | 113 | stxa %g0, [%g3] ASI_IMMU_DEMAP |
106 | flush %g6 | 114 | flush %g6 |
107 | stxa %g2, [%o1] ASI_DMMU | 115 | stxa %g2, [%o1] ASI_DMMU |
108 | flush %g6 | 116 | sethi %hi(KERNBASE), %o1 |
117 | flush %o1 | ||
109 | retl | 118 | retl |
110 | wrpr %g1, 0, %pstate | 119 | wrpr %g1, 0, %pstate |
111 | 120 | ||
@@ -181,7 +190,7 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ | |||
181 | .previous | 190 | .previous |
182 | 191 | ||
183 | /* Cheetah specific versions, patched at boot time. */ | 192 | /* Cheetah specific versions, patched at boot time. */ |
184 | __cheetah_flush_tlb_mm: /* 18 insns */ | 193 | __cheetah_flush_tlb_mm: /* 19 insns */ |
185 | rdpr %pstate, %g7 | 194 | rdpr %pstate, %g7 |
186 | andn %g7, PSTATE_IE, %g2 | 195 | andn %g7, PSTATE_IE, %g2 |
187 | wrpr %g2, 0x0, %pstate | 196 | wrpr %g2, 0x0, %pstate |
@@ -196,12 +205,13 @@ __cheetah_flush_tlb_mm: /* 18 insns */ | |||
196 | stxa %g0, [%g3] ASI_DMMU_DEMAP | 205 | stxa %g0, [%g3] ASI_DMMU_DEMAP |
197 | stxa %g0, [%g3] ASI_IMMU_DEMAP | 206 | stxa %g0, [%g3] ASI_IMMU_DEMAP |
198 | stxa %g2, [%o2] ASI_DMMU | 207 | stxa %g2, [%o2] ASI_DMMU |
199 | flush %g6 | 208 | sethi %hi(KERNBASE), %o2 |
209 | flush %o2 | ||
200 | wrpr %g0, 0, %tl | 210 | wrpr %g0, 0, %tl |
201 | retl | 211 | retl |
202 | wrpr %g7, 0x0, %pstate | 212 | wrpr %g7, 0x0, %pstate |
203 | 213 | ||
204 | __cheetah_flush_tlb_pending: /* 26 insns */ | 214 | __cheetah_flush_tlb_pending: /* 27 insns */ |
205 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 215 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
206 | rdpr %pstate, %g7 | 216 | rdpr %pstate, %g7 |
207 | sllx %o1, 3, %o1 | 217 | sllx %o1, 3, %o1 |
@@ -225,7 +235,8 @@ __cheetah_flush_tlb_pending: /* 26 insns */ | |||
225 | brnz,pt %o1, 1b | 235 | brnz,pt %o1, 1b |
226 | nop | 236 | nop |
227 | stxa %g2, [%o4] ASI_DMMU | 237 | stxa %g2, [%o4] ASI_DMMU |
228 | flush %g6 | 238 | sethi %hi(KERNBASE), %o4 |
239 | flush %o4 | ||
229 | wrpr %g0, 0, %tl | 240 | wrpr %g0, 0, %tl |
230 | retl | 241 | retl |
231 | wrpr %g7, 0x0, %pstate | 242 | wrpr %g7, 0x0, %pstate |
@@ -245,7 +256,76 @@ __cheetah_flush_dcache_page: /* 11 insns */ | |||
245 | nop | 256 | nop |
246 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 257 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
247 | 258 | ||
248 | cheetah_patch_one: | 259 | /* Hypervisor specific versions, patched at boot time. */ |
260 | __hypervisor_tlb_tl0_error: | ||
261 | save %sp, -192, %sp | ||
262 | mov %i0, %o0 | ||
263 | call hypervisor_tlbop_error | ||
264 | mov %i1, %o1 | ||
265 | ret | ||
266 | restore | ||
267 | |||
268 | __hypervisor_flush_tlb_mm: /* 10 insns */ | ||
269 | mov %o0, %o2 /* ARG2: mmu context */ | ||
270 | mov 0, %o0 /* ARG0: CPU lists unimplemented */ | ||
271 | mov 0, %o1 /* ARG1: CPU lists unimplemented */ | ||
272 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | ||
273 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | ||
274 | ta HV_FAST_TRAP | ||
275 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
276 | mov HV_FAST_MMU_DEMAP_CTX, %o1 | ||
277 | retl | ||
278 | nop | ||
279 | |||
280 | __hypervisor_flush_tlb_pending: /* 16 insns */ | ||
281 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | ||
282 | sllx %o1, 3, %g1 | ||
283 | mov %o2, %g2 | ||
284 | mov %o0, %g3 | ||
285 | 1: sub %g1, (1 << 3), %g1 | ||
286 | ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ | ||
287 | mov %g3, %o1 /* ARG1: mmu context */ | ||
288 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
289 | srlx %o0, PAGE_SHIFT, %o0 | ||
290 | sllx %o0, PAGE_SHIFT, %o0 | ||
291 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
292 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
293 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | ||
294 | brnz,pt %g1, 1b | ||
295 | nop | ||
296 | retl | ||
297 | nop | ||
298 | |||
299 | __hypervisor_flush_tlb_kernel_range: /* 16 insns */ | ||
300 | /* %o0=start, %o1=end */ | ||
301 | cmp %o0, %o1 | ||
302 | be,pn %xcc, 2f | ||
303 | sethi %hi(PAGE_SIZE), %g3 | ||
304 | mov %o0, %g1 | ||
305 | sub %o1, %g1, %g2 | ||
306 | sub %g2, %g3, %g2 | ||
307 | 1: add %g1, %g2, %o0 /* ARG0: virtual address */ | ||
308 | mov 0, %o1 /* ARG1: mmu context */ | ||
309 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
310 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
311 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
312 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | ||
313 | brnz,pt %g2, 1b | ||
314 | sub %g2, %g3, %g2 | ||
315 | 2: retl | ||
316 | nop | ||
317 | |||
318 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
319 | /* XXX Niagara and friends have an 8K cache, so no aliasing is | ||
320 | * XXX possible, but nothing explicit in the Hypervisor API | ||
321 | * XXX guarantees this. | ||
322 | */ | ||
323 | __hypervisor_flush_dcache_page: /* 2 insns */ | ||
324 | retl | ||
325 | nop | ||
326 | #endif | ||
327 | |||
328 | tlb_patch_one: | ||
249 | 1: lduw [%o1], %g1 | 329 | 1: lduw [%o1], %g1 |
250 | stw %g1, [%o0] | 330 | stw %g1, [%o0] |
251 | flush %o0 | 331 | flush %o0 |
@@ -264,22 +344,22 @@ cheetah_patch_cachetlbops: | |||
264 | or %o0, %lo(__flush_tlb_mm), %o0 | 344 | or %o0, %lo(__flush_tlb_mm), %o0 |
265 | sethi %hi(__cheetah_flush_tlb_mm), %o1 | 345 | sethi %hi(__cheetah_flush_tlb_mm), %o1 |
266 | or %o1, %lo(__cheetah_flush_tlb_mm), %o1 | 346 | or %o1, %lo(__cheetah_flush_tlb_mm), %o1 |
267 | call cheetah_patch_one | 347 | call tlb_patch_one |
268 | mov 18, %o2 | 348 | mov 19, %o2 |
269 | 349 | ||
270 | sethi %hi(__flush_tlb_pending), %o0 | 350 | sethi %hi(__flush_tlb_pending), %o0 |
271 | or %o0, %lo(__flush_tlb_pending), %o0 | 351 | or %o0, %lo(__flush_tlb_pending), %o0 |
272 | sethi %hi(__cheetah_flush_tlb_pending), %o1 | 352 | sethi %hi(__cheetah_flush_tlb_pending), %o1 |
273 | or %o1, %lo(__cheetah_flush_tlb_pending), %o1 | 353 | or %o1, %lo(__cheetah_flush_tlb_pending), %o1 |
274 | call cheetah_patch_one | 354 | call tlb_patch_one |
275 | mov 26, %o2 | 355 | mov 27, %o2 |
276 | 356 | ||
277 | #ifdef DCACHE_ALIASING_POSSIBLE | 357 | #ifdef DCACHE_ALIASING_POSSIBLE |
278 | sethi %hi(__flush_dcache_page), %o0 | 358 | sethi %hi(__flush_dcache_page), %o0 |
279 | or %o0, %lo(__flush_dcache_page), %o0 | 359 | or %o0, %lo(__flush_dcache_page), %o0 |
280 | sethi %hi(__cheetah_flush_dcache_page), %o1 | 360 | sethi %hi(__cheetah_flush_dcache_page), %o1 |
281 | or %o1, %lo(__cheetah_flush_dcache_page), %o1 | 361 | or %o1, %lo(__cheetah_flush_dcache_page), %o1 |
282 | call cheetah_patch_one | 362 | call tlb_patch_one |
283 | mov 11, %o2 | 363 | mov 11, %o2 |
284 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 364 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
285 | 365 | ||
@@ -295,16 +375,14 @@ cheetah_patch_cachetlbops: | |||
295 | * %g1 address arg 1 (tlb page and range flushes) | 375 | * %g1 address arg 1 (tlb page and range flushes) |
296 | * %g7 address arg 2 (tlb range flush only) | 376 | * %g7 address arg 2 (tlb range flush only) |
297 | * | 377 | * |
298 | * %g6 ivector table, don't touch | 378 | * %g6 scratch 1 |
299 | * %g2 scratch 1 | 379 | * %g2 scratch 2 |
300 | * %g3 scratch 2 | 380 | * %g3 scratch 3 |
301 | * %g4 scratch 3 | 381 | * %g4 scratch 4 |
302 | * | ||
303 | * TODO: Make xcall TLB range flushes use the tricks above... -DaveM | ||
304 | */ | 382 | */ |
305 | .align 32 | 383 | .align 32 |
306 | .globl xcall_flush_tlb_mm | 384 | .globl xcall_flush_tlb_mm |
307 | xcall_flush_tlb_mm: | 385 | xcall_flush_tlb_mm: /* 21 insns */ |
308 | mov PRIMARY_CONTEXT, %g2 | 386 | mov PRIMARY_CONTEXT, %g2 |
309 | ldxa [%g2] ASI_DMMU, %g3 | 387 | ldxa [%g2] ASI_DMMU, %g3 |
310 | srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 | 388 | srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 |
@@ -316,9 +394,19 @@ xcall_flush_tlb_mm: | |||
316 | stxa %g0, [%g4] ASI_IMMU_DEMAP | 394 | stxa %g0, [%g4] ASI_IMMU_DEMAP |
317 | stxa %g3, [%g2] ASI_DMMU | 395 | stxa %g3, [%g2] ASI_DMMU |
318 | retry | 396 | retry |
397 | nop | ||
398 | nop | ||
399 | nop | ||
400 | nop | ||
401 | nop | ||
402 | nop | ||
403 | nop | ||
404 | nop | ||
405 | nop | ||
406 | nop | ||
319 | 407 | ||
320 | .globl xcall_flush_tlb_pending | 408 | .globl xcall_flush_tlb_pending |
321 | xcall_flush_tlb_pending: | 409 | xcall_flush_tlb_pending: /* 21 insns */ |
322 | /* %g5=context, %g1=nr, %g7=vaddrs[] */ | 410 | /* %g5=context, %g1=nr, %g7=vaddrs[] */ |
323 | sllx %g1, 3, %g1 | 411 | sllx %g1, 3, %g1 |
324 | mov PRIMARY_CONTEXT, %g4 | 412 | mov PRIMARY_CONTEXT, %g4 |
@@ -341,9 +429,10 @@ xcall_flush_tlb_pending: | |||
341 | nop | 429 | nop |
342 | stxa %g2, [%g4] ASI_DMMU | 430 | stxa %g2, [%g4] ASI_DMMU |
343 | retry | 431 | retry |
432 | nop | ||
344 | 433 | ||
345 | .globl xcall_flush_tlb_kernel_range | 434 | .globl xcall_flush_tlb_kernel_range |
346 | xcall_flush_tlb_kernel_range: | 435 | xcall_flush_tlb_kernel_range: /* 25 insns */ |
347 | sethi %hi(PAGE_SIZE - 1), %g2 | 436 | sethi %hi(PAGE_SIZE - 1), %g2 |
348 | or %g2, %lo(PAGE_SIZE - 1), %g2 | 437 | or %g2, %lo(PAGE_SIZE - 1), %g2 |
349 | andn %g1, %g2, %g1 | 438 | andn %g1, %g2, %g1 |
@@ -360,14 +449,30 @@ xcall_flush_tlb_kernel_range: | |||
360 | retry | 449 | retry |
361 | nop | 450 | nop |
362 | nop | 451 | nop |
452 | nop | ||
453 | nop | ||
454 | nop | ||
455 | nop | ||
456 | nop | ||
457 | nop | ||
458 | nop | ||
459 | nop | ||
460 | nop | ||
363 | 461 | ||
364 | /* This runs in a very controlled environment, so we do | 462 | /* This runs in a very controlled environment, so we do |
365 | * not need to worry about BH races etc. | 463 | * not need to worry about BH races etc. |
366 | */ | 464 | */ |
367 | .globl xcall_sync_tick | 465 | .globl xcall_sync_tick |
368 | xcall_sync_tick: | 466 | xcall_sync_tick: |
369 | rdpr %pstate, %g2 | 467 | |
468 | 661: rdpr %pstate, %g2 | ||
370 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate | 469 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate |
470 | .section .sun4v_2insn_patch, "ax" | ||
471 | .word 661b | ||
472 | nop | ||
473 | nop | ||
474 | .previous | ||
475 | |||
371 | rdpr %pil, %g2 | 476 | rdpr %pil, %g2 |
372 | wrpr %g0, 15, %pil | 477 | wrpr %g0, 15, %pil |
373 | sethi %hi(109f), %g7 | 478 | sethi %hi(109f), %g7 |
@@ -390,8 +495,15 @@ xcall_sync_tick: | |||
390 | */ | 495 | */ |
391 | .globl xcall_report_regs | 496 | .globl xcall_report_regs |
392 | xcall_report_regs: | 497 | xcall_report_regs: |
393 | rdpr %pstate, %g2 | 498 | |
499 | 661: rdpr %pstate, %g2 | ||
394 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate | 500 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate |
501 | .section .sun4v_2insn_patch, "ax" | ||
502 | .word 661b | ||
503 | nop | ||
504 | nop | ||
505 | .previous | ||
506 | |||
395 | rdpr %pil, %g2 | 507 | rdpr %pil, %g2 |
396 | wrpr %g0, 15, %pil | 508 | wrpr %g0, 15, %pil |
397 | sethi %hi(109f), %g7 | 509 | sethi %hi(109f), %g7 |
@@ -453,62 +565,96 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address | |||
453 | nop | 565 | nop |
454 | nop | 566 | nop |
455 | 567 | ||
456 | .data | 568 | /* %g5: error |
457 | 569 | * %g6: tlb op | |
458 | errata32_hwbug: | 570 | */ |
459 | .xword 0 | 571 | __hypervisor_tlb_xcall_error: |
460 | 572 | mov %g5, %g4 | |
461 | .text | 573 | mov %g6, %g5 |
462 | 574 | ba,pt %xcc, etrap | |
463 | /* These two are not performance critical... */ | 575 | rd %pc, %g7 |
464 | .globl xcall_flush_tlb_all_spitfire | 576 | mov %l4, %o0 |
465 | xcall_flush_tlb_all_spitfire: | 577 | call hypervisor_tlbop_error_xcall |
466 | /* Spitfire Errata #32 workaround. */ | 578 | mov %l5, %o1 |
467 | sethi %hi(errata32_hwbug), %g4 | 579 | ba,a,pt %xcc, rtrap_clr_l6 |
468 | stx %g0, [%g4 + %lo(errata32_hwbug)] | 580 | |
469 | 581 | .globl __hypervisor_xcall_flush_tlb_mm | |
470 | clr %g2 | 582 | __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ |
471 | clr %g3 | 583 | /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ |
472 | 1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4 | 584 | mov %o0, %g2 |
473 | and %g4, _PAGE_L, %g5 | 585 | mov %o1, %g3 |
474 | brnz,pn %g5, 2f | 586 | mov %o2, %g4 |
475 | mov TLB_TAG_ACCESS, %g7 | 587 | mov %o3, %g1 |
476 | 588 | mov %o5, %g7 | |
477 | stxa %g0, [%g7] ASI_DMMU | 589 | clr %o0 /* ARG0: CPU lists unimplemented */ |
478 | membar #Sync | 590 | clr %o1 /* ARG1: CPU lists unimplemented */ |
479 | stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS | 591 | mov %g5, %o2 /* ARG2: mmu context */ |
592 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | ||
593 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | ||
594 | ta HV_FAST_TRAP | ||
595 | mov HV_FAST_MMU_DEMAP_CTX, %g6 | ||
596 | brnz,pn %o0, __hypervisor_tlb_xcall_error | ||
597 | mov %o0, %g5 | ||
598 | mov %g2, %o0 | ||
599 | mov %g3, %o1 | ||
600 | mov %g4, %o2 | ||
601 | mov %g1, %o3 | ||
602 | mov %g7, %o5 | ||
480 | membar #Sync | 603 | membar #Sync |
604 | retry | ||
481 | 605 | ||
482 | /* Spitfire Errata #32 workaround. */ | 606 | .globl __hypervisor_xcall_flush_tlb_pending |
483 | sethi %hi(errata32_hwbug), %g4 | 607 | __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ |
484 | stx %g0, [%g4 + %lo(errata32_hwbug)] | 608 | /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ |
485 | 609 | sllx %g1, 3, %g1 | |
486 | 2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4 | 610 | mov %o0, %g2 |
487 | and %g4, _PAGE_L, %g5 | 611 | mov %o1, %g3 |
488 | brnz,pn %g5, 2f | 612 | mov %o2, %g4 |
489 | mov TLB_TAG_ACCESS, %g7 | 613 | 1: sub %g1, (1 << 3), %g1 |
490 | 614 | ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ | |
491 | stxa %g0, [%g7] ASI_IMMU | 615 | mov %g5, %o1 /* ARG1: mmu context */ |
492 | membar #Sync | 616 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ |
493 | stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS | 617 | srlx %o0, PAGE_SHIFT, %o0 |
618 | sllx %o0, PAGE_SHIFT, %o0 | ||
619 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
620 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | ||
621 | brnz,a,pn %o0, __hypervisor_tlb_xcall_error | ||
622 | mov %o0, %g5 | ||
623 | brnz,pt %g1, 1b | ||
624 | nop | ||
625 | mov %g2, %o0 | ||
626 | mov %g3, %o1 | ||
627 | mov %g4, %o2 | ||
494 | membar #Sync | 628 | membar #Sync |
495 | |||
496 | /* Spitfire Errata #32 workaround. */ | ||
497 | sethi %hi(errata32_hwbug), %g4 | ||
498 | stx %g0, [%g4 + %lo(errata32_hwbug)] | ||
499 | |||
500 | 2: add %g2, 1, %g2 | ||
501 | cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT | ||
502 | ble,pt %icc, 1b | ||
503 | sll %g2, 3, %g3 | ||
504 | flush %g6 | ||
505 | retry | 629 | retry |
506 | 630 | ||
507 | .globl xcall_flush_tlb_all_cheetah | 631 | .globl __hypervisor_xcall_flush_tlb_kernel_range |
508 | xcall_flush_tlb_all_cheetah: | 632 | __hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ |
509 | mov 0x80, %g2 | 633 | /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ |
510 | stxa %g0, [%g2] ASI_DMMU_DEMAP | 634 | sethi %hi(PAGE_SIZE - 1), %g2 |
511 | stxa %g0, [%g2] ASI_IMMU_DEMAP | 635 | or %g2, %lo(PAGE_SIZE - 1), %g2 |
636 | andn %g1, %g2, %g1 | ||
637 | andn %g7, %g2, %g7 | ||
638 | sub %g7, %g1, %g3 | ||
639 | add %g2, 1, %g2 | ||
640 | sub %g3, %g2, %g3 | ||
641 | mov %o0, %g2 | ||
642 | mov %o1, %g4 | ||
643 | mov %o2, %g7 | ||
644 | 1: add %g1, %g3, %o0 /* ARG0: virtual address */ | ||
645 | mov 0, %o1 /* ARG1: mmu context */ | ||
646 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
647 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
648 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | ||
649 | brnz,pn %o0, __hypervisor_tlb_xcall_error | ||
650 | mov %o0, %g5 | ||
651 | sethi %hi(PAGE_SIZE), %o2 | ||
652 | brnz,pt %g3, 1b | ||
653 | sub %g3, %o2, %g3 | ||
654 | mov %g2, %o0 | ||
655 | mov %g4, %o1 | ||
656 | mov %g7, %o2 | ||
657 | membar #Sync | ||
512 | retry | 658 | retry |
513 | 659 | ||
514 | /* These just get rescheduled to PIL vectors. */ | 660 | /* These just get rescheduled to PIL vectors. */ |
@@ -527,4 +673,70 @@ xcall_capture: | |||
527 | wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint | 673 | wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint |
528 | retry | 674 | retry |
529 | 675 | ||
676 | .globl xcall_new_mmu_context_version | ||
677 | xcall_new_mmu_context_version: | ||
678 | wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint | ||
679 | retry | ||
680 | |||
530 | #endif /* CONFIG_SMP */ | 681 | #endif /* CONFIG_SMP */ |
682 | |||
683 | |||
684 | .globl hypervisor_patch_cachetlbops | ||
685 | hypervisor_patch_cachetlbops: | ||
686 | save %sp, -128, %sp | ||
687 | |||
688 | sethi %hi(__flush_tlb_mm), %o0 | ||
689 | or %o0, %lo(__flush_tlb_mm), %o0 | ||
690 | sethi %hi(__hypervisor_flush_tlb_mm), %o1 | ||
691 | or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 | ||
692 | call tlb_patch_one | ||
693 | mov 10, %o2 | ||
694 | |||
695 | sethi %hi(__flush_tlb_pending), %o0 | ||
696 | or %o0, %lo(__flush_tlb_pending), %o0 | ||
697 | sethi %hi(__hypervisor_flush_tlb_pending), %o1 | ||
698 | or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 | ||
699 | call tlb_patch_one | ||
700 | mov 16, %o2 | ||
701 | |||
702 | sethi %hi(__flush_tlb_kernel_range), %o0 | ||
703 | or %o0, %lo(__flush_tlb_kernel_range), %o0 | ||
704 | sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 | ||
705 | or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 | ||
706 | call tlb_patch_one | ||
707 | mov 16, %o2 | ||
708 | |||
709 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
710 | sethi %hi(__flush_dcache_page), %o0 | ||
711 | or %o0, %lo(__flush_dcache_page), %o0 | ||
712 | sethi %hi(__hypervisor_flush_dcache_page), %o1 | ||
713 | or %o1, %lo(__hypervisor_flush_dcache_page), %o1 | ||
714 | call tlb_patch_one | ||
715 | mov 2, %o2 | ||
716 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
717 | |||
718 | #ifdef CONFIG_SMP | ||
719 | sethi %hi(xcall_flush_tlb_mm), %o0 | ||
720 | or %o0, %lo(xcall_flush_tlb_mm), %o0 | ||
721 | sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 | ||
722 | or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 | ||
723 | call tlb_patch_one | ||
724 | mov 21, %o2 | ||
725 | |||
726 | sethi %hi(xcall_flush_tlb_pending), %o0 | ||
727 | or %o0, %lo(xcall_flush_tlb_pending), %o0 | ||
728 | sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 | ||
729 | or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 | ||
730 | call tlb_patch_one | ||
731 | mov 21, %o2 | ||
732 | |||
733 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 | ||
734 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 | ||
735 | sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 | ||
736 | or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 | ||
737 | call tlb_patch_one | ||
738 | mov 25, %o2 | ||
739 | #endif /* CONFIG_SMP */ | ||
740 | |||
741 | ret | ||
742 | restore | ||
diff --git a/arch/sparc64/prom/cif.S b/arch/sparc64/prom/cif.S index 29d0ae74aed8..5f27ad779c0c 100644 --- a/arch/sparc64/prom/cif.S +++ b/arch/sparc64/prom/cif.S | |||
@@ -1,10 +1,12 @@ | |||
1 | /* cif.S: PROM entry/exit assembler trampolines. | 1 | /* cif.S: PROM entry/exit assembler trampolines. |
2 | * | 2 | * |
3 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 3 | * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
4 | * Copyright (C) 2005 David S. Miller <davem@davemloft.net> | 4 | * Copyright (C) 2005, 2006 David S. Miller <davem@davemloft.net> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <asm/pstate.h> | 7 | #include <asm/pstate.h> |
8 | #include <asm/cpudata.h> | ||
9 | #include <asm/thread_info.h> | ||
8 | 10 | ||
9 | .text | 11 | .text |
10 | .globl prom_cif_interface | 12 | .globl prom_cif_interface |
@@ -12,78 +14,16 @@ prom_cif_interface: | |||
12 | sethi %hi(p1275buf), %o0 | 14 | sethi %hi(p1275buf), %o0 |
13 | or %o0, %lo(p1275buf), %o0 | 15 | or %o0, %lo(p1275buf), %o0 |
14 | ldx [%o0 + 0x010], %o1 ! prom_cif_stack | 16 | ldx [%o0 + 0x010], %o1 ! prom_cif_stack |
15 | save %o1, -0x190, %sp | 17 | save %o1, -192, %sp |
16 | ldx [%i0 + 0x008], %l2 ! prom_cif_handler | 18 | ldx [%i0 + 0x008], %l2 ! prom_cif_handler |
17 | rdpr %pstate, %l4 | 19 | mov %g4, %l0 |
18 | wrpr %g0, 0x15, %pstate ! save alternate globals | 20 | mov %g5, %l1 |
19 | stx %g1, [%sp + 2047 + 0x0b0] | 21 | mov %g6, %l3 |
20 | stx %g2, [%sp + 2047 + 0x0b8] | ||
21 | stx %g3, [%sp + 2047 + 0x0c0] | ||
22 | stx %g4, [%sp + 2047 + 0x0c8] | ||
23 | stx %g5, [%sp + 2047 + 0x0d0] | ||
24 | stx %g6, [%sp + 2047 + 0x0d8] | ||
25 | stx %g7, [%sp + 2047 + 0x0e0] | ||
26 | wrpr %g0, 0x814, %pstate ! save interrupt globals | ||
27 | stx %g1, [%sp + 2047 + 0x0e8] | ||
28 | stx %g2, [%sp + 2047 + 0x0f0] | ||
29 | stx %g3, [%sp + 2047 + 0x0f8] | ||
30 | stx %g4, [%sp + 2047 + 0x100] | ||
31 | stx %g5, [%sp + 2047 + 0x108] | ||
32 | stx %g6, [%sp + 2047 + 0x110] | ||
33 | stx %g7, [%sp + 2047 + 0x118] | ||
34 | wrpr %g0, 0x14, %pstate ! save normal globals | ||
35 | stx %g1, [%sp + 2047 + 0x120] | ||
36 | stx %g2, [%sp + 2047 + 0x128] | ||
37 | stx %g3, [%sp + 2047 + 0x130] | ||
38 | stx %g4, [%sp + 2047 + 0x138] | ||
39 | stx %g5, [%sp + 2047 + 0x140] | ||
40 | stx %g6, [%sp + 2047 + 0x148] | ||
41 | stx %g7, [%sp + 2047 + 0x150] | ||
42 | wrpr %g0, 0x414, %pstate ! save mmu globals | ||
43 | stx %g1, [%sp + 2047 + 0x158] | ||
44 | stx %g2, [%sp + 2047 + 0x160] | ||
45 | stx %g3, [%sp + 2047 + 0x168] | ||
46 | stx %g4, [%sp + 2047 + 0x170] | ||
47 | stx %g5, [%sp + 2047 + 0x178] | ||
48 | stx %g6, [%sp + 2047 + 0x180] | ||
49 | stx %g7, [%sp + 2047 + 0x188] | ||
50 | mov %g1, %l0 ! also save to locals, so we can handle | ||
51 | mov %g2, %l1 ! tlb faults later on, when accessing | ||
52 | mov %g3, %l3 ! the stack. | ||
53 | mov %g7, %l5 | ||
54 | wrpr %l4, PSTATE_IE, %pstate ! turn off interrupts | ||
55 | call %l2 | 22 | call %l2 |
56 | add %i0, 0x018, %o0 ! prom_args | 23 | add %i0, 0x018, %o0 ! prom_args |
57 | wrpr %g0, 0x414, %pstate ! restore mmu globals | 24 | mov %l0, %g4 |
58 | mov %l0, %g1 | 25 | mov %l1, %g5 |
59 | mov %l1, %g2 | 26 | mov %l3, %g6 |
60 | mov %l3, %g3 | ||
61 | mov %l5, %g7 | ||
62 | wrpr %g0, 0x14, %pstate ! restore normal globals | ||
63 | ldx [%sp + 2047 + 0x120], %g1 | ||
64 | ldx [%sp + 2047 + 0x128], %g2 | ||
65 | ldx [%sp + 2047 + 0x130], %g3 | ||
66 | ldx [%sp + 2047 + 0x138], %g4 | ||
67 | ldx [%sp + 2047 + 0x140], %g5 | ||
68 | ldx [%sp + 2047 + 0x148], %g6 | ||
69 | ldx [%sp + 2047 + 0x150], %g7 | ||
70 | wrpr %g0, 0x814, %pstate ! restore interrupt globals | ||
71 | ldx [%sp + 2047 + 0x0e8], %g1 | ||
72 | ldx [%sp + 2047 + 0x0f0], %g2 | ||
73 | ldx [%sp + 2047 + 0x0f8], %g3 | ||
74 | ldx [%sp + 2047 + 0x100], %g4 | ||
75 | ldx [%sp + 2047 + 0x108], %g5 | ||
76 | ldx [%sp + 2047 + 0x110], %g6 | ||
77 | ldx [%sp + 2047 + 0x118], %g7 | ||
78 | wrpr %g0, 0x15, %pstate ! restore alternate globals | ||
79 | ldx [%sp + 2047 + 0x0b0], %g1 | ||
80 | ldx [%sp + 2047 + 0x0b8], %g2 | ||
81 | ldx [%sp + 2047 + 0x0c0], %g3 | ||
82 | ldx [%sp + 2047 + 0x0c8], %g4 | ||
83 | ldx [%sp + 2047 + 0x0d0], %g5 | ||
84 | ldx [%sp + 2047 + 0x0d8], %g6 | ||
85 | ldx [%sp + 2047 + 0x0e0], %g7 | ||
86 | wrpr %l4, 0, %pstate ! restore original pstate | ||
87 | ret | 27 | ret |
88 | restore | 28 | restore |
89 | 29 | ||
@@ -91,135 +31,18 @@ prom_cif_interface: | |||
91 | prom_cif_callback: | 31 | prom_cif_callback: |
92 | sethi %hi(p1275buf), %o1 | 32 | sethi %hi(p1275buf), %o1 |
93 | or %o1, %lo(p1275buf), %o1 | 33 | or %o1, %lo(p1275buf), %o1 |
94 | save %sp, -0x270, %sp | 34 | save %sp, -192, %sp |
95 | rdpr %pstate, %l4 | 35 | TRAP_LOAD_THREAD_REG(%g6, %g1) |
96 | wrpr %g0, 0x15, %pstate ! save PROM alternate globals | 36 | LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %o0) |
97 | stx %g1, [%sp + 2047 + 0x0b0] | 37 | ldx [%g6 + TI_TASK], %g4 |
98 | stx %g2, [%sp + 2047 + 0x0b8] | ||
99 | stx %g3, [%sp + 2047 + 0x0c0] | ||
100 | stx %g4, [%sp + 2047 + 0x0c8] | ||
101 | stx %g5, [%sp + 2047 + 0x0d0] | ||
102 | stx %g6, [%sp + 2047 + 0x0d8] | ||
103 | stx %g7, [%sp + 2047 + 0x0e0] | ||
104 | ! restore Linux alternate globals | ||
105 | ldx [%sp + 2047 + 0x190], %g1 | ||
106 | ldx [%sp + 2047 + 0x198], %g2 | ||
107 | ldx [%sp + 2047 + 0x1a0], %g3 | ||
108 | ldx [%sp + 2047 + 0x1a8], %g4 | ||
109 | ldx [%sp + 2047 + 0x1b0], %g5 | ||
110 | ldx [%sp + 2047 + 0x1b8], %g6 | ||
111 | ldx [%sp + 2047 + 0x1c0], %g7 | ||
112 | wrpr %g0, 0x814, %pstate ! save PROM interrupt globals | ||
113 | stx %g1, [%sp + 2047 + 0x0e8] | ||
114 | stx %g2, [%sp + 2047 + 0x0f0] | ||
115 | stx %g3, [%sp + 2047 + 0x0f8] | ||
116 | stx %g4, [%sp + 2047 + 0x100] | ||
117 | stx %g5, [%sp + 2047 + 0x108] | ||
118 | stx %g6, [%sp + 2047 + 0x110] | ||
119 | stx %g7, [%sp + 2047 + 0x118] | ||
120 | ! restore Linux interrupt globals | ||
121 | ldx [%sp + 2047 + 0x1c8], %g1 | ||
122 | ldx [%sp + 2047 + 0x1d0], %g2 | ||
123 | ldx [%sp + 2047 + 0x1d8], %g3 | ||
124 | ldx [%sp + 2047 + 0x1e0], %g4 | ||
125 | ldx [%sp + 2047 + 0x1e8], %g5 | ||
126 | ldx [%sp + 2047 + 0x1f0], %g6 | ||
127 | ldx [%sp + 2047 + 0x1f8], %g7 | ||
128 | wrpr %g0, 0x14, %pstate ! save PROM normal globals | ||
129 | stx %g1, [%sp + 2047 + 0x120] | ||
130 | stx %g2, [%sp + 2047 + 0x128] | ||
131 | stx %g3, [%sp + 2047 + 0x130] | ||
132 | stx %g4, [%sp + 2047 + 0x138] | ||
133 | stx %g5, [%sp + 2047 + 0x140] | ||
134 | stx %g6, [%sp + 2047 + 0x148] | ||
135 | stx %g7, [%sp + 2047 + 0x150] | ||
136 | ! restore Linux normal globals | ||
137 | ldx [%sp + 2047 + 0x200], %g1 | ||
138 | ldx [%sp + 2047 + 0x208], %g2 | ||
139 | ldx [%sp + 2047 + 0x210], %g3 | ||
140 | ldx [%sp + 2047 + 0x218], %g4 | ||
141 | ldx [%sp + 2047 + 0x220], %g5 | ||
142 | ldx [%sp + 2047 + 0x228], %g6 | ||
143 | ldx [%sp + 2047 + 0x230], %g7 | ||
144 | wrpr %g0, 0x414, %pstate ! save PROM mmu globals | ||
145 | stx %g1, [%sp + 2047 + 0x158] | ||
146 | stx %g2, [%sp + 2047 + 0x160] | ||
147 | stx %g3, [%sp + 2047 + 0x168] | ||
148 | stx %g4, [%sp + 2047 + 0x170] | ||
149 | stx %g5, [%sp + 2047 + 0x178] | ||
150 | stx %g6, [%sp + 2047 + 0x180] | ||
151 | stx %g7, [%sp + 2047 + 0x188] | ||
152 | ! restore Linux mmu globals | ||
153 | ldx [%sp + 2047 + 0x238], %o0 | ||
154 | ldx [%sp + 2047 + 0x240], %o1 | ||
155 | ldx [%sp + 2047 + 0x248], %l2 | ||
156 | ldx [%sp + 2047 + 0x250], %l3 | ||
157 | ldx [%sp + 2047 + 0x258], %l5 | ||
158 | ldx [%sp + 2047 + 0x260], %l6 | ||
159 | ldx [%sp + 2047 + 0x268], %l7 | ||
160 | ! switch to Linux tba | ||
161 | sethi %hi(sparc64_ttable_tl0), %l1 | ||
162 | rdpr %tba, %l0 ! save PROM tba | ||
163 | mov %o0, %g1 | ||
164 | mov %o1, %g2 | ||
165 | mov %l2, %g3 | ||
166 | mov %l3, %g4 | ||
167 | mov %l5, %g5 | ||
168 | mov %l6, %g6 | ||
169 | mov %l7, %g7 | ||
170 | wrpr %l1, %tba ! install Linux tba | ||
171 | wrpr %l4, 0, %pstate ! restore PSTATE | ||
172 | call prom_world | 38 | call prom_world |
173 | mov %g0, %o0 | 39 | mov 0, %o0 |
174 | ldx [%i1 + 0x000], %l2 | 40 | ldx [%i1 + 0x000], %l2 |
175 | call %l2 | 41 | call %l2 |
176 | mov %i0, %o0 | 42 | mov %i0, %o0 |
177 | mov %o0, %l1 | 43 | mov %o0, %l1 |
178 | call prom_world | 44 | call prom_world |
179 | or %g0, 1, %o0 | 45 | mov 1, %o0 |
180 | wrpr %g0, 0x14, %pstate ! interrupts off | ||
181 | ! restore PROM mmu globals | ||
182 | ldx [%sp + 2047 + 0x158], %o0 | ||
183 | ldx [%sp + 2047 + 0x160], %o1 | ||
184 | ldx [%sp + 2047 + 0x168], %l2 | ||
185 | ldx [%sp + 2047 + 0x170], %l3 | ||
186 | ldx [%sp + 2047 + 0x178], %l5 | ||
187 | ldx [%sp + 2047 + 0x180], %l6 | ||
188 | ldx [%sp + 2047 + 0x188], %l7 | ||
189 | wrpr %g0, 0x414, %pstate ! restore PROM mmu globals | ||
190 | mov %o0, %g1 | ||
191 | mov %o1, %g2 | ||
192 | mov %l2, %g3 | ||
193 | mov %l3, %g4 | ||
194 | mov %l5, %g5 | ||
195 | mov %l6, %g6 | ||
196 | mov %l7, %g7 | ||
197 | wrpr %l0, %tba ! restore PROM tba | ||
198 | wrpr %g0, 0x14, %pstate ! restore PROM normal globals | ||
199 | ldx [%sp + 2047 + 0x120], %g1 | ||
200 | ldx [%sp + 2047 + 0x128], %g2 | ||
201 | ldx [%sp + 2047 + 0x130], %g3 | ||
202 | ldx [%sp + 2047 + 0x138], %g4 | ||
203 | ldx [%sp + 2047 + 0x140], %g5 | ||
204 | ldx [%sp + 2047 + 0x148], %g6 | ||
205 | ldx [%sp + 2047 + 0x150], %g7 | ||
206 | wrpr %g0, 0x814, %pstate ! restore PROM interrupt globals | ||
207 | ldx [%sp + 2047 + 0x0e8], %g1 | ||
208 | ldx [%sp + 2047 + 0x0f0], %g2 | ||
209 | ldx [%sp + 2047 + 0x0f8], %g3 | ||
210 | ldx [%sp + 2047 + 0x100], %g4 | ||
211 | ldx [%sp + 2047 + 0x108], %g5 | ||
212 | ldx [%sp + 2047 + 0x110], %g6 | ||
213 | ldx [%sp + 2047 + 0x118], %g7 | ||
214 | wrpr %g0, 0x15, %pstate ! restore PROM alternate globals | ||
215 | ldx [%sp + 2047 + 0x0b0], %g1 | ||
216 | ldx [%sp + 2047 + 0x0b8], %g2 | ||
217 | ldx [%sp + 2047 + 0x0c0], %g3 | ||
218 | ldx [%sp + 2047 + 0x0c8], %g4 | ||
219 | ldx [%sp + 2047 + 0x0d0], %g5 | ||
220 | ldx [%sp + 2047 + 0x0d8], %g6 | ||
221 | ldx [%sp + 2047 + 0x0e0], %g7 | ||
222 | wrpr %l4, 0, %pstate | ||
223 | ret | 46 | ret |
224 | restore %l1, 0, %o0 | 47 | restore %l1, 0, %o0 |
225 | 48 | ||
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c index ac6d035dd150..7c25c54cefdc 100644 --- a/arch/sparc64/prom/console.c +++ b/arch/sparc64/prom/console.c | |||
@@ -102,6 +102,9 @@ prom_query_input_device(void) | |||
102 | if (!strncmp (propb, "rsc", 3)) | 102 | if (!strncmp (propb, "rsc", 3)) |
103 | return PROMDEV_IRSC; | 103 | return PROMDEV_IRSC; |
104 | 104 | ||
105 | if (!strncmp (propb, "virtual-console", 3)) | ||
106 | return PROMDEV_IVCONS; | ||
107 | |||
105 | if (strncmp (propb, "tty", 3) || !propb[3]) | 108 | if (strncmp (propb, "tty", 3) || !propb[3]) |
106 | return PROMDEV_I_UNK; | 109 | return PROMDEV_I_UNK; |
107 | 110 | ||
@@ -143,6 +146,9 @@ prom_query_output_device(void) | |||
143 | if (!strncmp (propb, "rsc", 3)) | 146 | if (!strncmp (propb, "rsc", 3)) |
144 | return PROMDEV_ORSC; | 147 | return PROMDEV_ORSC; |
145 | 148 | ||
149 | if (!strncmp (propb, "virtual-console", 3)) | ||
150 | return PROMDEV_OVCONS; | ||
151 | |||
146 | if (strncmp (propb, "tty", 3) || !propb[3]) | 152 | if (strncmp (propb, "tty", 3) || !propb[3]) |
147 | return PROMDEV_O_UNK; | 153 | return PROMDEV_O_UNK; |
148 | 154 | ||
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c index f3cc2d8578b2..1c0db842a6f4 100644 --- a/arch/sparc64/prom/init.c +++ b/arch/sparc64/prom/init.c | |||
@@ -14,11 +14,10 @@ | |||
14 | #include <asm/openprom.h> | 14 | #include <asm/openprom.h> |
15 | #include <asm/oplib.h> | 15 | #include <asm/oplib.h> |
16 | 16 | ||
17 | enum prom_major_version prom_vers; | 17 | /* OBP version string. */ |
18 | unsigned int prom_rev, prom_prev; | 18 | char prom_version[80]; |
19 | 19 | ||
20 | /* The root node of the prom device tree. */ | 20 | /* The root node of the prom device tree. */ |
21 | int prom_root_node; | ||
22 | int prom_stdin, prom_stdout; | 21 | int prom_stdin, prom_stdout; |
23 | int prom_chosen_node; | 22 | int prom_chosen_node; |
24 | 23 | ||
@@ -31,68 +30,25 @@ extern void prom_cif_init(void *, void *); | |||
31 | 30 | ||
32 | void __init prom_init(void *cif_handler, void *cif_stack) | 31 | void __init prom_init(void *cif_handler, void *cif_stack) |
33 | { | 32 | { |
34 | char buffer[80], *p; | ||
35 | int ints[3]; | ||
36 | int node; | 33 | int node; |
37 | int i = 0; | ||
38 | int bufadjust; | ||
39 | |||
40 | prom_vers = PROM_P1275; | ||
41 | 34 | ||
42 | prom_cif_init(cif_handler, cif_stack); | 35 | prom_cif_init(cif_handler, cif_stack); |
43 | 36 | ||
44 | prom_root_node = prom_getsibling(0); | ||
45 | if((prom_root_node == 0) || (prom_root_node == -1)) | ||
46 | prom_halt(); | ||
47 | |||
48 | prom_chosen_node = prom_finddevice(prom_chosen_path); | 37 | prom_chosen_node = prom_finddevice(prom_chosen_path); |
49 | if (!prom_chosen_node || prom_chosen_node == -1) | 38 | if (!prom_chosen_node || prom_chosen_node == -1) |
50 | prom_halt(); | 39 | prom_halt(); |
51 | 40 | ||
52 | prom_stdin = prom_getint (prom_chosen_node, "stdin"); | 41 | prom_stdin = prom_getint(prom_chosen_node, "stdin"); |
53 | prom_stdout = prom_getint (prom_chosen_node, "stdout"); | 42 | prom_stdout = prom_getint(prom_chosen_node, "stdout"); |
54 | 43 | ||
55 | node = prom_finddevice("/openprom"); | 44 | node = prom_finddevice("/openprom"); |
56 | if (!node || node == -1) | 45 | if (!node || node == -1) |
57 | prom_halt(); | 46 | prom_halt(); |
58 | 47 | ||
59 | prom_getstring (node, "version", buffer, sizeof (buffer)); | 48 | prom_getstring(node, "version", prom_version, sizeof(prom_version)); |
60 | |||
61 | prom_printf ("\n"); | ||
62 | |||
63 | if (strncmp (buffer, "OBP ", 4)) | ||
64 | goto strange_version; | ||
65 | |||
66 | /* | ||
67 | * Version field is expected to be 'OBP xx.yy.zz date...' | ||
68 | * However, Sun can't stick to this format very well, so | ||
69 | * we need to check for 'OBP xx.yy.zz date...' and adjust | ||
70 | * accordingly. -spot | ||
71 | */ | ||
72 | |||
73 | if (strncmp (buffer, "OBP ", 5)) | ||
74 | bufadjust = 4; | ||
75 | else | ||
76 | bufadjust = 5; | ||
77 | |||
78 | p = buffer + bufadjust; | ||
79 | while (p && isdigit(*p) && i < 3) { | ||
80 | ints[i++] = simple_strtoul(p, NULL, 0); | ||
81 | if ((p = strchr(p, '.')) != NULL) | ||
82 | p++; | ||
83 | } | ||
84 | if (i != 3) | ||
85 | goto strange_version; | ||
86 | |||
87 | prom_rev = ints[1]; | ||
88 | prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2]; | ||
89 | |||
90 | printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); | ||
91 | 49 | ||
92 | /* Initialization successful. */ | 50 | prom_printf("\n"); |
93 | return; | ||
94 | 51 | ||
95 | strange_version: | 52 | printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version); |
96 | prom_printf ("Strange OBP version `%s'.\n", buffer); | 53 | printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible); |
97 | prom_halt (); | ||
98 | } | 54 | } |
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c index 87f5cfce23bb..577bde8b6647 100644 --- a/arch/sparc64/prom/misc.c +++ b/arch/sparc64/prom/misc.c | |||
@@ -112,28 +112,20 @@ unsigned char prom_get_idprom(char *idbuf, int num_bytes) | |||
112 | return 0xff; | 112 | return 0xff; |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Get the major prom version number. */ | 115 | /* Install Linux trap table so PROM uses that instead of its own. */ |
116 | int prom_version(void) | 116 | void prom_set_trap_table(unsigned long tba) |
117 | { | ||
118 | return PROM_P1275; | ||
119 | } | ||
120 | |||
121 | /* Get the prom plugin-revision. */ | ||
122 | int prom_getrev(void) | ||
123 | { | ||
124 | return prom_rev; | ||
125 | } | ||
126 | |||
127 | /* Get the prom firmware print revision. */ | ||
128 | int prom_getprev(void) | ||
129 | { | 117 | { |
130 | return prom_prev; | 118 | p1275_cmd("SUNW,set-trap-table", |
119 | (P1275_ARG(0, P1275_ARG_IN_64B) | | ||
120 | P1275_INOUT(1, 0)), tba); | ||
131 | } | 121 | } |
132 | 122 | ||
133 | /* Install Linux trap table so PROM uses that instead of its own. */ | 123 | void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa) |
134 | void prom_set_trap_table(unsigned long tba) | ||
135 | { | 124 | { |
136 | p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); | 125 | p1275_cmd("SUNW,set-trap-table", |
126 | (P1275_ARG(0, P1275_ARG_IN_64B) | | ||
127 | P1275_ARG(1, P1275_ARG_IN_64B) | | ||
128 | P1275_INOUT(2, 0)), tba, mmfsa); | ||
137 | } | 129 | } |
138 | 130 | ||
139 | int prom_get_mmu_ihandle(void) | 131 | int prom_get_mmu_ihandle(void) |
@@ -303,9 +295,21 @@ int prom_wakeupsystem(void) | |||
303 | } | 295 | } |
304 | 296 | ||
305 | #ifdef CONFIG_SMP | 297 | #ifdef CONFIG_SMP |
306 | void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0) | 298 | void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg) |
299 | { | ||
300 | p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg); | ||
301 | } | ||
302 | |||
303 | void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) | ||
304 | { | ||
305 | p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0), | ||
306 | cpuid, pc, arg); | ||
307 | } | ||
308 | |||
309 | void prom_stopcpu_cpuid(int cpuid) | ||
307 | { | 310 | { |
308 | p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, o0); | 311 | p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0), |
312 | cpuid); | ||
309 | } | 313 | } |
310 | 314 | ||
311 | void prom_stopself(void) | 315 | void prom_stopself(void) |
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c index a5a7c5712028..2b32c489860c 100644 --- a/arch/sparc64/prom/p1275.c +++ b/arch/sparc64/prom/p1275.c | |||
@@ -30,16 +30,6 @@ extern void prom_world(int); | |||
30 | extern void prom_cif_interface(void); | 30 | extern void prom_cif_interface(void); |
31 | extern void prom_cif_callback(void); | 31 | extern void prom_cif_callback(void); |
32 | 32 | ||
33 | static inline unsigned long spitfire_get_primary_context(void) | ||
34 | { | ||
35 | unsigned long ctx; | ||
36 | |||
37 | __asm__ __volatile__("ldxa [%1] %2, %0" | ||
38 | : "=r" (ctx) | ||
39 | : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
40 | return ctx; | ||
41 | } | ||
42 | |||
43 | /* | 33 | /* |
44 | * This provides SMP safety on the p1275buf. prom_callback() drops this lock | 34 | * This provides SMP safety on the p1275buf. prom_callback() drops this lock |
45 | * to allow recursuve acquisition. | 35 | * to allow recursuve acquisition. |
@@ -55,7 +45,6 @@ long p1275_cmd(const char *service, long fmt, ...) | |||
55 | long attrs, x; | 45 | long attrs, x; |
56 | 46 | ||
57 | p = p1275buf.prom_buffer; | 47 | p = p1275buf.prom_buffer; |
58 | BUG_ON((spitfire_get_primary_context() & CTX_NR_MASK) != 0); | ||
59 | 48 | ||
60 | spin_lock_irqsave(&prom_entry_lock, flags); | 49 | spin_lock_irqsave(&prom_entry_lock, flags); |
61 | 50 | ||
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c index b1ff9e87dcc6..49075abd7cbc 100644 --- a/arch/sparc64/prom/tree.c +++ b/arch/sparc64/prom/tree.c | |||
@@ -51,7 +51,7 @@ prom_getparent(int node) | |||
51 | __inline__ int | 51 | __inline__ int |
52 | __prom_getsibling(int node) | 52 | __prom_getsibling(int node) |
53 | { | 53 | { |
54 | return p1275_cmd ("peer", P1275_INOUT(1, 1), node); | 54 | return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node); |
55 | } | 55 | } |
56 | 56 | ||
57 | __inline__ int | 57 | __inline__ int |
@@ -59,9 +59,12 @@ prom_getsibling(int node) | |||
59 | { | 59 | { |
60 | int sibnode; | 60 | int sibnode; |
61 | 61 | ||
62 | if(node == -1) return 0; | 62 | if (node == -1) |
63 | return 0; | ||
63 | sibnode = __prom_getsibling(node); | 64 | sibnode = __prom_getsibling(node); |
64 | if(sibnode == -1) return 0; | 65 | if (sibnode == -1) |
66 | return 0; | ||
67 | |||
65 | return sibnode; | 68 | return sibnode; |
66 | } | 69 | } |
67 | 70 | ||
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c index 3ab4677395f2..5284996780a7 100644 --- a/arch/sparc64/solaris/misc.c +++ b/arch/sparc64/solaris/misc.c | |||
@@ -90,7 +90,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o | |||
90 | len = PAGE_ALIGN(len); | 90 | len = PAGE_ALIGN(len); |
91 | if(!(flags & MAP_FIXED)) | 91 | if(!(flags & MAP_FIXED)) |
92 | addr = 0; | 92 | addr = 0; |
93 | else if (len > 0xf0000000UL || addr > 0xf0000000UL - len) | 93 | else if (len > STACK_TOP32 || addr > STACK_TOP32 - len) |
94 | goto out_putf; | 94 | goto out_putf; |
95 | ret_type = flags & _MAP_NEW; | 95 | ret_type = flags & _MAP_NEW; |
96 | flags &= ~_MAP_NEW; | 96 | flags &= ~_MAP_NEW; |
@@ -102,7 +102,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o | |||
102 | (unsigned long) prot, (unsigned long) flags, off); | 102 | (unsigned long) prot, (unsigned long) flags, off); |
103 | up_write(¤t->mm->mmap_sem); | 103 | up_write(¤t->mm->mmap_sem); |
104 | if(!ret_type) | 104 | if(!ret_type) |
105 | retval = ((retval < 0xf0000000) ? 0 : retval); | 105 | retval = ((retval < STACK_TOP32) ? 0 : retval); |
106 | 106 | ||
107 | out_putf: | 107 | out_putf: |
108 | if (file) | 108 | if (file) |
diff --git a/block/as-iosched.c b/block/as-iosched.c index 8da3cf66894c..296708ceceb2 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c | |||
@@ -182,6 +182,9 @@ struct as_rq { | |||
182 | 182 | ||
183 | static kmem_cache_t *arq_pool; | 183 | static kmem_cache_t *arq_pool; |
184 | 184 | ||
185 | static atomic_t ioc_count = ATOMIC_INIT(0); | ||
186 | static struct completion *ioc_gone; | ||
187 | |||
185 | static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq); | 188 | static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq); |
186 | static void as_antic_stop(struct as_data *ad); | 189 | static void as_antic_stop(struct as_data *ad); |
187 | 190 | ||
@@ -193,6 +196,15 @@ static void as_antic_stop(struct as_data *ad); | |||
193 | static void free_as_io_context(struct as_io_context *aic) | 196 | static void free_as_io_context(struct as_io_context *aic) |
194 | { | 197 | { |
195 | kfree(aic); | 198 | kfree(aic); |
199 | if (atomic_dec_and_test(&ioc_count) && ioc_gone) | ||
200 | complete(ioc_gone); | ||
201 | } | ||
202 | |||
203 | static void as_trim(struct io_context *ioc) | ||
204 | { | ||
205 | if (ioc->aic) | ||
206 | free_as_io_context(ioc->aic); | ||
207 | ioc->aic = NULL; | ||
196 | } | 208 | } |
197 | 209 | ||
198 | /* Called when the task exits */ | 210 | /* Called when the task exits */ |
@@ -220,6 +232,7 @@ static struct as_io_context *alloc_as_io_context(void) | |||
220 | ret->seek_total = 0; | 232 | ret->seek_total = 0; |
221 | ret->seek_samples = 0; | 233 | ret->seek_samples = 0; |
222 | ret->seek_mean = 0; | 234 | ret->seek_mean = 0; |
235 | atomic_inc(&ioc_count); | ||
223 | } | 236 | } |
224 | 237 | ||
225 | return ret; | 238 | return ret; |
@@ -1696,11 +1709,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e) | |||
1696 | /* | 1709 | /* |
1697 | * sysfs parts below | 1710 | * sysfs parts below |
1698 | */ | 1711 | */ |
1699 | struct as_fs_entry { | ||
1700 | struct attribute attr; | ||
1701 | ssize_t (*show)(struct as_data *, char *); | ||
1702 | ssize_t (*store)(struct as_data *, const char *, size_t); | ||
1703 | }; | ||
1704 | 1712 | ||
1705 | static ssize_t | 1713 | static ssize_t |
1706 | as_var_show(unsigned int var, char *page) | 1714 | as_var_show(unsigned int var, char *page) |
@@ -1717,8 +1725,9 @@ as_var_store(unsigned long *var, const char *page, size_t count) | |||
1717 | return count; | 1725 | return count; |
1718 | } | 1726 | } |
1719 | 1727 | ||
1720 | static ssize_t as_est_show(struct as_data *ad, char *page) | 1728 | static ssize_t est_time_show(elevator_t *e, char *page) |
1721 | { | 1729 | { |
1730 | struct as_data *ad = e->elevator_data; | ||
1722 | int pos = 0; | 1731 | int pos = 0; |
1723 | 1732 | ||
1724 | pos += sprintf(page+pos, "%lu %% exit probability\n", | 1733 | pos += sprintf(page+pos, "%lu %% exit probability\n", |
@@ -1734,21 +1743,23 @@ static ssize_t as_est_show(struct as_data *ad, char *page) | |||
1734 | } | 1743 | } |
1735 | 1744 | ||
1736 | #define SHOW_FUNCTION(__FUNC, __VAR) \ | 1745 | #define SHOW_FUNCTION(__FUNC, __VAR) \ |
1737 | static ssize_t __FUNC(struct as_data *ad, char *page) \ | 1746 | static ssize_t __FUNC(elevator_t *e, char *page) \ |
1738 | { \ | 1747 | { \ |
1748 | struct as_data *ad = e->elevator_data; \ | ||
1739 | return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ | 1749 | return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ |
1740 | } | 1750 | } |
1741 | SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]); | 1751 | SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); |
1742 | SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]); | 1752 | SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); |
1743 | SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire); | 1753 | SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); |
1744 | SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]); | 1754 | SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); |
1745 | SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]); | 1755 | SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); |
1746 | #undef SHOW_FUNCTION | 1756 | #undef SHOW_FUNCTION |
1747 | 1757 | ||
1748 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ | 1758 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ |
1749 | static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \ | 1759 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ |
1750 | { \ | 1760 | { \ |
1751 | int ret = as_var_store(__PTR, (page), count); \ | 1761 | struct as_data *ad = e->elevator_data; \ |
1762 | int ret = as_var_store(__PTR, (page), count); \ | ||
1752 | if (*(__PTR) < (MIN)) \ | 1763 | if (*(__PTR) < (MIN)) \ |
1753 | *(__PTR) = (MIN); \ | 1764 | *(__PTR) = (MIN); \ |
1754 | else if (*(__PTR) > (MAX)) \ | 1765 | else if (*(__PTR) > (MAX)) \ |
@@ -1756,90 +1767,26 @@ static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \ | |||
1756 | *(__PTR) = msecs_to_jiffies(*(__PTR)); \ | 1767 | *(__PTR) = msecs_to_jiffies(*(__PTR)); \ |
1757 | return ret; \ | 1768 | return ret; \ |
1758 | } | 1769 | } |
1759 | STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); | 1770 | STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); |
1760 | STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); | 1771 | STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); |
1761 | STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX); | 1772 | STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); |
1762 | STORE_FUNCTION(as_read_batchexpire_store, | 1773 | STORE_FUNCTION(as_read_batch_expire_store, |
1763 | &ad->batch_expire[REQ_SYNC], 0, INT_MAX); | 1774 | &ad->batch_expire[REQ_SYNC], 0, INT_MAX); |
1764 | STORE_FUNCTION(as_write_batchexpire_store, | 1775 | STORE_FUNCTION(as_write_batch_expire_store, |
1765 | &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); | 1776 | &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); |
1766 | #undef STORE_FUNCTION | 1777 | #undef STORE_FUNCTION |
1767 | 1778 | ||
1768 | static struct as_fs_entry as_est_entry = { | 1779 | #define AS_ATTR(name) \ |
1769 | .attr = {.name = "est_time", .mode = S_IRUGO }, | 1780 | __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store) |
1770 | .show = as_est_show, | 1781 | |
1771 | }; | 1782 | static struct elv_fs_entry as_attrs[] = { |
1772 | static struct as_fs_entry as_readexpire_entry = { | 1783 | __ATTR_RO(est_time), |
1773 | .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR }, | 1784 | AS_ATTR(read_expire), |
1774 | .show = as_readexpire_show, | 1785 | AS_ATTR(write_expire), |
1775 | .store = as_readexpire_store, | 1786 | AS_ATTR(antic_expire), |
1776 | }; | 1787 | AS_ATTR(read_batch_expire), |
1777 | static struct as_fs_entry as_writeexpire_entry = { | 1788 | AS_ATTR(write_batch_expire), |
1778 | .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR }, | 1789 | __ATTR_NULL |
1779 | .show = as_writeexpire_show, | ||
1780 | .store = as_writeexpire_store, | ||
1781 | }; | ||
1782 | static struct as_fs_entry as_anticexpire_entry = { | ||
1783 | .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR }, | ||
1784 | .show = as_anticexpire_show, | ||
1785 | .store = as_anticexpire_store, | ||
1786 | }; | ||
1787 | static struct as_fs_entry as_read_batchexpire_entry = { | ||
1788 | .attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR }, | ||
1789 | .show = as_read_batchexpire_show, | ||
1790 | .store = as_read_batchexpire_store, | ||
1791 | }; | ||
1792 | static struct as_fs_entry as_write_batchexpire_entry = { | ||
1793 | .attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR }, | ||
1794 | .show = as_write_batchexpire_show, | ||
1795 | .store = as_write_batchexpire_store, | ||
1796 | }; | ||
1797 | |||
1798 | static struct attribute *default_attrs[] = { | ||
1799 | &as_est_entry.attr, | ||
1800 | &as_readexpire_entry.attr, | ||
1801 | &as_writeexpire_entry.attr, | ||
1802 | &as_anticexpire_entry.attr, | ||
1803 | &as_read_batchexpire_entry.attr, | ||
1804 | &as_write_batchexpire_entry.attr, | ||
1805 | NULL, | ||
1806 | }; | ||
1807 | |||
1808 | #define to_as(atr) container_of((atr), struct as_fs_entry, attr) | ||
1809 | |||
1810 | static ssize_t | ||
1811 | as_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
1812 | { | ||
1813 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
1814 | struct as_fs_entry *entry = to_as(attr); | ||
1815 | |||
1816 | if (!entry->show) | ||
1817 | return -EIO; | ||
1818 | |||
1819 | return entry->show(e->elevator_data, page); | ||
1820 | } | ||
1821 | |||
1822 | static ssize_t | ||
1823 | as_attr_store(struct kobject *kobj, struct attribute *attr, | ||
1824 | const char *page, size_t length) | ||
1825 | { | ||
1826 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
1827 | struct as_fs_entry *entry = to_as(attr); | ||
1828 | |||
1829 | if (!entry->store) | ||
1830 | return -EIO; | ||
1831 | |||
1832 | return entry->store(e->elevator_data, page, length); | ||
1833 | } | ||
1834 | |||
1835 | static struct sysfs_ops as_sysfs_ops = { | ||
1836 | .show = as_attr_show, | ||
1837 | .store = as_attr_store, | ||
1838 | }; | ||
1839 | |||
1840 | static struct kobj_type as_ktype = { | ||
1841 | .sysfs_ops = &as_sysfs_ops, | ||
1842 | .default_attrs = default_attrs, | ||
1843 | }; | 1790 | }; |
1844 | 1791 | ||
1845 | static struct elevator_type iosched_as = { | 1792 | static struct elevator_type iosched_as = { |
@@ -1860,9 +1807,10 @@ static struct elevator_type iosched_as = { | |||
1860 | .elevator_may_queue_fn = as_may_queue, | 1807 | .elevator_may_queue_fn = as_may_queue, |
1861 | .elevator_init_fn = as_init_queue, | 1808 | .elevator_init_fn = as_init_queue, |
1862 | .elevator_exit_fn = as_exit_queue, | 1809 | .elevator_exit_fn = as_exit_queue, |
1810 | .trim = as_trim, | ||
1863 | }, | 1811 | }, |
1864 | 1812 | ||
1865 | .elevator_ktype = &as_ktype, | 1813 | .elevator_attrs = as_attrs, |
1866 | .elevator_name = "anticipatory", | 1814 | .elevator_name = "anticipatory", |
1867 | .elevator_owner = THIS_MODULE, | 1815 | .elevator_owner = THIS_MODULE, |
1868 | }; | 1816 | }; |
@@ -1893,7 +1841,13 @@ static int __init as_init(void) | |||
1893 | 1841 | ||
1894 | static void __exit as_exit(void) | 1842 | static void __exit as_exit(void) |
1895 | { | 1843 | { |
1844 | DECLARE_COMPLETION(all_gone); | ||
1896 | elv_unregister(&iosched_as); | 1845 | elv_unregister(&iosched_as); |
1846 | ioc_gone = &all_gone; | ||
1847 | barrier(); | ||
1848 | if (atomic_read(&ioc_count)) | ||
1849 | complete(ioc_gone); | ||
1850 | synchronize_rcu(); | ||
1897 | kmem_cache_destroy(arq_pool); | 1851 | kmem_cache_destroy(arq_pool); |
1898 | } | 1852 | } |
1899 | 1853 | ||
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index c8dbe38c81c8..c4a0d5d8d7f0 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -6,21 +6,13 @@ | |||
6 | * | 6 | * |
7 | * Copyright (C) 2003 Jens Axboe <axboe@suse.de> | 7 | * Copyright (C) 2003 Jens Axboe <axboe@suse.de> |
8 | */ | 8 | */ |
9 | #include <linux/kernel.h> | ||
10 | #include <linux/fs.h> | ||
11 | #include <linux/blkdev.h> | ||
12 | #include <linux/elevator.h> | ||
13 | #include <linux/bio.h> | ||
14 | #include <linux/config.h> | 9 | #include <linux/config.h> |
15 | #include <linux/module.h> | 10 | #include <linux/module.h> |
16 | #include <linux/slab.h> | 11 | #include <linux/blkdev.h> |
17 | #include <linux/init.h> | 12 | #include <linux/elevator.h> |
18 | #include <linux/compiler.h> | ||
19 | #include <linux/hash.h> | 13 | #include <linux/hash.h> |
20 | #include <linux/rbtree.h> | 14 | #include <linux/rbtree.h> |
21 | #include <linux/mempool.h> | ||
22 | #include <linux/ioprio.h> | 15 | #include <linux/ioprio.h> |
23 | #include <linux/writeback.h> | ||
24 | 16 | ||
25 | /* | 17 | /* |
26 | * tunables | 18 | * tunables |
@@ -47,6 +39,8 @@ static int cfq_slice_idle = HZ / 100; | |||
47 | */ | 39 | */ |
48 | static const int cfq_max_depth = 2; | 40 | static const int cfq_max_depth = 2; |
49 | 41 | ||
42 | static DEFINE_RWLOCK(cfq_exit_lock); | ||
43 | |||
50 | /* | 44 | /* |
51 | * for the hash of cfqq inside the cfqd | 45 | * for the hash of cfqq inside the cfqd |
52 | */ | 46 | */ |
@@ -89,6 +83,9 @@ static kmem_cache_t *crq_pool; | |||
89 | static kmem_cache_t *cfq_pool; | 83 | static kmem_cache_t *cfq_pool; |
90 | static kmem_cache_t *cfq_ioc_pool; | 84 | static kmem_cache_t *cfq_ioc_pool; |
91 | 85 | ||
86 | static atomic_t ioc_count = ATOMIC_INIT(0); | ||
87 | static struct completion *ioc_gone; | ||
88 | |||
92 | #define CFQ_PRIO_LISTS IOPRIO_BE_NR | 89 | #define CFQ_PRIO_LISTS IOPRIO_BE_NR |
93 | #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) | 90 | #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) |
94 | #define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) | 91 | #define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) |
@@ -109,7 +106,6 @@ static kmem_cache_t *cfq_ioc_pool; | |||
109 | * Per block device queue structure | 106 | * Per block device queue structure |
110 | */ | 107 | */ |
111 | struct cfq_data { | 108 | struct cfq_data { |
112 | atomic_t ref; | ||
113 | request_queue_t *queue; | 109 | request_queue_t *queue; |
114 | 110 | ||
115 | /* | 111 | /* |
@@ -175,6 +171,8 @@ struct cfq_data { | |||
175 | unsigned int cfq_slice_async_rq; | 171 | unsigned int cfq_slice_async_rq; |
176 | unsigned int cfq_slice_idle; | 172 | unsigned int cfq_slice_idle; |
177 | unsigned int cfq_max_depth; | 173 | unsigned int cfq_max_depth; |
174 | |||
175 | struct list_head cic_list; | ||
178 | }; | 176 | }; |
179 | 177 | ||
180 | /* | 178 | /* |
@@ -288,7 +286,7 @@ CFQ_CRQ_FNS(is_sync); | |||
288 | 286 | ||
289 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); | 287 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); |
290 | static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); | 288 | static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); |
291 | static void cfq_put_cfqd(struct cfq_data *cfqd); | 289 | static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask); |
292 | 290 | ||
293 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) | 291 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) |
294 | 292 | ||
@@ -1160,8 +1158,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq) | |||
1160 | if (unlikely(cfqd->active_queue == cfqq)) | 1158 | if (unlikely(cfqd->active_queue == cfqq)) |
1161 | __cfq_slice_expired(cfqd, cfqq, 0); | 1159 | __cfq_slice_expired(cfqd, cfqq, 0); |
1162 | 1160 | ||
1163 | cfq_put_cfqd(cfqq->cfqd); | ||
1164 | |||
1165 | /* | 1161 | /* |
1166 | * it's on the empty list and still hashed | 1162 | * it's on the empty list and still hashed |
1167 | */ | 1163 | */ |
@@ -1179,7 +1175,7 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio, | |||
1179 | 1175 | ||
1180 | hlist_for_each_safe(entry, next, hash_list) { | 1176 | hlist_for_each_safe(entry, next, hash_list) { |
1181 | struct cfq_queue *__cfqq = list_entry_qhash(entry); | 1177 | struct cfq_queue *__cfqq = list_entry_qhash(entry); |
1182 | const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio); | 1178 | const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio); |
1183 | 1179 | ||
1184 | if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) | 1180 | if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) |
1185 | return __cfqq; | 1181 | return __cfqq; |
@@ -1198,13 +1194,24 @@ static void cfq_free_io_context(struct cfq_io_context *cic) | |||
1198 | { | 1194 | { |
1199 | struct cfq_io_context *__cic; | 1195 | struct cfq_io_context *__cic; |
1200 | struct list_head *entry, *next; | 1196 | struct list_head *entry, *next; |
1197 | int freed = 1; | ||
1201 | 1198 | ||
1202 | list_for_each_safe(entry, next, &cic->list) { | 1199 | list_for_each_safe(entry, next, &cic->list) { |
1203 | __cic = list_entry(entry, struct cfq_io_context, list); | 1200 | __cic = list_entry(entry, struct cfq_io_context, list); |
1204 | kmem_cache_free(cfq_ioc_pool, __cic); | 1201 | kmem_cache_free(cfq_ioc_pool, __cic); |
1202 | freed++; | ||
1205 | } | 1203 | } |
1206 | 1204 | ||
1207 | kmem_cache_free(cfq_ioc_pool, cic); | 1205 | kmem_cache_free(cfq_ioc_pool, cic); |
1206 | if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone) | ||
1207 | complete(ioc_gone); | ||
1208 | } | ||
1209 | |||
1210 | static void cfq_trim(struct io_context *ioc) | ||
1211 | { | ||
1212 | ioc->set_ioprio = NULL; | ||
1213 | if (ioc->cic) | ||
1214 | cfq_free_io_context(ioc->cic); | ||
1208 | } | 1215 | } |
1209 | 1216 | ||
1210 | /* | 1217 | /* |
@@ -1212,25 +1219,37 @@ static void cfq_free_io_context(struct cfq_io_context *cic) | |||
1212 | */ | 1219 | */ |
1213 | static void cfq_exit_single_io_context(struct cfq_io_context *cic) | 1220 | static void cfq_exit_single_io_context(struct cfq_io_context *cic) |
1214 | { | 1221 | { |
1215 | struct cfq_data *cfqd = cic->cfqq->cfqd; | 1222 | struct cfq_data *cfqd = cic->key; |
1216 | request_queue_t *q = cfqd->queue; | 1223 | request_queue_t *q; |
1224 | |||
1225 | if (!cfqd) | ||
1226 | return; | ||
1227 | |||
1228 | q = cfqd->queue; | ||
1217 | 1229 | ||
1218 | WARN_ON(!irqs_disabled()); | 1230 | WARN_ON(!irqs_disabled()); |
1219 | 1231 | ||
1220 | spin_lock(q->queue_lock); | 1232 | spin_lock(q->queue_lock); |
1221 | 1233 | ||
1222 | if (unlikely(cic->cfqq == cfqd->active_queue)) | 1234 | if (cic->cfqq[ASYNC]) { |
1223 | __cfq_slice_expired(cfqd, cic->cfqq, 0); | 1235 | if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue)) |
1236 | __cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0); | ||
1237 | cfq_put_queue(cic->cfqq[ASYNC]); | ||
1238 | cic->cfqq[ASYNC] = NULL; | ||
1239 | } | ||
1240 | |||
1241 | if (cic->cfqq[SYNC]) { | ||
1242 | if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue)) | ||
1243 | __cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0); | ||
1244 | cfq_put_queue(cic->cfqq[SYNC]); | ||
1245 | cic->cfqq[SYNC] = NULL; | ||
1246 | } | ||
1224 | 1247 | ||
1225 | cfq_put_queue(cic->cfqq); | 1248 | cic->key = NULL; |
1226 | cic->cfqq = NULL; | 1249 | list_del_init(&cic->queue_list); |
1227 | spin_unlock(q->queue_lock); | 1250 | spin_unlock(q->queue_lock); |
1228 | } | 1251 | } |
1229 | 1252 | ||
1230 | /* | ||
1231 | * Another task may update the task cic list, if it is doing a queue lookup | ||
1232 | * on its behalf. cfq_cic_lock excludes such concurrent updates | ||
1233 | */ | ||
1234 | static void cfq_exit_io_context(struct cfq_io_context *cic) | 1253 | static void cfq_exit_io_context(struct cfq_io_context *cic) |
1235 | { | 1254 | { |
1236 | struct cfq_io_context *__cic; | 1255 | struct cfq_io_context *__cic; |
@@ -1242,12 +1261,14 @@ static void cfq_exit_io_context(struct cfq_io_context *cic) | |||
1242 | /* | 1261 | /* |
1243 | * put the reference this task is holding to the various queues | 1262 | * put the reference this task is holding to the various queues |
1244 | */ | 1263 | */ |
1264 | read_lock(&cfq_exit_lock); | ||
1245 | list_for_each(entry, &cic->list) { | 1265 | list_for_each(entry, &cic->list) { |
1246 | __cic = list_entry(entry, struct cfq_io_context, list); | 1266 | __cic = list_entry(entry, struct cfq_io_context, list); |
1247 | cfq_exit_single_io_context(__cic); | 1267 | cfq_exit_single_io_context(__cic); |
1248 | } | 1268 | } |
1249 | 1269 | ||
1250 | cfq_exit_single_io_context(cic); | 1270 | cfq_exit_single_io_context(cic); |
1271 | read_unlock(&cfq_exit_lock); | ||
1251 | local_irq_restore(flags); | 1272 | local_irq_restore(flags); |
1252 | } | 1273 | } |
1253 | 1274 | ||
@@ -1258,7 +1279,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1258 | 1279 | ||
1259 | if (cic) { | 1280 | if (cic) { |
1260 | INIT_LIST_HEAD(&cic->list); | 1281 | INIT_LIST_HEAD(&cic->list); |
1261 | cic->cfqq = NULL; | 1282 | cic->cfqq[ASYNC] = NULL; |
1283 | cic->cfqq[SYNC] = NULL; | ||
1262 | cic->key = NULL; | 1284 | cic->key = NULL; |
1263 | cic->last_end_request = jiffies; | 1285 | cic->last_end_request = jiffies; |
1264 | cic->ttime_total = 0; | 1286 | cic->ttime_total = 0; |
@@ -1266,6 +1288,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1266 | cic->ttime_mean = 0; | 1288 | cic->ttime_mean = 0; |
1267 | cic->dtor = cfq_free_io_context; | 1289 | cic->dtor = cfq_free_io_context; |
1268 | cic->exit = cfq_exit_io_context; | 1290 | cic->exit = cfq_exit_io_context; |
1291 | INIT_LIST_HEAD(&cic->queue_list); | ||
1292 | atomic_inc(&ioc_count); | ||
1269 | } | 1293 | } |
1270 | 1294 | ||
1271 | return cic; | 1295 | return cic; |
@@ -1318,14 +1342,27 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq) | |||
1318 | cfq_clear_cfqq_prio_changed(cfqq); | 1342 | cfq_clear_cfqq_prio_changed(cfqq); |
1319 | } | 1343 | } |
1320 | 1344 | ||
1321 | static inline void changed_ioprio(struct cfq_queue *cfqq) | 1345 | static inline void changed_ioprio(struct cfq_io_context *cic) |
1322 | { | 1346 | { |
1323 | if (cfqq) { | 1347 | struct cfq_data *cfqd = cic->key; |
1324 | struct cfq_data *cfqd = cfqq->cfqd; | 1348 | struct cfq_queue *cfqq; |
1325 | 1349 | if (cfqd) { | |
1326 | spin_lock(cfqd->queue->queue_lock); | 1350 | spin_lock(cfqd->queue->queue_lock); |
1327 | cfq_mark_cfqq_prio_changed(cfqq); | 1351 | cfqq = cic->cfqq[ASYNC]; |
1328 | cfq_init_prio_data(cfqq); | 1352 | if (cfqq) { |
1353 | struct cfq_queue *new_cfqq; | ||
1354 | new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, | ||
1355 | cic->ioc->task, GFP_ATOMIC); | ||
1356 | if (new_cfqq) { | ||
1357 | cic->cfqq[ASYNC] = new_cfqq; | ||
1358 | cfq_put_queue(cfqq); | ||
1359 | } | ||
1360 | } | ||
1361 | cfqq = cic->cfqq[SYNC]; | ||
1362 | if (cfqq) { | ||
1363 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1364 | cfq_init_prio_data(cfqq); | ||
1365 | } | ||
1329 | spin_unlock(cfqd->queue->queue_lock); | 1366 | spin_unlock(cfqd->queue->queue_lock); |
1330 | } | 1367 | } |
1331 | } | 1368 | } |
@@ -1335,24 +1372,32 @@ static inline void changed_ioprio(struct cfq_queue *cfqq) | |||
1335 | */ | 1372 | */ |
1336 | static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) | 1373 | static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) |
1337 | { | 1374 | { |
1338 | struct cfq_io_context *cic = ioc->cic; | 1375 | struct cfq_io_context *cic; |
1376 | |||
1377 | write_lock(&cfq_exit_lock); | ||
1378 | |||
1379 | cic = ioc->cic; | ||
1339 | 1380 | ||
1340 | changed_ioprio(cic->cfqq); | 1381 | changed_ioprio(cic); |
1341 | 1382 | ||
1342 | list_for_each_entry(cic, &cic->list, list) | 1383 | list_for_each_entry(cic, &cic->list, list) |
1343 | changed_ioprio(cic->cfqq); | 1384 | changed_ioprio(cic); |
1385 | |||
1386 | write_unlock(&cfq_exit_lock); | ||
1344 | 1387 | ||
1345 | return 0; | 1388 | return 0; |
1346 | } | 1389 | } |
1347 | 1390 | ||
1348 | static struct cfq_queue * | 1391 | static struct cfq_queue * |
1349 | cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, | 1392 | cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, |
1350 | gfp_t gfp_mask) | 1393 | gfp_t gfp_mask) |
1351 | { | 1394 | { |
1352 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); | 1395 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); |
1353 | struct cfq_queue *cfqq, *new_cfqq = NULL; | 1396 | struct cfq_queue *cfqq, *new_cfqq = NULL; |
1397 | unsigned short ioprio; | ||
1354 | 1398 | ||
1355 | retry: | 1399 | retry: |
1400 | ioprio = tsk->ioprio; | ||
1356 | cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); | 1401 | cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); |
1357 | 1402 | ||
1358 | if (!cfqq) { | 1403 | if (!cfqq) { |
@@ -1381,7 +1426,6 @@ retry: | |||
1381 | hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); | 1426 | hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); |
1382 | atomic_set(&cfqq->ref, 0); | 1427 | atomic_set(&cfqq->ref, 0); |
1383 | cfqq->cfqd = cfqd; | 1428 | cfqq->cfqd = cfqd; |
1384 | atomic_inc(&cfqd->ref); | ||
1385 | cfqq->service_last = 0; | 1429 | cfqq->service_last = 0; |
1386 | /* | 1430 | /* |
1387 | * set ->slice_left to allow preemption for a new process | 1431 | * set ->slice_left to allow preemption for a new process |
@@ -1419,6 +1463,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1419 | if (!ioc) | 1463 | if (!ioc) |
1420 | return NULL; | 1464 | return NULL; |
1421 | 1465 | ||
1466 | restart: | ||
1422 | if ((cic = ioc->cic) == NULL) { | 1467 | if ((cic = ioc->cic) == NULL) { |
1423 | cic = cfq_alloc_io_context(cfqd, gfp_mask); | 1468 | cic = cfq_alloc_io_context(cfqd, gfp_mask); |
1424 | 1469 | ||
@@ -1429,11 +1474,13 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1429 | * manually increment generic io_context usage count, it | 1474 | * manually increment generic io_context usage count, it |
1430 | * cannot go away since we are already holding one ref to it | 1475 | * cannot go away since we are already holding one ref to it |
1431 | */ | 1476 | */ |
1432 | ioc->cic = cic; | ||
1433 | ioc->set_ioprio = cfq_ioc_set_ioprio; | ||
1434 | cic->ioc = ioc; | 1477 | cic->ioc = ioc; |
1435 | cic->key = cfqd; | 1478 | cic->key = cfqd; |
1436 | atomic_inc(&cfqd->ref); | 1479 | read_lock(&cfq_exit_lock); |
1480 | ioc->set_ioprio = cfq_ioc_set_ioprio; | ||
1481 | ioc->cic = cic; | ||
1482 | list_add(&cic->queue_list, &cfqd->cic_list); | ||
1483 | read_unlock(&cfq_exit_lock); | ||
1437 | } else { | 1484 | } else { |
1438 | struct cfq_io_context *__cic; | 1485 | struct cfq_io_context *__cic; |
1439 | 1486 | ||
@@ -1443,6 +1490,20 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1443 | if (cic->key == cfqd) | 1490 | if (cic->key == cfqd) |
1444 | goto out; | 1491 | goto out; |
1445 | 1492 | ||
1493 | if (unlikely(!cic->key)) { | ||
1494 | read_lock(&cfq_exit_lock); | ||
1495 | if (list_empty(&cic->list)) | ||
1496 | ioc->cic = NULL; | ||
1497 | else | ||
1498 | ioc->cic = list_entry(cic->list.next, | ||
1499 | struct cfq_io_context, | ||
1500 | list); | ||
1501 | read_unlock(&cfq_exit_lock); | ||
1502 | kmem_cache_free(cfq_ioc_pool, cic); | ||
1503 | atomic_dec(&ioc_count); | ||
1504 | goto restart; | ||
1505 | } | ||
1506 | |||
1446 | /* | 1507 | /* |
1447 | * cic exists, check if we already are there. linear search | 1508 | * cic exists, check if we already are there. linear search |
1448 | * should be ok here, the list will usually not be more than | 1509 | * should be ok here, the list will usually not be more than |
@@ -1457,6 +1518,14 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1457 | cic = __cic; | 1518 | cic = __cic; |
1458 | goto out; | 1519 | goto out; |
1459 | } | 1520 | } |
1521 | if (unlikely(!__cic->key)) { | ||
1522 | read_lock(&cfq_exit_lock); | ||
1523 | list_del(&__cic->list); | ||
1524 | read_unlock(&cfq_exit_lock); | ||
1525 | kmem_cache_free(cfq_ioc_pool, __cic); | ||
1526 | atomic_dec(&ioc_count); | ||
1527 | goto restart; | ||
1528 | } | ||
1460 | } | 1529 | } |
1461 | 1530 | ||
1462 | /* | 1531 | /* |
@@ -1469,8 +1538,10 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | |||
1469 | 1538 | ||
1470 | __cic->ioc = ioc; | 1539 | __cic->ioc = ioc; |
1471 | __cic->key = cfqd; | 1540 | __cic->key = cfqd; |
1472 | atomic_inc(&cfqd->ref); | 1541 | read_lock(&cfq_exit_lock); |
1473 | list_add(&__cic->list, &cic->list); | 1542 | list_add(&__cic->list, &cic->list); |
1543 | list_add(&__cic->queue_list, &cfqd->cic_list); | ||
1544 | read_unlock(&cfq_exit_lock); | ||
1474 | cic = __cic; | 1545 | cic = __cic; |
1475 | } | 1546 | } |
1476 | 1547 | ||
@@ -1890,6 +1961,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
1890 | struct cfq_queue *cfqq; | 1961 | struct cfq_queue *cfqq; |
1891 | struct cfq_rq *crq; | 1962 | struct cfq_rq *crq; |
1892 | unsigned long flags; | 1963 | unsigned long flags; |
1964 | int is_sync = key != CFQ_KEY_ASYNC; | ||
1893 | 1965 | ||
1894 | might_sleep_if(gfp_mask & __GFP_WAIT); | 1966 | might_sleep_if(gfp_mask & __GFP_WAIT); |
1895 | 1967 | ||
@@ -1900,14 +1972,14 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
1900 | if (!cic) | 1972 | if (!cic) |
1901 | goto queue_fail; | 1973 | goto queue_fail; |
1902 | 1974 | ||
1903 | if (!cic->cfqq) { | 1975 | if (!cic->cfqq[is_sync]) { |
1904 | cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask); | 1976 | cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask); |
1905 | if (!cfqq) | 1977 | if (!cfqq) |
1906 | goto queue_fail; | 1978 | goto queue_fail; |
1907 | 1979 | ||
1908 | cic->cfqq = cfqq; | 1980 | cic->cfqq[is_sync] = cfqq; |
1909 | } else | 1981 | } else |
1910 | cfqq = cic->cfqq; | 1982 | cfqq = cic->cfqq[is_sync]; |
1911 | 1983 | ||
1912 | cfqq->allocated[rw]++; | 1984 | cfqq->allocated[rw]++; |
1913 | cfq_clear_cfqq_must_alloc(cfqq); | 1985 | cfq_clear_cfqq_must_alloc(cfqq); |
@@ -1924,7 +1996,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
1924 | crq->cfq_queue = cfqq; | 1996 | crq->cfq_queue = cfqq; |
1925 | crq->io_context = cic; | 1997 | crq->io_context = cic; |
1926 | 1998 | ||
1927 | if (rw == READ || process_sync(tsk)) | 1999 | if (is_sync) |
1928 | cfq_mark_crq_is_sync(crq); | 2000 | cfq_mark_crq_is_sync(crq); |
1929 | else | 2001 | else |
1930 | cfq_clear_crq_is_sync(crq); | 2002 | cfq_clear_crq_is_sync(crq); |
@@ -2055,15 +2127,35 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | |||
2055 | blk_sync_queue(cfqd->queue); | 2127 | blk_sync_queue(cfqd->queue); |
2056 | } | 2128 | } |
2057 | 2129 | ||
2058 | static void cfq_put_cfqd(struct cfq_data *cfqd) | 2130 | static void cfq_exit_queue(elevator_t *e) |
2059 | { | 2131 | { |
2132 | struct cfq_data *cfqd = e->elevator_data; | ||
2060 | request_queue_t *q = cfqd->queue; | 2133 | request_queue_t *q = cfqd->queue; |
2061 | 2134 | ||
2062 | if (!atomic_dec_and_test(&cfqd->ref)) | 2135 | cfq_shutdown_timer_wq(cfqd); |
2063 | return; | 2136 | write_lock(&cfq_exit_lock); |
2137 | spin_lock_irq(q->queue_lock); | ||
2138 | if (cfqd->active_queue) | ||
2139 | __cfq_slice_expired(cfqd, cfqd->active_queue, 0); | ||
2140 | while(!list_empty(&cfqd->cic_list)) { | ||
2141 | struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, | ||
2142 | struct cfq_io_context, | ||
2143 | queue_list); | ||
2144 | if (cic->cfqq[ASYNC]) { | ||
2145 | cfq_put_queue(cic->cfqq[ASYNC]); | ||
2146 | cic->cfqq[ASYNC] = NULL; | ||
2147 | } | ||
2148 | if (cic->cfqq[SYNC]) { | ||
2149 | cfq_put_queue(cic->cfqq[SYNC]); | ||
2150 | cic->cfqq[SYNC] = NULL; | ||
2151 | } | ||
2152 | cic->key = NULL; | ||
2153 | list_del_init(&cic->queue_list); | ||
2154 | } | ||
2155 | spin_unlock_irq(q->queue_lock); | ||
2156 | write_unlock(&cfq_exit_lock); | ||
2064 | 2157 | ||
2065 | cfq_shutdown_timer_wq(cfqd); | 2158 | cfq_shutdown_timer_wq(cfqd); |
2066 | blk_put_queue(q); | ||
2067 | 2159 | ||
2068 | mempool_destroy(cfqd->crq_pool); | 2160 | mempool_destroy(cfqd->crq_pool); |
2069 | kfree(cfqd->crq_hash); | 2161 | kfree(cfqd->crq_hash); |
@@ -2071,14 +2163,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd) | |||
2071 | kfree(cfqd); | 2163 | kfree(cfqd); |
2072 | } | 2164 | } |
2073 | 2165 | ||
2074 | static void cfq_exit_queue(elevator_t *e) | ||
2075 | { | ||
2076 | struct cfq_data *cfqd = e->elevator_data; | ||
2077 | |||
2078 | cfq_shutdown_timer_wq(cfqd); | ||
2079 | cfq_put_cfqd(cfqd); | ||
2080 | } | ||
2081 | |||
2082 | static int cfq_init_queue(request_queue_t *q, elevator_t *e) | 2166 | static int cfq_init_queue(request_queue_t *q, elevator_t *e) |
2083 | { | 2167 | { |
2084 | struct cfq_data *cfqd; | 2168 | struct cfq_data *cfqd; |
@@ -2097,6 +2181,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
2097 | INIT_LIST_HEAD(&cfqd->cur_rr); | 2181 | INIT_LIST_HEAD(&cfqd->cur_rr); |
2098 | INIT_LIST_HEAD(&cfqd->idle_rr); | 2182 | INIT_LIST_HEAD(&cfqd->idle_rr); |
2099 | INIT_LIST_HEAD(&cfqd->empty_list); | 2183 | INIT_LIST_HEAD(&cfqd->empty_list); |
2184 | INIT_LIST_HEAD(&cfqd->cic_list); | ||
2100 | 2185 | ||
2101 | cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); | 2186 | cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); |
2102 | if (!cfqd->crq_hash) | 2187 | if (!cfqd->crq_hash) |
@@ -2118,7 +2203,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
2118 | e->elevator_data = cfqd; | 2203 | e->elevator_data = cfqd; |
2119 | 2204 | ||
2120 | cfqd->queue = q; | 2205 | cfqd->queue = q; |
2121 | atomic_inc(&q->refcnt); | ||
2122 | 2206 | ||
2123 | cfqd->max_queued = q->nr_requests / 4; | 2207 | cfqd->max_queued = q->nr_requests / 4; |
2124 | q->nr_batching = cfq_queued; | 2208 | q->nr_batching = cfq_queued; |
@@ -2133,8 +2217,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
2133 | 2217 | ||
2134 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); | 2218 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); |
2135 | 2219 | ||
2136 | atomic_set(&cfqd->ref, 1); | ||
2137 | |||
2138 | cfqd->cfq_queued = cfq_queued; | 2220 | cfqd->cfq_queued = cfq_queued; |
2139 | cfqd->cfq_quantum = cfq_quantum; | 2221 | cfqd->cfq_quantum = cfq_quantum; |
2140 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; | 2222 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; |
@@ -2193,11 +2275,6 @@ fail: | |||
2193 | /* | 2275 | /* |
2194 | * sysfs parts below --> | 2276 | * sysfs parts below --> |
2195 | */ | 2277 | */ |
2196 | struct cfq_fs_entry { | ||
2197 | struct attribute attr; | ||
2198 | ssize_t (*show)(struct cfq_data *, char *); | ||
2199 | ssize_t (*store)(struct cfq_data *, const char *, size_t); | ||
2200 | }; | ||
2201 | 2278 | ||
2202 | static ssize_t | 2279 | static ssize_t |
2203 | cfq_var_show(unsigned int var, char *page) | 2280 | cfq_var_show(unsigned int var, char *page) |
@@ -2215,8 +2292,9 @@ cfq_var_store(unsigned int *var, const char *page, size_t count) | |||
2215 | } | 2292 | } |
2216 | 2293 | ||
2217 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | 2294 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
2218 | static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ | 2295 | static ssize_t __FUNC(elevator_t *e, char *page) \ |
2219 | { \ | 2296 | { \ |
2297 | struct cfq_data *cfqd = e->elevator_data; \ | ||
2220 | unsigned int __data = __VAR; \ | 2298 | unsigned int __data = __VAR; \ |
2221 | if (__CONV) \ | 2299 | if (__CONV) \ |
2222 | __data = jiffies_to_msecs(__data); \ | 2300 | __data = jiffies_to_msecs(__data); \ |
@@ -2226,8 +2304,8 @@ SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); | |||
2226 | SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); | 2304 | SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); |
2227 | SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); | 2305 | SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); |
2228 | SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); | 2306 | SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); |
2229 | SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); | 2307 | SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); |
2230 | SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); | 2308 | SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); |
2231 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); | 2309 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); |
2232 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); | 2310 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); |
2233 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | 2311 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); |
@@ -2236,8 +2314,9 @@ SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); | |||
2236 | #undef SHOW_FUNCTION | 2314 | #undef SHOW_FUNCTION |
2237 | 2315 | ||
2238 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 2316 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
2239 | static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \ | 2317 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ |
2240 | { \ | 2318 | { \ |
2319 | struct cfq_data *cfqd = e->elevator_data; \ | ||
2241 | unsigned int __data; \ | 2320 | unsigned int __data; \ |
2242 | int ret = cfq_var_store(&__data, (page), count); \ | 2321 | int ret = cfq_var_store(&__data, (page), count); \ |
2243 | if (__data < (MIN)) \ | 2322 | if (__data < (MIN)) \ |
@@ -2254,8 +2333,8 @@ STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); | |||
2254 | STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); | 2333 | STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); |
2255 | STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); | 2334 | STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); |
2256 | STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); | 2335 | STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); |
2257 | STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); | 2336 | STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); |
2258 | STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); | 2337 | STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); |
2259 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); | 2338 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); |
2260 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | 2339 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); |
2261 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | 2340 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); |
@@ -2263,112 +2342,22 @@ STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, | |||
2263 | STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); | 2342 | STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); |
2264 | #undef STORE_FUNCTION | 2343 | #undef STORE_FUNCTION |
2265 | 2344 | ||
2266 | static struct cfq_fs_entry cfq_quantum_entry = { | 2345 | #define CFQ_ATTR(name) \ |
2267 | .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR }, | 2346 | __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) |
2268 | .show = cfq_quantum_show, | 2347 | |
2269 | .store = cfq_quantum_store, | 2348 | static struct elv_fs_entry cfq_attrs[] = { |
2270 | }; | 2349 | CFQ_ATTR(quantum), |
2271 | static struct cfq_fs_entry cfq_queued_entry = { | 2350 | CFQ_ATTR(queued), |
2272 | .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR }, | 2351 | CFQ_ATTR(fifo_expire_sync), |
2273 | .show = cfq_queued_show, | 2352 | CFQ_ATTR(fifo_expire_async), |
2274 | .store = cfq_queued_store, | 2353 | CFQ_ATTR(back_seek_max), |
2275 | }; | 2354 | CFQ_ATTR(back_seek_penalty), |
2276 | static struct cfq_fs_entry cfq_fifo_expire_sync_entry = { | 2355 | CFQ_ATTR(slice_sync), |
2277 | .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, | 2356 | CFQ_ATTR(slice_async), |
2278 | .show = cfq_fifo_expire_sync_show, | 2357 | CFQ_ATTR(slice_async_rq), |
2279 | .store = cfq_fifo_expire_sync_store, | 2358 | CFQ_ATTR(slice_idle), |
2280 | }; | 2359 | CFQ_ATTR(max_depth), |
2281 | static struct cfq_fs_entry cfq_fifo_expire_async_entry = { | 2360 | __ATTR_NULL |
2282 | .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR }, | ||
2283 | .show = cfq_fifo_expire_async_show, | ||
2284 | .store = cfq_fifo_expire_async_store, | ||
2285 | }; | ||
2286 | static struct cfq_fs_entry cfq_back_max_entry = { | ||
2287 | .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, | ||
2288 | .show = cfq_back_max_show, | ||
2289 | .store = cfq_back_max_store, | ||
2290 | }; | ||
2291 | static struct cfq_fs_entry cfq_back_penalty_entry = { | ||
2292 | .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR }, | ||
2293 | .show = cfq_back_penalty_show, | ||
2294 | .store = cfq_back_penalty_store, | ||
2295 | }; | ||
2296 | static struct cfq_fs_entry cfq_slice_sync_entry = { | ||
2297 | .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR }, | ||
2298 | .show = cfq_slice_sync_show, | ||
2299 | .store = cfq_slice_sync_store, | ||
2300 | }; | ||
2301 | static struct cfq_fs_entry cfq_slice_async_entry = { | ||
2302 | .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR }, | ||
2303 | .show = cfq_slice_async_show, | ||
2304 | .store = cfq_slice_async_store, | ||
2305 | }; | ||
2306 | static struct cfq_fs_entry cfq_slice_async_rq_entry = { | ||
2307 | .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR }, | ||
2308 | .show = cfq_slice_async_rq_show, | ||
2309 | .store = cfq_slice_async_rq_store, | ||
2310 | }; | ||
2311 | static struct cfq_fs_entry cfq_slice_idle_entry = { | ||
2312 | .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR }, | ||
2313 | .show = cfq_slice_idle_show, | ||
2314 | .store = cfq_slice_idle_store, | ||
2315 | }; | ||
2316 | static struct cfq_fs_entry cfq_max_depth_entry = { | ||
2317 | .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR }, | ||
2318 | .show = cfq_max_depth_show, | ||
2319 | .store = cfq_max_depth_store, | ||
2320 | }; | ||
2321 | |||
2322 | static struct attribute *default_attrs[] = { | ||
2323 | &cfq_quantum_entry.attr, | ||
2324 | &cfq_queued_entry.attr, | ||
2325 | &cfq_fifo_expire_sync_entry.attr, | ||
2326 | &cfq_fifo_expire_async_entry.attr, | ||
2327 | &cfq_back_max_entry.attr, | ||
2328 | &cfq_back_penalty_entry.attr, | ||
2329 | &cfq_slice_sync_entry.attr, | ||
2330 | &cfq_slice_async_entry.attr, | ||
2331 | &cfq_slice_async_rq_entry.attr, | ||
2332 | &cfq_slice_idle_entry.attr, | ||
2333 | &cfq_max_depth_entry.attr, | ||
2334 | NULL, | ||
2335 | }; | ||
2336 | |||
2337 | #define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr) | ||
2338 | |||
2339 | static ssize_t | ||
2340 | cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
2341 | { | ||
2342 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
2343 | struct cfq_fs_entry *entry = to_cfq(attr); | ||
2344 | |||
2345 | if (!entry->show) | ||
2346 | return -EIO; | ||
2347 | |||
2348 | return entry->show(e->elevator_data, page); | ||
2349 | } | ||
2350 | |||
2351 | static ssize_t | ||
2352 | cfq_attr_store(struct kobject *kobj, struct attribute *attr, | ||
2353 | const char *page, size_t length) | ||
2354 | { | ||
2355 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
2356 | struct cfq_fs_entry *entry = to_cfq(attr); | ||
2357 | |||
2358 | if (!entry->store) | ||
2359 | return -EIO; | ||
2360 | |||
2361 | return entry->store(e->elevator_data, page, length); | ||
2362 | } | ||
2363 | |||
2364 | static struct sysfs_ops cfq_sysfs_ops = { | ||
2365 | .show = cfq_attr_show, | ||
2366 | .store = cfq_attr_store, | ||
2367 | }; | ||
2368 | |||
2369 | static struct kobj_type cfq_ktype = { | ||
2370 | .sysfs_ops = &cfq_sysfs_ops, | ||
2371 | .default_attrs = default_attrs, | ||
2372 | }; | 2361 | }; |
2373 | 2362 | ||
2374 | static struct elevator_type iosched_cfq = { | 2363 | static struct elevator_type iosched_cfq = { |
@@ -2389,8 +2378,9 @@ static struct elevator_type iosched_cfq = { | |||
2389 | .elevator_may_queue_fn = cfq_may_queue, | 2378 | .elevator_may_queue_fn = cfq_may_queue, |
2390 | .elevator_init_fn = cfq_init_queue, | 2379 | .elevator_init_fn = cfq_init_queue, |
2391 | .elevator_exit_fn = cfq_exit_queue, | 2380 | .elevator_exit_fn = cfq_exit_queue, |
2381 | .trim = cfq_trim, | ||
2392 | }, | 2382 | }, |
2393 | .elevator_ktype = &cfq_ktype, | 2383 | .elevator_attrs = cfq_attrs, |
2394 | .elevator_name = "cfq", | 2384 | .elevator_name = "cfq", |
2395 | .elevator_owner = THIS_MODULE, | 2385 | .elevator_owner = THIS_MODULE, |
2396 | }; | 2386 | }; |
@@ -2419,7 +2409,13 @@ static int __init cfq_init(void) | |||
2419 | 2409 | ||
2420 | static void __exit cfq_exit(void) | 2410 | static void __exit cfq_exit(void) |
2421 | { | 2411 | { |
2412 | DECLARE_COMPLETION(all_gone); | ||
2422 | elv_unregister(&iosched_cfq); | 2413 | elv_unregister(&iosched_cfq); |
2414 | ioc_gone = &all_gone; | ||
2415 | barrier(); | ||
2416 | if (atomic_read(&ioc_count)) | ||
2417 | complete(ioc_gone); | ||
2418 | synchronize_rcu(); | ||
2423 | cfq_slab_kill(); | 2419 | cfq_slab_kill(); |
2424 | } | 2420 | } |
2425 | 2421 | ||
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index 27e494b1bf97..399fa1e60e1f 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c | |||
@@ -694,11 +694,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
694 | /* | 694 | /* |
695 | * sysfs parts below | 695 | * sysfs parts below |
696 | */ | 696 | */ |
697 | struct deadline_fs_entry { | ||
698 | struct attribute attr; | ||
699 | ssize_t (*show)(struct deadline_data *, char *); | ||
700 | ssize_t (*store)(struct deadline_data *, const char *, size_t); | ||
701 | }; | ||
702 | 697 | ||
703 | static ssize_t | 698 | static ssize_t |
704 | deadline_var_show(int var, char *page) | 699 | deadline_var_show(int var, char *page) |
@@ -716,23 +711,25 @@ deadline_var_store(int *var, const char *page, size_t count) | |||
716 | } | 711 | } |
717 | 712 | ||
718 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | 713 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
719 | static ssize_t __FUNC(struct deadline_data *dd, char *page) \ | 714 | static ssize_t __FUNC(elevator_t *e, char *page) \ |
720 | { \ | 715 | { \ |
721 | int __data = __VAR; \ | 716 | struct deadline_data *dd = e->elevator_data; \ |
717 | int __data = __VAR; \ | ||
722 | if (__CONV) \ | 718 | if (__CONV) \ |
723 | __data = jiffies_to_msecs(__data); \ | 719 | __data = jiffies_to_msecs(__data); \ |
724 | return deadline_var_show(__data, (page)); \ | 720 | return deadline_var_show(__data, (page)); \ |
725 | } | 721 | } |
726 | SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1); | 722 | SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1); |
727 | SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1); | 723 | SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1); |
728 | SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0); | 724 | SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0); |
729 | SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0); | 725 | SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0); |
730 | SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0); | 726 | SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); |
731 | #undef SHOW_FUNCTION | 727 | #undef SHOW_FUNCTION |
732 | 728 | ||
733 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 729 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
734 | static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \ | 730 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ |
735 | { \ | 731 | { \ |
732 | struct deadline_data *dd = e->elevator_data; \ | ||
736 | int __data; \ | 733 | int __data; \ |
737 | int ret = deadline_var_store(&__data, (page), count); \ | 734 | int ret = deadline_var_store(&__data, (page), count); \ |
738 | if (__data < (MIN)) \ | 735 | if (__data < (MIN)) \ |
@@ -745,83 +742,24 @@ static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) | |||
745 | *(__PTR) = __data; \ | 742 | *(__PTR) = __data; \ |
746 | return ret; \ | 743 | return ret; \ |
747 | } | 744 | } |
748 | STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); | 745 | STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); |
749 | STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); | 746 | STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); |
750 | STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); | 747 | STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); |
751 | STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0); | 748 | STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0); |
752 | STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0); | 749 | STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0); |
753 | #undef STORE_FUNCTION | 750 | #undef STORE_FUNCTION |
754 | 751 | ||
755 | static struct deadline_fs_entry deadline_readexpire_entry = { | 752 | #define DD_ATTR(name) \ |
756 | .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR }, | 753 | __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \ |
757 | .show = deadline_readexpire_show, | 754 | deadline_##name##_store) |
758 | .store = deadline_readexpire_store, | 755 | |
759 | }; | 756 | static struct elv_fs_entry deadline_attrs[] = { |
760 | static struct deadline_fs_entry deadline_writeexpire_entry = { | 757 | DD_ATTR(read_expire), |
761 | .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR }, | 758 | DD_ATTR(write_expire), |
762 | .show = deadline_writeexpire_show, | 759 | DD_ATTR(writes_starved), |
763 | .store = deadline_writeexpire_store, | 760 | DD_ATTR(front_merges), |
764 | }; | 761 | DD_ATTR(fifo_batch), |
765 | static struct deadline_fs_entry deadline_writesstarved_entry = { | 762 | __ATTR_NULL |
766 | .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR }, | ||
767 | .show = deadline_writesstarved_show, | ||
768 | .store = deadline_writesstarved_store, | ||
769 | }; | ||
770 | static struct deadline_fs_entry deadline_frontmerges_entry = { | ||
771 | .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR }, | ||
772 | .show = deadline_frontmerges_show, | ||
773 | .store = deadline_frontmerges_store, | ||
774 | }; | ||
775 | static struct deadline_fs_entry deadline_fifobatch_entry = { | ||
776 | .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR }, | ||
777 | .show = deadline_fifobatch_show, | ||
778 | .store = deadline_fifobatch_store, | ||
779 | }; | ||
780 | |||
781 | static struct attribute *default_attrs[] = { | ||
782 | &deadline_readexpire_entry.attr, | ||
783 | &deadline_writeexpire_entry.attr, | ||
784 | &deadline_writesstarved_entry.attr, | ||
785 | &deadline_frontmerges_entry.attr, | ||
786 | &deadline_fifobatch_entry.attr, | ||
787 | NULL, | ||
788 | }; | ||
789 | |||
790 | #define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr) | ||
791 | |||
792 | static ssize_t | ||
793 | deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
794 | { | ||
795 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
796 | struct deadline_fs_entry *entry = to_deadline(attr); | ||
797 | |||
798 | if (!entry->show) | ||
799 | return -EIO; | ||
800 | |||
801 | return entry->show(e->elevator_data, page); | ||
802 | } | ||
803 | |||
804 | static ssize_t | ||
805 | deadline_attr_store(struct kobject *kobj, struct attribute *attr, | ||
806 | const char *page, size_t length) | ||
807 | { | ||
808 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
809 | struct deadline_fs_entry *entry = to_deadline(attr); | ||
810 | |||
811 | if (!entry->store) | ||
812 | return -EIO; | ||
813 | |||
814 | return entry->store(e->elevator_data, page, length); | ||
815 | } | ||
816 | |||
817 | static struct sysfs_ops deadline_sysfs_ops = { | ||
818 | .show = deadline_attr_show, | ||
819 | .store = deadline_attr_store, | ||
820 | }; | ||
821 | |||
822 | static struct kobj_type deadline_ktype = { | ||
823 | .sysfs_ops = &deadline_sysfs_ops, | ||
824 | .default_attrs = default_attrs, | ||
825 | }; | 763 | }; |
826 | 764 | ||
827 | static struct elevator_type iosched_deadline = { | 765 | static struct elevator_type iosched_deadline = { |
@@ -840,7 +778,7 @@ static struct elevator_type iosched_deadline = { | |||
840 | .elevator_exit_fn = deadline_exit_queue, | 778 | .elevator_exit_fn = deadline_exit_queue, |
841 | }, | 779 | }, |
842 | 780 | ||
843 | .elevator_ktype = &deadline_ktype, | 781 | .elevator_attrs = deadline_attrs, |
844 | .elevator_name = "deadline", | 782 | .elevator_name = "deadline", |
845 | .elevator_owner = THIS_MODULE, | 783 | .elevator_owner = THIS_MODULE, |
846 | }; | 784 | }; |
diff --git a/block/elevator.c b/block/elevator.c index 24b702d649a9..db3d0d8296a0 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -120,15 +120,10 @@ static struct elevator_type *elevator_get(const char *name) | |||
120 | return e; | 120 | return e; |
121 | } | 121 | } |
122 | 122 | ||
123 | static int elevator_attach(request_queue_t *q, struct elevator_type *e, | 123 | static int elevator_attach(request_queue_t *q, struct elevator_queue *eq) |
124 | struct elevator_queue *eq) | ||
125 | { | 124 | { |
126 | int ret = 0; | 125 | int ret = 0; |
127 | 126 | ||
128 | memset(eq, 0, sizeof(*eq)); | ||
129 | eq->ops = &e->ops; | ||
130 | eq->elevator_type = e; | ||
131 | |||
132 | q->elevator = eq; | 127 | q->elevator = eq; |
133 | 128 | ||
134 | if (eq->ops->elevator_init_fn) | 129 | if (eq->ops->elevator_init_fn) |
@@ -154,6 +149,32 @@ static int __init elevator_setup(char *str) | |||
154 | 149 | ||
155 | __setup("elevator=", elevator_setup); | 150 | __setup("elevator=", elevator_setup); |
156 | 151 | ||
152 | static struct kobj_type elv_ktype; | ||
153 | |||
154 | static elevator_t *elevator_alloc(struct elevator_type *e) | ||
155 | { | ||
156 | elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL); | ||
157 | if (eq) { | ||
158 | memset(eq, 0, sizeof(*eq)); | ||
159 | eq->ops = &e->ops; | ||
160 | eq->elevator_type = e; | ||
161 | kobject_init(&eq->kobj); | ||
162 | snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched"); | ||
163 | eq->kobj.ktype = &elv_ktype; | ||
164 | mutex_init(&eq->sysfs_lock); | ||
165 | } else { | ||
166 | elevator_put(e); | ||
167 | } | ||
168 | return eq; | ||
169 | } | ||
170 | |||
171 | static void elevator_release(struct kobject *kobj) | ||
172 | { | ||
173 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
174 | elevator_put(e->elevator_type); | ||
175 | kfree(e); | ||
176 | } | ||
177 | |||
157 | int elevator_init(request_queue_t *q, char *name) | 178 | int elevator_init(request_queue_t *q, char *name) |
158 | { | 179 | { |
159 | struct elevator_type *e = NULL; | 180 | struct elevator_type *e = NULL; |
@@ -176,29 +197,26 @@ int elevator_init(request_queue_t *q, char *name) | |||
176 | e = elevator_get("noop"); | 197 | e = elevator_get("noop"); |
177 | } | 198 | } |
178 | 199 | ||
179 | eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL); | 200 | eq = elevator_alloc(e); |
180 | if (!eq) { | 201 | if (!eq) |
181 | elevator_put(e); | ||
182 | return -ENOMEM; | 202 | return -ENOMEM; |
183 | } | ||
184 | 203 | ||
185 | ret = elevator_attach(q, e, eq); | 204 | ret = elevator_attach(q, eq); |
186 | if (ret) { | 205 | if (ret) |
187 | kfree(eq); | 206 | kobject_put(&eq->kobj); |
188 | elevator_put(e); | ||
189 | } | ||
190 | 207 | ||
191 | return ret; | 208 | return ret; |
192 | } | 209 | } |
193 | 210 | ||
194 | void elevator_exit(elevator_t *e) | 211 | void elevator_exit(elevator_t *e) |
195 | { | 212 | { |
213 | mutex_lock(&e->sysfs_lock); | ||
196 | if (e->ops->elevator_exit_fn) | 214 | if (e->ops->elevator_exit_fn) |
197 | e->ops->elevator_exit_fn(e); | 215 | e->ops->elevator_exit_fn(e); |
216 | e->ops = NULL; | ||
217 | mutex_unlock(&e->sysfs_lock); | ||
198 | 218 | ||
199 | elevator_put(e->elevator_type); | 219 | kobject_put(&e->kobj); |
200 | e->elevator_type = NULL; | ||
201 | kfree(e); | ||
202 | } | 220 | } |
203 | 221 | ||
204 | /* | 222 | /* |
@@ -627,26 +645,79 @@ void elv_completed_request(request_queue_t *q, struct request *rq) | |||
627 | } | 645 | } |
628 | } | 646 | } |
629 | 647 | ||
630 | int elv_register_queue(struct request_queue *q) | 648 | #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) |
649 | |||
650 | static ssize_t | ||
651 | elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
631 | { | 652 | { |
632 | elevator_t *e = q->elevator; | 653 | elevator_t *e = container_of(kobj, elevator_t, kobj); |
654 | struct elv_fs_entry *entry = to_elv(attr); | ||
655 | ssize_t error; | ||
633 | 656 | ||
634 | e->kobj.parent = kobject_get(&q->kobj); | 657 | if (!entry->show) |
635 | if (!e->kobj.parent) | 658 | return -EIO; |
636 | return -EBUSY; | ||
637 | 659 | ||
638 | snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched"); | 660 | mutex_lock(&e->sysfs_lock); |
639 | e->kobj.ktype = e->elevator_type->elevator_ktype; | 661 | error = e->ops ? entry->show(e, page) : -ENOENT; |
662 | mutex_unlock(&e->sysfs_lock); | ||
663 | return error; | ||
664 | } | ||
665 | |||
666 | static ssize_t | ||
667 | elv_attr_store(struct kobject *kobj, struct attribute *attr, | ||
668 | const char *page, size_t length) | ||
669 | { | ||
670 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
671 | struct elv_fs_entry *entry = to_elv(attr); | ||
672 | ssize_t error; | ||
673 | |||
674 | if (!entry->store) | ||
675 | return -EIO; | ||
676 | |||
677 | mutex_lock(&e->sysfs_lock); | ||
678 | error = e->ops ? entry->store(e, page, length) : -ENOENT; | ||
679 | mutex_unlock(&e->sysfs_lock); | ||
680 | return error; | ||
681 | } | ||
682 | |||
683 | static struct sysfs_ops elv_sysfs_ops = { | ||
684 | .show = elv_attr_show, | ||
685 | .store = elv_attr_store, | ||
686 | }; | ||
687 | |||
688 | static struct kobj_type elv_ktype = { | ||
689 | .sysfs_ops = &elv_sysfs_ops, | ||
690 | .release = elevator_release, | ||
691 | }; | ||
640 | 692 | ||
641 | return kobject_register(&e->kobj); | 693 | int elv_register_queue(struct request_queue *q) |
694 | { | ||
695 | elevator_t *e = q->elevator; | ||
696 | int error; | ||
697 | |||
698 | e->kobj.parent = &q->kobj; | ||
699 | |||
700 | error = kobject_add(&e->kobj); | ||
701 | if (!error) { | ||
702 | struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; | ||
703 | if (attr) { | ||
704 | while (attr->attr.name) { | ||
705 | if (sysfs_create_file(&e->kobj, &attr->attr)) | ||
706 | break; | ||
707 | attr++; | ||
708 | } | ||
709 | } | ||
710 | kobject_uevent(&e->kobj, KOBJ_ADD); | ||
711 | } | ||
712 | return error; | ||
642 | } | 713 | } |
643 | 714 | ||
644 | void elv_unregister_queue(struct request_queue *q) | 715 | void elv_unregister_queue(struct request_queue *q) |
645 | { | 716 | { |
646 | if (q) { | 717 | if (q) { |
647 | elevator_t *e = q->elevator; | 718 | elevator_t *e = q->elevator; |
648 | kobject_unregister(&e->kobj); | 719 | kobject_uevent(&e->kobj, KOBJ_REMOVE); |
649 | kobject_put(&q->kobj); | 720 | kobject_del(&e->kobj); |
650 | } | 721 | } |
651 | } | 722 | } |
652 | 723 | ||
@@ -675,21 +746,15 @@ void elv_unregister(struct elevator_type *e) | |||
675 | /* | 746 | /* |
676 | * Iterate every thread in the process to remove the io contexts. | 747 | * Iterate every thread in the process to remove the io contexts. |
677 | */ | 748 | */ |
678 | read_lock(&tasklist_lock); | 749 | if (e->ops.trim) { |
679 | do_each_thread(g, p) { | 750 | read_lock(&tasklist_lock); |
680 | struct io_context *ioc = p->io_context; | 751 | do_each_thread(g, p) { |
681 | if (ioc && ioc->cic) { | 752 | task_lock(p); |
682 | ioc->cic->exit(ioc->cic); | 753 | e->ops.trim(p->io_context); |
683 | ioc->cic->dtor(ioc->cic); | 754 | task_unlock(p); |
684 | ioc->cic = NULL; | 755 | } while_each_thread(g, p); |
685 | } | 756 | read_unlock(&tasklist_lock); |
686 | if (ioc && ioc->aic) { | 757 | } |
687 | ioc->aic->exit(ioc->aic); | ||
688 | ioc->aic->dtor(ioc->aic); | ||
689 | ioc->aic = NULL; | ||
690 | } | ||
691 | } while_each_thread(g, p); | ||
692 | read_unlock(&tasklist_lock); | ||
693 | 758 | ||
694 | spin_lock_irq(&elv_list_lock); | 759 | spin_lock_irq(&elv_list_lock); |
695 | list_del_init(&e->list); | 760 | list_del_init(&e->list); |
@@ -703,16 +768,16 @@ EXPORT_SYMBOL_GPL(elv_unregister); | |||
703 | * need for the new one. this way we have a chance of going back to the old | 768 | * need for the new one. this way we have a chance of going back to the old |
704 | * one, if the new one fails init for some reason. | 769 | * one, if the new one fails init for some reason. |
705 | */ | 770 | */ |
706 | static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | 771 | static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) |
707 | { | 772 | { |
708 | elevator_t *old_elevator, *e; | 773 | elevator_t *old_elevator, *e; |
709 | 774 | ||
710 | /* | 775 | /* |
711 | * Allocate new elevator | 776 | * Allocate new elevator |
712 | */ | 777 | */ |
713 | e = kmalloc(sizeof(elevator_t), GFP_KERNEL); | 778 | e = elevator_alloc(new_e); |
714 | if (!e) | 779 | if (!e) |
715 | goto error; | 780 | return 0; |
716 | 781 | ||
717 | /* | 782 | /* |
718 | * Turn on BYPASS and drain all requests w/ elevator private data | 783 | * Turn on BYPASS and drain all requests w/ elevator private data |
@@ -743,7 +808,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
743 | /* | 808 | /* |
744 | * attach and start new elevator | 809 | * attach and start new elevator |
745 | */ | 810 | */ |
746 | if (elevator_attach(q, new_e, e)) | 811 | if (elevator_attach(q, e)) |
747 | goto fail; | 812 | goto fail; |
748 | 813 | ||
749 | if (elv_register_queue(q)) | 814 | if (elv_register_queue(q)) |
@@ -754,7 +819,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
754 | */ | 819 | */ |
755 | elevator_exit(old_elevator); | 820 | elevator_exit(old_elevator); |
756 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 821 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
757 | return; | 822 | return 1; |
758 | 823 | ||
759 | fail_register: | 824 | fail_register: |
760 | /* | 825 | /* |
@@ -767,10 +832,9 @@ fail: | |||
767 | q->elevator = old_elevator; | 832 | q->elevator = old_elevator; |
768 | elv_register_queue(q); | 833 | elv_register_queue(q); |
769 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 834 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
770 | kfree(e); | 835 | if (e) |
771 | error: | 836 | kobject_put(&e->kobj); |
772 | elevator_put(new_e); | 837 | return 0; |
773 | printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name); | ||
774 | } | 838 | } |
775 | 839 | ||
776 | ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) | 840 | ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) |
@@ -797,7 +861,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) | |||
797 | return count; | 861 | return count; |
798 | } | 862 | } |
799 | 863 | ||
800 | elevator_switch(q, e); | 864 | if (!elevator_switch(q, e)) |
865 | printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name); | ||
801 | return count; | 866 | return count; |
802 | } | 867 | } |
803 | 868 | ||
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 0ef2971a9e82..6c793b196aa9 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue); | |||
1740 | * Hopefully the low level driver will have finished any | 1740 | * Hopefully the low level driver will have finished any |
1741 | * outstanding requests first... | 1741 | * outstanding requests first... |
1742 | **/ | 1742 | **/ |
1743 | void blk_cleanup_queue(request_queue_t * q) | 1743 | static void blk_release_queue(struct kobject *kobj) |
1744 | { | 1744 | { |
1745 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); | ||
1745 | struct request_list *rl = &q->rq; | 1746 | struct request_list *rl = &q->rq; |
1746 | 1747 | ||
1747 | if (!atomic_dec_and_test(&q->refcnt)) | ||
1748 | return; | ||
1749 | |||
1750 | if (q->elevator) | ||
1751 | elevator_exit(q->elevator); | ||
1752 | |||
1753 | blk_sync_queue(q); | 1748 | blk_sync_queue(q); |
1754 | 1749 | ||
1755 | if (rl->rq_pool) | 1750 | if (rl->rq_pool) |
@@ -1761,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q) | |||
1761 | kmem_cache_free(requestq_cachep, q); | 1756 | kmem_cache_free(requestq_cachep, q); |
1762 | } | 1757 | } |
1763 | 1758 | ||
1759 | void blk_put_queue(request_queue_t *q) | ||
1760 | { | ||
1761 | kobject_put(&q->kobj); | ||
1762 | } | ||
1763 | EXPORT_SYMBOL(blk_put_queue); | ||
1764 | |||
1765 | void blk_cleanup_queue(request_queue_t * q) | ||
1766 | { | ||
1767 | mutex_lock(&q->sysfs_lock); | ||
1768 | set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); | ||
1769 | mutex_unlock(&q->sysfs_lock); | ||
1770 | |||
1771 | if (q->elevator) | ||
1772 | elevator_exit(q->elevator); | ||
1773 | |||
1774 | blk_put_queue(q); | ||
1775 | } | ||
1776 | |||
1764 | EXPORT_SYMBOL(blk_cleanup_queue); | 1777 | EXPORT_SYMBOL(blk_cleanup_queue); |
1765 | 1778 | ||
1766 | static int blk_init_free_list(request_queue_t *q) | 1779 | static int blk_init_free_list(request_queue_t *q) |
@@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask) | |||
1788 | } | 1801 | } |
1789 | EXPORT_SYMBOL(blk_alloc_queue); | 1802 | EXPORT_SYMBOL(blk_alloc_queue); |
1790 | 1803 | ||
1804 | static struct kobj_type queue_ktype; | ||
1805 | |||
1791 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | 1806 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) |
1792 | { | 1807 | { |
1793 | request_queue_t *q; | 1808 | request_queue_t *q; |
@@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
1798 | 1813 | ||
1799 | memset(q, 0, sizeof(*q)); | 1814 | memset(q, 0, sizeof(*q)); |
1800 | init_timer(&q->unplug_timer); | 1815 | init_timer(&q->unplug_timer); |
1801 | atomic_set(&q->refcnt, 1); | 1816 | |
1817 | snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); | ||
1818 | q->kobj.ktype = &queue_ktype; | ||
1819 | kobject_init(&q->kobj); | ||
1802 | 1820 | ||
1803 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; | 1821 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; |
1804 | q->backing_dev_info.unplug_io_data = q; | 1822 | q->backing_dev_info.unplug_io_data = q; |
1805 | 1823 | ||
1824 | mutex_init(&q->sysfs_lock); | ||
1825 | |||
1806 | return q; | 1826 | return q; |
1807 | } | 1827 | } |
1808 | EXPORT_SYMBOL(blk_alloc_queue_node); | 1828 | EXPORT_SYMBOL(blk_alloc_queue_node); |
@@ -1854,8 +1874,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
1854 | return NULL; | 1874 | return NULL; |
1855 | 1875 | ||
1856 | q->node = node_id; | 1876 | q->node = node_id; |
1857 | if (blk_init_free_list(q)) | 1877 | if (blk_init_free_list(q)) { |
1858 | goto out_init; | 1878 | kmem_cache_free(requestq_cachep, q); |
1879 | return NULL; | ||
1880 | } | ||
1859 | 1881 | ||
1860 | /* | 1882 | /* |
1861 | * if caller didn't supply a lock, they get per-queue locking with | 1883 | * if caller didn't supply a lock, they get per-queue locking with |
@@ -1891,9 +1913,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
1891 | return q; | 1913 | return q; |
1892 | } | 1914 | } |
1893 | 1915 | ||
1894 | blk_cleanup_queue(q); | 1916 | blk_put_queue(q); |
1895 | out_init: | ||
1896 | kmem_cache_free(requestq_cachep, q); | ||
1897 | return NULL; | 1917 | return NULL; |
1898 | } | 1918 | } |
1899 | EXPORT_SYMBOL(blk_init_queue_node); | 1919 | EXPORT_SYMBOL(blk_init_queue_node); |
@@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node); | |||
1901 | int blk_get_queue(request_queue_t *q) | 1921 | int blk_get_queue(request_queue_t *q) |
1902 | { | 1922 | { |
1903 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | 1923 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { |
1904 | atomic_inc(&q->refcnt); | 1924 | kobject_get(&q->kobj); |
1905 | return 0; | 1925 | return 0; |
1906 | } | 1926 | } |
1907 | 1927 | ||
@@ -3477,10 +3497,12 @@ void put_io_context(struct io_context *ioc) | |||
3477 | BUG_ON(atomic_read(&ioc->refcount) == 0); | 3497 | BUG_ON(atomic_read(&ioc->refcount) == 0); |
3478 | 3498 | ||
3479 | if (atomic_dec_and_test(&ioc->refcount)) { | 3499 | if (atomic_dec_and_test(&ioc->refcount)) { |
3500 | rcu_read_lock(); | ||
3480 | if (ioc->aic && ioc->aic->dtor) | 3501 | if (ioc->aic && ioc->aic->dtor) |
3481 | ioc->aic->dtor(ioc->aic); | 3502 | ioc->aic->dtor(ioc->aic); |
3482 | if (ioc->cic && ioc->cic->dtor) | 3503 | if (ioc->cic && ioc->cic->dtor) |
3483 | ioc->cic->dtor(ioc->cic); | 3504 | ioc->cic->dtor(ioc->cic); |
3505 | rcu_read_unlock(); | ||
3484 | 3506 | ||
3485 | kmem_cache_free(iocontext_cachep, ioc); | 3507 | kmem_cache_free(iocontext_cachep, ioc); |
3486 | } | 3508 | } |
@@ -3614,10 +3636,13 @@ static ssize_t | |||
3614 | queue_requests_store(struct request_queue *q, const char *page, size_t count) | 3636 | queue_requests_store(struct request_queue *q, const char *page, size_t count) |
3615 | { | 3637 | { |
3616 | struct request_list *rl = &q->rq; | 3638 | struct request_list *rl = &q->rq; |
3639 | unsigned long nr; | ||
3640 | int ret = queue_var_store(&nr, page, count); | ||
3641 | if (nr < BLKDEV_MIN_RQ) | ||
3642 | nr = BLKDEV_MIN_RQ; | ||
3617 | 3643 | ||
3618 | int ret = queue_var_store(&q->nr_requests, page, count); | 3644 | spin_lock_irq(q->queue_lock); |
3619 | if (q->nr_requests < BLKDEV_MIN_RQ) | 3645 | q->nr_requests = nr; |
3620 | q->nr_requests = BLKDEV_MIN_RQ; | ||
3621 | blk_queue_congestion_threshold(q); | 3646 | blk_queue_congestion_threshold(q); |
3622 | 3647 | ||
3623 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) | 3648 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) |
@@ -3643,6 +3668,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) | |||
3643 | blk_clear_queue_full(q, WRITE); | 3668 | blk_clear_queue_full(q, WRITE); |
3644 | wake_up(&rl->wait[WRITE]); | 3669 | wake_up(&rl->wait[WRITE]); |
3645 | } | 3670 | } |
3671 | spin_unlock_irq(q->queue_lock); | ||
3646 | return ret; | 3672 | return ret; |
3647 | } | 3673 | } |
3648 | 3674 | ||
@@ -3758,13 +3784,19 @@ static ssize_t | |||
3758 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | 3784 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
3759 | { | 3785 | { |
3760 | struct queue_sysfs_entry *entry = to_queue(attr); | 3786 | struct queue_sysfs_entry *entry = to_queue(attr); |
3761 | struct request_queue *q; | 3787 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); |
3788 | ssize_t res; | ||
3762 | 3789 | ||
3763 | q = container_of(kobj, struct request_queue, kobj); | ||
3764 | if (!entry->show) | 3790 | if (!entry->show) |
3765 | return -EIO; | 3791 | return -EIO; |
3766 | 3792 | mutex_lock(&q->sysfs_lock); | |
3767 | return entry->show(q, page); | 3793 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { |
3794 | mutex_unlock(&q->sysfs_lock); | ||
3795 | return -ENOENT; | ||
3796 | } | ||
3797 | res = entry->show(q, page); | ||
3798 | mutex_unlock(&q->sysfs_lock); | ||
3799 | return res; | ||
3768 | } | 3800 | } |
3769 | 3801 | ||
3770 | static ssize_t | 3802 | static ssize_t |
@@ -3772,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, | |||
3772 | const char *page, size_t length) | 3804 | const char *page, size_t length) |
3773 | { | 3805 | { |
3774 | struct queue_sysfs_entry *entry = to_queue(attr); | 3806 | struct queue_sysfs_entry *entry = to_queue(attr); |
3775 | struct request_queue *q; | 3807 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); |
3808 | |||
3809 | ssize_t res; | ||
3776 | 3810 | ||
3777 | q = container_of(kobj, struct request_queue, kobj); | ||
3778 | if (!entry->store) | 3811 | if (!entry->store) |
3779 | return -EIO; | 3812 | return -EIO; |
3780 | 3813 | mutex_lock(&q->sysfs_lock); | |
3781 | return entry->store(q, page, length); | 3814 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { |
3815 | mutex_unlock(&q->sysfs_lock); | ||
3816 | return -ENOENT; | ||
3817 | } | ||
3818 | res = entry->store(q, page, length); | ||
3819 | mutex_unlock(&q->sysfs_lock); | ||
3820 | return res; | ||
3782 | } | 3821 | } |
3783 | 3822 | ||
3784 | static struct sysfs_ops queue_sysfs_ops = { | 3823 | static struct sysfs_ops queue_sysfs_ops = { |
@@ -3789,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = { | |||
3789 | static struct kobj_type queue_ktype = { | 3828 | static struct kobj_type queue_ktype = { |
3790 | .sysfs_ops = &queue_sysfs_ops, | 3829 | .sysfs_ops = &queue_sysfs_ops, |
3791 | .default_attrs = default_attrs, | 3830 | .default_attrs = default_attrs, |
3831 | .release = blk_release_queue, | ||
3792 | }; | 3832 | }; |
3793 | 3833 | ||
3794 | int blk_register_queue(struct gendisk *disk) | 3834 | int blk_register_queue(struct gendisk *disk) |
@@ -3801,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk) | |||
3801 | return -ENXIO; | 3841 | return -ENXIO; |
3802 | 3842 | ||
3803 | q->kobj.parent = kobject_get(&disk->kobj); | 3843 | q->kobj.parent = kobject_get(&disk->kobj); |
3804 | if (!q->kobj.parent) | ||
3805 | return -EBUSY; | ||
3806 | 3844 | ||
3807 | snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); | 3845 | ret = kobject_add(&q->kobj); |
3808 | q->kobj.ktype = &queue_ktype; | ||
3809 | |||
3810 | ret = kobject_register(&q->kobj); | ||
3811 | if (ret < 0) | 3846 | if (ret < 0) |
3812 | return ret; | 3847 | return ret; |
3813 | 3848 | ||
3849 | kobject_uevent(&q->kobj, KOBJ_ADD); | ||
3850 | |||
3814 | ret = elv_register_queue(q); | 3851 | ret = elv_register_queue(q); |
3815 | if (ret) { | 3852 | if (ret) { |
3816 | kobject_unregister(&q->kobj); | 3853 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
3854 | kobject_del(&q->kobj); | ||
3817 | return ret; | 3855 | return ret; |
3818 | } | 3856 | } |
3819 | 3857 | ||
@@ -3827,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk) | |||
3827 | if (q && q->request_fn) { | 3865 | if (q && q->request_fn) { |
3828 | elv_unregister_queue(q); | 3866 | elv_unregister_queue(q); |
3829 | 3867 | ||
3830 | kobject_unregister(&q->kobj); | 3868 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
3869 | kobject_del(&q->kobj); | ||
3831 | kobject_put(&disk->kobj); | 3870 | kobject_put(&disk->kobj); |
3832 | } | 3871 | } |
3833 | } | 3872 | } |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 5f6d1a5cce11..0010704739e3 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1307,7 +1307,7 @@ static int __init loop_init(void) | |||
1307 | 1307 | ||
1308 | out_mem4: | 1308 | out_mem4: |
1309 | while (i--) | 1309 | while (i--) |
1310 | blk_put_queue(loop_dev[i].lo_queue); | 1310 | blk_cleanup_queue(loop_dev[i].lo_queue); |
1311 | devfs_remove("loop"); | 1311 | devfs_remove("loop"); |
1312 | i = max_loop; | 1312 | i = max_loop; |
1313 | out_mem3: | 1313 | out_mem3: |
@@ -1328,7 +1328,7 @@ static void loop_exit(void) | |||
1328 | 1328 | ||
1329 | for (i = 0; i < max_loop; i++) { | 1329 | for (i = 0; i < max_loop; i++) { |
1330 | del_gendisk(disks[i]); | 1330 | del_gendisk(disks[i]); |
1331 | blk_put_queue(loop_dev[i].lo_queue); | 1331 | blk_cleanup_queue(loop_dev[i].lo_queue); |
1332 | put_disk(disks[i]); | 1332 | put_disk(disks[i]); |
1333 | } | 1333 | } |
1334 | devfs_remove("loop"); | 1334 | devfs_remove("loop"); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index bc9b2bcd7dba..476a5b553f34 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -2514,7 +2514,7 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd) | |||
2514 | return 0; | 2514 | return 0; |
2515 | 2515 | ||
2516 | out_new_dev: | 2516 | out_new_dev: |
2517 | blk_put_queue(disk->queue); | 2517 | blk_cleanup_queue(disk->queue); |
2518 | out_mem2: | 2518 | out_mem2: |
2519 | put_disk(disk); | 2519 | put_disk(disk); |
2520 | out_mem: | 2520 | out_mem: |
@@ -2555,7 +2555,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd) | |||
2555 | DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); | 2555 | DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); |
2556 | 2556 | ||
2557 | del_gendisk(pd->disk); | 2557 | del_gendisk(pd->disk); |
2558 | blk_put_queue(pd->disk->queue); | 2558 | blk_cleanup_queue(pd->disk->queue); |
2559 | put_disk(pd->disk); | 2559 | put_disk(pd->disk); |
2560 | 2560 | ||
2561 | pkt_devs[idx] = NULL; | 2561 | pkt_devs[idx] = NULL; |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 4ada1268b40d..c16e66b9c7a7 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
@@ -1131,7 +1131,7 @@ static void mm_pci_remove(struct pci_dev *dev) | |||
1131 | pci_free_consistent(card->dev, PAGE_SIZE*2, | 1131 | pci_free_consistent(card->dev, PAGE_SIZE*2, |
1132 | card->mm_pages[1].desc, | 1132 | card->mm_pages[1].desc, |
1133 | card->mm_pages[1].page_dma); | 1133 | card->mm_pages[1].page_dma); |
1134 | blk_put_queue(card->queue); | 1134 | blk_cleanup_queue(card->queue); |
1135 | } | 1135 | } |
1136 | 1136 | ||
1137 | static const struct pci_device_id mm_pci_ids[] = { { | 1137 | static const struct pci_device_id mm_pci_ids[] = { { |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 745ca1f67b14..88d60202b9db 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -840,7 +840,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent) | |||
840 | bad3: | 840 | bad3: |
841 | mempool_destroy(md->io_pool); | 841 | mempool_destroy(md->io_pool); |
842 | bad2: | 842 | bad2: |
843 | blk_put_queue(md->queue); | 843 | blk_cleanup_queue(md->queue); |
844 | free_minor(minor); | 844 | free_minor(minor); |
845 | bad1: | 845 | bad1: |
846 | kfree(md); | 846 | kfree(md); |
@@ -860,7 +860,7 @@ static void free_dev(struct mapped_device *md) | |||
860 | del_gendisk(md->disk); | 860 | del_gendisk(md->disk); |
861 | free_minor(minor); | 861 | free_minor(minor); |
862 | put_disk(md->disk); | 862 | put_disk(md->disk); |
863 | blk_put_queue(md->queue); | 863 | blk_cleanup_queue(md->queue); |
864 | kfree(md); | 864 | kfree(md); |
865 | } | 865 | } |
866 | 866 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index d05e3125d298..5ed2228745cb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -213,8 +213,11 @@ static void mddev_put(mddev_t *mddev) | |||
213 | return; | 213 | return; |
214 | if (!mddev->raid_disks && list_empty(&mddev->disks)) { | 214 | if (!mddev->raid_disks && list_empty(&mddev->disks)) { |
215 | list_del(&mddev->all_mddevs); | 215 | list_del(&mddev->all_mddevs); |
216 | blk_put_queue(mddev->queue); | 216 | /* that blocks */ |
217 | blk_cleanup_queue(mddev->queue); | ||
218 | /* that also blocks */ | ||
217 | kobject_unregister(&mddev->kobj); | 219 | kobject_unregister(&mddev->kobj); |
220 | /* result blows... */ | ||
218 | } | 221 | } |
219 | spin_unlock(&all_mddevs_lock); | 222 | spin_unlock(&all_mddevs_lock); |
220 | } | 223 | } |
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 830528dce0ca..dc845f36fe49 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
@@ -100,6 +100,10 @@ static int max_interrupt_work = 10; | |||
100 | static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; | 100 | static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; |
101 | static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n"; | 101 | static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n"; |
102 | 102 | ||
103 | #if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA)) | ||
104 | #define EL3_SUSPEND | ||
105 | #endif | ||
106 | |||
103 | #ifdef EL3_DEBUG | 107 | #ifdef EL3_DEBUG |
104 | static int el3_debug = EL3_DEBUG; | 108 | static int el3_debug = EL3_DEBUG; |
105 | #else | 109 | #else |
@@ -174,9 +178,6 @@ struct el3_private { | |||
174 | /* skb send-queue */ | 178 | /* skb send-queue */ |
175 | int head, size; | 179 | int head, size; |
176 | struct sk_buff *queue[SKB_QUEUE_SIZE]; | 180 | struct sk_buff *queue[SKB_QUEUE_SIZE]; |
177 | #ifdef CONFIG_PM_LEGACY | ||
178 | struct pm_dev *pmdev; | ||
179 | #endif | ||
180 | enum { | 181 | enum { |
181 | EL3_MCA, | 182 | EL3_MCA, |
182 | EL3_PNP, | 183 | EL3_PNP, |
@@ -201,11 +202,15 @@ static void el3_tx_timeout (struct net_device *dev); | |||
201 | static void el3_down(struct net_device *dev); | 202 | static void el3_down(struct net_device *dev); |
202 | static void el3_up(struct net_device *dev); | 203 | static void el3_up(struct net_device *dev); |
203 | static struct ethtool_ops ethtool_ops; | 204 | static struct ethtool_ops ethtool_ops; |
204 | #ifdef CONFIG_PM_LEGACY | 205 | #ifdef EL3_SUSPEND |
205 | static int el3_suspend(struct pm_dev *pdev); | 206 | static int el3_suspend(struct device *, pm_message_t); |
206 | static int el3_resume(struct pm_dev *pdev); | 207 | static int el3_resume(struct device *); |
207 | static int el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data); | 208 | #else |
209 | #define el3_suspend NULL | ||
210 | #define el3_resume NULL | ||
208 | #endif | 211 | #endif |
212 | |||
213 | |||
209 | /* generic device remove for all device types */ | 214 | /* generic device remove for all device types */ |
210 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | 215 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) |
211 | static int el3_device_remove (struct device *device); | 216 | static int el3_device_remove (struct device *device); |
@@ -229,7 +234,9 @@ static struct eisa_driver el3_eisa_driver = { | |||
229 | .driver = { | 234 | .driver = { |
230 | .name = "3c509", | 235 | .name = "3c509", |
231 | .probe = el3_eisa_probe, | 236 | .probe = el3_eisa_probe, |
232 | .remove = __devexit_p (el3_device_remove) | 237 | .remove = __devexit_p (el3_device_remove), |
238 | .suspend = el3_suspend, | ||
239 | .resume = el3_resume, | ||
233 | } | 240 | } |
234 | }; | 241 | }; |
235 | #endif | 242 | #endif |
@@ -262,6 +269,8 @@ static struct mca_driver el3_mca_driver = { | |||
262 | .bus = &mca_bus_type, | 269 | .bus = &mca_bus_type, |
263 | .probe = el3_mca_probe, | 270 | .probe = el3_mca_probe, |
264 | .remove = __devexit_p(el3_device_remove), | 271 | .remove = __devexit_p(el3_device_remove), |
272 | .suspend = el3_suspend, | ||
273 | .resume = el3_resume, | ||
265 | }, | 274 | }, |
266 | }; | 275 | }; |
267 | #endif /* CONFIG_MCA */ | 276 | #endif /* CONFIG_MCA */ |
@@ -362,10 +371,6 @@ static void el3_common_remove (struct net_device *dev) | |||
362 | struct el3_private *lp = netdev_priv(dev); | 371 | struct el3_private *lp = netdev_priv(dev); |
363 | 372 | ||
364 | (void) lp; /* Keep gcc quiet... */ | 373 | (void) lp; /* Keep gcc quiet... */ |
365 | #ifdef CONFIG_PM_LEGACY | ||
366 | if (lp->pmdev) | ||
367 | pm_unregister(lp->pmdev); | ||
368 | #endif | ||
369 | #if defined(__ISAPNP__) | 374 | #if defined(__ISAPNP__) |
370 | if (lp->type == EL3_PNP) | 375 | if (lp->type == EL3_PNP) |
371 | pnp_device_detach(to_pnp_dev(lp->dev)); | 376 | pnp_device_detach(to_pnp_dev(lp->dev)); |
@@ -572,16 +577,6 @@ no_pnp: | |||
572 | if (err) | 577 | if (err) |
573 | goto out1; | 578 | goto out1; |
574 | 579 | ||
575 | #ifdef CONFIG_PM_LEGACY | ||
576 | /* register power management */ | ||
577 | lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback); | ||
578 | if (lp->pmdev) { | ||
579 | struct pm_dev *p; | ||
580 | p = lp->pmdev; | ||
581 | p->data = (struct net_device *)dev; | ||
582 | } | ||
583 | #endif | ||
584 | |||
585 | el3_cards++; | 580 | el3_cards++; |
586 | lp->next_dev = el3_root_dev; | 581 | lp->next_dev = el3_root_dev; |
587 | el3_root_dev = dev; | 582 | el3_root_dev = dev; |
@@ -1480,20 +1475,17 @@ el3_up(struct net_device *dev) | |||
1480 | } | 1475 | } |
1481 | 1476 | ||
1482 | /* Power Management support functions */ | 1477 | /* Power Management support functions */ |
1483 | #ifdef CONFIG_PM_LEGACY | 1478 | #ifdef EL3_SUSPEND |
1484 | 1479 | ||
1485 | static int | 1480 | static int |
1486 | el3_suspend(struct pm_dev *pdev) | 1481 | el3_suspend(struct device *pdev, pm_message_t state) |
1487 | { | 1482 | { |
1488 | unsigned long flags; | 1483 | unsigned long flags; |
1489 | struct net_device *dev; | 1484 | struct net_device *dev; |
1490 | struct el3_private *lp; | 1485 | struct el3_private *lp; |
1491 | int ioaddr; | 1486 | int ioaddr; |
1492 | 1487 | ||
1493 | if (!pdev && !pdev->data) | 1488 | dev = pdev->driver_data; |
1494 | return -EINVAL; | ||
1495 | |||
1496 | dev = (struct net_device *)pdev->data; | ||
1497 | lp = netdev_priv(dev); | 1489 | lp = netdev_priv(dev); |
1498 | ioaddr = dev->base_addr; | 1490 | ioaddr = dev->base_addr; |
1499 | 1491 | ||
@@ -1510,17 +1502,14 @@ el3_suspend(struct pm_dev *pdev) | |||
1510 | } | 1502 | } |
1511 | 1503 | ||
1512 | static int | 1504 | static int |
1513 | el3_resume(struct pm_dev *pdev) | 1505 | el3_resume(struct device *pdev) |
1514 | { | 1506 | { |
1515 | unsigned long flags; | 1507 | unsigned long flags; |
1516 | struct net_device *dev; | 1508 | struct net_device *dev; |
1517 | struct el3_private *lp; | 1509 | struct el3_private *lp; |
1518 | int ioaddr; | 1510 | int ioaddr; |
1519 | 1511 | ||
1520 | if (!pdev && !pdev->data) | 1512 | dev = pdev->driver_data; |
1521 | return -EINVAL; | ||
1522 | |||
1523 | dev = (struct net_device *)pdev->data; | ||
1524 | lp = netdev_priv(dev); | 1513 | lp = netdev_priv(dev); |
1525 | ioaddr = dev->base_addr; | 1514 | ioaddr = dev->base_addr; |
1526 | 1515 | ||
@@ -1536,20 +1525,7 @@ el3_resume(struct pm_dev *pdev) | |||
1536 | return 0; | 1525 | return 0; |
1537 | } | 1526 | } |
1538 | 1527 | ||
1539 | static int | 1528 | #endif /* EL3_SUSPEND */ |
1540 | el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data) | ||
1541 | { | ||
1542 | switch (rqst) { | ||
1543 | case PM_SUSPEND: | ||
1544 | return el3_suspend(pdev); | ||
1545 | |||
1546 | case PM_RESUME: | ||
1547 | return el3_resume(pdev); | ||
1548 | } | ||
1549 | return 0; | ||
1550 | } | ||
1551 | |||
1552 | #endif /* CONFIG_PM_LEGACY */ | ||
1553 | 1529 | ||
1554 | /* Parameters that may be passed into the module. */ | 1530 | /* Parameters that may be passed into the module. */ |
1555 | static int debug = -1; | 1531 | static int debug = -1; |
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index 9e1fe2e0478c..b40885d41680 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c | |||
@@ -105,6 +105,7 @@ | |||
105 | #include <linux/mca-legacy.h> | 105 | #include <linux/mca-legacy.h> |
106 | #include <linux/ethtool.h> | 106 | #include <linux/ethtool.h> |
107 | #include <linux/bitops.h> | 107 | #include <linux/bitops.h> |
108 | #include <linux/jiffies.h> | ||
108 | 109 | ||
109 | #include <asm/uaccess.h> | 110 | #include <asm/uaccess.h> |
110 | #include <asm/processor.h> | 111 | #include <asm/processor.h> |
@@ -658,7 +659,7 @@ static int init586(struct net_device *dev) | |||
658 | 659 | ||
659 | s = jiffies; /* warning: only active with interrupts on !! */ | 660 | s = jiffies; /* warning: only active with interrupts on !! */ |
660 | while (!(cfg_cmd->cmd_status & STAT_COMPL)) { | 661 | while (!(cfg_cmd->cmd_status & STAT_COMPL)) { |
661 | if (jiffies - s > 30*HZ/100) | 662 | if (time_after(jiffies, s + 30*HZ/100)) |
662 | break; | 663 | break; |
663 | } | 664 | } |
664 | 665 | ||
@@ -684,7 +685,7 @@ static int init586(struct net_device *dev) | |||
684 | 685 | ||
685 | s = jiffies; | 686 | s = jiffies; |
686 | while (!(ias_cmd->cmd_status & STAT_COMPL)) { | 687 | while (!(ias_cmd->cmd_status & STAT_COMPL)) { |
687 | if (jiffies - s > 30*HZ/100) | 688 | if (time_after(jiffies, s + 30*HZ/100)) |
688 | break; | 689 | break; |
689 | } | 690 | } |
690 | 691 | ||
@@ -709,7 +710,7 @@ static int init586(struct net_device *dev) | |||
709 | 710 | ||
710 | s = jiffies; | 711 | s = jiffies; |
711 | while (!(tdr_cmd->cmd_status & STAT_COMPL)) { | 712 | while (!(tdr_cmd->cmd_status & STAT_COMPL)) { |
712 | if (jiffies - s > 30*HZ/100) { | 713 | if (time_after(jiffies, s + 30*HZ/100)) { |
713 | printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__); | 714 | printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__); |
714 | result = 1; | 715 | result = 1; |
715 | break; | 716 | break; |
@@ -798,7 +799,7 @@ static int init586(struct net_device *dev) | |||
798 | elmc_id_attn586(); | 799 | elmc_id_attn586(); |
799 | s = jiffies; | 800 | s = jiffies; |
800 | while (!(mc_cmd->cmd_status & STAT_COMPL)) { | 801 | while (!(mc_cmd->cmd_status & STAT_COMPL)) { |
801 | if (jiffies - s > 30*HZ/100) | 802 | if (time_after(jiffies, s + 30*HZ/100)) |
802 | break; | 803 | break; |
803 | } | 804 | } |
804 | if (!(mc_cmd->cmd_status & STAT_COMPL)) { | 805 | if (!(mc_cmd->cmd_status & STAT_COMPL)) { |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 7f47124f118d..5d11a06ecb2c 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -258,6 +258,7 @@ static int vortex_debug = 1; | |||
258 | #include <linux/highmem.h> | 258 | #include <linux/highmem.h> |
259 | #include <linux/eisa.h> | 259 | #include <linux/eisa.h> |
260 | #include <linux/bitops.h> | 260 | #include <linux/bitops.h> |
261 | #include <linux/jiffies.h> | ||
261 | #include <asm/irq.h> /* For NR_IRQS only. */ | 262 | #include <asm/irq.h> /* For NR_IRQS only. */ |
262 | #include <asm/io.h> | 263 | #include <asm/io.h> |
263 | #include <asm/uaccess.h> | 264 | #include <asm/uaccess.h> |
@@ -841,7 +842,7 @@ enum xcvr_types { | |||
841 | XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, | 842 | XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, |
842 | }; | 843 | }; |
843 | 844 | ||
844 | static struct media_table { | 845 | static const struct media_table { |
845 | char *name; | 846 | char *name; |
846 | unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ | 847 | unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ |
847 | mask:8, /* The transceiver-present bit in Wn3_Config.*/ | 848 | mask:8, /* The transceiver-present bit in Wn3_Config.*/ |
@@ -1445,7 +1446,7 @@ static int __devinit vortex_probe1(struct device *gendev, | |||
1445 | } | 1446 | } |
1446 | 1447 | ||
1447 | { | 1448 | { |
1448 | static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; | 1449 | static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; |
1449 | unsigned int config; | 1450 | unsigned int config; |
1450 | EL3WINDOW(3); | 1451 | EL3WINDOW(3); |
1451 | vp->available_media = ioread16(ioaddr + Wn3_Options); | 1452 | vp->available_media = ioread16(ioaddr + Wn3_Options); |
@@ -2724,7 +2725,7 @@ boomerang_rx(struct net_device *dev) | |||
2724 | skb = dev_alloc_skb(PKT_BUF_SZ); | 2725 | skb = dev_alloc_skb(PKT_BUF_SZ); |
2725 | if (skb == NULL) { | 2726 | if (skb == NULL) { |
2726 | static unsigned long last_jif; | 2727 | static unsigned long last_jif; |
2727 | if ((jiffies - last_jif) > 10 * HZ) { | 2728 | if (time_after(jiffies, last_jif + 10 * HZ)) { |
2728 | printk(KERN_WARNING "%s: memory shortage\n", dev->name); | 2729 | printk(KERN_WARNING "%s: memory shortage\n", dev->name); |
2729 | last_jif = jiffies; | 2730 | last_jif = jiffies; |
2730 | } | 2731 | } |
diff --git a/drivers/net/7990.c b/drivers/net/7990.c index 18b027e73f28..86633c5f1a4b 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
32 | #include <linux/irq.h> | 32 | #include <asm/irq.h> |
33 | /* Used for the temporal inet entries and routing */ | 33 | /* Used for the temporal inet entries and routing */ |
34 | #include <linux/socket.h> | 34 | #include <linux/socket.h> |
35 | #include <linux/bitops.h> | 35 | #include <linux/bitops.h> |
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index dd410496aadb..ce99845d8266 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -1276,7 +1276,7 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu) | |||
1276 | } | 1276 | } |
1277 | #endif /* BROKEN */ | 1277 | #endif /* BROKEN */ |
1278 | 1278 | ||
1279 | static char mii_2_8139_map[8] = { | 1279 | static const char mii_2_8139_map[8] = { |
1280 | BasicModeCtrl, | 1280 | BasicModeCtrl, |
1281 | BasicModeStatus, | 1281 | BasicModeStatus, |
1282 | 0, | 1282 | 0, |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 2beac55b57d6..e58d4c50c2e1 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -229,7 +229,7 @@ typedef enum { | |||
229 | 229 | ||
230 | 230 | ||
231 | /* indexed by board_t, above */ | 231 | /* indexed by board_t, above */ |
232 | static struct { | 232 | static const struct { |
233 | const char *name; | 233 | const char *name; |
234 | u32 hw_flags; | 234 | u32 hw_flags; |
235 | } board_info[] __devinitdata = { | 235 | } board_info[] __devinitdata = { |
@@ -1192,7 +1192,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l | |||
1192 | #define mdio_delay() RTL_R8(Config4) | 1192 | #define mdio_delay() RTL_R8(Config4) |
1193 | 1193 | ||
1194 | 1194 | ||
1195 | static char mii_2_8139_map[8] = { | 1195 | static const char mii_2_8139_map[8] = { |
1196 | BasicModeCtrl, | 1196 | BasicModeCtrl, |
1197 | BasicModeStatus, | 1197 | BasicModeStatus, |
1198 | 0, | 1198 | 0, |
diff --git a/drivers/net/82596.c b/drivers/net/82596.c index 13b745b39667..da0c878dcba8 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c | |||
@@ -614,7 +614,7 @@ static void rebuild_rx_bufs(struct net_device *dev) | |||
614 | static int init_i596_mem(struct net_device *dev) | 614 | static int init_i596_mem(struct net_device *dev) |
615 | { | 615 | { |
616 | struct i596_private *lp = dev->priv; | 616 | struct i596_private *lp = dev->priv; |
617 | #if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) | 617 | #if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT) |
618 | short ioaddr = dev->base_addr; | 618 | short ioaddr = dev->base_addr; |
619 | #endif | 619 | #endif |
620 | unsigned long flags; | 620 | unsigned long flags; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index aa633fa95e64..e0b11095b9da 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -66,7 +66,7 @@ config BONDING | |||
66 | 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux. | 66 | 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux. |
67 | 67 | ||
68 | The driver supports multiple bonding modes to allow for both high | 68 | The driver supports multiple bonding modes to allow for both high |
69 | perfomance and high availability operation. | 69 | performance and high availability operation. |
70 | 70 | ||
71 | Refer to <file:Documentation/networking/bonding.txt> for more | 71 | Refer to <file:Documentation/networking/bonding.txt> for more |
72 | information. | 72 | information. |
@@ -698,8 +698,8 @@ config VORTEX | |||
698 | depends on NET_VENDOR_3COM && (PCI || EISA) | 698 | depends on NET_VENDOR_3COM && (PCI || EISA) |
699 | select MII | 699 | select MII |
700 | ---help--- | 700 | ---help--- |
701 | This option enables driver support for a large number of 10mbps and | 701 | This option enables driver support for a large number of 10Mbps and |
702 | 10/100mbps EISA, PCI and PCMCIA 3Com network cards: | 702 | 10/100Mbps EISA, PCI and PCMCIA 3Com network cards: |
703 | 703 | ||
704 | "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI | 704 | "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI |
705 | "Boomerang" (EtherLink XL 3c900 or 3c905) PCI | 705 | "Boomerang" (EtherLink XL 3c900 or 3c905) PCI |
@@ -1021,7 +1021,7 @@ config EEXPRESS_PRO | |||
1021 | depends on NET_ISA | 1021 | depends on NET_ISA |
1022 | ---help--- | 1022 | ---help--- |
1023 | If you have a network (Ethernet) card of this type, say Y. This | 1023 | If you have a network (Ethernet) card of this type, say Y. This |
1024 | driver supports intel i82595{FX,TX} based boards. Note however | 1024 | driver supports Intel i82595{FX,TX} based boards. Note however |
1025 | that the EtherExpress PRO/100 Ethernet card has its own separate | 1025 | that the EtherExpress PRO/100 Ethernet card has its own separate |
1026 | driver. Please read the Ethernet-HOWTO, available from | 1026 | driver. Please read the Ethernet-HOWTO, available from |
1027 | <http://www.tldp.org/docs.html#howto>. | 1027 | <http://www.tldp.org/docs.html#howto>. |
@@ -1208,7 +1208,7 @@ config IBM_EMAC_RX_SKB_HEADROOM | |||
1208 | help | 1208 | help |
1209 | Additional receive skb headroom. Note, that driver | 1209 | Additional receive skb headroom. Note, that driver |
1210 | will always reserve at least 2 bytes to make IP header | 1210 | will always reserve at least 2 bytes to make IP header |
1211 | aligned, so usualy there is no need to add any additional | 1211 | aligned, so usually there is no need to add any additional |
1212 | headroom. | 1212 | headroom. |
1213 | 1213 | ||
1214 | If unsure, set to 0. | 1214 | If unsure, set to 0. |
@@ -1372,8 +1372,8 @@ config B44 | |||
1372 | called b44. | 1372 | called b44. |
1373 | 1373 | ||
1374 | config FORCEDETH | 1374 | config FORCEDETH |
1375 | tristate "Reverse Engineered nForce Ethernet support (EXPERIMENTAL)" | 1375 | tristate "nForce Ethernet support" |
1376 | depends on NET_PCI && PCI && EXPERIMENTAL | 1376 | depends on NET_PCI && PCI |
1377 | help | 1377 | help |
1378 | If you have a network (Ethernet) controller of this type, say Y and | 1378 | If you have a network (Ethernet) controller of this type, say Y and |
1379 | read the Ethernet-HOWTO, available from | 1379 | read the Ethernet-HOWTO, available from |
@@ -1614,11 +1614,7 @@ config SIS900 | |||
1614 | ---help--- | 1614 | ---help--- |
1615 | This is a driver for the Fast Ethernet PCI network cards based on | 1615 | This is a driver for the Fast Ethernet PCI network cards based on |
1616 | the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in | 1616 | the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in |
1617 | SiS 630 and SiS 540 chipsets. If you have one of those, say Y and | 1617 | SiS 630 and SiS 540 chipsets. |
1618 | read the Ethernet-HOWTO, available at | ||
1619 | <http://www.tldp.org/docs.html#howto>. Please read | ||
1620 | <file:Documentation/networking/sis900.txt> and comments at the | ||
1621 | beginning of <file:drivers/net/sis900.c> for more information. | ||
1622 | 1618 | ||
1623 | This driver also supports AMD 79C901 HomePNA so that you can use | 1619 | This driver also supports AMD 79C901 HomePNA so that you can use |
1624 | your phone line as a network cable. | 1620 | your phone line as a network cable. |
@@ -1934,7 +1930,7 @@ config MYRI_SBUS | |||
1934 | will be called myri_sbus. This is recommended. | 1930 | will be called myri_sbus. This is recommended. |
1935 | 1931 | ||
1936 | config NS83820 | 1932 | config NS83820 |
1937 | tristate "National Semiconduct DP83820 support" | 1933 | tristate "National Semiconductor DP83820 support" |
1938 | depends on PCI | 1934 | depends on PCI |
1939 | help | 1935 | help |
1940 | This is a driver for the National Semiconductor DP83820 series | 1936 | This is a driver for the National Semiconductor DP83820 series |
@@ -2195,6 +2191,7 @@ config GFAR_NAPI | |||
2195 | config MV643XX_ETH | 2191 | config MV643XX_ETH |
2196 | tristate "MV-643XX Ethernet support" | 2192 | tristate "MV-643XX Ethernet support" |
2197 | depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM | 2193 | depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM |
2194 | select MII | ||
2198 | help | 2195 | help |
2199 | This driver supports the gigabit Ethernet on the Marvell MV643XX | 2196 | This driver supports the gigabit Ethernet on the Marvell MV643XX |
2200 | chipset which is used in the Momenco Ocelot C and Jaguar ATX and | 2197 | chipset which is used in the Momenco Ocelot C and Jaguar ATX and |
@@ -2514,7 +2511,7 @@ config PPP_FILTER | |||
2514 | Say Y here if you want to be able to filter the packets passing over | 2511 | Say Y here if you want to be able to filter the packets passing over |
2515 | PPP interfaces. This allows you to control which packets count as | 2512 | PPP interfaces. This allows you to control which packets count as |
2516 | activity (i.e. which packets will reset the idle timer or bring up | 2513 | activity (i.e. which packets will reset the idle timer or bring up |
2517 | a demand-dialled link) and which packets are to be dropped entirely. | 2514 | a demand-dialed link) and which packets are to be dropped entirely. |
2518 | You need to say Y here if you wish to use the pass-filter and | 2515 | You need to say Y here if you wish to use the pass-filter and |
2519 | active-filter options to pppd. | 2516 | active-filter options to pppd. |
2520 | 2517 | ||
@@ -2702,8 +2699,8 @@ config SHAPER | |||
2702 | <file:Documentation/networking/shaper.txt> for more information. | 2699 | <file:Documentation/networking/shaper.txt> for more information. |
2703 | 2700 | ||
2704 | An alternative to this traffic shaper is the experimental | 2701 | An alternative to this traffic shaper is the experimental |
2705 | Class-Based Queueing (CBQ) scheduling support which you get if you | 2702 | Class-Based Queuing (CBQ) scheduling support which you get if you |
2706 | say Y to "QoS and/or fair queueing" above. | 2703 | say Y to "QoS and/or fair queuing" above. |
2707 | 2704 | ||
2708 | To compile this driver as a module, choose M here: the module | 2705 | To compile this driver as a module, choose M here: the module |
2709 | will be called shaper. If unsure, say N. | 2706 | will be called shaper. If unsure, say N. |
diff --git a/drivers/net/apne.c b/drivers/net/apne.c index a94216b87184..b9820b86cdcc 100644 --- a/drivers/net/apne.c +++ b/drivers/net/apne.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/netdevice.h> | 37 | #include <linux/netdevice.h> |
38 | #include <linux/etherdevice.h> | 38 | #include <linux/etherdevice.h> |
39 | #include <linux/jiffies.h> | ||
39 | 40 | ||
40 | #include <asm/system.h> | 41 | #include <asm/system.h> |
41 | #include <asm/io.h> | 42 | #include <asm/io.h> |
@@ -216,7 +217,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) | |||
216 | outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); | 217 | outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); |
217 | 218 | ||
218 | while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) | 219 | while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) |
219 | if (jiffies - reset_start_time > 2*HZ/100) { | 220 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
220 | printk(" not found (no reset ack).\n"); | 221 | printk(" not found (no reset ack).\n"); |
221 | return -ENODEV; | 222 | return -ENODEV; |
222 | } | 223 | } |
@@ -382,7 +383,7 @@ apne_reset_8390(struct net_device *dev) | |||
382 | 383 | ||
383 | /* This check _should_not_ be necessary, omit eventually. */ | 384 | /* This check _should_not_ be necessary, omit eventually. */ |
384 | while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) | 385 | while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) |
385 | if (jiffies - reset_start_time > 2*HZ/100) { | 386 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
386 | printk("%s: ne_reset_8390() did not complete.\n", dev->name); | 387 | printk("%s: ne_reset_8390() did not complete.\n", dev->name); |
387 | break; | 388 | break; |
388 | } | 389 | } |
@@ -530,7 +531,7 @@ apne_block_output(struct net_device *dev, int count, | |||
530 | dma_start = jiffies; | 531 | dma_start = jiffies; |
531 | 532 | ||
532 | while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) | 533 | while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) |
533 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 534 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
534 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); | 535 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); |
535 | apne_reset_8390(dev); | 536 | apne_reset_8390(dev); |
536 | NS8390_init(dev,1); | 537 | NS8390_init(dev,1); |
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig index 948de2532a1e..7284ccad0b91 100644 --- a/drivers/net/arcnet/Kconfig +++ b/drivers/net/arcnet/Kconfig | |||
@@ -68,10 +68,10 @@ config ARCNET_CAP | |||
68 | packet is stuffed with an extra 4 byte "cookie" which doesn't | 68 | packet is stuffed with an extra 4 byte "cookie" which doesn't |
69 | actually appear on the network. After transmit the driver will send | 69 | actually appear on the network. After transmit the driver will send |
70 | back a packet with protocol byte 0 containing the status of the | 70 | back a packet with protocol byte 0 containing the status of the |
71 | transmition: | 71 | transmission: |
72 | 0=no hardware acknowledge | 72 | 0=no hardware acknowledge |
73 | 1=excessive nak | 73 | 1=excessive nak |
74 | 2=transmition accepted by the reciever hardware | 74 | 2=transmission accepted by the receiver hardware |
75 | 75 | ||
76 | Received packets are also stuffed with the extra 4 bytes but it will | 76 | Received packets are also stuffed with the extra 4 bytes but it will |
77 | be random data. | 77 | be random data. |
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index e1ea29b0cd14..e7555d4e6ff1 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c | |||
@@ -42,7 +42,7 @@ static int build_header(struct sk_buff *skb, struct net_device *dev, | |||
42 | static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, | 42 | static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, |
43 | int bufnum); | 43 | int bufnum); |
44 | 44 | ||
45 | struct ArcProto rawmode_proto = | 45 | static struct ArcProto rawmode_proto = |
46 | { | 46 | { |
47 | .suffix = 'r', | 47 | .suffix = 'r', |
48 | .mtu = XMTU, | 48 | .mtu = XMTU, |
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 38c3f033f739..8c8d6c453c45 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c | |||
@@ -97,25 +97,44 @@ static int __init arcrimi_probe(struct net_device *dev) | |||
97 | "must specify the shmem and irq!\n"); | 97 | "must specify the shmem and irq!\n"); |
98 | return -ENODEV; | 98 | return -ENODEV; |
99 | } | 99 | } |
100 | if (dev->dev_addr[0] == 0) { | ||
101 | BUGMSG(D_NORMAL, "You need to specify your card's station " | ||
102 | "ID!\n"); | ||
103 | return -ENODEV; | ||
104 | } | ||
100 | /* | 105 | /* |
101 | * Grab the memory region at mem_start for BUFFER_SIZE bytes. | 106 | * Grab the memory region at mem_start for MIRROR_SIZE bytes. |
102 | * Later in arcrimi_found() the real size will be determined | 107 | * Later in arcrimi_found() the real size will be determined |
103 | * and this reserve will be released and the correct size | 108 | * and this reserve will be released and the correct size |
104 | * will be taken. | 109 | * will be taken. |
105 | */ | 110 | */ |
106 | if (!request_mem_region(dev->mem_start, BUFFER_SIZE, "arcnet (90xx)")) { | 111 | if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { |
107 | BUGMSG(D_NORMAL, "Card memory already allocated\n"); | 112 | BUGMSG(D_NORMAL, "Card memory already allocated\n"); |
108 | return -ENODEV; | 113 | return -ENODEV; |
109 | } | 114 | } |
110 | if (dev->dev_addr[0] == 0) { | ||
111 | release_mem_region(dev->mem_start, BUFFER_SIZE); | ||
112 | BUGMSG(D_NORMAL, "You need to specify your card's station " | ||
113 | "ID!\n"); | ||
114 | return -ENODEV; | ||
115 | } | ||
116 | return arcrimi_found(dev); | 115 | return arcrimi_found(dev); |
117 | } | 116 | } |
118 | 117 | ||
118 | static int check_mirror(unsigned long addr, size_t size) | ||
119 | { | ||
120 | void __iomem *p; | ||
121 | int res = -1; | ||
122 | |||
123 | if (!request_mem_region(addr, size, "arcnet (90xx)")) | ||
124 | return -1; | ||
125 | |||
126 | p = ioremap(addr, size); | ||
127 | if (p) { | ||
128 | if (readb(p) == TESTvalue) | ||
129 | res = 1; | ||
130 | else | ||
131 | res = 0; | ||
132 | iounmap(p); | ||
133 | } | ||
134 | |||
135 | release_mem_region(addr, size); | ||
136 | return res; | ||
137 | } | ||
119 | 138 | ||
120 | /* | 139 | /* |
121 | * Set up the struct net_device associated with this card. Called after | 140 | * Set up the struct net_device associated with this card. Called after |
@@ -125,19 +144,28 @@ static int __init arcrimi_found(struct net_device *dev) | |||
125 | { | 144 | { |
126 | struct arcnet_local *lp; | 145 | struct arcnet_local *lp; |
127 | unsigned long first_mirror, last_mirror, shmem; | 146 | unsigned long first_mirror, last_mirror, shmem; |
147 | void __iomem *p; | ||
128 | int mirror_size; | 148 | int mirror_size; |
129 | int err; | 149 | int err; |
130 | 150 | ||
151 | p = ioremap(dev->mem_start, MIRROR_SIZE); | ||
152 | if (!p) { | ||
153 | release_mem_region(dev->mem_start, MIRROR_SIZE); | ||
154 | BUGMSG(D_NORMAL, "Can't ioremap\n"); | ||
155 | return -ENODEV; | ||
156 | } | ||
157 | |||
131 | /* reserve the irq */ | 158 | /* reserve the irq */ |
132 | if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { | 159 | if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { |
133 | release_mem_region(dev->mem_start, BUFFER_SIZE); | 160 | iounmap(p); |
161 | release_mem_region(dev->mem_start, MIRROR_SIZE); | ||
134 | BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); | 162 | BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); |
135 | return -ENODEV; | 163 | return -ENODEV; |
136 | } | 164 | } |
137 | 165 | ||
138 | shmem = dev->mem_start; | 166 | shmem = dev->mem_start; |
139 | isa_writeb(TESTvalue, shmem); | 167 | writeb(TESTvalue, p); |
140 | isa_writeb(dev->dev_addr[0], shmem + 1); /* actually the node ID */ | 168 | writeb(dev->dev_addr[0], p + 1); /* actually the node ID */ |
141 | 169 | ||
142 | /* find the real shared memory start/end points, including mirrors */ | 170 | /* find the real shared memory start/end points, including mirrors */ |
143 | 171 | ||
@@ -146,17 +174,18 @@ static int __init arcrimi_found(struct net_device *dev) | |||
146 | * 2k (or there are no mirrors at all) but on some, it's 4k. | 174 | * 2k (or there are no mirrors at all) but on some, it's 4k. |
147 | */ | 175 | */ |
148 | mirror_size = MIRROR_SIZE; | 176 | mirror_size = MIRROR_SIZE; |
149 | if (isa_readb(shmem) == TESTvalue | 177 | if (readb(p) == TESTvalue |
150 | && isa_readb(shmem - mirror_size) != TESTvalue | 178 | && check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 |
151 | && isa_readb(shmem - 2 * mirror_size) == TESTvalue) | 179 | && check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) |
152 | mirror_size *= 2; | 180 | mirror_size = 2 * MIRROR_SIZE; |
153 | 181 | ||
154 | first_mirror = last_mirror = shmem; | 182 | first_mirror = shmem - mirror_size; |
155 | while (isa_readb(first_mirror) == TESTvalue) | 183 | while (check_mirror(first_mirror, mirror_size) == 1) |
156 | first_mirror -= mirror_size; | 184 | first_mirror -= mirror_size; |
157 | first_mirror += mirror_size; | 185 | first_mirror += mirror_size; |
158 | 186 | ||
159 | while (isa_readb(last_mirror) == TESTvalue) | 187 | last_mirror = shmem + mirror_size; |
188 | while (check_mirror(last_mirror, mirror_size) == 1) | ||
160 | last_mirror += mirror_size; | 189 | last_mirror += mirror_size; |
161 | last_mirror -= mirror_size; | 190 | last_mirror -= mirror_size; |
162 | 191 | ||
@@ -181,7 +210,8 @@ static int __init arcrimi_found(struct net_device *dev) | |||
181 | * with the correct size. There is a VERY slim chance this could | 210 | * with the correct size. There is a VERY slim chance this could |
182 | * fail. | 211 | * fail. |
183 | */ | 212 | */ |
184 | release_mem_region(shmem, BUFFER_SIZE); | 213 | iounmap(p); |
214 | release_mem_region(shmem, MIRROR_SIZE); | ||
185 | if (!request_mem_region(dev->mem_start, | 215 | if (!request_mem_region(dev->mem_start, |
186 | dev->mem_end - dev->mem_start + 1, | 216 | dev->mem_end - dev->mem_start + 1, |
187 | "arcnet (90xx)")) { | 217 | "arcnet (90xx)")) { |
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 12ef52c193a3..64e2caf3083d 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <net/arp.h> | 52 | #include <net/arp.h> |
53 | #include <linux/init.h> | 53 | #include <linux/init.h> |
54 | #include <linux/arcdevice.h> | 54 | #include <linux/arcdevice.h> |
55 | #include <linux/jiffies.h> | ||
55 | 56 | ||
56 | /* "do nothing" functions for protocol drivers */ | 57 | /* "do nothing" functions for protocol drivers */ |
57 | static void null_rx(struct net_device *dev, int bufnum, | 58 | static void null_rx(struct net_device *dev, int bufnum, |
@@ -61,6 +62,7 @@ static int null_build_header(struct sk_buff *skb, struct net_device *dev, | |||
61 | static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, | 62 | static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, |
62 | int length, int bufnum); | 63 | int length, int bufnum); |
63 | 64 | ||
65 | static void arcnet_rx(struct net_device *dev, int bufnum); | ||
64 | 66 | ||
65 | /* | 67 | /* |
66 | * one ArcProto per possible proto ID. None of the elements of | 68 | * one ArcProto per possible proto ID. None of the elements of |
@@ -71,7 +73,7 @@ static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, | |||
71 | struct ArcProto *arc_proto_map[256], *arc_proto_default, | 73 | struct ArcProto *arc_proto_map[256], *arc_proto_default, |
72 | *arc_bcast_proto, *arc_raw_proto; | 74 | *arc_bcast_proto, *arc_raw_proto; |
73 | 75 | ||
74 | struct ArcProto arc_proto_null = | 76 | static struct ArcProto arc_proto_null = |
75 | { | 77 | { |
76 | .suffix = '?', | 78 | .suffix = '?', |
77 | .mtu = XMTU, | 79 | .mtu = XMTU, |
@@ -90,7 +92,6 @@ EXPORT_SYMBOL(arc_proto_map); | |||
90 | EXPORT_SYMBOL(arc_proto_default); | 92 | EXPORT_SYMBOL(arc_proto_default); |
91 | EXPORT_SYMBOL(arc_bcast_proto); | 93 | EXPORT_SYMBOL(arc_bcast_proto); |
92 | EXPORT_SYMBOL(arc_raw_proto); | 94 | EXPORT_SYMBOL(arc_raw_proto); |
93 | EXPORT_SYMBOL(arc_proto_null); | ||
94 | EXPORT_SYMBOL(arcnet_unregister_proto); | 95 | EXPORT_SYMBOL(arcnet_unregister_proto); |
95 | EXPORT_SYMBOL(arcnet_debug); | 96 | EXPORT_SYMBOL(arcnet_debug); |
96 | EXPORT_SYMBOL(alloc_arcdev); | 97 | EXPORT_SYMBOL(alloc_arcdev); |
@@ -118,7 +119,7 @@ static int __init arcnet_init(void) | |||
118 | 119 | ||
119 | arcnet_debug = debug; | 120 | arcnet_debug = debug; |
120 | 121 | ||
121 | printk(VERSION); | 122 | printk("arcnet loaded.\n"); |
122 | 123 | ||
123 | #ifdef ALPHA_WARNING | 124 | #ifdef ALPHA_WARNING |
124 | BUGLVL(D_EXTRA) { | 125 | BUGLVL(D_EXTRA) { |
@@ -178,8 +179,8 @@ EXPORT_SYMBOL(arcnet_dump_skb); | |||
178 | * Dump the contents of an ARCnet buffer | 179 | * Dump the contents of an ARCnet buffer |
179 | */ | 180 | */ |
180 | #if (ARCNET_DEBUG_MAX & (D_RX | D_TX)) | 181 | #if (ARCNET_DEBUG_MAX & (D_RX | D_TX)) |
181 | void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, | 182 | static void arcnet_dump_packet(struct net_device *dev, int bufnum, |
182 | int take_arcnet_lock) | 183 | char *desc, int take_arcnet_lock) |
183 | { | 184 | { |
184 | struct arcnet_local *lp = dev->priv; | 185 | struct arcnet_local *lp = dev->priv; |
185 | int i, length; | 186 | int i, length; |
@@ -208,7 +209,10 @@ void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, | |||
208 | 209 | ||
209 | } | 210 | } |
210 | 211 | ||
211 | EXPORT_SYMBOL(arcnet_dump_packet); | 212 | #else |
213 | |||
214 | #define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) do { } while (0) | ||
215 | |||
212 | #endif | 216 | #endif |
213 | 217 | ||
214 | 218 | ||
@@ -733,7 +737,7 @@ static void arcnet_timeout(struct net_device *dev) | |||
733 | 737 | ||
734 | spin_unlock_irqrestore(&lp->lock, flags); | 738 | spin_unlock_irqrestore(&lp->lock, flags); |
735 | 739 | ||
736 | if (jiffies - lp->last_timeout > 10*HZ) { | 740 | if (time_after(jiffies, lp->last_timeout + 10*HZ)) { |
737 | BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n", | 741 | BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n", |
738 | msg, status, lp->intmask, lp->lasttrans_dest); | 742 | msg, status, lp->intmask, lp->lasttrans_dest); |
739 | lp->last_timeout = jiffies; | 743 | lp->last_timeout = jiffies; |
@@ -996,7 +1000,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
996 | * This is a generic packet receiver that calls arcnet??_rx depending on the | 1000 | * This is a generic packet receiver that calls arcnet??_rx depending on the |
997 | * protocol ID found. | 1001 | * protocol ID found. |
998 | */ | 1002 | */ |
999 | void arcnet_rx(struct net_device *dev, int bufnum) | 1003 | static void arcnet_rx(struct net_device *dev, int bufnum) |
1000 | { | 1004 | { |
1001 | struct arcnet_local *lp = dev->priv; | 1005 | struct arcnet_local *lp = dev->priv; |
1002 | struct archdr pkt; | 1006 | struct archdr pkt; |
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index 6c2c9b9ac6db..43150b2bd13f 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c | |||
@@ -53,7 +53,7 @@ | |||
53 | 53 | ||
54 | 54 | ||
55 | /* Internal function declarations */ | 55 | /* Internal function declarations */ |
56 | static int com90xx_found(int ioaddr, int airq, u_long shmem); | 56 | static int com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *); |
57 | static void com90xx_command(struct net_device *dev, int command); | 57 | static void com90xx_command(struct net_device *dev, int command); |
58 | static int com90xx_status(struct net_device *dev); | 58 | static int com90xx_status(struct net_device *dev); |
59 | static void com90xx_setmask(struct net_device *dev, int mask); | 59 | static void com90xx_setmask(struct net_device *dev, int mask); |
@@ -116,14 +116,26 @@ static void __init com90xx_probe(void) | |||
116 | unsigned long airqmask; | 116 | unsigned long airqmask; |
117 | int ports[(0x3f0 - 0x200) / 16 + 1] = | 117 | int ports[(0x3f0 - 0x200) / 16 + 1] = |
118 | {0}; | 118 | {0}; |
119 | u_long shmems[(0xFF800 - 0xA0000) / 2048 + 1] = | 119 | unsigned long *shmems; |
120 | {0}; | 120 | void __iomem **iomem; |
121 | int numports, numshmems, *port; | 121 | int numports, numshmems, *port; |
122 | u_long *p; | 122 | u_long *p; |
123 | int index; | ||
123 | 124 | ||
124 | if (!io && !irq && !shmem && !*device && com90xx_skip_probe) | 125 | if (!io && !irq && !shmem && !*device && com90xx_skip_probe) |
125 | return; | 126 | return; |
126 | 127 | ||
128 | shmems = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(unsigned long), | ||
129 | GFP_KERNEL); | ||
130 | if (!shmems) | ||
131 | return; | ||
132 | iomem = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(void __iomem *), | ||
133 | GFP_KERNEL); | ||
134 | if (!iomem) { | ||
135 | kfree(shmems); | ||
136 | return; | ||
137 | } | ||
138 | |||
127 | BUGLVL(D_NORMAL) printk(VERSION); | 139 | BUGLVL(D_NORMAL) printk(VERSION); |
128 | 140 | ||
129 | /* set up the arrays where we'll store the possible probe addresses */ | 141 | /* set up the arrays where we'll store the possible probe addresses */ |
@@ -179,6 +191,8 @@ static void __init com90xx_probe(void) | |||
179 | 191 | ||
180 | if (!numports) { | 192 | if (!numports) { |
181 | BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n"); | 193 | BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n"); |
194 | kfree(shmems); | ||
195 | kfree(iomem); | ||
182 | return; | 196 | return; |
183 | } | 197 | } |
184 | /* Stage 2: we have now reset any possible ARCnet cards, so we can't | 198 | /* Stage 2: we have now reset any possible ARCnet cards, so we can't |
@@ -202,8 +216,8 @@ static void __init com90xx_probe(void) | |||
202 | * 0xD1 byte in the right place, or are read-only. | 216 | * 0xD1 byte in the right place, or are read-only. |
203 | */ | 217 | */ |
204 | numprint = -1; | 218 | numprint = -1; |
205 | for (p = &shmems[0]; p < shmems + numshmems; p++) { | 219 | for (index = 0, p = &shmems[0]; index < numshmems; p++, index++) { |
206 | u_long ptr = *p; | 220 | void __iomem *base; |
207 | 221 | ||
208 | numprint++; | 222 | numprint++; |
209 | numprint %= 8; | 223 | numprint %= 8; |
@@ -213,38 +227,49 @@ static void __init com90xx_probe(void) | |||
213 | } | 227 | } |
214 | BUGMSG2(D_INIT, "%lXh ", *p); | 228 | BUGMSG2(D_INIT, "%lXh ", *p); |
215 | 229 | ||
216 | if (!request_mem_region(*p, BUFFER_SIZE, "arcnet (90xx)")) { | 230 | if (!request_mem_region(*p, MIRROR_SIZE, "arcnet (90xx)")) { |
217 | BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n"); | 231 | BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n"); |
218 | BUGMSG2(D_INIT_REASONS, "Stage 3: "); | 232 | BUGMSG2(D_INIT_REASONS, "Stage 3: "); |
219 | BUGLVL(D_INIT_REASONS) numprint = 0; | 233 | BUGLVL(D_INIT_REASONS) numprint = 0; |
220 | *p-- = shmems[--numshmems]; | 234 | goto out; |
221 | continue; | 235 | } |
236 | base = ioremap(*p, MIRROR_SIZE); | ||
237 | if (!base) { | ||
238 | BUGMSG2(D_INIT_REASONS, "(ioremap)\n"); | ||
239 | BUGMSG2(D_INIT_REASONS, "Stage 3: "); | ||
240 | BUGLVL(D_INIT_REASONS) numprint = 0; | ||
241 | goto out1; | ||
222 | } | 242 | } |
223 | if (isa_readb(ptr) != TESTvalue) { | 243 | if (readb(base) != TESTvalue) { |
224 | BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n", | 244 | BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n", |
225 | isa_readb(ptr), TESTvalue); | 245 | readb(base), TESTvalue); |
226 | BUGMSG2(D_INIT_REASONS, "S3: "); | 246 | BUGMSG2(D_INIT_REASONS, "S3: "); |
227 | BUGLVL(D_INIT_REASONS) numprint = 0; | 247 | BUGLVL(D_INIT_REASONS) numprint = 0; |
228 | release_mem_region(*p, BUFFER_SIZE); | 248 | goto out2; |
229 | *p-- = shmems[--numshmems]; | ||
230 | continue; | ||
231 | } | 249 | } |
232 | /* By writing 0x42 to the TESTvalue location, we also make | 250 | /* By writing 0x42 to the TESTvalue location, we also make |
233 | * sure no "mirror" shmem areas show up - if they occur | 251 | * sure no "mirror" shmem areas show up - if they occur |
234 | * in another pass through this loop, they will be discarded | 252 | * in another pass through this loop, they will be discarded |
235 | * because *cptr != TESTvalue. | 253 | * because *cptr != TESTvalue. |
236 | */ | 254 | */ |
237 | isa_writeb(0x42, ptr); | 255 | writeb(0x42, base); |
238 | if (isa_readb(ptr) != 0x42) { | 256 | if (readb(base) != 0x42) { |
239 | BUGMSG2(D_INIT_REASONS, "(read only)\n"); | 257 | BUGMSG2(D_INIT_REASONS, "(read only)\n"); |
240 | BUGMSG2(D_INIT_REASONS, "S3: "); | 258 | BUGMSG2(D_INIT_REASONS, "S3: "); |
241 | release_mem_region(*p, BUFFER_SIZE); | 259 | goto out2; |
242 | *p-- = shmems[--numshmems]; | ||
243 | continue; | ||
244 | } | 260 | } |
245 | BUGMSG2(D_INIT_REASONS, "\n"); | 261 | BUGMSG2(D_INIT_REASONS, "\n"); |
246 | BUGMSG2(D_INIT_REASONS, "S3: "); | 262 | BUGMSG2(D_INIT_REASONS, "S3: "); |
247 | BUGLVL(D_INIT_REASONS) numprint = 0; | 263 | BUGLVL(D_INIT_REASONS) numprint = 0; |
264 | iomem[index] = base; | ||
265 | continue; | ||
266 | out2: | ||
267 | iounmap(base); | ||
268 | out1: | ||
269 | release_mem_region(*p, MIRROR_SIZE); | ||
270 | out: | ||
271 | *p-- = shmems[--numshmems]; | ||
272 | index--; | ||
248 | } | 273 | } |
249 | BUGMSG2(D_INIT, "\n"); | 274 | BUGMSG2(D_INIT, "\n"); |
250 | 275 | ||
@@ -252,6 +277,8 @@ static void __init com90xx_probe(void) | |||
252 | BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n"); | 277 | BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n"); |
253 | for (port = &ports[0]; port < ports + numports; port++) | 278 | for (port = &ports[0]; port < ports + numports; port++) |
254 | release_region(*port, ARCNET_TOTAL_SIZE); | 279 | release_region(*port, ARCNET_TOTAL_SIZE); |
280 | kfree(shmems); | ||
281 | kfree(iomem); | ||
255 | return; | 282 | return; |
256 | } | 283 | } |
257 | /* Stage 4: something of a dummy, to report the shmems that are | 284 | /* Stage 4: something of a dummy, to report the shmems that are |
@@ -351,30 +378,32 @@ static void __init com90xx_probe(void) | |||
351 | mdelay(RESETtime); | 378 | mdelay(RESETtime); |
352 | } else { | 379 | } else { |
353 | /* just one shmem and port, assume they match */ | 380 | /* just one shmem and port, assume they match */ |
354 | isa_writeb(TESTvalue, shmems[0]); | 381 | writeb(TESTvalue, iomem[0]); |
355 | } | 382 | } |
356 | #else | 383 | #else |
357 | inb(_RESET); | 384 | inb(_RESET); |
358 | mdelay(RESETtime); | 385 | mdelay(RESETtime); |
359 | #endif | 386 | #endif |
360 | 387 | ||
361 | for (p = &shmems[0]; p < shmems + numshmems; p++) { | 388 | for (index = 0; index < numshmems; index++) { |
362 | u_long ptr = *p; | 389 | u_long ptr = shmems[index]; |
390 | void __iomem *base = iomem[index]; | ||
363 | 391 | ||
364 | if (isa_readb(ptr) == TESTvalue) { /* found one */ | 392 | if (readb(base) == TESTvalue) { /* found one */ |
365 | BUGMSG2(D_INIT, "%lXh)\n", *p); | 393 | BUGMSG2(D_INIT, "%lXh)\n", *p); |
366 | openparen = 0; | 394 | openparen = 0; |
367 | 395 | ||
368 | /* register the card */ | 396 | /* register the card */ |
369 | if (com90xx_found(*port, airq, *p) == 0) | 397 | if (com90xx_found(*port, airq, ptr, base) == 0) |
370 | found = 1; | 398 | found = 1; |
371 | numprint = -1; | 399 | numprint = -1; |
372 | 400 | ||
373 | /* remove shmem from the list */ | 401 | /* remove shmem from the list */ |
374 | *p = shmems[--numshmems]; | 402 | shmems[index] = shmems[--numshmems]; |
403 | iomem[index] = iomem[numshmems]; | ||
375 | break; /* go to the next I/O port */ | 404 | break; /* go to the next I/O port */ |
376 | } else { | 405 | } else { |
377 | BUGMSG2(D_INIT_REASONS, "%Xh-", isa_readb(ptr)); | 406 | BUGMSG2(D_INIT_REASONS, "%Xh-", readb(base)); |
378 | } | 407 | } |
379 | } | 408 | } |
380 | 409 | ||
@@ -391,17 +420,40 @@ static void __init com90xx_probe(void) | |||
391 | BUGLVL(D_INIT_REASONS) printk("\n"); | 420 | BUGLVL(D_INIT_REASONS) printk("\n"); |
392 | 421 | ||
393 | /* Now put back TESTvalue on all leftover shmems. */ | 422 | /* Now put back TESTvalue on all leftover shmems. */ |
394 | for (p = &shmems[0]; p < shmems + numshmems; p++) { | 423 | for (index = 0; index < numshmems; index++) { |
395 | isa_writeb(TESTvalue, *p); | 424 | writeb(TESTvalue, iomem[index]); |
396 | release_mem_region(*p, BUFFER_SIZE); | 425 | iounmap(iomem[index]); |
426 | release_mem_region(shmems[index], MIRROR_SIZE); | ||
397 | } | 427 | } |
428 | kfree(shmems); | ||
429 | kfree(iomem); | ||
398 | } | 430 | } |
399 | 431 | ||
432 | static int check_mirror(unsigned long addr, size_t size) | ||
433 | { | ||
434 | void __iomem *p; | ||
435 | int res = -1; | ||
436 | |||
437 | if (!request_mem_region(addr, size, "arcnet (90xx)")) | ||
438 | return -1; | ||
439 | |||
440 | p = ioremap(addr, size); | ||
441 | if (p) { | ||
442 | if (readb(p) == TESTvalue) | ||
443 | res = 1; | ||
444 | else | ||
445 | res = 0; | ||
446 | iounmap(p); | ||
447 | } | ||
448 | |||
449 | release_mem_region(addr, size); | ||
450 | return res; | ||
451 | } | ||
400 | 452 | ||
401 | /* Set up the struct net_device associated with this card. Called after | 453 | /* Set up the struct net_device associated with this card. Called after |
402 | * probing succeeds. | 454 | * probing succeeds. |
403 | */ | 455 | */ |
404 | static int __init com90xx_found(int ioaddr, int airq, u_long shmem) | 456 | static int __init com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *p) |
405 | { | 457 | { |
406 | struct net_device *dev = NULL; | 458 | struct net_device *dev = NULL; |
407 | struct arcnet_local *lp; | 459 | struct arcnet_local *lp; |
@@ -412,7 +464,8 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem) | |||
412 | dev = alloc_arcdev(device); | 464 | dev = alloc_arcdev(device); |
413 | if (!dev) { | 465 | if (!dev) { |
414 | BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n"); | 466 | BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n"); |
415 | release_mem_region(shmem, BUFFER_SIZE); | 467 | iounmap(p); |
468 | release_mem_region(shmem, MIRROR_SIZE); | ||
416 | return -ENOMEM; | 469 | return -ENOMEM; |
417 | } | 470 | } |
418 | lp = dev->priv; | 471 | lp = dev->priv; |
@@ -423,24 +476,27 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem) | |||
423 | * 2k (or there are no mirrors at all) but on some, it's 4k. | 476 | * 2k (or there are no mirrors at all) but on some, it's 4k. |
424 | */ | 477 | */ |
425 | mirror_size = MIRROR_SIZE; | 478 | mirror_size = MIRROR_SIZE; |
426 | if (isa_readb(shmem) == TESTvalue | 479 | if (readb(p) == TESTvalue && |
427 | && isa_readb(shmem - mirror_size) != TESTvalue | 480 | check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 && |
428 | && isa_readb(shmem - 2 * mirror_size) == TESTvalue) | 481 | check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) |
429 | mirror_size *= 2; | 482 | mirror_size = 2 * MIRROR_SIZE; |
430 | 483 | ||
431 | first_mirror = last_mirror = shmem; | 484 | first_mirror = shmem - mirror_size; |
432 | while (isa_readb(first_mirror) == TESTvalue) | 485 | while (check_mirror(first_mirror, mirror_size) == 1) |
433 | first_mirror -= mirror_size; | 486 | first_mirror -= mirror_size; |
434 | first_mirror += mirror_size; | 487 | first_mirror += mirror_size; |
435 | 488 | ||
436 | while (isa_readb(last_mirror) == TESTvalue) | 489 | last_mirror = shmem + mirror_size; |
490 | while (check_mirror(last_mirror, mirror_size) == 1) | ||
437 | last_mirror += mirror_size; | 491 | last_mirror += mirror_size; |
438 | last_mirror -= mirror_size; | 492 | last_mirror -= mirror_size; |
439 | 493 | ||
440 | dev->mem_start = first_mirror; | 494 | dev->mem_start = first_mirror; |
441 | dev->mem_end = last_mirror + MIRROR_SIZE - 1; | 495 | dev->mem_end = last_mirror + MIRROR_SIZE - 1; |
442 | 496 | ||
443 | release_mem_region(shmem, BUFFER_SIZE); | 497 | iounmap(p); |
498 | release_mem_region(shmem, MIRROR_SIZE); | ||
499 | |||
444 | if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)")) | 500 | if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)")) |
445 | goto err_free_dev; | 501 | goto err_free_dev; |
446 | 502 | ||
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 6d7913704fb5..6d6c69f036ef 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c | |||
@@ -43,7 +43,7 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, | |||
43 | int bufnum); | 43 | int bufnum); |
44 | 44 | ||
45 | 45 | ||
46 | struct ArcProto rfc1051_proto = | 46 | static struct ArcProto rfc1051_proto = |
47 | { | 47 | { |
48 | .suffix = 's', | 48 | .suffix = 's', |
49 | .mtu = XMTU - RFC1051_HDR_SIZE, | 49 | .mtu = XMTU - RFC1051_HDR_SIZE, |
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index 6b6ae4bf3d39..bee34226abfa 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c | |||
@@ -43,7 +43,7 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, | |||
43 | int bufnum); | 43 | int bufnum); |
44 | static int continue_tx(struct net_device *dev, int bufnum); | 44 | static int continue_tx(struct net_device *dev, int bufnum); |
45 | 45 | ||
46 | struct ArcProto rfc1201_proto = | 46 | static struct ArcProto rfc1201_proto = |
47 | { | 47 | { |
48 | .suffix = 'a', | 48 | .suffix = 'a', |
49 | .mtu = 1500, /* could be more, but some receivers can't handle it... */ | 49 | .mtu = 1500, /* could be more, but some receivers can't handle it... */ |
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 6a93b666eb72..d52deb8d2075 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <linux/init.h> | 47 | #include <linux/init.h> |
48 | #include <linux/bitops.h> | 48 | #include <linux/bitops.h> |
49 | #include <linux/jiffies.h> | ||
49 | 50 | ||
50 | #include <asm/system.h> | 51 | #include <asm/system.h> |
51 | #include <asm/ecard.h> | 52 | #include <asm/ecard.h> |
@@ -355,7 +356,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf | |||
355 | dma_start = jiffies; | 356 | dma_start = jiffies; |
356 | 357 | ||
357 | while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0) | 358 | while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0) |
358 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 359 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
359 | printk(KERN_ERR "%s: timeout waiting for TX RDC\n", | 360 | printk(KERN_ERR "%s: timeout waiting for TX RDC\n", |
360 | dev->name); | 361 | dev->name); |
361 | etherh_reset (dev); | 362 | etherh_reset (dev); |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index a24200d0a616..b787b6582e50 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -46,7 +46,7 @@ typedef enum { | |||
46 | } board_t; | 46 | } board_t; |
47 | 47 | ||
48 | /* indexed by board_t, above */ | 48 | /* indexed by board_t, above */ |
49 | static struct { | 49 | static const struct { |
50 | char *name; | 50 | char *name; |
51 | } board_info[] __devinitdata = { | 51 | } board_info[] __devinitdata = { |
52 | { "Broadcom NetXtreme II BCM5706 1000Base-T" }, | 52 | { "Broadcom NetXtreme II BCM5706 1000Base-T" }, |
@@ -3476,7 +3476,7 @@ bnx2_test_registers(struct bnx2 *bp) | |||
3476 | { | 3476 | { |
3477 | int ret; | 3477 | int ret; |
3478 | int i; | 3478 | int i; |
3479 | static struct { | 3479 | static const struct { |
3480 | u16 offset; | 3480 | u16 offset; |
3481 | u16 flags; | 3481 | u16 flags; |
3482 | u32 rw_mask; | 3482 | u32 rw_mask; |
@@ -3891,7 +3891,7 @@ reg_test_err: | |||
3891 | static int | 3891 | static int |
3892 | bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) | 3892 | bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) |
3893 | { | 3893 | { |
3894 | static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, | 3894 | static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, |
3895 | 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; | 3895 | 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; |
3896 | int i; | 3896 | int i; |
3897 | 3897 | ||
@@ -3916,7 +3916,7 @@ bnx2_test_memory(struct bnx2 *bp) | |||
3916 | { | 3916 | { |
3917 | int ret = 0; | 3917 | int ret = 0; |
3918 | int i; | 3918 | int i; |
3919 | static struct { | 3919 | static const struct { |
3920 | u32 offset; | 3920 | u32 offset; |
3921 | u32 len; | 3921 | u32 len; |
3922 | } mem_tbl[] = { | 3922 | } mem_tbl[] = { |
@@ -5122,7 +5122,7 @@ static struct { | |||
5122 | 5122 | ||
5123 | #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) | 5123 | #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) |
5124 | 5124 | ||
5125 | static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { | 5125 | static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { |
5126 | STATS_OFFSET32(stat_IfHCInOctets_hi), | 5126 | STATS_OFFSET32(stat_IfHCInOctets_hi), |
5127 | STATS_OFFSET32(stat_IfHCInBadOctets_hi), | 5127 | STATS_OFFSET32(stat_IfHCInBadOctets_hi), |
5128 | STATS_OFFSET32(stat_IfHCOutOctets_hi), | 5128 | STATS_OFFSET32(stat_IfHCOutOctets_hi), |
diff --git a/drivers/net/bnx2_fw.h b/drivers/net/bnx2_fw.h index 0c21bd849814..8158974c35a8 100644 --- a/drivers/net/bnx2_fw.h +++ b/drivers/net/bnx2_fw.h | |||
@@ -14,20 +14,20 @@ | |||
14 | * accompanying it. | 14 | * accompanying it. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | static int bnx2_COM_b06FwReleaseMajor = 0x1; | 17 | static const int bnx2_COM_b06FwReleaseMajor = 0x1; |
18 | static int bnx2_COM_b06FwReleaseMinor = 0x0; | 18 | static const int bnx2_COM_b06FwReleaseMinor = 0x0; |
19 | static int bnx2_COM_b06FwReleaseFix = 0x0; | 19 | static const int bnx2_COM_b06FwReleaseFix = 0x0; |
20 | static u32 bnx2_COM_b06FwStartAddr = 0x080008b4; | 20 | static const u32 bnx2_COM_b06FwStartAddr = 0x080008b4; |
21 | static u32 bnx2_COM_b06FwTextAddr = 0x08000000; | 21 | static const u32 bnx2_COM_b06FwTextAddr = 0x08000000; |
22 | static int bnx2_COM_b06FwTextLen = 0x57bc; | 22 | static const int bnx2_COM_b06FwTextLen = 0x57bc; |
23 | static u32 bnx2_COM_b06FwDataAddr = 0x08005840; | 23 | static const u32 bnx2_COM_b06FwDataAddr = 0x08005840; |
24 | static int bnx2_COM_b06FwDataLen = 0x0; | 24 | static const int bnx2_COM_b06FwDataLen = 0x0; |
25 | static u32 bnx2_COM_b06FwRodataAddr = 0x080057c0; | 25 | static const u32 bnx2_COM_b06FwRodataAddr = 0x080057c0; |
26 | static int bnx2_COM_b06FwRodataLen = 0x58; | 26 | static const int bnx2_COM_b06FwRodataLen = 0x58; |
27 | static u32 bnx2_COM_b06FwBssAddr = 0x08005860; | 27 | static const u32 bnx2_COM_b06FwBssAddr = 0x08005860; |
28 | static int bnx2_COM_b06FwBssLen = 0x88; | 28 | static const int bnx2_COM_b06FwBssLen = 0x88; |
29 | static u32 bnx2_COM_b06FwSbssAddr = 0x08005840; | 29 | static const u32 bnx2_COM_b06FwSbssAddr = 0x08005840; |
30 | static int bnx2_COM_b06FwSbssLen = 0x1c; | 30 | static const int bnx2_COM_b06FwSbssLen = 0x1c; |
31 | static u32 bnx2_COM_b06FwText[(0x57bc/4) + 1] = { | 31 | static u32 bnx2_COM_b06FwText[(0x57bc/4) + 1] = { |
32 | 0x0a00022d, 0x00000000, 0x00000000, 0x0000000d, 0x636f6d20, 0x322e352e, | 32 | 0x0a00022d, 0x00000000, 0x00000000, 0x0000000d, 0x636f6d20, 0x322e352e, |
33 | 0x38000000, 0x02050802, 0x00000000, 0x00000003, 0x00000014, 0x00000032, | 33 | 0x38000000, 0x02050802, 0x00000000, 0x00000003, 0x00000014, 0x00000032, |
@@ -2325,20 +2325,20 @@ static u32 bnx2_rv2p_proc2[] = { | |||
2325 | 0x0000000c, 0x29520000, 0x00000018, 0x80000002, 0x0000000c, 0x29800000, | 2325 | 0x0000000c, 0x29520000, 0x00000018, 0x80000002, 0x0000000c, 0x29800000, |
2326 | 0x00000018, 0x00570000 }; | 2326 | 0x00000018, 0x00570000 }; |
2327 | 2327 | ||
2328 | static int bnx2_TPAT_b06FwReleaseMajor = 0x1; | 2328 | static const int bnx2_TPAT_b06FwReleaseMajor = 0x1; |
2329 | static int bnx2_TPAT_b06FwReleaseMinor = 0x0; | 2329 | static const int bnx2_TPAT_b06FwReleaseMinor = 0x0; |
2330 | static int bnx2_TPAT_b06FwReleaseFix = 0x0; | 2330 | static const int bnx2_TPAT_b06FwReleaseFix = 0x0; |
2331 | static u32 bnx2_TPAT_b06FwStartAddr = 0x08000860; | 2331 | static const u32 bnx2_TPAT_b06FwStartAddr = 0x08000860; |
2332 | static u32 bnx2_TPAT_b06FwTextAddr = 0x08000800; | 2332 | static const u32 bnx2_TPAT_b06FwTextAddr = 0x08000800; |
2333 | static int bnx2_TPAT_b06FwTextLen = 0x122c; | 2333 | static const int bnx2_TPAT_b06FwTextLen = 0x122c; |
2334 | static u32 bnx2_TPAT_b06FwDataAddr = 0x08001a60; | 2334 | static const u32 bnx2_TPAT_b06FwDataAddr = 0x08001a60; |
2335 | static int bnx2_TPAT_b06FwDataLen = 0x0; | 2335 | static const int bnx2_TPAT_b06FwDataLen = 0x0; |
2336 | static u32 bnx2_TPAT_b06FwRodataAddr = 0x00000000; | 2336 | static const u32 bnx2_TPAT_b06FwRodataAddr = 0x00000000; |
2337 | static int bnx2_TPAT_b06FwRodataLen = 0x0; | 2337 | static const int bnx2_TPAT_b06FwRodataLen = 0x0; |
2338 | static u32 bnx2_TPAT_b06FwBssAddr = 0x08001aa0; | 2338 | static const u32 bnx2_TPAT_b06FwBssAddr = 0x08001aa0; |
2339 | static int bnx2_TPAT_b06FwBssLen = 0x250; | 2339 | static const int bnx2_TPAT_b06FwBssLen = 0x250; |
2340 | static u32 bnx2_TPAT_b06FwSbssAddr = 0x08001a60; | 2340 | static const u32 bnx2_TPAT_b06FwSbssAddr = 0x08001a60; |
2341 | static int bnx2_TPAT_b06FwSbssLen = 0x34; | 2341 | static const int bnx2_TPAT_b06FwSbssLen = 0x34; |
2342 | static u32 bnx2_TPAT_b06FwText[(0x122c/4) + 1] = { | 2342 | static u32 bnx2_TPAT_b06FwText[(0x122c/4) + 1] = { |
2343 | 0x0a000218, 0x00000000, 0x00000000, 0x0000000d, 0x74706174, 0x20322e35, | 2343 | 0x0a000218, 0x00000000, 0x00000000, 0x0000000d, 0x74706174, 0x20322e35, |
2344 | 0x2e313100, 0x02050b01, 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 2344 | 0x2e313100, 0x02050b01, 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
@@ -2540,20 +2540,20 @@ static u32 bnx2_TPAT_b06FwRodata[(0x0/4) + 1] = { 0x0 }; | |||
2540 | static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 }; | 2540 | static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 }; |
2541 | static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 }; | 2541 | static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 }; |
2542 | 2542 | ||
2543 | static int bnx2_TXP_b06FwReleaseMajor = 0x1; | 2543 | static const int bnx2_TXP_b06FwReleaseMajor = 0x1; |
2544 | static int bnx2_TXP_b06FwReleaseMinor = 0x0; | 2544 | static const int bnx2_TXP_b06FwReleaseMinor = 0x0; |
2545 | static int bnx2_TXP_b06FwReleaseFix = 0x0; | 2545 | static const int bnx2_TXP_b06FwReleaseFix = 0x0; |
2546 | static u32 bnx2_TXP_b06FwStartAddr = 0x080034b0; | 2546 | static const u32 bnx2_TXP_b06FwStartAddr = 0x080034b0; |
2547 | static u32 bnx2_TXP_b06FwTextAddr = 0x08000000; | 2547 | static const u32 bnx2_TXP_b06FwTextAddr = 0x08000000; |
2548 | static int bnx2_TXP_b06FwTextLen = 0x5748; | 2548 | static const int bnx2_TXP_b06FwTextLen = 0x5748; |
2549 | static u32 bnx2_TXP_b06FwDataAddr = 0x08005760; | 2549 | static const u32 bnx2_TXP_b06FwDataAddr = 0x08005760; |
2550 | static int bnx2_TXP_b06FwDataLen = 0x0; | 2550 | static const int bnx2_TXP_b06FwDataLen = 0x0; |
2551 | static u32 bnx2_TXP_b06FwRodataAddr = 0x00000000; | 2551 | static const u32 bnx2_TXP_b06FwRodataAddr = 0x00000000; |
2552 | static int bnx2_TXP_b06FwRodataLen = 0x0; | 2552 | static const int bnx2_TXP_b06FwRodataLen = 0x0; |
2553 | static u32 bnx2_TXP_b06FwBssAddr = 0x080057a0; | 2553 | static const u32 bnx2_TXP_b06FwBssAddr = 0x080057a0; |
2554 | static int bnx2_TXP_b06FwBssLen = 0x1c4; | 2554 | static const int bnx2_TXP_b06FwBssLen = 0x1c4; |
2555 | static u32 bnx2_TXP_b06FwSbssAddr = 0x08005760; | 2555 | static const u32 bnx2_TXP_b06FwSbssAddr = 0x08005760; |
2556 | static int bnx2_TXP_b06FwSbssLen = 0x38; | 2556 | static const int bnx2_TXP_b06FwSbssLen = 0x38; |
2557 | static u32 bnx2_TXP_b06FwText[(0x5748/4) + 1] = { | 2557 | static u32 bnx2_TXP_b06FwText[(0x5748/4) + 1] = { |
2558 | 0x0a000d2c, 0x00000000, 0x00000000, 0x0000000d, 0x74787020, 0x322e352e, | 2558 | 0x0a000d2c, 0x00000000, 0x00000000, 0x0000000d, 0x74787020, 0x322e352e, |
2559 | 0x38000000, 0x02050800, 0x0000000a, 0x000003e8, 0x0000ea60, 0x00000000, | 2559 | 0x38000000, 0x02050800, 0x0000000a, 0x000003e8, 0x0000ea60, 0x00000000, |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index f2a63186ae05..e83bc825f6af 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1261,7 +1261,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1261 | struct ethhdr *eth_data; | 1261 | struct ethhdr *eth_data; |
1262 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | 1262 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); |
1263 | struct slave *tx_slave = NULL; | 1263 | struct slave *tx_slave = NULL; |
1264 | static u32 ip_bcast = 0xffffffff; | 1264 | static const u32 ip_bcast = 0xffffffff; |
1265 | int hash_size = 0; | 1265 | int hash_size = 0; |
1266 | int do_tx_balance = 1; | 1266 | int do_tx_balance = 1; |
1267 | u32 hash_index = 0; | 1267 | u32 hash_index = 0; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bcf9f17daf0d..2d0ac169a86c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -131,7 +131,7 @@ MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); | |||
131 | 131 | ||
132 | /*----------------------------- Global variables ----------------------------*/ | 132 | /*----------------------------- Global variables ----------------------------*/ |
133 | 133 | ||
134 | static const char *version = | 134 | static const char * const version = |
135 | DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; | 135 | DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; |
136 | 136 | ||
137 | LIST_HEAD(bond_dev_list); | 137 | LIST_HEAD(bond_dev_list); |
@@ -1040,6 +1040,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
1040 | if ((bond->params.mode == BOND_MODE_TLB) || | 1040 | if ((bond->params.mode == BOND_MODE_TLB) || |
1041 | (bond->params.mode == BOND_MODE_ALB)) { | 1041 | (bond->params.mode == BOND_MODE_ALB)) { |
1042 | bond_alb_handle_active_change(bond, new_active); | 1042 | bond_alb_handle_active_change(bond, new_active); |
1043 | if (old_active) | ||
1044 | bond_set_slave_inactive_flags(old_active); | ||
1045 | if (new_active) | ||
1046 | bond_set_slave_active_flags(new_active); | ||
1043 | } else { | 1047 | } else { |
1044 | bond->curr_active_slave = new_active; | 1048 | bond->curr_active_slave = new_active; |
1045 | } | 1049 | } |
@@ -1443,15 +1447,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1443 | 1447 | ||
1444 | switch (bond->params.mode) { | 1448 | switch (bond->params.mode) { |
1445 | case BOND_MODE_ACTIVEBACKUP: | 1449 | case BOND_MODE_ACTIVEBACKUP: |
1446 | /* if we're in active-backup mode, we need one and only one active | 1450 | /* if we're in active-backup mode, we need one and |
1447 | * interface. The backup interfaces will have their NOARP flag set | 1451 | * only one active interface. The backup interfaces |
1448 | * because we need them to be completely deaf and not to respond to | 1452 | * will have their SLAVE_INACTIVE flag set because we |
1449 | * any ARP request on the network to avoid fooling a switch. Thus, | 1453 | * need them to be drop all packets. Thus, since we |
1450 | * since we guarantee that curr_active_slave always point to the last | 1454 | * guarantee that curr_active_slave always point to |
1451 | * usable interface, we just have to verify this interface's flag. | 1455 | * the last usable interface, we just have to verify |
1456 | * this interface's flag. | ||
1452 | */ | 1457 | */ |
1453 | if (((!bond->curr_active_slave) || | 1458 | if (((!bond->curr_active_slave) || |
1454 | (bond->curr_active_slave->dev->flags & IFF_NOARP)) && | 1459 | (bond->curr_active_slave->dev->priv_flags & IFF_SLAVE_INACTIVE)) && |
1455 | (new_slave->link != BOND_LINK_DOWN)) { | 1460 | (new_slave->link != BOND_LINK_DOWN)) { |
1456 | dprintk("This is the first active slave\n"); | 1461 | dprintk("This is the first active slave\n"); |
1457 | /* first slave or no active slave yet, and this link | 1462 | /* first slave or no active slave yet, and this link |
@@ -1492,6 +1497,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1492 | * is OK, so make this interface the active one | 1497 | * is OK, so make this interface the active one |
1493 | */ | 1498 | */ |
1494 | bond_change_active_slave(bond, new_slave); | 1499 | bond_change_active_slave(bond, new_slave); |
1500 | } else { | ||
1501 | bond_set_slave_inactive_flags(new_slave); | ||
1495 | } | 1502 | } |
1496 | break; | 1503 | break; |
1497 | default: | 1504 | default: |
@@ -1724,13 +1731,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1724 | addr.sa_family = slave_dev->type; | 1731 | addr.sa_family = slave_dev->type; |
1725 | dev_set_mac_address(slave_dev, &addr); | 1732 | dev_set_mac_address(slave_dev, &addr); |
1726 | 1733 | ||
1727 | /* restore the original state of the | 1734 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | |
1728 | * IFF_NOARP flag that might have been | 1735 | IFF_SLAVE_INACTIVE); |
1729 | * set by bond_set_slave_inactive_flags() | ||
1730 | */ | ||
1731 | if ((slave->original_flags & IFF_NOARP) == 0) { | ||
1732 | slave_dev->flags &= ~IFF_NOARP; | ||
1733 | } | ||
1734 | 1736 | ||
1735 | kfree(slave); | 1737 | kfree(slave); |
1736 | 1738 | ||
@@ -1816,12 +1818,8 @@ static int bond_release_all(struct net_device *bond_dev) | |||
1816 | addr.sa_family = slave_dev->type; | 1818 | addr.sa_family = slave_dev->type; |
1817 | dev_set_mac_address(slave_dev, &addr); | 1819 | dev_set_mac_address(slave_dev, &addr); |
1818 | 1820 | ||
1819 | /* restore the original state of the IFF_NOARP flag that might have | 1821 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | |
1820 | * been set by bond_set_slave_inactive_flags() | 1822 | IFF_SLAVE_INACTIVE); |
1821 | */ | ||
1822 | if ((slave->original_flags & IFF_NOARP) == 0) { | ||
1823 | slave_dev->flags &= ~IFF_NOARP; | ||
1824 | } | ||
1825 | 1823 | ||
1826 | kfree(slave); | 1824 | kfree(slave); |
1827 | 1825 | ||
@@ -4061,14 +4059,17 @@ void bond_set_mode_ops(struct bonding *bond, int mode) | |||
4061 | bond_dev->hard_start_xmit = bond_xmit_broadcast; | 4059 | bond_dev->hard_start_xmit = bond_xmit_broadcast; |
4062 | break; | 4060 | break; |
4063 | case BOND_MODE_8023AD: | 4061 | case BOND_MODE_8023AD: |
4062 | bond_set_master_3ad_flags(bond); | ||
4064 | bond_dev->hard_start_xmit = bond_3ad_xmit_xor; | 4063 | bond_dev->hard_start_xmit = bond_3ad_xmit_xor; |
4065 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) | 4064 | if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) |
4066 | bond->xmit_hash_policy = bond_xmit_hash_policy_l34; | 4065 | bond->xmit_hash_policy = bond_xmit_hash_policy_l34; |
4067 | else | 4066 | else |
4068 | bond->xmit_hash_policy = bond_xmit_hash_policy_l2; | 4067 | bond->xmit_hash_policy = bond_xmit_hash_policy_l2; |
4069 | break; | 4068 | break; |
4070 | case BOND_MODE_TLB: | ||
4071 | case BOND_MODE_ALB: | 4069 | case BOND_MODE_ALB: |
4070 | bond_set_master_alb_flags(bond); | ||
4071 | /* FALLTHRU */ | ||
4072 | case BOND_MODE_TLB: | ||
4072 | bond_dev->hard_start_xmit = bond_alb_xmit; | 4073 | bond_dev->hard_start_xmit = bond_alb_xmit; |
4073 | bond_dev->set_mac_address = bond_alb_set_mac_address; | 4074 | bond_dev->set_mac_address = bond_alb_set_mac_address; |
4074 | break; | 4075 | break; |
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 041bcc583557..5a9bd95884be 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -424,6 +424,12 @@ static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size | |||
424 | ret = -EINVAL; | 424 | ret = -EINVAL; |
425 | goto out; | 425 | goto out; |
426 | } else { | 426 | } else { |
427 | if (bond->params.mode == BOND_MODE_8023AD) | ||
428 | bond_unset_master_3ad_flags(bond); | ||
429 | |||
430 | if (bond->params.mode == BOND_MODE_ALB) | ||
431 | bond_unset_master_alb_flags(bond); | ||
432 | |||
427 | bond->params.mode = new_value; | 433 | bond->params.mode = new_value; |
428 | bond_set_mode_ops(bond, bond->params.mode); | 434 | bond_set_mode_ops(bond, bond->params.mode); |
429 | printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n", | 435 | printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n", |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 3dd78d048c3e..ce9dc9b4e2dc 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -22,8 +22,8 @@ | |||
22 | #include "bond_3ad.h" | 22 | #include "bond_3ad.h" |
23 | #include "bond_alb.h" | 23 | #include "bond_alb.h" |
24 | 24 | ||
25 | #define DRV_VERSION "3.0.1" | 25 | #define DRV_VERSION "3.0.2" |
26 | #define DRV_RELDATE "January 9, 2006" | 26 | #define DRV_RELDATE "February 21, 2006" |
27 | #define DRV_NAME "bonding" | 27 | #define DRV_NAME "bonding" |
28 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | 28 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" |
29 | 29 | ||
@@ -230,14 +230,37 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) | |||
230 | 230 | ||
231 | static inline void bond_set_slave_inactive_flags(struct slave *slave) | 231 | static inline void bond_set_slave_inactive_flags(struct slave *slave) |
232 | { | 232 | { |
233 | slave->state = BOND_STATE_BACKUP; | 233 | struct bonding *bond = slave->dev->master->priv; |
234 | slave->dev->flags |= IFF_NOARP; | 234 | if (bond->params.mode != BOND_MODE_TLB && |
235 | bond->params.mode != BOND_MODE_ALB) | ||
236 | slave->state = BOND_STATE_BACKUP; | ||
237 | slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; | ||
235 | } | 238 | } |
236 | 239 | ||
237 | static inline void bond_set_slave_active_flags(struct slave *slave) | 240 | static inline void bond_set_slave_active_flags(struct slave *slave) |
238 | { | 241 | { |
239 | slave->state = BOND_STATE_ACTIVE; | 242 | slave->state = BOND_STATE_ACTIVE; |
240 | slave->dev->flags &= ~IFF_NOARP; | 243 | slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE; |
244 | } | ||
245 | |||
246 | static inline void bond_set_master_3ad_flags(struct bonding *bond) | ||
247 | { | ||
248 | bond->dev->priv_flags |= IFF_MASTER_8023AD; | ||
249 | } | ||
250 | |||
251 | static inline void bond_unset_master_3ad_flags(struct bonding *bond) | ||
252 | { | ||
253 | bond->dev->priv_flags &= ~IFF_MASTER_8023AD; | ||
254 | } | ||
255 | |||
256 | static inline void bond_set_master_alb_flags(struct bonding *bond) | ||
257 | { | ||
258 | bond->dev->priv_flags |= IFF_MASTER_ALB; | ||
259 | } | ||
260 | |||
261 | static inline void bond_unset_master_alb_flags(struct bonding *bond) | ||
262 | { | ||
263 | bond->dev->priv_flags &= ~IFF_MASTER_ALB; | ||
241 | } | 264 | } |
242 | 265 | ||
243 | struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); | 266 | struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); |
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c index e824acaf188a..542e5e065c6f 100644 --- a/drivers/net/chelsio/espi.c +++ b/drivers/net/chelsio/espi.c | |||
@@ -87,15 +87,9 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr, | |||
87 | static int tricn_init(adapter_t *adapter) | 87 | static int tricn_init(adapter_t *adapter) |
88 | { | 88 | { |
89 | int i = 0; | 89 | int i = 0; |
90 | int sme = 1; | ||
91 | int stat = 0; | 90 | int stat = 0; |
92 | int timeout = 0; | 91 | int timeout = 0; |
93 | int is_ready = 0; | 92 | int is_ready = 0; |
94 | int dynamic_deskew = 0; | ||
95 | |||
96 | if (dynamic_deskew) | ||
97 | sme = 0; | ||
98 | |||
99 | 93 | ||
100 | /* 1 */ | 94 | /* 1 */ |
101 | timeout=1000; | 95 | timeout=1000; |
@@ -113,11 +107,9 @@ static int tricn_init(adapter_t *adapter) | |||
113 | } | 107 | } |
114 | 108 | ||
115 | /* 2 */ | 109 | /* 2 */ |
116 | if (sme) { | 110 | tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); |
117 | tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); | 111 | tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); |
118 | tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); | 112 | tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); |
119 | tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); | ||
120 | } | ||
121 | for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); | 113 | for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); |
122 | for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); | 114 | for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); |
123 | for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); | 115 | for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); |
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 1ebb5d149aef..12e4e96dba2d 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c | |||
@@ -686,7 +686,7 @@ int t1_init_hw_modules(adapter_t *adapter) | |||
686 | */ | 686 | */ |
687 | static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) | 687 | static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) |
688 | { | 688 | { |
689 | static unsigned short speed_map[] = { 33, 66, 100, 133 }; | 689 | static const unsigned short speed_map[] = { 33, 66, 100, 133 }; |
690 | u32 pci_mode; | 690 | u32 pci_mode; |
691 | 691 | ||
692 | pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); | 692 | pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); |
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c index 70b47e4c4e9c..32d13166c6e8 100644 --- a/drivers/net/dgrs.c +++ b/drivers/net/dgrs.c | |||
@@ -993,7 +993,7 @@ dgrs_download(struct net_device *dev0) | |||
993 | int is; | 993 | int is; |
994 | unsigned long i; | 994 | unsigned long i; |
995 | 995 | ||
996 | static int iv2is[16] = { | 996 | static const int iv2is[16] = { |
997 | 0, 0, 0, ES4H_IS_INT3, | 997 | 0, 0, 0, ES4H_IS_INT3, |
998 | 0, ES4H_IS_INT5, 0, ES4H_IS_INT7, | 998 | 0, ES4H_IS_INT5, 0, ES4H_IS_INT7, |
999 | 0, 0, ES4H_IS_INT10, ES4H_IS_INT11, | 999 | 0, 0, ES4H_IS_INT10, ES4H_IS_INT11, |
diff --git a/drivers/net/dgrs_firmware.c b/drivers/net/dgrs_firmware.c index 1e49e1e1f201..8c20d4c99937 100644 --- a/drivers/net/dgrs_firmware.c +++ b/drivers/net/dgrs_firmware.c | |||
@@ -1,4 +1,4 @@ | |||
1 | static int dgrs_firmnum = 550; | 1 | static const int dgrs_firmnum = 550; |
2 | static char dgrs_firmver[] = "$Version$"; | 2 | static char dgrs_firmver[] = "$Version$"; |
3 | static char dgrs_firmdate[] = "11/16/96 03:45:15"; | 3 | static char dgrs_firmdate[] = "11/16/96 03:45:15"; |
4 | static unsigned char dgrs_code[] __initdata = { | 4 | static unsigned char dgrs_code[] __initdata = { |
@@ -9963,4 +9963,4 @@ static unsigned char dgrs_code[] __initdata = { | |||
9963 | 109,46,99,0,114,99,0,0,48,120,0,0, | 9963 | 109,46,99,0,114,99,0,0,48,120,0,0, |
9964 | 0,0,0,0,0,0,0,0,0,0,0,0 | 9964 | 0,0,0,0,0,0,0,0,0,0,0,0 |
9965 | } ; | 9965 | } ; |
9966 | static int dgrs_ncode = 119520 ; | 9966 | static const int dgrs_ncode = 119520 ; |
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index fb9dae302dcc..1f3627470c95 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
@@ -90,8 +90,8 @@ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ | |||
90 | #define EnableInt() \ | 90 | #define EnableInt() \ |
91 | writew(DEFAULT_INTR, ioaddr + IntEnable) | 91 | writew(DEFAULT_INTR, ioaddr + IntEnable) |
92 | 92 | ||
93 | static int max_intrloop = 50; | 93 | static const int max_intrloop = 50; |
94 | static int multicast_filter_limit = 0x40; | 94 | static const int multicast_filter_limit = 0x40; |
95 | 95 | ||
96 | static int rio_open (struct net_device *dev); | 96 | static int rio_open (struct net_device *dev); |
97 | static void rio_timer (unsigned long data); | 97 | static void rio_timer (unsigned long data); |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index f57a85feda3d..31ac001f5517 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -598,8 +598,8 @@ static void e100_enable_irq(struct nic *nic) | |||
598 | 598 | ||
599 | spin_lock_irqsave(&nic->cmd_lock, flags); | 599 | spin_lock_irqsave(&nic->cmd_lock, flags); |
600 | writeb(irq_mask_none, &nic->csr->scb.cmd_hi); | 600 | writeb(irq_mask_none, &nic->csr->scb.cmd_hi); |
601 | spin_unlock_irqrestore(&nic->cmd_lock, flags); | ||
602 | e100_write_flush(nic); | 601 | e100_write_flush(nic); |
602 | spin_unlock_irqrestore(&nic->cmd_lock, flags); | ||
603 | } | 603 | } |
604 | 604 | ||
605 | static void e100_disable_irq(struct nic *nic) | 605 | static void e100_disable_irq(struct nic *nic) |
@@ -608,8 +608,8 @@ static void e100_disable_irq(struct nic *nic) | |||
608 | 608 | ||
609 | spin_lock_irqsave(&nic->cmd_lock, flags); | 609 | spin_lock_irqsave(&nic->cmd_lock, flags); |
610 | writeb(irq_mask_all, &nic->csr->scb.cmd_hi); | 610 | writeb(irq_mask_all, &nic->csr->scb.cmd_hi); |
611 | spin_unlock_irqrestore(&nic->cmd_lock, flags); | ||
612 | e100_write_flush(nic); | 611 | e100_write_flush(nic); |
612 | spin_unlock_irqrestore(&nic->cmd_lock, flags); | ||
613 | } | 613 | } |
614 | 614 | ||
615 | static void e100_hw_reset(struct nic *nic) | 615 | static void e100_hw_reset(struct nic *nic) |
@@ -1582,8 +1582,8 @@ static void e100_watchdog(unsigned long data) | |||
1582 | * interrupt mask bit and the SW Interrupt generation bit */ | 1582 | * interrupt mask bit and the SW Interrupt generation bit */ |
1583 | spin_lock_irq(&nic->cmd_lock); | 1583 | spin_lock_irq(&nic->cmd_lock); |
1584 | writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); | 1584 | writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); |
1585 | spin_unlock_irq(&nic->cmd_lock); | ||
1586 | e100_write_flush(nic); | 1585 | e100_write_flush(nic); |
1586 | spin_unlock_irq(&nic->cmd_lock); | ||
1587 | 1587 | ||
1588 | e100_update_stats(nic); | 1588 | e100_update_stats(nic); |
1589 | e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex); | 1589 | e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex); |
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 99baf0e099fc..281de41d030a 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -83,10 +83,6 @@ | |||
83 | struct e1000_adapter; | 83 | struct e1000_adapter; |
84 | 84 | ||
85 | #include "e1000_hw.h" | 85 | #include "e1000_hw.h" |
86 | #ifdef CONFIG_E1000_MQ | ||
87 | #include <linux/cpu.h> | ||
88 | #include <linux/smp.h> | ||
89 | #endif | ||
90 | 86 | ||
91 | #ifdef DBG | 87 | #ifdef DBG |
92 | #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) | 88 | #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) |
@@ -169,12 +165,6 @@ struct e1000_buffer { | |||
169 | uint16_t next_to_watch; | 165 | uint16_t next_to_watch; |
170 | }; | 166 | }; |
171 | 167 | ||
172 | #ifdef CONFIG_E1000_MQ | ||
173 | struct e1000_queue_stats { | ||
174 | uint64_t packets; | ||
175 | uint64_t bytes; | ||
176 | }; | ||
177 | #endif | ||
178 | 168 | ||
179 | struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; | 169 | struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; |
180 | struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; | 170 | struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; |
@@ -198,12 +188,7 @@ struct e1000_tx_ring { | |||
198 | spinlock_t tx_lock; | 188 | spinlock_t tx_lock; |
199 | uint16_t tdh; | 189 | uint16_t tdh; |
200 | uint16_t tdt; | 190 | uint16_t tdt; |
201 | |||
202 | boolean_t last_tx_tso; | 191 | boolean_t last_tx_tso; |
203 | |||
204 | #ifdef CONFIG_E1000_MQ | ||
205 | struct e1000_queue_stats tx_stats; | ||
206 | #endif | ||
207 | }; | 192 | }; |
208 | 193 | ||
209 | struct e1000_rx_ring { | 194 | struct e1000_rx_ring { |
@@ -230,9 +215,6 @@ struct e1000_rx_ring { | |||
230 | 215 | ||
231 | uint16_t rdh; | 216 | uint16_t rdh; |
232 | uint16_t rdt; | 217 | uint16_t rdt; |
233 | #ifdef CONFIG_E1000_MQ | ||
234 | struct e1000_queue_stats rx_stats; | ||
235 | #endif | ||
236 | }; | 218 | }; |
237 | 219 | ||
238 | #define E1000_DESC_UNUSED(R) \ | 220 | #define E1000_DESC_UNUSED(R) \ |
@@ -260,6 +242,7 @@ struct e1000_adapter { | |||
260 | uint32_t rx_buffer_len; | 242 | uint32_t rx_buffer_len; |
261 | uint32_t part_num; | 243 | uint32_t part_num; |
262 | uint32_t wol; | 244 | uint32_t wol; |
245 | uint32_t ksp3_port_a; | ||
263 | uint32_t smartspeed; | 246 | uint32_t smartspeed; |
264 | uint32_t en_mng_pt; | 247 | uint32_t en_mng_pt; |
265 | uint16_t link_speed; | 248 | uint16_t link_speed; |
@@ -269,8 +252,8 @@ struct e1000_adapter { | |||
269 | spinlock_t tx_queue_lock; | 252 | spinlock_t tx_queue_lock; |
270 | #endif | 253 | #endif |
271 | atomic_t irq_sem; | 254 | atomic_t irq_sem; |
272 | struct work_struct tx_timeout_task; | ||
273 | struct work_struct watchdog_task; | 255 | struct work_struct watchdog_task; |
256 | struct work_struct reset_task; | ||
274 | uint8_t fc_autoneg; | 257 | uint8_t fc_autoneg; |
275 | 258 | ||
276 | struct timer_list blink_timer; | 259 | struct timer_list blink_timer; |
@@ -278,9 +261,6 @@ struct e1000_adapter { | |||
278 | 261 | ||
279 | /* TX */ | 262 | /* TX */ |
280 | struct e1000_tx_ring *tx_ring; /* One per active queue */ | 263 | struct e1000_tx_ring *tx_ring; /* One per active queue */ |
281 | #ifdef CONFIG_E1000_MQ | ||
282 | struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */ | ||
283 | #endif | ||
284 | unsigned long tx_queue_len; | 264 | unsigned long tx_queue_len; |
285 | uint32_t txd_cmd; | 265 | uint32_t txd_cmd; |
286 | uint32_t tx_int_delay; | 266 | uint32_t tx_int_delay; |
@@ -301,24 +281,19 @@ struct e1000_adapter { | |||
301 | /* RX */ | 281 | /* RX */ |
302 | #ifdef CONFIG_E1000_NAPI | 282 | #ifdef CONFIG_E1000_NAPI |
303 | boolean_t (*clean_rx) (struct e1000_adapter *adapter, | 283 | boolean_t (*clean_rx) (struct e1000_adapter *adapter, |
304 | struct e1000_rx_ring *rx_ring, | 284 | struct e1000_rx_ring *rx_ring, |
305 | int *work_done, int work_to_do); | 285 | int *work_done, int work_to_do); |
306 | #else | 286 | #else |
307 | boolean_t (*clean_rx) (struct e1000_adapter *adapter, | 287 | boolean_t (*clean_rx) (struct e1000_adapter *adapter, |
308 | struct e1000_rx_ring *rx_ring); | 288 | struct e1000_rx_ring *rx_ring); |
309 | #endif | 289 | #endif |
310 | void (*alloc_rx_buf) (struct e1000_adapter *adapter, | 290 | void (*alloc_rx_buf) (struct e1000_adapter *adapter, |
311 | struct e1000_rx_ring *rx_ring, | 291 | struct e1000_rx_ring *rx_ring, |
312 | int cleaned_count); | 292 | int cleaned_count); |
313 | struct e1000_rx_ring *rx_ring; /* One per active queue */ | 293 | struct e1000_rx_ring *rx_ring; /* One per active queue */ |
314 | #ifdef CONFIG_E1000_NAPI | 294 | #ifdef CONFIG_E1000_NAPI |
315 | struct net_device *polling_netdev; /* One per active queue */ | 295 | struct net_device *polling_netdev; /* One per active queue */ |
316 | #endif | 296 | #endif |
317 | #ifdef CONFIG_E1000_MQ | ||
318 | struct net_device **cpu_netdev; /* per-cpu */ | ||
319 | struct call_async_data_struct rx_sched_call_data; | ||
320 | cpumask_t cpumask; | ||
321 | #endif | ||
322 | int num_tx_queues; | 297 | int num_tx_queues; |
323 | int num_rx_queues; | 298 | int num_rx_queues; |
324 | 299 | ||
@@ -353,10 +328,37 @@ struct e1000_adapter { | |||
353 | struct e1000_rx_ring test_rx_ring; | 328 | struct e1000_rx_ring test_rx_ring; |
354 | 329 | ||
355 | 330 | ||
356 | u32 *config_space; | 331 | uint32_t *config_space; |
357 | int msg_enable; | 332 | int msg_enable; |
358 | #ifdef CONFIG_PCI_MSI | 333 | #ifdef CONFIG_PCI_MSI |
359 | boolean_t have_msi; | 334 | boolean_t have_msi; |
360 | #endif | 335 | #endif |
336 | /* to not mess up cache alignment, always add to the bottom */ | ||
337 | boolean_t txb2b; | ||
338 | #ifdef NETIF_F_TSO | ||
339 | boolean_t tso_force; | ||
340 | #endif | ||
361 | }; | 341 | }; |
342 | |||
343 | |||
344 | /* e1000_main.c */ | ||
345 | extern char e1000_driver_name[]; | ||
346 | extern char e1000_driver_version[]; | ||
347 | int e1000_up(struct e1000_adapter *adapter); | ||
348 | void e1000_down(struct e1000_adapter *adapter); | ||
349 | void e1000_reset(struct e1000_adapter *adapter); | ||
350 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | ||
351 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | ||
352 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | ||
353 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | ||
354 | void e1000_update_stats(struct e1000_adapter *adapter); | ||
355 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); | ||
356 | |||
357 | /* e1000_ethtool.c */ | ||
358 | void e1000_set_ethtool_ops(struct net_device *netdev); | ||
359 | |||
360 | /* e1000_param.c */ | ||
361 | void e1000_check_options(struct e1000_adapter *adapter); | ||
362 | |||
363 | |||
362 | #endif /* _E1000_H_ */ | 364 | #endif /* _E1000_H_ */ |
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index 5cedc81786e3..ecccca35c6f4 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
@@ -32,19 +32,6 @@ | |||
32 | 32 | ||
33 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
34 | 34 | ||
35 | extern char e1000_driver_name[]; | ||
36 | extern char e1000_driver_version[]; | ||
37 | |||
38 | extern int e1000_up(struct e1000_adapter *adapter); | ||
39 | extern void e1000_down(struct e1000_adapter *adapter); | ||
40 | extern void e1000_reset(struct e1000_adapter *adapter); | ||
41 | extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); | ||
42 | extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | ||
43 | extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | ||
44 | extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | ||
45 | extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | ||
46 | extern void e1000_update_stats(struct e1000_adapter *adapter); | ||
47 | |||
48 | struct e1000_stats { | 35 | struct e1000_stats { |
49 | char stat_string[ETH_GSTRING_LEN]; | 36 | char stat_string[ETH_GSTRING_LEN]; |
50 | int sizeof_stat; | 37 | int sizeof_stat; |
@@ -60,7 +47,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
60 | { "tx_bytes", E1000_STAT(net_stats.tx_bytes) }, | 47 | { "tx_bytes", E1000_STAT(net_stats.tx_bytes) }, |
61 | { "rx_errors", E1000_STAT(net_stats.rx_errors) }, | 48 | { "rx_errors", E1000_STAT(net_stats.rx_errors) }, |
62 | { "tx_errors", E1000_STAT(net_stats.tx_errors) }, | 49 | { "tx_errors", E1000_STAT(net_stats.tx_errors) }, |
63 | { "rx_dropped", E1000_STAT(net_stats.rx_dropped) }, | ||
64 | { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, | 50 | { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, |
65 | { "multicast", E1000_STAT(net_stats.multicast) }, | 51 | { "multicast", E1000_STAT(net_stats.multicast) }, |
66 | { "collisions", E1000_STAT(net_stats.collisions) }, | 52 | { "collisions", E1000_STAT(net_stats.collisions) }, |
@@ -68,7 +54,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
68 | { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, | 54 | { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, |
69 | { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) }, | 55 | { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) }, |
70 | { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, | 56 | { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, |
71 | { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) }, | ||
72 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, | 57 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, |
73 | { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) }, | 58 | { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) }, |
74 | { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) }, | 59 | { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) }, |
@@ -97,14 +82,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
97 | { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, | 82 | { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, |
98 | }; | 83 | }; |
99 | 84 | ||
100 | #ifdef CONFIG_E1000_MQ | ||
101 | #define E1000_QUEUE_STATS_LEN \ | ||
102 | (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \ | ||
103 | ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \ | ||
104 | * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t)) | ||
105 | #else | ||
106 | #define E1000_QUEUE_STATS_LEN 0 | 85 | #define E1000_QUEUE_STATS_LEN 0 |
107 | #endif | ||
108 | #define E1000_GLOBAL_STATS_LEN \ | 86 | #define E1000_GLOBAL_STATS_LEN \ |
109 | sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) | 87 | sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) |
110 | #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) | 88 | #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) |
@@ -346,6 +324,9 @@ e1000_set_tso(struct net_device *netdev, uint32_t data) | |||
346 | netdev->features |= NETIF_F_TSO; | 324 | netdev->features |= NETIF_F_TSO; |
347 | else | 325 | else |
348 | netdev->features &= ~NETIF_F_TSO; | 326 | netdev->features &= ~NETIF_F_TSO; |
327 | |||
328 | DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); | ||
329 | adapter->tso_force = TRUE; | ||
349 | return 0; | 330 | return 0; |
350 | } | 331 | } |
351 | #endif /* NETIF_F_TSO */ | 332 | #endif /* NETIF_F_TSO */ |
@@ -594,6 +575,7 @@ e1000_get_drvinfo(struct net_device *netdev, | |||
594 | case e1000_82571: | 575 | case e1000_82571: |
595 | case e1000_82572: | 576 | case e1000_82572: |
596 | case e1000_82573: | 577 | case e1000_82573: |
578 | case e1000_80003es2lan: | ||
597 | sprintf(firmware_version, "%d.%d-%d", | 579 | sprintf(firmware_version, "%d.%d-%d", |
598 | (eeprom_data & 0xF000) >> 12, | 580 | (eeprom_data & 0xF000) >> 12, |
599 | (eeprom_data & 0x0FF0) >> 4, | 581 | (eeprom_data & 0x0FF0) >> 4, |
@@ -642,6 +624,9 @@ e1000_set_ringparam(struct net_device *netdev, | |||
642 | struct e1000_rx_ring *rxdr, *rx_old, *rx_new; | 624 | struct e1000_rx_ring *rxdr, *rx_old, *rx_new; |
643 | int i, err, tx_ring_size, rx_ring_size; | 625 | int i, err, tx_ring_size, rx_ring_size; |
644 | 626 | ||
627 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | ||
628 | return -EINVAL; | ||
629 | |||
645 | tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; | 630 | tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; |
646 | rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; | 631 | rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; |
647 | 632 | ||
@@ -669,9 +654,6 @@ e1000_set_ringparam(struct net_device *netdev, | |||
669 | txdr = adapter->tx_ring; | 654 | txdr = adapter->tx_ring; |
670 | rxdr = adapter->rx_ring; | 655 | rxdr = adapter->rx_ring; |
671 | 656 | ||
672 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | ||
673 | return -EINVAL; | ||
674 | |||
675 | rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); | 657 | rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); |
676 | rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? | 658 | rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? |
677 | E1000_MAX_RXD : E1000_MAX_82544_RXD)); | 659 | E1000_MAX_RXD : E1000_MAX_82544_RXD)); |
@@ -767,6 +749,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
767 | /* there are several bits on newer hardware that are r/w */ | 749 | /* there are several bits on newer hardware that are r/w */ |
768 | case e1000_82571: | 750 | case e1000_82571: |
769 | case e1000_82572: | 751 | case e1000_82572: |
752 | case e1000_80003es2lan: | ||
770 | toggle = 0x7FFFF3FF; | 753 | toggle = 0x7FFFF3FF; |
771 | break; | 754 | break; |
772 | case e1000_82573: | 755 | case e1000_82573: |
@@ -1256,6 +1239,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1256 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); | 1239 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); |
1257 | /* autoneg off */ | 1240 | /* autoneg off */ |
1258 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); | 1241 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); |
1242 | } else if (adapter->hw.phy_type == e1000_phy_gg82563) { | ||
1243 | e1000_write_phy_reg(&adapter->hw, | ||
1244 | GG82563_PHY_KMRN_MODE_CTRL, | ||
1245 | 0x1CE); | ||
1259 | } | 1246 | } |
1260 | /* force 1000, set loopback */ | 1247 | /* force 1000, set loopback */ |
1261 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); | 1248 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); |
@@ -1325,6 +1312,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) | |||
1325 | case e1000_82571: | 1312 | case e1000_82571: |
1326 | case e1000_82572: | 1313 | case e1000_82572: |
1327 | case e1000_82573: | 1314 | case e1000_82573: |
1315 | case e1000_80003es2lan: | ||
1328 | return e1000_integrated_phy_loopback(adapter); | 1316 | return e1000_integrated_phy_loopback(adapter); |
1329 | break; | 1317 | break; |
1330 | 1318 | ||
@@ -1405,6 +1393,11 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter) | |||
1405 | case e1000_82546_rev_3: | 1393 | case e1000_82546_rev_3: |
1406 | default: | 1394 | default: |
1407 | hw->autoneg = TRUE; | 1395 | hw->autoneg = TRUE; |
1396 | if (hw->phy_type == e1000_phy_gg82563) { | ||
1397 | e1000_write_phy_reg(hw, | ||
1398 | GG82563_PHY_KMRN_MODE_CTRL, | ||
1399 | 0x180); | ||
1400 | } | ||
1408 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); | 1401 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); |
1409 | if (phy_reg & MII_CR_LOOPBACK) { | 1402 | if (phy_reg & MII_CR_LOOPBACK) { |
1410 | phy_reg &= ~MII_CR_LOOPBACK; | 1403 | phy_reg &= ~MII_CR_LOOPBACK; |
@@ -1640,10 +1633,26 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1640 | case E1000_DEV_ID_82546EB_QUAD_COPPER: | 1633 | case E1000_DEV_ID_82546EB_QUAD_COPPER: |
1641 | case E1000_DEV_ID_82545EM_FIBER: | 1634 | case E1000_DEV_ID_82545EM_FIBER: |
1642 | case E1000_DEV_ID_82545EM_COPPER: | 1635 | case E1000_DEV_ID_82545EM_COPPER: |
1636 | case E1000_DEV_ID_82546GB_QUAD_COPPER: | ||
1643 | wol->supported = 0; | 1637 | wol->supported = 0; |
1644 | wol->wolopts = 0; | 1638 | wol->wolopts = 0; |
1645 | return; | 1639 | return; |
1646 | 1640 | ||
1641 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | ||
1642 | /* device id 10B5 port-A supports wol */ | ||
1643 | if (!adapter->ksp3_port_a) { | ||
1644 | wol->supported = 0; | ||
1645 | return; | ||
1646 | } | ||
1647 | /* KSP3 does not suppport UCAST wake-ups for any interface */ | ||
1648 | wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; | ||
1649 | |||
1650 | if (adapter->wol & E1000_WUFC_EX) | ||
1651 | DPRINTK(DRV, ERR, "Interface does not support " | ||
1652 | "directed (unicast) frame wake-up packets\n"); | ||
1653 | wol->wolopts = 0; | ||
1654 | goto do_defaults; | ||
1655 | |||
1647 | case E1000_DEV_ID_82546EB_FIBER: | 1656 | case E1000_DEV_ID_82546EB_FIBER: |
1648 | case E1000_DEV_ID_82546GB_FIBER: | 1657 | case E1000_DEV_ID_82546GB_FIBER: |
1649 | case E1000_DEV_ID_82571EB_FIBER: | 1658 | case E1000_DEV_ID_82571EB_FIBER: |
@@ -1658,8 +1667,9 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1658 | default: | 1667 | default: |
1659 | wol->supported = WAKE_UCAST | WAKE_MCAST | | 1668 | wol->supported = WAKE_UCAST | WAKE_MCAST | |
1660 | WAKE_BCAST | WAKE_MAGIC; | 1669 | WAKE_BCAST | WAKE_MAGIC; |
1661 | |||
1662 | wol->wolopts = 0; | 1670 | wol->wolopts = 0; |
1671 | |||
1672 | do_defaults: | ||
1663 | if (adapter->wol & E1000_WUFC_EX) | 1673 | if (adapter->wol & E1000_WUFC_EX) |
1664 | wol->wolopts |= WAKE_UCAST; | 1674 | wol->wolopts |= WAKE_UCAST; |
1665 | if (adapter->wol & E1000_WUFC_MC) | 1675 | if (adapter->wol & E1000_WUFC_MC) |
@@ -1684,10 +1694,22 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1684 | case E1000_DEV_ID_82543GC_COPPER: | 1694 | case E1000_DEV_ID_82543GC_COPPER: |
1685 | case E1000_DEV_ID_82544EI_FIBER: | 1695 | case E1000_DEV_ID_82544EI_FIBER: |
1686 | case E1000_DEV_ID_82546EB_QUAD_COPPER: | 1696 | case E1000_DEV_ID_82546EB_QUAD_COPPER: |
1697 | case E1000_DEV_ID_82546GB_QUAD_COPPER: | ||
1687 | case E1000_DEV_ID_82545EM_FIBER: | 1698 | case E1000_DEV_ID_82545EM_FIBER: |
1688 | case E1000_DEV_ID_82545EM_COPPER: | 1699 | case E1000_DEV_ID_82545EM_COPPER: |
1689 | return wol->wolopts ? -EOPNOTSUPP : 0; | 1700 | return wol->wolopts ? -EOPNOTSUPP : 0; |
1690 | 1701 | ||
1702 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | ||
1703 | /* device id 10B5 port-A supports wol */ | ||
1704 | if (!adapter->ksp3_port_a) | ||
1705 | return wol->wolopts ? -EOPNOTSUPP : 0; | ||
1706 | |||
1707 | if (wol->wolopts & WAKE_UCAST) { | ||
1708 | DPRINTK(DRV, ERR, "Interface does not support " | ||
1709 | "directed (unicast) frame wake-up packets\n"); | ||
1710 | return -EOPNOTSUPP; | ||
1711 | } | ||
1712 | |||
1691 | case E1000_DEV_ID_82546EB_FIBER: | 1713 | case E1000_DEV_ID_82546EB_FIBER: |
1692 | case E1000_DEV_ID_82546GB_FIBER: | 1714 | case E1000_DEV_ID_82546GB_FIBER: |
1693 | case E1000_DEV_ID_82571EB_FIBER: | 1715 | case E1000_DEV_ID_82571EB_FIBER: |
@@ -1799,11 +1821,6 @@ e1000_get_ethtool_stats(struct net_device *netdev, | |||
1799 | struct ethtool_stats *stats, uint64_t *data) | 1821 | struct ethtool_stats *stats, uint64_t *data) |
1800 | { | 1822 | { |
1801 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1823 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1802 | #ifdef CONFIG_E1000_MQ | ||
1803 | uint64_t *queue_stat; | ||
1804 | int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t); | ||
1805 | int j, k; | ||
1806 | #endif | ||
1807 | int i; | 1824 | int i; |
1808 | 1825 | ||
1809 | e1000_update_stats(adapter); | 1826 | e1000_update_stats(adapter); |
@@ -1812,29 +1829,12 @@ e1000_get_ethtool_stats(struct net_device *netdev, | |||
1812 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == | 1829 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == |
1813 | sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; | 1830 | sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; |
1814 | } | 1831 | } |
1815 | #ifdef CONFIG_E1000_MQ | ||
1816 | for (j = 0; j < adapter->num_tx_queues; j++) { | ||
1817 | queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats; | ||
1818 | for (k = 0; k < stat_count; k++) | ||
1819 | data[i + k] = queue_stat[k]; | ||
1820 | i += k; | ||
1821 | } | ||
1822 | for (j = 0; j < adapter->num_rx_queues; j++) { | ||
1823 | queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats; | ||
1824 | for (k = 0; k < stat_count; k++) | ||
1825 | data[i + k] = queue_stat[k]; | ||
1826 | i += k; | ||
1827 | } | ||
1828 | #endif | ||
1829 | /* BUG_ON(i != E1000_STATS_LEN); */ | 1832 | /* BUG_ON(i != E1000_STATS_LEN); */ |
1830 | } | 1833 | } |
1831 | 1834 | ||
1832 | static void | 1835 | static void |
1833 | e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | 1836 | e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) |
1834 | { | 1837 | { |
1835 | #ifdef CONFIG_E1000_MQ | ||
1836 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1837 | #endif | ||
1838 | uint8_t *p = data; | 1838 | uint8_t *p = data; |
1839 | int i; | 1839 | int i; |
1840 | 1840 | ||
@@ -1849,20 +1849,6 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | |||
1849 | ETH_GSTRING_LEN); | 1849 | ETH_GSTRING_LEN); |
1850 | p += ETH_GSTRING_LEN; | 1850 | p += ETH_GSTRING_LEN; |
1851 | } | 1851 | } |
1852 | #ifdef CONFIG_E1000_MQ | ||
1853 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
1854 | sprintf(p, "tx_queue_%u_packets", i); | ||
1855 | p += ETH_GSTRING_LEN; | ||
1856 | sprintf(p, "tx_queue_%u_bytes", i); | ||
1857 | p += ETH_GSTRING_LEN; | ||
1858 | } | ||
1859 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1860 | sprintf(p, "rx_queue_%u_packets", i); | ||
1861 | p += ETH_GSTRING_LEN; | ||
1862 | sprintf(p, "rx_queue_%u_bytes", i); | ||
1863 | p += ETH_GSTRING_LEN; | ||
1864 | } | ||
1865 | #endif | ||
1866 | /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ | 1852 | /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ |
1867 | break; | 1853 | break; |
1868 | } | 1854 | } |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index beeec0fbbeac..523c2c9fc0ac 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -100,6 +100,8 @@ static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, | |||
100 | 100 | ||
101 | #define E1000_WRITE_REG_IO(a, reg, val) \ | 101 | #define E1000_WRITE_REG_IO(a, reg, val) \ |
102 | e1000_write_reg_io((a), E1000_##reg, val) | 102 | e1000_write_reg_io((a), E1000_##reg, val) |
103 | static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw); | ||
104 | static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); | ||
103 | 105 | ||
104 | /* IGP cable length table */ | 106 | /* IGP cable length table */ |
105 | static const | 107 | static const |
@@ -153,6 +155,11 @@ e1000_set_phy_type(struct e1000_hw *hw) | |||
153 | hw->phy_type = e1000_phy_igp; | 155 | hw->phy_type = e1000_phy_igp; |
154 | break; | 156 | break; |
155 | } | 157 | } |
158 | case GG82563_E_PHY_ID: | ||
159 | if (hw->mac_type == e1000_80003es2lan) { | ||
160 | hw->phy_type = e1000_phy_gg82563; | ||
161 | break; | ||
162 | } | ||
156 | /* Fall Through */ | 163 | /* Fall Through */ |
157 | default: | 164 | default: |
158 | /* Should never have loaded on this device */ | 165 | /* Should never have loaded on this device */ |
@@ -353,12 +360,19 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
353 | case E1000_DEV_ID_82573L: | 360 | case E1000_DEV_ID_82573L: |
354 | hw->mac_type = e1000_82573; | 361 | hw->mac_type = e1000_82573; |
355 | break; | 362 | break; |
363 | case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: | ||
364 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: | ||
365 | hw->mac_type = e1000_80003es2lan; | ||
366 | break; | ||
356 | default: | 367 | default: |
357 | /* Should never have loaded on this device */ | 368 | /* Should never have loaded on this device */ |
358 | return -E1000_ERR_MAC_TYPE; | 369 | return -E1000_ERR_MAC_TYPE; |
359 | } | 370 | } |
360 | 371 | ||
361 | switch(hw->mac_type) { | 372 | switch(hw->mac_type) { |
373 | case e1000_80003es2lan: | ||
374 | hw->swfw_sync_present = TRUE; | ||
375 | /* fall through */ | ||
362 | case e1000_82571: | 376 | case e1000_82571: |
363 | case e1000_82572: | 377 | case e1000_82572: |
364 | case e1000_82573: | 378 | case e1000_82573: |
@@ -399,6 +413,7 @@ e1000_set_media_type(struct e1000_hw *hw) | |||
399 | case E1000_DEV_ID_82546GB_SERDES: | 413 | case E1000_DEV_ID_82546GB_SERDES: |
400 | case E1000_DEV_ID_82571EB_SERDES: | 414 | case E1000_DEV_ID_82571EB_SERDES: |
401 | case E1000_DEV_ID_82572EI_SERDES: | 415 | case E1000_DEV_ID_82572EI_SERDES: |
416 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: | ||
402 | hw->media_type = e1000_media_type_internal_serdes; | 417 | hw->media_type = e1000_media_type_internal_serdes; |
403 | break; | 418 | break; |
404 | default: | 419 | default: |
@@ -575,6 +590,7 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
575 | /* fall through */ | 590 | /* fall through */ |
576 | case e1000_82571: | 591 | case e1000_82571: |
577 | case e1000_82572: | 592 | case e1000_82572: |
593 | case e1000_80003es2lan: | ||
578 | ret_val = e1000_get_auto_rd_done(hw); | 594 | ret_val = e1000_get_auto_rd_done(hw); |
579 | if(ret_val) | 595 | if(ret_val) |
580 | /* We don't want to continue accessing MAC registers. */ | 596 | /* We don't want to continue accessing MAC registers. */ |
@@ -641,6 +657,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
641 | uint16_t cmd_mmrbc; | 657 | uint16_t cmd_mmrbc; |
642 | uint16_t stat_mmrbc; | 658 | uint16_t stat_mmrbc; |
643 | uint32_t mta_size; | 659 | uint32_t mta_size; |
660 | uint32_t reg_data; | ||
644 | uint32_t ctrl_ext; | 661 | uint32_t ctrl_ext; |
645 | 662 | ||
646 | DEBUGFUNC("e1000_init_hw"); | 663 | DEBUGFUNC("e1000_init_hw"); |
@@ -739,6 +756,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
739 | case e1000_82571: | 756 | case e1000_82571: |
740 | case e1000_82572: | 757 | case e1000_82572: |
741 | case e1000_82573: | 758 | case e1000_82573: |
759 | case e1000_80003es2lan: | ||
742 | ctrl |= E1000_TXDCTL_COUNT_DESC; | 760 | ctrl |= E1000_TXDCTL_COUNT_DESC; |
743 | break; | 761 | break; |
744 | } | 762 | } |
@@ -752,12 +770,34 @@ e1000_init_hw(struct e1000_hw *hw) | |||
752 | switch (hw->mac_type) { | 770 | switch (hw->mac_type) { |
753 | default: | 771 | default: |
754 | break; | 772 | break; |
773 | case e1000_80003es2lan: | ||
774 | /* Enable retransmit on late collisions */ | ||
775 | reg_data = E1000_READ_REG(hw, TCTL); | ||
776 | reg_data |= E1000_TCTL_RTLC; | ||
777 | E1000_WRITE_REG(hw, TCTL, reg_data); | ||
778 | |||
779 | /* Configure Gigabit Carry Extend Padding */ | ||
780 | reg_data = E1000_READ_REG(hw, TCTL_EXT); | ||
781 | reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; | ||
782 | reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; | ||
783 | E1000_WRITE_REG(hw, TCTL_EXT, reg_data); | ||
784 | |||
785 | /* Configure Transmit Inter-Packet Gap */ | ||
786 | reg_data = E1000_READ_REG(hw, TIPG); | ||
787 | reg_data &= ~E1000_TIPG_IPGT_MASK; | ||
788 | reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | ||
789 | E1000_WRITE_REG(hw, TIPG, reg_data); | ||
790 | |||
791 | reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001); | ||
792 | reg_data &= ~0x00100000; | ||
793 | E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data); | ||
794 | /* Fall through */ | ||
755 | case e1000_82571: | 795 | case e1000_82571: |
756 | case e1000_82572: | 796 | case e1000_82572: |
757 | ctrl = E1000_READ_REG(hw, TXDCTL1); | 797 | ctrl = E1000_READ_REG(hw, TXDCTL1); |
758 | ctrl &= ~E1000_TXDCTL_WTHRESH; | 798 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; |
759 | ctrl |= E1000_TXDCTL_COUNT_DESC | E1000_TXDCTL_FULL_TX_DESC_WB; | 799 | if(hw->mac_type >= e1000_82571) |
760 | ctrl |= (1 << 22); | 800 | ctrl |= E1000_TXDCTL_COUNT_DESC; |
761 | E1000_WRITE_REG(hw, TXDCTL1, ctrl); | 801 | E1000_WRITE_REG(hw, TXDCTL1, ctrl); |
762 | break; | 802 | break; |
763 | } | 803 | } |
@@ -906,7 +946,13 @@ e1000_setup_link(struct e1000_hw *hw) | |||
906 | * signal detection. So this should be done before e1000_setup_pcs_link() | 946 | * signal detection. So this should be done before e1000_setup_pcs_link() |
907 | * or e1000_phy_setup() is called. | 947 | * or e1000_phy_setup() is called. |
908 | */ | 948 | */ |
909 | if(hw->mac_type == e1000_82543) { | 949 | if (hw->mac_type == e1000_82543) { |
950 | ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, | ||
951 | 1, &eeprom_data); | ||
952 | if (ret_val) { | ||
953 | DEBUGOUT("EEPROM Read Error\n"); | ||
954 | return -E1000_ERR_EEPROM; | ||
955 | } | ||
910 | ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << | 956 | ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << |
911 | SWDPIO__EXT_SHIFT); | 957 | SWDPIO__EXT_SHIFT); |
912 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 958 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); |
@@ -1308,6 +1354,154 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) | |||
1308 | return E1000_SUCCESS; | 1354 | return E1000_SUCCESS; |
1309 | } | 1355 | } |
1310 | 1356 | ||
1357 | /******************************************************************** | ||
1358 | * Copper link setup for e1000_phy_gg82563 series. | ||
1359 | * | ||
1360 | * hw - Struct containing variables accessed by shared code | ||
1361 | *********************************************************************/ | ||
1362 | static int32_t | ||
1363 | e1000_copper_link_ggp_setup(struct e1000_hw *hw) | ||
1364 | { | ||
1365 | int32_t ret_val; | ||
1366 | uint16_t phy_data; | ||
1367 | uint32_t reg_data; | ||
1368 | |||
1369 | DEBUGFUNC("e1000_copper_link_ggp_setup"); | ||
1370 | |||
1371 | if(!hw->phy_reset_disable) { | ||
1372 | |||
1373 | /* Enable CRS on TX for half-duplex operation. */ | ||
1374 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, | ||
1375 | &phy_data); | ||
1376 | if(ret_val) | ||
1377 | return ret_val; | ||
1378 | |||
1379 | phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; | ||
1380 | /* Use 25MHz for both link down and 1000BASE-T for Tx clock */ | ||
1381 | phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ; | ||
1382 | |||
1383 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, | ||
1384 | phy_data); | ||
1385 | if(ret_val) | ||
1386 | return ret_val; | ||
1387 | |||
1388 | /* Options: | ||
1389 | * MDI/MDI-X = 0 (default) | ||
1390 | * 0 - Auto for all speeds | ||
1391 | * 1 - MDI mode | ||
1392 | * 2 - MDI-X mode | ||
1393 | * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) | ||
1394 | */ | ||
1395 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data); | ||
1396 | if(ret_val) | ||
1397 | return ret_val; | ||
1398 | |||
1399 | phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; | ||
1400 | |||
1401 | switch (hw->mdix) { | ||
1402 | case 1: | ||
1403 | phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI; | ||
1404 | break; | ||
1405 | case 2: | ||
1406 | phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; | ||
1407 | break; | ||
1408 | case 0: | ||
1409 | default: | ||
1410 | phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; | ||
1411 | break; | ||
1412 | } | ||
1413 | |||
1414 | /* Options: | ||
1415 | * disable_polarity_correction = 0 (default) | ||
1416 | * Automatic Correction for Reversed Cable Polarity | ||
1417 | * 0 - Disabled | ||
1418 | * 1 - Enabled | ||
1419 | */ | ||
1420 | phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; | ||
1421 | if(hw->disable_polarity_correction == 1) | ||
1422 | phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; | ||
1423 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); | ||
1424 | |||
1425 | if(ret_val) | ||
1426 | return ret_val; | ||
1427 | |||
1428 | /* SW Reset the PHY so all changes take effect */ | ||
1429 | ret_val = e1000_phy_reset(hw); | ||
1430 | if (ret_val) { | ||
1431 | DEBUGOUT("Error Resetting the PHY\n"); | ||
1432 | return ret_val; | ||
1433 | } | ||
1434 | } /* phy_reset_disable */ | ||
1435 | |||
1436 | if (hw->mac_type == e1000_80003es2lan) { | ||
1437 | /* Bypass RX and TX FIFO's */ | ||
1438 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL, | ||
1439 | E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | | ||
1440 | E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); | ||
1441 | if (ret_val) | ||
1442 | return ret_val; | ||
1443 | |||
1444 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data); | ||
1445 | if (ret_val) | ||
1446 | return ret_val; | ||
1447 | |||
1448 | phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; | ||
1449 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data); | ||
1450 | |||
1451 | if (ret_val) | ||
1452 | return ret_val; | ||
1453 | |||
1454 | reg_data = E1000_READ_REG(hw, CTRL_EXT); | ||
1455 | reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); | ||
1456 | E1000_WRITE_REG(hw, CTRL_EXT, reg_data); | ||
1457 | |||
1458 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, | ||
1459 | &phy_data); | ||
1460 | if (ret_val) | ||
1461 | return ret_val; | ||
1462 | |||
1463 | /* Do not init these registers when the HW is in IAMT mode, since the | ||
1464 | * firmware will have already initialized them. We only initialize | ||
1465 | * them if the HW is not in IAMT mode. | ||
1466 | */ | ||
1467 | if (e1000_check_mng_mode(hw) == FALSE) { | ||
1468 | /* Enable Electrical Idle on the PHY */ | ||
1469 | phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; | ||
1470 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, | ||
1471 | phy_data); | ||
1472 | if (ret_val) | ||
1473 | return ret_val; | ||
1474 | |||
1475 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, | ||
1476 | &phy_data); | ||
1477 | if (ret_val) | ||
1478 | return ret_val; | ||
1479 | |||
1480 | /* Enable Pass False Carrier on the PHY */ | ||
1481 | phy_data |= GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1482 | |||
1483 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, | ||
1484 | phy_data); | ||
1485 | if (ret_val) | ||
1486 | return ret_val; | ||
1487 | } | ||
1488 | |||
1489 | /* Workaround: Disable padding in Kumeran interface in the MAC | ||
1490 | * and in the PHY to avoid CRC errors. | ||
1491 | */ | ||
1492 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL, | ||
1493 | &phy_data); | ||
1494 | if (ret_val) | ||
1495 | return ret_val; | ||
1496 | phy_data |= GG82563_ICR_DIS_PADDING; | ||
1497 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL, | ||
1498 | phy_data); | ||
1499 | if (ret_val) | ||
1500 | return ret_val; | ||
1501 | } | ||
1502 | |||
1503 | return E1000_SUCCESS; | ||
1504 | } | ||
1311 | 1505 | ||
1312 | /******************************************************************** | 1506 | /******************************************************************** |
1313 | * Copper link setup for e1000_phy_m88 series. | 1507 | * Copper link setup for e1000_phy_m88 series. |
@@ -1518,6 +1712,7 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1518 | int32_t ret_val; | 1712 | int32_t ret_val; |
1519 | uint16_t i; | 1713 | uint16_t i; |
1520 | uint16_t phy_data; | 1714 | uint16_t phy_data; |
1715 | uint16_t reg_data; | ||
1521 | 1716 | ||
1522 | DEBUGFUNC("e1000_setup_copper_link"); | 1717 | DEBUGFUNC("e1000_setup_copper_link"); |
1523 | 1718 | ||
@@ -1526,6 +1721,22 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1526 | if(ret_val) | 1721 | if(ret_val) |
1527 | return ret_val; | 1722 | return ret_val; |
1528 | 1723 | ||
1724 | switch (hw->mac_type) { | ||
1725 | case e1000_80003es2lan: | ||
1726 | ret_val = e1000_read_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, | ||
1727 | ®_data); | ||
1728 | if (ret_val) | ||
1729 | return ret_val; | ||
1730 | reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; | ||
1731 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, | ||
1732 | reg_data); | ||
1733 | if (ret_val) | ||
1734 | return ret_val; | ||
1735 | break; | ||
1736 | default: | ||
1737 | break; | ||
1738 | } | ||
1739 | |||
1529 | if (hw->phy_type == e1000_phy_igp || | 1740 | if (hw->phy_type == e1000_phy_igp || |
1530 | hw->phy_type == e1000_phy_igp_2) { | 1741 | hw->phy_type == e1000_phy_igp_2) { |
1531 | ret_val = e1000_copper_link_igp_setup(hw); | 1742 | ret_val = e1000_copper_link_igp_setup(hw); |
@@ -1535,6 +1746,10 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1535 | ret_val = e1000_copper_link_mgp_setup(hw); | 1746 | ret_val = e1000_copper_link_mgp_setup(hw); |
1536 | if(ret_val) | 1747 | if(ret_val) |
1537 | return ret_val; | 1748 | return ret_val; |
1749 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
1750 | ret_val = e1000_copper_link_ggp_setup(hw); | ||
1751 | if(ret_val) | ||
1752 | return ret_val; | ||
1538 | } | 1753 | } |
1539 | 1754 | ||
1540 | if(hw->autoneg) { | 1755 | if(hw->autoneg) { |
@@ -1582,6 +1797,59 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1582 | } | 1797 | } |
1583 | 1798 | ||
1584 | /****************************************************************************** | 1799 | /****************************************************************************** |
1800 | * Configure the MAC-to-PHY interface for 10/100Mbps | ||
1801 | * | ||
1802 | * hw - Struct containing variables accessed by shared code | ||
1803 | ******************************************************************************/ | ||
1804 | static int32_t | ||
1805 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw) | ||
1806 | { | ||
1807 | int32_t ret_val = E1000_SUCCESS; | ||
1808 | uint32_t tipg; | ||
1809 | uint16_t reg_data; | ||
1810 | |||
1811 | DEBUGFUNC("e1000_configure_kmrn_for_10_100"); | ||
1812 | |||
1813 | reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT; | ||
1814 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, | ||
1815 | reg_data); | ||
1816 | if (ret_val) | ||
1817 | return ret_val; | ||
1818 | |||
1819 | /* Configure Transmit Inter-Packet Gap */ | ||
1820 | tipg = E1000_READ_REG(hw, TIPG); | ||
1821 | tipg &= ~E1000_TIPG_IPGT_MASK; | ||
1822 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; | ||
1823 | E1000_WRITE_REG(hw, TIPG, tipg); | ||
1824 | |||
1825 | return ret_val; | ||
1826 | } | ||
1827 | |||
1828 | static int32_t | ||
1829 | e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | ||
1830 | { | ||
1831 | int32_t ret_val = E1000_SUCCESS; | ||
1832 | uint16_t reg_data; | ||
1833 | uint32_t tipg; | ||
1834 | |||
1835 | DEBUGFUNC("e1000_configure_kmrn_for_1000"); | ||
1836 | |||
1837 | reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT; | ||
1838 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, | ||
1839 | reg_data); | ||
1840 | if (ret_val) | ||
1841 | return ret_val; | ||
1842 | |||
1843 | /* Configure Transmit Inter-Packet Gap */ | ||
1844 | tipg = E1000_READ_REG(hw, TIPG); | ||
1845 | tipg &= ~E1000_TIPG_IPGT_MASK; | ||
1846 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | ||
1847 | E1000_WRITE_REG(hw, TIPG, tipg); | ||
1848 | |||
1849 | return ret_val; | ||
1850 | } | ||
1851 | |||
1852 | /****************************************************************************** | ||
1585 | * Configures PHY autoneg and flow control advertisement settings | 1853 | * Configures PHY autoneg and flow control advertisement settings |
1586 | * | 1854 | * |
1587 | * hw - Struct containing variables accessed by shared code | 1855 | * hw - Struct containing variables accessed by shared code |
@@ -1802,7 +2070,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1802 | /* Write the configured values back to the Device Control Reg. */ | 2070 | /* Write the configured values back to the Device Control Reg. */ |
1803 | E1000_WRITE_REG(hw, CTRL, ctrl); | 2071 | E1000_WRITE_REG(hw, CTRL, ctrl); |
1804 | 2072 | ||
1805 | if (hw->phy_type == e1000_phy_m88) { | 2073 | if ((hw->phy_type == e1000_phy_m88) || |
2074 | (hw->phy_type == e1000_phy_gg82563)) { | ||
1806 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 2075 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
1807 | if(ret_val) | 2076 | if(ret_val) |
1808 | return ret_val; | 2077 | return ret_val; |
@@ -1871,7 +2140,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1871 | msec_delay(100); | 2140 | msec_delay(100); |
1872 | } | 2141 | } |
1873 | if((i == 0) && | 2142 | if((i == 0) && |
1874 | (hw->phy_type == e1000_phy_m88)) { | 2143 | ((hw->phy_type == e1000_phy_m88) || |
2144 | (hw->phy_type == e1000_phy_gg82563))) { | ||
1875 | /* We didn't get link. Reset the DSP and wait again for link. */ | 2145 | /* We didn't get link. Reset the DSP and wait again for link. */ |
1876 | ret_val = e1000_phy_reset_dsp(hw); | 2146 | ret_val = e1000_phy_reset_dsp(hw); |
1877 | if(ret_val) { | 2147 | if(ret_val) { |
@@ -1930,6 +2200,27 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1930 | if(ret_val) | 2200 | if(ret_val) |
1931 | return ret_val; | 2201 | return ret_val; |
1932 | } | 2202 | } |
2203 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
2204 | /* The TX_CLK of the Extended PHY Specific Control Register defaults | ||
2205 | * to 2.5MHz on a reset. We need to re-force it back to 25MHz, if | ||
2206 | * we're not in a forced 10/duplex configuration. */ | ||
2207 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); | ||
2208 | if (ret_val) | ||
2209 | return ret_val; | ||
2210 | |||
2211 | phy_data &= ~GG82563_MSCR_TX_CLK_MASK; | ||
2212 | if ((hw->forced_speed_duplex == e1000_10_full) || | ||
2213 | (hw->forced_speed_duplex == e1000_10_half)) | ||
2214 | phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ; | ||
2215 | else | ||
2216 | phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ; | ||
2217 | |||
2218 | /* Also due to the reset, we need to enable CRS on Tx. */ | ||
2219 | phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; | ||
2220 | |||
2221 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); | ||
2222 | if (ret_val) | ||
2223 | return ret_val; | ||
1933 | } | 2224 | } |
1934 | return E1000_SUCCESS; | 2225 | return E1000_SUCCESS; |
1935 | } | 2226 | } |
@@ -2592,6 +2883,16 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, | |||
2592 | } | 2883 | } |
2593 | } | 2884 | } |
2594 | 2885 | ||
2886 | if ((hw->mac_type == e1000_80003es2lan) && | ||
2887 | (hw->media_type == e1000_media_type_copper)) { | ||
2888 | if (*speed == SPEED_1000) | ||
2889 | ret_val = e1000_configure_kmrn_for_1000(hw); | ||
2890 | else | ||
2891 | ret_val = e1000_configure_kmrn_for_10_100(hw); | ||
2892 | if (ret_val) | ||
2893 | return ret_val; | ||
2894 | } | ||
2895 | |||
2595 | return E1000_SUCCESS; | 2896 | return E1000_SUCCESS; |
2596 | } | 2897 | } |
2597 | 2898 | ||
@@ -2767,6 +3068,72 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
2767 | return data; | 3068 | return data; |
2768 | } | 3069 | } |
2769 | 3070 | ||
3071 | int32_t | ||
3072 | e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) | ||
3073 | { | ||
3074 | uint32_t swfw_sync = 0; | ||
3075 | uint32_t swmask = mask; | ||
3076 | uint32_t fwmask = mask << 16; | ||
3077 | int32_t timeout = 200; | ||
3078 | |||
3079 | DEBUGFUNC("e1000_swfw_sync_acquire"); | ||
3080 | |||
3081 | if (!hw->swfw_sync_present) | ||
3082 | return e1000_get_hw_eeprom_semaphore(hw); | ||
3083 | |||
3084 | while(timeout) { | ||
3085 | if (e1000_get_hw_eeprom_semaphore(hw)) | ||
3086 | return -E1000_ERR_SWFW_SYNC; | ||
3087 | |||
3088 | swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); | ||
3089 | if (!(swfw_sync & (fwmask | swmask))) { | ||
3090 | break; | ||
3091 | } | ||
3092 | |||
3093 | /* firmware currently using resource (fwmask) */ | ||
3094 | /* or other software thread currently using resource (swmask) */ | ||
3095 | e1000_put_hw_eeprom_semaphore(hw); | ||
3096 | msec_delay_irq(5); | ||
3097 | timeout--; | ||
3098 | } | ||
3099 | |||
3100 | if (!timeout) { | ||
3101 | DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); | ||
3102 | return -E1000_ERR_SWFW_SYNC; | ||
3103 | } | ||
3104 | |||
3105 | swfw_sync |= swmask; | ||
3106 | E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); | ||
3107 | |||
3108 | e1000_put_hw_eeprom_semaphore(hw); | ||
3109 | return E1000_SUCCESS; | ||
3110 | } | ||
3111 | |||
3112 | void | ||
3113 | e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) | ||
3114 | { | ||
3115 | uint32_t swfw_sync; | ||
3116 | uint32_t swmask = mask; | ||
3117 | |||
3118 | DEBUGFUNC("e1000_swfw_sync_release"); | ||
3119 | |||
3120 | if (!hw->swfw_sync_present) { | ||
3121 | e1000_put_hw_eeprom_semaphore(hw); | ||
3122 | return; | ||
3123 | } | ||
3124 | |||
3125 | /* if (e1000_get_hw_eeprom_semaphore(hw)) | ||
3126 | * return -E1000_ERR_SWFW_SYNC; */ | ||
3127 | while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); | ||
3128 | /* empty */ | ||
3129 | |||
3130 | swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); | ||
3131 | swfw_sync &= ~swmask; | ||
3132 | E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); | ||
3133 | |||
3134 | e1000_put_hw_eeprom_semaphore(hw); | ||
3135 | } | ||
3136 | |||
2770 | /***************************************************************************** | 3137 | /***************************************************************************** |
2771 | * Reads the value from a PHY register, if the value is on a specific non zero | 3138 | * Reads the value from a PHY register, if the value is on a specific non zero |
2772 | * page, sets the page first. | 3139 | * page, sets the page first. |
@@ -2779,22 +3146,55 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
2779 | uint16_t *phy_data) | 3146 | uint16_t *phy_data) |
2780 | { | 3147 | { |
2781 | uint32_t ret_val; | 3148 | uint32_t ret_val; |
3149 | uint16_t swfw; | ||
2782 | 3150 | ||
2783 | DEBUGFUNC("e1000_read_phy_reg"); | 3151 | DEBUGFUNC("e1000_read_phy_reg"); |
2784 | 3152 | ||
3153 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3154 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3155 | swfw = E1000_SWFW_PHY1_SM; | ||
3156 | } else { | ||
3157 | swfw = E1000_SWFW_PHY0_SM; | ||
3158 | } | ||
3159 | if (e1000_swfw_sync_acquire(hw, swfw)) | ||
3160 | return -E1000_ERR_SWFW_SYNC; | ||
3161 | |||
2785 | if((hw->phy_type == e1000_phy_igp || | 3162 | if((hw->phy_type == e1000_phy_igp || |
2786 | hw->phy_type == e1000_phy_igp_2) && | 3163 | hw->phy_type == e1000_phy_igp_2) && |
2787 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3164 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
2788 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3165 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
2789 | (uint16_t)reg_addr); | 3166 | (uint16_t)reg_addr); |
2790 | if(ret_val) { | 3167 | if(ret_val) { |
3168 | e1000_swfw_sync_release(hw, swfw); | ||
2791 | return ret_val; | 3169 | return ret_val; |
2792 | } | 3170 | } |
3171 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
3172 | if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) || | ||
3173 | (hw->mac_type == e1000_80003es2lan)) { | ||
3174 | /* Select Configuration Page */ | ||
3175 | if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | ||
3176 | ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, | ||
3177 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | ||
3178 | } else { | ||
3179 | /* Use Alternative Page Select register to access | ||
3180 | * registers 30 and 31 | ||
3181 | */ | ||
3182 | ret_val = e1000_write_phy_reg_ex(hw, | ||
3183 | GG82563_PHY_PAGE_SELECT_ALT, | ||
3184 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | ||
3185 | } | ||
3186 | |||
3187 | if (ret_val) { | ||
3188 | e1000_swfw_sync_release(hw, swfw); | ||
3189 | return ret_val; | ||
3190 | } | ||
3191 | } | ||
2793 | } | 3192 | } |
2794 | 3193 | ||
2795 | ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, | 3194 | ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, |
2796 | phy_data); | 3195 | phy_data); |
2797 | 3196 | ||
3197 | e1000_swfw_sync_release(hw, swfw); | ||
2798 | return ret_val; | 3198 | return ret_val; |
2799 | } | 3199 | } |
2800 | 3200 | ||
@@ -2885,22 +3285,55 @@ e1000_write_phy_reg(struct e1000_hw *hw, | |||
2885 | uint16_t phy_data) | 3285 | uint16_t phy_data) |
2886 | { | 3286 | { |
2887 | uint32_t ret_val; | 3287 | uint32_t ret_val; |
3288 | uint16_t swfw; | ||
2888 | 3289 | ||
2889 | DEBUGFUNC("e1000_write_phy_reg"); | 3290 | DEBUGFUNC("e1000_write_phy_reg"); |
2890 | 3291 | ||
3292 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3293 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3294 | swfw = E1000_SWFW_PHY1_SM; | ||
3295 | } else { | ||
3296 | swfw = E1000_SWFW_PHY0_SM; | ||
3297 | } | ||
3298 | if (e1000_swfw_sync_acquire(hw, swfw)) | ||
3299 | return -E1000_ERR_SWFW_SYNC; | ||
3300 | |||
2891 | if((hw->phy_type == e1000_phy_igp || | 3301 | if((hw->phy_type == e1000_phy_igp || |
2892 | hw->phy_type == e1000_phy_igp_2) && | 3302 | hw->phy_type == e1000_phy_igp_2) && |
2893 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3303 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
2894 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3304 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
2895 | (uint16_t)reg_addr); | 3305 | (uint16_t)reg_addr); |
2896 | if(ret_val) { | 3306 | if(ret_val) { |
3307 | e1000_swfw_sync_release(hw, swfw); | ||
2897 | return ret_val; | 3308 | return ret_val; |
2898 | } | 3309 | } |
3310 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
3311 | if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) || | ||
3312 | (hw->mac_type == e1000_80003es2lan)) { | ||
3313 | /* Select Configuration Page */ | ||
3314 | if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | ||
3315 | ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, | ||
3316 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | ||
3317 | } else { | ||
3318 | /* Use Alternative Page Select register to access | ||
3319 | * registers 30 and 31 | ||
3320 | */ | ||
3321 | ret_val = e1000_write_phy_reg_ex(hw, | ||
3322 | GG82563_PHY_PAGE_SELECT_ALT, | ||
3323 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | ||
3324 | } | ||
3325 | |||
3326 | if (ret_val) { | ||
3327 | e1000_swfw_sync_release(hw, swfw); | ||
3328 | return ret_val; | ||
3329 | } | ||
3330 | } | ||
2899 | } | 3331 | } |
2900 | 3332 | ||
2901 | ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, | 3333 | ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, |
2902 | phy_data); | 3334 | phy_data); |
2903 | 3335 | ||
3336 | e1000_swfw_sync_release(hw, swfw); | ||
2904 | return ret_val; | 3337 | return ret_val; |
2905 | } | 3338 | } |
2906 | 3339 | ||
@@ -2967,6 +3400,65 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, | |||
2967 | return E1000_SUCCESS; | 3400 | return E1000_SUCCESS; |
2968 | } | 3401 | } |
2969 | 3402 | ||
3403 | int32_t | ||
3404 | e1000_read_kmrn_reg(struct e1000_hw *hw, | ||
3405 | uint32_t reg_addr, | ||
3406 | uint16_t *data) | ||
3407 | { | ||
3408 | uint32_t reg_val; | ||
3409 | uint16_t swfw; | ||
3410 | DEBUGFUNC("e1000_read_kmrn_reg"); | ||
3411 | |||
3412 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3413 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3414 | swfw = E1000_SWFW_PHY1_SM; | ||
3415 | } else { | ||
3416 | swfw = E1000_SWFW_PHY0_SM; | ||
3417 | } | ||
3418 | if (e1000_swfw_sync_acquire(hw, swfw)) | ||
3419 | return -E1000_ERR_SWFW_SYNC; | ||
3420 | |||
3421 | /* Write register address */ | ||
3422 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & | ||
3423 | E1000_KUMCTRLSTA_OFFSET) | | ||
3424 | E1000_KUMCTRLSTA_REN; | ||
3425 | E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); | ||
3426 | udelay(2); | ||
3427 | |||
3428 | /* Read the data returned */ | ||
3429 | reg_val = E1000_READ_REG(hw, KUMCTRLSTA); | ||
3430 | *data = (uint16_t)reg_val; | ||
3431 | |||
3432 | e1000_swfw_sync_release(hw, swfw); | ||
3433 | return E1000_SUCCESS; | ||
3434 | } | ||
3435 | |||
3436 | int32_t | ||
3437 | e1000_write_kmrn_reg(struct e1000_hw *hw, | ||
3438 | uint32_t reg_addr, | ||
3439 | uint16_t data) | ||
3440 | { | ||
3441 | uint32_t reg_val; | ||
3442 | uint16_t swfw; | ||
3443 | DEBUGFUNC("e1000_write_kmrn_reg"); | ||
3444 | |||
3445 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3446 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3447 | swfw = E1000_SWFW_PHY1_SM; | ||
3448 | } else { | ||
3449 | swfw = E1000_SWFW_PHY0_SM; | ||
3450 | } | ||
3451 | if (e1000_swfw_sync_acquire(hw, swfw)) | ||
3452 | return -E1000_ERR_SWFW_SYNC; | ||
3453 | |||
3454 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & | ||
3455 | E1000_KUMCTRLSTA_OFFSET) | data; | ||
3456 | E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); | ||
3457 | udelay(2); | ||
3458 | |||
3459 | e1000_swfw_sync_release(hw, swfw); | ||
3460 | return E1000_SUCCESS; | ||
3461 | } | ||
2970 | 3462 | ||
2971 | /****************************************************************************** | 3463 | /****************************************************************************** |
2972 | * Returns the PHY to the power-on reset state | 3464 | * Returns the PHY to the power-on reset state |
@@ -2979,6 +3471,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
2979 | uint32_t ctrl, ctrl_ext; | 3471 | uint32_t ctrl, ctrl_ext; |
2980 | uint32_t led_ctrl; | 3472 | uint32_t led_ctrl; |
2981 | int32_t ret_val; | 3473 | int32_t ret_val; |
3474 | uint16_t swfw; | ||
2982 | 3475 | ||
2983 | DEBUGFUNC("e1000_phy_hw_reset"); | 3476 | DEBUGFUNC("e1000_phy_hw_reset"); |
2984 | 3477 | ||
@@ -2991,11 +3484,21 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
2991 | DEBUGOUT("Resetting Phy...\n"); | 3484 | DEBUGOUT("Resetting Phy...\n"); |
2992 | 3485 | ||
2993 | if(hw->mac_type > e1000_82543) { | 3486 | if(hw->mac_type > e1000_82543) { |
3487 | if ((hw->mac_type == e1000_80003es2lan) && | ||
3488 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | ||
3489 | swfw = E1000_SWFW_PHY1_SM; | ||
3490 | } else { | ||
3491 | swfw = E1000_SWFW_PHY0_SM; | ||
3492 | } | ||
3493 | if (e1000_swfw_sync_acquire(hw, swfw)) { | ||
3494 | e1000_release_software_semaphore(hw); | ||
3495 | return -E1000_ERR_SWFW_SYNC; | ||
3496 | } | ||
2994 | /* Read the device control register and assert the E1000_CTRL_PHY_RST | 3497 | /* Read the device control register and assert the E1000_CTRL_PHY_RST |
2995 | * bit. Then, take it out of reset. | 3498 | * bit. Then, take it out of reset. |
2996 | * For pre-e1000_82571 hardware, we delay for 10ms between the assert | 3499 | * For pre-e1000_82571 hardware, we delay for 10ms between the assert |
2997 | * and deassert. For e1000_82571 hardware and later, we instead delay | 3500 | * and deassert. For e1000_82571 hardware and later, we instead delay |
2998 | * for 10ms after the deassertion. | 3501 | * for 50us between and 10ms after the deassertion. |
2999 | */ | 3502 | */ |
3000 | ctrl = E1000_READ_REG(hw, CTRL); | 3503 | ctrl = E1000_READ_REG(hw, CTRL); |
3001 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); | 3504 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); |
@@ -3011,6 +3514,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3011 | 3514 | ||
3012 | if (hw->mac_type >= e1000_82571) | 3515 | if (hw->mac_type >= e1000_82571) |
3013 | msec_delay(10); | 3516 | msec_delay(10); |
3517 | e1000_swfw_sync_release(hw, swfw); | ||
3014 | } else { | 3518 | } else { |
3015 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR | 3519 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR |
3016 | * bit to put the PHY into reset. Then, take it out of reset. | 3520 | * bit to put the PHY into reset. Then, take it out of reset. |
@@ -3037,6 +3541,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3037 | 3541 | ||
3038 | /* Wait for FW to finish PHY configuration. */ | 3542 | /* Wait for FW to finish PHY configuration. */ |
3039 | ret_val = e1000_get_phy_cfg_done(hw); | 3543 | ret_val = e1000_get_phy_cfg_done(hw); |
3544 | e1000_release_software_semaphore(hw); | ||
3040 | 3545 | ||
3041 | return ret_val; | 3546 | return ret_val; |
3042 | } | 3547 | } |
@@ -3114,6 +3619,15 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3114 | return E1000_SUCCESS; | 3619 | return E1000_SUCCESS; |
3115 | } | 3620 | } |
3116 | 3621 | ||
3622 | /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work- | ||
3623 | * around that forces PHY page 0 to be set or the reads fail. The rest of | ||
3624 | * the code in this routine uses e1000_read_phy_reg to read the PHY ID. | ||
3625 | * So for ESB-2 we need to have this set so our reads won't fail. If the | ||
3626 | * attached PHY is not a e1000_phy_gg82563, the routines below will figure | ||
3627 | * this out as well. */ | ||
3628 | if (hw->mac_type == e1000_80003es2lan) | ||
3629 | hw->phy_type = e1000_phy_gg82563; | ||
3630 | |||
3117 | /* Read the PHY ID Registers to identify which PHY is onboard. */ | 3631 | /* Read the PHY ID Registers to identify which PHY is onboard. */ |
3118 | ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); | 3632 | ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); |
3119 | if(ret_val) | 3633 | if(ret_val) |
@@ -3151,6 +3665,9 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3151 | case e1000_82573: | 3665 | case e1000_82573: |
3152 | if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; | 3666 | if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; |
3153 | break; | 3667 | break; |
3668 | case e1000_80003es2lan: | ||
3669 | if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; | ||
3670 | break; | ||
3154 | default: | 3671 | default: |
3155 | DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); | 3672 | DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); |
3156 | return -E1000_ERR_CONFIG; | 3673 | return -E1000_ERR_CONFIG; |
@@ -3177,8 +3694,10 @@ e1000_phy_reset_dsp(struct e1000_hw *hw) | |||
3177 | DEBUGFUNC("e1000_phy_reset_dsp"); | 3694 | DEBUGFUNC("e1000_phy_reset_dsp"); |
3178 | 3695 | ||
3179 | do { | 3696 | do { |
3180 | ret_val = e1000_write_phy_reg(hw, 29, 0x001d); | 3697 | if (hw->phy_type != e1000_phy_gg82563) { |
3181 | if(ret_val) break; | 3698 | ret_val = e1000_write_phy_reg(hw, 29, 0x001d); |
3699 | if(ret_val) break; | ||
3700 | } | ||
3182 | ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); | 3701 | ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); |
3183 | if(ret_val) break; | 3702 | if(ret_val) break; |
3184 | ret_val = e1000_write_phy_reg(hw, 30, 0x0000); | 3703 | ret_val = e1000_write_phy_reg(hw, 30, 0x0000); |
@@ -3310,8 +3829,17 @@ e1000_phy_m88_get_info(struct e1000_hw *hw, | |||
3310 | /* Cable Length Estimation and Local/Remote Receiver Information | 3829 | /* Cable Length Estimation and Local/Remote Receiver Information |
3311 | * are only valid at 1000 Mbps. | 3830 | * are only valid at 1000 Mbps. |
3312 | */ | 3831 | */ |
3313 | phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> | 3832 | if (hw->phy_type != e1000_phy_gg82563) { |
3314 | M88E1000_PSSR_CABLE_LENGTH_SHIFT); | 3833 | phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> |
3834 | M88E1000_PSSR_CABLE_LENGTH_SHIFT); | ||
3835 | } else { | ||
3836 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, | ||
3837 | &phy_data); | ||
3838 | if (ret_val) | ||
3839 | return ret_val; | ||
3840 | |||
3841 | phy_info->cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH; | ||
3842 | } | ||
3315 | 3843 | ||
3316 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); | 3844 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); |
3317 | if(ret_val) | 3845 | if(ret_val) |
@@ -3392,7 +3920,8 @@ e1000_validate_mdi_setting(struct e1000_hw *hw) | |||
3392 | 3920 | ||
3393 | /****************************************************************************** | 3921 | /****************************************************************************** |
3394 | * Sets up eeprom variables in the hw struct. Must be called after mac_type | 3922 | * Sets up eeprom variables in the hw struct. Must be called after mac_type |
3395 | * is configured. | 3923 | * is configured. Additionally, if this is ICH8, the flash controller GbE |
3924 | * registers must be mapped, or this will crash. | ||
3396 | * | 3925 | * |
3397 | * hw - Struct containing variables accessed by shared code | 3926 | * hw - Struct containing variables accessed by shared code |
3398 | *****************************************************************************/ | 3927 | *****************************************************************************/ |
@@ -3505,6 +4034,20 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
3505 | E1000_WRITE_REG(hw, EECD, eecd); | 4034 | E1000_WRITE_REG(hw, EECD, eecd); |
3506 | } | 4035 | } |
3507 | break; | 4036 | break; |
4037 | case e1000_80003es2lan: | ||
4038 | eeprom->type = e1000_eeprom_spi; | ||
4039 | eeprom->opcode_bits = 8; | ||
4040 | eeprom->delay_usec = 1; | ||
4041 | if (eecd & E1000_EECD_ADDR_BITS) { | ||
4042 | eeprom->page_size = 32; | ||
4043 | eeprom->address_bits = 16; | ||
4044 | } else { | ||
4045 | eeprom->page_size = 8; | ||
4046 | eeprom->address_bits = 8; | ||
4047 | } | ||
4048 | eeprom->use_eerd = TRUE; | ||
4049 | eeprom->use_eewr = FALSE; | ||
4050 | break; | ||
3508 | default: | 4051 | default: |
3509 | break; | 4052 | break; |
3510 | } | 4053 | } |
@@ -3685,9 +4228,8 @@ e1000_acquire_eeprom(struct e1000_hw *hw) | |||
3685 | 4228 | ||
3686 | DEBUGFUNC("e1000_acquire_eeprom"); | 4229 | DEBUGFUNC("e1000_acquire_eeprom"); |
3687 | 4230 | ||
3688 | if(e1000_get_hw_eeprom_semaphore(hw)) | 4231 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) |
3689 | return -E1000_ERR_EEPROM; | 4232 | return -E1000_ERR_SWFW_SYNC; |
3690 | |||
3691 | eecd = E1000_READ_REG(hw, EECD); | 4233 | eecd = E1000_READ_REG(hw, EECD); |
3692 | 4234 | ||
3693 | if (hw->mac_type != e1000_82573) { | 4235 | if (hw->mac_type != e1000_82573) { |
@@ -3706,7 +4248,7 @@ e1000_acquire_eeprom(struct e1000_hw *hw) | |||
3706 | eecd &= ~E1000_EECD_REQ; | 4248 | eecd &= ~E1000_EECD_REQ; |
3707 | E1000_WRITE_REG(hw, EECD, eecd); | 4249 | E1000_WRITE_REG(hw, EECD, eecd); |
3708 | DEBUGOUT("Could not acquire EEPROM grant\n"); | 4250 | DEBUGOUT("Could not acquire EEPROM grant\n"); |
3709 | e1000_put_hw_eeprom_semaphore(hw); | 4251 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
3710 | return -E1000_ERR_EEPROM; | 4252 | return -E1000_ERR_EEPROM; |
3711 | } | 4253 | } |
3712 | } | 4254 | } |
@@ -3829,7 +4371,7 @@ e1000_release_eeprom(struct e1000_hw *hw) | |||
3829 | E1000_WRITE_REG(hw, EECD, eecd); | 4371 | E1000_WRITE_REG(hw, EECD, eecd); |
3830 | } | 4372 | } |
3831 | 4373 | ||
3832 | e1000_put_hw_eeprom_semaphore(hw); | 4374 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
3833 | } | 4375 | } |
3834 | 4376 | ||
3835 | /****************************************************************************** | 4377 | /****************************************************************************** |
@@ -3908,6 +4450,8 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
3908 | if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && | 4450 | if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && |
3909 | hw->eeprom.use_eerd == FALSE) { | 4451 | hw->eeprom.use_eerd == FALSE) { |
3910 | switch (hw->mac_type) { | 4452 | switch (hw->mac_type) { |
4453 | case e1000_80003es2lan: | ||
4454 | break; | ||
3911 | default: | 4455 | default: |
3912 | /* Prepare the EEPROM for reading */ | 4456 | /* Prepare the EEPROM for reading */ |
3913 | if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) | 4457 | if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) |
@@ -4025,6 +4569,9 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, | |||
4025 | uint32_t i = 0; | 4569 | uint32_t i = 0; |
4026 | int32_t error = 0; | 4570 | int32_t error = 0; |
4027 | 4571 | ||
4572 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) | ||
4573 | return -E1000_ERR_SWFW_SYNC; | ||
4574 | |||
4028 | for (i = 0; i < words; i++) { | 4575 | for (i = 0; i < words; i++) { |
4029 | register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | | 4576 | register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | |
4030 | ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | | 4577 | ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | |
@@ -4044,6 +4591,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, | |||
4044 | } | 4591 | } |
4045 | } | 4592 | } |
4046 | 4593 | ||
4594 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); | ||
4047 | return error; | 4595 | return error; |
4048 | } | 4596 | } |
4049 | 4597 | ||
@@ -4085,6 +4633,8 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | |||
4085 | { | 4633 | { |
4086 | uint32_t eecd = 0; | 4634 | uint32_t eecd = 0; |
4087 | 4635 | ||
4636 | DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); | ||
4637 | |||
4088 | if(hw->mac_type == e1000_82573) { | 4638 | if(hw->mac_type == e1000_82573) { |
4089 | eecd = E1000_READ_REG(hw, EECD); | 4639 | eecd = E1000_READ_REG(hw, EECD); |
4090 | 4640 | ||
@@ -4511,6 +5061,7 @@ e1000_read_mac_addr(struct e1000_hw * hw) | |||
4511 | case e1000_82546: | 5061 | case e1000_82546: |
4512 | case e1000_82546_rev_3: | 5062 | case e1000_82546_rev_3: |
4513 | case e1000_82571: | 5063 | case e1000_82571: |
5064 | case e1000_80003es2lan: | ||
4514 | if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | 5065 | if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) |
4515 | hw->perm_mac_addr[5] ^= 0x01; | 5066 | hw->perm_mac_addr[5] ^= 0x01; |
4516 | break; | 5067 | break; |
@@ -4749,8 +5300,37 @@ e1000_rar_set(struct e1000_hw *hw, | |||
4749 | rar_low = ((uint32_t) addr[0] | | 5300 | rar_low = ((uint32_t) addr[0] | |
4750 | ((uint32_t) addr[1] << 8) | | 5301 | ((uint32_t) addr[1] << 8) | |
4751 | ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24)); | 5302 | ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24)); |
5303 | rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8)); | ||
4752 | 5304 | ||
4753 | rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8) | E1000_RAH_AV); | 5305 | /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx |
5306 | * unit hang. | ||
5307 | * | ||
5308 | * Description: | ||
5309 | * If there are any Rx frames queued up or otherwise present in the HW | ||
5310 | * before RSS is enabled, and then we enable RSS, the HW Rx unit will | ||
5311 | * hang. To work around this issue, we have to disable receives and | ||
5312 | * flush out all Rx frames before we enable RSS. To do so, we modify we | ||
5313 | * redirect all Rx traffic to manageability and then reset the HW. | ||
5314 | * This flushes away Rx frames, and (since the redirections to | ||
5315 | * manageability persists across resets) keeps new ones from coming in | ||
5316 | * while we work. Then, we clear the Address Valid AV bit for all MAC | ||
5317 | * addresses and undo the re-direction to manageability. | ||
5318 | * Now, frames are coming in again, but the MAC won't accept them, so | ||
5319 | * far so good. We now proceed to initialize RSS (if necessary) and | ||
5320 | * configure the Rx unit. Last, we re-enable the AV bits and continue | ||
5321 | * on our merry way. | ||
5322 | */ | ||
5323 | switch (hw->mac_type) { | ||
5324 | case e1000_82571: | ||
5325 | case e1000_82572: | ||
5326 | case e1000_80003es2lan: | ||
5327 | if (hw->leave_av_bit_off == TRUE) | ||
5328 | break; | ||
5329 | default: | ||
5330 | /* Indicate to hardware the Address is Valid. */ | ||
5331 | rar_high |= E1000_RAH_AV; | ||
5332 | break; | ||
5333 | } | ||
4754 | 5334 | ||
4755 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); | 5335 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); |
4756 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); | 5336 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); |
@@ -5330,6 +5910,7 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
5330 | hw->bus_width = e1000_bus_width_pciex_1; | 5910 | hw->bus_width = e1000_bus_width_pciex_1; |
5331 | break; | 5911 | break; |
5332 | case e1000_82571: | 5912 | case e1000_82571: |
5913 | case e1000_80003es2lan: | ||
5333 | hw->bus_type = e1000_bus_type_pci_express; | 5914 | hw->bus_type = e1000_bus_type_pci_express; |
5334 | hw->bus_speed = e1000_bus_speed_2500; | 5915 | hw->bus_speed = e1000_bus_speed_2500; |
5335 | hw->bus_width = e1000_bus_width_pciex_4; | 5916 | hw->bus_width = e1000_bus_width_pciex_4; |
@@ -5475,6 +6056,34 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
5475 | return -E1000_ERR_PHY; | 6056 | return -E1000_ERR_PHY; |
5476 | break; | 6057 | break; |
5477 | } | 6058 | } |
6059 | } else if (hw->phy_type == e1000_phy_gg82563) { | ||
6060 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, | ||
6061 | &phy_data); | ||
6062 | if (ret_val) | ||
6063 | return ret_val; | ||
6064 | cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH; | ||
6065 | |||
6066 | switch (cable_length) { | ||
6067 | case e1000_gg_cable_length_60: | ||
6068 | *min_length = 0; | ||
6069 | *max_length = e1000_igp_cable_length_60; | ||
6070 | break; | ||
6071 | case e1000_gg_cable_length_60_115: | ||
6072 | *min_length = e1000_igp_cable_length_60; | ||
6073 | *max_length = e1000_igp_cable_length_115; | ||
6074 | break; | ||
6075 | case e1000_gg_cable_length_115_150: | ||
6076 | *min_length = e1000_igp_cable_length_115; | ||
6077 | *max_length = e1000_igp_cable_length_150; | ||
6078 | break; | ||
6079 | case e1000_gg_cable_length_150: | ||
6080 | *min_length = e1000_igp_cable_length_150; | ||
6081 | *max_length = e1000_igp_cable_length_180; | ||
6082 | break; | ||
6083 | default: | ||
6084 | return -E1000_ERR_PHY; | ||
6085 | break; | ||
6086 | } | ||
5478 | } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ | 6087 | } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ |
5479 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = | 6088 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = |
5480 | {IGP01E1000_PHY_AGC_A, | 6089 | {IGP01E1000_PHY_AGC_A, |
@@ -5584,7 +6193,8 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
5584 | 6193 | ||
5585 | DEBUGFUNC("e1000_check_polarity"); | 6194 | DEBUGFUNC("e1000_check_polarity"); |
5586 | 6195 | ||
5587 | if(hw->phy_type == e1000_phy_m88) { | 6196 | if ((hw->phy_type == e1000_phy_m88) || |
6197 | (hw->phy_type == e1000_phy_gg82563)) { | ||
5588 | /* return the Polarity bit in the Status register. */ | 6198 | /* return the Polarity bit in the Status register. */ |
5589 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, | 6199 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, |
5590 | &phy_data); | 6200 | &phy_data); |
@@ -5653,7 +6263,8 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
5653 | return ret_val; | 6263 | return ret_val; |
5654 | 6264 | ||
5655 | hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0; | 6265 | hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0; |
5656 | } else if(hw->phy_type == e1000_phy_m88) { | 6266 | } else if ((hw->phy_type == e1000_phy_m88) || |
6267 | (hw->phy_type == e1000_phy_gg82563)) { | ||
5657 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, | 6268 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, |
5658 | &phy_data); | 6269 | &phy_data); |
5659 | if(ret_val) | 6270 | if(ret_val) |
@@ -6686,6 +7297,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) | |||
6686 | case e1000_82571: | 7297 | case e1000_82571: |
6687 | case e1000_82572: | 7298 | case e1000_82572: |
6688 | case e1000_82573: | 7299 | case e1000_82573: |
7300 | case e1000_80003es2lan: | ||
6689 | while(timeout) { | 7301 | while(timeout) { |
6690 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; | 7302 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; |
6691 | else msec_delay(1); | 7303 | else msec_delay(1); |
@@ -6729,6 +7341,11 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
6729 | default: | 7341 | default: |
6730 | msec_delay(10); | 7342 | msec_delay(10); |
6731 | break; | 7343 | break; |
7344 | case e1000_80003es2lan: | ||
7345 | /* Separate *_CFG_DONE_* bit for each port */ | ||
7346 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | ||
7347 | cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1; | ||
7348 | /* Fall Through */ | ||
6732 | case e1000_82571: | 7349 | case e1000_82571: |
6733 | case e1000_82572: | 7350 | case e1000_82572: |
6734 | while (timeout) { | 7351 | while (timeout) { |
@@ -6746,12 +7363,6 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
6746 | break; | 7363 | break; |
6747 | } | 7364 | } |
6748 | 7365 | ||
6749 | /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high. | ||
6750 | * Need to wait for PHY configuration completion before accessing NVM | ||
6751 | * and PHY. */ | ||
6752 | if (hw->mac_type == e1000_82573) | ||
6753 | msec_delay(25); | ||
6754 | |||
6755 | return E1000_SUCCESS; | 7366 | return E1000_SUCCESS; |
6756 | } | 7367 | } |
6757 | 7368 | ||
@@ -6777,6 +7388,11 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
6777 | if(!hw->eeprom_semaphore_present) | 7388 | if(!hw->eeprom_semaphore_present) |
6778 | return E1000_SUCCESS; | 7389 | return E1000_SUCCESS; |
6779 | 7390 | ||
7391 | if (hw->mac_type == e1000_80003es2lan) { | ||
7392 | /* Get the SW semaphore. */ | ||
7393 | if (e1000_get_software_semaphore(hw) != E1000_SUCCESS) | ||
7394 | return -E1000_ERR_EEPROM; | ||
7395 | } | ||
6780 | 7396 | ||
6781 | /* Get the FW semaphore. */ | 7397 | /* Get the FW semaphore. */ |
6782 | timeout = hw->eeprom.word_size + 1; | 7398 | timeout = hw->eeprom.word_size + 1; |
@@ -6822,10 +7438,75 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
6822 | return; | 7438 | return; |
6823 | 7439 | ||
6824 | swsm = E1000_READ_REG(hw, SWSM); | 7440 | swsm = E1000_READ_REG(hw, SWSM); |
7441 | if (hw->mac_type == e1000_80003es2lan) { | ||
7442 | /* Release both semaphores. */ | ||
7443 | swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); | ||
7444 | } else | ||
6825 | swsm &= ~(E1000_SWSM_SWESMBI); | 7445 | swsm &= ~(E1000_SWSM_SWESMBI); |
6826 | E1000_WRITE_REG(hw, SWSM, swsm); | 7446 | E1000_WRITE_REG(hw, SWSM, swsm); |
6827 | } | 7447 | } |
6828 | 7448 | ||
7449 | /*************************************************************************** | ||
7450 | * | ||
7451 | * Obtaining software semaphore bit (SMBI) before resetting PHY. | ||
7452 | * | ||
7453 | * hw: Struct containing variables accessed by shared code | ||
7454 | * | ||
7455 | * returns: - E1000_ERR_RESET if fail to obtain semaphore. | ||
7456 | * E1000_SUCCESS at any other case. | ||
7457 | * | ||
7458 | ***************************************************************************/ | ||
7459 | int32_t | ||
7460 | e1000_get_software_semaphore(struct e1000_hw *hw) | ||
7461 | { | ||
7462 | int32_t timeout = hw->eeprom.word_size + 1; | ||
7463 | uint32_t swsm; | ||
7464 | |||
7465 | DEBUGFUNC("e1000_get_software_semaphore"); | ||
7466 | |||
7467 | if (hw->mac_type != e1000_80003es2lan) | ||
7468 | return E1000_SUCCESS; | ||
7469 | |||
7470 | while(timeout) { | ||
7471 | swsm = E1000_READ_REG(hw, SWSM); | ||
7472 | /* If SMBI bit cleared, it is now set and we hold the semaphore */ | ||
7473 | if(!(swsm & E1000_SWSM_SMBI)) | ||
7474 | break; | ||
7475 | msec_delay_irq(1); | ||
7476 | timeout--; | ||
7477 | } | ||
7478 | |||
7479 | if(!timeout) { | ||
7480 | DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); | ||
7481 | return -E1000_ERR_RESET; | ||
7482 | } | ||
7483 | |||
7484 | return E1000_SUCCESS; | ||
7485 | } | ||
7486 | |||
7487 | /*************************************************************************** | ||
7488 | * | ||
7489 | * Release semaphore bit (SMBI). | ||
7490 | * | ||
7491 | * hw: Struct containing variables accessed by shared code | ||
7492 | * | ||
7493 | ***************************************************************************/ | ||
7494 | void | ||
7495 | e1000_release_software_semaphore(struct e1000_hw *hw) | ||
7496 | { | ||
7497 | uint32_t swsm; | ||
7498 | |||
7499 | DEBUGFUNC("e1000_release_software_semaphore"); | ||
7500 | |||
7501 | if (hw->mac_type != e1000_80003es2lan) | ||
7502 | return; | ||
7503 | |||
7504 | swsm = E1000_READ_REG(hw, SWSM); | ||
7505 | /* Release the SW semaphores.*/ | ||
7506 | swsm &= ~E1000_SWSM_SMBI; | ||
7507 | E1000_WRITE_REG(hw, SWSM, swsm); | ||
7508 | } | ||
7509 | |||
6829 | /****************************************************************************** | 7510 | /****************************************************************************** |
6830 | * Checks if PHY reset is blocked due to SOL/IDER session, for example. | 7511 | * Checks if PHY reset is blocked due to SOL/IDER session, for example. |
6831 | * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to | 7512 | * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to |
@@ -6862,6 +7543,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
6862 | case e1000_82571: | 7543 | case e1000_82571: |
6863 | case e1000_82572: | 7544 | case e1000_82572: |
6864 | case e1000_82573: | 7545 | case e1000_82573: |
7546 | case e1000_80003es2lan: | ||
6865 | fwsm = E1000_READ_REG(hw, FWSM); | 7547 | fwsm = E1000_READ_REG(hw, FWSM); |
6866 | if((fwsm & E1000_FWSM_MODE_MASK) != 0) | 7548 | if((fwsm & E1000_FWSM_MODE_MASK) != 0) |
6867 | return TRUE; | 7549 | return TRUE; |
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index f1219dd9dbac..150e45e30f87 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -60,6 +60,7 @@ typedef enum { | |||
60 | e1000_82571, | 60 | e1000_82571, |
61 | e1000_82572, | 61 | e1000_82572, |
62 | e1000_82573, | 62 | e1000_82573, |
63 | e1000_80003es2lan, | ||
63 | e1000_num_macs | 64 | e1000_num_macs |
64 | } e1000_mac_type; | 65 | } e1000_mac_type; |
65 | 66 | ||
@@ -139,6 +140,13 @@ typedef enum { | |||
139 | } e1000_cable_length; | 140 | } e1000_cable_length; |
140 | 141 | ||
141 | typedef enum { | 142 | typedef enum { |
143 | e1000_gg_cable_length_60 = 0, | ||
144 | e1000_gg_cable_length_60_115 = 1, | ||
145 | e1000_gg_cable_length_115_150 = 2, | ||
146 | e1000_gg_cable_length_150 = 4 | ||
147 | } e1000_gg_cable_length; | ||
148 | |||
149 | typedef enum { | ||
142 | e1000_igp_cable_length_10 = 10, | 150 | e1000_igp_cable_length_10 = 10, |
143 | e1000_igp_cable_length_20 = 20, | 151 | e1000_igp_cable_length_20 = 20, |
144 | e1000_igp_cable_length_30 = 30, | 152 | e1000_igp_cable_length_30 = 30, |
@@ -208,6 +216,7 @@ typedef enum { | |||
208 | e1000_phy_m88 = 0, | 216 | e1000_phy_m88 = 0, |
209 | e1000_phy_igp, | 217 | e1000_phy_igp, |
210 | e1000_phy_igp_2, | 218 | e1000_phy_igp_2, |
219 | e1000_phy_gg82563, | ||
211 | e1000_phy_undefined = 0xFF | 220 | e1000_phy_undefined = 0xFF |
212 | } e1000_phy_type; | 221 | } e1000_phy_type; |
213 | 222 | ||
@@ -281,6 +290,7 @@ typedef enum { | |||
281 | #define E1000_ERR_MASTER_REQUESTS_PENDING 10 | 290 | #define E1000_ERR_MASTER_REQUESTS_PENDING 10 |
282 | #define E1000_ERR_HOST_INTERFACE_COMMAND 11 | 291 | #define E1000_ERR_HOST_INTERFACE_COMMAND 11 |
283 | #define E1000_BLK_PHY_RESET 12 | 292 | #define E1000_BLK_PHY_RESET 12 |
293 | #define E1000_ERR_SWFW_SYNC 13 | ||
284 | 294 | ||
285 | /* Function prototypes */ | 295 | /* Function prototypes */ |
286 | /* Initialization */ | 296 | /* Initialization */ |
@@ -304,6 +314,8 @@ int32_t e1000_phy_hw_reset(struct e1000_hw *hw); | |||
304 | int32_t e1000_phy_reset(struct e1000_hw *hw); | 314 | int32_t e1000_phy_reset(struct e1000_hw *hw); |
305 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 315 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); |
306 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); | 316 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); |
317 | int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); | ||
318 | int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); | ||
307 | 319 | ||
308 | /* EEPROM Functions */ | 320 | /* EEPROM Functions */ |
309 | int32_t e1000_init_eeprom_params(struct e1000_hw *hw); | 321 | int32_t e1000_init_eeprom_params(struct e1000_hw *hw); |
@@ -454,6 +466,8 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
454 | #define E1000_DEV_ID_82573E_IAMT 0x108C | 466 | #define E1000_DEV_ID_82573E_IAMT 0x108C |
455 | #define E1000_DEV_ID_82573L 0x109A | 467 | #define E1000_DEV_ID_82573L 0x109A |
456 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 | 468 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 |
469 | #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 | ||
470 | #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 | ||
457 | 471 | ||
458 | 472 | ||
459 | #define NODE_ADDRESS_SIZE 6 | 473 | #define NODE_ADDRESS_SIZE 6 |
@@ -850,6 +864,7 @@ struct e1000_ffvt_entry { | |||
850 | #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ | 864 | #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ |
851 | #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ | 865 | #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ |
852 | #define E1000_TCTL 0x00400 /* TX Control - RW */ | 866 | #define E1000_TCTL 0x00400 /* TX Control - RW */ |
867 | #define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ | ||
853 | #define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ | 868 | #define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ |
854 | #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ | 869 | #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ |
855 | #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ | 870 | #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ |
@@ -996,6 +1011,11 @@ struct e1000_ffvt_entry { | |||
996 | #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ | 1011 | #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ |
997 | #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ | 1012 | #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ |
998 | 1013 | ||
1014 | #define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ | ||
1015 | #define E1000_MDPHYA 0x0003C /* PHY address - RW */ | ||
1016 | #define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ | ||
1017 | #define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ | ||
1018 | |||
999 | #define E1000_GCR 0x05B00 /* PCI-Ex Control */ | 1019 | #define E1000_GCR 0x05B00 /* PCI-Ex Control */ |
1000 | #define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ | 1020 | #define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ |
1001 | #define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ | 1021 | #define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ |
@@ -1065,6 +1085,7 @@ struct e1000_ffvt_entry { | |||
1065 | #define E1000_82542_RXCW E1000_RXCW | 1085 | #define E1000_82542_RXCW E1000_RXCW |
1066 | #define E1000_82542_MTA 0x00200 | 1086 | #define E1000_82542_MTA 0x00200 |
1067 | #define E1000_82542_TCTL E1000_TCTL | 1087 | #define E1000_82542_TCTL E1000_TCTL |
1088 | #define E1000_82542_TCTL_EXT E1000_TCTL_EXT | ||
1068 | #define E1000_82542_TIPG E1000_TIPG | 1089 | #define E1000_82542_TIPG E1000_TIPG |
1069 | #define E1000_82542_TDBAL 0x00420 | 1090 | #define E1000_82542_TDBAL 0x00420 |
1070 | #define E1000_82542_TDBAH 0x00424 | 1091 | #define E1000_82542_TDBAH 0x00424 |
@@ -1212,6 +1233,8 @@ struct e1000_ffvt_entry { | |||
1212 | #define E1000_82542_RSSRK E1000_RSSRK | 1233 | #define E1000_82542_RSSRK E1000_RSSRK |
1213 | #define E1000_82542_RSSIM E1000_RSSIM | 1234 | #define E1000_82542_RSSIM E1000_RSSIM |
1214 | #define E1000_82542_RSSIR E1000_RSSIR | 1235 | #define E1000_82542_RSSIR E1000_RSSIR |
1236 | #define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA | ||
1237 | #define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC | ||
1215 | 1238 | ||
1216 | /* Statistics counters collected by the MAC */ | 1239 | /* Statistics counters collected by the MAC */ |
1217 | struct e1000_hw_stats { | 1240 | struct e1000_hw_stats { |
@@ -1303,6 +1326,7 @@ struct e1000_hw { | |||
1303 | e1000_ffe_config ffe_config_state; | 1326 | e1000_ffe_config ffe_config_state; |
1304 | uint32_t asf_firmware_present; | 1327 | uint32_t asf_firmware_present; |
1305 | uint32_t eeprom_semaphore_present; | 1328 | uint32_t eeprom_semaphore_present; |
1329 | uint32_t swfw_sync_present; | ||
1306 | unsigned long io_base; | 1330 | unsigned long io_base; |
1307 | uint32_t phy_id; | 1331 | uint32_t phy_id; |
1308 | uint32_t phy_revision; | 1332 | uint32_t phy_revision; |
@@ -1361,6 +1385,7 @@ struct e1000_hw { | |||
1361 | boolean_t ifs_params_forced; | 1385 | boolean_t ifs_params_forced; |
1362 | boolean_t in_ifs_mode; | 1386 | boolean_t in_ifs_mode; |
1363 | boolean_t mng_reg_access_disabled; | 1387 | boolean_t mng_reg_access_disabled; |
1388 | boolean_t leave_av_bit_off; | ||
1364 | }; | 1389 | }; |
1365 | 1390 | ||
1366 | 1391 | ||
@@ -1393,6 +1418,8 @@ struct e1000_hw { | |||
1393 | #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ | 1418 | #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ |
1394 | #define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ | 1419 | #define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ |
1395 | #define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ | 1420 | #define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ |
1421 | #define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ | ||
1422 | #define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ | ||
1396 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ | 1423 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ |
1397 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ | 1424 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ |
1398 | #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ | 1425 | #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ |
@@ -1429,6 +1456,16 @@ struct e1000_hw { | |||
1429 | #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ | 1456 | #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ |
1430 | #define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ | 1457 | #define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ |
1431 | #define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ | 1458 | #define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ |
1459 | #define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ | ||
1460 | #define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ | ||
1461 | #define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ | ||
1462 | #define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ | ||
1463 | #define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ | ||
1464 | #define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ | ||
1465 | #define E1000_STATUS_FUSE_8 0x04000000 | ||
1466 | #define E1000_STATUS_FUSE_9 0x08000000 | ||
1467 | #define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ | ||
1468 | #define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ | ||
1432 | 1469 | ||
1433 | /* Constants used to interpret the masked PCI-X bus speed. */ | 1470 | /* Constants used to interpret the masked PCI-X bus speed. */ |
1434 | #define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ | 1471 | #define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ |
@@ -1506,6 +1543,8 @@ struct e1000_hw { | |||
1506 | #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 | 1543 | #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 |
1507 | #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 | 1544 | #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 |
1508 | #define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 | 1545 | #define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 |
1546 | #define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 | ||
1547 | #define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 | ||
1509 | #define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 | 1548 | #define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 |
1510 | #define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 | 1549 | #define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 |
1511 | #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 | 1550 | #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 |
@@ -1515,6 +1554,9 @@ struct e1000_hw { | |||
1515 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ | 1554 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ |
1516 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ | 1555 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ |
1517 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ | 1556 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ |
1557 | #define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ | ||
1558 | #define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ | ||
1559 | #define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 | ||
1518 | 1560 | ||
1519 | /* MDI Control */ | 1561 | /* MDI Control */ |
1520 | #define E1000_MDIC_DATA_MASK 0x0000FFFF | 1562 | #define E1000_MDIC_DATA_MASK 0x0000FFFF |
@@ -1528,6 +1570,32 @@ struct e1000_hw { | |||
1528 | #define E1000_MDIC_INT_EN 0x20000000 | 1570 | #define E1000_MDIC_INT_EN 0x20000000 |
1529 | #define E1000_MDIC_ERROR 0x40000000 | 1571 | #define E1000_MDIC_ERROR 0x40000000 |
1530 | 1572 | ||
1573 | #define E1000_KUMCTRLSTA_MASK 0x0000FFFF | ||
1574 | #define E1000_KUMCTRLSTA_OFFSET 0x001F0000 | ||
1575 | #define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 | ||
1576 | #define E1000_KUMCTRLSTA_REN 0x00200000 | ||
1577 | |||
1578 | #define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 | ||
1579 | #define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 | ||
1580 | #define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 | ||
1581 | #define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 | ||
1582 | #define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004 | ||
1583 | #define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 | ||
1584 | #define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 | ||
1585 | #define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E | ||
1586 | #define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F | ||
1587 | |||
1588 | /* FIFO Control */ | ||
1589 | #define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 | ||
1590 | #define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 | ||
1591 | |||
1592 | /* In-Band Control */ | ||
1593 | #define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 | ||
1594 | |||
1595 | /* Half-Duplex Control */ | ||
1596 | #define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 | ||
1597 | #define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 | ||
1598 | |||
1531 | /* LED Control */ | 1599 | /* LED Control */ |
1532 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F | 1600 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F |
1533 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 | 1601 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 |
@@ -1590,6 +1658,13 @@ struct e1000_hw { | |||
1590 | #define E1000_ICR_MNG 0x00040000 /* Manageability event */ | 1658 | #define E1000_ICR_MNG 0x00040000 /* Manageability event */ |
1591 | #define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ | 1659 | #define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ |
1592 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ | 1660 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ |
1661 | #define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ | ||
1662 | #define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ | ||
1663 | #define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ | ||
1664 | #define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ | ||
1665 | #define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ | ||
1666 | #define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ | ||
1667 | #define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ | ||
1593 | 1668 | ||
1594 | /* Interrupt Cause Set */ | 1669 | /* Interrupt Cause Set */ |
1595 | #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1670 | #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1610,6 +1685,12 @@ struct e1000_hw { | |||
1610 | #define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ | 1685 | #define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ |
1611 | #define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ | 1686 | #define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ |
1612 | #define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ | 1687 | #define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ |
1688 | #define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ | ||
1689 | #define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ | ||
1690 | #define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ | ||
1691 | #define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | ||
1692 | #define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | ||
1693 | #define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | ||
1613 | 1694 | ||
1614 | /* Interrupt Mask Set */ | 1695 | /* Interrupt Mask Set */ |
1615 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1696 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1630,6 +1711,12 @@ struct e1000_hw { | |||
1630 | #define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ | 1711 | #define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ |
1631 | #define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ | 1712 | #define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ |
1632 | #define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ | 1713 | #define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ |
1714 | #define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ | ||
1715 | #define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ | ||
1716 | #define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ | ||
1717 | #define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | ||
1718 | #define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | ||
1719 | #define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | ||
1633 | 1720 | ||
1634 | /* Interrupt Mask Clear */ | 1721 | /* Interrupt Mask Clear */ |
1635 | #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1722 | #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1650,6 +1737,12 @@ struct e1000_hw { | |||
1650 | #define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ | 1737 | #define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ |
1651 | #define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ | 1738 | #define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ |
1652 | #define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ | 1739 | #define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ |
1740 | #define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ | ||
1741 | #define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ | ||
1742 | #define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ | ||
1743 | #define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | ||
1744 | #define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | ||
1745 | #define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | ||
1653 | 1746 | ||
1654 | /* Receive Control */ | 1747 | /* Receive Control */ |
1655 | #define E1000_RCTL_RST 0x00000001 /* Software reset */ | 1748 | #define E1000_RCTL_RST 0x00000001 /* Software reset */ |
@@ -1719,6 +1812,12 @@ struct e1000_hw { | |||
1719 | #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ | 1812 | #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ |
1720 | #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ | 1813 | #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ |
1721 | 1814 | ||
1815 | /* SW_FW_SYNC definitions */ | ||
1816 | #define E1000_SWFW_EEP_SM 0x0001 | ||
1817 | #define E1000_SWFW_PHY0_SM 0x0002 | ||
1818 | #define E1000_SWFW_PHY1_SM 0x0004 | ||
1819 | #define E1000_SWFW_MAC_CSR_SM 0x0008 | ||
1820 | |||
1722 | /* Receive Descriptor */ | 1821 | /* Receive Descriptor */ |
1723 | #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ | 1822 | #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ |
1724 | #define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ | 1823 | #define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ |
@@ -1797,6 +1896,11 @@ struct e1000_hw { | |||
1797 | #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ | 1896 | #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ |
1798 | #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ | 1897 | #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ |
1799 | #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ | 1898 | #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ |
1899 | /* Extended Transmit Control */ | ||
1900 | #define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ | ||
1901 | #define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ | ||
1902 | |||
1903 | #define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000 | ||
1800 | 1904 | ||
1801 | /* Receive Checksum Control */ | 1905 | /* Receive Checksum Control */ |
1802 | #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ | 1906 | #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ |
@@ -1874,6 +1978,7 @@ struct e1000_hw { | |||
1874 | #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ | 1978 | #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ |
1875 | #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ | 1979 | #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ |
1876 | #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ | 1980 | #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ |
1981 | #define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ | ||
1877 | #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ | 1982 | #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ |
1878 | #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address | 1983 | #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address |
1879 | * filtering */ | 1984 | * filtering */ |
@@ -1962,19 +2067,19 @@ struct e1000_host_command_info { | |||
1962 | /* PCI-Ex registers */ | 2067 | /* PCI-Ex registers */ |
1963 | 2068 | ||
1964 | /* PCI-Ex Control Register */ | 2069 | /* PCI-Ex Control Register */ |
1965 | #define E1000_GCR_RXD_NO_SNOOP 0x00000001 | 2070 | #define E1000_GCR_RXD_NO_SNOOP 0x00000001 |
1966 | #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 | 2071 | #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 |
1967 | #define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 | 2072 | #define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 |
1968 | #define E1000_GCR_TXD_NO_SNOOP 0x00000008 | 2073 | #define E1000_GCR_TXD_NO_SNOOP 0x00000008 |
1969 | #define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 | 2074 | #define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 |
1970 | #define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 | 2075 | #define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 |
1971 | 2076 | ||
1972 | #define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ | 2077 | #define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ |
1973 | E1000_GCR_RXDSCW_NO_SNOOP | \ | 2078 | E1000_GCR_RXDSCW_NO_SNOOP | \ |
1974 | E1000_GCR_RXDSCR_NO_SNOOP | \ | 2079 | E1000_GCR_RXDSCR_NO_SNOOP | \ |
1975 | E1000_GCR_TXD_NO_SNOOP | \ | 2080 | E1000_GCR_TXD_NO_SNOOP | \ |
1976 | E1000_GCR_TXDSCW_NO_SNOOP | \ | 2081 | E1000_GCR_TXDSCW_NO_SNOOP | \ |
1977 | E1000_GCR_TXDSCR_NO_SNOOP) | 2082 | E1000_GCR_TXDSCR_NO_SNOOP) |
1978 | 2083 | ||
1979 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 | 2084 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 |
1980 | /* Function Active and Power State to MNG */ | 2085 | /* Function Active and Power State to MNG */ |
@@ -2035,12 +2140,14 @@ struct e1000_host_command_info { | |||
2035 | #define EEPROM_INIT_CONTROL1_REG 0x000A | 2140 | #define EEPROM_INIT_CONTROL1_REG 0x000A |
2036 | #define EEPROM_INIT_CONTROL2_REG 0x000F | 2141 | #define EEPROM_INIT_CONTROL2_REG 0x000F |
2037 | #define EEPROM_INIT_CONTROL3_PORT_B 0x0014 | 2142 | #define EEPROM_INIT_CONTROL3_PORT_B 0x0014 |
2143 | #define EEPROM_INIT_3GIO_3 0x001A | ||
2038 | #define EEPROM_INIT_CONTROL3_PORT_A 0x0024 | 2144 | #define EEPROM_INIT_CONTROL3_PORT_A 0x0024 |
2039 | #define EEPROM_CFG 0x0012 | 2145 | #define EEPROM_CFG 0x0012 |
2040 | #define EEPROM_FLASH_VERSION 0x0032 | 2146 | #define EEPROM_FLASH_VERSION 0x0032 |
2041 | #define EEPROM_CHECKSUM_REG 0x003F | 2147 | #define EEPROM_CHECKSUM_REG 0x003F |
2042 | 2148 | ||
2043 | #define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ | 2149 | #define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ |
2150 | #define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ | ||
2044 | 2151 | ||
2045 | /* Word definitions for ID LED Settings */ | 2152 | /* Word definitions for ID LED Settings */ |
2046 | #define ID_LED_RESERVED_0000 0x0000 | 2153 | #define ID_LED_RESERVED_0000 0x0000 |
@@ -2084,6 +2191,9 @@ struct e1000_host_command_info { | |||
2084 | #define EEPROM_WORD0F_ANE 0x0800 | 2191 | #define EEPROM_WORD0F_ANE 0x0800 |
2085 | #define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 | 2192 | #define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 |
2086 | 2193 | ||
2194 | /* Mask bits for fields in Word 0x1a of the EEPROM */ | ||
2195 | #define EEPROM_WORD1A_ASPM_MASK 0x000C | ||
2196 | |||
2087 | /* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ | 2197 | /* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ |
2088 | #define EEPROM_SUM 0xBABA | 2198 | #define EEPROM_SUM 0xBABA |
2089 | 2199 | ||
@@ -2126,8 +2236,11 @@ struct e1000_host_command_info { | |||
2126 | 2236 | ||
2127 | #define DEFAULT_82542_TIPG_IPGR2 10 | 2237 | #define DEFAULT_82542_TIPG_IPGR2 10 |
2128 | #define DEFAULT_82543_TIPG_IPGR2 6 | 2238 | #define DEFAULT_82543_TIPG_IPGR2 6 |
2239 | #define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 | ||
2129 | #define E1000_TIPG_IPGR2_SHIFT 20 | 2240 | #define E1000_TIPG_IPGR2_SHIFT 20 |
2130 | 2241 | ||
2242 | #define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009 | ||
2243 | #define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008 | ||
2131 | #define E1000_TXDMAC_DPP 0x00000001 | 2244 | #define E1000_TXDMAC_DPP 0x00000001 |
2132 | 2245 | ||
2133 | /* Adaptive IFS defines */ | 2246 | /* Adaptive IFS defines */ |
@@ -2368,6 +2481,78 @@ struct e1000_host_command_info { | |||
2368 | 2481 | ||
2369 | #define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 | 2482 | #define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 |
2370 | 2483 | ||
2484 | /* Bits... | ||
2485 | * 15-5: page | ||
2486 | * 4-0: register offset | ||
2487 | */ | ||
2488 | #define GG82563_PAGE_SHIFT 5 | ||
2489 | #define GG82563_REG(page, reg) \ | ||
2490 | (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) | ||
2491 | #define GG82563_MIN_ALT_REG 30 | ||
2492 | |||
2493 | /* GG82563 Specific Registers */ | ||
2494 | #define GG82563_PHY_SPEC_CTRL \ | ||
2495 | GG82563_REG(0, 16) /* PHY Specific Control */ | ||
2496 | #define GG82563_PHY_SPEC_STATUS \ | ||
2497 | GG82563_REG(0, 17) /* PHY Specific Status */ | ||
2498 | #define GG82563_PHY_INT_ENABLE \ | ||
2499 | GG82563_REG(0, 18) /* Interrupt Enable */ | ||
2500 | #define GG82563_PHY_SPEC_STATUS_2 \ | ||
2501 | GG82563_REG(0, 19) /* PHY Specific Status 2 */ | ||
2502 | #define GG82563_PHY_RX_ERR_CNTR \ | ||
2503 | GG82563_REG(0, 21) /* Receive Error Counter */ | ||
2504 | #define GG82563_PHY_PAGE_SELECT \ | ||
2505 | GG82563_REG(0, 22) /* Page Select */ | ||
2506 | #define GG82563_PHY_SPEC_CTRL_2 \ | ||
2507 | GG82563_REG(0, 26) /* PHY Specific Control 2 */ | ||
2508 | #define GG82563_PHY_PAGE_SELECT_ALT \ | ||
2509 | GG82563_REG(0, 29) /* Alternate Page Select */ | ||
2510 | #define GG82563_PHY_TEST_CLK_CTRL \ | ||
2511 | GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */ | ||
2512 | |||
2513 | #define GG82563_PHY_MAC_SPEC_CTRL \ | ||
2514 | GG82563_REG(2, 21) /* MAC Specific Control Register */ | ||
2515 | #define GG82563_PHY_MAC_SPEC_CTRL_2 \ | ||
2516 | GG82563_REG(2, 26) /* MAC Specific Control 2 */ | ||
2517 | |||
2518 | #define GG82563_PHY_DSP_DISTANCE \ | ||
2519 | GG82563_REG(5, 26) /* DSP Distance */ | ||
2520 | |||
2521 | /* Page 193 - Port Control Registers */ | ||
2522 | #define GG82563_PHY_KMRN_MODE_CTRL \ | ||
2523 | GG82563_REG(193, 16) /* Kumeran Mode Control */ | ||
2524 | #define GG82563_PHY_PORT_RESET \ | ||
2525 | GG82563_REG(193, 17) /* Port Reset */ | ||
2526 | #define GG82563_PHY_REVISION_ID \ | ||
2527 | GG82563_REG(193, 18) /* Revision ID */ | ||
2528 | #define GG82563_PHY_DEVICE_ID \ | ||
2529 | GG82563_REG(193, 19) /* Device ID */ | ||
2530 | #define GG82563_PHY_PWR_MGMT_CTRL \ | ||
2531 | GG82563_REG(193, 20) /* Power Management Control */ | ||
2532 | #define GG82563_PHY_RATE_ADAPT_CTRL \ | ||
2533 | GG82563_REG(193, 25) /* Rate Adaptation Control */ | ||
2534 | |||
2535 | /* Page 194 - KMRN Registers */ | ||
2536 | #define GG82563_PHY_KMRN_FIFO_CTRL_STAT \ | ||
2537 | GG82563_REG(194, 16) /* FIFO's Control/Status */ | ||
2538 | #define GG82563_PHY_KMRN_CTRL \ | ||
2539 | GG82563_REG(194, 17) /* Control */ | ||
2540 | #define GG82563_PHY_INBAND_CTRL \ | ||
2541 | GG82563_REG(194, 18) /* Inband Control */ | ||
2542 | #define GG82563_PHY_KMRN_DIAGNOSTIC \ | ||
2543 | GG82563_REG(194, 19) /* Diagnostic */ | ||
2544 | #define GG82563_PHY_ACK_TIMEOUTS \ | ||
2545 | GG82563_REG(194, 20) /* Acknowledge Timeouts */ | ||
2546 | #define GG82563_PHY_ADV_ABILITY \ | ||
2547 | GG82563_REG(194, 21) /* Advertised Ability */ | ||
2548 | #define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \ | ||
2549 | GG82563_REG(194, 23) /* Link Partner Advertised Ability */ | ||
2550 | #define GG82563_PHY_ADV_NEXT_PAGE \ | ||
2551 | GG82563_REG(194, 24) /* Advertised Next Page */ | ||
2552 | #define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \ | ||
2553 | GG82563_REG(194, 25) /* Link Partner Advertised Next page */ | ||
2554 | #define GG82563_PHY_KMRN_MISC \ | ||
2555 | GG82563_REG(194, 26) /* Misc. */ | ||
2371 | 2556 | ||
2372 | /* PHY Control Register */ | 2557 | /* PHY Control Register */ |
2373 | #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ | 2558 | #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ |
@@ -2681,6 +2866,113 @@ struct e1000_host_command_info { | |||
2681 | #define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 | 2866 | #define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 |
2682 | #define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 | 2867 | #define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 |
2683 | 2868 | ||
2869 | /* GG82563 PHY Specific Control Register (Page 0, Register 16) */ | ||
2870 | #define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */ | ||
2871 | #define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */ | ||
2872 | #define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */ | ||
2873 | #define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */ | ||
2874 | #define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 | ||
2875 | #define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */ | ||
2876 | #define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */ | ||
2877 | #define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */ | ||
2878 | #define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */ | ||
2879 | #define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300 | ||
2880 | #define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */ | ||
2881 | #define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */ | ||
2882 | #define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */ | ||
2883 | #define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */ | ||
2884 | #define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */ | ||
2885 | #define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000 | ||
2886 | #define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12 | ||
2887 | |||
2888 | /* PHY Specific Status Register (Page 0, Register 17) */ | ||
2889 | #define GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */ | ||
2890 | #define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */ | ||
2891 | #define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */ | ||
2892 | #define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */ | ||
2893 | #define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */ | ||
2894 | #define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */ | ||
2895 | #define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */ | ||
2896 | #define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */ | ||
2897 | #define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */ | ||
2898 | #define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */ | ||
2899 | #define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */ | ||
2900 | #define GG82563_PSSR_DUPLEX 0x2000 /* 1=Full-Duplex */ | ||
2901 | #define GG82563_PSSR_SPEED_MASK 0xC000 | ||
2902 | #define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */ | ||
2903 | #define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */ | ||
2904 | #define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */ | ||
2905 | |||
2906 | /* PHY Specific Status Register 2 (Page 0, Register 19) */ | ||
2907 | #define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */ | ||
2908 | #define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */ | ||
2909 | #define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */ | ||
2910 | #define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */ | ||
2911 | #define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */ | ||
2912 | #define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */ | ||
2913 | #define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */ | ||
2914 | #define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */ | ||
2915 | #define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */ | ||
2916 | #define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */ | ||
2917 | #define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */ | ||
2918 | #define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */ | ||
2919 | #define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */ | ||
2920 | |||
2921 | /* PHY Specific Control Register 2 (Page 0, Register 26) */ | ||
2922 | #define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */ | ||
2923 | #define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C | ||
2924 | #define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */ | ||
2925 | #define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */ | ||
2926 | #define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */ | ||
2927 | #define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */ | ||
2928 | #define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */ | ||
2929 | #define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000 | ||
2930 | #define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */ | ||
2931 | #define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */ | ||
2932 | |||
2933 | /* MAC Specific Control Register (Page 2, Register 21) */ | ||
2934 | /* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ | ||
2935 | #define GG82563_MSCR_TX_CLK_MASK 0x0007 | ||
2936 | #define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004 | ||
2937 | #define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005 | ||
2938 | #define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006 | ||
2939 | #define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007 | ||
2940 | |||
2941 | #define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ | ||
2942 | |||
2943 | /* DSP Distance Register (Page 5, Register 26) */ | ||
2944 | #define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M; | ||
2945 | 1 = 50-80M; | ||
2946 | 2 = 80-110M; | ||
2947 | 3 = 110-140M; | ||
2948 | 4 = >140M */ | ||
2949 | |||
2950 | /* Kumeran Mode Control Register (Page 193, Register 16) */ | ||
2951 | #define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */ | ||
2952 | #define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up */ | ||
2953 | #define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080 | ||
2954 | #define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400 | ||
2955 | #define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */ | ||
2956 | #define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 | ||
2957 | |||
2958 | /* Power Management Control Register (Page 193, Register 20) */ | ||
2959 | #define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enable SERDES Electrical Idle */ | ||
2960 | #define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */ | ||
2961 | #define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */ | ||
2962 | #define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */ | ||
2963 | #define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */ | ||
2964 | #define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */ | ||
2965 | #define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */ | ||
2966 | #define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */ | ||
2967 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300 | ||
2968 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */ | ||
2969 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */ | ||
2970 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */ | ||
2971 | #define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */ | ||
2972 | |||
2973 | /* In-Band Control Register (Page 194, Register 18) */ | ||
2974 | #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */ | ||
2975 | |||
2684 | 2976 | ||
2685 | /* Bit definitions for valid PHY IDs. */ | 2977 | /* Bit definitions for valid PHY IDs. */ |
2686 | /* I = Integrated | 2978 | /* I = Integrated |
@@ -2695,6 +2987,7 @@ struct e1000_host_command_info { | |||
2695 | #define M88E1011_I_REV_4 0x04 | 2987 | #define M88E1011_I_REV_4 0x04 |
2696 | #define M88E1111_I_PHY_ID 0x01410CC0 | 2988 | #define M88E1111_I_PHY_ID 0x01410CC0 |
2697 | #define L1LXT971A_PHY_ID 0x001378E0 | 2989 | #define L1LXT971A_PHY_ID 0x001378E0 |
2990 | #define GG82563_E_PHY_ID 0x01410CA0 | ||
2698 | 2991 | ||
2699 | /* Miscellaneous PHY bit definitions. */ | 2992 | /* Miscellaneous PHY bit definitions. */ |
2700 | #define PHY_PREAMBLE 0xFFFFFFFF | 2993 | #define PHY_PREAMBLE 0xFFFFFFFF |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 84dcca3776ee..f39de16e6b97 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -29,6 +29,23 @@ | |||
29 | #include "e1000.h" | 29 | #include "e1000.h" |
30 | 30 | ||
31 | /* Change Log | 31 | /* Change Log |
32 | * 7.0.33 3-Feb-2006 | ||
33 | * o Added another fix for the pass false carrier bit | ||
34 | * 7.0.32 24-Jan-2006 | ||
35 | * o Need to rebuild with new version number for the pass false carrier | ||
36 | * fix in e1000_hw.c | ||
37 | * 7.0.30 18-Jan-2006 | ||
38 | * o fixup for tso workaround to disable it for pci-x | ||
39 | * o fix mem leak on 82542 | ||
40 | * o fixes for 10 Mb/s connections and incorrect stats | ||
41 | * 7.0.28 01/06/2006 | ||
42 | * o hardware workaround to only set "speed mode" bit for 1G link. | ||
43 | * 7.0.26 12/23/2005 | ||
44 | * o wake on lan support modified for device ID 10B5 | ||
45 | * o fix dhcp + vlan issue not making it to the iAMT firmware | ||
46 | * 7.0.24 12/9/2005 | ||
47 | * o New hardware support for the Gigabit NIC embedded in the south bridge | ||
48 | * o Fixes to the recycling logic (skb->tail) from IBM LTC | ||
32 | * 6.3.9 12/16/2005 | 49 | * 6.3.9 12/16/2005 |
33 | * o incorporate fix for recycled skbs from IBM LTC | 50 | * o incorporate fix for recycled skbs from IBM LTC |
34 | * 6.3.7 11/18/2005 | 51 | * 6.3.7 11/18/2005 |
@@ -46,54 +63,8 @@ | |||
46 | * rx_buffer_len | 63 | * rx_buffer_len |
47 | * 6.3.1 9/19/05 | 64 | * 6.3.1 9/19/05 |
48 | * o Use adapter->tx_timeout_factor in Tx Hung Detect logic | 65 | * o Use adapter->tx_timeout_factor in Tx Hung Detect logic |
49 | (e1000_clean_tx_irq) | 66 | * (e1000_clean_tx_irq) |
50 | * o Support for 8086:10B5 device (Quad Port) | 67 | * o Support for 8086:10B5 device (Quad Port) |
51 | * 6.2.14 9/15/05 | ||
52 | * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface | ||
53 | * open/close | ||
54 | * 6.2.13 9/14/05 | ||
55 | * o Invoke e1000_check_mng_mode only for 8257x controllers since it | ||
56 | * accesses the FWSM that is not supported in other controllers | ||
57 | * 6.2.12 9/9/05 | ||
58 | * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER | ||
59 | * o set RCTL:SECRC only for controllers newer than 82543. | ||
60 | * o When the n/w interface comes down reset DRV_LOAD bit to notify f/w. | ||
61 | * This code was moved from e1000_remove to e1000_close | ||
62 | * 6.2.10 9/6/05 | ||
63 | * o Fix error in updating RDT in el1000_alloc_rx_buffers[_ps] -- one off. | ||
64 | * o Enable fc by default on 82573 controllers (do not read eeprom) | ||
65 | * o Fix rx_errors statistic not to include missed_packet_count | ||
66 | * o Fix rx_dropped statistic not to include missed_packet_count | ||
67 | (Padraig Brady) | ||
68 | * 6.2.9 8/30/05 | ||
69 | * o Remove call to update statistics from the controller ib e1000_get_stats | ||
70 | * 6.2.8 8/30/05 | ||
71 | * o Improved algorithm for rx buffer allocation/rdt update | ||
72 | * o Flow control watermarks relative to rx PBA size | ||
73 | * o Simplified 'Tx Hung' detect logic | ||
74 | * 6.2.7 8/17/05 | ||
75 | * o Report rx buffer allocation failures and tx timeout counts in stats | ||
76 | * 6.2.6 8/16/05 | ||
77 | * o Implement workaround for controller erratum -- linear non-tso packet | ||
78 | * following a TSO gets written back prematurely | ||
79 | * 6.2.5 8/15/05 | ||
80 | * o Set netdev->tx_queue_len based on link speed/duplex settings. | ||
81 | * o Fix net_stats.rx_fifo_errors <p@draigBrady.com> | ||
82 | * o Do not power off PHY if SoL/IDER session is active | ||
83 | * 6.2.4 8/10/05 | ||
84 | * o Fix loopback test setup/cleanup for 82571/3 controllers | ||
85 | * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat | ||
86 | * all packets as raw | ||
87 | * o Prevent operations that will cause the PHY to be reset if SoL/IDER | ||
88 | * sessions are active and log a message | ||
89 | * 6.2.2 7/21/05 | ||
90 | * o used fixed size descriptors for all MTU sizes, reduces memory load | ||
91 | * 6.1.2 4/13/05 | ||
92 | * o Fixed ethtool diagnostics | ||
93 | * o Enabled flow control to take default eeprom settings | ||
94 | * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent | ||
95 | * calls, one from mii_ioctl and other from within update_stats while | ||
96 | * processing MIIREG ioctl. | ||
97 | */ | 68 | */ |
98 | 69 | ||
99 | char e1000_driver_name[] = "e1000"; | 70 | char e1000_driver_name[] = "e1000"; |
@@ -103,7 +74,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |||
103 | #else | 74 | #else |
104 | #define DRIVERNAPI "-NAPI" | 75 | #define DRIVERNAPI "-NAPI" |
105 | #endif | 76 | #endif |
106 | #define DRV_VERSION "6.3.9-k4"DRIVERNAPI | 77 | #define DRV_VERSION "7.0.33-k2"DRIVERNAPI |
107 | char e1000_driver_version[] = DRV_VERSION; | 78 | char e1000_driver_version[] = DRV_VERSION; |
108 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; | 79 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; |
109 | 80 | ||
@@ -157,32 +128,26 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
157 | INTEL_E1000_ETHERNET_DEVICE(0x108A), | 128 | INTEL_E1000_ETHERNET_DEVICE(0x108A), |
158 | INTEL_E1000_ETHERNET_DEVICE(0x108B), | 129 | INTEL_E1000_ETHERNET_DEVICE(0x108B), |
159 | INTEL_E1000_ETHERNET_DEVICE(0x108C), | 130 | INTEL_E1000_ETHERNET_DEVICE(0x108C), |
131 | INTEL_E1000_ETHERNET_DEVICE(0x1096), | ||
132 | INTEL_E1000_ETHERNET_DEVICE(0x1098), | ||
160 | INTEL_E1000_ETHERNET_DEVICE(0x1099), | 133 | INTEL_E1000_ETHERNET_DEVICE(0x1099), |
161 | INTEL_E1000_ETHERNET_DEVICE(0x109A), | 134 | INTEL_E1000_ETHERNET_DEVICE(0x109A), |
162 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), | 135 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), |
136 | INTEL_E1000_ETHERNET_DEVICE(0x10B9), | ||
163 | /* required last entry */ | 137 | /* required last entry */ |
164 | {0,} | 138 | {0,} |
165 | }; | 139 | }; |
166 | 140 | ||
167 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); | 141 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); |
168 | 142 | ||
169 | int e1000_up(struct e1000_adapter *adapter); | ||
170 | void e1000_down(struct e1000_adapter *adapter); | ||
171 | void e1000_reset(struct e1000_adapter *adapter); | ||
172 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); | ||
173 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | ||
174 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | ||
175 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | ||
176 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | ||
177 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, | 143 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, |
178 | struct e1000_tx_ring *txdr); | 144 | struct e1000_tx_ring *txdr); |
179 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | 145 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, |
180 | struct e1000_rx_ring *rxdr); | 146 | struct e1000_rx_ring *rxdr); |
181 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, | 147 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, |
182 | struct e1000_tx_ring *tx_ring); | 148 | struct e1000_tx_ring *tx_ring); |
183 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, | 149 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, |
184 | struct e1000_rx_ring *rx_ring); | 150 | struct e1000_rx_ring *rx_ring); |
185 | void e1000_update_stats(struct e1000_adapter *adapter); | ||
186 | 151 | ||
187 | /* Local Function Prototypes */ | 152 | /* Local Function Prototypes */ |
188 | 153 | ||
@@ -191,9 +156,6 @@ static void e1000_exit_module(void); | |||
191 | static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); | 156 | static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); |
192 | static void __devexit e1000_remove(struct pci_dev *pdev); | 157 | static void __devexit e1000_remove(struct pci_dev *pdev); |
193 | static int e1000_alloc_queues(struct e1000_adapter *adapter); | 158 | static int e1000_alloc_queues(struct e1000_adapter *adapter); |
194 | #ifdef CONFIG_E1000_MQ | ||
195 | static void e1000_setup_queue_mapping(struct e1000_adapter *adapter); | ||
196 | #endif | ||
197 | static int e1000_sw_init(struct e1000_adapter *adapter); | 159 | static int e1000_sw_init(struct e1000_adapter *adapter); |
198 | static int e1000_open(struct net_device *netdev); | 160 | static int e1000_open(struct net_device *netdev); |
199 | static int e1000_close(struct net_device *netdev); | 161 | static int e1000_close(struct net_device *netdev); |
@@ -241,11 +203,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
241 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); | 203 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); |
242 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | 204 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, |
243 | int cmd); | 205 | int cmd); |
244 | void e1000_set_ethtool_ops(struct net_device *netdev); | ||
245 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); | 206 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); |
246 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); | 207 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); |
247 | static void e1000_tx_timeout(struct net_device *dev); | 208 | static void e1000_tx_timeout(struct net_device *dev); |
248 | static void e1000_tx_timeout_task(struct net_device *dev); | 209 | static void e1000_reset_task(struct net_device *dev); |
249 | static void e1000_smartspeed(struct e1000_adapter *adapter); | 210 | static void e1000_smartspeed(struct e1000_adapter *adapter); |
250 | static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, | 211 | static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, |
251 | struct sk_buff *skb); | 212 | struct sk_buff *skb); |
@@ -265,14 +226,6 @@ static int e1000_resume(struct pci_dev *pdev); | |||
265 | static void e1000_netpoll (struct net_device *netdev); | 226 | static void e1000_netpoll (struct net_device *netdev); |
266 | #endif | 227 | #endif |
267 | 228 | ||
268 | #ifdef CONFIG_E1000_MQ | ||
269 | /* for multiple Rx queues */ | ||
270 | void e1000_rx_schedule(void *data); | ||
271 | #endif | ||
272 | |||
273 | /* Exported from other modules */ | ||
274 | |||
275 | extern void e1000_check_options(struct e1000_adapter *adapter); | ||
276 | 229 | ||
277 | static struct pci_driver e1000_driver = { | 230 | static struct pci_driver e1000_driver = { |
278 | .name = e1000_driver_name, | 231 | .name = e1000_driver_name, |
@@ -380,7 +333,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
380 | (vid != old_vid) && | 333 | (vid != old_vid) && |
381 | !adapter->vlgrp->vlan_devices[old_vid]) | 334 | !adapter->vlgrp->vlan_devices[old_vid]) |
382 | e1000_vlan_rx_kill_vid(netdev, old_vid); | 335 | e1000_vlan_rx_kill_vid(netdev, old_vid); |
383 | } | 336 | } else |
337 | adapter->mng_vlan_id = vid; | ||
384 | } | 338 | } |
385 | } | 339 | } |
386 | 340 | ||
@@ -502,10 +456,6 @@ e1000_up(struct e1000_adapter *adapter) | |||
502 | return err; | 456 | return err; |
503 | } | 457 | } |
504 | 458 | ||
505 | #ifdef CONFIG_E1000_MQ | ||
506 | e1000_setup_queue_mapping(adapter); | ||
507 | #endif | ||
508 | |||
509 | adapter->tx_queue_len = netdev->tx_queue_len; | 459 | adapter->tx_queue_len = netdev->tx_queue_len; |
510 | 460 | ||
511 | mod_timer(&adapter->watchdog_timer, jiffies); | 461 | mod_timer(&adapter->watchdog_timer, jiffies); |
@@ -526,9 +476,7 @@ e1000_down(struct e1000_adapter *adapter) | |||
526 | e1000_check_mng_mode(&adapter->hw); | 476 | e1000_check_mng_mode(&adapter->hw); |
527 | 477 | ||
528 | e1000_irq_disable(adapter); | 478 | e1000_irq_disable(adapter); |
529 | #ifdef CONFIG_E1000_MQ | 479 | |
530 | while (atomic_read(&adapter->rx_sched_call_data.count) != 0); | ||
531 | #endif | ||
532 | free_irq(adapter->pdev->irq, netdev); | 480 | free_irq(adapter->pdev->irq, netdev); |
533 | #ifdef CONFIG_PCI_MSI | 481 | #ifdef CONFIG_PCI_MSI |
534 | if (adapter->hw.mac_type > e1000_82547_rev_2 && | 482 | if (adapter->hw.mac_type > e1000_82547_rev_2 && |
@@ -587,6 +535,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
587 | break; | 535 | break; |
588 | case e1000_82571: | 536 | case e1000_82571: |
589 | case e1000_82572: | 537 | case e1000_82572: |
538 | case e1000_80003es2lan: | ||
590 | pba = E1000_PBA_38K; | 539 | pba = E1000_PBA_38K; |
591 | break; | 540 | break; |
592 | case e1000_82573: | 541 | case e1000_82573: |
@@ -619,7 +568,10 @@ e1000_reset(struct e1000_adapter *adapter) | |||
619 | 568 | ||
620 | adapter->hw.fc_high_water = fc_high_water_mark; | 569 | adapter->hw.fc_high_water = fc_high_water_mark; |
621 | adapter->hw.fc_low_water = fc_high_water_mark - 8; | 570 | adapter->hw.fc_low_water = fc_high_water_mark - 8; |
622 | adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; | 571 | if (adapter->hw.mac_type == e1000_80003es2lan) |
572 | adapter->hw.fc_pause_time = 0xFFFF; | ||
573 | else | ||
574 | adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; | ||
623 | adapter->hw.fc_send_xon = 1; | 575 | adapter->hw.fc_send_xon = 1; |
624 | adapter->hw.fc = adapter->hw.original_fc; | 576 | adapter->hw.fc = adapter->hw.original_fc; |
625 | 577 | ||
@@ -663,6 +615,7 @@ e1000_probe(struct pci_dev *pdev, | |||
663 | unsigned long mmio_start, mmio_len; | 615 | unsigned long mmio_start, mmio_len; |
664 | 616 | ||
665 | static int cards_found = 0; | 617 | static int cards_found = 0; |
618 | static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ | ||
666 | int i, err, pci_using_dac; | 619 | int i, err, pci_using_dac; |
667 | uint16_t eeprom_data; | 620 | uint16_t eeprom_data; |
668 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; | 621 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; |
@@ -755,6 +708,15 @@ e1000_probe(struct pci_dev *pdev, | |||
755 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) | 708 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) |
756 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 709 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
757 | 710 | ||
711 | /* if ksp3, indicate if it's port a being setup */ | ||
712 | if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 && | ||
713 | e1000_ksp3_port_a == 0) | ||
714 | adapter->ksp3_port_a = 1; | ||
715 | e1000_ksp3_port_a++; | ||
716 | /* Reset for multiple KP3 adapters */ | ||
717 | if (e1000_ksp3_port_a == 4) | ||
718 | e1000_ksp3_port_a = 0; | ||
719 | |||
758 | if (adapter->hw.mac_type >= e1000_82543) { | 720 | if (adapter->hw.mac_type >= e1000_82543) { |
759 | netdev->features = NETIF_F_SG | | 721 | netdev->features = NETIF_F_SG | |
760 | NETIF_F_HW_CSUM | | 722 | NETIF_F_HW_CSUM | |
@@ -826,8 +788,8 @@ e1000_probe(struct pci_dev *pdev, | |||
826 | adapter->phy_info_timer.function = &e1000_update_phy_info; | 788 | adapter->phy_info_timer.function = &e1000_update_phy_info; |
827 | adapter->phy_info_timer.data = (unsigned long) adapter; | 789 | adapter->phy_info_timer.data = (unsigned long) adapter; |
828 | 790 | ||
829 | INIT_WORK(&adapter->tx_timeout_task, | 791 | INIT_WORK(&adapter->reset_task, |
830 | (void (*)(void *))e1000_tx_timeout_task, netdev); | 792 | (void (*)(void *))e1000_reset_task, netdev); |
831 | 793 | ||
832 | /* we're going to reset, so assume we have no link for now */ | 794 | /* we're going to reset, so assume we have no link for now */ |
833 | 795 | ||
@@ -854,6 +816,7 @@ e1000_probe(struct pci_dev *pdev, | |||
854 | case e1000_82546: | 816 | case e1000_82546: |
855 | case e1000_82546_rev_3: | 817 | case e1000_82546_rev_3: |
856 | case e1000_82571: | 818 | case e1000_82571: |
819 | case e1000_80003es2lan: | ||
857 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ | 820 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ |
858 | e1000_read_eeprom(&adapter->hw, | 821 | e1000_read_eeprom(&adapter->hw, |
859 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | 822 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); |
@@ -972,10 +935,6 @@ e1000_remove(struct pci_dev *pdev) | |||
972 | iounmap(adapter->hw.hw_addr); | 935 | iounmap(adapter->hw.hw_addr); |
973 | pci_release_regions(pdev); | 936 | pci_release_regions(pdev); |
974 | 937 | ||
975 | #ifdef CONFIG_E1000_MQ | ||
976 | free_percpu(adapter->cpu_netdev); | ||
977 | free_percpu(adapter->cpu_tx_ring); | ||
978 | #endif | ||
979 | free_netdev(netdev); | 938 | free_netdev(netdev); |
980 | 939 | ||
981 | pci_disable_device(pdev); | 940 | pci_disable_device(pdev); |
@@ -1056,40 +1015,8 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
1056 | hw->master_slave = E1000_MASTER_SLAVE; | 1015 | hw->master_slave = E1000_MASTER_SLAVE; |
1057 | } | 1016 | } |
1058 | 1017 | ||
1059 | #ifdef CONFIG_E1000_MQ | ||
1060 | /* Number of supported queues */ | ||
1061 | switch (hw->mac_type) { | ||
1062 | case e1000_82571: | ||
1063 | case e1000_82572: | ||
1064 | /* These controllers support 2 tx queues, but with a single | ||
1065 | * qdisc implementation, multiple tx queues aren't quite as | ||
1066 | * interesting. If we can find a logical way of mapping | ||
1067 | * flows to a queue, then perhaps we can up the num_tx_queue | ||
1068 | * count back to its default. Until then, we run the risk of | ||
1069 | * terrible performance due to SACK overload. */ | ||
1070 | adapter->num_tx_queues = 1; | ||
1071 | adapter->num_rx_queues = 2; | ||
1072 | break; | ||
1073 | default: | ||
1074 | adapter->num_tx_queues = 1; | ||
1075 | adapter->num_rx_queues = 1; | ||
1076 | break; | ||
1077 | } | ||
1078 | adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus()); | ||
1079 | adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus()); | ||
1080 | DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n", | ||
1081 | adapter->num_rx_queues, | ||
1082 | ((adapter->num_rx_queues == 1) | ||
1083 | ? ((num_online_cpus() > 1) | ||
1084 | ? "(due to unsupported feature in current adapter)" | ||
1085 | : "(due to unsupported system configuration)") | ||
1086 | : "")); | ||
1087 | DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n", | ||
1088 | adapter->num_tx_queues); | ||
1089 | #else | ||
1090 | adapter->num_tx_queues = 1; | 1018 | adapter->num_tx_queues = 1; |
1091 | adapter->num_rx_queues = 1; | 1019 | adapter->num_rx_queues = 1; |
1092 | #endif | ||
1093 | 1020 | ||
1094 | if (e1000_alloc_queues(adapter)) { | 1021 | if (e1000_alloc_queues(adapter)) { |
1095 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | 1022 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); |
@@ -1152,51 +1079,9 @@ e1000_alloc_queues(struct e1000_adapter *adapter) | |||
1152 | memset(adapter->polling_netdev, 0, size); | 1079 | memset(adapter->polling_netdev, 0, size); |
1153 | #endif | 1080 | #endif |
1154 | 1081 | ||
1155 | #ifdef CONFIG_E1000_MQ | ||
1156 | adapter->rx_sched_call_data.func = e1000_rx_schedule; | ||
1157 | adapter->rx_sched_call_data.info = adapter->netdev; | ||
1158 | |||
1159 | adapter->cpu_netdev = alloc_percpu(struct net_device *); | ||
1160 | adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *); | ||
1161 | #endif | ||
1162 | |||
1163 | return E1000_SUCCESS; | 1082 | return E1000_SUCCESS; |
1164 | } | 1083 | } |
1165 | 1084 | ||
1166 | #ifdef CONFIG_E1000_MQ | ||
1167 | static void __devinit | ||
1168 | e1000_setup_queue_mapping(struct e1000_adapter *adapter) | ||
1169 | { | ||
1170 | int i, cpu; | ||
1171 | |||
1172 | adapter->rx_sched_call_data.func = e1000_rx_schedule; | ||
1173 | adapter->rx_sched_call_data.info = adapter->netdev; | ||
1174 | cpus_clear(adapter->rx_sched_call_data.cpumask); | ||
1175 | |||
1176 | adapter->cpu_netdev = alloc_percpu(struct net_device *); | ||
1177 | adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *); | ||
1178 | |||
1179 | lock_cpu_hotplug(); | ||
1180 | i = 0; | ||
1181 | for_each_online_cpu(cpu) { | ||
1182 | *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues]; | ||
1183 | /* This is incomplete because we'd like to assign separate | ||
1184 | * physical cpus to these netdev polling structures and | ||
1185 | * avoid saturating a subset of cpus. | ||
1186 | */ | ||
1187 | if (i < adapter->num_rx_queues) { | ||
1188 | *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; | ||
1189 | adapter->rx_ring[i].cpu = cpu; | ||
1190 | cpu_set(cpu, adapter->cpumask); | ||
1191 | } else | ||
1192 | *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; | ||
1193 | |||
1194 | i++; | ||
1195 | } | ||
1196 | unlock_cpu_hotplug(); | ||
1197 | } | ||
1198 | #endif | ||
1199 | |||
1200 | /** | 1085 | /** |
1201 | * e1000_open - Called when a network interface is made active | 1086 | * e1000_open - Called when a network interface is made active |
1202 | * @netdev: network interface device structure | 1087 | * @netdev: network interface device structure |
@@ -1435,18 +1320,6 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1435 | /* Setup the HW Tx Head and Tail descriptor pointers */ | 1320 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
1436 | 1321 | ||
1437 | switch (adapter->num_tx_queues) { | 1322 | switch (adapter->num_tx_queues) { |
1438 | case 2: | ||
1439 | tdba = adapter->tx_ring[1].dma; | ||
1440 | tdlen = adapter->tx_ring[1].count * | ||
1441 | sizeof(struct e1000_tx_desc); | ||
1442 | E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL)); | ||
1443 | E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32)); | ||
1444 | E1000_WRITE_REG(hw, TDLEN1, tdlen); | ||
1445 | E1000_WRITE_REG(hw, TDH1, 0); | ||
1446 | E1000_WRITE_REG(hw, TDT1, 0); | ||
1447 | adapter->tx_ring[1].tdh = E1000_TDH1; | ||
1448 | adapter->tx_ring[1].tdt = E1000_TDT1; | ||
1449 | /* Fall Through */ | ||
1450 | case 1: | 1323 | case 1: |
1451 | default: | 1324 | default: |
1452 | tdba = adapter->tx_ring[0].dma; | 1325 | tdba = adapter->tx_ring[0].dma; |
@@ -1477,6 +1350,10 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1477 | ipgr1 = DEFAULT_82542_TIPG_IPGR1; | 1350 | ipgr1 = DEFAULT_82542_TIPG_IPGR1; |
1478 | ipgr2 = DEFAULT_82542_TIPG_IPGR2; | 1351 | ipgr2 = DEFAULT_82542_TIPG_IPGR2; |
1479 | break; | 1352 | break; |
1353 | case e1000_80003es2lan: | ||
1354 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; | ||
1355 | ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; | ||
1356 | break; | ||
1480 | default: | 1357 | default: |
1481 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; | 1358 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; |
1482 | ipgr2 = DEFAULT_82543_TIPG_IPGR2; | 1359 | ipgr2 = DEFAULT_82543_TIPG_IPGR2; |
@@ -1497,10 +1374,13 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1497 | tctl = E1000_READ_REG(hw, TCTL); | 1374 | tctl = E1000_READ_REG(hw, TCTL); |
1498 | 1375 | ||
1499 | tctl &= ~E1000_TCTL_CT; | 1376 | tctl &= ~E1000_TCTL_CT; |
1500 | tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC | | 1377 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | |
1501 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | 1378 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); |
1502 | 1379 | ||
1503 | E1000_WRITE_REG(hw, TCTL, tctl); | 1380 | #ifdef DISABLE_MULR |
1381 | /* disable Multiple Reads for debugging */ | ||
1382 | tctl &= ~E1000_TCTL_MULR; | ||
1383 | #endif | ||
1504 | 1384 | ||
1505 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { | 1385 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { |
1506 | tarc = E1000_READ_REG(hw, TARC0); | 1386 | tarc = E1000_READ_REG(hw, TARC0); |
@@ -1513,6 +1393,15 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1513 | else | 1393 | else |
1514 | tarc |= (1 << 28); | 1394 | tarc |= (1 << 28); |
1515 | E1000_WRITE_REG(hw, TARC1, tarc); | 1395 | E1000_WRITE_REG(hw, TARC1, tarc); |
1396 | } else if (hw->mac_type == e1000_80003es2lan) { | ||
1397 | tarc = E1000_READ_REG(hw, TARC0); | ||
1398 | tarc |= 1; | ||
1399 | if (hw->media_type == e1000_media_type_internal_serdes) | ||
1400 | tarc |= (1 << 20); | ||
1401 | E1000_WRITE_REG(hw, TARC0, tarc); | ||
1402 | tarc = E1000_READ_REG(hw, TARC1); | ||
1403 | tarc |= 1; | ||
1404 | E1000_WRITE_REG(hw, TARC1, tarc); | ||
1516 | } | 1405 | } |
1517 | 1406 | ||
1518 | e1000_config_collision_dist(hw); | 1407 | e1000_config_collision_dist(hw); |
@@ -1531,6 +1420,9 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1531 | if (hw->mac_type == e1000_82544 && | 1420 | if (hw->mac_type == e1000_82544 && |
1532 | hw->bus_type == e1000_bus_type_pcix) | 1421 | hw->bus_type == e1000_bus_type_pcix) |
1533 | adapter->pcix_82544 = 1; | 1422 | adapter->pcix_82544 = 1; |
1423 | |||
1424 | E1000_WRITE_REG(hw, TCTL, tctl); | ||
1425 | |||
1534 | } | 1426 | } |
1535 | 1427 | ||
1536 | /** | 1428 | /** |
@@ -1790,12 +1682,9 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1790 | uint64_t rdba; | 1682 | uint64_t rdba; |
1791 | struct e1000_hw *hw = &adapter->hw; | 1683 | struct e1000_hw *hw = &adapter->hw; |
1792 | uint32_t rdlen, rctl, rxcsum, ctrl_ext; | 1684 | uint32_t rdlen, rctl, rxcsum, ctrl_ext; |
1793 | #ifdef CONFIG_E1000_MQ | ||
1794 | uint32_t reta, mrqc; | ||
1795 | int i; | ||
1796 | #endif | ||
1797 | 1685 | ||
1798 | if (adapter->rx_ps_pages) { | 1686 | if (adapter->rx_ps_pages) { |
1687 | /* this is a 32 byte descriptor */ | ||
1799 | rdlen = adapter->rx_ring[0].count * | 1688 | rdlen = adapter->rx_ring[0].count * |
1800 | sizeof(union e1000_rx_desc_packet_split); | 1689 | sizeof(union e1000_rx_desc_packet_split); |
1801 | adapter->clean_rx = e1000_clean_rx_irq_ps; | 1690 | adapter->clean_rx = e1000_clean_rx_irq_ps; |
@@ -1837,18 +1726,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1837 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 1726 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
1838 | * the Base and Length of the Rx Descriptor Ring */ | 1727 | * the Base and Length of the Rx Descriptor Ring */ |
1839 | switch (adapter->num_rx_queues) { | 1728 | switch (adapter->num_rx_queues) { |
1840 | #ifdef CONFIG_E1000_MQ | ||
1841 | case 2: | ||
1842 | rdba = adapter->rx_ring[1].dma; | ||
1843 | E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL)); | ||
1844 | E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32)); | ||
1845 | E1000_WRITE_REG(hw, RDLEN1, rdlen); | ||
1846 | E1000_WRITE_REG(hw, RDH1, 0); | ||
1847 | E1000_WRITE_REG(hw, RDT1, 0); | ||
1848 | adapter->rx_ring[1].rdh = E1000_RDH1; | ||
1849 | adapter->rx_ring[1].rdt = E1000_RDT1; | ||
1850 | /* Fall Through */ | ||
1851 | #endif | ||
1852 | case 1: | 1729 | case 1: |
1853 | default: | 1730 | default: |
1854 | rdba = adapter->rx_ring[0].dma; | 1731 | rdba = adapter->rx_ring[0].dma; |
@@ -1862,46 +1739,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1862 | break; | 1739 | break; |
1863 | } | 1740 | } |
1864 | 1741 | ||
1865 | #ifdef CONFIG_E1000_MQ | ||
1866 | if (adapter->num_rx_queues > 1) { | ||
1867 | uint32_t random[10]; | ||
1868 | |||
1869 | get_random_bytes(&random[0], 40); | ||
1870 | |||
1871 | if (hw->mac_type <= e1000_82572) { | ||
1872 | E1000_WRITE_REG(hw, RSSIR, 0); | ||
1873 | E1000_WRITE_REG(hw, RSSIM, 0); | ||
1874 | } | ||
1875 | |||
1876 | switch (adapter->num_rx_queues) { | ||
1877 | case 2: | ||
1878 | default: | ||
1879 | reta = 0x00800080; | ||
1880 | mrqc = E1000_MRQC_ENABLE_RSS_2Q; | ||
1881 | break; | ||
1882 | } | ||
1883 | |||
1884 | /* Fill out redirection table */ | ||
1885 | for (i = 0; i < 32; i++) | ||
1886 | E1000_WRITE_REG_ARRAY(hw, RETA, i, reta); | ||
1887 | /* Fill out hash function seeds */ | ||
1888 | for (i = 0; i < 10; i++) | ||
1889 | E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]); | ||
1890 | |||
1891 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | | ||
1892 | E1000_MRQC_RSS_FIELD_IPV4_TCP); | ||
1893 | E1000_WRITE_REG(hw, MRQC, mrqc); | ||
1894 | } | ||
1895 | |||
1896 | /* Multiqueue and packet checksumming are mutually exclusive. */ | ||
1897 | if (hw->mac_type >= e1000_82571) { | ||
1898 | rxcsum = E1000_READ_REG(hw, RXCSUM); | ||
1899 | rxcsum |= E1000_RXCSUM_PCSD; | ||
1900 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | ||
1901 | } | ||
1902 | |||
1903 | #else | ||
1904 | |||
1905 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ | 1742 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ |
1906 | if (hw->mac_type >= e1000_82543) { | 1743 | if (hw->mac_type >= e1000_82543) { |
1907 | rxcsum = E1000_READ_REG(hw, RXCSUM); | 1744 | rxcsum = E1000_READ_REG(hw, RXCSUM); |
@@ -1920,7 +1757,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1920 | } | 1757 | } |
1921 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | 1758 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); |
1922 | } | 1759 | } |
1923 | #endif /* CONFIG_E1000_MQ */ | ||
1924 | 1760 | ||
1925 | if (hw->mac_type == e1000_82573) | 1761 | if (hw->mac_type == e1000_82573) |
1926 | E1000_WRITE_REG(hw, ERT, 0x0100); | 1762 | E1000_WRITE_REG(hw, ERT, 0x0100); |
@@ -2392,7 +2228,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2392 | { | 2228 | { |
2393 | struct net_device *netdev = adapter->netdev; | 2229 | struct net_device *netdev = adapter->netdev; |
2394 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2230 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2395 | uint32_t link; | 2231 | uint32_t link, tctl; |
2396 | 2232 | ||
2397 | e1000_check_for_link(&adapter->hw); | 2233 | e1000_check_for_link(&adapter->hw); |
2398 | if (adapter->hw.mac_type == e1000_82573) { | 2234 | if (adapter->hw.mac_type == e1000_82573) { |
@@ -2418,20 +2254,61 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2418 | adapter->link_duplex == FULL_DUPLEX ? | 2254 | adapter->link_duplex == FULL_DUPLEX ? |
2419 | "Full Duplex" : "Half Duplex"); | 2255 | "Full Duplex" : "Half Duplex"); |
2420 | 2256 | ||
2421 | /* tweak tx_queue_len according to speed/duplex */ | 2257 | /* tweak tx_queue_len according to speed/duplex |
2258 | * and adjust the timeout factor */ | ||
2422 | netdev->tx_queue_len = adapter->tx_queue_len; | 2259 | netdev->tx_queue_len = adapter->tx_queue_len; |
2423 | adapter->tx_timeout_factor = 1; | 2260 | adapter->tx_timeout_factor = 1; |
2424 | if (adapter->link_duplex == HALF_DUPLEX) { | 2261 | adapter->txb2b = 1; |
2262 | switch (adapter->link_speed) { | ||
2263 | case SPEED_10: | ||
2264 | adapter->txb2b = 0; | ||
2265 | netdev->tx_queue_len = 10; | ||
2266 | adapter->tx_timeout_factor = 8; | ||
2267 | break; | ||
2268 | case SPEED_100: | ||
2269 | adapter->txb2b = 0; | ||
2270 | netdev->tx_queue_len = 100; | ||
2271 | /* maybe add some timeout factor ? */ | ||
2272 | break; | ||
2273 | } | ||
2274 | |||
2275 | if ((adapter->hw.mac_type == e1000_82571 || | ||
2276 | adapter->hw.mac_type == e1000_82572) && | ||
2277 | adapter->txb2b == 0) { | ||
2278 | #define SPEED_MODE_BIT (1 << 21) | ||
2279 | uint32_t tarc0; | ||
2280 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); | ||
2281 | tarc0 &= ~SPEED_MODE_BIT; | ||
2282 | E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); | ||
2283 | } | ||
2284 | |||
2285 | #ifdef NETIF_F_TSO | ||
2286 | /* disable TSO for pcie and 10/100 speeds, to avoid | ||
2287 | * some hardware issues */ | ||
2288 | if (!adapter->tso_force && | ||
2289 | adapter->hw.bus_type == e1000_bus_type_pci_express){ | ||
2425 | switch (adapter->link_speed) { | 2290 | switch (adapter->link_speed) { |
2426 | case SPEED_10: | 2291 | case SPEED_10: |
2427 | netdev->tx_queue_len = 10; | ||
2428 | adapter->tx_timeout_factor = 8; | ||
2429 | break; | ||
2430 | case SPEED_100: | 2292 | case SPEED_100: |
2431 | netdev->tx_queue_len = 100; | 2293 | DPRINTK(PROBE,INFO, |
2294 | "10/100 speed: disabling TSO\n"); | ||
2295 | netdev->features &= ~NETIF_F_TSO; | ||
2296 | break; | ||
2297 | case SPEED_1000: | ||
2298 | netdev->features |= NETIF_F_TSO; | ||
2299 | break; | ||
2300 | default: | ||
2301 | /* oops */ | ||
2432 | break; | 2302 | break; |
2433 | } | 2303 | } |
2434 | } | 2304 | } |
2305 | #endif | ||
2306 | |||
2307 | /* enable transmits in the hardware, need to do this | ||
2308 | * after setting TARC0 */ | ||
2309 | tctl = E1000_READ_REG(&adapter->hw, TCTL); | ||
2310 | tctl |= E1000_TCTL_EN; | ||
2311 | E1000_WRITE_REG(&adapter->hw, TCTL, tctl); | ||
2435 | 2312 | ||
2436 | netif_carrier_on(netdev); | 2313 | netif_carrier_on(netdev); |
2437 | netif_wake_queue(netdev); | 2314 | netif_wake_queue(netdev); |
@@ -2446,6 +2323,16 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2446 | netif_carrier_off(netdev); | 2323 | netif_carrier_off(netdev); |
2447 | netif_stop_queue(netdev); | 2324 | netif_stop_queue(netdev); |
2448 | mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); | 2325 | mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); |
2326 | |||
2327 | /* 80003ES2LAN workaround-- | ||
2328 | * For packet buffer work-around on link down event; | ||
2329 | * disable receives in the ISR and | ||
2330 | * reset device here in the watchdog | ||
2331 | */ | ||
2332 | if (adapter->hw.mac_type == e1000_80003es2lan) { | ||
2333 | /* reset device */ | ||
2334 | schedule_work(&adapter->reset_task); | ||
2335 | } | ||
2449 | } | 2336 | } |
2450 | 2337 | ||
2451 | e1000_smartspeed(adapter); | 2338 | e1000_smartspeed(adapter); |
@@ -2465,16 +2352,14 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2465 | 2352 | ||
2466 | e1000_update_adaptive(&adapter->hw); | 2353 | e1000_update_adaptive(&adapter->hw); |
2467 | 2354 | ||
2468 | #ifdef CONFIG_E1000_MQ | ||
2469 | txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id()); | ||
2470 | #endif | ||
2471 | if (!netif_carrier_ok(netdev)) { | 2355 | if (!netif_carrier_ok(netdev)) { |
2472 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { | 2356 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { |
2473 | /* We've lost link, so the controller stops DMA, | 2357 | /* We've lost link, so the controller stops DMA, |
2474 | * but we've got queued Tx work that's never going | 2358 | * but we've got queued Tx work that's never going |
2475 | * to get done, so reset controller to flush Tx. | 2359 | * to get done, so reset controller to flush Tx. |
2476 | * (Do the reset outside of interrupt context). */ | 2360 | * (Do the reset outside of interrupt context). */ |
2477 | schedule_work(&adapter->tx_timeout_task); | 2361 | adapter->tx_timeout_count++; |
2362 | schedule_work(&adapter->reset_task); | ||
2478 | } | 2363 | } |
2479 | } | 2364 | } |
2480 | 2365 | ||
@@ -2649,9 +2534,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2649 | /* Workaround for Controller erratum -- | 2534 | /* Workaround for Controller erratum -- |
2650 | * descriptor for non-tso packet in a linear SKB that follows a | 2535 | * descriptor for non-tso packet in a linear SKB that follows a |
2651 | * tso gets written back prematurely before the data is fully | 2536 | * tso gets written back prematurely before the data is fully |
2652 | * DMAd to the controller */ | 2537 | * DMA'd to the controller */ |
2653 | if (!skb->data_len && tx_ring->last_tx_tso && | 2538 | if (!skb->data_len && tx_ring->last_tx_tso && |
2654 | !skb_shinfo(skb)->tso_size) { | 2539 | !skb_shinfo(skb)->tso_size) { |
2655 | tx_ring->last_tx_tso = 0; | 2540 | tx_ring->last_tx_tso = 0; |
2656 | size -= 4; | 2541 | size -= 4; |
2657 | } | 2542 | } |
@@ -2840,7 +2725,7 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) | |||
2840 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) | 2725 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) |
2841 | return 0; | 2726 | return 0; |
2842 | } | 2727 | } |
2843 | if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { | 2728 | if (skb->len > MINIMUM_DHCP_PACKET_SIZE) { |
2844 | struct ethhdr *eth = (struct ethhdr *) skb->data; | 2729 | struct ethhdr *eth = (struct ethhdr *) skb->data; |
2845 | if ((htons(ETH_P_IP) == eth->h_proto)) { | 2730 | if ((htons(ETH_P_IP) == eth->h_proto)) { |
2846 | const struct iphdr *ip = | 2731 | const struct iphdr *ip = |
@@ -2881,11 +2766,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2881 | unsigned int f; | 2766 | unsigned int f; |
2882 | len -= skb->data_len; | 2767 | len -= skb->data_len; |
2883 | 2768 | ||
2884 | #ifdef CONFIG_E1000_MQ | ||
2885 | tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id()); | ||
2886 | #else | ||
2887 | tx_ring = adapter->tx_ring; | 2769 | tx_ring = adapter->tx_ring; |
2888 | #endif | ||
2889 | 2770 | ||
2890 | if (unlikely(skb->len <= 0)) { | 2771 | if (unlikely(skb->len <= 0)) { |
2891 | dev_kfree_skb_any(skb); | 2772 | dev_kfree_skb_any(skb); |
@@ -2905,21 +2786,29 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2905 | max_per_txd = min(mss << 2, max_per_txd); | 2786 | max_per_txd = min(mss << 2, max_per_txd); |
2906 | max_txd_pwr = fls(max_per_txd) - 1; | 2787 | max_txd_pwr = fls(max_per_txd) - 1; |
2907 | 2788 | ||
2908 | /* TSO Workaround for 82571/2 Controllers -- if skb->data | 2789 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data |
2909 | * points to just header, pull a few bytes of payload from | 2790 | * points to just header, pull a few bytes of payload from |
2910 | * frags into skb->data */ | 2791 | * frags into skb->data */ |
2911 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2792 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); |
2912 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) && | 2793 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { |
2913 | (adapter->hw.mac_type == e1000_82571 || | 2794 | switch (adapter->hw.mac_type) { |
2914 | adapter->hw.mac_type == e1000_82572)) { | 2795 | unsigned int pull_size; |
2915 | unsigned int pull_size; | 2796 | case e1000_82571: |
2916 | pull_size = min((unsigned int)4, skb->data_len); | 2797 | case e1000_82572: |
2917 | if (!__pskb_pull_tail(skb, pull_size)) { | 2798 | case e1000_82573: |
2918 | printk(KERN_ERR "__pskb_pull_tail failed.\n"); | 2799 | pull_size = min((unsigned int)4, skb->data_len); |
2919 | dev_kfree_skb_any(skb); | 2800 | if (!__pskb_pull_tail(skb, pull_size)) { |
2920 | return NETDEV_TX_OK; | 2801 | printk(KERN_ERR |
2802 | "__pskb_pull_tail failed.\n"); | ||
2803 | dev_kfree_skb_any(skb); | ||
2804 | return NETDEV_TX_OK; | ||
2805 | } | ||
2806 | len = skb->len - skb->data_len; | ||
2807 | break; | ||
2808 | default: | ||
2809 | /* do nothing */ | ||
2810 | break; | ||
2921 | } | 2811 | } |
2922 | len = skb->len - skb->data_len; | ||
2923 | } | 2812 | } |
2924 | } | 2813 | } |
2925 | 2814 | ||
@@ -2935,7 +2824,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2935 | #ifdef NETIF_F_TSO | 2824 | #ifdef NETIF_F_TSO |
2936 | /* Controller Erratum workaround */ | 2825 | /* Controller Erratum workaround */ |
2937 | if (!skb->data_len && tx_ring->last_tx_tso && | 2826 | if (!skb->data_len && tx_ring->last_tx_tso && |
2938 | !skb_shinfo(skb)->tso_size) | 2827 | !skb_shinfo(skb)->tso_size) |
2939 | count++; | 2828 | count++; |
2940 | #endif | 2829 | #endif |
2941 | 2830 | ||
@@ -2958,7 +2847,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2958 | if (adapter->pcix_82544) | 2847 | if (adapter->pcix_82544) |
2959 | count += nr_frags; | 2848 | count += nr_frags; |
2960 | 2849 | ||
2961 | if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) | 2850 | |
2851 | if (adapter->hw.tx_pkt_filtering && | ||
2852 | (adapter->hw.mac_type == e1000_82573)) | ||
2962 | e1000_transfer_dhcp_info(adapter, skb); | 2853 | e1000_transfer_dhcp_info(adapter, skb); |
2963 | 2854 | ||
2964 | local_irq_save(flags); | 2855 | local_irq_save(flags); |
@@ -3036,15 +2927,15 @@ e1000_tx_timeout(struct net_device *netdev) | |||
3036 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2927 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3037 | 2928 | ||
3038 | /* Do the reset outside of interrupt context */ | 2929 | /* Do the reset outside of interrupt context */ |
3039 | schedule_work(&adapter->tx_timeout_task); | 2930 | adapter->tx_timeout_count++; |
2931 | schedule_work(&adapter->reset_task); | ||
3040 | } | 2932 | } |
3041 | 2933 | ||
3042 | static void | 2934 | static void |
3043 | e1000_tx_timeout_task(struct net_device *netdev) | 2935 | e1000_reset_task(struct net_device *netdev) |
3044 | { | 2936 | { |
3045 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2937 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3046 | 2938 | ||
3047 | adapter->tx_timeout_count++; | ||
3048 | e1000_down(adapter); | 2939 | e1000_down(adapter); |
3049 | e1000_up(adapter); | 2940 | e1000_up(adapter); |
3050 | } | 2941 | } |
@@ -3079,6 +2970,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3079 | { | 2970 | { |
3080 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2971 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3081 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 2972 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
2973 | uint16_t eeprom_data = 0; | ||
3082 | 2974 | ||
3083 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | 2975 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || |
3084 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 2976 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
@@ -3090,14 +2982,28 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3090 | switch (adapter->hw.mac_type) { | 2982 | switch (adapter->hw.mac_type) { |
3091 | case e1000_82542_rev2_0: | 2983 | case e1000_82542_rev2_0: |
3092 | case e1000_82542_rev2_1: | 2984 | case e1000_82542_rev2_1: |
3093 | case e1000_82573: | ||
3094 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 2985 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
3095 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); | 2986 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); |
3096 | return -EINVAL; | 2987 | return -EINVAL; |
3097 | } | 2988 | } |
3098 | break; | 2989 | break; |
2990 | case e1000_82573: | ||
2991 | /* only enable jumbo frames if ASPM is disabled completely | ||
2992 | * this means both bits must be zero in 0x1A bits 3:2 */ | ||
2993 | e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1, | ||
2994 | &eeprom_data); | ||
2995 | if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) { | ||
2996 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | ||
2997 | DPRINTK(PROBE, ERR, | ||
2998 | "Jumbo Frames not supported.\n"); | ||
2999 | return -EINVAL; | ||
3000 | } | ||
3001 | break; | ||
3002 | } | ||
3003 | /* fall through to get support */ | ||
3099 | case e1000_82571: | 3004 | case e1000_82571: |
3100 | case e1000_82572: | 3005 | case e1000_82572: |
3006 | case e1000_80003es2lan: | ||
3101 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 | 3007 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 |
3102 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { | 3008 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { |
3103 | DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n"); | 3009 | DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n"); |
@@ -3251,11 +3157,15 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3251 | 3157 | ||
3252 | /* Rx Errors */ | 3158 | /* Rx Errors */ |
3253 | 3159 | ||
3160 | /* RLEC on some newer hardware can be incorrect so build | ||
3161 | * our own version based on RUC and ROC */ | ||
3254 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + | 3162 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + |
3255 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 3163 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3256 | adapter->stats.rlec + adapter->stats.cexterr; | 3164 | adapter->stats.ruc + adapter->stats.roc + |
3165 | adapter->stats.cexterr; | ||
3257 | adapter->net_stats.rx_dropped = 0; | 3166 | adapter->net_stats.rx_dropped = 0; |
3258 | adapter->net_stats.rx_length_errors = adapter->stats.rlec; | 3167 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + |
3168 | adapter->stats.roc; | ||
3259 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | 3169 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; |
3260 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; | 3170 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; |
3261 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; | 3171 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; |
@@ -3288,29 +3198,6 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3288 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 3198 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
3289 | } | 3199 | } |
3290 | 3200 | ||
3291 | #ifdef CONFIG_E1000_MQ | ||
3292 | void | ||
3293 | e1000_rx_schedule(void *data) | ||
3294 | { | ||
3295 | struct net_device *poll_dev, *netdev = data; | ||
3296 | struct e1000_adapter *adapter = netdev->priv; | ||
3297 | int this_cpu = get_cpu(); | ||
3298 | |||
3299 | poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu); | ||
3300 | if (poll_dev == NULL) { | ||
3301 | put_cpu(); | ||
3302 | return; | ||
3303 | } | ||
3304 | |||
3305 | if (likely(netif_rx_schedule_prep(poll_dev))) | ||
3306 | __netif_rx_schedule(poll_dev); | ||
3307 | else | ||
3308 | e1000_irq_enable(adapter); | ||
3309 | |||
3310 | put_cpu(); | ||
3311 | } | ||
3312 | #endif | ||
3313 | |||
3314 | /** | 3201 | /** |
3315 | * e1000_intr - Interrupt Handler | 3202 | * e1000_intr - Interrupt Handler |
3316 | * @irq: interrupt number | 3203 | * @irq: interrupt number |
@@ -3324,7 +3211,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3324 | struct net_device *netdev = data; | 3211 | struct net_device *netdev = data; |
3325 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3212 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3326 | struct e1000_hw *hw = &adapter->hw; | 3213 | struct e1000_hw *hw = &adapter->hw; |
3327 | uint32_t icr = E1000_READ_REG(hw, ICR); | 3214 | uint32_t rctl, icr = E1000_READ_REG(hw, ICR); |
3328 | #ifndef CONFIG_E1000_NAPI | 3215 | #ifndef CONFIG_E1000_NAPI |
3329 | int i; | 3216 | int i; |
3330 | #else | 3217 | #else |
@@ -3346,6 +3233,17 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3346 | 3233 | ||
3347 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3234 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3348 | hw->get_link_status = 1; | 3235 | hw->get_link_status = 1; |
3236 | /* 80003ES2LAN workaround-- | ||
3237 | * For packet buffer work-around on link down event; | ||
3238 | * disable receives here in the ISR and | ||
3239 | * reset adapter in watchdog | ||
3240 | */ | ||
3241 | if (netif_carrier_ok(netdev) && | ||
3242 | (adapter->hw.mac_type == e1000_80003es2lan)) { | ||
3243 | /* disable receives */ | ||
3244 | rctl = E1000_READ_REG(hw, RCTL); | ||
3245 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | ||
3246 | } | ||
3349 | mod_timer(&adapter->watchdog_timer, jiffies); | 3247 | mod_timer(&adapter->watchdog_timer, jiffies); |
3350 | } | 3248 | } |
3351 | 3249 | ||
@@ -3355,26 +3253,11 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3355 | E1000_WRITE_REG(hw, IMC, ~0); | 3253 | E1000_WRITE_REG(hw, IMC, ~0); |
3356 | E1000_WRITE_FLUSH(hw); | 3254 | E1000_WRITE_FLUSH(hw); |
3357 | } | 3255 | } |
3358 | #ifdef CONFIG_E1000_MQ | ||
3359 | if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { | ||
3360 | /* We must setup the cpumask once count == 0 since | ||
3361 | * each cpu bit is cleared when the work is done. */ | ||
3362 | adapter->rx_sched_call_data.cpumask = adapter->cpumask; | ||
3363 | atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem); | ||
3364 | atomic_set(&adapter->rx_sched_call_data.count, | ||
3365 | adapter->num_rx_queues); | ||
3366 | smp_call_async_mask(&adapter->rx_sched_call_data); | ||
3367 | } else { | ||
3368 | printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); | ||
3369 | } | ||
3370 | #else /* if !CONFIG_E1000_MQ */ | ||
3371 | if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) | 3256 | if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) |
3372 | __netif_rx_schedule(&adapter->polling_netdev[0]); | 3257 | __netif_rx_schedule(&adapter->polling_netdev[0]); |
3373 | else | 3258 | else |
3374 | e1000_irq_enable(adapter); | 3259 | e1000_irq_enable(adapter); |
3375 | #endif /* CONFIG_E1000_MQ */ | 3260 | #else |
3376 | |||
3377 | #else /* if !CONFIG_E1000_NAPI */ | ||
3378 | /* Writing IMC and IMS is needed for 82547. | 3261 | /* Writing IMC and IMS is needed for 82547. |
3379 | * Due to Hub Link bus being occupied, an interrupt | 3262 | * Due to Hub Link bus being occupied, an interrupt |
3380 | * de-assertion message is not able to be sent. | 3263 | * de-assertion message is not able to be sent. |
@@ -3398,7 +3281,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3398 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | 3281 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) |
3399 | e1000_irq_enable(adapter); | 3282 | e1000_irq_enable(adapter); |
3400 | 3283 | ||
3401 | #endif /* CONFIG_E1000_NAPI */ | 3284 | #endif |
3402 | 3285 | ||
3403 | return IRQ_HANDLED; | 3286 | return IRQ_HANDLED; |
3404 | } | 3287 | } |
@@ -3474,6 +3357,9 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3474 | struct e1000_tx_desc *tx_desc, *eop_desc; | 3357 | struct e1000_tx_desc *tx_desc, *eop_desc; |
3475 | struct e1000_buffer *buffer_info; | 3358 | struct e1000_buffer *buffer_info; |
3476 | unsigned int i, eop; | 3359 | unsigned int i, eop; |
3360 | #ifdef CONFIG_E1000_NAPI | ||
3361 | unsigned int count = 0; | ||
3362 | #endif | ||
3477 | boolean_t cleaned = FALSE; | 3363 | boolean_t cleaned = FALSE; |
3478 | 3364 | ||
3479 | i = tx_ring->next_to_clean; | 3365 | i = tx_ring->next_to_clean; |
@@ -3486,21 +3372,20 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3486 | buffer_info = &tx_ring->buffer_info[i]; | 3372 | buffer_info = &tx_ring->buffer_info[i]; |
3487 | cleaned = (i == eop); | 3373 | cleaned = (i == eop); |
3488 | 3374 | ||
3489 | #ifdef CONFIG_E1000_MQ | ||
3490 | tx_ring->tx_stats.bytes += buffer_info->length; | ||
3491 | #endif | ||
3492 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 3375 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
3493 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); | 3376 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); |
3494 | 3377 | ||
3495 | if (unlikely(++i == tx_ring->count)) i = 0; | 3378 | if (unlikely(++i == tx_ring->count)) i = 0; |
3496 | } | 3379 | } |
3497 | 3380 | ||
3498 | #ifdef CONFIG_E1000_MQ | ||
3499 | tx_ring->tx_stats.packets++; | ||
3500 | #endif | ||
3501 | 3381 | ||
3502 | eop = tx_ring->buffer_info[i].next_to_watch; | 3382 | eop = tx_ring->buffer_info[i].next_to_watch; |
3503 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 3383 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
3384 | #ifdef CONFIG_E1000_NAPI | ||
3385 | #define E1000_TX_WEIGHT 64 | ||
3386 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | ||
3387 | if (count++ == E1000_TX_WEIGHT) break; | ||
3388 | #endif | ||
3504 | } | 3389 | } |
3505 | 3390 | ||
3506 | tx_ring->next_to_clean = i; | 3391 | tx_ring->next_to_clean = i; |
@@ -3519,7 +3404,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3519 | adapter->detect_tx_hung = FALSE; | 3404 | adapter->detect_tx_hung = FALSE; |
3520 | if (tx_ring->buffer_info[eop].dma && | 3405 | if (tx_ring->buffer_info[eop].dma && |
3521 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + | 3406 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + |
3522 | adapter->tx_timeout_factor * HZ) | 3407 | (adapter->tx_timeout_factor * HZ)) |
3523 | && !(E1000_READ_REG(&adapter->hw, STATUS) & | 3408 | && !(E1000_READ_REG(&adapter->hw, STATUS) & |
3524 | E1000_STATUS_TXOFF)) { | 3409 | E1000_STATUS_TXOFF)) { |
3525 | 3410 | ||
@@ -3644,10 +3529,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3644 | skb = buffer_info->skb; | 3529 | skb = buffer_info->skb; |
3645 | buffer_info->skb = NULL; | 3530 | buffer_info->skb = NULL; |
3646 | 3531 | ||
3532 | prefetch(skb->data - NET_IP_ALIGN); | ||
3533 | |||
3647 | if (++i == rx_ring->count) i = 0; | 3534 | if (++i == rx_ring->count) i = 0; |
3648 | next_rxd = E1000_RX_DESC(*rx_ring, i); | 3535 | next_rxd = E1000_RX_DESC(*rx_ring, i); |
3536 | prefetch(next_rxd); | ||
3537 | |||
3649 | next_buffer = &rx_ring->buffer_info[i]; | 3538 | next_buffer = &rx_ring->buffer_info[i]; |
3650 | next_skb = next_buffer->skb; | 3539 | next_skb = next_buffer->skb; |
3540 | prefetch(next_skb->data - NET_IP_ALIGN); | ||
3651 | 3541 | ||
3652 | cleaned = TRUE; | 3542 | cleaned = TRUE; |
3653 | cleaned_count++; | 3543 | cleaned_count++; |
@@ -3733,10 +3623,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3733 | } | 3623 | } |
3734 | #endif /* CONFIG_E1000_NAPI */ | 3624 | #endif /* CONFIG_E1000_NAPI */ |
3735 | netdev->last_rx = jiffies; | 3625 | netdev->last_rx = jiffies; |
3736 | #ifdef CONFIG_E1000_MQ | ||
3737 | rx_ring->rx_stats.packets++; | ||
3738 | rx_ring->rx_stats.bytes += length; | ||
3739 | #endif | ||
3740 | 3626 | ||
3741 | next_desc: | 3627 | next_desc: |
3742 | rx_desc->status = 0; | 3628 | rx_desc->status = 0; |
@@ -3747,6 +3633,7 @@ next_desc: | |||
3747 | cleaned_count = 0; | 3633 | cleaned_count = 0; |
3748 | } | 3634 | } |
3749 | 3635 | ||
3636 | /* use prefetched values */ | ||
3750 | rx_desc = next_rxd; | 3637 | rx_desc = next_rxd; |
3751 | buffer_info = next_buffer; | 3638 | buffer_info = next_buffer; |
3752 | } | 3639 | } |
@@ -3789,9 +3676,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3789 | i = rx_ring->next_to_clean; | 3676 | i = rx_ring->next_to_clean; |
3790 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3677 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
3791 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | 3678 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
3792 | buffer_info = &rx_ring->buffer_info[i]; | ||
3793 | 3679 | ||
3794 | while (staterr & E1000_RXD_STAT_DD) { | 3680 | while (staterr & E1000_RXD_STAT_DD) { |
3681 | buffer_info = &rx_ring->buffer_info[i]; | ||
3795 | ps_page = &rx_ring->ps_page[i]; | 3682 | ps_page = &rx_ring->ps_page[i]; |
3796 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 3683 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
3797 | #ifdef CONFIG_E1000_NAPI | 3684 | #ifdef CONFIG_E1000_NAPI |
@@ -3801,10 +3688,16 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3801 | #endif | 3688 | #endif |
3802 | skb = buffer_info->skb; | 3689 | skb = buffer_info->skb; |
3803 | 3690 | ||
3691 | /* in the packet split case this is header only */ | ||
3692 | prefetch(skb->data - NET_IP_ALIGN); | ||
3693 | |||
3804 | if (++i == rx_ring->count) i = 0; | 3694 | if (++i == rx_ring->count) i = 0; |
3805 | next_rxd = E1000_RX_DESC_PS(*rx_ring, i); | 3695 | next_rxd = E1000_RX_DESC_PS(*rx_ring, i); |
3696 | prefetch(next_rxd); | ||
3697 | |||
3806 | next_buffer = &rx_ring->buffer_info[i]; | 3698 | next_buffer = &rx_ring->buffer_info[i]; |
3807 | next_skb = next_buffer->skb; | 3699 | next_skb = next_buffer->skb; |
3700 | prefetch(next_skb->data - NET_IP_ALIGN); | ||
3808 | 3701 | ||
3809 | cleaned = TRUE; | 3702 | cleaned = TRUE; |
3810 | cleaned_count++; | 3703 | cleaned_count++; |
@@ -3836,23 +3729,49 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3836 | /* Good Receive */ | 3729 | /* Good Receive */ |
3837 | skb_put(skb, length); | 3730 | skb_put(skb, length); |
3838 | 3731 | ||
3732 | { | ||
3733 | /* this looks ugly, but it seems compiler issues make it | ||
3734 | more efficient than reusing j */ | ||
3735 | int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); | ||
3736 | |||
3737 | /* page alloc/put takes too long and effects small packet | ||
3738 | * throughput, so unsplit small packets and save the alloc/put*/ | ||
3739 | if (l1 && ((length + l1) < E1000_CB_LENGTH)) { | ||
3740 | u8 *vaddr; | ||
3741 | /* there is no documentation about how to call | ||
3742 | * kmap_atomic, so we can't hold the mapping | ||
3743 | * very long */ | ||
3744 | pci_dma_sync_single_for_cpu(pdev, | ||
3745 | ps_page_dma->ps_page_dma[0], | ||
3746 | PAGE_SIZE, | ||
3747 | PCI_DMA_FROMDEVICE); | ||
3748 | vaddr = kmap_atomic(ps_page->ps_page[0], | ||
3749 | KM_SKB_DATA_SOFTIRQ); | ||
3750 | memcpy(skb->tail, vaddr, l1); | ||
3751 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | ||
3752 | pci_dma_sync_single_for_device(pdev, | ||
3753 | ps_page_dma->ps_page_dma[0], | ||
3754 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
3755 | skb_put(skb, l1); | ||
3756 | length += l1; | ||
3757 | goto copydone; | ||
3758 | } /* if */ | ||
3759 | } | ||
3760 | |||
3839 | for (j = 0; j < adapter->rx_ps_pages; j++) { | 3761 | for (j = 0; j < adapter->rx_ps_pages; j++) { |
3840 | if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) | 3762 | if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j]))) |
3841 | break; | 3763 | break; |
3842 | |||
3843 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], | 3764 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], |
3844 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 3765 | PAGE_SIZE, PCI_DMA_FROMDEVICE); |
3845 | ps_page_dma->ps_page_dma[j] = 0; | 3766 | ps_page_dma->ps_page_dma[j] = 0; |
3846 | skb_shinfo(skb)->frags[j].page = | 3767 | skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0, |
3847 | ps_page->ps_page[j]; | 3768 | length); |
3848 | ps_page->ps_page[j] = NULL; | 3769 | ps_page->ps_page[j] = NULL; |
3849 | skb_shinfo(skb)->frags[j].page_offset = 0; | ||
3850 | skb_shinfo(skb)->frags[j].size = length; | ||
3851 | skb_shinfo(skb)->nr_frags++; | ||
3852 | skb->len += length; | 3770 | skb->len += length; |
3853 | skb->data_len += length; | 3771 | skb->data_len += length; |
3854 | } | 3772 | } |
3855 | 3773 | ||
3774 | copydone: | ||
3856 | e1000_rx_checksum(adapter, staterr, | 3775 | e1000_rx_checksum(adapter, staterr, |
3857 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | 3776 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); |
3858 | skb->protocol = eth_type_trans(skb, netdev); | 3777 | skb->protocol = eth_type_trans(skb, netdev); |
@@ -3878,10 +3797,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3878 | } | 3797 | } |
3879 | #endif /* CONFIG_E1000_NAPI */ | 3798 | #endif /* CONFIG_E1000_NAPI */ |
3880 | netdev->last_rx = jiffies; | 3799 | netdev->last_rx = jiffies; |
3881 | #ifdef CONFIG_E1000_MQ | ||
3882 | rx_ring->rx_stats.packets++; | ||
3883 | rx_ring->rx_stats.bytes += length; | ||
3884 | #endif | ||
3885 | 3800 | ||
3886 | next_desc: | 3801 | next_desc: |
3887 | rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); | 3802 | rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); |
@@ -3893,6 +3808,7 @@ next_desc: | |||
3893 | cleaned_count = 0; | 3808 | cleaned_count = 0; |
3894 | } | 3809 | } |
3895 | 3810 | ||
3811 | /* use prefetched values */ | ||
3896 | rx_desc = next_rxd; | 3812 | rx_desc = next_rxd; |
3897 | buffer_info = next_buffer; | 3813 | buffer_info = next_buffer; |
3898 | 3814 | ||
@@ -3936,7 +3852,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3936 | goto map_skb; | 3852 | goto map_skb; |
3937 | } | 3853 | } |
3938 | 3854 | ||
3939 | |||
3940 | if (unlikely(!skb)) { | 3855 | if (unlikely(!skb)) { |
3941 | /* Better luck next round */ | 3856 | /* Better luck next round */ |
3942 | adapter->alloc_rx_buff_failed++; | 3857 | adapter->alloc_rx_buff_failed++; |
@@ -4242,7 +4157,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4242 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4157 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4243 | return -EIO; | 4158 | return -EIO; |
4244 | } | 4159 | } |
4245 | if (adapter->hw.phy_type == e1000_phy_m88) { | 4160 | if (adapter->hw.phy_type == e1000_media_type_copper) { |
4246 | switch (data->reg_num) { | 4161 | switch (data->reg_num) { |
4247 | case PHY_CTRL: | 4162 | case PHY_CTRL: |
4248 | if (mii_reg & MII_CR_POWER_DOWN) | 4163 | if (mii_reg & MII_CR_POWER_DOWN) |
@@ -4258,8 +4173,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4258 | else | 4173 | else |
4259 | spddplx = SPEED_10; | 4174 | spddplx = SPEED_10; |
4260 | spddplx += (mii_reg & 0x100) | 4175 | spddplx += (mii_reg & 0x100) |
4261 | ? FULL_DUPLEX : | 4176 | ? DUPLEX_FULL : |
4262 | HALF_DUPLEX; | 4177 | DUPLEX_HALF; |
4263 | retval = e1000_set_spd_dplx(adapter, | 4178 | retval = e1000_set_spd_dplx(adapter, |
4264 | spddplx); | 4179 | spddplx); |
4265 | if (retval) { | 4180 | if (retval) { |
@@ -4489,8 +4404,8 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) | |||
4489 | } | 4404 | } |
4490 | 4405 | ||
4491 | #ifdef CONFIG_PM | 4406 | #ifdef CONFIG_PM |
4492 | /* these functions save and restore 16 or 64 dwords (64-256 bytes) of config | 4407 | /* Save/restore 16 or 64 dwords of PCI config space depending on which |
4493 | * space versus the 64 bytes that pci_[save|restore]_state handle | 4408 | * bus we're on (PCI(X) vs. PCI-E) |
4494 | */ | 4409 | */ |
4495 | #define PCIE_CONFIG_SPACE_LEN 256 | 4410 | #define PCIE_CONFIG_SPACE_LEN 256 |
4496 | #define PCI_CONFIG_SPACE_LEN 64 | 4411 | #define PCI_CONFIG_SPACE_LEN 64 |
@@ -4500,6 +4415,7 @@ e1000_pci_save_state(struct e1000_adapter *adapter) | |||
4500 | struct pci_dev *dev = adapter->pdev; | 4415 | struct pci_dev *dev = adapter->pdev; |
4501 | int size; | 4416 | int size; |
4502 | int i; | 4417 | int i; |
4418 | |||
4503 | if (adapter->hw.mac_type >= e1000_82571) | 4419 | if (adapter->hw.mac_type >= e1000_82571) |
4504 | size = PCIE_CONFIG_SPACE_LEN; | 4420 | size = PCIE_CONFIG_SPACE_LEN; |
4505 | else | 4421 | else |
@@ -4523,8 +4439,10 @@ e1000_pci_restore_state(struct e1000_adapter *adapter) | |||
4523 | struct pci_dev *dev = adapter->pdev; | 4439 | struct pci_dev *dev = adapter->pdev; |
4524 | int size; | 4440 | int size; |
4525 | int i; | 4441 | int i; |
4442 | |||
4526 | if (adapter->config_space == NULL) | 4443 | if (adapter->config_space == NULL) |
4527 | return; | 4444 | return; |
4445 | |||
4528 | if (adapter->hw.mac_type >= e1000_82571) | 4446 | if (adapter->hw.mac_type >= e1000_82571) |
4529 | size = PCIE_CONFIG_SPACE_LEN; | 4447 | size = PCIE_CONFIG_SPACE_LEN; |
4530 | else | 4448 | else |
@@ -4552,8 +4470,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4552 | e1000_down(adapter); | 4470 | e1000_down(adapter); |
4553 | 4471 | ||
4554 | #ifdef CONFIG_PM | 4472 | #ifdef CONFIG_PM |
4555 | /* implement our own version of pci_save_state(pdev) because pci | 4473 | /* Implement our own version of pci_save_state(pdev) because pci- |
4556 | * express adapters have larger 256 byte config spaces */ | 4474 | * express adapters have 256-byte config spaces. */ |
4557 | retval = e1000_pci_save_state(adapter); | 4475 | retval = e1000_pci_save_state(adapter); |
4558 | if (retval) | 4476 | if (retval) |
4559 | return retval; | 4477 | return retval; |
@@ -4610,7 +4528,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4610 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); | 4528 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); |
4611 | if (retval) | 4529 | if (retval) |
4612 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | 4530 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); |
4613 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ | 4531 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); |
4614 | if (retval) | 4532 | if (retval) |
4615 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | 4533 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); |
4616 | } | 4534 | } |
@@ -4626,7 +4544,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4626 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | 4544 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); |
4627 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); | 4545 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); |
4628 | if (retval) | 4546 | if (retval) |
4629 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | 4547 | DPRINTK(PROBE, ERR, |
4548 | "Error enabling D3 cold wake\n"); | ||
4630 | } | 4549 | } |
4631 | } | 4550 | } |
4632 | 4551 | ||
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index 3768d83cd577..e0a4d37d1b85 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -268,7 +268,7 @@ e1000_validate_option(int *value, struct e1000_option *opt, | |||
268 | BUG(); | 268 | BUG(); |
269 | } | 269 | } |
270 | 270 | ||
271 | DPRINTK(PROBE, INFO, "Invalid %s specified (%i) %s\n", | 271 | DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n", |
272 | opt->name, *value, opt->err); | 272 | opt->name, *value, opt->err); |
273 | *value = opt->def; | 273 | *value = opt->def; |
274 | return -1; | 274 | return -1; |
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index 8c62ced2c9b2..467fc861360d 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c | |||
@@ -27,7 +27,7 @@ | |||
27 | rx_align support: enables rx DMA without causing unaligned accesses. | 27 | rx_align support: enables rx DMA without causing unaligned accesses. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | static const char *version = | 30 | static const char * const version = |
31 | "eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n" | 31 | "eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n" |
32 | "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"; | 32 | "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"; |
33 | 33 | ||
@@ -469,7 +469,7 @@ static const char i82558_config_cmd[CONFIG_DATA_SIZE] = { | |||
469 | 0x31, 0x05, }; | 469 | 0x31, 0x05, }; |
470 | 470 | ||
471 | /* PHY media interface chips. */ | 471 | /* PHY media interface chips. */ |
472 | static const char *phys[] = { | 472 | static const char * const phys[] = { |
473 | "None", "i82553-A/B", "i82553-C", "i82503", | 473 | "None", "i82553-A/B", "i82553-C", "i82503", |
474 | "DP83840", "80c240", "80c24", "i82555", | 474 | "DP83840", "80c240", "80c24", "i82555", |
475 | "unknown-8", "unknown-9", "DP83840A", "unknown-11", | 475 | "unknown-8", "unknown-9", "DP83840A", "unknown-11", |
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index f119ec4e89ea..2f7b86837fe8 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c | |||
@@ -225,7 +225,7 @@ struct epic_chip_info { | |||
225 | 225 | ||
226 | 226 | ||
227 | /* indexed by chip_t */ | 227 | /* indexed by chip_t */ |
228 | static struct epic_chip_info pci_id_tbl[] = { | 228 | static const struct epic_chip_info pci_id_tbl[] = { |
229 | { "SMSC EPIC/100 83c170", | 229 | { "SMSC EPIC/100 83c170", |
230 | EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN }, | 230 | EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN }, |
231 | { "SMSC EPIC/100 83c170", | 231 | { "SMSC EPIC/100 83c170", |
@@ -291,7 +291,7 @@ enum CommandBits { | |||
291 | RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) | 291 | RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) |
292 | #define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) | 292 | #define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) |
293 | 293 | ||
294 | static u16 media2miictl[16] = { | 294 | static const u16 media2miictl[16] = { |
295 | 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, | 295 | 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, |
296 | 0, 0, 0, 0, 0, 0, 0, 0 }; | 296 | 0, 0, 0, 0, 0, 0, 0, 0 }; |
297 | 297 | ||
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index f32a6b3acb2a..b67545be2caa 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c | |||
@@ -161,6 +161,7 @@ static char *version = | |||
161 | #include <linux/etherdevice.h> | 161 | #include <linux/etherdevice.h> |
162 | #include <linux/skbuff.h> | 162 | #include <linux/skbuff.h> |
163 | #include <linux/bitops.h> | 163 | #include <linux/bitops.h> |
164 | #include <linux/jiffies.h> | ||
164 | 165 | ||
165 | #include <asm/system.h> | 166 | #include <asm/system.h> |
166 | #include <asm/io.h> | 167 | #include <asm/io.h> |
@@ -754,7 +755,7 @@ static void eth16i_set_port(int ioaddr, int porttype) | |||
754 | 755 | ||
755 | static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) | 756 | static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) |
756 | { | 757 | { |
757 | int starttime; | 758 | unsigned long starttime; |
758 | 759 | ||
759 | outb(0xff, ioaddr + TX_STATUS_REG); | 760 | outb(0xff, ioaddr + TX_STATUS_REG); |
760 | 761 | ||
@@ -765,7 +766,7 @@ static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) | |||
765 | outb(TX_START | 1, ioaddr + TRANSMIT_START_REG); | 766 | outb(TX_START | 1, ioaddr + TRANSMIT_START_REG); |
766 | 767 | ||
767 | while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) { | 768 | while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) { |
768 | if( (jiffies - starttime) > TX_TIMEOUT) { | 769 | if( time_after(jiffies, starttime + TX_TIMEOUT)) { |
769 | return -1; | 770 | return -1; |
770 | } | 771 | } |
771 | } | 772 | } |
@@ -775,18 +776,18 @@ static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) | |||
775 | 776 | ||
776 | static int eth16i_receive_probe_packet(int ioaddr) | 777 | static int eth16i_receive_probe_packet(int ioaddr) |
777 | { | 778 | { |
778 | int starttime; | 779 | unsigned long starttime; |
779 | 780 | ||
780 | starttime = jiffies; | 781 | starttime = jiffies; |
781 | 782 | ||
782 | while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) { | 783 | while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) { |
783 | if( (jiffies - starttime) > TX_TIMEOUT) { | 784 | if( time_after(jiffies, starttime + TX_TIMEOUT)) { |
784 | 785 | ||
785 | if(eth16i_debug > 1) | 786 | if(eth16i_debug > 1) |
786 | printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n"); | 787 | printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n"); |
787 | starttime = jiffies; | 788 | starttime = jiffies; |
788 | while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) { | 789 | while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) { |
789 | if( (jiffies - starttime) > TX_TIMEOUT) { | 790 | if( time_after(jiffies, starttime + TX_TIMEOUT)) { |
790 | if(eth16i_debug > 1) | 791 | if(eth16i_debug > 1) |
791 | printk(KERN_DEBUG "Timeout occurred waiting receive packet\n"); | 792 | printk(KERN_DEBUG "Timeout occurred waiting receive packet\n"); |
792 | return -1; | 793 | return -1; |
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 55dbe9a3fd56..a8449265e5fd 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c | |||
@@ -160,7 +160,7 @@ struct chip_info { | |||
160 | int flags; | 160 | int flags; |
161 | }; | 161 | }; |
162 | 162 | ||
163 | static struct chip_info skel_netdrv_tbl[] = { | 163 | static const struct chip_info skel_netdrv_tbl[] = { |
164 | {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, | 164 | {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, |
165 | {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR}, | 165 | {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR}, |
166 | {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, | 166 | {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 3682ec61e8a8..e7fc28b07e5a 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -102,6 +102,9 @@ | |||
102 | * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. | 102 | * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. |
103 | * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single | 103 | * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single |
104 | * 0.49: 10 Dec 2005: Fix tso for large buffers. | 104 | * 0.49: 10 Dec 2005: Fix tso for large buffers. |
105 | * 0.50: 20 Jan 2006: Add 8021pq tagging support. | ||
106 | * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. | ||
107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. | ||
105 | * | 108 | * |
106 | * Known bugs: | 109 | * Known bugs: |
107 | * We suspect that on some hardware no TX done interrupts are generated. | 110 | * We suspect that on some hardware no TX done interrupts are generated. |
@@ -113,7 +116,7 @@ | |||
113 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | 116 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few |
114 | * superfluous timer interrupts from the nic. | 117 | * superfluous timer interrupts from the nic. |
115 | */ | 118 | */ |
116 | #define FORCEDETH_VERSION "0.49" | 119 | #define FORCEDETH_VERSION "0.52" |
117 | #define DRV_NAME "forcedeth" | 120 | #define DRV_NAME "forcedeth" |
118 | 121 | ||
119 | #include <linux/module.h> | 122 | #include <linux/module.h> |
@@ -153,6 +156,9 @@ | |||
153 | #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ | 156 | #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ |
154 | #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ | 157 | #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ |
155 | #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ | 158 | #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ |
159 | #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */ | ||
160 | #define DEV_HAS_MSI 0x0040 /* device supports MSI */ | ||
161 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ | ||
156 | 162 | ||
157 | enum { | 163 | enum { |
158 | NvRegIrqStatus = 0x000, | 164 | NvRegIrqStatus = 0x000, |
@@ -166,14 +172,17 @@ enum { | |||
166 | #define NVREG_IRQ_TX_OK 0x0010 | 172 | #define NVREG_IRQ_TX_OK 0x0010 |
167 | #define NVREG_IRQ_TIMER 0x0020 | 173 | #define NVREG_IRQ_TIMER 0x0020 |
168 | #define NVREG_IRQ_LINK 0x0040 | 174 | #define NVREG_IRQ_LINK 0x0040 |
169 | #define NVREG_IRQ_TX_ERROR 0x0080 | 175 | #define NVREG_IRQ_RX_FORCED 0x0080 |
170 | #define NVREG_IRQ_TX1 0x0100 | 176 | #define NVREG_IRQ_TX_FORCED 0x0100 |
171 | #define NVREG_IRQMASK_THROUGHPUT 0x00df | 177 | #define NVREG_IRQMASK_THROUGHPUT 0x00df |
172 | #define NVREG_IRQMASK_CPU 0x0040 | 178 | #define NVREG_IRQMASK_CPU 0x0040 |
179 | #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) | ||
180 | #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) | ||
181 | #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK) | ||
173 | 182 | ||
174 | #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ | 183 | #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ |
175 | NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \ | 184 | NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ |
176 | NVREG_IRQ_TX1)) | 185 | NVREG_IRQ_TX_FORCED)) |
177 | 186 | ||
178 | NvRegUnknownSetupReg6 = 0x008, | 187 | NvRegUnknownSetupReg6 = 0x008, |
179 | #define NVREG_UNKSETUP6_VAL 3 | 188 | #define NVREG_UNKSETUP6_VAL 3 |
@@ -185,6 +194,10 @@ enum { | |||
185 | NvRegPollingInterval = 0x00c, | 194 | NvRegPollingInterval = 0x00c, |
186 | #define NVREG_POLL_DEFAULT_THROUGHPUT 970 | 195 | #define NVREG_POLL_DEFAULT_THROUGHPUT 970 |
187 | #define NVREG_POLL_DEFAULT_CPU 13 | 196 | #define NVREG_POLL_DEFAULT_CPU 13 |
197 | NvRegMSIMap0 = 0x020, | ||
198 | NvRegMSIMap1 = 0x024, | ||
199 | NvRegMSIIrqMask = 0x030, | ||
200 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 | ||
188 | NvRegMisc1 = 0x080, | 201 | NvRegMisc1 = 0x080, |
189 | #define NVREG_MISC1_HD 0x02 | 202 | #define NVREG_MISC1_HD 0x02 |
190 | #define NVREG_MISC1_FORCE 0x3b0f3c | 203 | #define NVREG_MISC1_FORCE 0x3b0f3c |
@@ -254,6 +267,10 @@ enum { | |||
254 | #define NVREG_TXRXCTL_DESC_1 0 | 267 | #define NVREG_TXRXCTL_DESC_1 0 |
255 | #define NVREG_TXRXCTL_DESC_2 0x02100 | 268 | #define NVREG_TXRXCTL_DESC_2 0x02100 |
256 | #define NVREG_TXRXCTL_DESC_3 0x02200 | 269 | #define NVREG_TXRXCTL_DESC_3 0x02200 |
270 | #define NVREG_TXRXCTL_VLANSTRIP 0x00040 | ||
271 | #define NVREG_TXRXCTL_VLANINS 0x00080 | ||
272 | NvRegTxRingPhysAddrHigh = 0x148, | ||
273 | NvRegRxRingPhysAddrHigh = 0x14C, | ||
257 | NvRegMIIStatus = 0x180, | 274 | NvRegMIIStatus = 0x180, |
258 | #define NVREG_MIISTAT_ERROR 0x0001 | 275 | #define NVREG_MIISTAT_ERROR 0x0001 |
259 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | 276 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 |
@@ -303,6 +320,11 @@ enum { | |||
303 | #define NVREG_POWERSTATE_D1 0x0001 | 320 | #define NVREG_POWERSTATE_D1 0x0001 |
304 | #define NVREG_POWERSTATE_D2 0x0002 | 321 | #define NVREG_POWERSTATE_D2 0x0002 |
305 | #define NVREG_POWERSTATE_D3 0x0003 | 322 | #define NVREG_POWERSTATE_D3 0x0003 |
323 | NvRegVlanControl = 0x300, | ||
324 | #define NVREG_VLANCONTROL_ENABLE 0x2000 | ||
325 | NvRegMSIXMap0 = 0x3e0, | ||
326 | NvRegMSIXMap1 = 0x3e4, | ||
327 | NvRegMSIXIrqStatus = 0x3f0, | ||
306 | }; | 328 | }; |
307 | 329 | ||
308 | /* Big endian: should work, but is untested */ | 330 | /* Big endian: should work, but is untested */ |
@@ -314,7 +336,7 @@ struct ring_desc { | |||
314 | struct ring_desc_ex { | 336 | struct ring_desc_ex { |
315 | u32 PacketBufferHigh; | 337 | u32 PacketBufferHigh; |
316 | u32 PacketBufferLow; | 338 | u32 PacketBufferLow; |
317 | u32 Reserved; | 339 | u32 TxVlan; |
318 | u32 FlagLen; | 340 | u32 FlagLen; |
319 | }; | 341 | }; |
320 | 342 | ||
@@ -355,6 +377,8 @@ typedef union _ring_type { | |||
355 | #define NV_TX2_CHECKSUM_L3 (1<<27) | 377 | #define NV_TX2_CHECKSUM_L3 (1<<27) |
356 | #define NV_TX2_CHECKSUM_L4 (1<<26) | 378 | #define NV_TX2_CHECKSUM_L4 (1<<26) |
357 | 379 | ||
380 | #define NV_TX3_VLAN_TAG_PRESENT (1<<18) | ||
381 | |||
358 | #define NV_RX_DESCRIPTORVALID (1<<16) | 382 | #define NV_RX_DESCRIPTORVALID (1<<16) |
359 | #define NV_RX_MISSEDFRAME (1<<17) | 383 | #define NV_RX_MISSEDFRAME (1<<17) |
360 | #define NV_RX_SUBSTRACT1 (1<<18) | 384 | #define NV_RX_SUBSTRACT1 (1<<18) |
@@ -385,6 +409,9 @@ typedef union _ring_type { | |||
385 | #define NV_RX2_ERROR (1<<30) | 409 | #define NV_RX2_ERROR (1<<30) |
386 | #define NV_RX2_AVAIL (1<<31) | 410 | #define NV_RX2_AVAIL (1<<31) |
387 | 411 | ||
412 | #define NV_RX3_VLAN_TAG_PRESENT (1<<16) | ||
413 | #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) | ||
414 | |||
388 | /* Miscelaneous hardware related defines: */ | 415 | /* Miscelaneous hardware related defines: */ |
389 | #define NV_PCI_REGSZ 0x270 | 416 | #define NV_PCI_REGSZ 0x270 |
390 | 417 | ||
@@ -475,6 +502,18 @@ typedef union _ring_type { | |||
475 | #define LPA_1000FULL 0x0800 | 502 | #define LPA_1000FULL 0x0800 |
476 | #define LPA_1000HALF 0x0400 | 503 | #define LPA_1000HALF 0x0400 |
477 | 504 | ||
505 | /* MSI/MSI-X defines */ | ||
506 | #define NV_MSI_X_MAX_VECTORS 8 | ||
507 | #define NV_MSI_X_VECTORS_MASK 0x000f | ||
508 | #define NV_MSI_CAPABLE 0x0010 | ||
509 | #define NV_MSI_X_CAPABLE 0x0020 | ||
510 | #define NV_MSI_ENABLED 0x0040 | ||
511 | #define NV_MSI_X_ENABLED 0x0080 | ||
512 | |||
513 | #define NV_MSI_X_VECTOR_ALL 0x0 | ||
514 | #define NV_MSI_X_VECTOR_RX 0x0 | ||
515 | #define NV_MSI_X_VECTOR_TX 0x1 | ||
516 | #define NV_MSI_X_VECTOR_OTHER 0x2 | ||
478 | 517 | ||
479 | /* | 518 | /* |
480 | * SMP locking: | 519 | * SMP locking: |
@@ -511,6 +550,7 @@ struct fe_priv { | |||
511 | u32 irqmask; | 550 | u32 irqmask; |
512 | u32 desc_ver; | 551 | u32 desc_ver; |
513 | u32 txrxctl_bits; | 552 | u32 txrxctl_bits; |
553 | u32 vlanctl_bits; | ||
514 | 554 | ||
515 | void __iomem *base; | 555 | void __iomem *base; |
516 | 556 | ||
@@ -525,6 +565,7 @@ struct fe_priv { | |||
525 | unsigned int pkt_limit; | 565 | unsigned int pkt_limit; |
526 | struct timer_list oom_kick; | 566 | struct timer_list oom_kick; |
527 | struct timer_list nic_poll; | 567 | struct timer_list nic_poll; |
568 | u32 nic_poll_irq; | ||
528 | 569 | ||
529 | /* media detection workaround. | 570 | /* media detection workaround. |
530 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); | 571 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); |
@@ -540,6 +581,13 @@ struct fe_priv { | |||
540 | dma_addr_t tx_dma[TX_RING]; | 581 | dma_addr_t tx_dma[TX_RING]; |
541 | unsigned int tx_dma_len[TX_RING]; | 582 | unsigned int tx_dma_len[TX_RING]; |
542 | u32 tx_flags; | 583 | u32 tx_flags; |
584 | |||
585 | /* vlan fields */ | ||
586 | struct vlan_group *vlangrp; | ||
587 | |||
588 | /* msi/msi-x fields */ | ||
589 | u32 msi_flags; | ||
590 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; | ||
543 | }; | 591 | }; |
544 | 592 | ||
545 | /* | 593 | /* |
@@ -567,6 +615,16 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; | |||
567 | */ | 615 | */ |
568 | static int poll_interval = -1; | 616 | static int poll_interval = -1; |
569 | 617 | ||
618 | /* | ||
619 | * Disable MSI interrupts | ||
620 | */ | ||
621 | static int disable_msi = 0; | ||
622 | |||
623 | /* | ||
624 | * Disable MSIX interrupts | ||
625 | */ | ||
626 | static int disable_msix = 0; | ||
627 | |||
570 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) | 628 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) |
571 | { | 629 | { |
572 | return netdev_priv(dev); | 630 | return netdev_priv(dev); |
@@ -612,6 +670,33 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, | |||
612 | return 0; | 670 | return 0; |
613 | } | 671 | } |
614 | 672 | ||
673 | #define NV_SETUP_RX_RING 0x01 | ||
674 | #define NV_SETUP_TX_RING 0x02 | ||
675 | |||
676 | static void setup_hw_rings(struct net_device *dev, int rxtx_flags) | ||
677 | { | ||
678 | struct fe_priv *np = get_nvpriv(dev); | ||
679 | u8 __iomem *base = get_hwbase(dev); | ||
680 | |||
681 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
682 | if (rxtx_flags & NV_SETUP_RX_RING) { | ||
683 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | ||
684 | } | ||
685 | if (rxtx_flags & NV_SETUP_TX_RING) { | ||
686 | writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | ||
687 | } | ||
688 | } else { | ||
689 | if (rxtx_flags & NV_SETUP_RX_RING) { | ||
690 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | ||
691 | writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh); | ||
692 | } | ||
693 | if (rxtx_flags & NV_SETUP_TX_RING) { | ||
694 | writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | ||
695 | writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh); | ||
696 | } | ||
697 | } | ||
698 | } | ||
699 | |||
615 | #define MII_READ (-1) | 700 | #define MII_READ (-1) |
616 | /* mii_rw: read/write a register on the PHY. | 701 | /* mii_rw: read/write a register on the PHY. |
617 | * | 702 | * |
@@ -903,14 +988,27 @@ static void nv_do_rx_refill(unsigned long data) | |||
903 | struct net_device *dev = (struct net_device *) data; | 988 | struct net_device *dev = (struct net_device *) data; |
904 | struct fe_priv *np = netdev_priv(dev); | 989 | struct fe_priv *np = netdev_priv(dev); |
905 | 990 | ||
906 | disable_irq(dev->irq); | 991 | |
992 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | ||
993 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
994 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
995 | disable_irq(dev->irq); | ||
996 | } else { | ||
997 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
998 | } | ||
907 | if (nv_alloc_rx(dev)) { | 999 | if (nv_alloc_rx(dev)) { |
908 | spin_lock(&np->lock); | 1000 | spin_lock(&np->lock); |
909 | if (!np->in_shutdown) | 1001 | if (!np->in_shutdown) |
910 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 1002 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
911 | spin_unlock(&np->lock); | 1003 | spin_unlock(&np->lock); |
912 | } | 1004 | } |
913 | enable_irq(dev->irq); | 1005 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
1006 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
1007 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
1008 | enable_irq(dev->irq); | ||
1009 | } else { | ||
1010 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
1011 | } | ||
914 | } | 1012 | } |
915 | 1013 | ||
916 | static void nv_init_rx(struct net_device *dev) | 1014 | static void nv_init_rx(struct net_device *dev) |
@@ -965,7 +1063,7 @@ static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) | |||
965 | } | 1063 | } |
966 | 1064 | ||
967 | if (np->tx_skbuff[skbnr]) { | 1065 | if (np->tx_skbuff[skbnr]) { |
968 | dev_kfree_skb_irq(np->tx_skbuff[skbnr]); | 1066 | dev_kfree_skb_any(np->tx_skbuff[skbnr]); |
969 | np->tx_skbuff[skbnr] = NULL; | 1067 | np->tx_skbuff[skbnr] = NULL; |
970 | return 1; | 1068 | return 1; |
971 | } else { | 1069 | } else { |
@@ -1031,6 +1129,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1031 | u32 bcnt; | 1129 | u32 bcnt; |
1032 | u32 size = skb->len-skb->data_len; | 1130 | u32 size = skb->len-skb->data_len; |
1033 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | 1131 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
1132 | u32 tx_flags_vlan = 0; | ||
1034 | 1133 | ||
1035 | /* add fragments to entries count */ | 1134 | /* add fragments to entries count */ |
1036 | for (i = 0; i < fragments; i++) { | 1135 | for (i = 0; i < fragments; i++) { |
@@ -1111,10 +1210,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1111 | #endif | 1210 | #endif |
1112 | tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); | 1211 | tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); |
1113 | 1212 | ||
1213 | /* vlan tag */ | ||
1214 | if (np->vlangrp && vlan_tx_tag_present(skb)) { | ||
1215 | tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); | ||
1216 | } | ||
1217 | |||
1114 | /* set tx flags */ | 1218 | /* set tx flags */ |
1115 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1219 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1116 | np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1220 | np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1117 | } else { | 1221 | } else { |
1222 | np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); | ||
1118 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1223 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1119 | } | 1224 | } |
1120 | 1225 | ||
@@ -1209,9 +1314,14 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1209 | { | 1314 | { |
1210 | struct fe_priv *np = netdev_priv(dev); | 1315 | struct fe_priv *np = netdev_priv(dev); |
1211 | u8 __iomem *base = get_hwbase(dev); | 1316 | u8 __iomem *base = get_hwbase(dev); |
1317 | u32 status; | ||
1318 | |||
1319 | if (np->msi_flags & NV_MSI_X_ENABLED) | ||
1320 | status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | ||
1321 | else | ||
1322 | status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | ||
1212 | 1323 | ||
1213 | printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, | 1324 | printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); |
1214 | readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK); | ||
1215 | 1325 | ||
1216 | { | 1326 | { |
1217 | int i; | 1327 | int i; |
@@ -1273,10 +1383,7 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1273 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); | 1383 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); |
1274 | nv_drain_tx(dev); | 1384 | nv_drain_tx(dev); |
1275 | np->next_tx = np->nic_tx = 0; | 1385 | np->next_tx = np->nic_tx = 0; |
1276 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1386 | setup_hw_rings(dev, NV_SETUP_TX_RING); |
1277 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | ||
1278 | else | ||
1279 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | ||
1280 | netif_wake_queue(dev); | 1387 | netif_wake_queue(dev); |
1281 | } | 1388 | } |
1282 | 1389 | ||
@@ -1342,6 +1449,8 @@ static void nv_rx_process(struct net_device *dev) | |||
1342 | { | 1449 | { |
1343 | struct fe_priv *np = netdev_priv(dev); | 1450 | struct fe_priv *np = netdev_priv(dev); |
1344 | u32 Flags; | 1451 | u32 Flags; |
1452 | u32 vlanflags = 0; | ||
1453 | |||
1345 | 1454 | ||
1346 | for (;;) { | 1455 | for (;;) { |
1347 | struct sk_buff *skb; | 1456 | struct sk_buff *skb; |
@@ -1357,6 +1466,7 @@ static void nv_rx_process(struct net_device *dev) | |||
1357 | } else { | 1466 | } else { |
1358 | Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); | 1467 | Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); |
1359 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); | 1468 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); |
1469 | vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow); | ||
1360 | } | 1470 | } |
1361 | 1471 | ||
1362 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", | 1472 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", |
@@ -1474,7 +1584,11 @@ static void nv_rx_process(struct net_device *dev) | |||
1474 | skb->protocol = eth_type_trans(skb, dev); | 1584 | skb->protocol = eth_type_trans(skb, dev); |
1475 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", | 1585 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", |
1476 | dev->name, np->cur_rx, len, skb->protocol); | 1586 | dev->name, np->cur_rx, len, skb->protocol); |
1477 | netif_rx(skb); | 1587 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) { |
1588 | vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); | ||
1589 | } else { | ||
1590 | netif_rx(skb); | ||
1591 | } | ||
1478 | dev->last_rx = jiffies; | 1592 | dev->last_rx = jiffies; |
1479 | np->stats.rx_packets++; | 1593 | np->stats.rx_packets++; |
1480 | np->stats.rx_bytes += len; | 1594 | np->stats.rx_bytes += len; |
@@ -1523,7 +1637,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1523 | * guessed, there is probably a simpler approach. | 1637 | * guessed, there is probably a simpler approach. |
1524 | * Changing the MTU is a rare event, it shouldn't matter. | 1638 | * Changing the MTU is a rare event, it shouldn't matter. |
1525 | */ | 1639 | */ |
1526 | disable_irq(dev->irq); | 1640 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
1641 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
1642 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
1643 | disable_irq(dev->irq); | ||
1644 | } else { | ||
1645 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
1646 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
1647 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
1648 | } | ||
1527 | spin_lock_bh(&dev->xmit_lock); | 1649 | spin_lock_bh(&dev->xmit_lock); |
1528 | spin_lock(&np->lock); | 1650 | spin_lock(&np->lock); |
1529 | /* stop engines */ | 1651 | /* stop engines */ |
@@ -1544,11 +1666,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1544 | } | 1666 | } |
1545 | /* reinit nic view of the rx queue */ | 1667 | /* reinit nic view of the rx queue */ |
1546 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | 1668 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); |
1547 | writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); | 1669 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
1548 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | ||
1549 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | ||
1550 | else | ||
1551 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | ||
1552 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), | 1670 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), |
1553 | base + NvRegRingSizes); | 1671 | base + NvRegRingSizes); |
1554 | pci_push(base); | 1672 | pci_push(base); |
@@ -1560,7 +1678,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1560 | nv_start_tx(dev); | 1678 | nv_start_tx(dev); |
1561 | spin_unlock(&np->lock); | 1679 | spin_unlock(&np->lock); |
1562 | spin_unlock_bh(&dev->xmit_lock); | 1680 | spin_unlock_bh(&dev->xmit_lock); |
1563 | enable_irq(dev->irq); | 1681 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
1682 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
1683 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
1684 | enable_irq(dev->irq); | ||
1685 | } else { | ||
1686 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
1687 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
1688 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
1689 | } | ||
1564 | } | 1690 | } |
1565 | return 0; | 1691 | return 0; |
1566 | } | 1692 | } |
@@ -1866,8 +1992,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
1866 | dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); | 1992 | dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); |
1867 | 1993 | ||
1868 | for (i=0; ; i++) { | 1994 | for (i=0; ; i++) { |
1869 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | 1995 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
1870 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | 1996 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; |
1997 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | ||
1998 | } else { | ||
1999 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | ||
2000 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | ||
2001 | } | ||
1871 | pci_push(base); | 2002 | pci_push(base); |
1872 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | 2003 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
1873 | if (!(events & np->irqmask)) | 2004 | if (!(events & np->irqmask)) |
@@ -1907,11 +2038,16 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
1907 | if (i > max_interrupt_work) { | 2038 | if (i > max_interrupt_work) { |
1908 | spin_lock(&np->lock); | 2039 | spin_lock(&np->lock); |
1909 | /* disable interrupts on the nic */ | 2040 | /* disable interrupts on the nic */ |
1910 | writel(0, base + NvRegIrqMask); | 2041 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) |
2042 | writel(0, base + NvRegIrqMask); | ||
2043 | else | ||
2044 | writel(np->irqmask, base + NvRegIrqMask); | ||
1911 | pci_push(base); | 2045 | pci_push(base); |
1912 | 2046 | ||
1913 | if (!np->in_shutdown) | 2047 | if (!np->in_shutdown) { |
2048 | np->nic_poll_irq = np->irqmask; | ||
1914 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | 2049 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
2050 | } | ||
1915 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); | 2051 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); |
1916 | spin_unlock(&np->lock); | 2052 | spin_unlock(&np->lock); |
1917 | break; | 2053 | break; |
@@ -1923,22 +2059,212 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
1923 | return IRQ_RETVAL(i); | 2059 | return IRQ_RETVAL(i); |
1924 | } | 2060 | } |
1925 | 2061 | ||
2062 | static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) | ||
2063 | { | ||
2064 | struct net_device *dev = (struct net_device *) data; | ||
2065 | struct fe_priv *np = netdev_priv(dev); | ||
2066 | u8 __iomem *base = get_hwbase(dev); | ||
2067 | u32 events; | ||
2068 | int i; | ||
2069 | |||
2070 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); | ||
2071 | |||
2072 | for (i=0; ; i++) { | ||
2073 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; | ||
2074 | writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); | ||
2075 | pci_push(base); | ||
2076 | dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); | ||
2077 | if (!(events & np->irqmask)) | ||
2078 | break; | ||
2079 | |||
2080 | spin_lock(&np->lock); | ||
2081 | nv_tx_done(dev); | ||
2082 | spin_unlock(&np->lock); | ||
2083 | |||
2084 | if (events & (NVREG_IRQ_TX_ERR)) { | ||
2085 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | ||
2086 | dev->name, events); | ||
2087 | } | ||
2088 | if (i > max_interrupt_work) { | ||
2089 | spin_lock(&np->lock); | ||
2090 | /* disable interrupts on the nic */ | ||
2091 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); | ||
2092 | pci_push(base); | ||
2093 | |||
2094 | if (!np->in_shutdown) { | ||
2095 | np->nic_poll_irq |= NVREG_IRQ_TX_ALL; | ||
2096 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2097 | } | ||
2098 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); | ||
2099 | spin_unlock(&np->lock); | ||
2100 | break; | ||
2101 | } | ||
2102 | |||
2103 | } | ||
2104 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); | ||
2105 | |||
2106 | return IRQ_RETVAL(i); | ||
2107 | } | ||
2108 | |||
2109 | static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | ||
2110 | { | ||
2111 | struct net_device *dev = (struct net_device *) data; | ||
2112 | struct fe_priv *np = netdev_priv(dev); | ||
2113 | u8 __iomem *base = get_hwbase(dev); | ||
2114 | u32 events; | ||
2115 | int i; | ||
2116 | |||
2117 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); | ||
2118 | |||
2119 | for (i=0; ; i++) { | ||
2120 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | ||
2121 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | ||
2122 | pci_push(base); | ||
2123 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); | ||
2124 | if (!(events & np->irqmask)) | ||
2125 | break; | ||
2126 | |||
2127 | nv_rx_process(dev); | ||
2128 | if (nv_alloc_rx(dev)) { | ||
2129 | spin_lock(&np->lock); | ||
2130 | if (!np->in_shutdown) | ||
2131 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2132 | spin_unlock(&np->lock); | ||
2133 | } | ||
2134 | |||
2135 | if (i > max_interrupt_work) { | ||
2136 | spin_lock(&np->lock); | ||
2137 | /* disable interrupts on the nic */ | ||
2138 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | ||
2139 | pci_push(base); | ||
2140 | |||
2141 | if (!np->in_shutdown) { | ||
2142 | np->nic_poll_irq |= NVREG_IRQ_RX_ALL; | ||
2143 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2144 | } | ||
2145 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); | ||
2146 | spin_unlock(&np->lock); | ||
2147 | break; | ||
2148 | } | ||
2149 | |||
2150 | } | ||
2151 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); | ||
2152 | |||
2153 | return IRQ_RETVAL(i); | ||
2154 | } | ||
2155 | |||
2156 | static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | ||
2157 | { | ||
2158 | struct net_device *dev = (struct net_device *) data; | ||
2159 | struct fe_priv *np = netdev_priv(dev); | ||
2160 | u8 __iomem *base = get_hwbase(dev); | ||
2161 | u32 events; | ||
2162 | int i; | ||
2163 | |||
2164 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); | ||
2165 | |||
2166 | for (i=0; ; i++) { | ||
2167 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; | ||
2168 | writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); | ||
2169 | pci_push(base); | ||
2170 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | ||
2171 | if (!(events & np->irqmask)) | ||
2172 | break; | ||
2173 | |||
2174 | if (events & NVREG_IRQ_LINK) { | ||
2175 | spin_lock(&np->lock); | ||
2176 | nv_link_irq(dev); | ||
2177 | spin_unlock(&np->lock); | ||
2178 | } | ||
2179 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | ||
2180 | spin_lock(&np->lock); | ||
2181 | nv_linkchange(dev); | ||
2182 | spin_unlock(&np->lock); | ||
2183 | np->link_timeout = jiffies + LINK_TIMEOUT; | ||
2184 | } | ||
2185 | if (events & (NVREG_IRQ_UNKNOWN)) { | ||
2186 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | ||
2187 | dev->name, events); | ||
2188 | } | ||
2189 | if (i > max_interrupt_work) { | ||
2190 | spin_lock(&np->lock); | ||
2191 | /* disable interrupts on the nic */ | ||
2192 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | ||
2193 | pci_push(base); | ||
2194 | |||
2195 | if (!np->in_shutdown) { | ||
2196 | np->nic_poll_irq |= NVREG_IRQ_OTHER; | ||
2197 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2198 | } | ||
2199 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); | ||
2200 | spin_unlock(&np->lock); | ||
2201 | break; | ||
2202 | } | ||
2203 | |||
2204 | } | ||
2205 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); | ||
2206 | |||
2207 | return IRQ_RETVAL(i); | ||
2208 | } | ||
2209 | |||
1926 | static void nv_do_nic_poll(unsigned long data) | 2210 | static void nv_do_nic_poll(unsigned long data) |
1927 | { | 2211 | { |
1928 | struct net_device *dev = (struct net_device *) data; | 2212 | struct net_device *dev = (struct net_device *) data; |
1929 | struct fe_priv *np = netdev_priv(dev); | 2213 | struct fe_priv *np = netdev_priv(dev); |
1930 | u8 __iomem *base = get_hwbase(dev); | 2214 | u8 __iomem *base = get_hwbase(dev); |
2215 | u32 mask = 0; | ||
1931 | 2216 | ||
1932 | disable_irq(dev->irq); | ||
1933 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | ||
1934 | /* | 2217 | /* |
2218 | * First disable irq(s) and then | ||
1935 | * reenable interrupts on the nic, we have to do this before calling | 2219 | * reenable interrupts on the nic, we have to do this before calling |
1936 | * nv_nic_irq because that may decide to do otherwise | 2220 | * nv_nic_irq because that may decide to do otherwise |
1937 | */ | 2221 | */ |
1938 | writel(np->irqmask, base + NvRegIrqMask); | 2222 | |
2223 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | ||
2224 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
2225 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
2226 | disable_irq(dev->irq); | ||
2227 | mask = np->irqmask; | ||
2228 | } else { | ||
2229 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | ||
2230 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
2231 | mask |= NVREG_IRQ_RX_ALL; | ||
2232 | } | ||
2233 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | ||
2234 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
2235 | mask |= NVREG_IRQ_TX_ALL; | ||
2236 | } | ||
2237 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | ||
2238 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
2239 | mask |= NVREG_IRQ_OTHER; | ||
2240 | } | ||
2241 | } | ||
2242 | np->nic_poll_irq = 0; | ||
2243 | |||
2244 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | ||
2245 | |||
2246 | writel(mask, base + NvRegIrqMask); | ||
1939 | pci_push(base); | 2247 | pci_push(base); |
1940 | nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); | 2248 | |
1941 | enable_irq(dev->irq); | 2249 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
2250 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
2251 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
2252 | nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); | ||
2253 | enable_irq(dev->irq); | ||
2254 | } else { | ||
2255 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | ||
2256 | nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); | ||
2257 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
2258 | } | ||
2259 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | ||
2260 | nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL); | ||
2261 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
2262 | } | ||
2263 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | ||
2264 | nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL); | ||
2265 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
2266 | } | ||
2267 | } | ||
1942 | } | 2268 | } |
1943 | 2269 | ||
1944 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2270 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -2217,11 +2543,66 @@ static struct ethtool_ops ops = { | |||
2217 | .get_perm_addr = ethtool_op_get_perm_addr, | 2543 | .get_perm_addr = ethtool_op_get_perm_addr, |
2218 | }; | 2544 | }; |
2219 | 2545 | ||
2546 | static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
2547 | { | ||
2548 | struct fe_priv *np = get_nvpriv(dev); | ||
2549 | |||
2550 | spin_lock_irq(&np->lock); | ||
2551 | |||
2552 | /* save vlan group */ | ||
2553 | np->vlangrp = grp; | ||
2554 | |||
2555 | if (grp) { | ||
2556 | /* enable vlan on MAC */ | ||
2557 | np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; | ||
2558 | } else { | ||
2559 | /* disable vlan on MAC */ | ||
2560 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; | ||
2561 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; | ||
2562 | } | ||
2563 | |||
2564 | writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | ||
2565 | |||
2566 | spin_unlock_irq(&np->lock); | ||
2567 | }; | ||
2568 | |||
2569 | static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
2570 | { | ||
2571 | /* nothing to do */ | ||
2572 | }; | ||
2573 | |||
2574 | static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) | ||
2575 | { | ||
2576 | u8 __iomem *base = get_hwbase(dev); | ||
2577 | int i; | ||
2578 | u32 msixmap = 0; | ||
2579 | |||
2580 | /* Each interrupt bit can be mapped to a MSIX vector (4 bits). | ||
2581 | * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents | ||
2582 | * the remaining 8 interrupts. | ||
2583 | */ | ||
2584 | for (i = 0; i < 8; i++) { | ||
2585 | if ((irqmask >> i) & 0x1) { | ||
2586 | msixmap |= vector << (i << 2); | ||
2587 | } | ||
2588 | } | ||
2589 | writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); | ||
2590 | |||
2591 | msixmap = 0; | ||
2592 | for (i = 0; i < 8; i++) { | ||
2593 | if ((irqmask >> (i + 8)) & 0x1) { | ||
2594 | msixmap |= vector << (i << 2); | ||
2595 | } | ||
2596 | } | ||
2597 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); | ||
2598 | } | ||
2599 | |||
2220 | static int nv_open(struct net_device *dev) | 2600 | static int nv_open(struct net_device *dev) |
2221 | { | 2601 | { |
2222 | struct fe_priv *np = netdev_priv(dev); | 2602 | struct fe_priv *np = netdev_priv(dev); |
2223 | u8 __iomem *base = get_hwbase(dev); | 2603 | u8 __iomem *base = get_hwbase(dev); |
2224 | int ret, oom, i; | 2604 | int ret = 1; |
2605 | int oom, i; | ||
2225 | 2606 | ||
2226 | dprintk(KERN_DEBUG "nv_open: begin\n"); | 2607 | dprintk(KERN_DEBUG "nv_open: begin\n"); |
2227 | 2608 | ||
@@ -2253,11 +2634,7 @@ static int nv_open(struct net_device *dev) | |||
2253 | nv_copy_mac_to_hw(dev); | 2634 | nv_copy_mac_to_hw(dev); |
2254 | 2635 | ||
2255 | /* 4) give hw rings */ | 2636 | /* 4) give hw rings */ |
2256 | writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); | 2637 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
2257 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | ||
2258 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | ||
2259 | else | ||
2260 | writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | ||
2261 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), | 2638 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), |
2262 | base + NvRegRingSizes); | 2639 | base + NvRegRingSizes); |
2263 | 2640 | ||
@@ -2265,6 +2642,7 @@ static int nv_open(struct net_device *dev) | |||
2265 | writel(np->linkspeed, base + NvRegLinkSpeed); | 2642 | writel(np->linkspeed, base + NvRegLinkSpeed); |
2266 | writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); | 2643 | writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); |
2267 | writel(np->txrxctl_bits, base + NvRegTxRxControl); | 2644 | writel(np->txrxctl_bits, base + NvRegTxRxControl); |
2645 | writel(np->vlanctl_bits, base + NvRegVlanControl); | ||
2268 | pci_push(base); | 2646 | pci_push(base); |
2269 | writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); | 2647 | writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); |
2270 | reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, | 2648 | reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, |
@@ -2315,9 +2693,77 @@ static int nv_open(struct net_device *dev) | |||
2315 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | 2693 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); |
2316 | pci_push(base); | 2694 | pci_push(base); |
2317 | 2695 | ||
2318 | ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev); | 2696 | if (np->msi_flags & NV_MSI_X_CAPABLE) { |
2319 | if (ret) | 2697 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { |
2320 | goto out_drain; | 2698 | np->msi_x_entry[i].entry = i; |
2699 | } | ||
2700 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | ||
2701 | np->msi_flags |= NV_MSI_X_ENABLED; | ||
2702 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { | ||
2703 | /* Request irq for rx handling */ | ||
2704 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2705 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); | ||
2706 | pci_disable_msix(np->pci_dev); | ||
2707 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2708 | goto out_drain; | ||
2709 | } | ||
2710 | /* Request irq for tx handling */ | ||
2711 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2712 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); | ||
2713 | pci_disable_msix(np->pci_dev); | ||
2714 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2715 | goto out_drain; | ||
2716 | } | ||
2717 | /* Request irq for link and timer handling */ | ||
2718 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { | ||
2719 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); | ||
2720 | pci_disable_msix(np->pci_dev); | ||
2721 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2722 | goto out_drain; | ||
2723 | } | ||
2724 | |||
2725 | /* map interrupts to their respective vector */ | ||
2726 | writel(0, base + NvRegMSIXMap0); | ||
2727 | writel(0, base + NvRegMSIXMap1); | ||
2728 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | ||
2729 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | ||
2730 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | ||
2731 | } else { | ||
2732 | /* Request irq for all interrupts */ | ||
2733 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2734 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2735 | pci_disable_msix(np->pci_dev); | ||
2736 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2737 | goto out_drain; | ||
2738 | } | ||
2739 | |||
2740 | /* map interrupts to vector 0 */ | ||
2741 | writel(0, base + NvRegMSIXMap0); | ||
2742 | writel(0, base + NvRegMSIXMap1); | ||
2743 | } | ||
2744 | } | ||
2745 | } | ||
2746 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | ||
2747 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | ||
2748 | np->msi_flags |= NV_MSI_ENABLED; | ||
2749 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2750 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2751 | pci_disable_msi(np->pci_dev); | ||
2752 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2753 | goto out_drain; | ||
2754 | } | ||
2755 | |||
2756 | /* map interrupts to vector 0 */ | ||
2757 | writel(0, base + NvRegMSIMap0); | ||
2758 | writel(0, base + NvRegMSIMap1); | ||
2759 | /* enable msi vector 0 */ | ||
2760 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | ||
2761 | } | ||
2762 | } | ||
2763 | if (ret != 0) { | ||
2764 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) | ||
2765 | goto out_drain; | ||
2766 | } | ||
2321 | 2767 | ||
2322 | /* ask for interrupts */ | 2768 | /* ask for interrupts */ |
2323 | writel(np->irqmask, base + NvRegIrqMask); | 2769 | writel(np->irqmask, base + NvRegIrqMask); |
@@ -2364,6 +2810,7 @@ static int nv_close(struct net_device *dev) | |||
2364 | { | 2810 | { |
2365 | struct fe_priv *np = netdev_priv(dev); | 2811 | struct fe_priv *np = netdev_priv(dev); |
2366 | u8 __iomem *base; | 2812 | u8 __iomem *base; |
2813 | int i; | ||
2367 | 2814 | ||
2368 | spin_lock_irq(&np->lock); | 2815 | spin_lock_irq(&np->lock); |
2369 | np->in_shutdown = 1; | 2816 | np->in_shutdown = 1; |
@@ -2381,13 +2828,31 @@ static int nv_close(struct net_device *dev) | |||
2381 | 2828 | ||
2382 | /* disable interrupts on the nic or we will lock up */ | 2829 | /* disable interrupts on the nic or we will lock up */ |
2383 | base = get_hwbase(dev); | 2830 | base = get_hwbase(dev); |
2384 | writel(0, base + NvRegIrqMask); | 2831 | if (np->msi_flags & NV_MSI_X_ENABLED) { |
2832 | writel(np->irqmask, base + NvRegIrqMask); | ||
2833 | } else { | ||
2834 | if (np->msi_flags & NV_MSI_ENABLED) | ||
2835 | writel(0, base + NvRegMSIIrqMask); | ||
2836 | writel(0, base + NvRegIrqMask); | ||
2837 | } | ||
2385 | pci_push(base); | 2838 | pci_push(base); |
2386 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); | 2839 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); |
2387 | 2840 | ||
2388 | spin_unlock_irq(&np->lock); | 2841 | spin_unlock_irq(&np->lock); |
2389 | 2842 | ||
2390 | free_irq(dev->irq, dev); | 2843 | if (np->msi_flags & NV_MSI_X_ENABLED) { |
2844 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | ||
2845 | free_irq(np->msi_x_entry[i].vector, dev); | ||
2846 | } | ||
2847 | pci_disable_msix(np->pci_dev); | ||
2848 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2849 | } else { | ||
2850 | free_irq(np->pci_dev->irq, dev); | ||
2851 | if (np->msi_flags & NV_MSI_ENABLED) { | ||
2852 | pci_disable_msi(np->pci_dev); | ||
2853 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2854 | } | ||
2855 | } | ||
2391 | 2856 | ||
2392 | drain_ring(dev); | 2857 | drain_ring(dev); |
2393 | 2858 | ||
@@ -2471,7 +2936,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
2471 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", | 2936 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", |
2472 | pci_name(pci_dev)); | 2937 | pci_name(pci_dev)); |
2473 | } else { | 2938 | } else { |
2474 | dev->features |= NETIF_F_HIGHDMA; | 2939 | if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { |
2940 | printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", | ||
2941 | pci_name(pci_dev)); | ||
2942 | goto out_relreg; | ||
2943 | } else { | ||
2944 | dev->features |= NETIF_F_HIGHDMA; | ||
2945 | printk(KERN_INFO "forcedeth: using HIGHDMA\n"); | ||
2946 | } | ||
2475 | } | 2947 | } |
2476 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; | 2948 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; |
2477 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { | 2949 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { |
@@ -2496,6 +2968,22 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
2496 | #endif | 2968 | #endif |
2497 | } | 2969 | } |
2498 | 2970 | ||
2971 | np->vlanctl_bits = 0; | ||
2972 | if (id->driver_data & DEV_HAS_VLAN) { | ||
2973 | np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; | ||
2974 | dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; | ||
2975 | dev->vlan_rx_register = nv_vlan_rx_register; | ||
2976 | dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; | ||
2977 | } | ||
2978 | |||
2979 | np->msi_flags = 0; | ||
2980 | if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) { | ||
2981 | np->msi_flags |= NV_MSI_CAPABLE; | ||
2982 | } | ||
2983 | if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) { | ||
2984 | np->msi_flags |= NV_MSI_X_CAPABLE; | ||
2985 | } | ||
2986 | |||
2499 | err = -ENOMEM; | 2987 | err = -ENOMEM; |
2500 | np->base = ioremap(addr, NV_PCI_REGSZ); | 2988 | np->base = ioremap(addr, NV_PCI_REGSZ); |
2501 | if (!np->base) | 2989 | if (!np->base) |
@@ -2578,10 +3066,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
2578 | } else { | 3066 | } else { |
2579 | np->tx_flags = NV_TX2_VALID; | 3067 | np->tx_flags = NV_TX2_VALID; |
2580 | } | 3068 | } |
2581 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) | 3069 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { |
2582 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; | 3070 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; |
2583 | else | 3071 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ |
3072 | np->msi_flags |= 0x0003; | ||
3073 | } else { | ||
2584 | np->irqmask = NVREG_IRQMASK_CPU; | 3074 | np->irqmask = NVREG_IRQMASK_CPU; |
3075 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ | ||
3076 | np->msi_flags |= 0x0001; | ||
3077 | } | ||
2585 | 3078 | ||
2586 | if (id->driver_data & DEV_NEED_TIMERIRQ) | 3079 | if (id->driver_data & DEV_NEED_TIMERIRQ) |
2587 | np->irqmask |= NVREG_IRQ_TIMER; | 3080 | np->irqmask |= NVREG_IRQ_TIMER; |
@@ -2737,11 +3230,11 @@ static struct pci_device_id pci_tbl[] = { | |||
2737 | }, | 3230 | }, |
2738 | { /* MCP55 Ethernet Controller */ | 3231 | { /* MCP55 Ethernet Controller */ |
2739 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), | 3232 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), |
2740 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, | 3233 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X, |
2741 | }, | 3234 | }, |
2742 | { /* MCP55 Ethernet Controller */ | 3235 | { /* MCP55 Ethernet Controller */ |
2743 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), | 3236 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), |
2744 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, | 3237 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X, |
2745 | }, | 3238 | }, |
2746 | {0,}, | 3239 | {0,}, |
2747 | }; | 3240 | }; |
@@ -2771,6 +3264,10 @@ module_param(optimization_mode, int, 0); | |||
2771 | MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); | 3264 | MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); |
2772 | module_param(poll_interval, int, 0); | 3265 | module_param(poll_interval, int, 0); |
2773 | MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); | 3266 | MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); |
3267 | module_param(disable_msi, int, 0); | ||
3268 | MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1."); | ||
3269 | module_param(disable_msix, int, 0); | ||
3270 | MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1."); | ||
2774 | 3271 | ||
2775 | MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); | 3272 | MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); |
2776 | MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); | 3273 | MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); |
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index bc9a3bf8d560..0ea4cb4a0d80 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c | |||
@@ -427,7 +427,7 @@ that case. | |||
427 | static void hamachi_timer(unsigned long data); | 427 | static void hamachi_timer(unsigned long data); |
428 | 428 | ||
429 | enum capability_flags {CanHaveMII=1, }; | 429 | enum capability_flags {CanHaveMII=1, }; |
430 | static struct chip_info { | 430 | static const struct chip_info { |
431 | u16 vendor_id, device_id, device_id_mask, pad; | 431 | u16 vendor_id, device_id, device_id_mask, pad; |
432 | const char *name; | 432 | const char *name; |
433 | void (*media_timer)(unsigned long data); | 433 | void (*media_timer)(unsigned long data); |
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index e4188d082f01..9220de9f4fe7 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -905,7 +905,7 @@ static int epp_open(struct net_device *dev) | |||
905 | /* autoprobe baud rate */ | 905 | /* autoprobe baud rate */ |
906 | tstart = jiffies; | 906 | tstart = jiffies; |
907 | i = 0; | 907 | i = 0; |
908 | while ((signed)(jiffies-tstart-HZ/3) < 0) { | 908 | while (time_before(jiffies, tstart + HZ/3)) { |
909 | if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1) | 909 | if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1) |
910 | goto epptimeout; | 910 | goto epptimeout; |
911 | if ((stat & (EPP_NRAEF|EPP_NRHF)) == EPP_NRHF) { | 911 | if ((stat & (EPP_NRAEF|EPP_NRHF)) == EPP_NRHF) { |
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index 55c7ed608391..247c8ca86033 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c | |||
@@ -115,6 +115,7 @@ | |||
115 | #include <linux/delay.h> | 115 | #include <linux/delay.h> |
116 | #include <linux/init.h> | 116 | #include <linux/init.h> |
117 | #include <linux/bitops.h> | 117 | #include <linux/bitops.h> |
118 | #include <linux/jiffies.h> | ||
118 | 119 | ||
119 | #include <asm/io.h> | 120 | #include <asm/io.h> |
120 | 121 | ||
@@ -1499,7 +1500,7 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev) | |||
1499 | printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name); | 1500 | printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name); |
1500 | #endif | 1501 | #endif |
1501 | /* not waited long enough since last tx? */ | 1502 | /* not waited long enough since last tx? */ |
1502 | if (jiffies - dev->trans_start < HZ) | 1503 | if (time_before(jiffies, dev->trans_start + HZ)) |
1503 | return -EAGAIN; | 1504 | return -EAGAIN; |
1504 | 1505 | ||
1505 | if (hp100_check_lan(dev)) | 1506 | if (hp100_check_lan(dev)) |
@@ -1652,7 +1653,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1652 | printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i); | 1653 | printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i); |
1653 | #endif | 1654 | #endif |
1654 | /* not waited long enough since last failed tx try? */ | 1655 | /* not waited long enough since last failed tx try? */ |
1655 | if (jiffies - dev->trans_start < HZ) { | 1656 | if (time_before(jiffies, dev->trans_start + HZ)) { |
1656 | #ifdef HP100_DEBUG | 1657 | #ifdef HP100_DEBUG |
1657 | printk("hp100: %s: trans_start timing problem\n", | 1658 | printk("hp100: %s: trans_start timing problem\n", |
1658 | dev->name); | 1659 | dev->name); |
@@ -1718,17 +1719,10 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1718 | hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */ | 1719 | hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */ |
1719 | 1720 | ||
1720 | if (lp->mode == 2) { /* memory mapped */ | 1721 | if (lp->mode == 2) { /* memory mapped */ |
1721 | if (lp->mem_ptr_virt) { /* high pci memory was remapped */ | 1722 | /* Note: The J2585B needs alignment to 32bits here! */ |
1722 | /* Note: The J2585B needs alignment to 32bits here! */ | 1723 | memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3); |
1723 | memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3); | 1724 | if (!ok_flag) |
1724 | if (!ok_flag) | 1725 | memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len); |
1725 | memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len); | ||
1726 | } else { | ||
1727 | /* Note: The J2585B needs alignment to 32bits here! */ | ||
1728 | isa_memcpy_toio(lp->mem_ptr_phys, skb->data, (skb->len + 3) & ~3); | ||
1729 | if (!ok_flag) | ||
1730 | isa_memset_io(lp->mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb->len); | ||
1731 | } | ||
1732 | } else { /* programmed i/o */ | 1726 | } else { /* programmed i/o */ |
1733 | outsl(ioaddr + HP100_REG_DATA32, skb->data, | 1727 | outsl(ioaddr + HP100_REG_DATA32, skb->data, |
1734 | (skb->len + 3) >> 2); | 1728 | (skb->len + 3) >> 2); |
@@ -1798,10 +1792,7 @@ static void hp100_rx(struct net_device *dev) | |||
1798 | /* First we get the header, which contains information about the */ | 1792 | /* First we get the header, which contains information about the */ |
1799 | /* actual length of the received packet. */ | 1793 | /* actual length of the received packet. */ |
1800 | if (lp->mode == 2) { /* memory mapped mode */ | 1794 | if (lp->mode == 2) { /* memory mapped mode */ |
1801 | if (lp->mem_ptr_virt) /* if memory was remapped */ | 1795 | header = readl(lp->mem_ptr_virt); |
1802 | header = readl(lp->mem_ptr_virt); | ||
1803 | else | ||
1804 | header = isa_readl(lp->mem_ptr_phys); | ||
1805 | } else /* programmed i/o */ | 1796 | } else /* programmed i/o */ |
1806 | header = hp100_inl(DATA32); | 1797 | header = hp100_inl(DATA32); |
1807 | 1798 | ||
@@ -1833,13 +1824,9 @@ static void hp100_rx(struct net_device *dev) | |||
1833 | ptr = skb->data; | 1824 | ptr = skb->data; |
1834 | 1825 | ||
1835 | /* Now transfer the data from the card into that area */ | 1826 | /* Now transfer the data from the card into that area */ |
1836 | if (lp->mode == 2) { | 1827 | if (lp->mode == 2) |
1837 | if (lp->mem_ptr_virt) | 1828 | memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len); |
1838 | memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len); | 1829 | else /* io mapped */ |
1839 | /* Note alignment to 32bit transfers */ | ||
1840 | else | ||
1841 | isa_memcpy_fromio(ptr, lp->mem_ptr_phys, pkt_len); | ||
1842 | } else /* io mapped */ | ||
1843 | insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2); | 1830 | insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2); |
1844 | 1831 | ||
1845 | skb->protocol = eth_type_trans(skb, dev); | 1832 | skb->protocol = eth_type_trans(skb, dev); |
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index 591c5864ffb1..7e49522b8b3c 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c | |||
@@ -204,7 +204,7 @@ static inline int emac_phy_gpcs(int phy_mode) | |||
204 | 204 | ||
205 | static inline void emac_tx_enable(struct ocp_enet_private *dev) | 205 | static inline void emac_tx_enable(struct ocp_enet_private *dev) |
206 | { | 206 | { |
207 | struct emac_regs *p = dev->emacp; | 207 | struct emac_regs __iomem *p = dev->emacp; |
208 | unsigned long flags; | 208 | unsigned long flags; |
209 | u32 r; | 209 | u32 r; |
210 | 210 | ||
@@ -220,7 +220,7 @@ static inline void emac_tx_enable(struct ocp_enet_private *dev) | |||
220 | 220 | ||
221 | static void emac_tx_disable(struct ocp_enet_private *dev) | 221 | static void emac_tx_disable(struct ocp_enet_private *dev) |
222 | { | 222 | { |
223 | struct emac_regs *p = dev->emacp; | 223 | struct emac_regs __iomem *p = dev->emacp; |
224 | unsigned long flags; | 224 | unsigned long flags; |
225 | u32 r; | 225 | u32 r; |
226 | 226 | ||
@@ -244,7 +244,7 @@ static void emac_tx_disable(struct ocp_enet_private *dev) | |||
244 | 244 | ||
245 | static void emac_rx_enable(struct ocp_enet_private *dev) | 245 | static void emac_rx_enable(struct ocp_enet_private *dev) |
246 | { | 246 | { |
247 | struct emac_regs *p = dev->emacp; | 247 | struct emac_regs __iomem *p = dev->emacp; |
248 | unsigned long flags; | 248 | unsigned long flags; |
249 | u32 r; | 249 | u32 r; |
250 | 250 | ||
@@ -275,7 +275,7 @@ static void emac_rx_enable(struct ocp_enet_private *dev) | |||
275 | 275 | ||
276 | static void emac_rx_disable(struct ocp_enet_private *dev) | 276 | static void emac_rx_disable(struct ocp_enet_private *dev) |
277 | { | 277 | { |
278 | struct emac_regs *p = dev->emacp; | 278 | struct emac_regs __iomem *p = dev->emacp; |
279 | unsigned long flags; | 279 | unsigned long flags; |
280 | u32 r; | 280 | u32 r; |
281 | 281 | ||
@@ -299,7 +299,7 @@ static void emac_rx_disable(struct ocp_enet_private *dev) | |||
299 | 299 | ||
300 | static inline void emac_rx_disable_async(struct ocp_enet_private *dev) | 300 | static inline void emac_rx_disable_async(struct ocp_enet_private *dev) |
301 | { | 301 | { |
302 | struct emac_regs *p = dev->emacp; | 302 | struct emac_regs __iomem *p = dev->emacp; |
303 | unsigned long flags; | 303 | unsigned long flags; |
304 | u32 r; | 304 | u32 r; |
305 | 305 | ||
@@ -315,7 +315,7 @@ static inline void emac_rx_disable_async(struct ocp_enet_private *dev) | |||
315 | 315 | ||
316 | static int emac_reset(struct ocp_enet_private *dev) | 316 | static int emac_reset(struct ocp_enet_private *dev) |
317 | { | 317 | { |
318 | struct emac_regs *p = dev->emacp; | 318 | struct emac_regs __iomem *p = dev->emacp; |
319 | unsigned long flags; | 319 | unsigned long flags; |
320 | int n = 20; | 320 | int n = 20; |
321 | 321 | ||
@@ -348,7 +348,7 @@ static int emac_reset(struct ocp_enet_private *dev) | |||
348 | 348 | ||
349 | static void emac_hash_mc(struct ocp_enet_private *dev) | 349 | static void emac_hash_mc(struct ocp_enet_private *dev) |
350 | { | 350 | { |
351 | struct emac_regs *p = dev->emacp; | 351 | struct emac_regs __iomem *p = dev->emacp; |
352 | u16 gaht[4] = { 0 }; | 352 | u16 gaht[4] = { 0 }; |
353 | struct dev_mc_list *dmi; | 353 | struct dev_mc_list *dmi; |
354 | 354 | ||
@@ -393,7 +393,7 @@ static inline int emac_opb_mhz(void) | |||
393 | /* BHs disabled */ | 393 | /* BHs disabled */ |
394 | static int emac_configure(struct ocp_enet_private *dev) | 394 | static int emac_configure(struct ocp_enet_private *dev) |
395 | { | 395 | { |
396 | struct emac_regs *p = dev->emacp; | 396 | struct emac_regs __iomem *p = dev->emacp; |
397 | struct net_device *ndev = dev->ndev; | 397 | struct net_device *ndev = dev->ndev; |
398 | int gige; | 398 | int gige; |
399 | u32 r; | 399 | u32 r; |
@@ -555,7 +555,7 @@ static void emac_full_tx_reset(struct net_device *ndev) | |||
555 | 555 | ||
556 | static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg) | 556 | static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg) |
557 | { | 557 | { |
558 | struct emac_regs *p = dev->emacp; | 558 | struct emac_regs __iomem *p = dev->emacp; |
559 | u32 r; | 559 | u32 r; |
560 | int n; | 560 | int n; |
561 | 561 | ||
@@ -604,7 +604,7 @@ static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg) | |||
604 | static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg, | 604 | static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg, |
605 | u16 val) | 605 | u16 val) |
606 | { | 606 | { |
607 | struct emac_regs *p = dev->emacp; | 607 | struct emac_regs __iomem *p = dev->emacp; |
608 | int n; | 608 | int n; |
609 | 609 | ||
610 | DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg, | 610 | DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg, |
@@ -666,7 +666,7 @@ static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val) | |||
666 | static void emac_set_multicast_list(struct net_device *ndev) | 666 | static void emac_set_multicast_list(struct net_device *ndev) |
667 | { | 667 | { |
668 | struct ocp_enet_private *dev = ndev->priv; | 668 | struct ocp_enet_private *dev = ndev->priv; |
669 | struct emac_regs *p = dev->emacp; | 669 | struct emac_regs __iomem *p = dev->emacp; |
670 | u32 rmr = emac_iff2rmr(ndev); | 670 | u32 rmr = emac_iff2rmr(ndev); |
671 | 671 | ||
672 | DBG("%d: multicast %08x" NL, dev->def->index, rmr); | 672 | DBG("%d: multicast %08x" NL, dev->def->index, rmr); |
@@ -825,7 +825,7 @@ static void emac_clean_rx_ring(struct ocp_enet_private *dev) | |||
825 | } | 825 | } |
826 | 826 | ||
827 | static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot, | 827 | static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot, |
828 | int flags) | 828 | gfp_t flags) |
829 | { | 829 | { |
830 | struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); | 830 | struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); |
831 | if (unlikely(!skb)) | 831 | if (unlikely(!skb)) |
@@ -1047,7 +1047,7 @@ static inline u16 emac_tx_csum(struct ocp_enet_private *dev, | |||
1047 | 1047 | ||
1048 | static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len) | 1048 | static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len) |
1049 | { | 1049 | { |
1050 | struct emac_regs *p = dev->emacp; | 1050 | struct emac_regs __iomem *p = dev->emacp; |
1051 | struct net_device *ndev = dev->ndev; | 1051 | struct net_device *ndev = dev->ndev; |
1052 | 1052 | ||
1053 | /* Send the packet out */ | 1053 | /* Send the packet out */ |
@@ -1519,7 +1519,7 @@ static void emac_rxde(void *param) | |||
1519 | static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs) | 1519 | static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs) |
1520 | { | 1520 | { |
1521 | struct ocp_enet_private *dev = dev_instance; | 1521 | struct ocp_enet_private *dev = dev_instance; |
1522 | struct emac_regs *p = dev->emacp; | 1522 | struct emac_regs __iomem *p = dev->emacp; |
1523 | struct ibm_emac_error_stats *st = &dev->estats; | 1523 | struct ibm_emac_error_stats *st = &dev->estats; |
1524 | 1524 | ||
1525 | u32 isr = in_be32(&p->isr); | 1525 | u32 isr = in_be32(&p->isr); |
@@ -1619,17 +1619,17 @@ static void emac_remove(struct ocp_device *ocpdev) | |||
1619 | 1619 | ||
1620 | DBG("%d: remove" NL, dev->def->index); | 1620 | DBG("%d: remove" NL, dev->def->index); |
1621 | 1621 | ||
1622 | ocp_set_drvdata(ocpdev, 0); | 1622 | ocp_set_drvdata(ocpdev, NULL); |
1623 | unregister_netdev(dev->ndev); | 1623 | unregister_netdev(dev->ndev); |
1624 | 1624 | ||
1625 | tah_fini(dev->tah_dev); | 1625 | tah_fini(dev->tah_dev); |
1626 | rgmii_fini(dev->rgmii_dev, dev->rgmii_input); | 1626 | rgmii_fini(dev->rgmii_dev, dev->rgmii_input); |
1627 | zmii_fini(dev->zmii_dev, dev->zmii_input); | 1627 | zmii_fini(dev->zmii_dev, dev->zmii_input); |
1628 | 1628 | ||
1629 | emac_dbg_register(dev->def->index, 0); | 1629 | emac_dbg_register(dev->def->index, NULL); |
1630 | 1630 | ||
1631 | mal_unregister_commac(dev->mal, &dev->commac); | 1631 | mal_unregister_commac(dev->mal, &dev->commac); |
1632 | iounmap((void *)dev->emacp); | 1632 | iounmap(dev->emacp); |
1633 | kfree(dev->ndev); | 1633 | kfree(dev->ndev); |
1634 | } | 1634 | } |
1635 | 1635 | ||
@@ -2048,9 +2048,7 @@ static int __init emac_probe(struct ocp_device *ocpdev) | |||
2048 | goto out4; | 2048 | goto out4; |
2049 | 2049 | ||
2050 | /* Map EMAC regs */ | 2050 | /* Map EMAC regs */ |
2051 | dev->emacp = | 2051 | dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs)); |
2052 | (struct emac_regs *)ioremap(dev->def->paddr, | ||
2053 | sizeof(struct emac_regs)); | ||
2054 | if (!dev->emacp) { | 2052 | if (!dev->emacp) { |
2055 | printk(KERN_ERR "emac%d: could not ioremap device registers!\n", | 2053 | printk(KERN_ERR "emac%d: could not ioremap device registers!\n", |
2056 | dev->def->index); | 2054 | dev->def->index); |
@@ -2210,7 +2208,7 @@ static int __init emac_probe(struct ocp_device *ocpdev) | |||
2210 | 2208 | ||
2211 | return 0; | 2209 | return 0; |
2212 | out6: | 2210 | out6: |
2213 | iounmap((void *)dev->emacp); | 2211 | iounmap(dev->emacp); |
2214 | out5: | 2212 | out5: |
2215 | tah_fini(dev->tah_dev); | 2213 | tah_fini(dev->tah_dev); |
2216 | out4: | 2214 | out4: |
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h index 911abbaf471b..f61273b2e94f 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.h +++ b/drivers/net/ibm_emac/ibm_emac_core.h | |||
@@ -155,7 +155,7 @@ struct ibm_emac_error_stats { | |||
155 | 155 | ||
156 | struct ocp_enet_private { | 156 | struct ocp_enet_private { |
157 | struct net_device *ndev; /* 0 */ | 157 | struct net_device *ndev; /* 0 */ |
158 | struct emac_regs *emacp; | 158 | struct emac_regs __iomem *emacp; |
159 | 159 | ||
160 | struct mal_descriptor *tx_desc; | 160 | struct mal_descriptor *tx_desc; |
161 | int tx_cnt; | 161 | int tx_cnt; |
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c index 75d3b8639041..c7e1ecfa08fe 100644 --- a/drivers/net/ibm_emac/ibm_emac_debug.c +++ b/drivers/net/ibm_emac/ibm_emac_debug.c | |||
@@ -58,7 +58,7 @@ static void emac_desc_dump(int idx, struct ocp_enet_private *p) | |||
58 | 58 | ||
59 | static void emac_mac_dump(int idx, struct ocp_enet_private *dev) | 59 | static void emac_mac_dump(int idx, struct ocp_enet_private *dev) |
60 | { | 60 | { |
61 | struct emac_regs *p = dev->emacp; | 61 | struct emac_regs __iomem *p = dev->emacp; |
62 | 62 | ||
63 | printk("** EMAC%d registers **\n" | 63 | printk("** EMAC%d registers **\n" |
64 | "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" | 64 | "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" |
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h index a1ffb8a44fff..7f03d536c9a3 100644 --- a/drivers/net/ibm_emac/ibm_emac_rgmii.h +++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h | |||
@@ -31,7 +31,7 @@ struct rgmii_regs { | |||
31 | 31 | ||
32 | /* RGMII device */ | 32 | /* RGMII device */ |
33 | struct ibm_ocp_rgmii { | 33 | struct ibm_ocp_rgmii { |
34 | struct rgmii_regs *base; | 34 | struct rgmii_regs __iomem *base; |
35 | int users; /* number of EMACs using this RGMII bridge */ | 35 | int users; /* number of EMACs using this RGMII bridge */ |
36 | }; | 36 | }; |
37 | 37 | ||
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.c b/drivers/net/ibm_emac/ibm_emac_zmii.c index 35c1185079ed..e129e0aaa045 100644 --- a/drivers/net/ibm_emac/ibm_emac_zmii.c +++ b/drivers/net/ibm_emac/ibm_emac_zmii.c | |||
@@ -80,7 +80,7 @@ static inline u32 zmii_mode_mask(int mode, int input) | |||
80 | static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode) | 80 | static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode) |
81 | { | 81 | { |
82 | struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev); | 82 | struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev); |
83 | struct zmii_regs *p; | 83 | struct zmii_regs __iomem *p; |
84 | 84 | ||
85 | ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode); | 85 | ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode); |
86 | 86 | ||
@@ -94,8 +94,7 @@ static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode) | |||
94 | } | 94 | } |
95 | dev->mode = PHY_MODE_NA; | 95 | dev->mode = PHY_MODE_NA; |
96 | 96 | ||
97 | p = (struct zmii_regs *)ioremap(ocpdev->def->paddr, | 97 | p = ioremap(ocpdev->def->paddr, sizeof(struct zmii_regs)); |
98 | sizeof(struct zmii_regs)); | ||
99 | if (!p) { | 98 | if (!p) { |
100 | printk(KERN_ERR | 99 | printk(KERN_ERR |
101 | "zmii%d: could not ioremap device registers!\n", | 100 | "zmii%d: could not ioremap device registers!\n", |
@@ -231,7 +230,7 @@ void __exit __zmii_fini(struct ocp_device *ocpdev, int input) | |||
231 | if (!--dev->users) { | 230 | if (!--dev->users) { |
232 | /* Free everything if this is the last user */ | 231 | /* Free everything if this is the last user */ |
233 | ocp_set_drvdata(ocpdev, NULL); | 232 | ocp_set_drvdata(ocpdev, NULL); |
234 | iounmap((void *)dev->base); | 233 | iounmap(dev->base); |
235 | kfree(dev); | 234 | kfree(dev); |
236 | } | 235 | } |
237 | } | 236 | } |
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h index 0bb26062c0ad..92c854410753 100644 --- a/drivers/net/ibm_emac/ibm_emac_zmii.h +++ b/drivers/net/ibm_emac/ibm_emac_zmii.h | |||
@@ -32,7 +32,7 @@ struct zmii_regs { | |||
32 | 32 | ||
33 | /* ZMII device */ | 33 | /* ZMII device */ |
34 | struct ibm_ocp_zmii { | 34 | struct ibm_ocp_zmii { |
35 | struct zmii_regs *base; | 35 | struct zmii_regs __iomem *base; |
36 | int mode; /* subset of PHY_MODE_XXXX */ | 36 | int mode; /* subset of PHY_MODE_XXXX */ |
37 | int users; /* number of EMACs using this ZMII bridge */ | 37 | int users; /* number of EMACs using this ZMII bridge */ |
38 | u32 fer_save; /* FER value left by firmware */ | 38 | u32 fer_save; /* FER value left by firmware */ |
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 7a081346f079..c81fe1c382d5 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig | |||
@@ -283,7 +283,7 @@ config USB_IRDA | |||
283 | Say Y here if you want to build support for the USB IrDA FIR Dongle | 283 | Say Y here if you want to build support for the USB IrDA FIR Dongle |
284 | device driver. To compile it as a module, choose M here: the module | 284 | device driver. To compile it as a module, choose M here: the module |
285 | will be called irda-usb. IrDA-USB support the various IrDA USB | 285 | will be called irda-usb. IrDA-USB support the various IrDA USB |
286 | dongles available and most of their pecularities. Those dongles | 286 | dongles available and most of their peculiarities. Those dongles |
287 | plug in the USB port of your computer, are plug and play, and | 287 | plug in the USB port of your computer, are plug and play, and |
288 | support SIR and FIR (4Mbps) speeds. On the other hand, those | 288 | support SIR and FIR (4Mbps) speeds. On the other hand, those |
289 | dongles tend to be less efficient than a FIR chipset. | 289 | dongles tend to be less efficient than a FIR chipset. |
@@ -360,7 +360,7 @@ config ALI_FIR | |||
360 | help | 360 | help |
361 | Say Y here if you want to build support for the ALi M5123 FIR | 361 | Say Y here if you want to build support for the ALi M5123 FIR |
362 | Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C, | 362 | Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C, |
363 | M1535, M1535D, M1535+, M1535D Sourth Bridge. This driver supports | 363 | M1535, M1535D, M1535+, M1535D South Bridge. This driver supports |
364 | SIR, MIR and FIR (4Mbps) speeds. | 364 | SIR, MIR and FIR (4Mbps) speeds. |
365 | 365 | ||
366 | To compile it as a module, choose M here: the module will be called | 366 | To compile it as a module, choose M here: the module will be called |
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 02d5c6822733..f6f3dafe83ee 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c | |||
@@ -622,7 +622,7 @@ static int __init mac_sonic_init_module(void) | |||
622 | return 0; | 622 | return 0; |
623 | 623 | ||
624 | out_unregister: | 624 | out_unregister: |
625 | driver_unregister(&mac_sonic_driver); | 625 | platform_driver_unregister(&mac_sonic_driver); |
626 | 626 | ||
627 | return -ENOMEM; | 627 | return -ENOMEM; |
628 | } | 628 | } |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index c0998ef938e0..9f2661355a4a 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * | 10 | * |
11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> | 11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> |
12 | * | 12 | * |
13 | * Copyright (C) 2004-2005 MontaVista Software, Inc. | 13 | * Copyright (C) 2004-2006 MontaVista Software, Inc. |
14 | * Dale Farnsworth <dale@farnsworth.org> | 14 | * Dale Farnsworth <dale@farnsworth.org> |
15 | * | 15 | * |
16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> | 16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> |
@@ -37,8 +37,6 @@ | |||
37 | #include <linux/tcp.h> | 37 | #include <linux/tcp.h> |
38 | #include <linux/udp.h> | 38 | #include <linux/udp.h> |
39 | #include <linux/etherdevice.h> | 39 | #include <linux/etherdevice.h> |
40 | #include <linux/in.h> | ||
41 | #include <linux/ip.h> | ||
42 | 40 | ||
43 | #include <linux/bitops.h> | 41 | #include <linux/bitops.h> |
44 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
@@ -52,39 +50,16 @@ | |||
52 | #include <asm/delay.h> | 50 | #include <asm/delay.h> |
53 | #include "mv643xx_eth.h" | 51 | #include "mv643xx_eth.h" |
54 | 52 | ||
55 | /* | ||
56 | * The first part is the high level driver of the gigE ethernet ports. | ||
57 | */ | ||
58 | |||
59 | /* Constants */ | ||
60 | #define VLAN_HLEN 4 | ||
61 | #define FCS_LEN 4 | ||
62 | #define DMA_ALIGN 8 /* hw requires 8-byte alignment */ | ||
63 | #define HW_IP_ALIGN 2 /* hw aligns IP header */ | ||
64 | #define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN | ||
65 | #define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7) | ||
66 | |||
67 | #define INT_UNMASK_ALL 0x0007ffff | ||
68 | #define INT_UNMASK_ALL_EXT 0x0011ffff | ||
69 | #define INT_MASK_ALL 0x00000000 | ||
70 | #define INT_MASK_ALL_EXT 0x00000000 | ||
71 | #define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL | ||
72 | #define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT | ||
73 | |||
74 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
75 | #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) | ||
76 | #else | ||
77 | #define MAX_DESCS_PER_SKB 1 | ||
78 | #endif | ||
79 | |||
80 | #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */ | ||
81 | #define PHY_WAIT_MICRO_SECONDS 10 | ||
82 | |||
83 | /* Static function declarations */ | 53 | /* Static function declarations */ |
84 | static int eth_port_link_is_up(unsigned int eth_port_num); | ||
85 | static void eth_port_uc_addr_get(struct net_device *dev, | 54 | static void eth_port_uc_addr_get(struct net_device *dev, |
86 | unsigned char *MacAddr); | 55 | unsigned char *MacAddr); |
87 | static void eth_port_set_multicast_list(struct net_device *); | 56 | static void eth_port_set_multicast_list(struct net_device *); |
57 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | ||
58 | unsigned int queues); | ||
59 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | ||
60 | unsigned int queues); | ||
61 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num); | ||
62 | static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); | ||
88 | static int mv643xx_eth_open(struct net_device *); | 63 | static int mv643xx_eth_open(struct net_device *); |
89 | static int mv643xx_eth_stop(struct net_device *); | 64 | static int mv643xx_eth_stop(struct net_device *); |
90 | static int mv643xx_eth_change_mtu(struct net_device *, int); | 65 | static int mv643xx_eth_change_mtu(struct net_device *, int); |
@@ -93,8 +68,12 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num); | |||
93 | #ifdef MV643XX_NAPI | 68 | #ifdef MV643XX_NAPI |
94 | static int mv643xx_poll(struct net_device *dev, int *budget); | 69 | static int mv643xx_poll(struct net_device *dev, int *budget); |
95 | #endif | 70 | #endif |
71 | static int ethernet_phy_get(unsigned int eth_port_num); | ||
96 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | 72 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); |
97 | static int ethernet_phy_detect(unsigned int eth_port_num); | 73 | static int ethernet_phy_detect(unsigned int eth_port_num); |
74 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location); | ||
75 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val); | ||
76 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | ||
98 | static struct ethtool_ops mv643xx_ethtool_ops; | 77 | static struct ethtool_ops mv643xx_ethtool_ops; |
99 | 78 | ||
100 | static char mv643xx_driver_name[] = "mv643xx_eth"; | 79 | static char mv643xx_driver_name[] = "mv643xx_eth"; |
@@ -153,67 +132,53 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | |||
153 | } | 132 | } |
154 | 133 | ||
155 | /* | 134 | /* |
156 | * mv643xx_eth_rx_task | 135 | * mv643xx_eth_rx_refill_descs |
157 | * | 136 | * |
158 | * Fills / refills RX queue on a certain gigabit ethernet port | 137 | * Fills / refills RX queue on a certain gigabit ethernet port |
159 | * | 138 | * |
160 | * Input : pointer to ethernet interface network device structure | 139 | * Input : pointer to ethernet interface network device structure |
161 | * Output : N/A | 140 | * Output : N/A |
162 | */ | 141 | */ |
163 | static void mv643xx_eth_rx_task(void *data) | 142 | static void mv643xx_eth_rx_refill_descs(struct net_device *dev) |
164 | { | 143 | { |
165 | struct net_device *dev = (struct net_device *)data; | ||
166 | struct mv643xx_private *mp = netdev_priv(dev); | 144 | struct mv643xx_private *mp = netdev_priv(dev); |
167 | struct pkt_info pkt_info; | 145 | struct pkt_info pkt_info; |
168 | struct sk_buff *skb; | 146 | struct sk_buff *skb; |
169 | int unaligned; | 147 | int unaligned; |
170 | 148 | ||
171 | if (test_and_set_bit(0, &mp->rx_task_busy)) | 149 | while (mp->rx_desc_count < mp->rx_ring_size) { |
172 | panic("%s: Error in test_set_bit / clear_bit", dev->name); | 150 | skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN); |
173 | |||
174 | while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) { | ||
175 | skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN); | ||
176 | if (!skb) | 151 | if (!skb) |
177 | break; | 152 | break; |
178 | mp->rx_ring_skbs++; | 153 | mp->rx_desc_count++; |
179 | unaligned = (u32)skb->data & (DMA_ALIGN - 1); | 154 | unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1); |
180 | if (unaligned) | 155 | if (unaligned) |
181 | skb_reserve(skb, DMA_ALIGN - unaligned); | 156 | skb_reserve(skb, ETH_DMA_ALIGN - unaligned); |
182 | pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; | 157 | pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; |
183 | pkt_info.byte_cnt = RX_SKB_SIZE; | 158 | pkt_info.byte_cnt = ETH_RX_SKB_SIZE; |
184 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, | 159 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, |
185 | DMA_FROM_DEVICE); | 160 | ETH_RX_SKB_SIZE, DMA_FROM_DEVICE); |
186 | pkt_info.return_info = skb; | 161 | pkt_info.return_info = skb; |
187 | if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) { | 162 | if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) { |
188 | printk(KERN_ERR | 163 | printk(KERN_ERR |
189 | "%s: Error allocating RX Ring\n", dev->name); | 164 | "%s: Error allocating RX Ring\n", dev->name); |
190 | break; | 165 | break; |
191 | } | 166 | } |
192 | skb_reserve(skb, HW_IP_ALIGN); | 167 | skb_reserve(skb, ETH_HW_IP_ALIGN); |
193 | } | 168 | } |
194 | clear_bit(0, &mp->rx_task_busy); | ||
195 | /* | 169 | /* |
196 | * If RX ring is empty of SKB, set a timer to try allocating | 170 | * If RX ring is empty of SKB, set a timer to try allocating |
197 | * again in a later time . | 171 | * again at a later time. |
198 | */ | 172 | */ |
199 | if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) { | 173 | if (mp->rx_desc_count == 0) { |
200 | printk(KERN_INFO "%s: Rx ring is empty\n", dev->name); | 174 | printk(KERN_INFO "%s: Rx ring is empty\n", dev->name); |
201 | /* After 100mSec */ | 175 | mp->timeout.expires = jiffies + (HZ / 10); /* 100 mSec */ |
202 | mp->timeout.expires = jiffies + (HZ / 10); | ||
203 | add_timer(&mp->timeout); | 176 | add_timer(&mp->timeout); |
204 | mp->rx_timer_flag = 1; | ||
205 | } | ||
206 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
207 | else { | ||
208 | /* Return interrupts */ | ||
209 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num), | ||
210 | INT_UNMASK_ALL); | ||
211 | } | 177 | } |
212 | #endif | ||
213 | } | 178 | } |
214 | 179 | ||
215 | /* | 180 | /* |
216 | * mv643xx_eth_rx_task_timer_wrapper | 181 | * mv643xx_eth_rx_refill_descs_timer_wrapper |
217 | * | 182 | * |
218 | * Timer routine to wake up RX queue filling task. This function is | 183 | * Timer routine to wake up RX queue filling task. This function is |
219 | * used only in case the RX queue is empty, and all alloc_skb has | 184 | * used only in case the RX queue is empty, and all alloc_skb has |
@@ -222,13 +187,9 @@ static void mv643xx_eth_rx_task(void *data) | |||
222 | * Input : pointer to ethernet interface network device structure | 187 | * Input : pointer to ethernet interface network device structure |
223 | * Output : N/A | 188 | * Output : N/A |
224 | */ | 189 | */ |
225 | static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data) | 190 | static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data) |
226 | { | 191 | { |
227 | struct net_device *dev = (struct net_device *)data; | 192 | mv643xx_eth_rx_refill_descs((struct net_device *)data); |
228 | struct mv643xx_private *mp = netdev_priv(dev); | ||
229 | |||
230 | mp->rx_timer_flag = 0; | ||
231 | mv643xx_eth_rx_task((void *)data); | ||
232 | } | 193 | } |
233 | 194 | ||
234 | /* | 195 | /* |
@@ -245,8 +206,7 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev) | |||
245 | unsigned int port_num = mp->port_num; | 206 | unsigned int port_num = mp->port_num; |
246 | 207 | ||
247 | eth_port_init_mac_tables(port_num); | 208 | eth_port_init_mac_tables(port_num); |
248 | memcpy(mp->port_mac_addr, dev->dev_addr, 6); | 209 | eth_port_uc_addr_set(port_num, dev->dev_addr); |
249 | eth_port_uc_addr_set(port_num, mp->port_mac_addr); | ||
250 | } | 210 | } |
251 | 211 | ||
252 | /* | 212 | /* |
@@ -260,13 +220,14 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev) | |||
260 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) | 220 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) |
261 | { | 221 | { |
262 | struct mv643xx_private *mp = netdev_priv(dev); | 222 | struct mv643xx_private *mp = netdev_priv(dev); |
223 | u32 config_reg; | ||
263 | 224 | ||
225 | config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num)); | ||
264 | if (dev->flags & IFF_PROMISC) | 226 | if (dev->flags & IFF_PROMISC) |
265 | mp->port_config |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | 227 | config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; |
266 | else | 228 | else |
267 | mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | 229 | config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; |
268 | 230 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg); | |
269 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config); | ||
270 | 231 | ||
271 | eth_port_set_multicast_list(dev); | 232 | eth_port_set_multicast_list(dev); |
272 | } | 233 | } |
@@ -322,53 +283,82 @@ static void mv643xx_eth_tx_timeout_task(struct net_device *dev) | |||
322 | 283 | ||
323 | netif_device_detach(dev); | 284 | netif_device_detach(dev); |
324 | eth_port_reset(mp->port_num); | 285 | eth_port_reset(mp->port_num); |
325 | eth_port_start(mp); | 286 | eth_port_start(dev); |
326 | netif_device_attach(dev); | 287 | netif_device_attach(dev); |
327 | } | 288 | } |
328 | 289 | ||
329 | /* | 290 | /** |
330 | * mv643xx_eth_free_tx_queue | 291 | * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors |
331 | * | ||
332 | * Input : dev - a pointer to the required interface | ||
333 | * | 292 | * |
334 | * Output : 0 if was able to release skb , nonzero otherwise | 293 | * If force is non-zero, frees uncompleted descriptors as well |
335 | */ | 294 | */ |
336 | static int mv643xx_eth_free_tx_queue(struct net_device *dev, | 295 | int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) |
337 | unsigned int eth_int_cause_ext) | ||
338 | { | 296 | { |
339 | struct mv643xx_private *mp = netdev_priv(dev); | 297 | struct mv643xx_private *mp = netdev_priv(dev); |
340 | struct net_device_stats *stats = &mp->stats; | 298 | struct eth_tx_desc *desc; |
341 | struct pkt_info pkt_info; | 299 | u32 cmd_sts; |
342 | int released = 1; | 300 | struct sk_buff *skb; |
301 | unsigned long flags; | ||
302 | int tx_index; | ||
303 | dma_addr_t addr; | ||
304 | int count; | ||
305 | int released = 0; | ||
306 | |||
307 | while (mp->tx_desc_count > 0) { | ||
308 | spin_lock_irqsave(&mp->lock, flags); | ||
309 | tx_index = mp->tx_used_desc_q; | ||
310 | desc = &mp->p_tx_desc_area[tx_index]; | ||
311 | cmd_sts = desc->cmd_sts; | ||
312 | |||
313 | if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) { | ||
314 | spin_unlock_irqrestore(&mp->lock, flags); | ||
315 | return released; | ||
316 | } | ||
343 | 317 | ||
344 | if (!(eth_int_cause_ext & (BIT0 | BIT8))) | 318 | mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size; |
345 | return released; | 319 | mp->tx_desc_count--; |
346 | 320 | ||
347 | /* Check only queue 0 */ | 321 | addr = desc->buf_ptr; |
348 | while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { | 322 | count = desc->byte_cnt; |
349 | if (pkt_info.cmd_sts & BIT0) { | 323 | skb = mp->tx_skb[tx_index]; |
324 | if (skb) | ||
325 | mp->tx_skb[tx_index] = NULL; | ||
326 | |||
327 | spin_unlock_irqrestore(&mp->lock, flags); | ||
328 | |||
329 | if (cmd_sts & ETH_ERROR_SUMMARY) { | ||
350 | printk("%s: Error in TX\n", dev->name); | 330 | printk("%s: Error in TX\n", dev->name); |
351 | stats->tx_errors++; | 331 | mp->stats.tx_errors++; |
352 | } | 332 | } |
353 | 333 | ||
354 | if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) | 334 | if (cmd_sts & ETH_TX_FIRST_DESC) |
355 | dma_unmap_single(NULL, pkt_info.buf_ptr, | 335 | dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); |
356 | pkt_info.byte_cnt, | ||
357 | DMA_TO_DEVICE); | ||
358 | else | 336 | else |
359 | dma_unmap_page(NULL, pkt_info.buf_ptr, | 337 | dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE); |
360 | pkt_info.byte_cnt, | ||
361 | DMA_TO_DEVICE); | ||
362 | 338 | ||
363 | if (pkt_info.return_info) { | 339 | if (skb) |
364 | dev_kfree_skb_irq(pkt_info.return_info); | 340 | dev_kfree_skb_irq(skb); |
365 | released = 0; | 341 | |
366 | } | 342 | released = 1; |
367 | } | 343 | } |
368 | 344 | ||
369 | return released; | 345 | return released; |
370 | } | 346 | } |
371 | 347 | ||
348 | static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev) | ||
349 | { | ||
350 | struct mv643xx_private *mp = netdev_priv(dev); | ||
351 | |||
352 | if (mv643xx_eth_free_tx_descs(dev, 0) && | ||
353 | mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) | ||
354 | netif_wake_queue(dev); | ||
355 | } | ||
356 | |||
357 | static void mv643xx_eth_free_all_tx_descs(struct net_device *dev) | ||
358 | { | ||
359 | mv643xx_eth_free_tx_descs(dev, 1); | ||
360 | } | ||
361 | |||
372 | /* | 362 | /* |
373 | * mv643xx_eth_receive | 363 | * mv643xx_eth_receive |
374 | * | 364 | * |
@@ -380,11 +370,7 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev, | |||
380 | * | 370 | * |
381 | * Output : number of served packets | 371 | * Output : number of served packets |
382 | */ | 372 | */ |
383 | #ifdef MV643XX_NAPI | ||
384 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | 373 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) |
385 | #else | ||
386 | static int mv643xx_eth_receive_queue(struct net_device *dev) | ||
387 | #endif | ||
388 | { | 374 | { |
389 | struct mv643xx_private *mp = netdev_priv(dev); | 375 | struct mv643xx_private *mp = netdev_priv(dev); |
390 | struct net_device_stats *stats = &mp->stats; | 376 | struct net_device_stats *stats = &mp->stats; |
@@ -392,15 +378,14 @@ static int mv643xx_eth_receive_queue(struct net_device *dev) | |||
392 | struct sk_buff *skb; | 378 | struct sk_buff *skb; |
393 | struct pkt_info pkt_info; | 379 | struct pkt_info pkt_info; |
394 | 380 | ||
395 | #ifdef MV643XX_NAPI | ||
396 | while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { | 381 | while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { |
397 | #else | 382 | mp->rx_desc_count--; |
398 | while (eth_port_receive(mp, &pkt_info) == ETH_OK) { | ||
399 | #endif | ||
400 | mp->rx_ring_skbs--; | ||
401 | received_packets++; | 383 | received_packets++; |
402 | 384 | ||
403 | /* Update statistics. Note byte count includes 4 byte CRC count */ | 385 | /* |
386 | * Update statistics. | ||
387 | * Note byte count includes 4 byte CRC count | ||
388 | */ | ||
404 | stats->rx_packets++; | 389 | stats->rx_packets++; |
405 | stats->rx_bytes += pkt_info.byte_cnt; | 390 | stats->rx_bytes += pkt_info.byte_cnt; |
406 | skb = pkt_info.return_info; | 391 | skb = pkt_info.return_info; |
@@ -448,10 +433,61 @@ static int mv643xx_eth_receive_queue(struct net_device *dev) | |||
448 | } | 433 | } |
449 | dev->last_rx = jiffies; | 434 | dev->last_rx = jiffies; |
450 | } | 435 | } |
436 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ | ||
451 | 437 | ||
452 | return received_packets; | 438 | return received_packets; |
453 | } | 439 | } |
454 | 440 | ||
441 | /* Set the mv643xx port configuration register for the speed/duplex mode. */ | ||
442 | static void mv643xx_eth_update_pscr(struct net_device *dev, | ||
443 | struct ethtool_cmd *ecmd) | ||
444 | { | ||
445 | struct mv643xx_private *mp = netdev_priv(dev); | ||
446 | int port_num = mp->port_num; | ||
447 | u32 o_pscr, n_pscr; | ||
448 | unsigned int queues; | ||
449 | |||
450 | o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
451 | n_pscr = o_pscr; | ||
452 | |||
453 | /* clear speed, duplex and rx buffer size fields */ | ||
454 | n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 | | ||
455 | MV643XX_ETH_SET_GMII_SPEED_TO_1000 | | ||
456 | MV643XX_ETH_SET_FULL_DUPLEX_MODE | | ||
457 | MV643XX_ETH_MAX_RX_PACKET_MASK); | ||
458 | |||
459 | if (ecmd->duplex == DUPLEX_FULL) | ||
460 | n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE; | ||
461 | |||
462 | if (ecmd->speed == SPEED_1000) | ||
463 | n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 | | ||
464 | MV643XX_ETH_MAX_RX_PACKET_9700BYTE; | ||
465 | else { | ||
466 | if (ecmd->speed == SPEED_100) | ||
467 | n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100; | ||
468 | n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE; | ||
469 | } | ||
470 | |||
471 | if (n_pscr != o_pscr) { | ||
472 | if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0) | ||
473 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
474 | n_pscr); | ||
475 | else { | ||
476 | queues = mv643xx_eth_port_disable_tx(port_num); | ||
477 | |||
478 | o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; | ||
479 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
480 | o_pscr); | ||
481 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
482 | n_pscr); | ||
483 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
484 | n_pscr); | ||
485 | if (queues) | ||
486 | mv643xx_eth_port_enable_tx(port_num, queues); | ||
487 | } | ||
488 | } | ||
489 | } | ||
490 | |||
455 | /* | 491 | /* |
456 | * mv643xx_eth_int_handler | 492 | * mv643xx_eth_int_handler |
457 | * | 493 | * |
@@ -473,78 +509,52 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id, | |||
473 | 509 | ||
474 | /* Read interrupt cause registers */ | 510 | /* Read interrupt cause registers */ |
475 | eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & | 511 | eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & |
476 | INT_UNMASK_ALL; | 512 | ETH_INT_UNMASK_ALL; |
477 | 513 | if (eth_int_cause & ETH_INT_CAUSE_EXT) { | |
478 | if (eth_int_cause & BIT1) | ||
479 | eth_int_cause_ext = mv_read( | 514 | eth_int_cause_ext = mv_read( |
480 | MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & | 515 | MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & |
481 | INT_UNMASK_ALL_EXT; | 516 | ETH_INT_UNMASK_ALL_EXT; |
517 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), | ||
518 | ~eth_int_cause_ext); | ||
519 | } | ||
482 | 520 | ||
483 | #ifdef MV643XX_NAPI | 521 | /* PHY status changed */ |
484 | if (!(eth_int_cause & 0x0007fffd)) { | 522 | if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) { |
485 | /* Dont ack the Rx interrupt */ | 523 | struct ethtool_cmd cmd; |
486 | #endif | 524 | |
487 | /* | 525 | if (mii_link_ok(&mp->mii)) { |
488 | * Clear specific ethernet port intrerrupt registers by | 526 | mii_ethtool_gset(&mp->mii, &cmd); |
489 | * acknowleding relevant bits. | 527 | mv643xx_eth_update_pscr(dev, &cmd); |
490 | */ | 528 | mv643xx_eth_port_enable_tx(port_num, |
491 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), | 529 | ETH_TX_QUEUES_ENABLED); |
492 | ~eth_int_cause); | 530 | if (!netif_carrier_ok(dev)) { |
493 | if (eth_int_cause_ext != 0x0) | 531 | netif_carrier_on(dev); |
494 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG | 532 | if (mp->tx_ring_size - mp->tx_desc_count >= |
495 | (port_num), ~eth_int_cause_ext); | 533 | MAX_DESCS_PER_SKB) |
496 | 534 | netif_wake_queue(dev); | |
497 | /* UDP change : We may need this */ | 535 | } |
498 | if ((eth_int_cause_ext & 0x0000ffff) && | 536 | } else if (netif_carrier_ok(dev)) { |
499 | (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) && | 537 | netif_stop_queue(dev); |
500 | (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) | 538 | netif_carrier_off(dev); |
501 | netif_wake_queue(dev); | ||
502 | #ifdef MV643XX_NAPI | ||
503 | } else { | ||
504 | if (netif_rx_schedule_prep(dev)) { | ||
505 | /* Mask all the interrupts */ | ||
506 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | ||
507 | INT_MASK_ALL); | ||
508 | /* wait for previous write to complete */ | ||
509 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | ||
510 | __netif_rx_schedule(dev); | ||
511 | } | 539 | } |
512 | #else | 540 | } |
513 | if (eth_int_cause & (BIT2 | BIT11)) | ||
514 | mv643xx_eth_receive_queue(dev, 0); | ||
515 | 541 | ||
516 | /* | 542 | #ifdef MV643XX_NAPI |
517 | * After forwarded received packets to upper layer, add a task | 543 | if (eth_int_cause & ETH_INT_CAUSE_RX) { |
518 | * in an interrupts enabled context that refills the RX ring | 544 | /* schedule the NAPI poll routine to maintain port */ |
519 | * with skb's. | ||
520 | */ | ||
521 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
522 | /* Mask all interrupts on ethernet port */ | ||
523 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 545 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), |
524 | INT_MASK_ALL); | 546 | ETH_INT_MASK_ALL); |
525 | /* wait for previous write to take effect */ | 547 | /* wait for previous write to complete */ |
526 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 548 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
527 | 549 | ||
528 | queue_task(&mp->rx_task, &tq_immediate); | 550 | netif_rx_schedule(dev); |
529 | mark_bh(IMMEDIATE_BH); | 551 | } |
530 | #else | 552 | #else |
531 | mp->rx_task.func(dev); | 553 | if (eth_int_cause & ETH_INT_CAUSE_RX) |
554 | mv643xx_eth_receive_queue(dev, INT_MAX); | ||
555 | if (eth_int_cause_ext & ETH_INT_CAUSE_TX) | ||
556 | mv643xx_eth_free_completed_tx_descs(dev); | ||
532 | #endif | 557 | #endif |
533 | #endif | ||
534 | } | ||
535 | /* PHY status changed */ | ||
536 | if (eth_int_cause_ext & (BIT16 | BIT20)) { | ||
537 | if (eth_port_link_is_up(port_num)) { | ||
538 | netif_carrier_on(dev); | ||
539 | netif_wake_queue(dev); | ||
540 | /* Start TX queue */ | ||
541 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG | ||
542 | (port_num), 1); | ||
543 | } else { | ||
544 | netif_carrier_off(dev); | ||
545 | netif_stop_queue(dev); | ||
546 | } | ||
547 | } | ||
548 | 558 | ||
549 | /* | 559 | /* |
550 | * If no real interrupt occured, exit. | 560 | * If no real interrupt occured, exit. |
@@ -670,9 +680,6 @@ static void ether_init_rx_desc_ring(struct mv643xx_private *mp) | |||
670 | mp->rx_used_desc_q = 0; | 680 | mp->rx_used_desc_q = 0; |
671 | 681 | ||
672 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); | 682 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); |
673 | |||
674 | /* Add the queue to the list of RX queues of this port */ | ||
675 | mp->port_rx_queue_command |= 1; | ||
676 | } | 683 | } |
677 | 684 | ||
678 | /* | 685 | /* |
@@ -712,14 +719,36 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp) | |||
712 | 719 | ||
713 | mp->tx_curr_desc_q = 0; | 720 | mp->tx_curr_desc_q = 0; |
714 | mp->tx_used_desc_q = 0; | 721 | mp->tx_used_desc_q = 0; |
715 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
716 | mp->tx_first_desc_q = 0; | ||
717 | #endif | ||
718 | 722 | ||
719 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); | 723 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); |
724 | } | ||
720 | 725 | ||
721 | /* Add the queue to the list of Tx queues of this port */ | 726 | static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
722 | mp->port_tx_queue_command |= 1; | 727 | { |
728 | struct mv643xx_private *mp = netdev_priv(dev); | ||
729 | int err; | ||
730 | |||
731 | spin_lock_irq(&mp->lock); | ||
732 | err = mii_ethtool_sset(&mp->mii, cmd); | ||
733 | spin_unlock_irq(&mp->lock); | ||
734 | |||
735 | return err; | ||
736 | } | ||
737 | |||
738 | static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
739 | { | ||
740 | struct mv643xx_private *mp = netdev_priv(dev); | ||
741 | int err; | ||
742 | |||
743 | spin_lock_irq(&mp->lock); | ||
744 | err = mii_ethtool_gset(&mp->mii, cmd); | ||
745 | spin_unlock_irq(&mp->lock); | ||
746 | |||
747 | /* The PHY may support 1000baseT_Half, but the mv643xx does not */ | ||
748 | cmd->supported &= ~SUPPORTED_1000baseT_Half; | ||
749 | cmd->advertising &= ~ADVERTISED_1000baseT_Half; | ||
750 | |||
751 | return err; | ||
723 | } | 752 | } |
724 | 753 | ||
725 | /* | 754 | /* |
@@ -750,23 +779,12 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
750 | return -EAGAIN; | 779 | return -EAGAIN; |
751 | } | 780 | } |
752 | 781 | ||
753 | /* Stop RX Queues */ | ||
754 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | ||
755 | |||
756 | /* Set the MAC Address */ | ||
757 | memcpy(mp->port_mac_addr, dev->dev_addr, 6); | ||
758 | |||
759 | eth_port_init(mp); | 782 | eth_port_init(mp); |
760 | 783 | ||
761 | INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev); | ||
762 | |||
763 | memset(&mp->timeout, 0, sizeof(struct timer_list)); | 784 | memset(&mp->timeout, 0, sizeof(struct timer_list)); |
764 | mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper; | 785 | mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper; |
765 | mp->timeout.data = (unsigned long)dev; | 786 | mp->timeout.data = (unsigned long)dev; |
766 | 787 | ||
767 | mp->rx_task_busy = 0; | ||
768 | mp->rx_timer_flag = 0; | ||
769 | |||
770 | /* Allocate RX and TX skb rings */ | 788 | /* Allocate RX and TX skb rings */ |
771 | mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size, | 789 | mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size, |
772 | GFP_KERNEL); | 790 | GFP_KERNEL); |
@@ -784,7 +802,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
784 | } | 802 | } |
785 | 803 | ||
786 | /* Allocate TX ring */ | 804 | /* Allocate TX ring */ |
787 | mp->tx_ring_skbs = 0; | 805 | mp->tx_desc_count = 0; |
788 | size = mp->tx_ring_size * sizeof(struct eth_tx_desc); | 806 | size = mp->tx_ring_size * sizeof(struct eth_tx_desc); |
789 | mp->tx_desc_area_size = size; | 807 | mp->tx_desc_area_size = size; |
790 | 808 | ||
@@ -809,7 +827,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
809 | ether_init_tx_desc_ring(mp); | 827 | ether_init_tx_desc_ring(mp); |
810 | 828 | ||
811 | /* Allocate RX ring */ | 829 | /* Allocate RX ring */ |
812 | mp->rx_ring_skbs = 0; | 830 | mp->rx_desc_count = 0; |
813 | size = mp->rx_ring_size * sizeof(struct eth_rx_desc); | 831 | size = mp->rx_ring_size * sizeof(struct eth_rx_desc); |
814 | mp->rx_desc_area_size = size; | 832 | mp->rx_desc_area_size = size; |
815 | 833 | ||
@@ -839,9 +857,13 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
839 | 857 | ||
840 | ether_init_rx_desc_ring(mp); | 858 | ether_init_rx_desc_ring(mp); |
841 | 859 | ||
842 | mv643xx_eth_rx_task(dev); /* Fill RX ring with skb's */ | 860 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ |
861 | |||
862 | /* Clear any pending ethernet port interrupts */ | ||
863 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
864 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
843 | 865 | ||
844 | eth_port_start(mp); | 866 | eth_port_start(dev); |
845 | 867 | ||
846 | /* Interrupt Coalescing */ | 868 | /* Interrupt Coalescing */ |
847 | 869 | ||
@@ -853,16 +875,13 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
853 | mp->tx_int_coal = | 875 | mp->tx_int_coal = |
854 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); | 876 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); |
855 | 877 | ||
856 | /* Clear any pending ethernet port interrupts */ | ||
857 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
858 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
859 | |||
860 | /* Unmask phy and link status changes interrupts */ | 878 | /* Unmask phy and link status changes interrupts */ |
861 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), | 879 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), |
862 | INT_UNMASK_ALL_EXT); | 880 | ETH_INT_UNMASK_ALL_EXT); |
863 | 881 | ||
864 | /* Unmask RX buffer and TX end interrupt */ | 882 | /* Unmask RX buffer and TX end interrupt */ |
865 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); | 883 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
884 | |||
866 | return 0; | 885 | return 0; |
867 | 886 | ||
868 | out_free_tx_skb: | 887 | out_free_tx_skb: |
@@ -878,25 +897,14 @@ out_free_irq: | |||
878 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) | 897 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) |
879 | { | 898 | { |
880 | struct mv643xx_private *mp = netdev_priv(dev); | 899 | struct mv643xx_private *mp = netdev_priv(dev); |
881 | unsigned int port_num = mp->port_num; | ||
882 | unsigned int curr; | ||
883 | struct sk_buff *skb; | ||
884 | 900 | ||
885 | /* Stop Tx Queues */ | 901 | /* Stop Tx Queues */ |
886 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | 902 | mv643xx_eth_port_disable_tx(mp->port_num); |
887 | 903 | ||
888 | /* Free outstanding skb's on TX rings */ | 904 | /* Free outstanding skb's on TX ring */ |
889 | for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { | 905 | mv643xx_eth_free_all_tx_descs(dev); |
890 | skb = mp->tx_skb[curr]; | 906 | |
891 | if (skb) { | 907 | BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q); |
892 | mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags; | ||
893 | dev_kfree_skb(skb); | ||
894 | mp->tx_ring_skbs--; | ||
895 | } | ||
896 | } | ||
897 | if (mp->tx_ring_skbs) | ||
898 | printk("%s: Error on Tx descriptor free - could not free %d" | ||
899 | " descriptors\n", dev->name, mp->tx_ring_skbs); | ||
900 | 908 | ||
901 | /* Free TX ring */ | 909 | /* Free TX ring */ |
902 | if (mp->tx_sram_size) | 910 | if (mp->tx_sram_size) |
@@ -913,21 +921,21 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev) | |||
913 | int curr; | 921 | int curr; |
914 | 922 | ||
915 | /* Stop RX Queues */ | 923 | /* Stop RX Queues */ |
916 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | 924 | mv643xx_eth_port_disable_rx(port_num); |
917 | 925 | ||
918 | /* Free preallocated skb's on RX rings */ | 926 | /* Free preallocated skb's on RX rings */ |
919 | for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) { | 927 | for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { |
920 | if (mp->rx_skb[curr]) { | 928 | if (mp->rx_skb[curr]) { |
921 | dev_kfree_skb(mp->rx_skb[curr]); | 929 | dev_kfree_skb(mp->rx_skb[curr]); |
922 | mp->rx_ring_skbs--; | 930 | mp->rx_desc_count--; |
923 | } | 931 | } |
924 | } | 932 | } |
925 | 933 | ||
926 | if (mp->rx_ring_skbs) | 934 | if (mp->rx_desc_count) |
927 | printk(KERN_ERR | 935 | printk(KERN_ERR |
928 | "%s: Error in freeing Rx Ring. %d skb's still" | 936 | "%s: Error in freeing Rx Ring. %d skb's still" |
929 | " stuck in RX Ring - ignoring them\n", dev->name, | 937 | " stuck in RX Ring - ignoring them\n", dev->name, |
930 | mp->rx_ring_skbs); | 938 | mp->rx_desc_count); |
931 | /* Free RX ring */ | 939 | /* Free RX ring */ |
932 | if (mp->rx_sram_size) | 940 | if (mp->rx_sram_size) |
933 | iounmap(mp->p_rx_desc_area); | 941 | iounmap(mp->p_rx_desc_area); |
@@ -952,7 +960,7 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
952 | unsigned int port_num = mp->port_num; | 960 | unsigned int port_num = mp->port_num; |
953 | 961 | ||
954 | /* Mask all interrupts on ethernet port */ | 962 | /* Mask all interrupts on ethernet port */ |
955 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); | 963 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
956 | /* wait for previous write to complete */ | 964 | /* wait for previous write to complete */ |
957 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 965 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
958 | 966 | ||
@@ -977,30 +985,6 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
977 | } | 985 | } |
978 | 986 | ||
979 | #ifdef MV643XX_NAPI | 987 | #ifdef MV643XX_NAPI |
980 | static void mv643xx_tx(struct net_device *dev) | ||
981 | { | ||
982 | struct mv643xx_private *mp = netdev_priv(dev); | ||
983 | struct pkt_info pkt_info; | ||
984 | |||
985 | while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { | ||
986 | if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) | ||
987 | dma_unmap_single(NULL, pkt_info.buf_ptr, | ||
988 | pkt_info.byte_cnt, | ||
989 | DMA_TO_DEVICE); | ||
990 | else | ||
991 | dma_unmap_page(NULL, pkt_info.buf_ptr, | ||
992 | pkt_info.byte_cnt, | ||
993 | DMA_TO_DEVICE); | ||
994 | |||
995 | if (pkt_info.return_info) | ||
996 | dev_kfree_skb_irq(pkt_info.return_info); | ||
997 | } | ||
998 | |||
999 | if (netif_queue_stopped(dev) && | ||
1000 | mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB) | ||
1001 | netif_wake_queue(dev); | ||
1002 | } | ||
1003 | |||
1004 | /* | 988 | /* |
1005 | * mv643xx_poll | 989 | * mv643xx_poll |
1006 | * | 990 | * |
@@ -1014,7 +998,7 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1014 | 998 | ||
1015 | #ifdef MV643XX_TX_FAST_REFILL | 999 | #ifdef MV643XX_TX_FAST_REFILL |
1016 | if (++mp->tx_clean_threshold > 5) { | 1000 | if (++mp->tx_clean_threshold > 5) { |
1017 | mv643xx_tx(dev); | 1001 | mv643xx_eth_free_completed_tx_descs(dev); |
1018 | mp->tx_clean_threshold = 0; | 1002 | mp->tx_clean_threshold = 0; |
1019 | } | 1003 | } |
1020 | #endif | 1004 | #endif |
@@ -1025,7 +1009,6 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1025 | if (orig_budget > dev->quota) | 1009 | if (orig_budget > dev->quota) |
1026 | orig_budget = dev->quota; | 1010 | orig_budget = dev->quota; |
1027 | work_done = mv643xx_eth_receive_queue(dev, orig_budget); | 1011 | work_done = mv643xx_eth_receive_queue(dev, orig_budget); |
1028 | mp->rx_task.func(dev); | ||
1029 | *budget -= work_done; | 1012 | *budget -= work_done; |
1030 | dev->quota -= work_done; | 1013 | dev->quota -= work_done; |
1031 | if (work_done >= orig_budget) | 1014 | if (work_done >= orig_budget) |
@@ -1037,14 +1020,17 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1037 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | 1020 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); |
1038 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | 1021 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); |
1039 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 1022 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), |
1040 | INT_UNMASK_ALL); | 1023 | ETH_INT_UNMASK_ALL); |
1041 | } | 1024 | } |
1042 | 1025 | ||
1043 | return done ? 0 : 1; | 1026 | return done ? 0 : 1; |
1044 | } | 1027 | } |
1045 | #endif | 1028 | #endif |
1046 | 1029 | ||
1047 | /* Hardware can't handle unaligned fragments smaller than 9 bytes. | 1030 | /** |
1031 | * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments | ||
1032 | * | ||
1033 | * Hardware can't handle unaligned fragments smaller than 9 bytes. | ||
1048 | * This helper function detects that case. | 1034 | * This helper function detects that case. |
1049 | */ | 1035 | */ |
1050 | 1036 | ||
@@ -1061,223 +1047,166 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) | |||
1061 | return 0; | 1047 | return 0; |
1062 | } | 1048 | } |
1063 | 1049 | ||
1050 | /** | ||
1051 | * eth_alloc_tx_desc_index - return the index of the next available tx desc | ||
1052 | */ | ||
1053 | static int eth_alloc_tx_desc_index(struct mv643xx_private *mp) | ||
1054 | { | ||
1055 | int tx_desc_curr; | ||
1064 | 1056 | ||
1065 | /* | 1057 | BUG_ON(mp->tx_desc_count >= mp->tx_ring_size); |
1066 | * mv643xx_eth_start_xmit | 1058 | |
1067 | * | 1059 | tx_desc_curr = mp->tx_curr_desc_q; |
1068 | * This function is queues a packet in the Tx descriptor for | 1060 | mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size; |
1069 | * required port. | 1061 | |
1070 | * | 1062 | BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q); |
1071 | * Input : skb - a pointer to socket buffer | 1063 | |
1072 | * dev - a pointer to the required port | 1064 | return tx_desc_curr; |
1065 | } | ||
1066 | |||
1067 | /** | ||
1068 | * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments. | ||
1073 | * | 1069 | * |
1074 | * Output : zero upon success | 1070 | * Ensure the data for each fragment to be transmitted is mapped properly, |
1071 | * then fill in descriptors in the tx hw queue. | ||
1075 | */ | 1072 | */ |
1076 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1073 | static void eth_tx_fill_frag_descs(struct mv643xx_private *mp, |
1074 | struct sk_buff *skb) | ||
1077 | { | 1075 | { |
1078 | struct mv643xx_private *mp = netdev_priv(dev); | 1076 | int frag; |
1079 | struct net_device_stats *stats = &mp->stats; | 1077 | int tx_index; |
1080 | ETH_FUNC_RET_STATUS status; | 1078 | struct eth_tx_desc *desc; |
1081 | unsigned long flags; | ||
1082 | struct pkt_info pkt_info; | ||
1083 | 1079 | ||
1084 | if (netif_queue_stopped(dev)) { | 1080 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
1085 | printk(KERN_ERR | 1081 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; |
1086 | "%s: Tried sending packet when interface is stopped\n", | 1082 | |
1087 | dev->name); | 1083 | tx_index = eth_alloc_tx_desc_index(mp); |
1088 | return 1; | 1084 | desc = &mp->p_tx_desc_area[tx_index]; |
1085 | |||
1086 | desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA; | ||
1087 | /* Last Frag enables interrupt and frees the skb */ | ||
1088 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1089 | desc->cmd_sts |= ETH_ZERO_PADDING | | ||
1090 | ETH_TX_LAST_DESC | | ||
1091 | ETH_TX_ENABLE_INTERRUPT; | ||
1092 | mp->tx_skb[tx_index] = skb; | ||
1093 | } else | ||
1094 | mp->tx_skb[tx_index] = 0; | ||
1095 | |||
1096 | desc = &mp->p_tx_desc_area[tx_index]; | ||
1097 | desc->l4i_chk = 0; | ||
1098 | desc->byte_cnt = this_frag->size; | ||
1099 | desc->buf_ptr = dma_map_page(NULL, this_frag->page, | ||
1100 | this_frag->page_offset, | ||
1101 | this_frag->size, | ||
1102 | DMA_TO_DEVICE); | ||
1089 | } | 1103 | } |
1104 | } | ||
1090 | 1105 | ||
1091 | /* This is a hard error, log it. */ | 1106 | /** |
1092 | if ((mp->tx_ring_size - mp->tx_ring_skbs) <= | 1107 | * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw |
1093 | (skb_shinfo(skb)->nr_frags + 1)) { | 1108 | * |
1094 | netif_stop_queue(dev); | 1109 | * Ensure the data for an skb to be transmitted is mapped properly, |
1095 | printk(KERN_ERR | 1110 | * then fill in descriptors in the tx hw queue and start the hardware. |
1096 | "%s: Bug in mv643xx_eth - Trying to transmit when" | 1111 | */ |
1097 | " queue full !\n", dev->name); | 1112 | static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, |
1098 | return 1; | 1113 | struct sk_buff *skb) |
1099 | } | 1114 | { |
1115 | int tx_index; | ||
1116 | struct eth_tx_desc *desc; | ||
1117 | u32 cmd_sts; | ||
1118 | int length; | ||
1119 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
1100 | 1120 | ||
1101 | /* Paranoid check - this shouldn't happen */ | 1121 | cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA; |
1102 | if (skb == NULL) { | ||
1103 | stats->tx_dropped++; | ||
1104 | printk(KERN_ERR "mv64320_eth paranoid check failed\n"); | ||
1105 | return 1; | ||
1106 | } | ||
1107 | 1122 | ||
1108 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | 1123 | tx_index = eth_alloc_tx_desc_index(mp); |
1109 | if (has_tiny_unaligned_frags(skb)) { | 1124 | desc = &mp->p_tx_desc_area[tx_index]; |
1110 | if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { | ||
1111 | stats->tx_dropped++; | ||
1112 | printk(KERN_DEBUG "%s: failed to linearize tiny " | ||
1113 | "unaligned fragment\n", dev->name); | ||
1114 | return 1; | ||
1115 | } | ||
1116 | } | ||
1117 | 1125 | ||
1118 | spin_lock_irqsave(&mp->lock, flags); | 1126 | if (nr_frags) { |
1127 | eth_tx_fill_frag_descs(mp, skb); | ||
1119 | 1128 | ||
1120 | if (!skb_shinfo(skb)->nr_frags) { | 1129 | length = skb_headlen(skb); |
1121 | if (skb->ip_summed != CHECKSUM_HW) { | 1130 | mp->tx_skb[tx_index] = 0; |
1122 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1123 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | ||
1124 | ETH_TX_FIRST_DESC | | ||
1125 | ETH_TX_LAST_DESC | | ||
1126 | 5 << ETH_TX_IHL_SHIFT; | ||
1127 | pkt_info.l4i_chk = 0; | ||
1128 | } else { | ||
1129 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | ||
1130 | ETH_TX_FIRST_DESC | | ||
1131 | ETH_TX_LAST_DESC | | ||
1132 | ETH_GEN_TCP_UDP_CHECKSUM | | ||
1133 | ETH_GEN_IP_V_4_CHECKSUM | | ||
1134 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | ||
1135 | /* CPU already calculated pseudo header checksum. */ | ||
1136 | if ((skb->protocol == ETH_P_IP) && | ||
1137 | (skb->nh.iph->protocol == IPPROTO_UDP) ) { | ||
1138 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1139 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1140 | } else if ((skb->protocol == ETH_P_IP) && | ||
1141 | (skb->nh.iph->protocol == IPPROTO_TCP)) | ||
1142 | pkt_info.l4i_chk = skb->h.th->check; | ||
1143 | else { | ||
1144 | printk(KERN_ERR | ||
1145 | "%s: chksum proto != IPv4 TCP or UDP\n", | ||
1146 | dev->name); | ||
1147 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1148 | return 1; | ||
1149 | } | ||
1150 | } | ||
1151 | pkt_info.byte_cnt = skb->len; | ||
1152 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1153 | DMA_TO_DEVICE); | ||
1154 | pkt_info.return_info = skb; | ||
1155 | status = eth_port_send(mp, &pkt_info); | ||
1156 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1157 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1158 | dev->name); | ||
1159 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1160 | } else { | 1131 | } else { |
1161 | unsigned int frag; | 1132 | cmd_sts |= ETH_ZERO_PADDING | |
1133 | ETH_TX_LAST_DESC | | ||
1134 | ETH_TX_ENABLE_INTERRUPT; | ||
1135 | length = skb->len; | ||
1136 | mp->tx_skb[tx_index] = skb; | ||
1137 | } | ||
1162 | 1138 | ||
1163 | /* first frag which is skb header */ | 1139 | desc->byte_cnt = length; |
1164 | pkt_info.byte_cnt = skb_headlen(skb); | 1140 | desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); |
1165 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, | ||
1166 | skb_headlen(skb), | ||
1167 | DMA_TO_DEVICE); | ||
1168 | pkt_info.l4i_chk = 0; | ||
1169 | pkt_info.return_info = 0; | ||
1170 | |||
1171 | if (skb->ip_summed != CHECKSUM_HW) | ||
1172 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1173 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC | | ||
1174 | 5 << ETH_TX_IHL_SHIFT; | ||
1175 | else { | ||
1176 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC | | ||
1177 | ETH_GEN_TCP_UDP_CHECKSUM | | ||
1178 | ETH_GEN_IP_V_4_CHECKSUM | | ||
1179 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | ||
1180 | /* CPU already calculated pseudo header checksum. */ | ||
1181 | if ((skb->protocol == ETH_P_IP) && | ||
1182 | (skb->nh.iph->protocol == IPPROTO_UDP)) { | ||
1183 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1184 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1185 | } else if ((skb->protocol == ETH_P_IP) && | ||
1186 | (skb->nh.iph->protocol == IPPROTO_TCP)) | ||
1187 | pkt_info.l4i_chk = skb->h.th->check; | ||
1188 | else { | ||
1189 | printk(KERN_ERR | ||
1190 | "%s: chksum proto != IPv4 TCP or UDP\n", | ||
1191 | dev->name); | ||
1192 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1193 | return 1; | ||
1194 | } | ||
1195 | } | ||
1196 | 1141 | ||
1197 | status = eth_port_send(mp, &pkt_info); | 1142 | if (skb->ip_summed == CHECKSUM_HW) { |
1198 | if (status != ETH_OK) { | 1143 | BUG_ON(skb->protocol != ETH_P_IP); |
1199 | if ((status == ETH_ERROR)) | 1144 | |
1200 | printk(KERN_ERR | 1145 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | |
1201 | "%s: Error on transmitting packet\n", | 1146 | ETH_GEN_IP_V_4_CHECKSUM | |
1202 | dev->name); | 1147 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; |
1203 | if (status == ETH_QUEUE_FULL) | 1148 | |
1204 | printk("Error on Queue Full \n"); | 1149 | switch (skb->nh.iph->protocol) { |
1205 | if (status == ETH_QUEUE_LAST_RESOURCE) | 1150 | case IPPROTO_UDP: |
1206 | printk("Tx resource error \n"); | 1151 | cmd_sts |= ETH_UDP_FRAME; |
1152 | desc->l4i_chk = skb->h.uh->check; | ||
1153 | break; | ||
1154 | case IPPROTO_TCP: | ||
1155 | desc->l4i_chk = skb->h.th->check; | ||
1156 | break; | ||
1157 | default: | ||
1158 | BUG(); | ||
1207 | } | 1159 | } |
1208 | stats->tx_bytes += pkt_info.byte_cnt; | 1160 | } else { |
1209 | 1161 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | |
1210 | /* Check for the remaining frags */ | 1162 | cmd_sts |= 5 << ETH_TX_IHL_SHIFT; |
1211 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 1163 | desc->l4i_chk = 0; |
1212 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | 1164 | } |
1213 | pkt_info.l4i_chk = 0x0000; | 1165 | |
1214 | pkt_info.cmd_sts = 0x00000000; | 1166 | /* ensure all other descriptors are written before first cmd_sts */ |
1215 | 1167 | wmb(); | |
1216 | /* Last Frag enables interrupt and frees the skb */ | 1168 | desc->cmd_sts = cmd_sts; |
1217 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1218 | pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT | | ||
1219 | ETH_TX_LAST_DESC; | ||
1220 | pkt_info.return_info = skb; | ||
1221 | } else { | ||
1222 | pkt_info.return_info = 0; | ||
1223 | } | ||
1224 | pkt_info.l4i_chk = 0; | ||
1225 | pkt_info.byte_cnt = this_frag->size; | ||
1226 | 1169 | ||
1227 | pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page, | 1170 | /* ensure all descriptors are written before poking hardware */ |
1228 | this_frag->page_offset, | 1171 | wmb(); |
1229 | this_frag->size, | 1172 | mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED); |
1230 | DMA_TO_DEVICE); | ||
1231 | 1173 | ||
1232 | status = eth_port_send(mp, &pkt_info); | 1174 | mp->tx_desc_count += nr_frags + 1; |
1175 | } | ||
1233 | 1176 | ||
1234 | if (status != ETH_OK) { | 1177 | /** |
1235 | if ((status == ETH_ERROR)) | 1178 | * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission |
1236 | printk(KERN_ERR "%s: Error on " | 1179 | * |
1237 | "transmitting packet\n", | 1180 | */ |
1238 | dev->name); | 1181 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1182 | { | ||
1183 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1184 | struct net_device_stats *stats = &mp->stats; | ||
1185 | unsigned long flags; | ||
1239 | 1186 | ||
1240 | if (status == ETH_QUEUE_LAST_RESOURCE) | 1187 | BUG_ON(netif_queue_stopped(dev)); |
1241 | printk("Tx resource error \n"); | 1188 | BUG_ON(skb == NULL); |
1189 | BUG_ON(mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB); | ||
1242 | 1190 | ||
1243 | if (status == ETH_QUEUE_FULL) | 1191 | if (has_tiny_unaligned_frags(skb)) { |
1244 | printk("Queue is full \n"); | 1192 | if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { |
1245 | } | 1193 | stats->tx_dropped++; |
1246 | stats->tx_bytes += pkt_info.byte_cnt; | 1194 | printk(KERN_DEBUG "%s: failed to linearize tiny " |
1195 | "unaligned fragment\n", dev->name); | ||
1196 | return 1; | ||
1247 | } | 1197 | } |
1248 | } | 1198 | } |
1249 | #else | ||
1250 | spin_lock_irqsave(&mp->lock, flags); | ||
1251 | 1199 | ||
1252 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | | 1200 | spin_lock_irqsave(&mp->lock, flags); |
1253 | ETH_TX_LAST_DESC; | ||
1254 | pkt_info.l4i_chk = 0; | ||
1255 | pkt_info.byte_cnt = skb->len; | ||
1256 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1257 | DMA_TO_DEVICE); | ||
1258 | pkt_info.return_info = skb; | ||
1259 | status = eth_port_send(mp, &pkt_info); | ||
1260 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1261 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1262 | dev->name); | ||
1263 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1264 | #endif | ||
1265 | |||
1266 | /* Check if TX queue can handle another skb. If not, then | ||
1267 | * signal higher layers to stop requesting TX | ||
1268 | */ | ||
1269 | if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) | ||
1270 | /* | ||
1271 | * Stop getting skb's from upper layers. | ||
1272 | * Getting skb's from upper layers will be enabled again after | ||
1273 | * packets are released. | ||
1274 | */ | ||
1275 | netif_stop_queue(dev); | ||
1276 | 1201 | ||
1277 | /* Update statistics and start of transmittion time */ | 1202 | eth_tx_submit_descs_for_skb(mp, skb); |
1203 | stats->tx_bytes = skb->len; | ||
1278 | stats->tx_packets++; | 1204 | stats->tx_packets++; |
1279 | dev->trans_start = jiffies; | 1205 | dev->trans_start = jiffies; |
1280 | 1206 | ||
1207 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) | ||
1208 | netif_stop_queue(dev); | ||
1209 | |||
1281 | spin_unlock_irqrestore(&mp->lock, flags); | 1210 | spin_unlock_irqrestore(&mp->lock, flags); |
1282 | 1211 | ||
1283 | return 0; /* success */ | 1212 | return 0; /* success */ |
@@ -1306,16 +1235,45 @@ static void mv643xx_netpoll(struct net_device *netdev) | |||
1306 | struct mv643xx_private *mp = netdev_priv(netdev); | 1235 | struct mv643xx_private *mp = netdev_priv(netdev); |
1307 | int port_num = mp->port_num; | 1236 | int port_num = mp->port_num; |
1308 | 1237 | ||
1309 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); | 1238 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
1310 | /* wait for previous write to complete */ | 1239 | /* wait for previous write to complete */ |
1311 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 1240 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
1312 | 1241 | ||
1313 | mv643xx_eth_int_handler(netdev->irq, netdev, NULL); | 1242 | mv643xx_eth_int_handler(netdev->irq, netdev, NULL); |
1314 | 1243 | ||
1315 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); | 1244 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
1316 | } | 1245 | } |
1317 | #endif | 1246 | #endif |
1318 | 1247 | ||
1248 | static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address, | ||
1249 | int speed, int duplex, | ||
1250 | struct ethtool_cmd *cmd) | ||
1251 | { | ||
1252 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1253 | |||
1254 | memset(cmd, 0, sizeof(*cmd)); | ||
1255 | |||
1256 | cmd->port = PORT_MII; | ||
1257 | cmd->transceiver = XCVR_INTERNAL; | ||
1258 | cmd->phy_address = phy_address; | ||
1259 | |||
1260 | if (speed == 0) { | ||
1261 | cmd->autoneg = AUTONEG_ENABLE; | ||
1262 | /* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */ | ||
1263 | cmd->speed = SPEED_100; | ||
1264 | cmd->advertising = ADVERTISED_10baseT_Half | | ||
1265 | ADVERTISED_10baseT_Full | | ||
1266 | ADVERTISED_100baseT_Half | | ||
1267 | ADVERTISED_100baseT_Full; | ||
1268 | if (mp->mii.supports_gmii) | ||
1269 | cmd->advertising |= ADVERTISED_1000baseT_Full; | ||
1270 | } else { | ||
1271 | cmd->autoneg = AUTONEG_DISABLE; | ||
1272 | cmd->speed = speed; | ||
1273 | cmd->duplex = duplex; | ||
1274 | } | ||
1275 | } | ||
1276 | |||
1319 | /*/ | 1277 | /*/ |
1320 | * mv643xx_eth_probe | 1278 | * mv643xx_eth_probe |
1321 | * | 1279 | * |
@@ -1336,6 +1294,9 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1336 | u8 *p; | 1294 | u8 *p; |
1337 | struct resource *res; | 1295 | struct resource *res; |
1338 | int err; | 1296 | int err; |
1297 | struct ethtool_cmd cmd; | ||
1298 | int duplex = DUPLEX_HALF; | ||
1299 | int speed = 0; /* default to auto-negotiation */ | ||
1339 | 1300 | ||
1340 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); | 1301 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); |
1341 | if (!dev) | 1302 | if (!dev) |
@@ -1373,6 +1334,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1373 | dev->tx_queue_len = mp->tx_ring_size; | 1334 | dev->tx_queue_len = mp->tx_ring_size; |
1374 | dev->base_addr = 0; | 1335 | dev->base_addr = 0; |
1375 | dev->change_mtu = mv643xx_eth_change_mtu; | 1336 | dev->change_mtu = mv643xx_eth_change_mtu; |
1337 | dev->do_ioctl = mv643xx_eth_do_ioctl; | ||
1376 | SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops); | 1338 | SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops); |
1377 | 1339 | ||
1378 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | 1340 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX |
@@ -1393,33 +1355,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1393 | 1355 | ||
1394 | /* set default config values */ | 1356 | /* set default config values */ |
1395 | eth_port_uc_addr_get(dev, dev->dev_addr); | 1357 | eth_port_uc_addr_get(dev, dev->dev_addr); |
1396 | mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE; | ||
1397 | mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE; | ||
1398 | mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE; | ||
1399 | mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE; | ||
1400 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | 1358 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; |
1401 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | 1359 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; |
1402 | 1360 | ||
1403 | pd = pdev->dev.platform_data; | 1361 | pd = pdev->dev.platform_data; |
1404 | if (pd) { | 1362 | if (pd) { |
1405 | if (pd->mac_addr != NULL) | 1363 | if (pd->mac_addr) |
1406 | memcpy(dev->dev_addr, pd->mac_addr, 6); | 1364 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
1407 | 1365 | ||
1408 | if (pd->phy_addr || pd->force_phy_addr) | 1366 | if (pd->phy_addr || pd->force_phy_addr) |
1409 | ethernet_phy_set(port_num, pd->phy_addr); | 1367 | ethernet_phy_set(port_num, pd->phy_addr); |
1410 | 1368 | ||
1411 | if (pd->port_config || pd->force_port_config) | ||
1412 | mp->port_config = pd->port_config; | ||
1413 | |||
1414 | if (pd->port_config_extend || pd->force_port_config_extend) | ||
1415 | mp->port_config_extend = pd->port_config_extend; | ||
1416 | |||
1417 | if (pd->port_sdma_config || pd->force_port_sdma_config) | ||
1418 | mp->port_sdma_config = pd->port_sdma_config; | ||
1419 | |||
1420 | if (pd->port_serial_control || pd->force_port_serial_control) | ||
1421 | mp->port_serial_control = pd->port_serial_control; | ||
1422 | |||
1423 | if (pd->rx_queue_size) | 1369 | if (pd->rx_queue_size) |
1424 | mp->rx_ring_size = pd->rx_queue_size; | 1370 | mp->rx_ring_size = pd->rx_queue_size; |
1425 | 1371 | ||
@@ -1435,16 +1381,33 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1435 | mp->rx_sram_size = pd->rx_sram_size; | 1381 | mp->rx_sram_size = pd->rx_sram_size; |
1436 | mp->rx_sram_addr = pd->rx_sram_addr; | 1382 | mp->rx_sram_addr = pd->rx_sram_addr; |
1437 | } | 1383 | } |
1384 | |||
1385 | duplex = pd->duplex; | ||
1386 | speed = pd->speed; | ||
1438 | } | 1387 | } |
1439 | 1388 | ||
1389 | /* Hook up MII support for ethtool */ | ||
1390 | mp->mii.dev = dev; | ||
1391 | mp->mii.mdio_read = mv643xx_mdio_read; | ||
1392 | mp->mii.mdio_write = mv643xx_mdio_write; | ||
1393 | mp->mii.phy_id = ethernet_phy_get(port_num); | ||
1394 | mp->mii.phy_id_mask = 0x3f; | ||
1395 | mp->mii.reg_num_mask = 0x1f; | ||
1396 | |||
1440 | err = ethernet_phy_detect(port_num); | 1397 | err = ethernet_phy_detect(port_num); |
1441 | if (err) { | 1398 | if (err) { |
1442 | pr_debug("MV643xx ethernet port %d: " | 1399 | pr_debug("MV643xx ethernet port %d: " |
1443 | "No PHY detected at addr %d\n", | 1400 | "No PHY detected at addr %d\n", |
1444 | port_num, ethernet_phy_get(port_num)); | 1401 | port_num, ethernet_phy_get(port_num)); |
1445 | return err; | 1402 | goto out; |
1446 | } | 1403 | } |
1447 | 1404 | ||
1405 | ethernet_phy_reset(port_num); | ||
1406 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); | ||
1407 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); | ||
1408 | mv643xx_eth_update_pscr(dev, &cmd); | ||
1409 | mv643xx_set_settings(dev, &cmd); | ||
1410 | |||
1448 | err = register_netdev(dev); | 1411 | err = register_netdev(dev); |
1449 | if (err) | 1412 | if (err) |
1450 | goto out; | 1413 | goto out; |
@@ -1689,26 +1652,9 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); | |||
1689 | * to the Rx descriptor ring to enable the reuse of this source. | 1652 | * to the Rx descriptor ring to enable the reuse of this source. |
1690 | * Return Rx resource is done using the eth_rx_return_buff API. | 1653 | * Return Rx resource is done using the eth_rx_return_buff API. |
1691 | * | 1654 | * |
1692 | * Transmit operation: | ||
1693 | * The eth_port_send API supports Scatter-Gather which enables to | ||
1694 | * send a packet spanned over multiple buffers. This means that | ||
1695 | * for each packet info structure given by the user and put into | ||
1696 | * the Tx descriptors ring, will be transmitted only if the 'LAST' | ||
1697 | * bit will be set in the packet info command status field. This | ||
1698 | * API also consider restriction regarding buffer alignments and | ||
1699 | * sizes. | ||
1700 | * The user must return a Tx resource after ensuring the buffer | ||
1701 | * has been transmitted to enable the Tx ring indexes to update. | ||
1702 | * | ||
1703 | * BOARD LAYOUT | ||
1704 | * This device is on-board. No jumper diagram is necessary. | ||
1705 | * | ||
1706 | * EXTERNAL INTERFACE | ||
1707 | * | ||
1708 | * Prior to calling the initialization routine eth_port_init() the user | 1655 | * Prior to calling the initialization routine eth_port_init() the user |
1709 | * must set the following fields under mv643xx_private struct: | 1656 | * must set the following fields under mv643xx_private struct: |
1710 | * port_num User Ethernet port number. | 1657 | * port_num User Ethernet port number. |
1711 | * port_mac_addr[6] User defined port MAC address. | ||
1712 | * port_config User port configuration value. | 1658 | * port_config User port configuration value. |
1713 | * port_config_extend User port config extend value. | 1659 | * port_config_extend User port config extend value. |
1714 | * port_sdma_config User port SDMA config value. | 1660 | * port_sdma_config User port SDMA config value. |
@@ -1725,20 +1671,12 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); | |||
1725 | * return_info Tx/Rx user resource return information. | 1671 | * return_info Tx/Rx user resource return information. |
1726 | */ | 1672 | */ |
1727 | 1673 | ||
1728 | /* defines */ | ||
1729 | /* SDMA command macros */ | ||
1730 | #define ETH_ENABLE_TX_QUEUE(eth_port) \ | ||
1731 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1) | ||
1732 | |||
1733 | /* locals */ | ||
1734 | |||
1735 | /* PHY routines */ | 1674 | /* PHY routines */ |
1736 | static int ethernet_phy_get(unsigned int eth_port_num); | 1675 | static int ethernet_phy_get(unsigned int eth_port_num); |
1737 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | 1676 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); |
1738 | 1677 | ||
1739 | /* Ethernet Port routines */ | 1678 | /* Ethernet Port routines */ |
1740 | static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | 1679 | static void eth_port_set_filter_table_entry(int table, unsigned char entry); |
1741 | int option); | ||
1742 | 1680 | ||
1743 | /* | 1681 | /* |
1744 | * eth_port_init - Initialize the Ethernet port driver | 1682 | * eth_port_init - Initialize the Ethernet port driver |
@@ -1766,17 +1704,11 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | |||
1766 | */ | 1704 | */ |
1767 | static void eth_port_init(struct mv643xx_private *mp) | 1705 | static void eth_port_init(struct mv643xx_private *mp) |
1768 | { | 1706 | { |
1769 | mp->port_rx_queue_command = 0; | ||
1770 | mp->port_tx_queue_command = 0; | ||
1771 | |||
1772 | mp->rx_resource_err = 0; | 1707 | mp->rx_resource_err = 0; |
1773 | mp->tx_resource_err = 0; | ||
1774 | 1708 | ||
1775 | eth_port_reset(mp->port_num); | 1709 | eth_port_reset(mp->port_num); |
1776 | 1710 | ||
1777 | eth_port_init_mac_tables(mp->port_num); | 1711 | eth_port_init_mac_tables(mp->port_num); |
1778 | |||
1779 | ethernet_phy_reset(mp->port_num); | ||
1780 | } | 1712 | } |
1781 | 1713 | ||
1782 | /* | 1714 | /* |
@@ -1798,7 +1730,7 @@ static void eth_port_init(struct mv643xx_private *mp) | |||
1798 | * and ether_init_rx_desc_ring for Rx queues). | 1730 | * and ether_init_rx_desc_ring for Rx queues). |
1799 | * | 1731 | * |
1800 | * INPUT: | 1732 | * INPUT: |
1801 | * struct mv643xx_private *mp Ethernet port control struct | 1733 | * dev - a pointer to the required interface |
1802 | * | 1734 | * |
1803 | * OUTPUT: | 1735 | * OUTPUT: |
1804 | * Ethernet port is ready to receive and transmit. | 1736 | * Ethernet port is ready to receive and transmit. |
@@ -1806,10 +1738,13 @@ static void eth_port_init(struct mv643xx_private *mp) | |||
1806 | * RETURN: | 1738 | * RETURN: |
1807 | * None. | 1739 | * None. |
1808 | */ | 1740 | */ |
1809 | static void eth_port_start(struct mv643xx_private *mp) | 1741 | static void eth_port_start(struct net_device *dev) |
1810 | { | 1742 | { |
1743 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1811 | unsigned int port_num = mp->port_num; | 1744 | unsigned int port_num = mp->port_num; |
1812 | int tx_curr_desc, rx_curr_desc; | 1745 | int tx_curr_desc, rx_curr_desc; |
1746 | u32 pscr; | ||
1747 | struct ethtool_cmd ethtool_cmd; | ||
1813 | 1748 | ||
1814 | /* Assignment of Tx CTRP of given queue */ | 1749 | /* Assignment of Tx CTRP of given queue */ |
1815 | tx_curr_desc = mp->tx_curr_desc_q; | 1750 | tx_curr_desc = mp->tx_curr_desc_q; |
@@ -1822,37 +1757,45 @@ static void eth_port_start(struct mv643xx_private *mp) | |||
1822 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | 1757 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); |
1823 | 1758 | ||
1824 | /* Add the assigned Ethernet address to the port's address table */ | 1759 | /* Add the assigned Ethernet address to the port's address table */ |
1825 | eth_port_uc_addr_set(port_num, mp->port_mac_addr); | 1760 | eth_port_uc_addr_set(port_num, dev->dev_addr); |
1826 | 1761 | ||
1827 | /* Assign port configuration and command. */ | 1762 | /* Assign port configuration and command. */ |
1828 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config); | 1763 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), |
1764 | MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE); | ||
1829 | 1765 | ||
1830 | mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), | 1766 | mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), |
1831 | mp->port_config_extend); | 1767 | MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE); |
1832 | 1768 | ||
1769 | pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
1833 | 1770 | ||
1834 | /* Increase the Rx side buffer size if supporting GigE */ | 1771 | pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS); |
1835 | if (mp->port_serial_control & MV643XX_ETH_SET_GMII_SPEED_TO_1000) | 1772 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1836 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1773 | |
1837 | (mp->port_serial_control & 0xfff1ffff) | (0x5 << 17)); | 1774 | pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | |
1838 | else | 1775 | MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII | |
1839 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1776 | MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX | |
1840 | mp->port_serial_control); | 1777 | MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | |
1778 | MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED; | ||
1841 | 1779 | ||
1842 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1780 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1843 | mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)) | | 1781 | |
1844 | MV643XX_ETH_SERIAL_PORT_ENABLE); | 1782 | pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE; |
1783 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
1845 | 1784 | ||
1846 | /* Assign port SDMA configuration */ | 1785 | /* Assign port SDMA configuration */ |
1847 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), | 1786 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), |
1848 | mp->port_sdma_config); | 1787 | MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE); |
1849 | 1788 | ||
1850 | /* Enable port Rx. */ | 1789 | /* Enable port Rx. */ |
1851 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | 1790 | mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); |
1852 | mp->port_rx_queue_command); | ||
1853 | 1791 | ||
1854 | /* Disable port bandwidth limits by clearing MTU register */ | 1792 | /* Disable port bandwidth limits by clearing MTU register */ |
1855 | mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); | 1793 | mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); |
1794 | |||
1795 | /* save phy settings across reset */ | ||
1796 | mv643xx_get_settings(dev, ðtool_cmd); | ||
1797 | ethernet_phy_reset(mp->port_num); | ||
1798 | mv643xx_set_settings(dev, ðtool_cmd); | ||
1856 | } | 1799 | } |
1857 | 1800 | ||
1858 | /* | 1801 | /* |
@@ -1866,8 +1809,9 @@ static void eth_port_start(struct mv643xx_private *mp) | |||
1866 | * char * p_addr Address to be set | 1809 | * char * p_addr Address to be set |
1867 | * | 1810 | * |
1868 | * OUTPUT: | 1811 | * OUTPUT: |
1869 | * Set MAC address low and high registers. also calls eth_port_uc_addr() | 1812 | * Set MAC address low and high registers. also calls |
1870 | * To set the unicast table with the proper information. | 1813 | * eth_port_set_filter_table_entry() to set the unicast |
1814 | * table with the proper information. | ||
1871 | * | 1815 | * |
1872 | * RETURN: | 1816 | * RETURN: |
1873 | * N/A. | 1817 | * N/A. |
@@ -1878,6 +1822,7 @@ static void eth_port_uc_addr_set(unsigned int eth_port_num, | |||
1878 | { | 1822 | { |
1879 | unsigned int mac_h; | 1823 | unsigned int mac_h; |
1880 | unsigned int mac_l; | 1824 | unsigned int mac_l; |
1825 | int table; | ||
1881 | 1826 | ||
1882 | mac_l = (p_addr[4] << 8) | (p_addr[5]); | 1827 | mac_l = (p_addr[4] << 8) | (p_addr[5]); |
1883 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | | 1828 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | |
@@ -1887,9 +1832,8 @@ static void eth_port_uc_addr_set(unsigned int eth_port_num, | |||
1887 | mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); | 1832 | mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); |
1888 | 1833 | ||
1889 | /* Accept frames of this address */ | 1834 | /* Accept frames of this address */ |
1890 | eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR); | 1835 | table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(eth_port_num); |
1891 | 1836 | eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); | |
1892 | return; | ||
1893 | } | 1837 | } |
1894 | 1838 | ||
1895 | /* | 1839 | /* |
@@ -1928,72 +1872,6 @@ static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr) | |||
1928 | } | 1872 | } |
1929 | 1873 | ||
1930 | /* | 1874 | /* |
1931 | * eth_port_uc_addr - This function Set the port unicast address table | ||
1932 | * | ||
1933 | * DESCRIPTION: | ||
1934 | * This function locates the proper entry in the Unicast table for the | ||
1935 | * specified MAC nibble and sets its properties according to function | ||
1936 | * parameters. | ||
1937 | * | ||
1938 | * INPUT: | ||
1939 | * unsigned int eth_port_num Port number. | ||
1940 | * unsigned char uc_nibble Unicast MAC Address last nibble. | ||
1941 | * int option 0 = Add, 1 = remove address. | ||
1942 | * | ||
1943 | * OUTPUT: | ||
1944 | * This function add/removes MAC addresses from the port unicast address | ||
1945 | * table. | ||
1946 | * | ||
1947 | * RETURN: | ||
1948 | * true is output succeeded. | ||
1949 | * false if option parameter is invalid. | ||
1950 | * | ||
1951 | */ | ||
1952 | static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | ||
1953 | int option) | ||
1954 | { | ||
1955 | unsigned int unicast_reg; | ||
1956 | unsigned int tbl_offset; | ||
1957 | unsigned int reg_offset; | ||
1958 | |||
1959 | /* Locate the Unicast table entry */ | ||
1960 | uc_nibble = (0xf & uc_nibble); | ||
1961 | tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */ | ||
1962 | reg_offset = uc_nibble % 4; /* Entry offset within the above register */ | ||
1963 | |||
1964 | switch (option) { | ||
1965 | case REJECT_MAC_ADDR: | ||
1966 | /* Clear accepts frame bit at given unicast DA table entry */ | ||
1967 | unicast_reg = mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1968 | (eth_port_num) + tbl_offset)); | ||
1969 | |||
1970 | unicast_reg &= (0x0E << (8 * reg_offset)); | ||
1971 | |||
1972 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1973 | (eth_port_num) + tbl_offset), unicast_reg); | ||
1974 | break; | ||
1975 | |||
1976 | case ACCEPT_MAC_ADDR: | ||
1977 | /* Set accepts frame bit at unicast DA filter table entry */ | ||
1978 | unicast_reg = | ||
1979 | mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1980 | (eth_port_num) + tbl_offset)); | ||
1981 | |||
1982 | unicast_reg |= (0x01 << (8 * reg_offset)); | ||
1983 | |||
1984 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1985 | (eth_port_num) + tbl_offset), unicast_reg); | ||
1986 | |||
1987 | break; | ||
1988 | |||
1989 | default: | ||
1990 | return 0; | ||
1991 | } | ||
1992 | |||
1993 | return 1; | ||
1994 | } | ||
1995 | |||
1996 | /* | ||
1997 | * The entries in each table are indexed by a hash of a packet's MAC | 1875 | * The entries in each table are indexed by a hash of a packet's MAC |
1998 | * address. One bit in each entry determines whether the packet is | 1876 | * address. One bit in each entry determines whether the packet is |
1999 | * accepted. There are 4 entries (each 8 bits wide) in each register | 1877 | * accepted. There are 4 entries (each 8 bits wide) in each register |
@@ -2205,8 +2083,8 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num) | |||
2205 | 2083 | ||
2206 | /* Clear DA filter unicast table (Ex_dFUT) */ | 2084 | /* Clear DA filter unicast table (Ex_dFUT) */ |
2207 | for (table_index = 0; table_index <= 0xC; table_index += 4) | 2085 | for (table_index = 0; table_index <= 0xC; table_index += 4) |
2208 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | 2086 | mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE |
2209 | (eth_port_num) + table_index), 0); | 2087 | (eth_port_num) + table_index, 0); |
2210 | 2088 | ||
2211 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | 2089 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { |
2212 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | 2090 | /* Clear DA filter special multicast table (Ex_dFSMT) */ |
@@ -2389,6 +2267,73 @@ static void ethernet_phy_reset(unsigned int eth_port_num) | |||
2389 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | 2267 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); |
2390 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ | 2268 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ |
2391 | eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); | 2269 | eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); |
2270 | |||
2271 | /* wait for PHY to come out of reset */ | ||
2272 | do { | ||
2273 | udelay(1); | ||
2274 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | ||
2275 | } while (phy_reg_data & 0x8000); | ||
2276 | } | ||
2277 | |||
2278 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | ||
2279 | unsigned int queues) | ||
2280 | { | ||
2281 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); | ||
2282 | } | ||
2283 | |||
2284 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | ||
2285 | unsigned int queues) | ||
2286 | { | ||
2287 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues); | ||
2288 | } | ||
2289 | |||
2290 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) | ||
2291 | { | ||
2292 | u32 queues; | ||
2293 | |||
2294 | /* Stop Tx port activity. Check port Tx activity. */ | ||
2295 | queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2296 | & 0xFF; | ||
2297 | if (queues) { | ||
2298 | /* Issue stop command for active queues only */ | ||
2299 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), | ||
2300 | (queues << 8)); | ||
2301 | |||
2302 | /* Wait for all Tx activity to terminate. */ | ||
2303 | /* Check port cause register that all Tx queues are stopped */ | ||
2304 | while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2305 | & 0xFF) | ||
2306 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2307 | |||
2308 | /* Wait for Tx FIFO to empty */ | ||
2309 | while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) & | ||
2310 | ETH_PORT_TX_FIFO_EMPTY) | ||
2311 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2312 | } | ||
2313 | |||
2314 | return queues; | ||
2315 | } | ||
2316 | |||
2317 | static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) | ||
2318 | { | ||
2319 | u32 queues; | ||
2320 | |||
2321 | /* Stop Rx port activity. Check port Rx activity. */ | ||
2322 | queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2323 | & 0xFF; | ||
2324 | if (queues) { | ||
2325 | /* Issue stop command for active queues only */ | ||
2326 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | ||
2327 | (queues << 8)); | ||
2328 | |||
2329 | /* Wait for all Rx activity to terminate. */ | ||
2330 | /* Check port cause register that all Rx queues are stopped */ | ||
2331 | while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2332 | & 0xFF) | ||
2333 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2334 | } | ||
2335 | |||
2336 | return queues; | ||
2392 | } | 2337 | } |
2393 | 2338 | ||
2394 | /* | 2339 | /* |
@@ -2413,70 +2358,21 @@ static void eth_port_reset(unsigned int port_num) | |||
2413 | { | 2358 | { |
2414 | unsigned int reg_data; | 2359 | unsigned int reg_data; |
2415 | 2360 | ||
2416 | /* Stop Tx port activity. Check port Tx activity. */ | 2361 | mv643xx_eth_port_disable_tx(port_num); |
2417 | reg_data = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)); | 2362 | mv643xx_eth_port_disable_rx(port_num); |
2418 | |||
2419 | if (reg_data & 0xFF) { | ||
2420 | /* Issue stop command for active channels only */ | ||
2421 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), | ||
2422 | (reg_data << 8)); | ||
2423 | |||
2424 | /* Wait for all Tx activity to terminate. */ | ||
2425 | /* Check port cause register that all Tx queues are stopped */ | ||
2426 | while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2427 | & 0xFF) | ||
2428 | udelay(10); | ||
2429 | } | ||
2430 | |||
2431 | /* Stop Rx port activity. Check port Rx activity. */ | ||
2432 | reg_data = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)); | ||
2433 | |||
2434 | if (reg_data & 0xFF) { | ||
2435 | /* Issue stop command for active channels only */ | ||
2436 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | ||
2437 | (reg_data << 8)); | ||
2438 | |||
2439 | /* Wait for all Rx activity to terminate. */ | ||
2440 | /* Check port cause register that all Rx queues are stopped */ | ||
2441 | while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2442 | & 0xFF) | ||
2443 | udelay(10); | ||
2444 | } | ||
2445 | 2363 | ||
2446 | /* Clear all MIB counters */ | 2364 | /* Clear all MIB counters */ |
2447 | eth_clear_mib_counters(port_num); | 2365 | eth_clear_mib_counters(port_num); |
2448 | 2366 | ||
2449 | /* Reset the Enable bit in the Configuration Register */ | 2367 | /* Reset the Enable bit in the Configuration Register */ |
2450 | reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | 2368 | reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); |
2451 | reg_data &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; | 2369 | reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | |
2370 | MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | | ||
2371 | MV643XX_ETH_FORCE_LINK_PASS); | ||
2452 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); | 2372 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); |
2453 | } | 2373 | } |
2454 | 2374 | ||
2455 | 2375 | ||
2456 | static int eth_port_autoneg_supported(unsigned int eth_port_num) | ||
2457 | { | ||
2458 | unsigned int phy_reg_data0; | ||
2459 | |||
2460 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data0); | ||
2461 | |||
2462 | return phy_reg_data0 & 0x1000; | ||
2463 | } | ||
2464 | |||
2465 | static int eth_port_link_is_up(unsigned int eth_port_num) | ||
2466 | { | ||
2467 | unsigned int phy_reg_data1; | ||
2468 | |||
2469 | eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data1); | ||
2470 | |||
2471 | if (eth_port_autoneg_supported(eth_port_num)) { | ||
2472 | if (phy_reg_data1 & 0x20) /* auto-neg complete */ | ||
2473 | return 1; | ||
2474 | } else if (phy_reg_data1 & 0x4) /* link up */ | ||
2475 | return 1; | ||
2476 | |||
2477 | return 0; | ||
2478 | } | ||
2479 | |||
2480 | /* | 2376 | /* |
2481 | * eth_port_read_smi_reg - Read PHY registers | 2377 | * eth_port_read_smi_reg - Read PHY registers |
2482 | * | 2378 | * |
@@ -2582,250 +2478,21 @@ out: | |||
2582 | } | 2478 | } |
2583 | 2479 | ||
2584 | /* | 2480 | /* |
2585 | * eth_port_send - Send an Ethernet packet | 2481 | * Wrappers for MII support library. |
2586 | * | ||
2587 | * DESCRIPTION: | ||
2588 | * This routine send a given packet described by p_pktinfo parameter. It | ||
2589 | * supports transmitting of a packet spaned over multiple buffers. The | ||
2590 | * routine updates 'curr' and 'first' indexes according to the packet | ||
2591 | * segment passed to the routine. In case the packet segment is first, | ||
2592 | * the 'first' index is update. In any case, the 'curr' index is updated. | ||
2593 | * If the routine get into Tx resource error it assigns 'curr' index as | ||
2594 | * 'first'. This way the function can abort Tx process of multiple | ||
2595 | * descriptors per packet. | ||
2596 | * | ||
2597 | * INPUT: | ||
2598 | * struct mv643xx_private *mp Ethernet Port Control srtuct. | ||
2599 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2600 | * | ||
2601 | * OUTPUT: | ||
2602 | * Tx ring 'curr' and 'first' indexes are updated. | ||
2603 | * | ||
2604 | * RETURN: | ||
2605 | * ETH_QUEUE_FULL in case of Tx resource error. | ||
2606 | * ETH_ERROR in case the routine can not access Tx desc ring. | ||
2607 | * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource. | ||
2608 | * ETH_OK otherwise. | ||
2609 | * | ||
2610 | */ | ||
2611 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2612 | /* | ||
2613 | * Modified to include the first descriptor pointer in case of SG | ||
2614 | */ | 2482 | */ |
2615 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | 2483 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) |
2616 | struct pkt_info *p_pkt_info) | ||
2617 | { | ||
2618 | int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc; | ||
2619 | struct eth_tx_desc *current_descriptor; | ||
2620 | struct eth_tx_desc *first_descriptor; | ||
2621 | u32 command; | ||
2622 | |||
2623 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2624 | if (mp->tx_resource_err) | ||
2625 | return ETH_QUEUE_FULL; | ||
2626 | |||
2627 | /* | ||
2628 | * The hardware requires that each buffer that is <= 8 bytes | ||
2629 | * in length must be aligned on an 8 byte boundary. | ||
2630 | */ | ||
2631 | if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) { | ||
2632 | printk(KERN_ERR | ||
2633 | "mv643xx_eth port %d: packet size <= 8 problem\n", | ||
2634 | mp->port_num); | ||
2635 | return ETH_ERROR; | ||
2636 | } | ||
2637 | |||
2638 | mp->tx_ring_skbs++; | ||
2639 | BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); | ||
2640 | |||
2641 | /* Get the Tx Desc ring indexes */ | ||
2642 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2643 | tx_desc_used = mp->tx_used_desc_q; | ||
2644 | |||
2645 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2646 | |||
2647 | tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2648 | |||
2649 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2650 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2651 | current_descriptor->l4i_chk = p_pkt_info->l4i_chk; | ||
2652 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2653 | |||
2654 | command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC | | ||
2655 | ETH_BUFFER_OWNED_BY_DMA; | ||
2656 | if (command & ETH_TX_FIRST_DESC) { | ||
2657 | tx_first_desc = tx_desc_curr; | ||
2658 | mp->tx_first_desc_q = tx_first_desc; | ||
2659 | first_descriptor = current_descriptor; | ||
2660 | mp->tx_first_command = command; | ||
2661 | } else { | ||
2662 | tx_first_desc = mp->tx_first_desc_q; | ||
2663 | first_descriptor = &mp->p_tx_desc_area[tx_first_desc]; | ||
2664 | BUG_ON(first_descriptor == NULL); | ||
2665 | current_descriptor->cmd_sts = command; | ||
2666 | } | ||
2667 | |||
2668 | if (command & ETH_TX_LAST_DESC) { | ||
2669 | wmb(); | ||
2670 | first_descriptor->cmd_sts = mp->tx_first_command; | ||
2671 | |||
2672 | wmb(); | ||
2673 | ETH_ENABLE_TX_QUEUE(mp->port_num); | ||
2674 | |||
2675 | /* | ||
2676 | * Finish Tx packet. Update first desc in case of Tx resource | ||
2677 | * error */ | ||
2678 | tx_first_desc = tx_next_desc; | ||
2679 | mp->tx_first_desc_q = tx_first_desc; | ||
2680 | } | ||
2681 | |||
2682 | /* Check for ring index overlap in the Tx desc ring */ | ||
2683 | if (tx_next_desc == tx_desc_used) { | ||
2684 | mp->tx_resource_err = 1; | ||
2685 | mp->tx_curr_desc_q = tx_first_desc; | ||
2686 | |||
2687 | return ETH_QUEUE_LAST_RESOURCE; | ||
2688 | } | ||
2689 | |||
2690 | mp->tx_curr_desc_q = tx_next_desc; | ||
2691 | |||
2692 | return ETH_OK; | ||
2693 | } | ||
2694 | #else | ||
2695 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
2696 | struct pkt_info *p_pkt_info) | ||
2697 | { | 2484 | { |
2698 | int tx_desc_curr; | 2485 | int val; |
2699 | int tx_desc_used; | 2486 | struct mv643xx_private *mp = netdev_priv(dev); |
2700 | struct eth_tx_desc *current_descriptor; | ||
2701 | unsigned int command_status; | ||
2702 | |||
2703 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2704 | if (mp->tx_resource_err) | ||
2705 | return ETH_QUEUE_FULL; | ||
2706 | |||
2707 | mp->tx_ring_skbs++; | ||
2708 | BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); | ||
2709 | |||
2710 | /* Get the Tx Desc ring indexes */ | ||
2711 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2712 | tx_desc_used = mp->tx_used_desc_q; | ||
2713 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2714 | |||
2715 | command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC; | ||
2716 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2717 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2718 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2719 | |||
2720 | /* Set last desc with DMA ownership and interrupt enable. */ | ||
2721 | wmb(); | ||
2722 | current_descriptor->cmd_sts = command_status | | ||
2723 | ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT; | ||
2724 | |||
2725 | wmb(); | ||
2726 | ETH_ENABLE_TX_QUEUE(mp->port_num); | ||
2727 | |||
2728 | /* Finish Tx packet. Update first desc in case of Tx resource error */ | ||
2729 | tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2730 | |||
2731 | /* Update the current descriptor */ | ||
2732 | mp->tx_curr_desc_q = tx_desc_curr; | ||
2733 | |||
2734 | /* Check for ring index overlap in the Tx desc ring */ | ||
2735 | if (tx_desc_curr == tx_desc_used) { | ||
2736 | mp->tx_resource_err = 1; | ||
2737 | return ETH_QUEUE_LAST_RESOURCE; | ||
2738 | } | ||
2739 | 2487 | ||
2740 | return ETH_OK; | 2488 | eth_port_read_smi_reg(mp->port_num, location, &val); |
2489 | return val; | ||
2741 | } | 2490 | } |
2742 | #endif | ||
2743 | 2491 | ||
2744 | /* | 2492 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) |
2745 | * eth_tx_return_desc - Free all used Tx descriptors | ||
2746 | * | ||
2747 | * DESCRIPTION: | ||
2748 | * This routine returns the transmitted packet information to the caller. | ||
2749 | * It uses the 'first' index to support Tx desc return in case a transmit | ||
2750 | * of a packet spanned over multiple buffer still in process. | ||
2751 | * In case the Tx queue was in "resource error" condition, where there are | ||
2752 | * no available Tx resources, the function resets the resource error flag. | ||
2753 | * | ||
2754 | * INPUT: | ||
2755 | * struct mv643xx_private *mp Ethernet Port Control srtuct. | ||
2756 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2757 | * | ||
2758 | * OUTPUT: | ||
2759 | * Tx ring 'first' and 'used' indexes are updated. | ||
2760 | * | ||
2761 | * RETURN: | ||
2762 | * ETH_OK on success | ||
2763 | * ETH_ERROR otherwise. | ||
2764 | * | ||
2765 | */ | ||
2766 | static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, | ||
2767 | struct pkt_info *p_pkt_info) | ||
2768 | { | 2493 | { |
2769 | int tx_desc_used; | 2494 | struct mv643xx_private *mp = netdev_priv(dev); |
2770 | int tx_busy_desc; | 2495 | eth_port_write_smi_reg(mp->port_num, location, val); |
2771 | struct eth_tx_desc *p_tx_desc_used; | ||
2772 | unsigned int command_status; | ||
2773 | unsigned long flags; | ||
2774 | int err = ETH_OK; | ||
2775 | |||
2776 | spin_lock_irqsave(&mp->lock, flags); | ||
2777 | |||
2778 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2779 | tx_busy_desc = mp->tx_first_desc_q; | ||
2780 | #else | ||
2781 | tx_busy_desc = mp->tx_curr_desc_q; | ||
2782 | #endif | ||
2783 | |||
2784 | /* Get the Tx Desc ring indexes */ | ||
2785 | tx_desc_used = mp->tx_used_desc_q; | ||
2786 | |||
2787 | p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; | ||
2788 | |||
2789 | /* Sanity check */ | ||
2790 | if (p_tx_desc_used == NULL) { | ||
2791 | err = ETH_ERROR; | ||
2792 | goto out; | ||
2793 | } | ||
2794 | |||
2795 | /* Stop release. About to overlap the current available Tx descriptor */ | ||
2796 | if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) { | ||
2797 | err = ETH_ERROR; | ||
2798 | goto out; | ||
2799 | } | ||
2800 | |||
2801 | command_status = p_tx_desc_used->cmd_sts; | ||
2802 | |||
2803 | /* Still transmitting... */ | ||
2804 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { | ||
2805 | err = ETH_ERROR; | ||
2806 | goto out; | ||
2807 | } | ||
2808 | |||
2809 | /* Pass the packet information to the caller */ | ||
2810 | p_pkt_info->cmd_sts = command_status; | ||
2811 | p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; | ||
2812 | p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr; | ||
2813 | p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt; | ||
2814 | mp->tx_skb[tx_desc_used] = NULL; | ||
2815 | |||
2816 | /* Update the next descriptor to release. */ | ||
2817 | mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size; | ||
2818 | |||
2819 | /* Any Tx return cancels the Tx resource error status */ | ||
2820 | mp->tx_resource_err = 0; | ||
2821 | |||
2822 | BUG_ON(mp->tx_ring_skbs == 0); | ||
2823 | mp->tx_ring_skbs--; | ||
2824 | |||
2825 | out: | ||
2826 | spin_unlock_irqrestore(&mp->lock, flags); | ||
2827 | |||
2828 | return err; | ||
2829 | } | 2496 | } |
2830 | 2497 | ||
2831 | /* | 2498 | /* |
@@ -3017,111 +2684,6 @@ static const struct mv643xx_stats mv643xx_gstrings_stats[] = { | |||
3017 | #define MV643XX_STATS_LEN \ | 2684 | #define MV643XX_STATS_LEN \ |
3018 | sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) | 2685 | sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) |
3019 | 2686 | ||
3020 | static int | ||
3021 | mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
3022 | { | ||
3023 | struct mv643xx_private *mp = netdev->priv; | ||
3024 | int port_num = mp->port_num; | ||
3025 | int autoneg = eth_port_autoneg_supported(port_num); | ||
3026 | int mode_10_bit; | ||
3027 | int auto_duplex; | ||
3028 | int half_duplex = 0; | ||
3029 | int full_duplex = 0; | ||
3030 | int auto_speed; | ||
3031 | int speed_10 = 0; | ||
3032 | int speed_100 = 0; | ||
3033 | int speed_1000 = 0; | ||
3034 | |||
3035 | u32 pcs = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
3036 | u32 psr = mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)); | ||
3037 | |||
3038 | mode_10_bit = psr & MV643XX_ETH_PORT_STATUS_MODE_10_BIT; | ||
3039 | |||
3040 | if (mode_10_bit) { | ||
3041 | ecmd->supported = SUPPORTED_10baseT_Half; | ||
3042 | } else { | ||
3043 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
3044 | SUPPORTED_10baseT_Full | | ||
3045 | SUPPORTED_100baseT_Half | | ||
3046 | SUPPORTED_100baseT_Full | | ||
3047 | SUPPORTED_1000baseT_Full | | ||
3048 | (autoneg ? SUPPORTED_Autoneg : 0) | | ||
3049 | SUPPORTED_TP); | ||
3050 | |||
3051 | auto_duplex = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX); | ||
3052 | auto_speed = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII); | ||
3053 | |||
3054 | ecmd->advertising = ADVERTISED_TP; | ||
3055 | |||
3056 | if (autoneg) { | ||
3057 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
3058 | |||
3059 | if (auto_duplex) { | ||
3060 | half_duplex = 1; | ||
3061 | full_duplex = 1; | ||
3062 | } else { | ||
3063 | if (pcs & MV643XX_ETH_SET_FULL_DUPLEX_MODE) | ||
3064 | full_duplex = 1; | ||
3065 | else | ||
3066 | half_duplex = 1; | ||
3067 | } | ||
3068 | |||
3069 | if (auto_speed) { | ||
3070 | speed_10 = 1; | ||
3071 | speed_100 = 1; | ||
3072 | speed_1000 = 1; | ||
3073 | } else { | ||
3074 | if (pcs & MV643XX_ETH_SET_GMII_SPEED_TO_1000) | ||
3075 | speed_1000 = 1; | ||
3076 | else if (pcs & MV643XX_ETH_SET_MII_SPEED_TO_100) | ||
3077 | speed_100 = 1; | ||
3078 | else | ||
3079 | speed_10 = 1; | ||
3080 | } | ||
3081 | |||
3082 | if (speed_10 & half_duplex) | ||
3083 | ecmd->advertising |= ADVERTISED_10baseT_Half; | ||
3084 | if (speed_10 & full_duplex) | ||
3085 | ecmd->advertising |= ADVERTISED_10baseT_Full; | ||
3086 | if (speed_100 & half_duplex) | ||
3087 | ecmd->advertising |= ADVERTISED_100baseT_Half; | ||
3088 | if (speed_100 & full_duplex) | ||
3089 | ecmd->advertising |= ADVERTISED_100baseT_Full; | ||
3090 | if (speed_1000) | ||
3091 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
3092 | } | ||
3093 | } | ||
3094 | |||
3095 | ecmd->port = PORT_TP; | ||
3096 | ecmd->phy_address = ethernet_phy_get(port_num); | ||
3097 | |||
3098 | ecmd->transceiver = XCVR_EXTERNAL; | ||
3099 | |||
3100 | if (netif_carrier_ok(netdev)) { | ||
3101 | if (mode_10_bit) | ||
3102 | ecmd->speed = SPEED_10; | ||
3103 | else { | ||
3104 | if (psr & MV643XX_ETH_PORT_STATUS_GMII_1000) | ||
3105 | ecmd->speed = SPEED_1000; | ||
3106 | else if (psr & MV643XX_ETH_PORT_STATUS_MII_100) | ||
3107 | ecmd->speed = SPEED_100; | ||
3108 | else | ||
3109 | ecmd->speed = SPEED_10; | ||
3110 | } | ||
3111 | |||
3112 | if (psr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX) | ||
3113 | ecmd->duplex = DUPLEX_FULL; | ||
3114 | else | ||
3115 | ecmd->duplex = DUPLEX_HALF; | ||
3116 | } else { | ||
3117 | ecmd->speed = -1; | ||
3118 | ecmd->duplex = -1; | ||
3119 | } | ||
3120 | |||
3121 | ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; | ||
3122 | return 0; | ||
3123 | } | ||
3124 | |||
3125 | static void mv643xx_get_drvinfo(struct net_device *netdev, | 2687 | static void mv643xx_get_drvinfo(struct net_device *netdev, |
3126 | struct ethtool_drvinfo *drvinfo) | 2688 | struct ethtool_drvinfo *drvinfo) |
3127 | { | 2689 | { |
@@ -3168,15 +2730,41 @@ static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, | |||
3168 | } | 2730 | } |
3169 | } | 2731 | } |
3170 | 2732 | ||
2733 | static u32 mv643xx_eth_get_link(struct net_device *dev) | ||
2734 | { | ||
2735 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2736 | |||
2737 | return mii_link_ok(&mp->mii); | ||
2738 | } | ||
2739 | |||
2740 | static int mv643xx_eth_nway_restart(struct net_device *dev) | ||
2741 | { | ||
2742 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2743 | |||
2744 | return mii_nway_restart(&mp->mii); | ||
2745 | } | ||
2746 | |||
2747 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
2748 | { | ||
2749 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2750 | |||
2751 | return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL); | ||
2752 | } | ||
2753 | |||
3171 | static struct ethtool_ops mv643xx_ethtool_ops = { | 2754 | static struct ethtool_ops mv643xx_ethtool_ops = { |
3172 | .get_settings = mv643xx_get_settings, | 2755 | .get_settings = mv643xx_get_settings, |
2756 | .set_settings = mv643xx_set_settings, | ||
3173 | .get_drvinfo = mv643xx_get_drvinfo, | 2757 | .get_drvinfo = mv643xx_get_drvinfo, |
3174 | .get_link = ethtool_op_get_link, | 2758 | .get_link = mv643xx_eth_get_link, |
3175 | .get_sg = ethtool_op_get_sg, | 2759 | .get_sg = ethtool_op_get_sg, |
3176 | .set_sg = ethtool_op_set_sg, | 2760 | .set_sg = ethtool_op_set_sg, |
3177 | .get_strings = mv643xx_get_strings, | 2761 | .get_strings = mv643xx_get_strings, |
3178 | .get_stats_count = mv643xx_get_stats_count, | 2762 | .get_stats_count = mv643xx_get_stats_count, |
3179 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | 2763 | .get_ethtool_stats = mv643xx_get_ethtool_stats, |
2764 | .get_strings = mv643xx_get_strings, | ||
2765 | .get_stats_count = mv643xx_get_stats_count, | ||
2766 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | ||
2767 | .nway_reset = mv643xx_eth_nway_restart, | ||
3180 | }; | 2768 | }; |
3181 | 2769 | ||
3182 | /************* End ethtool support *************************/ | 2770 | /************* End ethtool support *************************/ |
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h index f769f9b626ea..7754d1974b9e 100644 --- a/drivers/net/mv643xx_eth.h +++ b/drivers/net/mv643xx_eth.h | |||
@@ -5,53 +5,16 @@ | |||
5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
6 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
7 | #include <linux/workqueue.h> | 7 | #include <linux/workqueue.h> |
8 | #include <linux/mii.h> | ||
8 | 9 | ||
9 | #include <linux/mv643xx.h> | 10 | #include <linux/mv643xx.h> |
10 | 11 | ||
11 | #define BIT0 0x00000001 | ||
12 | #define BIT1 0x00000002 | ||
13 | #define BIT2 0x00000004 | ||
14 | #define BIT3 0x00000008 | ||
15 | #define BIT4 0x00000010 | ||
16 | #define BIT5 0x00000020 | ||
17 | #define BIT6 0x00000040 | ||
18 | #define BIT7 0x00000080 | ||
19 | #define BIT8 0x00000100 | ||
20 | #define BIT9 0x00000200 | ||
21 | #define BIT10 0x00000400 | ||
22 | #define BIT11 0x00000800 | ||
23 | #define BIT12 0x00001000 | ||
24 | #define BIT13 0x00002000 | ||
25 | #define BIT14 0x00004000 | ||
26 | #define BIT15 0x00008000 | ||
27 | #define BIT16 0x00010000 | ||
28 | #define BIT17 0x00020000 | ||
29 | #define BIT18 0x00040000 | ||
30 | #define BIT19 0x00080000 | ||
31 | #define BIT20 0x00100000 | ||
32 | #define BIT21 0x00200000 | ||
33 | #define BIT22 0x00400000 | ||
34 | #define BIT23 0x00800000 | ||
35 | #define BIT24 0x01000000 | ||
36 | #define BIT25 0x02000000 | ||
37 | #define BIT26 0x04000000 | ||
38 | #define BIT27 0x08000000 | ||
39 | #define BIT28 0x10000000 | ||
40 | #define BIT29 0x20000000 | ||
41 | #define BIT30 0x40000000 | ||
42 | #define BIT31 0x80000000 | ||
43 | |||
44 | /* | ||
45 | * The first part is the high level driver of the gigE ethernet ports. | ||
46 | */ | ||
47 | |||
48 | /* Checksum offload for Tx works for most packets, but | 12 | /* Checksum offload for Tx works for most packets, but |
49 | * fails if previous packet sent did not use hw csum | 13 | * fails if previous packet sent did not use hw csum |
50 | */ | 14 | */ |
51 | #define MV643XX_CHECKSUM_OFFLOAD_TX | 15 | #define MV643XX_CHECKSUM_OFFLOAD_TX |
52 | #define MV643XX_NAPI | 16 | #define MV643XX_NAPI |
53 | #define MV643XX_TX_FAST_REFILL | 17 | #define MV643XX_TX_FAST_REFILL |
54 | #undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */ | ||
55 | #undef MV643XX_COAL | 18 | #undef MV643XX_COAL |
56 | 19 | ||
57 | /* | 20 | /* |
@@ -73,25 +36,40 @@ | |||
73 | #define MV643XX_RX_COAL 100 | 36 | #define MV643XX_RX_COAL 100 |
74 | #endif | 37 | #endif |
75 | 38 | ||
76 | /* | 39 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX |
77 | * The second part is the low level driver of the gigE ethernet ports. | 40 | #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) |
78 | */ | 41 | #else |
42 | #define MAX_DESCS_PER_SKB 1 | ||
43 | #endif | ||
79 | 44 | ||
80 | /* | 45 | #define ETH_VLAN_HLEN 4 |
81 | * Header File for : MV-643xx network interface header | 46 | #define ETH_FCS_LEN 4 |
82 | * | 47 | #define ETH_DMA_ALIGN 8 /* hw requires 8-byte alignment */ |
83 | * DESCRIPTION: | 48 | #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ |
84 | * This header file contains macros typedefs and function declaration for | 49 | #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ |
85 | * the Marvell Gig Bit Ethernet Controller. | 50 | ETH_VLAN_HLEN + ETH_FCS_LEN) |
86 | * | 51 | #define ETH_RX_SKB_SIZE ((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7) |
87 | * DEPENDENCIES: | 52 | |
88 | * None. | 53 | #define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ |
89 | * | 54 | #define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ |
90 | */ | 55 | |
56 | #define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2) | ||
57 | #define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9) | ||
58 | #define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR) | ||
59 | #define ETH_INT_CAUSE_EXT 0x00000002 | ||
60 | #define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT) | ||
91 | 61 | ||
92 | /* MAC accepet/reject macros */ | 62 | #define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0) |
93 | #define ACCEPT_MAC_ADDR 0 | 63 | #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) |
94 | #define REJECT_MAC_ADDR 1 | 64 | #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) |
65 | #define ETH_INT_CAUSE_PHY 0x00010000 | ||
66 | #define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY) | ||
67 | |||
68 | #define ETH_INT_MASK_ALL 0x00000000 | ||
69 | #define ETH_INT_MASK_ALL_EXT 0x00000000 | ||
70 | |||
71 | #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */ | ||
72 | #define PHY_WAIT_MICRO_SECONDS 10 | ||
95 | 73 | ||
96 | /* Buffer offset from buffer pointer */ | 74 | /* Buffer offset from buffer pointer */ |
97 | #define RX_BUF_OFFSET 0x2 | 75 | #define RX_BUF_OFFSET 0x2 |
@@ -133,88 +111,71 @@ | |||
133 | #define ETH_MIB_LATE_COLLISION 0x7c | 111 | #define ETH_MIB_LATE_COLLISION 0x7c |
134 | 112 | ||
135 | /* Port serial status reg (PSR) */ | 113 | /* Port serial status reg (PSR) */ |
136 | #define ETH_INTERFACE_GMII_MII 0 | 114 | #define ETH_INTERFACE_PCM 0x00000001 |
137 | #define ETH_INTERFACE_PCM BIT0 | 115 | #define ETH_LINK_IS_UP 0x00000002 |
138 | #define ETH_LINK_IS_DOWN 0 | 116 | #define ETH_PORT_AT_FULL_DUPLEX 0x00000004 |
139 | #define ETH_LINK_IS_UP BIT1 | 117 | #define ETH_RX_FLOW_CTRL_ENABLED 0x00000008 |
140 | #define ETH_PORT_AT_HALF_DUPLEX 0 | 118 | #define ETH_GMII_SPEED_1000 0x00000010 |
141 | #define ETH_PORT_AT_FULL_DUPLEX BIT2 | 119 | #define ETH_MII_SPEED_100 0x00000020 |
142 | #define ETH_RX_FLOW_CTRL_DISABLED 0 | 120 | #define ETH_TX_IN_PROGRESS 0x00000080 |
143 | #define ETH_RX_FLOW_CTRL_ENBALED BIT3 | 121 | #define ETH_BYPASS_ACTIVE 0x00000100 |
144 | #define ETH_GMII_SPEED_100_10 0 | 122 | #define ETH_PORT_AT_PARTITION_STATE 0x00000200 |
145 | #define ETH_GMII_SPEED_1000 BIT4 | 123 | #define ETH_PORT_TX_FIFO_EMPTY 0x00000400 |
146 | #define ETH_MII_SPEED_10 0 | ||
147 | #define ETH_MII_SPEED_100 BIT5 | ||
148 | #define ETH_NO_TX 0 | ||
149 | #define ETH_TX_IN_PROGRESS BIT7 | ||
150 | #define ETH_BYPASS_NO_ACTIVE 0 | ||
151 | #define ETH_BYPASS_ACTIVE BIT8 | ||
152 | #define ETH_PORT_NOT_AT_PARTITION_STATE 0 | ||
153 | #define ETH_PORT_AT_PARTITION_STATE BIT9 | ||
154 | #define ETH_PORT_TX_FIFO_NOT_EMPTY 0 | ||
155 | #define ETH_PORT_TX_FIFO_EMPTY BIT10 | ||
156 | |||
157 | #define ETH_DEFAULT_RX_BPDU_QUEUE_3 (BIT23 | BIT22) | ||
158 | #define ETH_DEFAULT_RX_BPDU_QUEUE_4 BIT24 | ||
159 | #define ETH_DEFAULT_RX_BPDU_QUEUE_5 (BIT24 | BIT22) | ||
160 | #define ETH_DEFAULT_RX_BPDU_QUEUE_6 (BIT24 | BIT23) | ||
161 | #define ETH_DEFAULT_RX_BPDU_QUEUE_7 (BIT24 | BIT23 | BIT22) | ||
162 | 124 | ||
163 | /* SMI reg */ | 125 | /* SMI reg */ |
164 | #define ETH_SMI_BUSY BIT28 /* 0 - Write, 1 - Read */ | 126 | #define ETH_SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */ |
165 | #define ETH_SMI_READ_VALID BIT27 /* 0 - Write, 1 - Read */ | 127 | #define ETH_SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */ |
166 | #define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read operation */ | 128 | #define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read */ |
167 | #define ETH_SMI_OPCODE_READ BIT26 /* Operation is in progress */ | 129 | #define ETH_SMI_OPCODE_READ 0x04000000 /* Operation is in progress */ |
130 | |||
131 | /* Interrupt Cause Register Bit Definitions */ | ||
168 | 132 | ||
169 | /* SDMA command status fields macros */ | 133 | /* SDMA command status fields macros */ |
170 | 134 | ||
171 | /* Tx & Rx descriptors status */ | 135 | /* Tx & Rx descriptors status */ |
172 | #define ETH_ERROR_SUMMARY (BIT0) | 136 | #define ETH_ERROR_SUMMARY 0x00000001 |
173 | 137 | ||
174 | /* Tx & Rx descriptors command */ | 138 | /* Tx & Rx descriptors command */ |
175 | #define ETH_BUFFER_OWNED_BY_DMA (BIT31) | 139 | #define ETH_BUFFER_OWNED_BY_DMA 0x80000000 |
176 | 140 | ||
177 | /* Tx descriptors status */ | 141 | /* Tx descriptors status */ |
178 | #define ETH_LC_ERROR (0 ) | 142 | #define ETH_LC_ERROR 0 |
179 | #define ETH_UR_ERROR (BIT1 ) | 143 | #define ETH_UR_ERROR 0x00000002 |
180 | #define ETH_RL_ERROR (BIT2 ) | 144 | #define ETH_RL_ERROR 0x00000004 |
181 | #define ETH_LLC_SNAP_FORMAT (BIT9 ) | 145 | #define ETH_LLC_SNAP_FORMAT 0x00000200 |
182 | 146 | ||
183 | /* Rx descriptors status */ | 147 | /* Rx descriptors status */ |
184 | #define ETH_CRC_ERROR (0 ) | 148 | #define ETH_OVERRUN_ERROR 0x00000002 |
185 | #define ETH_OVERRUN_ERROR (BIT1 ) | 149 | #define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004 |
186 | #define ETH_MAX_FRAME_LENGTH_ERROR (BIT2 ) | 150 | #define ETH_RESOURCE_ERROR 0x00000006 |
187 | #define ETH_RESOURCE_ERROR ((BIT2 | BIT1)) | 151 | #define ETH_VLAN_TAGGED 0x00080000 |
188 | #define ETH_VLAN_TAGGED (BIT19) | 152 | #define ETH_BPDU_FRAME 0x00100000 |
189 | #define ETH_BPDU_FRAME (BIT20) | 153 | #define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000 |
190 | #define ETH_TCP_FRAME_OVER_IP_V_4 (0 ) | 154 | #define ETH_OTHER_FRAME_TYPE 0x00400000 |
191 | #define ETH_UDP_FRAME_OVER_IP_V_4 (BIT21) | 155 | #define ETH_LAYER_2_IS_ETH_V_2 0x00800000 |
192 | #define ETH_OTHER_FRAME_TYPE (BIT22) | 156 | #define ETH_FRAME_TYPE_IP_V_4 0x01000000 |
193 | #define ETH_LAYER_2_IS_ETH_V_2 (BIT23) | 157 | #define ETH_FRAME_HEADER_OK 0x02000000 |
194 | #define ETH_FRAME_TYPE_IP_V_4 (BIT24) | 158 | #define ETH_RX_LAST_DESC 0x04000000 |
195 | #define ETH_FRAME_HEADER_OK (BIT25) | 159 | #define ETH_RX_FIRST_DESC 0x08000000 |
196 | #define ETH_RX_LAST_DESC (BIT26) | 160 | #define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000 |
197 | #define ETH_RX_FIRST_DESC (BIT27) | 161 | #define ETH_RX_ENABLE_INTERRUPT 0x20000000 |
198 | #define ETH_UNKNOWN_DESTINATION_ADDR (BIT28) | 162 | #define ETH_LAYER_4_CHECKSUM_OK 0x40000000 |
199 | #define ETH_RX_ENABLE_INTERRUPT (BIT29) | ||
200 | #define ETH_LAYER_4_CHECKSUM_OK (BIT30) | ||
201 | 163 | ||
202 | /* Rx descriptors byte count */ | 164 | /* Rx descriptors byte count */ |
203 | #define ETH_FRAME_FRAGMENTED (BIT2) | 165 | #define ETH_FRAME_FRAGMENTED 0x00000004 |
204 | 166 | ||
205 | /* Tx descriptors command */ | 167 | /* Tx descriptors command */ |
206 | #define ETH_LAYER_4_CHECKSUM_FIRST_DESC (BIT10) | 168 | #define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400 |
207 | #define ETH_FRAME_SET_TO_VLAN (BIT15) | 169 | #define ETH_FRAME_SET_TO_VLAN 0x00008000 |
208 | #define ETH_TCP_FRAME (0 ) | 170 | #define ETH_UDP_FRAME 0x00010000 |
209 | #define ETH_UDP_FRAME (BIT16) | 171 | #define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000 |
210 | #define ETH_GEN_TCP_UDP_CHECKSUM (BIT17) | 172 | #define ETH_GEN_IP_V_4_CHECKSUM 0x00040000 |
211 | #define ETH_GEN_IP_V_4_CHECKSUM (BIT18) | 173 | #define ETH_ZERO_PADDING 0x00080000 |
212 | #define ETH_ZERO_PADDING (BIT19) | 174 | #define ETH_TX_LAST_DESC 0x00100000 |
213 | #define ETH_TX_LAST_DESC (BIT20) | 175 | #define ETH_TX_FIRST_DESC 0x00200000 |
214 | #define ETH_TX_FIRST_DESC (BIT21) | 176 | #define ETH_GEN_CRC 0x00400000 |
215 | #define ETH_GEN_CRC (BIT22) | 177 | #define ETH_TX_ENABLE_INTERRUPT 0x00800000 |
216 | #define ETH_TX_ENABLE_INTERRUPT (BIT23) | 178 | #define ETH_AUTO_MODE 0x40000000 |
217 | #define ETH_AUTO_MODE (BIT30) | ||
218 | 179 | ||
219 | #define ETH_TX_IHL_SHIFT 11 | 180 | #define ETH_TX_IHL_SHIFT 11 |
220 | 181 | ||
@@ -324,13 +285,6 @@ struct mv643xx_mib_counters { | |||
324 | 285 | ||
325 | struct mv643xx_private { | 286 | struct mv643xx_private { |
326 | int port_num; /* User Ethernet port number */ | 287 | int port_num; /* User Ethernet port number */ |
327 | u8 port_mac_addr[6]; /* User defined port MAC address.*/ | ||
328 | u32 port_config; /* User port configuration value*/ | ||
329 | u32 port_config_extend; /* User port config extend value*/ | ||
330 | u32 port_sdma_config; /* User port SDMA config value */ | ||
331 | u32 port_serial_control; /* User port serial control value */ | ||
332 | u32 port_tx_queue_command; /* Port active Tx queues summary*/ | ||
333 | u32 port_rx_queue_command; /* Port active Rx queues summary*/ | ||
334 | 288 | ||
335 | u32 rx_sram_addr; /* Base address of rx sram area */ | 289 | u32 rx_sram_addr; /* Base address of rx sram area */ |
336 | u32 rx_sram_size; /* Size of rx sram area */ | 290 | u32 rx_sram_size; /* Size of rx sram area */ |
@@ -338,7 +292,6 @@ struct mv643xx_private { | |||
338 | u32 tx_sram_size; /* Size of tx sram area */ | 292 | u32 tx_sram_size; /* Size of tx sram area */ |
339 | 293 | ||
340 | int rx_resource_err; /* Rx ring resource error flag */ | 294 | int rx_resource_err; /* Rx ring resource error flag */ |
341 | int tx_resource_err; /* Tx ring resource error flag */ | ||
342 | 295 | ||
343 | /* Tx/Rx rings managment indexes fields. For driver use */ | 296 | /* Tx/Rx rings managment indexes fields. For driver use */ |
344 | 297 | ||
@@ -347,10 +300,6 @@ struct mv643xx_private { | |||
347 | 300 | ||
348 | /* Next available and first returning Tx resource */ | 301 | /* Next available and first returning Tx resource */ |
349 | int tx_curr_desc_q, tx_used_desc_q; | 302 | int tx_curr_desc_q, tx_used_desc_q; |
350 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
351 | int tx_first_desc_q; | ||
352 | u32 tx_first_command; | ||
353 | #endif | ||
354 | 303 | ||
355 | #ifdef MV643XX_TX_FAST_REFILL | 304 | #ifdef MV643XX_TX_FAST_REFILL |
356 | u32 tx_clean_threshold; | 305 | u32 tx_clean_threshold; |
@@ -358,54 +307,43 @@ struct mv643xx_private { | |||
358 | 307 | ||
359 | struct eth_rx_desc *p_rx_desc_area; | 308 | struct eth_rx_desc *p_rx_desc_area; |
360 | dma_addr_t rx_desc_dma; | 309 | dma_addr_t rx_desc_dma; |
361 | unsigned int rx_desc_area_size; | 310 | int rx_desc_area_size; |
362 | struct sk_buff **rx_skb; | 311 | struct sk_buff **rx_skb; |
363 | 312 | ||
364 | struct eth_tx_desc *p_tx_desc_area; | 313 | struct eth_tx_desc *p_tx_desc_area; |
365 | dma_addr_t tx_desc_dma; | 314 | dma_addr_t tx_desc_dma; |
366 | unsigned int tx_desc_area_size; | 315 | int tx_desc_area_size; |
367 | struct sk_buff **tx_skb; | 316 | struct sk_buff **tx_skb; |
368 | 317 | ||
369 | struct work_struct tx_timeout_task; | 318 | struct work_struct tx_timeout_task; |
370 | 319 | ||
371 | /* | ||
372 | * Former struct mv643xx_eth_priv members start here | ||
373 | */ | ||
374 | struct net_device_stats stats; | 320 | struct net_device_stats stats; |
375 | struct mv643xx_mib_counters mib_counters; | 321 | struct mv643xx_mib_counters mib_counters; |
376 | spinlock_t lock; | 322 | spinlock_t lock; |
377 | /* Size of Tx Ring per queue */ | 323 | /* Size of Tx Ring per queue */ |
378 | unsigned int tx_ring_size; | 324 | int tx_ring_size; |
379 | /* Ammont of SKBs outstanding on Tx queue */ | 325 | /* Number of tx descriptors in use */ |
380 | unsigned int tx_ring_skbs; | 326 | int tx_desc_count; |
381 | /* Size of Rx Ring per queue */ | 327 | /* Size of Rx Ring per queue */ |
382 | unsigned int rx_ring_size; | 328 | int rx_ring_size; |
383 | /* Ammount of SKBs allocated to Rx Ring per queue */ | 329 | /* Number of rx descriptors in use */ |
384 | unsigned int rx_ring_skbs; | 330 | int rx_desc_count; |
385 | |||
386 | /* | ||
387 | * rx_task used to fill RX ring out of bottom half context | ||
388 | */ | ||
389 | struct work_struct rx_task; | ||
390 | 331 | ||
391 | /* | 332 | /* |
392 | * Used in case RX Ring is empty, which can be caused when | 333 | * Used in case RX Ring is empty, which can be caused when |
393 | * system does not have resources (skb's) | 334 | * system does not have resources (skb's) |
394 | */ | 335 | */ |
395 | struct timer_list timeout; | 336 | struct timer_list timeout; |
396 | long rx_task_busy __attribute__ ((aligned(SMP_CACHE_BYTES))); | ||
397 | unsigned rx_timer_flag; | ||
398 | 337 | ||
399 | u32 rx_int_coal; | 338 | u32 rx_int_coal; |
400 | u32 tx_int_coal; | 339 | u32 tx_int_coal; |
340 | struct mii_if_info mii; | ||
401 | }; | 341 | }; |
402 | 342 | ||
403 | /* ethernet.h API list */ | ||
404 | |||
405 | /* Port operation control routines */ | 343 | /* Port operation control routines */ |
406 | static void eth_port_init(struct mv643xx_private *mp); | 344 | static void eth_port_init(struct mv643xx_private *mp); |
407 | static void eth_port_reset(unsigned int eth_port_num); | 345 | static void eth_port_reset(unsigned int eth_port_num); |
408 | static void eth_port_start(struct mv643xx_private *mp); | 346 | static void eth_port_start(struct net_device *dev); |
409 | 347 | ||
410 | /* Port MAC address routines */ | 348 | /* Port MAC address routines */ |
411 | static void eth_port_uc_addr_set(unsigned int eth_port_num, | 349 | static void eth_port_uc_addr_set(unsigned int eth_port_num, |
@@ -423,10 +361,6 @@ static void eth_port_read_smi_reg(unsigned int eth_port_num, | |||
423 | static void eth_clear_mib_counters(unsigned int eth_port_num); | 361 | static void eth_clear_mib_counters(unsigned int eth_port_num); |
424 | 362 | ||
425 | /* Port data flow control routines */ | 363 | /* Port data flow control routines */ |
426 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
427 | struct pkt_info *p_pkt_info); | ||
428 | static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, | ||
429 | struct pkt_info *p_pkt_info); | ||
430 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | 364 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, |
431 | struct pkt_info *p_pkt_info); | 365 | struct pkt_info *p_pkt_info); |
432 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | 366 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, |
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 9d6d2548c2d3..8d4999837b65 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -3,6 +3,7 @@ | |||
3 | Written/copyright 1999-2001 by Donald Becker. | 3 | Written/copyright 1999-2001 by Donald Becker. |
4 | Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) | 4 | Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) |
5 | Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) | 5 | Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) |
6 | Portions copyright 2004 Harald Welte <laforge@gnumonks.org> | ||
6 | 7 | ||
7 | This software may be used and distributed according to the terms of | 8 | This software may be used and distributed according to the terms of |
8 | the GNU General Public License (GPL), incorporated herein by reference. | 9 | the GNU General Public License (GPL), incorporated herein by reference. |
@@ -135,8 +136,6 @@ | |||
135 | 136 | ||
136 | TODO: | 137 | TODO: |
137 | * big endian support with CFG:BEM instead of cpu_to_le32 | 138 | * big endian support with CFG:BEM instead of cpu_to_le32 |
138 | * support for an external PHY | ||
139 | * NAPI | ||
140 | */ | 139 | */ |
141 | 140 | ||
142 | #include <linux/config.h> | 141 | #include <linux/config.h> |
@@ -160,6 +159,7 @@ | |||
160 | #include <linux/mii.h> | 159 | #include <linux/mii.h> |
161 | #include <linux/crc32.h> | 160 | #include <linux/crc32.h> |
162 | #include <linux/bitops.h> | 161 | #include <linux/bitops.h> |
162 | #include <linux/prefetch.h> | ||
163 | #include <asm/processor.h> /* Processor type for cache alignment. */ | 163 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
164 | #include <asm/io.h> | 164 | #include <asm/io.h> |
165 | #include <asm/irq.h> | 165 | #include <asm/irq.h> |
@@ -183,13 +183,11 @@ | |||
183 | NETIF_MSG_TX_ERR) | 183 | NETIF_MSG_TX_ERR) |
184 | static int debug = -1; | 184 | static int debug = -1; |
185 | 185 | ||
186 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | ||
187 | static int max_interrupt_work = 20; | ||
188 | static int mtu; | 186 | static int mtu; |
189 | 187 | ||
190 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | 188 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). |
191 | This chip uses a 512 element hash table based on the Ethernet CRC. */ | 189 | This chip uses a 512 element hash table based on the Ethernet CRC. */ |
192 | static int multicast_filter_limit = 100; | 190 | static const int multicast_filter_limit = 100; |
193 | 191 | ||
194 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | 192 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. |
195 | Setting to > 1518 effectively disables this feature. */ | 193 | Setting to > 1518 effectively disables this feature. */ |
@@ -251,14 +249,11 @@ MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); | |||
251 | MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); | 249 | MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); |
252 | MODULE_LICENSE("GPL"); | 250 | MODULE_LICENSE("GPL"); |
253 | 251 | ||
254 | module_param(max_interrupt_work, int, 0); | ||
255 | module_param(mtu, int, 0); | 252 | module_param(mtu, int, 0); |
256 | module_param(debug, int, 0); | 253 | module_param(debug, int, 0); |
257 | module_param(rx_copybreak, int, 0); | 254 | module_param(rx_copybreak, int, 0); |
258 | module_param_array(options, int, NULL, 0); | 255 | module_param_array(options, int, NULL, 0); |
259 | module_param_array(full_duplex, int, NULL, 0); | 256 | module_param_array(full_duplex, int, NULL, 0); |
260 | MODULE_PARM_DESC(max_interrupt_work, | ||
261 | "DP8381x maximum events handled per interrupt"); | ||
262 | MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); | 257 | MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); |
263 | MODULE_PARM_DESC(debug, "DP8381x default debug level"); | 258 | MODULE_PARM_DESC(debug, "DP8381x default debug level"); |
264 | MODULE_PARM_DESC(rx_copybreak, | 259 | MODULE_PARM_DESC(rx_copybreak, |
@@ -374,7 +369,7 @@ enum pcistuff { | |||
374 | 369 | ||
375 | 370 | ||
376 | /* array of board data directly indexed by pci_tbl[x].driver_data */ | 371 | /* array of board data directly indexed by pci_tbl[x].driver_data */ |
377 | static struct { | 372 | static const struct { |
378 | const char *name; | 373 | const char *name; |
379 | unsigned long flags; | 374 | unsigned long flags; |
380 | } natsemi_pci_info[] __devinitdata = { | 375 | } natsemi_pci_info[] __devinitdata = { |
@@ -691,6 +686,8 @@ struct netdev_private { | |||
691 | /* Based on MTU+slack. */ | 686 | /* Based on MTU+slack. */ |
692 | unsigned int rx_buf_sz; | 687 | unsigned int rx_buf_sz; |
693 | int oom; | 688 | int oom; |
689 | /* Interrupt status */ | ||
690 | u32 intr_status; | ||
694 | /* Do not touch the nic registers */ | 691 | /* Do not touch the nic registers */ |
695 | int hands_off; | 692 | int hands_off; |
696 | /* external phy that is used: only valid if dev->if_port != PORT_TP */ | 693 | /* external phy that is used: only valid if dev->if_port != PORT_TP */ |
@@ -748,7 +745,8 @@ static void init_registers(struct net_device *dev); | |||
748 | static int start_tx(struct sk_buff *skb, struct net_device *dev); | 745 | static int start_tx(struct sk_buff *skb, struct net_device *dev); |
749 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs); | 746 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs); |
750 | static void netdev_error(struct net_device *dev, int intr_status); | 747 | static void netdev_error(struct net_device *dev, int intr_status); |
751 | static void netdev_rx(struct net_device *dev); | 748 | static int natsemi_poll(struct net_device *dev, int *budget); |
749 | static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do); | ||
752 | static void netdev_tx_done(struct net_device *dev); | 750 | static void netdev_tx_done(struct net_device *dev); |
753 | static int natsemi_change_mtu(struct net_device *dev, int new_mtu); | 751 | static int natsemi_change_mtu(struct net_device *dev, int new_mtu); |
754 | #ifdef CONFIG_NET_POLL_CONTROLLER | 752 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -776,6 +774,18 @@ static inline void __iomem *ns_ioaddr(struct net_device *dev) | |||
776 | return (void __iomem *) dev->base_addr; | 774 | return (void __iomem *) dev->base_addr; |
777 | } | 775 | } |
778 | 776 | ||
777 | static inline void natsemi_irq_enable(struct net_device *dev) | ||
778 | { | ||
779 | writel(1, ns_ioaddr(dev) + IntrEnable); | ||
780 | readl(ns_ioaddr(dev) + IntrEnable); | ||
781 | } | ||
782 | |||
783 | static inline void natsemi_irq_disable(struct net_device *dev) | ||
784 | { | ||
785 | writel(0, ns_ioaddr(dev) + IntrEnable); | ||
786 | readl(ns_ioaddr(dev) + IntrEnable); | ||
787 | } | ||
788 | |||
779 | static void move_int_phy(struct net_device *dev, int addr) | 789 | static void move_int_phy(struct net_device *dev, int addr) |
780 | { | 790 | { |
781 | struct netdev_private *np = netdev_priv(dev); | 791 | struct netdev_private *np = netdev_priv(dev); |
@@ -879,6 +889,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
879 | spin_lock_init(&np->lock); | 889 | spin_lock_init(&np->lock); |
880 | np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; | 890 | np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; |
881 | np->hands_off = 0; | 891 | np->hands_off = 0; |
892 | np->intr_status = 0; | ||
882 | 893 | ||
883 | /* Initial port: | 894 | /* Initial port: |
884 | * - If the nic was configured to use an external phy and if find_mii | 895 | * - If the nic was configured to use an external phy and if find_mii |
@@ -932,6 +943,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
932 | dev->do_ioctl = &netdev_ioctl; | 943 | dev->do_ioctl = &netdev_ioctl; |
933 | dev->tx_timeout = &tx_timeout; | 944 | dev->tx_timeout = &tx_timeout; |
934 | dev->watchdog_timeo = TX_TIMEOUT; | 945 | dev->watchdog_timeo = TX_TIMEOUT; |
946 | dev->poll = natsemi_poll; | ||
947 | dev->weight = 64; | ||
948 | |||
935 | #ifdef CONFIG_NET_POLL_CONTROLLER | 949 | #ifdef CONFIG_NET_POLL_CONTROLLER |
936 | dev->poll_controller = &natsemi_poll_controller; | 950 | dev->poll_controller = &natsemi_poll_controller; |
937 | #endif | 951 | #endif |
@@ -1484,6 +1498,31 @@ static void natsemi_reset(struct net_device *dev) | |||
1484 | writel(rfcr, ioaddr + RxFilterAddr); | 1498 | writel(rfcr, ioaddr + RxFilterAddr); |
1485 | } | 1499 | } |
1486 | 1500 | ||
1501 | static void reset_rx(struct net_device *dev) | ||
1502 | { | ||
1503 | int i; | ||
1504 | struct netdev_private *np = netdev_priv(dev); | ||
1505 | void __iomem *ioaddr = ns_ioaddr(dev); | ||
1506 | |||
1507 | np->intr_status &= ~RxResetDone; | ||
1508 | |||
1509 | writel(RxReset, ioaddr + ChipCmd); | ||
1510 | |||
1511 | for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { | ||
1512 | np->intr_status |= readl(ioaddr + IntrStatus); | ||
1513 | if (np->intr_status & RxResetDone) | ||
1514 | break; | ||
1515 | udelay(15); | ||
1516 | } | ||
1517 | if (i==NATSEMI_HW_TIMEOUT) { | ||
1518 | printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n", | ||
1519 | dev->name, i*15); | ||
1520 | } else if (netif_msg_hw(np)) { | ||
1521 | printk(KERN_WARNING "%s: RX reset took %d usec.\n", | ||
1522 | dev->name, i*15); | ||
1523 | } | ||
1524 | } | ||
1525 | |||
1487 | static void natsemi_reload_eeprom(struct net_device *dev) | 1526 | static void natsemi_reload_eeprom(struct net_device *dev) |
1488 | { | 1527 | { |
1489 | struct netdev_private *np = netdev_priv(dev); | 1528 | struct netdev_private *np = netdev_priv(dev); |
@@ -2158,68 +2197,92 @@ static void netdev_tx_done(struct net_device *dev) | |||
2158 | } | 2197 | } |
2159 | } | 2198 | } |
2160 | 2199 | ||
2161 | /* The interrupt handler does all of the Rx thread work and cleans up | 2200 | /* The interrupt handler doesn't actually handle interrupts itself, it |
2162 | after the Tx thread. */ | 2201 | * schedules a NAPI poll if there is anything to do. */ |
2163 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) | 2202 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) |
2164 | { | 2203 | { |
2165 | struct net_device *dev = dev_instance; | 2204 | struct net_device *dev = dev_instance; |
2166 | struct netdev_private *np = netdev_priv(dev); | 2205 | struct netdev_private *np = netdev_priv(dev); |
2167 | void __iomem * ioaddr = ns_ioaddr(dev); | 2206 | void __iomem * ioaddr = ns_ioaddr(dev); |
2168 | int boguscnt = max_interrupt_work; | ||
2169 | unsigned int handled = 0; | ||
2170 | 2207 | ||
2171 | if (np->hands_off) | 2208 | if (np->hands_off) |
2172 | return IRQ_NONE; | 2209 | return IRQ_NONE; |
2173 | do { | 2210 | |
2174 | /* Reading automatically acknowledges all int sources. */ | 2211 | /* Reading automatically acknowledges. */ |
2175 | u32 intr_status = readl(ioaddr + IntrStatus); | 2212 | np->intr_status = readl(ioaddr + IntrStatus); |
2176 | 2213 | ||
2177 | if (netif_msg_intr(np)) | 2214 | if (netif_msg_intr(np)) |
2178 | printk(KERN_DEBUG | 2215 | printk(KERN_DEBUG |
2179 | "%s: Interrupt, status %#08x, mask %#08x.\n", | 2216 | "%s: Interrupt, status %#08x, mask %#08x.\n", |
2180 | dev->name, intr_status, | 2217 | dev->name, np->intr_status, |
2181 | readl(ioaddr + IntrMask)); | 2218 | readl(ioaddr + IntrMask)); |
2182 | 2219 | ||
2183 | if (intr_status == 0) | 2220 | if (!np->intr_status) |
2184 | break; | 2221 | return IRQ_NONE; |
2185 | handled = 1; | ||
2186 | 2222 | ||
2187 | if (intr_status & | 2223 | prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); |
2188 | (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | | ||
2189 | IntrRxErr | IntrRxOverrun)) { | ||
2190 | netdev_rx(dev); | ||
2191 | } | ||
2192 | 2224 | ||
2193 | if (intr_status & | 2225 | if (netif_rx_schedule_prep(dev)) { |
2194 | (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { | 2226 | /* Disable interrupts and register for poll */ |
2227 | natsemi_irq_disable(dev); | ||
2228 | __netif_rx_schedule(dev); | ||
2229 | } | ||
2230 | return IRQ_HANDLED; | ||
2231 | } | ||
2232 | |||
2233 | /* This is the NAPI poll routine. As well as the standard RX handling | ||
2234 | * it also handles all other interrupts that the chip might raise. | ||
2235 | */ | ||
2236 | static int natsemi_poll(struct net_device *dev, int *budget) | ||
2237 | { | ||
2238 | struct netdev_private *np = netdev_priv(dev); | ||
2239 | void __iomem * ioaddr = ns_ioaddr(dev); | ||
2240 | |||
2241 | int work_to_do = min(*budget, dev->quota); | ||
2242 | int work_done = 0; | ||
2243 | |||
2244 | do { | ||
2245 | if (np->intr_status & | ||
2246 | (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { | ||
2195 | spin_lock(&np->lock); | 2247 | spin_lock(&np->lock); |
2196 | netdev_tx_done(dev); | 2248 | netdev_tx_done(dev); |
2197 | spin_unlock(&np->lock); | 2249 | spin_unlock(&np->lock); |
2198 | } | 2250 | } |
2199 | 2251 | ||
2200 | /* Abnormal error summary/uncommon events handlers. */ | 2252 | /* Abnormal error summary/uncommon events handlers. */ |
2201 | if (intr_status & IntrAbnormalSummary) | 2253 | if (np->intr_status & IntrAbnormalSummary) |
2202 | netdev_error(dev, intr_status); | 2254 | netdev_error(dev, np->intr_status); |
2203 | 2255 | ||
2204 | if (--boguscnt < 0) { | 2256 | if (np->intr_status & |
2205 | if (netif_msg_intr(np)) | 2257 | (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | |
2206 | printk(KERN_WARNING | 2258 | IntrRxErr | IntrRxOverrun)) { |
2207 | "%s: Too much work at interrupt, " | 2259 | netdev_rx(dev, &work_done, work_to_do); |
2208 | "status=%#08x.\n", | ||
2209 | dev->name, intr_status); | ||
2210 | break; | ||
2211 | } | 2260 | } |
2212 | } while (1); | 2261 | |
2262 | *budget -= work_done; | ||
2263 | dev->quota -= work_done; | ||
2213 | 2264 | ||
2214 | if (netif_msg_intr(np)) | 2265 | if (work_done >= work_to_do) |
2215 | printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name); | 2266 | return 1; |
2267 | |||
2268 | np->intr_status = readl(ioaddr + IntrStatus); | ||
2269 | } while (np->intr_status); | ||
2270 | |||
2271 | netif_rx_complete(dev); | ||
2216 | 2272 | ||
2217 | return IRQ_RETVAL(handled); | 2273 | /* Reenable interrupts providing nothing is trying to shut |
2274 | * the chip down. */ | ||
2275 | spin_lock(&np->lock); | ||
2276 | if (!np->hands_off && netif_running(dev)) | ||
2277 | natsemi_irq_enable(dev); | ||
2278 | spin_unlock(&np->lock); | ||
2279 | |||
2280 | return 0; | ||
2218 | } | 2281 | } |
2219 | 2282 | ||
2220 | /* This routine is logically part of the interrupt handler, but separated | 2283 | /* This routine is logically part of the interrupt handler, but separated |
2221 | for clarity and better register allocation. */ | 2284 | for clarity and better register allocation. */ |
2222 | static void netdev_rx(struct net_device *dev) | 2285 | static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) |
2223 | { | 2286 | { |
2224 | struct netdev_private *np = netdev_priv(dev); | 2287 | struct netdev_private *np = netdev_priv(dev); |
2225 | int entry = np->cur_rx % RX_RING_SIZE; | 2288 | int entry = np->cur_rx % RX_RING_SIZE; |
@@ -2237,6 +2300,12 @@ static void netdev_rx(struct net_device *dev) | |||
2237 | entry, desc_status); | 2300 | entry, desc_status); |
2238 | if (--boguscnt < 0) | 2301 | if (--boguscnt < 0) |
2239 | break; | 2302 | break; |
2303 | |||
2304 | if (*work_done >= work_to_do) | ||
2305 | break; | ||
2306 | |||
2307 | (*work_done)++; | ||
2308 | |||
2240 | pkt_len = (desc_status & DescSizeMask) - 4; | 2309 | pkt_len = (desc_status & DescSizeMask) - 4; |
2241 | if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ | 2310 | if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ |
2242 | if (desc_status & DescMore) { | 2311 | if (desc_status & DescMore) { |
@@ -2248,6 +2317,23 @@ static void netdev_rx(struct net_device *dev) | |||
2248 | "status %#08x.\n", dev->name, | 2317 | "status %#08x.\n", dev->name, |
2249 | np->cur_rx, desc_status); | 2318 | np->cur_rx, desc_status); |
2250 | np->stats.rx_length_errors++; | 2319 | np->stats.rx_length_errors++; |
2320 | |||
2321 | /* The RX state machine has probably | ||
2322 | * locked up beneath us. Follow the | ||
2323 | * reset procedure documented in | ||
2324 | * AN-1287. */ | ||
2325 | |||
2326 | spin_lock_irq(&np->lock); | ||
2327 | reset_rx(dev); | ||
2328 | reinit_rx(dev); | ||
2329 | writel(np->ring_dma, ioaddr + RxRingPtr); | ||
2330 | check_link(dev); | ||
2331 | spin_unlock_irq(&np->lock); | ||
2332 | |||
2333 | /* We'll enable RX on exit from this | ||
2334 | * function. */ | ||
2335 | break; | ||
2336 | |||
2251 | } else { | 2337 | } else { |
2252 | /* There was an error. */ | 2338 | /* There was an error. */ |
2253 | np->stats.rx_errors++; | 2339 | np->stats.rx_errors++; |
@@ -2293,7 +2379,7 @@ static void netdev_rx(struct net_device *dev) | |||
2293 | np->rx_skbuff[entry] = NULL; | 2379 | np->rx_skbuff[entry] = NULL; |
2294 | } | 2380 | } |
2295 | skb->protocol = eth_type_trans(skb, dev); | 2381 | skb->protocol = eth_type_trans(skb, dev); |
2296 | netif_rx(skb); | 2382 | netif_receive_skb(skb); |
2297 | dev->last_rx = jiffies; | 2383 | dev->last_rx = jiffies; |
2298 | np->stats.rx_packets++; | 2384 | np->stats.rx_packets++; |
2299 | np->stats.rx_bytes += pkt_len; | 2385 | np->stats.rx_bytes += pkt_len; |
@@ -3074,9 +3160,7 @@ static int netdev_close(struct net_device *dev) | |||
3074 | del_timer_sync(&np->timer); | 3160 | del_timer_sync(&np->timer); |
3075 | disable_irq(dev->irq); | 3161 | disable_irq(dev->irq); |
3076 | spin_lock_irq(&np->lock); | 3162 | spin_lock_irq(&np->lock); |
3077 | /* Disable interrupts, and flush posted writes */ | 3163 | natsemi_irq_disable(dev); |
3078 | writel(0, ioaddr + IntrEnable); | ||
3079 | readl(ioaddr + IntrEnable); | ||
3080 | np->hands_off = 1; | 3164 | np->hands_off = 1; |
3081 | spin_unlock_irq(&np->lock); | 3165 | spin_unlock_irq(&np->lock); |
3082 | enable_irq(dev->irq); | 3166 | enable_irq(dev->irq); |
@@ -3158,6 +3242,9 @@ static void __devexit natsemi_remove1 (struct pci_dev *pdev) | |||
3158 | * * netdev_timer: timer stopped by natsemi_suspend. | 3242 | * * netdev_timer: timer stopped by natsemi_suspend. |
3159 | * * intr_handler: doesn't acquire the spinlock. suspend calls | 3243 | * * intr_handler: doesn't acquire the spinlock. suspend calls |
3160 | * disable_irq() to enforce synchronization. | 3244 | * disable_irq() to enforce synchronization. |
3245 | * * natsemi_poll: checks before reenabling interrupts. suspend | ||
3246 | * sets hands_off, disables interrupts and then waits with | ||
3247 | * netif_poll_disable(). | ||
3161 | * | 3248 | * |
3162 | * Interrupts must be disabled, otherwise hands_off can cause irq storms. | 3249 | * Interrupts must be disabled, otherwise hands_off can cause irq storms. |
3163 | */ | 3250 | */ |
@@ -3183,6 +3270,8 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) | |||
3183 | spin_unlock_irq(&np->lock); | 3270 | spin_unlock_irq(&np->lock); |
3184 | enable_irq(dev->irq); | 3271 | enable_irq(dev->irq); |
3185 | 3272 | ||
3273 | netif_poll_disable(dev); | ||
3274 | |||
3186 | /* Update the error counts. */ | 3275 | /* Update the error counts. */ |
3187 | __get_stats(dev); | 3276 | __get_stats(dev); |
3188 | 3277 | ||
@@ -3235,6 +3324,7 @@ static int natsemi_resume (struct pci_dev *pdev) | |||
3235 | mod_timer(&np->timer, jiffies + 1*HZ); | 3324 | mod_timer(&np->timer, jiffies + 1*HZ); |
3236 | } | 3325 | } |
3237 | netif_device_attach(dev); | 3326 | netif_device_attach(dev); |
3327 | netif_poll_enable(dev); | ||
3238 | out: | 3328 | out: |
3239 | rtnl_unlock(); | 3329 | rtnl_unlock(); |
3240 | return 0; | 3330 | return 0; |
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 8f40368cf2e9..aaebd28a1920 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c | |||
@@ -27,6 +27,7 @@ static const char version1[] = | |||
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/etherdevice.h> | 29 | #include <linux/etherdevice.h> |
30 | #include <linux/jiffies.h> | ||
30 | 31 | ||
31 | #include <asm/system.h> | 32 | #include <asm/system.h> |
32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
@@ -365,7 +366,7 @@ static void ne_reset_8390(struct net_device *dev) | |||
365 | 366 | ||
366 | /* This check _should_not_ be necessary, omit eventually. */ | 367 | /* This check _should_not_ be necessary, omit eventually. */ |
367 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) | 368 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) |
368 | if (jiffies - reset_start_time > 2*HZ/100) { | 369 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
369 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); | 370 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); |
370 | break; | 371 | break; |
371 | } | 372 | } |
@@ -580,7 +581,7 @@ retry: | |||
580 | #endif | 581 | #endif |
581 | 582 | ||
582 | while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) | 583 | while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) |
583 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 584 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
584 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); | 585 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); |
585 | ne_reset_8390(dev); | 586 | ne_reset_8390(dev); |
586 | NS8390_init(dev,1); | 587 | NS8390_init(dev,1); |
diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 94f782d51f0f..08b218c5bfbc 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c | |||
@@ -50,6 +50,7 @@ static const char version2[] = | |||
50 | #include <linux/delay.h> | 50 | #include <linux/delay.h> |
51 | #include <linux/netdevice.h> | 51 | #include <linux/netdevice.h> |
52 | #include <linux/etherdevice.h> | 52 | #include <linux/etherdevice.h> |
53 | #include <linux/jiffies.h> | ||
53 | 54 | ||
54 | #include <asm/system.h> | 55 | #include <asm/system.h> |
55 | #include <asm/io.h> | 56 | #include <asm/io.h> |
@@ -341,7 +342,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) | |||
341 | outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); | 342 | outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); |
342 | 343 | ||
343 | while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0) | 344 | while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0) |
344 | if (jiffies - reset_start_time > 2*HZ/100) { | 345 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
345 | if (bad_card) { | 346 | if (bad_card) { |
346 | printk(" (warning: no reset ack)"); | 347 | printk(" (warning: no reset ack)"); |
347 | break; | 348 | break; |
@@ -580,7 +581,7 @@ static void ne_reset_8390(struct net_device *dev) | |||
580 | 581 | ||
581 | /* This check _should_not_ be necessary, omit eventually. */ | 582 | /* This check _should_not_ be necessary, omit eventually. */ |
582 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) | 583 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) |
583 | if (jiffies - reset_start_time > 2*HZ/100) { | 584 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
584 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); | 585 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); |
585 | break; | 586 | break; |
586 | } | 587 | } |
@@ -787,7 +788,7 @@ retry: | |||
787 | #endif | 788 | #endif |
788 | 789 | ||
789 | while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) | 790 | while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) |
790 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 791 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
791 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); | 792 | printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); |
792 | ne_reset_8390(dev); | 793 | ne_reset_8390(dev); |
793 | NS8390_init(dev,1); | 794 | NS8390_init(dev,1); |
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c index e6df375a1d4b..2aa7b77f84f8 100644 --- a/drivers/net/ne2.c +++ b/drivers/net/ne2.c | |||
@@ -75,6 +75,7 @@ static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.o | |||
75 | #include <linux/etherdevice.h> | 75 | #include <linux/etherdevice.h> |
76 | #include <linux/skbuff.h> | 76 | #include <linux/skbuff.h> |
77 | #include <linux/bitops.h> | 77 | #include <linux/bitops.h> |
78 | #include <linux/jiffies.h> | ||
78 | 79 | ||
79 | #include <asm/system.h> | 80 | #include <asm/system.h> |
80 | #include <asm/io.h> | 81 | #include <asm/io.h> |
@@ -395,7 +396,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) | |||
395 | outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); | 396 | outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); |
396 | 397 | ||
397 | while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) | 398 | while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) |
398 | if (jiffies - reset_start_time > 2*HZ/100) { | 399 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
399 | printk(" not found (no reset ack).\n"); | 400 | printk(" not found (no reset ack).\n"); |
400 | retval = -ENODEV; | 401 | retval = -ENODEV; |
401 | goto out; | 402 | goto out; |
@@ -548,7 +549,7 @@ static void ne_reset_8390(struct net_device *dev) | |||
548 | 549 | ||
549 | /* This check _should_not_ be necessary, omit eventually. */ | 550 | /* This check _should_not_ be necessary, omit eventually. */ |
550 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) | 551 | while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) |
551 | if (jiffies - reset_start_time > 2*HZ/100) { | 552 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
552 | printk("%s: ne_reset_8390() did not complete.\n", | 553 | printk("%s: ne_reset_8390() did not complete.\n", |
553 | dev->name); | 554 | dev->name); |
554 | break; | 555 | break; |
@@ -749,7 +750,7 @@ retry: | |||
749 | #endif | 750 | #endif |
750 | 751 | ||
751 | while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) | 752 | while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) |
752 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 753 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
753 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); | 754 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); |
754 | ne_reset_8390(dev); | 755 | ne_reset_8390(dev); |
755 | NS8390_init(dev,1); | 756 | NS8390_init(dev,1); |
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index d11821dd86ed..e3ebb5803b02 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c | |||
@@ -117,7 +117,7 @@ enum ne2k_pci_chipsets { | |||
117 | }; | 117 | }; |
118 | 118 | ||
119 | 119 | ||
120 | static struct { | 120 | static const struct { |
121 | char *name; | 121 | char *name; |
122 | int flags; | 122 | int flags; |
123 | } pci_clone_list[] __devinitdata = { | 123 | } pci_clone_list[] __devinitdata = { |
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index b0c3b6ab6263..0fede50abd3e 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -116,6 +116,7 @@ | |||
116 | #include <linux/timer.h> | 116 | #include <linux/timer.h> |
117 | #include <linux/if_vlan.h> | 117 | #include <linux/if_vlan.h> |
118 | #include <linux/rtnetlink.h> | 118 | #include <linux/rtnetlink.h> |
119 | #include <linux/jiffies.h> | ||
119 | 120 | ||
120 | #include <asm/io.h> | 121 | #include <asm/io.h> |
121 | #include <asm/uaccess.h> | 122 | #include <asm/uaccess.h> |
@@ -651,7 +652,7 @@ static void FASTCALL(phy_intr(struct net_device *ndev)); | |||
651 | static void fastcall phy_intr(struct net_device *ndev) | 652 | static void fastcall phy_intr(struct net_device *ndev) |
652 | { | 653 | { |
653 | struct ns83820 *dev = PRIV(ndev); | 654 | struct ns83820 *dev = PRIV(ndev); |
654 | static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" }; | 655 | static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" }; |
655 | u32 cfg, new_cfg; | 656 | u32 cfg, new_cfg; |
656 | u32 tbisr, tanar, tanlpar; | 657 | u32 tbisr, tanar, tanlpar; |
657 | int speed, fullduplex, newlinkstate; | 658 | int speed, fullduplex, newlinkstate; |
@@ -1607,7 +1608,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab | |||
1607 | { | 1608 | { |
1608 | struct ns83820 *dev = PRIV(ndev); | 1609 | struct ns83820 *dev = PRIV(ndev); |
1609 | int timed_out = 0; | 1610 | int timed_out = 0; |
1610 | long start; | 1611 | unsigned long start; |
1611 | u32 status; | 1612 | u32 status; |
1612 | int loops = 0; | 1613 | int loops = 0; |
1613 | 1614 | ||
@@ -1625,7 +1626,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab | |||
1625 | break; | 1626 | break; |
1626 | if (status & fail) | 1627 | if (status & fail) |
1627 | break; | 1628 | break; |
1628 | if ((jiffies - start) >= HZ) { | 1629 | if (time_after_eq(jiffies, start + HZ)) { |
1629 | timed_out = 1; | 1630 | timed_out = 1; |
1630 | break; | 1631 | break; |
1631 | } | 1632 | } |
diff --git a/drivers/net/oaknet.c b/drivers/net/oaknet.c index 62167a29debe..d0f686d6eaaa 100644 --- a/drivers/net/oaknet.c +++ b/drivers/net/oaknet.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
21 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/jiffies.h> | ||
23 | 24 | ||
24 | #include <asm/board.h> | 25 | #include <asm/board.h> |
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -606,7 +607,7 @@ retry: | |||
606 | #endif | 607 | #endif |
607 | 608 | ||
608 | while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) { | 609 | while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) { |
609 | if (jiffies - start > OAKNET_WAIT) { | 610 | if (time_after(jiffies, start + OAKNET_WAIT)) { |
610 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); | 611 | printk("%s: timeout waiting for Tx RDC.\n", dev->name); |
611 | oaknet_reset_8390(dev); | 612 | oaknet_reset_8390(dev); |
612 | NS8390_init(dev, TRUE); | 613 | NS8390_init(dev, TRUE); |
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 48774efeec71..ce90becb8bdf 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -341,7 +341,7 @@ static void tc574_detach(struct pcmcia_device *p_dev) | |||
341 | #define CS_CHECK(fn, ret) \ | 341 | #define CS_CHECK(fn, ret) \ |
342 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | 342 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) |
343 | 343 | ||
344 | static char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; | 344 | static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; |
345 | 345 | ||
346 | static void tc574_config(dev_link_t *link) | 346 | static void tc574_config(dev_link_t *link) |
347 | { | 347 | { |
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 1c3c9c666f74..3dba50849da7 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/if_arp.h> | 39 | #include <linux/if_arp.h> |
40 | #include <linux/ioport.h> | 40 | #include <linux/ioport.h> |
41 | #include <linux/bitops.h> | 41 | #include <linux/bitops.h> |
42 | #include <linux/jiffies.h> | ||
42 | 43 | ||
43 | #include <pcmcia/cs_types.h> | 44 | #include <pcmcia/cs_types.h> |
44 | #include <pcmcia/cs.h> | 45 | #include <pcmcia/cs.h> |
@@ -115,7 +116,7 @@ struct el3_private { | |||
115 | spinlock_t lock; | 116 | spinlock_t lock; |
116 | }; | 117 | }; |
117 | 118 | ||
118 | static char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; | 119 | static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; |
119 | 120 | ||
120 | /*====================================================================*/ | 121 | /*====================================================================*/ |
121 | 122 | ||
@@ -796,7 +797,7 @@ static void media_check(unsigned long arg) | |||
796 | media = inw(ioaddr+WN4_MEDIA) & 0xc810; | 797 | media = inw(ioaddr+WN4_MEDIA) & 0xc810; |
797 | 798 | ||
798 | /* Ignore collisions unless we've had no irq's recently */ | 799 | /* Ignore collisions unless we've had no irq's recently */ |
799 | if (jiffies - lp->last_irq < HZ) { | 800 | if (time_before(jiffies, lp->last_irq + HZ)) { |
800 | media &= ~0x0010; | 801 | media &= ~0x0010; |
801 | } else { | 802 | } else { |
802 | /* Try harder to detect carrier errors */ | 803 | /* Try harder to detect carrier errors */ |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 28fe2fb4d6c0..b7ac14ba8877 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -309,7 +309,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | |||
309 | static int mfc_try_io_port(dev_link_t *link) | 309 | static int mfc_try_io_port(dev_link_t *link) |
310 | { | 310 | { |
311 | int i, ret; | 311 | int i, ret; |
312 | static kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; | 312 | static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; |
313 | 313 | ||
314 | for (i = 0; i < 5; i++) { | 314 | for (i = 0; i < 5; i++) { |
315 | link->io.BasePort2 = serial_base[i]; | 315 | link->io.BasePort2 = serial_base[i]; |
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 4a232254a497..787176c57fd9 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c | |||
@@ -388,7 +388,7 @@ static char *version = | |||
388 | DRV_NAME " " DRV_VERSION " (Roger C. Pao)"; | 388 | DRV_NAME " " DRV_VERSION " (Roger C. Pao)"; |
389 | #endif | 389 | #endif |
390 | 390 | ||
391 | static char *if_names[]={ | 391 | static const char *if_names[]={ |
392 | "Auto", "10baseT", "BNC", | 392 | "Auto", "10baseT", "BNC", |
393 | }; | 393 | }; |
394 | 394 | ||
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index d85b758f3efa..b46e5f703efa 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -66,7 +66,7 @@ | |||
66 | 66 | ||
67 | #define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ | 67 | #define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ |
68 | 68 | ||
69 | static char *if_names[] = { "auto", "10baseT", "10base2"}; | 69 | static const char *if_names[] = { "auto", "10baseT", "10base2"}; |
70 | 70 | ||
71 | #ifdef PCMCIA_DEBUG | 71 | #ifdef PCMCIA_DEBUG |
72 | static int pc_debug = PCMCIA_DEBUG; | 72 | static int pc_debug = PCMCIA_DEBUG; |
@@ -1727,6 +1727,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1727 | PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), | 1727 | PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), |
1728 | PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), | 1728 | PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), |
1729 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), | 1729 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), |
1730 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), | ||
1730 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), | 1731 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), |
1731 | PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), | 1732 | PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), |
1732 | PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), | 1733 | PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), |
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 0122415dfeef..8839c4faafd6 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -59,7 +59,7 @@ | |||
59 | 59 | ||
60 | /*====================================================================*/ | 60 | /*====================================================================*/ |
61 | 61 | ||
62 | static char *if_names[] = { "auto", "10baseT", "10base2"}; | 62 | static const char *if_names[] = { "auto", "10baseT", "10base2"}; |
63 | 63 | ||
64 | /* Module parameters */ | 64 | /* Module parameters */ |
65 | 65 | ||
@@ -777,7 +777,7 @@ free_cfg_mem: | |||
777 | static int osi_config(dev_link_t *link) | 777 | static int osi_config(dev_link_t *link) |
778 | { | 778 | { |
779 | struct net_device *dev = link->priv; | 779 | struct net_device *dev = link->priv; |
780 | static kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; | 780 | static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; |
781 | int i, j; | 781 | int i, j; |
782 | 782 | ||
783 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 783 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 593d8adee891..eed496803fe4 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -208,7 +208,7 @@ enum xirc_cmd { /* Commands */ | |||
208 | #define XIRCREG45_REV 15 /* Revision Register (rd) */ | 208 | #define XIRCREG45_REV 15 /* Revision Register (rd) */ |
209 | #define XIRCREG50_IA 8 /* Individual Address (8-13) */ | 209 | #define XIRCREG50_IA 8 /* Individual Address (8-13) */ |
210 | 210 | ||
211 | static char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; | 211 | static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; |
212 | 212 | ||
213 | /**************** | 213 | /**************** |
214 | * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | 214 | * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 8f6cf8c896a4..7e900572eaf8 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #define DRV_RELDATE "01.Nov.2005" | 26 | #define DRV_RELDATE "01.Nov.2005" |
27 | #define PFX DRV_NAME ": " | 27 | #define PFX DRV_NAME ": " |
28 | 28 | ||
29 | static const char *version = | 29 | static const char * const version = |
30 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; | 30 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; |
31 | 31 | ||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
@@ -109,7 +109,7 @@ static int rx_copybreak = 200; | |||
109 | * table to translate option values from tulip | 109 | * table to translate option values from tulip |
110 | * to internal options | 110 | * to internal options |
111 | */ | 111 | */ |
112 | static unsigned char options_mapping[] = { | 112 | static const unsigned char options_mapping[] = { |
113 | PCNET32_PORT_ASEL, /* 0 Auto-select */ | 113 | PCNET32_PORT_ASEL, /* 0 Auto-select */ |
114 | PCNET32_PORT_AUI, /* 1 BNC/AUI */ | 114 | PCNET32_PORT_AUI, /* 1 BNC/AUI */ |
115 | PCNET32_PORT_AUI, /* 2 AUI/BNC */ | 115 | PCNET32_PORT_AUI, /* 2 AUI/BNC */ |
@@ -733,7 +733,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1) | |||
733 | int rc; /* return code */ | 733 | int rc; /* return code */ |
734 | int size; /* size of packets */ | 734 | int size; /* size of packets */ |
735 | unsigned char *packet; /* source packet data */ | 735 | unsigned char *packet; /* source packet data */ |
736 | static int data_len = 60; /* length of source packets */ | 736 | static const int data_len = 60; /* length of source packets */ |
737 | unsigned long flags; | 737 | unsigned long flags; |
738 | unsigned long ticks; | 738 | unsigned long ticks; |
739 | 739 | ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1474b7c5ac0b..33cec2dab942 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -132,7 +132,7 @@ struct phy_setting { | |||
132 | }; | 132 | }; |
133 | 133 | ||
134 | /* A mapping of all SUPPORTED settings to speed/duplex */ | 134 | /* A mapping of all SUPPORTED settings to speed/duplex */ |
135 | static struct phy_setting settings[] = { | 135 | static const struct phy_setting settings[] = { |
136 | { | 136 | { |
137 | .speed = 10000, | 137 | .speed = 10000, |
138 | .duplex = DUPLEX_FULL, | 138 | .duplex = DUPLEX_FULL, |
diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 87ee3271b17d..d4449d6d1fe4 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c | |||
@@ -123,7 +123,7 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n" | |||
123 | #ifndef NET_DEBUG | 123 | #ifndef NET_DEBUG |
124 | #define NET_DEBUG 1 | 124 | #define NET_DEBUG 1 |
125 | #endif | 125 | #endif |
126 | static unsigned int net_debug = NET_DEBUG; | 126 | static const unsigned int net_debug = NET_DEBUG; |
127 | 127 | ||
128 | #define ENABLE(irq) if (irq != -1) enable_irq(irq) | 128 | #define ENABLE(irq) if (irq != -1) enable_irq(irq) |
129 | #define DISABLE(irq) if (irq != -1) disable_irq(irq) | 129 | #define DISABLE(irq) if (irq != -1) disable_irq(irq) |
@@ -351,7 +351,7 @@ static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, | |||
351 | typedef int (*plip_func)(struct net_device *dev, struct net_local *nl, | 351 | typedef int (*plip_func)(struct net_device *dev, struct net_local *nl, |
352 | struct plip_local *snd, struct plip_local *rcv); | 352 | struct plip_local *snd, struct plip_local *rcv); |
353 | 353 | ||
354 | static plip_func connection_state_table[] = | 354 | static const plip_func connection_state_table[] = |
355 | { | 355 | { |
356 | plip_none, | 356 | plip_none, |
357 | plip_receive_packet, | 357 | plip_receive_packet, |
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index aa6540b39466..23659fd7c3a6 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/ppp_channel.h> | 30 | #include <linux/ppp_channel.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/jiffies.h> | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | #include <asm/string.h> | 35 | #include <asm/string.h> |
35 | 36 | ||
@@ -570,7 +571,7 @@ ppp_async_encode(struct asyncppp *ap) | |||
570 | * character if necessary. | 571 | * character if necessary. |
571 | */ | 572 | */ |
572 | if (islcp || flag_time == 0 | 573 | if (islcp || flag_time == 0 |
573 | || jiffies - ap->last_xmit >= flag_time) | 574 | || time_after_eq(jiffies, ap->last_xmit + flag_time)) |
574 | *buf++ = PPP_FLAG; | 575 | *buf++ = PPP_FLAG; |
575 | ap->last_xmit = jiffies; | 576 | ap->last_xmit = jiffies; |
576 | fcs = PPP_INITFCS; | 577 | fcs = PPP_INITFCS; |
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 33cb8254e79d..33255fe8031e 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c | |||
@@ -108,7 +108,7 @@ static void | |||
108 | ppp_print_hex (register __u8 * out, const __u8 * in, int count) | 108 | ppp_print_hex (register __u8 * out, const __u8 * in, int count) |
109 | { | 109 | { |
110 | register __u8 next_ch; | 110 | register __u8 next_ch; |
111 | static char hex[] = "0123456789ABCDEF"; | 111 | static const char hex[] = "0123456789ABCDEF"; |
112 | 112 | ||
113 | while (count-- > 0) { | 113 | while (count-- > 0) { |
114 | next_ch = *in++; | 114 | next_ch = *in++; |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 8cc0d0bbdf50..0ad3310290f1 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -113,11 +113,11 @@ static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; | |||
113 | static int num_media = 0; | 113 | static int num_media = 0; |
114 | 114 | ||
115 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | 115 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ |
116 | static int max_interrupt_work = 20; | 116 | static const int max_interrupt_work = 20; |
117 | 117 | ||
118 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | 118 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). |
119 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ | 119 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ |
120 | static int multicast_filter_limit = 32; | 120 | static const int multicast_filter_limit = 32; |
121 | 121 | ||
122 | /* MAC address length */ | 122 | /* MAC address length */ |
123 | #define MAC_ADDR_LEN 6 | 123 | #define MAC_ADDR_LEN 6 |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index b7f00d6eb6a6..79208f434ac1 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -57,23 +57,27 @@ | |||
57 | #include <linux/ethtool.h> | 57 | #include <linux/ethtool.h> |
58 | #include <linux/workqueue.h> | 58 | #include <linux/workqueue.h> |
59 | #include <linux/if_vlan.h> | 59 | #include <linux/if_vlan.h> |
60 | #include <linux/ip.h> | ||
61 | #include <linux/tcp.h> | ||
62 | #include <net/tcp.h> | ||
60 | 63 | ||
61 | #include <asm/system.h> | 64 | #include <asm/system.h> |
62 | #include <asm/uaccess.h> | 65 | #include <asm/uaccess.h> |
63 | #include <asm/io.h> | 66 | #include <asm/io.h> |
67 | #include <asm/div64.h> | ||
64 | 68 | ||
65 | /* local include */ | 69 | /* local include */ |
66 | #include "s2io.h" | 70 | #include "s2io.h" |
67 | #include "s2io-regs.h" | 71 | #include "s2io-regs.h" |
68 | 72 | ||
69 | #define DRV_VERSION "Version 2.0.9.4" | 73 | #define DRV_VERSION "2.0.11.2" |
70 | 74 | ||
71 | /* S2io Driver name & version. */ | 75 | /* S2io Driver name & version. */ |
72 | static char s2io_driver_name[] = "Neterion"; | 76 | static char s2io_driver_name[] = "Neterion"; |
73 | static char s2io_driver_version[] = DRV_VERSION; | 77 | static char s2io_driver_version[] = DRV_VERSION; |
74 | 78 | ||
75 | int rxd_size[4] = {32,48,48,64}; | 79 | static int rxd_size[4] = {32,48,48,64}; |
76 | int rxd_count[4] = {127,85,85,63}; | 80 | static int rxd_count[4] = {127,85,85,63}; |
77 | 81 | ||
78 | static inline int RXD_IS_UP2DT(RxD_t *rxdp) | 82 | static inline int RXD_IS_UP2DT(RxD_t *rxdp) |
79 | { | 83 | { |
@@ -168,6 +172,11 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = { | |||
168 | {"\n DRIVER STATISTICS"}, | 172 | {"\n DRIVER STATISTICS"}, |
169 | {"single_bit_ecc_errs"}, | 173 | {"single_bit_ecc_errs"}, |
170 | {"double_bit_ecc_errs"}, | 174 | {"double_bit_ecc_errs"}, |
175 | ("lro_aggregated_pkts"), | ||
176 | ("lro_flush_both_count"), | ||
177 | ("lro_out_of_sequence_pkts"), | ||
178 | ("lro_flush_due_to_max_pkts"), | ||
179 | ("lro_avg_aggr_pkts"), | ||
171 | }; | 180 | }; |
172 | 181 | ||
173 | #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN | 182 | #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN |
@@ -214,7 +223,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) | |||
214 | #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL | 223 | #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL |
215 | #define END_SIGN 0x0 | 224 | #define END_SIGN 0x0 |
216 | 225 | ||
217 | static u64 herc_act_dtx_cfg[] = { | 226 | static const u64 herc_act_dtx_cfg[] = { |
218 | /* Set address */ | 227 | /* Set address */ |
219 | 0x8000051536750000ULL, 0x80000515367500E0ULL, | 228 | 0x8000051536750000ULL, 0x80000515367500E0ULL, |
220 | /* Write data */ | 229 | /* Write data */ |
@@ -235,7 +244,7 @@ static u64 herc_act_dtx_cfg[] = { | |||
235 | END_SIGN | 244 | END_SIGN |
236 | }; | 245 | }; |
237 | 246 | ||
238 | static u64 xena_mdio_cfg[] = { | 247 | static const u64 xena_mdio_cfg[] = { |
239 | /* Reset PMA PLL */ | 248 | /* Reset PMA PLL */ |
240 | 0xC001010000000000ULL, 0xC0010100000000E0ULL, | 249 | 0xC001010000000000ULL, 0xC0010100000000E0ULL, |
241 | 0xC0010100008000E4ULL, | 250 | 0xC0010100008000E4ULL, |
@@ -245,7 +254,7 @@ static u64 xena_mdio_cfg[] = { | |||
245 | END_SIGN | 254 | END_SIGN |
246 | }; | 255 | }; |
247 | 256 | ||
248 | static u64 xena_dtx_cfg[] = { | 257 | static const u64 xena_dtx_cfg[] = { |
249 | 0x8000051500000000ULL, 0x80000515000000E0ULL, | 258 | 0x8000051500000000ULL, 0x80000515000000E0ULL, |
250 | 0x80000515D93500E4ULL, 0x8001051500000000ULL, | 259 | 0x80000515D93500E4ULL, 0x8001051500000000ULL, |
251 | 0x80010515000000E0ULL, 0x80010515001E00E4ULL, | 260 | 0x80010515000000E0ULL, 0x80010515001E00E4ULL, |
@@ -273,7 +282,7 @@ static u64 xena_dtx_cfg[] = { | |||
273 | * Constants for Fixing the MacAddress problem seen mostly on | 282 | * Constants for Fixing the MacAddress problem seen mostly on |
274 | * Alpha machines. | 283 | * Alpha machines. |
275 | */ | 284 | */ |
276 | static u64 fix_mac[] = { | 285 | static const u64 fix_mac[] = { |
277 | 0x0060000000000000ULL, 0x0060600000000000ULL, | 286 | 0x0060000000000000ULL, 0x0060600000000000ULL, |
278 | 0x0040600000000000ULL, 0x0000600000000000ULL, | 287 | 0x0040600000000000ULL, 0x0000600000000000ULL, |
279 | 0x0020600000000000ULL, 0x0060600000000000ULL, | 288 | 0x0020600000000000ULL, 0x0060600000000000ULL, |
@@ -317,6 +326,12 @@ static unsigned int indicate_max_pkts; | |||
317 | static unsigned int rxsync_frequency = 3; | 326 | static unsigned int rxsync_frequency = 3; |
318 | /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ | 327 | /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ |
319 | static unsigned int intr_type = 0; | 328 | static unsigned int intr_type = 0; |
329 | /* Large receive offload feature */ | ||
330 | static unsigned int lro = 0; | ||
331 | /* Max pkts to be aggregated by LRO at one time. If not specified, | ||
332 | * aggregation happens until we hit max IP pkt size(64K) | ||
333 | */ | ||
334 | static unsigned int lro_max_pkts = 0xFFFF; | ||
320 | 335 | ||
321 | /* | 336 | /* |
322 | * S2IO device table. | 337 | * S2IO device table. |
@@ -1476,6 +1491,19 @@ static int init_nic(struct s2io_nic *nic) | |||
1476 | writel((u32) (val64 >> 32), (add + 4)); | 1491 | writel((u32) (val64 >> 32), (add + 4)); |
1477 | val64 = readq(&bar0->mac_cfg); | 1492 | val64 = readq(&bar0->mac_cfg); |
1478 | 1493 | ||
1494 | /* Enable FCS stripping by adapter */ | ||
1495 | add = &bar0->mac_cfg; | ||
1496 | val64 = readq(&bar0->mac_cfg); | ||
1497 | val64 |= MAC_CFG_RMAC_STRIP_FCS; | ||
1498 | if (nic->device_type == XFRAME_II_DEVICE) | ||
1499 | writeq(val64, &bar0->mac_cfg); | ||
1500 | else { | ||
1501 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | ||
1502 | writel((u32) (val64), add); | ||
1503 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | ||
1504 | writel((u32) (val64 >> 32), (add + 4)); | ||
1505 | } | ||
1506 | |||
1479 | /* | 1507 | /* |
1480 | * Set the time value to be inserted in the pause frame | 1508 | * Set the time value to be inserted in the pause frame |
1481 | * generated by xena. | 1509 | * generated by xena. |
@@ -2127,7 +2155,7 @@ static void stop_nic(struct s2io_nic *nic) | |||
2127 | } | 2155 | } |
2128 | } | 2156 | } |
2129 | 2157 | ||
2130 | int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) | 2158 | static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) |
2131 | { | 2159 | { |
2132 | struct net_device *dev = nic->dev; | 2160 | struct net_device *dev = nic->dev; |
2133 | struct sk_buff *frag_list; | 2161 | struct sk_buff *frag_list; |
@@ -2569,6 +2597,8 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
2569 | #ifndef CONFIG_S2IO_NAPI | 2597 | #ifndef CONFIG_S2IO_NAPI |
2570 | int pkt_cnt = 0; | 2598 | int pkt_cnt = 0; |
2571 | #endif | 2599 | #endif |
2600 | int i; | ||
2601 | |||
2572 | spin_lock(&nic->rx_lock); | 2602 | spin_lock(&nic->rx_lock); |
2573 | if (atomic_read(&nic->card_state) == CARD_DOWN) { | 2603 | if (atomic_read(&nic->card_state) == CARD_DOWN) { |
2574 | DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n", | 2604 | DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n", |
@@ -2661,6 +2691,18 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
2661 | break; | 2691 | break; |
2662 | #endif | 2692 | #endif |
2663 | } | 2693 | } |
2694 | if (nic->lro) { | ||
2695 | /* Clear all LRO sessions before exiting */ | ||
2696 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | ||
2697 | lro_t *lro = &nic->lro0_n[i]; | ||
2698 | if (lro->in_use) { | ||
2699 | update_L3L4_header(nic, lro); | ||
2700 | queue_rx_frame(lro->parent); | ||
2701 | clear_lro_session(lro); | ||
2702 | } | ||
2703 | } | ||
2704 | } | ||
2705 | |||
2664 | spin_unlock(&nic->rx_lock); | 2706 | spin_unlock(&nic->rx_lock); |
2665 | } | 2707 | } |
2666 | 2708 | ||
@@ -2852,7 +2894,7 @@ static int wait_for_cmd_complete(nic_t * sp) | |||
2852 | * void. | 2894 | * void. |
2853 | */ | 2895 | */ |
2854 | 2896 | ||
2855 | void s2io_reset(nic_t * sp) | 2897 | static void s2io_reset(nic_t * sp) |
2856 | { | 2898 | { |
2857 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 2899 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
2858 | u64 val64; | 2900 | u64 val64; |
@@ -2940,7 +2982,7 @@ void s2io_reset(nic_t * sp) | |||
2940 | * SUCCESS on success and FAILURE on failure. | 2982 | * SUCCESS on success and FAILURE on failure. |
2941 | */ | 2983 | */ |
2942 | 2984 | ||
2943 | int s2io_set_swapper(nic_t * sp) | 2985 | static int s2io_set_swapper(nic_t * sp) |
2944 | { | 2986 | { |
2945 | struct net_device *dev = sp->dev; | 2987 | struct net_device *dev = sp->dev; |
2946 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 2988 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
@@ -3089,7 +3131,7 @@ static int wait_for_msix_trans(nic_t *nic, int i) | |||
3089 | return ret; | 3131 | return ret; |
3090 | } | 3132 | } |
3091 | 3133 | ||
3092 | void restore_xmsi_data(nic_t *nic) | 3134 | static void restore_xmsi_data(nic_t *nic) |
3093 | { | 3135 | { |
3094 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 3136 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
3095 | u64 val64; | 3137 | u64 val64; |
@@ -3180,7 +3222,7 @@ int s2io_enable_msi(nic_t *nic) | |||
3180 | return 0; | 3222 | return 0; |
3181 | } | 3223 | } |
3182 | 3224 | ||
3183 | int s2io_enable_msi_x(nic_t *nic) | 3225 | static int s2io_enable_msi_x(nic_t *nic) |
3184 | { | 3226 | { |
3185 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 3227 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
3186 | u64 tx_mat, rx_mat; | 3228 | u64 tx_mat, rx_mat; |
@@ -3668,23 +3710,32 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
3668 | * else schedule a tasklet to reallocate the buffers. | 3710 | * else schedule a tasklet to reallocate the buffers. |
3669 | */ | 3711 | */ |
3670 | for (i = 0; i < config->rx_ring_num; i++) { | 3712 | for (i = 0; i < config->rx_ring_num; i++) { |
3671 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); | 3713 | if (!sp->lro) { |
3672 | int level = rx_buffer_level(sp, rxb_size, i); | 3714 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); |
3673 | 3715 | int level = rx_buffer_level(sp, rxb_size, i); | |
3674 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | 3716 | |
3675 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name); | 3717 | if ((level == PANIC) && (!TASKLET_IN_USE)) { |
3676 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | 3718 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", |
3677 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { | 3719 | dev->name); |
3678 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | 3720 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); |
3679 | dev->name); | 3721 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { |
3680 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | 3722 | DBG_PRINT(ERR_DBG, "%s:Out of memory", |
3723 | dev->name); | ||
3724 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | ||
3725 | clear_bit(0, (&sp->tasklet_status)); | ||
3726 | atomic_dec(&sp->isr_cnt); | ||
3727 | return IRQ_HANDLED; | ||
3728 | } | ||
3681 | clear_bit(0, (&sp->tasklet_status)); | 3729 | clear_bit(0, (&sp->tasklet_status)); |
3682 | atomic_dec(&sp->isr_cnt); | 3730 | } else if (level == LOW) { |
3683 | return IRQ_HANDLED; | 3731 | tasklet_schedule(&sp->task); |
3684 | } | 3732 | } |
3685 | clear_bit(0, (&sp->tasklet_status)); | 3733 | } |
3686 | } else if (level == LOW) { | 3734 | else if (fill_rx_buffers(sp, i) == -ENOMEM) { |
3687 | tasklet_schedule(&sp->task); | 3735 | DBG_PRINT(ERR_DBG, "%s:Out of memory", |
3736 | dev->name); | ||
3737 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
3738 | break; | ||
3688 | } | 3739 | } |
3689 | } | 3740 | } |
3690 | 3741 | ||
@@ -3697,29 +3748,37 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
3697 | { | 3748 | { |
3698 | ring_info_t *ring = (ring_info_t *)dev_id; | 3749 | ring_info_t *ring = (ring_info_t *)dev_id; |
3699 | nic_t *sp = ring->nic; | 3750 | nic_t *sp = ring->nic; |
3751 | struct net_device *dev = (struct net_device *) dev_id; | ||
3700 | int rxb_size, level, rng_n; | 3752 | int rxb_size, level, rng_n; |
3701 | 3753 | ||
3702 | atomic_inc(&sp->isr_cnt); | 3754 | atomic_inc(&sp->isr_cnt); |
3703 | rx_intr_handler(ring); | 3755 | rx_intr_handler(ring); |
3704 | 3756 | ||
3705 | rng_n = ring->ring_no; | 3757 | rng_n = ring->ring_no; |
3706 | rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); | 3758 | if (!sp->lro) { |
3707 | level = rx_buffer_level(sp, rxb_size, rng_n); | 3759 | rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); |
3708 | 3760 | level = rx_buffer_level(sp, rxb_size, rng_n); | |
3709 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | 3761 | |
3710 | int ret; | 3762 | if ((level == PANIC) && (!TASKLET_IN_USE)) { |
3711 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); | 3763 | int ret; |
3712 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | 3764 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); |
3713 | if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { | 3765 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); |
3714 | DBG_PRINT(ERR_DBG, "Out of memory in %s", | 3766 | if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { |
3715 | __FUNCTION__); | 3767 | DBG_PRINT(ERR_DBG, "Out of memory in %s", |
3768 | __FUNCTION__); | ||
3769 | clear_bit(0, (&sp->tasklet_status)); | ||
3770 | return IRQ_HANDLED; | ||
3771 | } | ||
3716 | clear_bit(0, (&sp->tasklet_status)); | 3772 | clear_bit(0, (&sp->tasklet_status)); |
3717 | return IRQ_HANDLED; | 3773 | } else if (level == LOW) { |
3774 | tasklet_schedule(&sp->task); | ||
3718 | } | 3775 | } |
3719 | clear_bit(0, (&sp->tasklet_status)); | ||
3720 | } else if (level == LOW) { | ||
3721 | tasklet_schedule(&sp->task); | ||
3722 | } | 3776 | } |
3777 | else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { | ||
3778 | DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name); | ||
3779 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
3780 | } | ||
3781 | |||
3723 | atomic_dec(&sp->isr_cnt); | 3782 | atomic_dec(&sp->isr_cnt); |
3724 | 3783 | ||
3725 | return IRQ_HANDLED; | 3784 | return IRQ_HANDLED; |
@@ -3875,24 +3934,33 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) | |||
3875 | */ | 3934 | */ |
3876 | #ifndef CONFIG_S2IO_NAPI | 3935 | #ifndef CONFIG_S2IO_NAPI |
3877 | for (i = 0; i < config->rx_ring_num; i++) { | 3936 | for (i = 0; i < config->rx_ring_num; i++) { |
3878 | int ret; | 3937 | if (!sp->lro) { |
3879 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); | 3938 | int ret; |
3880 | int level = rx_buffer_level(sp, rxb_size, i); | 3939 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); |
3881 | 3940 | int level = rx_buffer_level(sp, rxb_size, i); | |
3882 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | 3941 | |
3883 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name); | 3942 | if ((level == PANIC) && (!TASKLET_IN_USE)) { |
3884 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | 3943 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", |
3885 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { | 3944 | dev->name); |
3886 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | 3945 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); |
3887 | dev->name); | 3946 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { |
3888 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | 3947 | DBG_PRINT(ERR_DBG, "%s:Out of memory", |
3948 | dev->name); | ||
3949 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | ||
3950 | clear_bit(0, (&sp->tasklet_status)); | ||
3951 | atomic_dec(&sp->isr_cnt); | ||
3952 | return IRQ_HANDLED; | ||
3953 | } | ||
3889 | clear_bit(0, (&sp->tasklet_status)); | 3954 | clear_bit(0, (&sp->tasklet_status)); |
3890 | atomic_dec(&sp->isr_cnt); | 3955 | } else if (level == LOW) { |
3891 | return IRQ_HANDLED; | 3956 | tasklet_schedule(&sp->task); |
3892 | } | 3957 | } |
3893 | clear_bit(0, (&sp->tasklet_status)); | 3958 | } |
3894 | } else if (level == LOW) { | 3959 | else if (fill_rx_buffers(sp, i) == -ENOMEM) { |
3895 | tasklet_schedule(&sp->task); | 3960 | DBG_PRINT(ERR_DBG, "%s:Out of memory", |
3961 | dev->name); | ||
3962 | DBG_PRINT(ERR_DBG, " in Rx intr!!\n"); | ||
3963 | break; | ||
3896 | } | 3964 | } |
3897 | } | 3965 | } |
3898 | #endif | 3966 | #endif |
@@ -4129,7 +4197,7 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4129 | * as defined in errno.h file on failure. | 4197 | * as defined in errno.h file on failure. |
4130 | */ | 4198 | */ |
4131 | 4199 | ||
4132 | int s2io_set_mac_addr(struct net_device *dev, u8 * addr) | 4200 | static int s2io_set_mac_addr(struct net_device *dev, u8 * addr) |
4133 | { | 4201 | { |
4134 | nic_t *sp = dev->priv; | 4202 | nic_t *sp = dev->priv; |
4135 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 4203 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
@@ -5044,6 +5112,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
5044 | int i = 0; | 5112 | int i = 0; |
5045 | nic_t *sp = dev->priv; | 5113 | nic_t *sp = dev->priv; |
5046 | StatInfo_t *stat_info = sp->mac_control.stats_info; | 5114 | StatInfo_t *stat_info = sp->mac_control.stats_info; |
5115 | u64 tmp; | ||
5047 | 5116 | ||
5048 | s2io_updt_stats(sp); | 5117 | s2io_updt_stats(sp); |
5049 | tmp_stats[i++] = | 5118 | tmp_stats[i++] = |
@@ -5135,6 +5204,16 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
5135 | tmp_stats[i++] = 0; | 5204 | tmp_stats[i++] = 0; |
5136 | tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; | 5205 | tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; |
5137 | tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; | 5206 | tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; |
5207 | tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt; | ||
5208 | tmp_stats[i++] = stat_info->sw_stat.sending_both; | ||
5209 | tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts; | ||
5210 | tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts; | ||
5211 | tmp = 0; | ||
5212 | if (stat_info->sw_stat.num_aggregations) { | ||
5213 | tmp = stat_info->sw_stat.sum_avg_pkts_aggregated; | ||
5214 | do_div(tmp, stat_info->sw_stat.num_aggregations); | ||
5215 | } | ||
5216 | tmp_stats[i++] = tmp; | ||
5138 | } | 5217 | } |
5139 | 5218 | ||
5140 | static int s2io_ethtool_get_regs_len(struct net_device *dev) | 5219 | static int s2io_ethtool_get_regs_len(struct net_device *dev) |
@@ -5516,6 +5595,14 @@ static int s2io_card_up(nic_t * sp) | |||
5516 | /* Setting its receive mode */ | 5595 | /* Setting its receive mode */ |
5517 | s2io_set_multicast(dev); | 5596 | s2io_set_multicast(dev); |
5518 | 5597 | ||
5598 | if (sp->lro) { | ||
5599 | /* Initialize max aggregatable pkts based on MTU */ | ||
5600 | sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; | ||
5601 | /* Check if we can use(if specified) user provided value */ | ||
5602 | if (lro_max_pkts < sp->lro_max_aggr_per_sess) | ||
5603 | sp->lro_max_aggr_per_sess = lro_max_pkts; | ||
5604 | } | ||
5605 | |||
5519 | /* Enable tasklet for the device */ | 5606 | /* Enable tasklet for the device */ |
5520 | tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); | 5607 | tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); |
5521 | 5608 | ||
@@ -5608,6 +5695,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5608 | ((unsigned long) rxdp->Host_Control); | 5695 | ((unsigned long) rxdp->Host_Control); |
5609 | int ring_no = ring_data->ring_no; | 5696 | int ring_no = ring_data->ring_no; |
5610 | u16 l3_csum, l4_csum; | 5697 | u16 l3_csum, l4_csum; |
5698 | lro_t *lro; | ||
5611 | 5699 | ||
5612 | skb->dev = dev; | 5700 | skb->dev = dev; |
5613 | if (rxdp->Control_1 & RXD_T_CODE) { | 5701 | if (rxdp->Control_1 & RXD_T_CODE) { |
@@ -5656,7 +5744,8 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5656 | skb_put(skb, buf2_len); | 5744 | skb_put(skb, buf2_len); |
5657 | } | 5745 | } |
5658 | 5746 | ||
5659 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && | 5747 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || |
5748 | (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && | ||
5660 | (sp->rx_csum)) { | 5749 | (sp->rx_csum)) { |
5661 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); | 5750 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); |
5662 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); | 5751 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); |
@@ -5667,6 +5756,54 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5667 | * a flag in the RxD. | 5756 | * a flag in the RxD. |
5668 | */ | 5757 | */ |
5669 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 5758 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
5759 | if (sp->lro) { | ||
5760 | u32 tcp_len; | ||
5761 | u8 *tcp; | ||
5762 | int ret = 0; | ||
5763 | |||
5764 | ret = s2io_club_tcp_session(skb->data, &tcp, | ||
5765 | &tcp_len, &lro, rxdp, sp); | ||
5766 | switch (ret) { | ||
5767 | case 3: /* Begin anew */ | ||
5768 | lro->parent = skb; | ||
5769 | goto aggregate; | ||
5770 | case 1: /* Aggregate */ | ||
5771 | { | ||
5772 | lro_append_pkt(sp, lro, | ||
5773 | skb, tcp_len); | ||
5774 | goto aggregate; | ||
5775 | } | ||
5776 | case 4: /* Flush session */ | ||
5777 | { | ||
5778 | lro_append_pkt(sp, lro, | ||
5779 | skb, tcp_len); | ||
5780 | queue_rx_frame(lro->parent); | ||
5781 | clear_lro_session(lro); | ||
5782 | sp->mac_control.stats_info-> | ||
5783 | sw_stat.flush_max_pkts++; | ||
5784 | goto aggregate; | ||
5785 | } | ||
5786 | case 2: /* Flush both */ | ||
5787 | lro->parent->data_len = | ||
5788 | lro->frags_len; | ||
5789 | sp->mac_control.stats_info-> | ||
5790 | sw_stat.sending_both++; | ||
5791 | queue_rx_frame(lro->parent); | ||
5792 | clear_lro_session(lro); | ||
5793 | goto send_up; | ||
5794 | case 0: /* sessions exceeded */ | ||
5795 | case 5: /* | ||
5796 | * First pkt in session not | ||
5797 | * L3/L4 aggregatable | ||
5798 | */ | ||
5799 | break; | ||
5800 | default: | ||
5801 | DBG_PRINT(ERR_DBG, | ||
5802 | "%s: Samadhana!!\n", | ||
5803 | __FUNCTION__); | ||
5804 | BUG(); | ||
5805 | } | ||
5806 | } | ||
5670 | } else { | 5807 | } else { |
5671 | /* | 5808 | /* |
5672 | * Packet with erroneous checksum, let the | 5809 | * Packet with erroneous checksum, let the |
@@ -5678,25 +5815,31 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5678 | skb->ip_summed = CHECKSUM_NONE; | 5815 | skb->ip_summed = CHECKSUM_NONE; |
5679 | } | 5816 | } |
5680 | 5817 | ||
5681 | skb->protocol = eth_type_trans(skb, dev); | 5818 | if (!sp->lro) { |
5819 | skb->protocol = eth_type_trans(skb, dev); | ||
5682 | #ifdef CONFIG_S2IO_NAPI | 5820 | #ifdef CONFIG_S2IO_NAPI |
5683 | if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { | 5821 | if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { |
5684 | /* Queueing the vlan frame to the upper layer */ | 5822 | /* Queueing the vlan frame to the upper layer */ |
5685 | vlan_hwaccel_receive_skb(skb, sp->vlgrp, | 5823 | vlan_hwaccel_receive_skb(skb, sp->vlgrp, |
5686 | RXD_GET_VLAN_TAG(rxdp->Control_2)); | 5824 | RXD_GET_VLAN_TAG(rxdp->Control_2)); |
5687 | } else { | 5825 | } else { |
5688 | netif_receive_skb(skb); | 5826 | netif_receive_skb(skb); |
5689 | } | 5827 | } |
5690 | #else | 5828 | #else |
5691 | if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { | 5829 | if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { |
5692 | /* Queueing the vlan frame to the upper layer */ | 5830 | /* Queueing the vlan frame to the upper layer */ |
5693 | vlan_hwaccel_rx(skb, sp->vlgrp, | 5831 | vlan_hwaccel_rx(skb, sp->vlgrp, |
5694 | RXD_GET_VLAN_TAG(rxdp->Control_2)); | 5832 | RXD_GET_VLAN_TAG(rxdp->Control_2)); |
5695 | } else { | 5833 | } else { |
5696 | netif_rx(skb); | 5834 | netif_rx(skb); |
5697 | } | 5835 | } |
5698 | #endif | 5836 | #endif |
5837 | } else { | ||
5838 | send_up: | ||
5839 | queue_rx_frame(skb); | ||
5840 | } | ||
5699 | dev->last_rx = jiffies; | 5841 | dev->last_rx = jiffies; |
5842 | aggregate: | ||
5700 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 5843 | atomic_dec(&sp->rx_bufs_left[ring_no]); |
5701 | return SUCCESS; | 5844 | return SUCCESS; |
5702 | } | 5845 | } |
@@ -5714,7 +5857,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5714 | * void. | 5857 | * void. |
5715 | */ | 5858 | */ |
5716 | 5859 | ||
5717 | void s2io_link(nic_t * sp, int link) | 5860 | static void s2io_link(nic_t * sp, int link) |
5718 | { | 5861 | { |
5719 | struct net_device *dev = (struct net_device *) sp->dev; | 5862 | struct net_device *dev = (struct net_device *) sp->dev; |
5720 | 5863 | ||
@@ -5739,7 +5882,7 @@ void s2io_link(nic_t * sp, int link) | |||
5739 | * returns the revision ID of the device. | 5882 | * returns the revision ID of the device. |
5740 | */ | 5883 | */ |
5741 | 5884 | ||
5742 | int get_xena_rev_id(struct pci_dev *pdev) | 5885 | static int get_xena_rev_id(struct pci_dev *pdev) |
5743 | { | 5886 | { |
5744 | u8 id = 0; | 5887 | u8 id = 0; |
5745 | int ret; | 5888 | int ret; |
@@ -5808,6 +5951,8 @@ module_param(indicate_max_pkts, int, 0); | |||
5808 | #endif | 5951 | #endif |
5809 | module_param(rxsync_frequency, int, 0); | 5952 | module_param(rxsync_frequency, int, 0); |
5810 | module_param(intr_type, int, 0); | 5953 | module_param(intr_type, int, 0); |
5954 | module_param(lro, int, 0); | ||
5955 | module_param(lro_max_pkts, int, 0); | ||
5811 | 5956 | ||
5812 | /** | 5957 | /** |
5813 | * s2io_init_nic - Initialization of the adapter . | 5958 | * s2io_init_nic - Initialization of the adapter . |
@@ -5939,6 +6084,7 @@ Defaulting to INTA\n"); | |||
5939 | else | 6084 | else |
5940 | sp->device_type = XFRAME_I_DEVICE; | 6085 | sp->device_type = XFRAME_I_DEVICE; |
5941 | 6086 | ||
6087 | sp->lro = lro; | ||
5942 | 6088 | ||
5943 | /* Initialize some PCI/PCI-X fields of the NIC. */ | 6089 | /* Initialize some PCI/PCI-X fields of the NIC. */ |
5944 | s2io_init_pci(sp); | 6090 | s2io_init_pci(sp); |
@@ -6242,6 +6388,10 @@ Defaulting to INTA\n"); | |||
6242 | DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been " | 6388 | DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been " |
6243 | "enabled\n",dev->name); | 6389 | "enabled\n",dev->name); |
6244 | 6390 | ||
6391 | if (sp->lro) | ||
6392 | DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", | ||
6393 | dev->name); | ||
6394 | |||
6245 | /* Initialize device name */ | 6395 | /* Initialize device name */ |
6246 | strcpy(sp->name, dev->name); | 6396 | strcpy(sp->name, dev->name); |
6247 | if (sp->device_type & XFRAME_II_DEVICE) | 6397 | if (sp->device_type & XFRAME_II_DEVICE) |
@@ -6344,7 +6494,7 @@ int __init s2io_starter(void) | |||
6344 | * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. | 6494 | * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. |
6345 | */ | 6495 | */ |
6346 | 6496 | ||
6347 | void s2io_closer(void) | 6497 | static void s2io_closer(void) |
6348 | { | 6498 | { |
6349 | pci_unregister_driver(&s2io_driver); | 6499 | pci_unregister_driver(&s2io_driver); |
6350 | DBG_PRINT(INIT_DBG, "cleanup done\n"); | 6500 | DBG_PRINT(INIT_DBG, "cleanup done\n"); |
@@ -6352,3 +6502,318 @@ void s2io_closer(void) | |||
6352 | 6502 | ||
6353 | module_init(s2io_starter); | 6503 | module_init(s2io_starter); |
6354 | module_exit(s2io_closer); | 6504 | module_exit(s2io_closer); |
6505 | |||
6506 | static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, | ||
6507 | struct tcphdr **tcp, RxD_t *rxdp) | ||
6508 | { | ||
6509 | int ip_off; | ||
6510 | u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; | ||
6511 | |||
6512 | if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { | ||
6513 | DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n", | ||
6514 | __FUNCTION__); | ||
6515 | return -1; | ||
6516 | } | ||
6517 | |||
6518 | /* TODO: | ||
6519 | * By default the VLAN field in the MAC is stripped by the card, if this | ||
6520 | * feature is turned off in rx_pa_cfg register, then the ip_off field | ||
6521 | * has to be shifted by a further 2 bytes | ||
6522 | */ | ||
6523 | switch (l2_type) { | ||
6524 | case 0: /* DIX type */ | ||
6525 | case 4: /* DIX type with VLAN */ | ||
6526 | ip_off = HEADER_ETHERNET_II_802_3_SIZE; | ||
6527 | break; | ||
6528 | /* LLC, SNAP etc are considered non-mergeable */ | ||
6529 | default: | ||
6530 | return -1; | ||
6531 | } | ||
6532 | |||
6533 | *ip = (struct iphdr *)((u8 *)buffer + ip_off); | ||
6534 | ip_len = (u8)((*ip)->ihl); | ||
6535 | ip_len <<= 2; | ||
6536 | *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len); | ||
6537 | |||
6538 | return 0; | ||
6539 | } | ||
6540 | |||
6541 | static int check_for_socket_match(lro_t *lro, struct iphdr *ip, | ||
6542 | struct tcphdr *tcp) | ||
6543 | { | ||
6544 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6545 | if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) || | ||
6546 | (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest)) | ||
6547 | return -1; | ||
6548 | return 0; | ||
6549 | } | ||
6550 | |||
6551 | static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp) | ||
6552 | { | ||
6553 | return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2)); | ||
6554 | } | ||
6555 | |||
6556 | static void initiate_new_session(lro_t *lro, u8 *l2h, | ||
6557 | struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len) | ||
6558 | { | ||
6559 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6560 | lro->l2h = l2h; | ||
6561 | lro->iph = ip; | ||
6562 | lro->tcph = tcp; | ||
6563 | lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); | ||
6564 | lro->tcp_ack = ntohl(tcp->ack_seq); | ||
6565 | lro->sg_num = 1; | ||
6566 | lro->total_len = ntohs(ip->tot_len); | ||
6567 | lro->frags_len = 0; | ||
6568 | /* | ||
6569 | * check if we saw TCP timestamp. Other consistency checks have | ||
6570 | * already been done. | ||
6571 | */ | ||
6572 | if (tcp->doff == 8) { | ||
6573 | u32 *ptr; | ||
6574 | ptr = (u32 *)(tcp+1); | ||
6575 | lro->saw_ts = 1; | ||
6576 | lro->cur_tsval = *(ptr+1); | ||
6577 | lro->cur_tsecr = *(ptr+2); | ||
6578 | } | ||
6579 | lro->in_use = 1; | ||
6580 | } | ||
6581 | |||
6582 | static void update_L3L4_header(nic_t *sp, lro_t *lro) | ||
6583 | { | ||
6584 | struct iphdr *ip = lro->iph; | ||
6585 | struct tcphdr *tcp = lro->tcph; | ||
6586 | u16 nchk; | ||
6587 | StatInfo_t *statinfo = sp->mac_control.stats_info; | ||
6588 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6589 | |||
6590 | /* Update L3 header */ | ||
6591 | ip->tot_len = htons(lro->total_len); | ||
6592 | ip->check = 0; | ||
6593 | nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl); | ||
6594 | ip->check = nchk; | ||
6595 | |||
6596 | /* Update L4 header */ | ||
6597 | tcp->ack_seq = lro->tcp_ack; | ||
6598 | tcp->window = lro->window; | ||
6599 | |||
6600 | /* Update tsecr field if this session has timestamps enabled */ | ||
6601 | if (lro->saw_ts) { | ||
6602 | u32 *ptr = (u32 *)(tcp + 1); | ||
6603 | *(ptr+2) = lro->cur_tsecr; | ||
6604 | } | ||
6605 | |||
6606 | /* Update counters required for calculation of | ||
6607 | * average no. of packets aggregated. | ||
6608 | */ | ||
6609 | statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num; | ||
6610 | statinfo->sw_stat.num_aggregations++; | ||
6611 | } | ||
6612 | |||
6613 | static void aggregate_new_rx(lro_t *lro, struct iphdr *ip, | ||
6614 | struct tcphdr *tcp, u32 l4_pyld) | ||
6615 | { | ||
6616 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6617 | lro->total_len += l4_pyld; | ||
6618 | lro->frags_len += l4_pyld; | ||
6619 | lro->tcp_next_seq += l4_pyld; | ||
6620 | lro->sg_num++; | ||
6621 | |||
6622 | /* Update ack seq no. and window ad(from this pkt) in LRO object */ | ||
6623 | lro->tcp_ack = tcp->ack_seq; | ||
6624 | lro->window = tcp->window; | ||
6625 | |||
6626 | if (lro->saw_ts) { | ||
6627 | u32 *ptr; | ||
6628 | /* Update tsecr and tsval from this packet */ | ||
6629 | ptr = (u32 *) (tcp + 1); | ||
6630 | lro->cur_tsval = *(ptr + 1); | ||
6631 | lro->cur_tsecr = *(ptr + 2); | ||
6632 | } | ||
6633 | } | ||
6634 | |||
6635 | static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, | ||
6636 | struct tcphdr *tcp, u32 tcp_pyld_len) | ||
6637 | { | ||
6638 | u8 *ptr; | ||
6639 | |||
6640 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | ||
6641 | |||
6642 | if (!tcp_pyld_len) { | ||
6643 | /* Runt frame or a pure ack */ | ||
6644 | return -1; | ||
6645 | } | ||
6646 | |||
6647 | if (ip->ihl != 5) /* IP has options */ | ||
6648 | return -1; | ||
6649 | |||
6650 | if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || | ||
6651 | !tcp->ack) { | ||
6652 | /* | ||
6653 | * Currently recognize only the ack control word and | ||
6654 | * any other control field being set would result in | ||
6655 | * flushing the LRO session | ||
6656 | */ | ||
6657 | return -1; | ||
6658 | } | ||
6659 | |||
6660 | /* | ||
6661 | * Allow only one TCP timestamp option. Don't aggregate if | ||
6662 | * any other options are detected. | ||
6663 | */ | ||
6664 | if (tcp->doff != 5 && tcp->doff != 8) | ||
6665 | return -1; | ||
6666 | |||
6667 | if (tcp->doff == 8) { | ||
6668 | ptr = (u8 *)(tcp + 1); | ||
6669 | while (*ptr == TCPOPT_NOP) | ||
6670 | ptr++; | ||
6671 | if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP) | ||
6672 | return -1; | ||
6673 | |||
6674 | /* Ensure timestamp value increases monotonically */ | ||
6675 | if (l_lro) | ||
6676 | if (l_lro->cur_tsval > *((u32 *)(ptr+2))) | ||
6677 | return -1; | ||
6678 | |||
6679 | /* timestamp echo reply should be non-zero */ | ||
6680 | if (*((u32 *)(ptr+6)) == 0) | ||
6681 | return -1; | ||
6682 | } | ||
6683 | |||
6684 | return 0; | ||
6685 | } | ||
6686 | |||
6687 | static int | ||
6688 | s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, | ||
6689 | RxD_t *rxdp, nic_t *sp) | ||
6690 | { | ||
6691 | struct iphdr *ip; | ||
6692 | struct tcphdr *tcph; | ||
6693 | int ret = 0, i; | ||
6694 | |||
6695 | if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp, | ||
6696 | rxdp))) { | ||
6697 | DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n", | ||
6698 | ip->saddr, ip->daddr); | ||
6699 | } else { | ||
6700 | return ret; | ||
6701 | } | ||
6702 | |||
6703 | tcph = (struct tcphdr *)*tcp; | ||
6704 | *tcp_len = get_l4_pyld_length(ip, tcph); | ||
6705 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | ||
6706 | lro_t *l_lro = &sp->lro0_n[i]; | ||
6707 | if (l_lro->in_use) { | ||
6708 | if (check_for_socket_match(l_lro, ip, tcph)) | ||
6709 | continue; | ||
6710 | /* Sock pair matched */ | ||
6711 | *lro = l_lro; | ||
6712 | |||
6713 | if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) { | ||
6714 | DBG_PRINT(INFO_DBG, "%s:Out of order. expected " | ||
6715 | "0x%x, actual 0x%x\n", __FUNCTION__, | ||
6716 | (*lro)->tcp_next_seq, | ||
6717 | ntohl(tcph->seq)); | ||
6718 | |||
6719 | sp->mac_control.stats_info-> | ||
6720 | sw_stat.outof_sequence_pkts++; | ||
6721 | ret = 2; | ||
6722 | break; | ||
6723 | } | ||
6724 | |||
6725 | if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len)) | ||
6726 | ret = 1; /* Aggregate */ | ||
6727 | else | ||
6728 | ret = 2; /* Flush both */ | ||
6729 | break; | ||
6730 | } | ||
6731 | } | ||
6732 | |||
6733 | if (ret == 0) { | ||
6734 | /* Before searching for available LRO objects, | ||
6735 | * check if the pkt is L3/L4 aggregatable. If not | ||
6736 | * don't create new LRO session. Just send this | ||
6737 | * packet up. | ||
6738 | */ | ||
6739 | if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) { | ||
6740 | return 5; | ||
6741 | } | ||
6742 | |||
6743 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | ||
6744 | lro_t *l_lro = &sp->lro0_n[i]; | ||
6745 | if (!(l_lro->in_use)) { | ||
6746 | *lro = l_lro; | ||
6747 | ret = 3; /* Begin anew */ | ||
6748 | break; | ||
6749 | } | ||
6750 | } | ||
6751 | } | ||
6752 | |||
6753 | if (ret == 0) { /* sessions exceeded */ | ||
6754 | DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n", | ||
6755 | __FUNCTION__); | ||
6756 | *lro = NULL; | ||
6757 | return ret; | ||
6758 | } | ||
6759 | |||
6760 | switch (ret) { | ||
6761 | case 3: | ||
6762 | initiate_new_session(*lro, buffer, ip, tcph, *tcp_len); | ||
6763 | break; | ||
6764 | case 2: | ||
6765 | update_L3L4_header(sp, *lro); | ||
6766 | break; | ||
6767 | case 1: | ||
6768 | aggregate_new_rx(*lro, ip, tcph, *tcp_len); | ||
6769 | if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) { | ||
6770 | update_L3L4_header(sp, *lro); | ||
6771 | ret = 4; /* Flush the LRO */ | ||
6772 | } | ||
6773 | break; | ||
6774 | default: | ||
6775 | DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n", | ||
6776 | __FUNCTION__); | ||
6777 | break; | ||
6778 | } | ||
6779 | |||
6780 | return ret; | ||
6781 | } | ||
6782 | |||
6783 | static void clear_lro_session(lro_t *lro) | ||
6784 | { | ||
6785 | static u16 lro_struct_size = sizeof(lro_t); | ||
6786 | |||
6787 | memset(lro, 0, lro_struct_size); | ||
6788 | } | ||
6789 | |||
6790 | static void queue_rx_frame(struct sk_buff *skb) | ||
6791 | { | ||
6792 | struct net_device *dev = skb->dev; | ||
6793 | |||
6794 | skb->protocol = eth_type_trans(skb, dev); | ||
6795 | #ifdef CONFIG_S2IO_NAPI | ||
6796 | netif_receive_skb(skb); | ||
6797 | #else | ||
6798 | netif_rx(skb); | ||
6799 | #endif | ||
6800 | } | ||
6801 | |||
6802 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, | ||
6803 | u32 tcp_len) | ||
6804 | { | ||
6805 | struct sk_buff *tmp, *first = lro->parent; | ||
6806 | |||
6807 | first->len += tcp_len; | ||
6808 | first->data_len = lro->frags_len; | ||
6809 | skb_pull(skb, (skb->len - tcp_len)); | ||
6810 | if ((tmp = skb_shinfo(first)->frag_list)) { | ||
6811 | while (tmp->next) | ||
6812 | tmp = tmp->next; | ||
6813 | tmp->next = skb; | ||
6814 | } | ||
6815 | else | ||
6816 | skb_shinfo(first)->frag_list = skb; | ||
6817 | sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; | ||
6818 | return; | ||
6819 | } | ||
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 852a6a899d07..0a0b5b29d81e 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -64,7 +64,7 @@ typedef enum xena_max_outstanding_splits { | |||
64 | #define INTR_DBG 4 | 64 | #define INTR_DBG 4 |
65 | 65 | ||
66 | /* Global variable that defines the present debug level of the driver. */ | 66 | /* Global variable that defines the present debug level of the driver. */ |
67 | int debug_level = ERR_DBG; /* Default level. */ | 67 | static int debug_level = ERR_DBG; |
68 | 68 | ||
69 | /* DEBUG message print. */ | 69 | /* DEBUG message print. */ |
70 | #define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) | 70 | #define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) |
@@ -78,6 +78,13 @@ int debug_level = ERR_DBG; /* Default level. */ | |||
78 | typedef struct { | 78 | typedef struct { |
79 | unsigned long long single_ecc_errs; | 79 | unsigned long long single_ecc_errs; |
80 | unsigned long long double_ecc_errs; | 80 | unsigned long long double_ecc_errs; |
81 | /* LRO statistics */ | ||
82 | unsigned long long clubbed_frms_cnt; | ||
83 | unsigned long long sending_both; | ||
84 | unsigned long long outof_sequence_pkts; | ||
85 | unsigned long long flush_max_pkts; | ||
86 | unsigned long long sum_avg_pkts_aggregated; | ||
87 | unsigned long long num_aggregations; | ||
81 | } swStat_t; | 88 | } swStat_t; |
82 | 89 | ||
83 | /* The statistics block of Xena */ | 90 | /* The statistics block of Xena */ |
@@ -268,7 +275,7 @@ typedef struct stat_block { | |||
268 | #define MAX_RX_RINGS 8 | 275 | #define MAX_RX_RINGS 8 |
269 | 276 | ||
270 | /* FIFO mappings for all possible number of fifos configured */ | 277 | /* FIFO mappings for all possible number of fifos configured */ |
271 | int fifo_map[][MAX_TX_FIFOS] = { | 278 | static int fifo_map[][MAX_TX_FIFOS] = { |
272 | {0, 0, 0, 0, 0, 0, 0, 0}, | 279 | {0, 0, 0, 0, 0, 0, 0, 0}, |
273 | {0, 0, 0, 0, 1, 1, 1, 1}, | 280 | {0, 0, 0, 0, 1, 1, 1, 1}, |
274 | {0, 0, 0, 1, 1, 1, 2, 2}, | 281 | {0, 0, 0, 1, 1, 1, 2, 2}, |
@@ -680,6 +687,24 @@ struct msix_info_st { | |||
680 | u64 data; | 687 | u64 data; |
681 | }; | 688 | }; |
682 | 689 | ||
690 | /* Data structure to represent a LRO session */ | ||
691 | typedef struct lro { | ||
692 | struct sk_buff *parent; | ||
693 | u8 *l2h; | ||
694 | struct iphdr *iph; | ||
695 | struct tcphdr *tcph; | ||
696 | u32 tcp_next_seq; | ||
697 | u32 tcp_ack; | ||
698 | int total_len; | ||
699 | int frags_len; | ||
700 | int sg_num; | ||
701 | int in_use; | ||
702 | u16 window; | ||
703 | u32 cur_tsval; | ||
704 | u32 cur_tsecr; | ||
705 | u8 saw_ts; | ||
706 | }lro_t; | ||
707 | |||
683 | /* Structure representing one instance of the NIC */ | 708 | /* Structure representing one instance of the NIC */ |
684 | struct s2io_nic { | 709 | struct s2io_nic { |
685 | int rxd_mode; | 710 | int rxd_mode; |
@@ -784,6 +809,13 @@ struct s2io_nic { | |||
784 | #define XFRAME_II_DEVICE 2 | 809 | #define XFRAME_II_DEVICE 2 |
785 | u8 device_type; | 810 | u8 device_type; |
786 | 811 | ||
812 | #define MAX_LRO_SESSIONS 32 | ||
813 | lro_t lro0_n[MAX_LRO_SESSIONS]; | ||
814 | unsigned long clubbed_frms_cnt; | ||
815 | unsigned long sending_both; | ||
816 | u8 lro; | ||
817 | u16 lro_max_aggr_per_sess; | ||
818 | |||
787 | #define INTA 0 | 819 | #define INTA 0 |
788 | #define MSI 1 | 820 | #define MSI 1 |
789 | #define MSI_X 2 | 821 | #define MSI_X 2 |
@@ -911,18 +943,16 @@ static void tx_intr_handler(fifo_info_t *fifo_data); | |||
911 | static void alarm_intr_handler(struct s2io_nic *sp); | 943 | static void alarm_intr_handler(struct s2io_nic *sp); |
912 | 944 | ||
913 | static int s2io_starter(void); | 945 | static int s2io_starter(void); |
914 | void s2io_closer(void); | ||
915 | static void s2io_tx_watchdog(struct net_device *dev); | 946 | static void s2io_tx_watchdog(struct net_device *dev); |
916 | static void s2io_tasklet(unsigned long dev_addr); | 947 | static void s2io_tasklet(unsigned long dev_addr); |
917 | static void s2io_set_multicast(struct net_device *dev); | 948 | static void s2io_set_multicast(struct net_device *dev); |
918 | static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp); | 949 | static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp); |
919 | void s2io_link(nic_t * sp, int link); | 950 | static void s2io_link(nic_t * sp, int link); |
920 | void s2io_reset(nic_t * sp); | ||
921 | #if defined(CONFIG_S2IO_NAPI) | 951 | #if defined(CONFIG_S2IO_NAPI) |
922 | static int s2io_poll(struct net_device *dev, int *budget); | 952 | static int s2io_poll(struct net_device *dev, int *budget); |
923 | #endif | 953 | #endif |
924 | static void s2io_init_pci(nic_t * sp); | 954 | static void s2io_init_pci(nic_t * sp); |
925 | int s2io_set_mac_addr(struct net_device *dev, u8 * addr); | 955 | static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); |
926 | static void s2io_alarm_handle(unsigned long data); | 956 | static void s2io_alarm_handle(unsigned long data); |
927 | static int s2io_enable_msi(nic_t *nic); | 957 | static int s2io_enable_msi(nic_t *nic); |
928 | static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs); | 958 | static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs); |
@@ -930,14 +960,19 @@ static irqreturn_t | |||
930 | s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs); | 960 | s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs); |
931 | static irqreturn_t | 961 | static irqreturn_t |
932 | s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs); | 962 | s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs); |
933 | int s2io_enable_msi_x(nic_t *nic); | ||
934 | static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); | 963 | static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); |
935 | static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); | 964 | static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); |
936 | static struct ethtool_ops netdev_ethtool_ops; | 965 | static struct ethtool_ops netdev_ethtool_ops; |
937 | static void s2io_set_link(unsigned long data); | 966 | static void s2io_set_link(unsigned long data); |
938 | int s2io_set_swapper(nic_t * sp); | 967 | static int s2io_set_swapper(nic_t * sp); |
939 | static void s2io_card_down(nic_t *nic); | 968 | static void s2io_card_down(nic_t *nic); |
940 | static int s2io_card_up(nic_t *nic); | 969 | static int s2io_card_up(nic_t *nic); |
941 | int get_xena_rev_id(struct pci_dev *pdev); | 970 | static int get_xena_rev_id(struct pci_dev *pdev); |
942 | void restore_xmsi_data(nic_t *nic); | 971 | static void restore_xmsi_data(nic_t *nic); |
972 | |||
973 | static int s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, RxD_t *rxdp, nic_t *sp); | ||
974 | static void clear_lro_session(lro_t *lro); | ||
975 | static void queue_rx_frame(struct sk_buff *skb); | ||
976 | static void update_L3L4_header(nic_t *sp, lro_t *lro); | ||
977 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); | ||
943 | #endif /* _S2IO_H */ | 978 | #endif /* _S2IO_H */ |
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 76139478c3df..66cf226c4ee3 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c | |||
@@ -59,7 +59,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n"; | |||
59 | #ifdef SB1000_DEBUG | 59 | #ifdef SB1000_DEBUG |
60 | static int sb1000_debug = SB1000_DEBUG; | 60 | static int sb1000_debug = SB1000_DEBUG; |
61 | #else | 61 | #else |
62 | static int sb1000_debug = 1; | 62 | static const int sb1000_debug = 1; |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | static const int SB1000_IO_EXTENT = 8; | 65 | static const int SB1000_IO_EXTENT = 8; |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index aa4ca1821759..f2be9f83f091 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2001,2002,2003 Broadcom Corporation | 2 | * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public License | 5 | * modify it under the terms of the GNU General Public License |
@@ -43,6 +43,7 @@ | |||
43 | #define SBMAC_ETH0_HWADDR "40:00:00:00:01:00" | 43 | #define SBMAC_ETH0_HWADDR "40:00:00:00:01:00" |
44 | #define SBMAC_ETH1_HWADDR "40:00:00:00:01:01" | 44 | #define SBMAC_ETH1_HWADDR "40:00:00:00:01:01" |
45 | #define SBMAC_ETH2_HWADDR "40:00:00:00:01:02" | 45 | #define SBMAC_ETH2_HWADDR "40:00:00:00:01:02" |
46 | #define SBMAC_ETH3_HWADDR "40:00:00:00:01:03" | ||
46 | #endif | 47 | #endif |
47 | 48 | ||
48 | 49 | ||
@@ -57,7 +58,7 @@ static char version1[] __devinitdata = | |||
57 | 58 | ||
58 | #define CONFIG_SBMAC_COALESCE | 59 | #define CONFIG_SBMAC_COALESCE |
59 | 60 | ||
60 | #define MAX_UNITS 3 /* More are supported, limit only on options */ | 61 | #define MAX_UNITS 4 /* More are supported, limit only on options */ |
61 | 62 | ||
62 | /* Time in jiffies before concluding the transmitter is hung. */ | 63 | /* Time in jiffies before concluding the transmitter is hung. */ |
63 | #define TX_TIMEOUT (2*HZ) | 64 | #define TX_TIMEOUT (2*HZ) |
@@ -85,11 +86,11 @@ MODULE_PARM_DESC(noisy_mii, "MII status messages"); | |||
85 | The media type is usually passed in 'options[]'. | 86 | The media type is usually passed in 'options[]'. |
86 | */ | 87 | */ |
87 | #ifdef MODULE | 88 | #ifdef MODULE |
88 | static int options[MAX_UNITS] = {-1, -1, -1}; | 89 | static int options[MAX_UNITS] = {-1, -1, -1, -1}; |
89 | module_param_array(options, int, NULL, S_IRUGO); | 90 | module_param_array(options, int, NULL, S_IRUGO); |
90 | MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS)); | 91 | MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS)); |
91 | 92 | ||
92 | static int full_duplex[MAX_UNITS] = {-1, -1, -1}; | 93 | static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1}; |
93 | module_param_array(full_duplex, int, NULL, S_IRUGO); | 94 | module_param_array(full_duplex, int, NULL, S_IRUGO); |
94 | MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS)); | 95 | MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS)); |
95 | #endif | 96 | #endif |
@@ -105,13 +106,26 @@ MODULE_PARM_DESC(int_timeout, "Timeout value"); | |||
105 | #endif | 106 | #endif |
106 | 107 | ||
107 | #include <asm/sibyte/sb1250.h> | 108 | #include <asm/sibyte/sb1250.h> |
108 | #include <asm/sibyte/sb1250_defs.h> | 109 | #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) |
110 | #include <asm/sibyte/bcm1480_regs.h> | ||
111 | #include <asm/sibyte/bcm1480_int.h> | ||
112 | #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) | ||
109 | #include <asm/sibyte/sb1250_regs.h> | 113 | #include <asm/sibyte/sb1250_regs.h> |
110 | #include <asm/sibyte/sb1250_mac.h> | ||
111 | #include <asm/sibyte/sb1250_dma.h> | ||
112 | #include <asm/sibyte/sb1250_int.h> | 114 | #include <asm/sibyte/sb1250_int.h> |
115 | #else | ||
116 | #error invalid SiByte MAC configuation | ||
117 | #endif | ||
113 | #include <asm/sibyte/sb1250_scd.h> | 118 | #include <asm/sibyte/sb1250_scd.h> |
119 | #include <asm/sibyte/sb1250_mac.h> | ||
120 | #include <asm/sibyte/sb1250_dma.h> | ||
114 | 121 | ||
122 | #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) | ||
123 | #define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2)) | ||
124 | #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) | ||
125 | #define UNIT_INT(n) (K_INT_MAC_0 + (n)) | ||
126 | #else | ||
127 | #error invalid SiByte MAC configuation | ||
128 | #endif | ||
115 | 129 | ||
116 | /********************************************************************** | 130 | /********************************************************************** |
117 | * Simple types | 131 | * Simple types |
@@ -1476,10 +1490,10 @@ static void sbmac_channel_start(struct sbmac_softc *s) | |||
1476 | * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above | 1490 | * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above |
1477 | * Use a larger RD_THRSH for gigabit | 1491 | * Use a larger RD_THRSH for gigabit |
1478 | */ | 1492 | */ |
1479 | if (periph_rev >= 2) | 1493 | if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) |
1480 | th_value = 64; | ||
1481 | else | ||
1482 | th_value = 28; | 1494 | th_value = 28; |
1495 | else | ||
1496 | th_value = 64; | ||
1483 | 1497 | ||
1484 | fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ | 1498 | fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ |
1485 | ((s->sbm_speed == sbmac_speed_1000) | 1499 | ((s->sbm_speed == sbmac_speed_1000) |
@@ -1589,13 +1603,17 @@ static void sbmac_channel_start(struct sbmac_softc *s) | |||
1589 | * Turn on the rest of the bits in the enable register | 1603 | * Turn on the rest of the bits in the enable register |
1590 | */ | 1604 | */ |
1591 | 1605 | ||
1606 | #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) | ||
1607 | __raw_writeq(M_MAC_RXDMA_EN0 | | ||
1608 | M_MAC_TXDMA_EN0, s->sbm_macenable); | ||
1609 | #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) | ||
1592 | __raw_writeq(M_MAC_RXDMA_EN0 | | 1610 | __raw_writeq(M_MAC_RXDMA_EN0 | |
1593 | M_MAC_TXDMA_EN0 | | 1611 | M_MAC_TXDMA_EN0 | |
1594 | M_MAC_RX_ENABLE | | 1612 | M_MAC_RX_ENABLE | |
1595 | M_MAC_TX_ENABLE, s->sbm_macenable); | 1613 | M_MAC_TX_ENABLE, s->sbm_macenable); |
1596 | 1614 | #else | |
1597 | 1615 | #error invalid SiByte MAC configuation | |
1598 | 1616 | #endif | |
1599 | 1617 | ||
1600 | #ifdef CONFIG_SBMAC_COALESCE | 1618 | #ifdef CONFIG_SBMAC_COALESCE |
1601 | /* | 1619 | /* |
@@ -1786,11 +1804,12 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) | |||
1786 | reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); | 1804 | reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); |
1787 | __raw_writeq(reg, sc->sbm_rxfilter); | 1805 | __raw_writeq(reg, sc->sbm_rxfilter); |
1788 | 1806 | ||
1789 | /* read system identification to determine revision */ | 1807 | /* BCM1250 pass1 didn't have hardware checksum. Everything |
1790 | if (periph_rev >= 2) { | 1808 | later does. */ |
1791 | sc->rx_hw_checksum = ENABLE; | 1809 | if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) { |
1792 | } else { | ||
1793 | sc->rx_hw_checksum = DISABLE; | 1810 | sc->rx_hw_checksum = DISABLE; |
1811 | } else { | ||
1812 | sc->rx_hw_checksum = ENABLE; | ||
1794 | } | 1813 | } |
1795 | } | 1814 | } |
1796 | 1815 | ||
@@ -2220,7 +2239,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc) | |||
2220 | 2239 | ||
2221 | 2240 | ||
2222 | 2241 | ||
2223 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) | 2242 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR) |
2224 | /********************************************************************** | 2243 | /********************************************************************** |
2225 | * SBMAC_PARSE_XDIGIT(str) | 2244 | * SBMAC_PARSE_XDIGIT(str) |
2226 | * | 2245 | * |
@@ -2792,7 +2811,7 @@ static int sbmac_close(struct net_device *dev) | |||
2792 | 2811 | ||
2793 | 2812 | ||
2794 | 2813 | ||
2795 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) | 2814 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR) |
2796 | static void | 2815 | static void |
2797 | sbmac_setup_hwaddr(int chan,char *addr) | 2816 | sbmac_setup_hwaddr(int chan,char *addr) |
2798 | { | 2817 | { |
@@ -2818,25 +2837,7 @@ sbmac_init_module(void) | |||
2818 | unsigned long port; | 2837 | unsigned long port; |
2819 | int chip_max_units; | 2838 | int chip_max_units; |
2820 | 2839 | ||
2821 | /* | 2840 | /* Set the number of available units based on the SOC type. */ |
2822 | * For bringup when not using the firmware, we can pre-fill | ||
2823 | * the MAC addresses using the environment variables | ||
2824 | * specified in this file (or maybe from the config file?) | ||
2825 | */ | ||
2826 | #ifdef SBMAC_ETH0_HWADDR | ||
2827 | sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR); | ||
2828 | #endif | ||
2829 | #ifdef SBMAC_ETH1_HWADDR | ||
2830 | sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR); | ||
2831 | #endif | ||
2832 | #ifdef SBMAC_ETH2_HWADDR | ||
2833 | sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR); | ||
2834 | #endif | ||
2835 | |||
2836 | /* | ||
2837 | * Walk through the Ethernet controllers and find | ||
2838 | * those who have their MAC addresses set. | ||
2839 | */ | ||
2840 | switch (soc_type) { | 2841 | switch (soc_type) { |
2841 | case K_SYS_SOC_TYPE_BCM1250: | 2842 | case K_SYS_SOC_TYPE_BCM1250: |
2842 | case K_SYS_SOC_TYPE_BCM1250_ALT: | 2843 | case K_SYS_SOC_TYPE_BCM1250_ALT: |
@@ -2848,6 +2849,10 @@ sbmac_init_module(void) | |||
2848 | case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ | 2849 | case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ |
2849 | chip_max_units = 2; | 2850 | chip_max_units = 2; |
2850 | break; | 2851 | break; |
2852 | case K_SYS_SOC_TYPE_BCM1x55: | ||
2853 | case K_SYS_SOC_TYPE_BCM1x80: | ||
2854 | chip_max_units = 4; | ||
2855 | break; | ||
2851 | default: | 2856 | default: |
2852 | chip_max_units = 0; | 2857 | chip_max_units = 0; |
2853 | break; | 2858 | break; |
@@ -2855,6 +2860,32 @@ sbmac_init_module(void) | |||
2855 | if (chip_max_units > MAX_UNITS) | 2860 | if (chip_max_units > MAX_UNITS) |
2856 | chip_max_units = MAX_UNITS; | 2861 | chip_max_units = MAX_UNITS; |
2857 | 2862 | ||
2863 | /* | ||
2864 | * For bringup when not using the firmware, we can pre-fill | ||
2865 | * the MAC addresses using the environment variables | ||
2866 | * specified in this file (or maybe from the config file?) | ||
2867 | */ | ||
2868 | #ifdef SBMAC_ETH0_HWADDR | ||
2869 | if (chip_max_units > 0) | ||
2870 | sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR); | ||
2871 | #endif | ||
2872 | #ifdef SBMAC_ETH1_HWADDR | ||
2873 | if (chip_max_units > 1) | ||
2874 | sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR); | ||
2875 | #endif | ||
2876 | #ifdef SBMAC_ETH2_HWADDR | ||
2877 | if (chip_max_units > 2) | ||
2878 | sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR); | ||
2879 | #endif | ||
2880 | #ifdef SBMAC_ETH3_HWADDR | ||
2881 | if (chip_max_units > 3) | ||
2882 | sbmac_setup_hwaddr(3,SBMAC_ETH3_HWADDR); | ||
2883 | #endif | ||
2884 | |||
2885 | /* | ||
2886 | * Walk through the Ethernet controllers and find | ||
2887 | * those who have their MAC addresses set. | ||
2888 | */ | ||
2858 | for (idx = 0; idx < chip_max_units; idx++) { | 2889 | for (idx = 0; idx < chip_max_units; idx++) { |
2859 | 2890 | ||
2860 | /* | 2891 | /* |
@@ -2886,7 +2917,7 @@ sbmac_init_module(void) | |||
2886 | 2917 | ||
2887 | printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); | 2918 | printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); |
2888 | 2919 | ||
2889 | dev->irq = K_INT_MAC_0 + idx; | 2920 | dev->irq = UNIT_INT(idx); |
2890 | dev->base_addr = port; | 2921 | dev->base_addr = port; |
2891 | dev->mem_end = 0; | 2922 | dev->mem_end = 0; |
2892 | if (sbmac_init(dev, idx)) { | 2923 | if (sbmac_init(dev, idx)) { |
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index 79dca398f3ac..bcef03feb2fc 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c | |||
@@ -46,6 +46,7 @@ static const char version[] = | |||
46 | #include <linux/etherdevice.h> | 46 | #include <linux/etherdevice.h> |
47 | #include <linux/skbuff.h> | 47 | #include <linux/skbuff.h> |
48 | #include <linux/bitops.h> | 48 | #include <linux/bitops.h> |
49 | #include <linux/jiffies.h> | ||
49 | 50 | ||
50 | #include <asm/system.h> | 51 | #include <asm/system.h> |
51 | #include <asm/io.h> | 52 | #include <asm/io.h> |
@@ -699,7 +700,7 @@ static void hardware_send_packet(struct net_device * dev, char *buf, int length) | |||
699 | int ioaddr = dev->base_addr; | 700 | int ioaddr = dev->base_addr; |
700 | int status = inw(SEEQ_STATUS); | 701 | int status = inw(SEEQ_STATUS); |
701 | int transmit_ptr = 0; | 702 | int transmit_ptr = 0; |
702 | int tmp; | 703 | unsigned long tmp; |
703 | 704 | ||
704 | if (net_debug>4) { | 705 | if (net_debug>4) { |
705 | printk("%s: send 0x%04x\n",dev->name,length); | 706 | printk("%s: send 0x%04x\n",dev->name,length); |
@@ -724,7 +725,7 @@ static void hardware_send_packet(struct net_device * dev, char *buf, int length) | |||
724 | 725 | ||
725 | /* drain FIFO */ | 726 | /* drain FIFO */ |
726 | tmp = jiffies; | 727 | tmp = jiffies; |
727 | while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && (jiffies - tmp < HZ)) | 728 | while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && time_before(jiffies, tmp + HZ)) |
728 | mb(); | 729 | mb(); |
729 | 730 | ||
730 | /* doit ! */ | 731 | /* doit ! */ |
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index a4614df38a90..f95a5b0223fb 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c | |||
@@ -3,6 +3,9 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) | 4 | * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) |
5 | */ | 5 | */ |
6 | |||
7 | #undef DEBUG | ||
8 | |||
6 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
7 | #include <linux/module.h> | 10 | #include <linux/module.h> |
8 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
@@ -59,8 +62,6 @@ static char *sgiseeqstr = "SGI Seeq8003"; | |||
59 | sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ | 62 | sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ |
60 | sp->tx_old - sp->tx_new - 1) | 63 | sp->tx_old - sp->tx_new - 1) |
61 | 64 | ||
62 | #define DEBUG | ||
63 | |||
64 | struct sgiseeq_rx_desc { | 65 | struct sgiseeq_rx_desc { |
65 | volatile struct hpc_dma_desc rdma; | 66 | volatile struct hpc_dma_desc rdma; |
66 | volatile signed int buf_vaddr; | 67 | volatile signed int buf_vaddr; |
@@ -209,7 +210,7 @@ static int seeq_init_ring(struct net_device *dev) | |||
209 | static struct sgiseeq_private *gpriv; | 210 | static struct sgiseeq_private *gpriv; |
210 | static struct net_device *gdev; | 211 | static struct net_device *gdev; |
211 | 212 | ||
212 | void sgiseeq_dump_rings(void) | 213 | static void sgiseeq_dump_rings(void) |
213 | { | 214 | { |
214 | static int once; | 215 | static int once; |
215 | struct sgiseeq_rx_desc *r = gpriv->rx_desc; | 216 | struct sgiseeq_rx_desc *r = gpriv->rx_desc; |
@@ -311,9 +312,9 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp | |||
311 | struct sgiseeq_regs *sregs) | 312 | struct sgiseeq_regs *sregs) |
312 | { | 313 | { |
313 | struct sgiseeq_rx_desc *rd; | 314 | struct sgiseeq_rx_desc *rd; |
314 | struct sk_buff *skb = 0; | 315 | struct sk_buff *skb = NULL; |
315 | unsigned char pkt_status; | 316 | unsigned char pkt_status; |
316 | unsigned char *pkt_pointer = 0; | 317 | unsigned char *pkt_pointer = NULL; |
317 | int len = 0; | 318 | int len = 0; |
318 | unsigned int orig_end = PREV_RX(sp->rx_new); | 319 | unsigned int orig_end = PREV_RX(sp->rx_new); |
319 | 320 | ||
@@ -515,12 +516,6 @@ static inline int sgiseeq_reset(struct net_device *dev) | |||
515 | return 0; | 516 | return 0; |
516 | } | 517 | } |
517 | 518 | ||
518 | void sgiseeq_my_reset(void) | ||
519 | { | ||
520 | printk("RESET!\n"); | ||
521 | sgiseeq_reset(gdev); | ||
522 | } | ||
523 | |||
524 | static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | 519 | static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) |
525 | { | 520 | { |
526 | struct sgiseeq_private *sp = netdev_priv(dev); | 521 | struct sgiseeq_private *sp = netdev_priv(dev); |
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c index 221354eea21f..88e212043a43 100644 --- a/drivers/net/shaper.c +++ b/drivers/net/shaper.c | |||
@@ -83,6 +83,7 @@ | |||
83 | #include <linux/if_arp.h> | 83 | #include <linux/if_arp.h> |
84 | #include <linux/init.h> | 84 | #include <linux/init.h> |
85 | #include <linux/if_shaper.h> | 85 | #include <linux/if_shaper.h> |
86 | #include <linux/jiffies.h> | ||
86 | 87 | ||
87 | #include <net/dst.h> | 88 | #include <net/dst.h> |
88 | #include <net/arp.h> | 89 | #include <net/arp.h> |
@@ -168,7 +169,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
168 | /* | 169 | /* |
169 | * Queue over time. Spill packet. | 170 | * Queue over time. Spill packet. |
170 | */ | 171 | */ |
171 | if(SHAPERCB(skb)->shapeclock-jiffies > SHAPER_LATENCY) { | 172 | if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) { |
172 | dev_kfree_skb(skb); | 173 | dev_kfree_skb(skb); |
173 | shaper->stats.tx_dropped++; | 174 | shaper->stats.tx_dropped++; |
174 | } else | 175 | } else |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index ed4bc91638d2..31dd3f036fa8 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -366,7 +366,7 @@ static const u32 sis190_intr_mask = | |||
366 | * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | 366 | * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). |
367 | * The chips use a 64 element hash table based on the Ethernet CRC. | 367 | * The chips use a 64 element hash table based on the Ethernet CRC. |
368 | */ | 368 | */ |
369 | static int multicast_filter_limit = 32; | 369 | static const int multicast_filter_limit = 32; |
370 | 370 | ||
371 | static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) | 371 | static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) |
372 | { | 372 | { |
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 7a952fe60be2..a1cb07cdb60f 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -100,7 +100,7 @@ enum { | |||
100 | SIS_900 = 0, | 100 | SIS_900 = 0, |
101 | SIS_7016 | 101 | SIS_7016 |
102 | }; | 102 | }; |
103 | static char * card_names[] = { | 103 | static const char * card_names[] = { |
104 | "SiS 900 PCI Fast Ethernet", | 104 | "SiS 900 PCI Fast Ethernet", |
105 | "SiS 7016 PCI Fast Ethernet" | 105 | "SiS 7016 PCI Fast Ethernet" |
106 | }; | 106 | }; |
@@ -115,7 +115,7 @@ MODULE_DEVICE_TABLE (pci, sis900_pci_tbl); | |||
115 | 115 | ||
116 | static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex); | 116 | static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex); |
117 | 117 | ||
118 | static struct mii_chip_info { | 118 | static const struct mii_chip_info { |
119 | const char * name; | 119 | const char * name; |
120 | u16 phy_id0; | 120 | u16 phy_id0; |
121 | u16 phy_id1; | 121 | u16 phy_id1; |
@@ -400,7 +400,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, | |||
400 | void *ring_space; | 400 | void *ring_space; |
401 | long ioaddr; | 401 | long ioaddr; |
402 | int i, ret; | 402 | int i, ret; |
403 | char *card_name = card_names[pci_id->driver_data]; | 403 | const char *card_name = card_names[pci_id->driver_data]; |
404 | const char *dev_name = pci_name(pci_dev); | 404 | const char *dev_name = pci_name(pci_dev); |
405 | 405 | ||
406 | /* when built into the kernel, we only print version if device is found */ | 406 | /* when built into the kernel, we only print version if device is found */ |
@@ -1275,7 +1275,7 @@ static void sis900_timer(unsigned long data) | |||
1275 | struct net_device *net_dev = (struct net_device *)data; | 1275 | struct net_device *net_dev = (struct net_device *)data; |
1276 | struct sis900_private *sis_priv = net_dev->priv; | 1276 | struct sis900_private *sis_priv = net_dev->priv; |
1277 | struct mii_phy *mii_phy = sis_priv->mii; | 1277 | struct mii_phy *mii_phy = sis_priv->mii; |
1278 | static int next_tick = 5*HZ; | 1278 | static const int next_tick = 5*HZ; |
1279 | u16 status; | 1279 | u16 status; |
1280 | 1280 | ||
1281 | if (!sis_priv->autong_complete){ | 1281 | if (!sis_priv->autong_complete){ |
diff --git a/drivers/net/sk98lin/h/skaddr.h b/drivers/net/sk98lin/h/skaddr.h index 3a2ea4a4b539..423ad063d09b 100644 --- a/drivers/net/sk98lin/h/skaddr.h +++ b/drivers/net/sk98lin/h/skaddr.h | |||
@@ -236,18 +236,6 @@ extern int SkAddrMcClear( | |||
236 | SK_U32 PortNumber, | 236 | SK_U32 PortNumber, |
237 | int Flags); | 237 | int Flags); |
238 | 238 | ||
239 | extern int SkAddrXmacMcClear( | ||
240 | SK_AC *pAC, | ||
241 | SK_IOC IoC, | ||
242 | SK_U32 PortNumber, | ||
243 | int Flags); | ||
244 | |||
245 | extern int SkAddrGmacMcClear( | ||
246 | SK_AC *pAC, | ||
247 | SK_IOC IoC, | ||
248 | SK_U32 PortNumber, | ||
249 | int Flags); | ||
250 | |||
251 | extern int SkAddrMcAdd( | 239 | extern int SkAddrMcAdd( |
252 | SK_AC *pAC, | 240 | SK_AC *pAC, |
253 | SK_IOC IoC, | 241 | SK_IOC IoC, |
@@ -255,35 +243,11 @@ extern int SkAddrMcAdd( | |||
255 | SK_MAC_ADDR *pMc, | 243 | SK_MAC_ADDR *pMc, |
256 | int Flags); | 244 | int Flags); |
257 | 245 | ||
258 | extern int SkAddrXmacMcAdd( | ||
259 | SK_AC *pAC, | ||
260 | SK_IOC IoC, | ||
261 | SK_U32 PortNumber, | ||
262 | SK_MAC_ADDR *pMc, | ||
263 | int Flags); | ||
264 | |||
265 | extern int SkAddrGmacMcAdd( | ||
266 | SK_AC *pAC, | ||
267 | SK_IOC IoC, | ||
268 | SK_U32 PortNumber, | ||
269 | SK_MAC_ADDR *pMc, | ||
270 | int Flags); | ||
271 | |||
272 | extern int SkAddrMcUpdate( | 246 | extern int SkAddrMcUpdate( |
273 | SK_AC *pAC, | 247 | SK_AC *pAC, |
274 | SK_IOC IoC, | 248 | SK_IOC IoC, |
275 | SK_U32 PortNumber); | 249 | SK_U32 PortNumber); |
276 | 250 | ||
277 | extern int SkAddrXmacMcUpdate( | ||
278 | SK_AC *pAC, | ||
279 | SK_IOC IoC, | ||
280 | SK_U32 PortNumber); | ||
281 | |||
282 | extern int SkAddrGmacMcUpdate( | ||
283 | SK_AC *pAC, | ||
284 | SK_IOC IoC, | ||
285 | SK_U32 PortNumber); | ||
286 | |||
287 | extern int SkAddrOverride( | 251 | extern int SkAddrOverride( |
288 | SK_AC *pAC, | 252 | SK_AC *pAC, |
289 | SK_IOC IoC, | 253 | SK_IOC IoC, |
@@ -297,18 +261,6 @@ extern int SkAddrPromiscuousChange( | |||
297 | SK_U32 PortNumber, | 261 | SK_U32 PortNumber, |
298 | int NewPromMode); | 262 | int NewPromMode); |
299 | 263 | ||
300 | extern int SkAddrXmacPromiscuousChange( | ||
301 | SK_AC *pAC, | ||
302 | SK_IOC IoC, | ||
303 | SK_U32 PortNumber, | ||
304 | int NewPromMode); | ||
305 | |||
306 | extern int SkAddrGmacPromiscuousChange( | ||
307 | SK_AC *pAC, | ||
308 | SK_IOC IoC, | ||
309 | SK_U32 PortNumber, | ||
310 | int NewPromMode); | ||
311 | |||
312 | #ifndef SK_SLIM | 264 | #ifndef SK_SLIM |
313 | extern int SkAddrSwap( | 265 | extern int SkAddrSwap( |
314 | SK_AC *pAC, | 266 | SK_AC *pAC, |
diff --git a/drivers/net/sk98lin/h/skcsum.h b/drivers/net/sk98lin/h/skcsum.h index 2b94adb93331..6e256bd9a28c 100644 --- a/drivers/net/sk98lin/h/skcsum.h +++ b/drivers/net/sk98lin/h/skcsum.h | |||
@@ -203,12 +203,6 @@ extern SKCS_STATUS SkCsGetReceiveInfo( | |||
203 | unsigned Checksum2, | 203 | unsigned Checksum2, |
204 | int NetNumber); | 204 | int NetNumber); |
205 | 205 | ||
206 | extern void SkCsGetSendInfo( | ||
207 | SK_AC *pAc, | ||
208 | void *pIpHeader, | ||
209 | SKCS_PACKET_INFO *pPacketInfo, | ||
210 | int NetNumber); | ||
211 | |||
212 | extern void SkCsSetReceiveFlags( | 206 | extern void SkCsSetReceiveFlags( |
213 | SK_AC *pAc, | 207 | SK_AC *pAc, |
214 | unsigned ReceiveFlags, | 208 | unsigned ReceiveFlags, |
diff --git a/drivers/net/sk98lin/h/skgeinit.h b/drivers/net/sk98lin/h/skgeinit.h index 184f47c5a60f..143e635ec24d 100644 --- a/drivers/net/sk98lin/h/skgeinit.h +++ b/drivers/net/sk98lin/h/skgeinit.h | |||
@@ -464,12 +464,6 @@ typedef struct s_GeInit { | |||
464 | /* | 464 | /* |
465 | * public functions in skgeinit.c | 465 | * public functions in skgeinit.c |
466 | */ | 466 | */ |
467 | extern void SkGePollRxD( | ||
468 | SK_AC *pAC, | ||
469 | SK_IOC IoC, | ||
470 | int Port, | ||
471 | SK_BOOL PollRxD); | ||
472 | |||
473 | extern void SkGePollTxD( | 467 | extern void SkGePollTxD( |
474 | SK_AC *pAC, | 468 | SK_AC *pAC, |
475 | SK_IOC IoC, | 469 | SK_IOC IoC, |
@@ -522,10 +516,6 @@ extern void SkGeXmitLED( | |||
522 | int Led, | 516 | int Led, |
523 | int Mode); | 517 | int Mode); |
524 | 518 | ||
525 | extern void SkGeInitRamIface( | ||
526 | SK_AC *pAC, | ||
527 | SK_IOC IoC); | ||
528 | |||
529 | extern int SkGeInitAssignRamToQueues( | 519 | extern int SkGeInitAssignRamToQueues( |
530 | SK_AC *pAC, | 520 | SK_AC *pAC, |
531 | int ActivePort, | 521 | int ActivePort, |
@@ -549,11 +539,6 @@ extern void SkMacHardRst( | |||
549 | SK_IOC IoC, | 539 | SK_IOC IoC, |
550 | int Port); | 540 | int Port); |
551 | 541 | ||
552 | extern void SkMacClearRst( | ||
553 | SK_AC *pAC, | ||
554 | SK_IOC IoC, | ||
555 | int Port); | ||
556 | |||
557 | extern void SkXmInitMac( | 542 | extern void SkXmInitMac( |
558 | SK_AC *pAC, | 543 | SK_AC *pAC, |
559 | SK_IOC IoC, | 544 | SK_IOC IoC, |
@@ -580,11 +565,6 @@ extern void SkMacFlushTxFifo( | |||
580 | SK_IOC IoC, | 565 | SK_IOC IoC, |
581 | int Port); | 566 | int Port); |
582 | 567 | ||
583 | extern void SkMacFlushRxFifo( | ||
584 | SK_AC *pAC, | ||
585 | SK_IOC IoC, | ||
586 | int Port); | ||
587 | |||
588 | extern void SkMacIrq( | 568 | extern void SkMacIrq( |
589 | SK_AC *pAC, | 569 | SK_AC *pAC, |
590 | SK_IOC IoC, | 570 | SK_IOC IoC, |
@@ -601,12 +581,6 @@ extern void SkMacAutoNegLipaPhy( | |||
601 | int Port, | 581 | int Port, |
602 | SK_U16 IStatus); | 582 | SK_U16 IStatus); |
603 | 583 | ||
604 | extern void SkMacSetRxTxEn( | ||
605 | SK_AC *pAC, | ||
606 | SK_IOC IoC, | ||
607 | int Port, | ||
608 | int Para); | ||
609 | |||
610 | extern int SkMacRxTxEnable( | 584 | extern int SkMacRxTxEnable( |
611 | SK_AC *pAC, | 585 | SK_AC *pAC, |
612 | SK_IOC IoC, | 586 | SK_IOC IoC, |
@@ -659,16 +633,6 @@ extern void SkXmClrExactAddr( | |||
659 | int StartNum, | 633 | int StartNum, |
660 | int StopNum); | 634 | int StopNum); |
661 | 635 | ||
662 | extern void SkXmInitDupMd( | ||
663 | SK_AC *pAC, | ||
664 | SK_IOC IoC, | ||
665 | int Port); | ||
666 | |||
667 | extern void SkXmInitPauseMd( | ||
668 | SK_AC *pAC, | ||
669 | SK_IOC IoC, | ||
670 | int Port); | ||
671 | |||
672 | extern void SkXmAutoNegLipaXmac( | 636 | extern void SkXmAutoNegLipaXmac( |
673 | SK_AC *pAC, | 637 | SK_AC *pAC, |
674 | SK_IOC IoC, | 638 | SK_IOC IoC, |
@@ -729,17 +693,6 @@ extern int SkGmCableDiagStatus( | |||
729 | int Port, | 693 | int Port, |
730 | SK_BOOL StartTest); | 694 | SK_BOOL StartTest); |
731 | 695 | ||
732 | extern int SkGmEnterLowPowerMode( | ||
733 | SK_AC *pAC, | ||
734 | SK_IOC IoC, | ||
735 | int Port, | ||
736 | SK_U8 Mode); | ||
737 | |||
738 | extern int SkGmLeaveLowPowerMode( | ||
739 | SK_AC *pAC, | ||
740 | SK_IOC IoC, | ||
741 | int Port); | ||
742 | |||
743 | #ifdef SK_DIAG | 696 | #ifdef SK_DIAG |
744 | extern void SkGePhyRead( | 697 | extern void SkGePhyRead( |
745 | SK_AC *pAC, | 698 | SK_AC *pAC, |
@@ -782,7 +735,6 @@ extern void SkXmSendCont( | |||
782 | /* | 735 | /* |
783 | * public functions in skgeinit.c | 736 | * public functions in skgeinit.c |
784 | */ | 737 | */ |
785 | extern void SkGePollRxD(); | ||
786 | extern void SkGePollTxD(); | 738 | extern void SkGePollTxD(); |
787 | extern void SkGeYellowLED(); | 739 | extern void SkGeYellowLED(); |
788 | extern int SkGeCfgSync(); | 740 | extern int SkGeCfgSync(); |
@@ -792,7 +744,6 @@ extern int SkGeInit(); | |||
792 | extern void SkGeDeInit(); | 744 | extern void SkGeDeInit(); |
793 | extern int SkGeInitPort(); | 745 | extern int SkGeInitPort(); |
794 | extern void SkGeXmitLED(); | 746 | extern void SkGeXmitLED(); |
795 | extern void SkGeInitRamIface(); | ||
796 | extern int SkGeInitAssignRamToQueues(); | 747 | extern int SkGeInitAssignRamToQueues(); |
797 | 748 | ||
798 | /* | 749 | /* |
@@ -801,18 +752,15 @@ extern int SkGeInitAssignRamToQueues(); | |||
801 | extern void SkMacRxTxDisable(); | 752 | extern void SkMacRxTxDisable(); |
802 | extern void SkMacSoftRst(); | 753 | extern void SkMacSoftRst(); |
803 | extern void SkMacHardRst(); | 754 | extern void SkMacHardRst(); |
804 | extern void SkMacClearRst(); | ||
805 | extern void SkMacInitPhy(); | 755 | extern void SkMacInitPhy(); |
806 | extern int SkMacRxTxEnable(); | 756 | extern int SkMacRxTxEnable(); |
807 | extern void SkMacPromiscMode(); | 757 | extern void SkMacPromiscMode(); |
808 | extern void SkMacHashing(); | 758 | extern void SkMacHashing(); |
809 | extern void SkMacIrqDisable(); | 759 | extern void SkMacIrqDisable(); |
810 | extern void SkMacFlushTxFifo(); | 760 | extern void SkMacFlushTxFifo(); |
811 | extern void SkMacFlushRxFifo(); | ||
812 | extern void SkMacIrq(); | 761 | extern void SkMacIrq(); |
813 | extern int SkMacAutoNegDone(); | 762 | extern int SkMacAutoNegDone(); |
814 | extern void SkMacAutoNegLipaPhy(); | 763 | extern void SkMacAutoNegLipaPhy(); |
815 | extern void SkMacSetRxTxEn(); | ||
816 | extern void SkXmInitMac(); | 764 | extern void SkXmInitMac(); |
817 | extern void SkXmPhyRead(); | 765 | extern void SkXmPhyRead(); |
818 | extern void SkXmPhyWrite(); | 766 | extern void SkXmPhyWrite(); |
@@ -820,8 +768,6 @@ extern void SkGmInitMac(); | |||
820 | extern void SkGmPhyRead(); | 768 | extern void SkGmPhyRead(); |
821 | extern void SkGmPhyWrite(); | 769 | extern void SkGmPhyWrite(); |
822 | extern void SkXmClrExactAddr(); | 770 | extern void SkXmClrExactAddr(); |
823 | extern void SkXmInitDupMd(); | ||
824 | extern void SkXmInitPauseMd(); | ||
825 | extern void SkXmAutoNegLipaXmac(); | 771 | extern void SkXmAutoNegLipaXmac(); |
826 | extern int SkXmUpdateStats(); | 772 | extern int SkXmUpdateStats(); |
827 | extern int SkGmUpdateStats(); | 773 | extern int SkGmUpdateStats(); |
@@ -832,8 +778,6 @@ extern int SkGmResetCounter(); | |||
832 | extern int SkXmOverflowStatus(); | 778 | extern int SkXmOverflowStatus(); |
833 | extern int SkGmOverflowStatus(); | 779 | extern int SkGmOverflowStatus(); |
834 | extern int SkGmCableDiagStatus(); | 780 | extern int SkGmCableDiagStatus(); |
835 | extern int SkGmEnterLowPowerMode(); | ||
836 | extern int SkGmLeaveLowPowerMode(); | ||
837 | 781 | ||
838 | #ifdef SK_DIAG | 782 | #ifdef SK_DIAG |
839 | extern void SkGePhyRead(); | 783 | extern void SkGePhyRead(); |
diff --git a/drivers/net/sk98lin/h/skgepnmi.h b/drivers/net/sk98lin/h/skgepnmi.h index 3b2773e6f822..1ed214ccb253 100644 --- a/drivers/net/sk98lin/h/skgepnmi.h +++ b/drivers/net/sk98lin/h/skgepnmi.h | |||
@@ -946,10 +946,6 @@ typedef struct s_PnmiData { | |||
946 | * Function prototypes | 946 | * Function prototypes |
947 | */ | 947 | */ |
948 | extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level); | 948 | extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level); |
949 | extern int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, | ||
950 | unsigned int* pLen, SK_U32 Instance, SK_U32 NetIndex); | ||
951 | extern int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, | ||
952 | void* pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | ||
953 | extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, | 949 | extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, |
954 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | 950 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); |
955 | extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, | 951 | extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, |
diff --git a/drivers/net/sk98lin/h/skgesirq.h b/drivers/net/sk98lin/h/skgesirq.h index b486bd9b6628..3eec6274e413 100644 --- a/drivers/net/sk98lin/h/skgesirq.h +++ b/drivers/net/sk98lin/h/skgesirq.h | |||
@@ -105,7 +105,6 @@ | |||
105 | 105 | ||
106 | extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); | 106 | extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); |
107 | extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); | 107 | extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); |
108 | extern void SkHWLinkUp(SK_AC *pAC, SK_IOC IoC, int Port); | ||
109 | extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port); | 108 | extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port); |
110 | 109 | ||
111 | #endif /* _INC_SKGESIRQ_H_ */ | 110 | #endif /* _INC_SKGESIRQ_H_ */ |
diff --git a/drivers/net/sk98lin/h/ski2c.h b/drivers/net/sk98lin/h/ski2c.h index 598bb42ccc3d..6a63f4a15de6 100644 --- a/drivers/net/sk98lin/h/ski2c.h +++ b/drivers/net/sk98lin/h/ski2c.h | |||
@@ -162,9 +162,6 @@ typedef struct s_I2c { | |||
162 | } SK_I2C; | 162 | } SK_I2C; |
163 | 163 | ||
164 | extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level); | 164 | extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level); |
165 | extern int SkI2cWrite(SK_AC *pAC, SK_IOC IoC, SK_U32 Data, int Dev, int Size, | ||
166 | int Reg, int Burst); | ||
167 | extern int SkI2cReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen); | ||
168 | #ifdef SK_DIAG | 165 | #ifdef SK_DIAG |
169 | extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg, | 166 | extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg, |
170 | int Burst); | 167 | int Burst); |
diff --git a/drivers/net/sk98lin/h/skvpd.h b/drivers/net/sk98lin/h/skvpd.h index daa9a8d154fc..fdd9e48e8040 100644 --- a/drivers/net/sk98lin/h/skvpd.h +++ b/drivers/net/sk98lin/h/skvpd.h | |||
@@ -183,14 +183,6 @@ extern SK_U32 VpdReadDWord( | |||
183 | int addr); | 183 | int addr); |
184 | #endif /* SKDIAG */ | 184 | #endif /* SKDIAG */ |
185 | 185 | ||
186 | extern int VpdSetupPara( | ||
187 | SK_AC *pAC, | ||
188 | const char *key, | ||
189 | const char *buf, | ||
190 | int len, | ||
191 | int type, | ||
192 | int op); | ||
193 | |||
194 | extern SK_VPD_STATUS *VpdStat( | 186 | extern SK_VPD_STATUS *VpdStat( |
195 | SK_AC *pAC, | 187 | SK_AC *pAC, |
196 | SK_IOC IoC); | 188 | SK_IOC IoC); |
@@ -227,11 +219,6 @@ extern int VpdUpdate( | |||
227 | SK_AC *pAC, | 219 | SK_AC *pAC, |
228 | SK_IOC IoC); | 220 | SK_IOC IoC); |
229 | 221 | ||
230 | extern void VpdErrLog( | ||
231 | SK_AC *pAC, | ||
232 | SK_IOC IoC, | ||
233 | char *msg); | ||
234 | |||
235 | #ifdef SKDIAG | 222 | #ifdef SKDIAG |
236 | extern int VpdReadBlock( | 223 | extern int VpdReadBlock( |
237 | SK_AC *pAC, | 224 | SK_AC *pAC, |
@@ -249,7 +236,6 @@ extern int VpdWriteBlock( | |||
249 | #endif /* SKDIAG */ | 236 | #endif /* SKDIAG */ |
250 | #else /* SK_KR_PROTO */ | 237 | #else /* SK_KR_PROTO */ |
251 | extern SK_U32 VpdReadDWord(); | 238 | extern SK_U32 VpdReadDWord(); |
252 | extern int VpdSetupPara(); | ||
253 | extern SK_VPD_STATUS *VpdStat(); | 239 | extern SK_VPD_STATUS *VpdStat(); |
254 | extern int VpdKeys(); | 240 | extern int VpdKeys(); |
255 | extern int VpdRead(); | 241 | extern int VpdRead(); |
@@ -257,7 +243,6 @@ extern SK_BOOL VpdMayWrite(); | |||
257 | extern int VpdWrite(); | 243 | extern int VpdWrite(); |
258 | extern int VpdDelete(); | 244 | extern int VpdDelete(); |
259 | extern int VpdUpdate(); | 245 | extern int VpdUpdate(); |
260 | extern void VpdErrLog(); | ||
261 | #endif /* SK_KR_PROTO */ | 246 | #endif /* SK_KR_PROTO */ |
262 | 247 | ||
263 | #endif /* __INC_SKVPD_H_ */ | 248 | #endif /* __INC_SKVPD_H_ */ |
diff --git a/drivers/net/sk98lin/skaddr.c b/drivers/net/sk98lin/skaddr.c index a7e25edc7fc4..6e6c56aa6d6f 100644 --- a/drivers/net/sk98lin/skaddr.c +++ b/drivers/net/sk98lin/skaddr.c | |||
@@ -87,6 +87,21 @@ static const SK_U16 OnesHash[4] = {0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF}; | |||
87 | static int Next0[SK_MAX_MACS] = {0}; | 87 | static int Next0[SK_MAX_MACS] = {0}; |
88 | #endif /* DEBUG */ | 88 | #endif /* DEBUG */ |
89 | 89 | ||
90 | static int SkAddrGmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, | ||
91 | SK_MAC_ADDR *pMc, int Flags); | ||
92 | static int SkAddrGmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, | ||
93 | int Flags); | ||
94 | static int SkAddrGmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber); | ||
95 | static int SkAddrGmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC, | ||
96 | SK_U32 PortNumber, int NewPromMode); | ||
97 | static int SkAddrXmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, | ||
98 | SK_MAC_ADDR *pMc, int Flags); | ||
99 | static int SkAddrXmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, | ||
100 | int Flags); | ||
101 | static int SkAddrXmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber); | ||
102 | static int SkAddrXmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC, | ||
103 | SK_U32 PortNumber, int NewPromMode); | ||
104 | |||
90 | /* functions ******************************************************************/ | 105 | /* functions ******************************************************************/ |
91 | 106 | ||
92 | /****************************************************************************** | 107 | /****************************************************************************** |
@@ -372,7 +387,7 @@ int Flags) /* permanent/non-perm, sw-only */ | |||
372 | * SK_ADDR_SUCCESS | 387 | * SK_ADDR_SUCCESS |
373 | * SK_ADDR_ILLEGAL_PORT | 388 | * SK_ADDR_ILLEGAL_PORT |
374 | */ | 389 | */ |
375 | int SkAddrXmacMcClear( | 390 | static int SkAddrXmacMcClear( |
376 | SK_AC *pAC, /* adapter context */ | 391 | SK_AC *pAC, /* adapter context */ |
377 | SK_IOC IoC, /* I/O context */ | 392 | SK_IOC IoC, /* I/O context */ |
378 | SK_U32 PortNumber, /* Index of affected port */ | 393 | SK_U32 PortNumber, /* Index of affected port */ |
@@ -429,7 +444,7 @@ int Flags) /* permanent/non-perm, sw-only */ | |||
429 | * SK_ADDR_SUCCESS | 444 | * SK_ADDR_SUCCESS |
430 | * SK_ADDR_ILLEGAL_PORT | 445 | * SK_ADDR_ILLEGAL_PORT |
431 | */ | 446 | */ |
432 | int SkAddrGmacMcClear( | 447 | static int SkAddrGmacMcClear( |
433 | SK_AC *pAC, /* adapter context */ | 448 | SK_AC *pAC, /* adapter context */ |
434 | SK_IOC IoC, /* I/O context */ | 449 | SK_IOC IoC, /* I/O context */ |
435 | SK_U32 PortNumber, /* Index of affected port */ | 450 | SK_U32 PortNumber, /* Index of affected port */ |
@@ -519,7 +534,7 @@ int Flags) /* permanent/non-perm, sw-only */ | |||
519 | * Returns: | 534 | * Returns: |
520 | * Hash value of multicast address. | 535 | * Hash value of multicast address. |
521 | */ | 536 | */ |
522 | SK_U32 SkXmacMcHash( | 537 | static SK_U32 SkXmacMcHash( |
523 | unsigned char *pMc) /* Multicast address */ | 538 | unsigned char *pMc) /* Multicast address */ |
524 | { | 539 | { |
525 | SK_U32 Idx; | 540 | SK_U32 Idx; |
@@ -557,7 +572,7 @@ unsigned char *pMc) /* Multicast address */ | |||
557 | * Returns: | 572 | * Returns: |
558 | * Hash value of multicast address. | 573 | * Hash value of multicast address. |
559 | */ | 574 | */ |
560 | SK_U32 SkGmacMcHash( | 575 | static SK_U32 SkGmacMcHash( |
561 | unsigned char *pMc) /* Multicast address */ | 576 | unsigned char *pMc) /* Multicast address */ |
562 | { | 577 | { |
563 | SK_U32 Data; | 578 | SK_U32 Data; |
@@ -672,7 +687,7 @@ int Flags) /* permanent/non-permanent */ | |||
672 | * SK_MC_ILLEGAL_ADDRESS | 687 | * SK_MC_ILLEGAL_ADDRESS |
673 | * SK_MC_RLMT_OVERFLOW | 688 | * SK_MC_RLMT_OVERFLOW |
674 | */ | 689 | */ |
675 | int SkAddrXmacMcAdd( | 690 | static int SkAddrXmacMcAdd( |
676 | SK_AC *pAC, /* adapter context */ | 691 | SK_AC *pAC, /* adapter context */ |
677 | SK_IOC IoC, /* I/O context */ | 692 | SK_IOC IoC, /* I/O context */ |
678 | SK_U32 PortNumber, /* Port Number */ | 693 | SK_U32 PortNumber, /* Port Number */ |
@@ -778,7 +793,7 @@ int Flags) /* permanent/non-permanent */ | |||
778 | * SK_MC_FILTERING_INEXACT | 793 | * SK_MC_FILTERING_INEXACT |
779 | * SK_MC_ILLEGAL_ADDRESS | 794 | * SK_MC_ILLEGAL_ADDRESS |
780 | */ | 795 | */ |
781 | int SkAddrGmacMcAdd( | 796 | static int SkAddrGmacMcAdd( |
782 | SK_AC *pAC, /* adapter context */ | 797 | SK_AC *pAC, /* adapter context */ |
783 | SK_IOC IoC, /* I/O context */ | 798 | SK_IOC IoC, /* I/O context */ |
784 | SK_U32 PortNumber, /* Port Number */ | 799 | SK_U32 PortNumber, /* Port Number */ |
@@ -937,7 +952,7 @@ SK_U32 PortNumber) /* Port Number */ | |||
937 | * SK_MC_FILTERING_INEXACT | 952 | * SK_MC_FILTERING_INEXACT |
938 | * SK_ADDR_ILLEGAL_PORT | 953 | * SK_ADDR_ILLEGAL_PORT |
939 | */ | 954 | */ |
940 | int SkAddrXmacMcUpdate( | 955 | static int SkAddrXmacMcUpdate( |
941 | SK_AC *pAC, /* adapter context */ | 956 | SK_AC *pAC, /* adapter context */ |
942 | SK_IOC IoC, /* I/O context */ | 957 | SK_IOC IoC, /* I/O context */ |
943 | SK_U32 PortNumber) /* Port Number */ | 958 | SK_U32 PortNumber) /* Port Number */ |
@@ -1082,7 +1097,7 @@ SK_U32 PortNumber) /* Port Number */ | |||
1082 | * SK_MC_FILTERING_INEXACT | 1097 | * SK_MC_FILTERING_INEXACT |
1083 | * SK_ADDR_ILLEGAL_PORT | 1098 | * SK_ADDR_ILLEGAL_PORT |
1084 | */ | 1099 | */ |
1085 | int SkAddrGmacMcUpdate( | 1100 | static int SkAddrGmacMcUpdate( |
1086 | SK_AC *pAC, /* adapter context */ | 1101 | SK_AC *pAC, /* adapter context */ |
1087 | SK_IOC IoC, /* I/O context */ | 1102 | SK_IOC IoC, /* I/O context */ |
1088 | SK_U32 PortNumber) /* Port Number */ | 1103 | SK_U32 PortNumber) /* Port Number */ |
@@ -1468,7 +1483,7 @@ int NewPromMode) /* new promiscuous mode */ | |||
1468 | * SK_ADDR_SUCCESS | 1483 | * SK_ADDR_SUCCESS |
1469 | * SK_ADDR_ILLEGAL_PORT | 1484 | * SK_ADDR_ILLEGAL_PORT |
1470 | */ | 1485 | */ |
1471 | int SkAddrXmacPromiscuousChange( | 1486 | static int SkAddrXmacPromiscuousChange( |
1472 | SK_AC *pAC, /* adapter context */ | 1487 | SK_AC *pAC, /* adapter context */ |
1473 | SK_IOC IoC, /* I/O context */ | 1488 | SK_IOC IoC, /* I/O context */ |
1474 | SK_U32 PortNumber, /* port whose promiscuous mode changes */ | 1489 | SK_U32 PortNumber, /* port whose promiscuous mode changes */ |
@@ -1585,7 +1600,7 @@ int NewPromMode) /* new promiscuous mode */ | |||
1585 | * SK_ADDR_SUCCESS | 1600 | * SK_ADDR_SUCCESS |
1586 | * SK_ADDR_ILLEGAL_PORT | 1601 | * SK_ADDR_ILLEGAL_PORT |
1587 | */ | 1602 | */ |
1588 | int SkAddrGmacPromiscuousChange( | 1603 | static int SkAddrGmacPromiscuousChange( |
1589 | SK_AC *pAC, /* adapter context */ | 1604 | SK_AC *pAC, /* adapter context */ |
1590 | SK_IOC IoC, /* I/O context */ | 1605 | SK_IOC IoC, /* I/O context */ |
1591 | SK_U32 PortNumber, /* port whose promiscuous mode changes */ | 1606 | SK_U32 PortNumber, /* port whose promiscuous mode changes */ |
diff --git a/drivers/net/sk98lin/skgeinit.c b/drivers/net/sk98lin/skgeinit.c index 6cb49dd02251..67f1d6a5c15d 100644 --- a/drivers/net/sk98lin/skgeinit.c +++ b/drivers/net/sk98lin/skgeinit.c | |||
@@ -59,34 +59,6 @@ static struct s_Config OemConfig = { | |||
59 | 59 | ||
60 | /****************************************************************************** | 60 | /****************************************************************************** |
61 | * | 61 | * |
62 | * SkGePollRxD() - Enable / Disable Descriptor Polling of RxD Ring | ||
63 | * | ||
64 | * Description: | ||
65 | * Enable or disable the descriptor polling of the receive descriptor | ||
66 | * ring (RxD) for port 'Port'. | ||
67 | * The new configuration is *not* saved over any SkGeStopPort() and | ||
68 | * SkGeInitPort() calls. | ||
69 | * | ||
70 | * Returns: | ||
71 | * nothing | ||
72 | */ | ||
73 | void SkGePollRxD( | ||
74 | SK_AC *pAC, /* adapter context */ | ||
75 | SK_IOC IoC, /* IO context */ | ||
76 | int Port, /* Port Index (MAC_1 + n) */ | ||
77 | SK_BOOL PollRxD) /* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */ | ||
78 | { | ||
79 | SK_GEPORT *pPrt; | ||
80 | |||
81 | pPrt = &pAC->GIni.GP[Port]; | ||
82 | |||
83 | SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), (PollRxD) ? | ||
84 | CSR_ENA_POL : CSR_DIS_POL); | ||
85 | } /* SkGePollRxD */ | ||
86 | |||
87 | |||
88 | /****************************************************************************** | ||
89 | * | ||
90 | * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings | 62 | * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings |
91 | * | 63 | * |
92 | * Description: | 64 | * Description: |
@@ -952,7 +924,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
952 | * Returns: | 924 | * Returns: |
953 | * nothing | 925 | * nothing |
954 | */ | 926 | */ |
955 | void SkGeInitRamIface( | 927 | static void SkGeInitRamIface( |
956 | SK_AC *pAC, /* adapter context */ | 928 | SK_AC *pAC, /* adapter context */ |
957 | SK_IOC IoC) /* IO context */ | 929 | SK_IOC IoC) /* IO context */ |
958 | { | 930 | { |
@@ -1409,83 +1381,6 @@ SK_IOC IoC) /* IO context */ | |||
1409 | 1381 | ||
1410 | } /* SkGeInit0*/ | 1382 | } /* SkGeInit0*/ |
1411 | 1383 | ||
1412 | #ifdef SK_PCI_RESET | ||
1413 | |||
1414 | /****************************************************************************** | ||
1415 | * | ||
1416 | * SkGePciReset() - Reset PCI interface | ||
1417 | * | ||
1418 | * Description: | ||
1419 | * o Read PCI configuration. | ||
1420 | * o Change power state to 3. | ||
1421 | * o Change power state to 0. | ||
1422 | * o Restore PCI configuration. | ||
1423 | * | ||
1424 | * Returns: | ||
1425 | * 0: Success. | ||
1426 | * 1: Power state could not be changed to 3. | ||
1427 | */ | ||
1428 | static int SkGePciReset( | ||
1429 | SK_AC *pAC, /* adapter context */ | ||
1430 | SK_IOC IoC) /* IO context */ | ||
1431 | { | ||
1432 | int i; | ||
1433 | SK_U16 PmCtlSts; | ||
1434 | SK_U32 Bp1; | ||
1435 | SK_U32 Bp2; | ||
1436 | SK_U16 PciCmd; | ||
1437 | SK_U8 Cls; | ||
1438 | SK_U8 Lat; | ||
1439 | SK_U8 ConfigSpace[PCI_CFG_SIZE]; | ||
1440 | |||
1441 | /* | ||
1442 | * Note: Switching to D3 state is like a software reset. | ||
1443 | * Switching from D3 to D0 is a hardware reset. | ||
1444 | * We have to save and restore the configuration space. | ||
1445 | */ | ||
1446 | for (i = 0; i < PCI_CFG_SIZE; i++) { | ||
1447 | SkPciReadCfgDWord(pAC, i*4, &ConfigSpace[i]); | ||
1448 | } | ||
1449 | |||
1450 | /* We know the RAM Interface Arbiter is enabled. */ | ||
1451 | SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D3); | ||
1452 | SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts); | ||
1453 | |||
1454 | if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D3) { | ||
1455 | return(1); | ||
1456 | } | ||
1457 | |||
1458 | /* Return to D0 state. */ | ||
1459 | SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D0); | ||
1460 | |||
1461 | /* Check for D0 state. */ | ||
1462 | SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts); | ||
1463 | |||
1464 | if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D0) { | ||
1465 | return(1); | ||
1466 | } | ||
1467 | |||
1468 | /* Check PCI Config Registers. */ | ||
1469 | SkPciReadCfgWord(pAC, PCI_COMMAND, &PciCmd); | ||
1470 | SkPciReadCfgByte(pAC, PCI_CACHE_LSZ, &Cls); | ||
1471 | SkPciReadCfgDWord(pAC, PCI_BASE_1ST, &Bp1); | ||
1472 | SkPciReadCfgDWord(pAC, PCI_BASE_2ND, &Bp2); | ||
1473 | SkPciReadCfgByte(pAC, PCI_LAT_TIM, &Lat); | ||
1474 | |||
1475 | if (PciCmd != 0 || Cls != (SK_U8)0 || Lat != (SK_U8)0 || | ||
1476 | (Bp1 & 0xfffffff0L) != 0 || Bp2 != 1) { | ||
1477 | return(1); | ||
1478 | } | ||
1479 | |||
1480 | /* Restore PCI Config Space. */ | ||
1481 | for (i = 0; i < PCI_CFG_SIZE; i++) { | ||
1482 | SkPciWriteCfgDWord(pAC, i*4, ConfigSpace[i]); | ||
1483 | } | ||
1484 | |||
1485 | return(0); | ||
1486 | } /* SkGePciReset */ | ||
1487 | |||
1488 | #endif /* SK_PCI_RESET */ | ||
1489 | 1384 | ||
1490 | /****************************************************************************** | 1385 | /****************************************************************************** |
1491 | * | 1386 | * |
@@ -1524,10 +1419,6 @@ SK_IOC IoC) /* IO context */ | |||
1524 | /* save CLK_RUN bits (YUKON-Lite) */ | 1419 | /* save CLK_RUN bits (YUKON-Lite) */ |
1525 | SK_IN16(IoC, B0_CTST, &CtrlStat); | 1420 | SK_IN16(IoC, B0_CTST, &CtrlStat); |
1526 | 1421 | ||
1527 | #ifdef SK_PCI_RESET | ||
1528 | (void)SkGePciReset(pAC, IoC); | ||
1529 | #endif /* SK_PCI_RESET */ | ||
1530 | |||
1531 | /* do the SW-reset */ | 1422 | /* do the SW-reset */ |
1532 | SK_OUT8(IoC, B0_CTST, CS_RST_SET); | 1423 | SK_OUT8(IoC, B0_CTST, CS_RST_SET); |
1533 | 1424 | ||
@@ -1991,11 +1882,6 @@ SK_IOC IoC) /* IO context */ | |||
1991 | int i; | 1882 | int i; |
1992 | SK_U16 Word; | 1883 | SK_U16 Word; |
1993 | 1884 | ||
1994 | #ifdef SK_PHY_LP_MODE | ||
1995 | SK_U8 Byte; | ||
1996 | SK_U16 PmCtlSts; | ||
1997 | #endif /* SK_PHY_LP_MODE */ | ||
1998 | |||
1999 | #if (!defined(SK_SLIM) && !defined(VCPU)) | 1885 | #if (!defined(SK_SLIM) && !defined(VCPU)) |
2000 | /* ensure I2C is ready */ | 1886 | /* ensure I2C is ready */ |
2001 | SkI2cWaitIrq(pAC, IoC); | 1887 | SkI2cWaitIrq(pAC, IoC); |
@@ -2010,38 +1896,6 @@ SK_IOC IoC) /* IO context */ | |||
2010 | } | 1896 | } |
2011 | } | 1897 | } |
2012 | 1898 | ||
2013 | #ifdef SK_PHY_LP_MODE | ||
2014 | /* | ||
2015 | * for power saving purposes within mobile environments | ||
2016 | * we set the PHY to coma mode and switch to D3 power state. | ||
2017 | */ | ||
2018 | if (pAC->GIni.GIYukonLite && | ||
2019 | pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) { | ||
2020 | |||
2021 | /* for all ports switch PHY to coma mode */ | ||
2022 | for (i = 0; i < pAC->GIni.GIMacsFound; i++) { | ||
2023 | |||
2024 | SkGmEnterLowPowerMode(pAC, IoC, i, PHY_PM_DEEP_SLEEP); | ||
2025 | } | ||
2026 | |||
2027 | if (pAC->GIni.GIVauxAvail) { | ||
2028 | /* switch power to VAUX */ | ||
2029 | Byte = PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF; | ||
2030 | |||
2031 | SK_OUT8(IoC, B0_POWER_CTRL, Byte); | ||
2032 | } | ||
2033 | |||
2034 | /* switch to D3 state */ | ||
2035 | SK_IN16(IoC, PCI_C(PCI_PM_CTL_STS), &PmCtlSts); | ||
2036 | |||
2037 | PmCtlSts |= PCI_PM_STATE_D3; | ||
2038 | |||
2039 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2040 | |||
2041 | SK_OUT16(IoC, PCI_C(PCI_PM_CTL_STS), PmCtlSts); | ||
2042 | } | ||
2043 | #endif /* SK_PHY_LP_MODE */ | ||
2044 | |||
2045 | /* Reset all bits in the PCI STATUS register */ | 1899 | /* Reset all bits in the PCI STATUS register */ |
2046 | /* | 1900 | /* |
2047 | * Note: PCI Cfg cycles cannot be used, because they are not | 1901 | * Note: PCI Cfg cycles cannot be used, because they are not |
diff --git a/drivers/net/sk98lin/skgemib.c b/drivers/net/sk98lin/skgemib.c index 2991bc85cf2c..0a6f67a7a395 100644 --- a/drivers/net/sk98lin/skgemib.c +++ b/drivers/net/sk98lin/skgemib.c | |||
@@ -871,13 +871,6 @@ PNMI_STATIC const SK_PNMI_TAB_ENTRY IdTable[] = { | |||
871 | sizeof(SK_PNMI_CONF), | 871 | sizeof(SK_PNMI_CONF), |
872 | SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType), | 872 | SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType), |
873 | SK_PNMI_RO, MacPrivateConf, 0}, | 873 | SK_PNMI_RO, MacPrivateConf, 0}, |
874 | #ifdef SK_PHY_LP_MODE | ||
875 | {OID_SKGE_PHY_LP_MODE, | ||
876 | SK_PNMI_MAC_ENTRIES, | ||
877 | sizeof(SK_PNMI_CONF), | ||
878 | SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyMode), | ||
879 | SK_PNMI_RW, MacPrivateConf, 0}, | ||
880 | #endif | ||
881 | {OID_SKGE_LINK_CAP, | 874 | {OID_SKGE_LINK_CAP, |
882 | SK_PNMI_MAC_ENTRIES, | 875 | SK_PNMI_MAC_ENTRIES, |
883 | sizeof(SK_PNMI_CONF), | 876 | sizeof(SK_PNMI_CONF), |
diff --git a/drivers/net/sk98lin/skgepnmi.c b/drivers/net/sk98lin/skgepnmi.c index a386172107e8..b36dd9ac6b29 100644 --- a/drivers/net/sk98lin/skgepnmi.c +++ b/drivers/net/sk98lin/skgepnmi.c | |||
@@ -56,10 +56,6 @@ static const char SysKonnectFileId[] = | |||
56 | * Public Function prototypes | 56 | * Public Function prototypes |
57 | */ | 57 | */ |
58 | int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level); | 58 | int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level); |
59 | int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, | ||
60 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | ||
61 | int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, | ||
62 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | ||
63 | int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, | 59 | int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, |
64 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); | 60 | unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); |
65 | int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, | 61 | int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, |
@@ -587,7 +583,7 @@ int Level) /* Initialization level */ | |||
587 | * exist (e.g. port instance 3 on a two port | 583 | * exist (e.g. port instance 3 on a two port |
588 | * adapter. | 584 | * adapter. |
589 | */ | 585 | */ |
590 | int SkPnmiGetVar( | 586 | static int SkPnmiGetVar( |
591 | SK_AC *pAC, /* Pointer to adapter context */ | 587 | SK_AC *pAC, /* Pointer to adapter context */ |
592 | SK_IOC IoC, /* IO context handle */ | 588 | SK_IOC IoC, /* IO context handle */ |
593 | SK_U32 Id, /* Object ID that is to be processed */ | 589 | SK_U32 Id, /* Object ID that is to be processed */ |
@@ -629,7 +625,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
629 | * exist (e.g. port instance 3 on a two port | 625 | * exist (e.g. port instance 3 on a two port |
630 | * adapter. | 626 | * adapter. |
631 | */ | 627 | */ |
632 | int SkPnmiPreSetVar( | 628 | static int SkPnmiPreSetVar( |
633 | SK_AC *pAC, /* Pointer to adapter context */ | 629 | SK_AC *pAC, /* Pointer to adapter context */ |
634 | SK_IOC IoC, /* IO context handle */ | 630 | SK_IOC IoC, /* IO context handle */ |
635 | SK_U32 Id, /* Object ID that is to be processed */ | 631 | SK_U32 Id, /* Object ID that is to be processed */ |
@@ -5062,9 +5058,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
5062 | case OID_SKGE_SPEED_CAP: | 5058 | case OID_SKGE_SPEED_CAP: |
5063 | case OID_SKGE_SPEED_MODE: | 5059 | case OID_SKGE_SPEED_MODE: |
5064 | case OID_SKGE_SPEED_STATUS: | 5060 | case OID_SKGE_SPEED_STATUS: |
5065 | #ifdef SK_PHY_LP_MODE | ||
5066 | case OID_SKGE_PHY_LP_MODE: | ||
5067 | #endif | ||
5068 | if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) { | 5061 | if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) { |
5069 | 5062 | ||
5070 | *pLen = (Limit - LogPortIndex) * sizeof(SK_U8); | 5063 | *pLen = (Limit - LogPortIndex) * sizeof(SK_U8); |
@@ -5140,28 +5133,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
5140 | Offset += sizeof(SK_U32); | 5133 | Offset += sizeof(SK_U32); |
5141 | break; | 5134 | break; |
5142 | 5135 | ||
5143 | #ifdef SK_PHY_LP_MODE | ||
5144 | case OID_SKGE_PHY_LP_MODE: | ||
5145 | if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ | ||
5146 | if (LogPortIndex == 0) { | ||
5147 | continue; | ||
5148 | } | ||
5149 | else { | ||
5150 | /* Get value for physical ports */ | ||
5151 | PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex); | ||
5152 | Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState; | ||
5153 | *pBufPtr = Val8; | ||
5154 | } | ||
5155 | } | ||
5156 | else { /* DualNetMode */ | ||
5157 | |||
5158 | Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState; | ||
5159 | *pBufPtr = Val8; | ||
5160 | } | ||
5161 | Offset += sizeof(SK_U8); | ||
5162 | break; | ||
5163 | #endif | ||
5164 | |||
5165 | case OID_SKGE_LINK_CAP: | 5136 | case OID_SKGE_LINK_CAP: |
5166 | if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ | 5137 | if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ |
5167 | if (LogPortIndex == 0) { | 5138 | if (LogPortIndex == 0) { |
@@ -5478,16 +5449,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
5478 | } | 5449 | } |
5479 | break; | 5450 | break; |
5480 | 5451 | ||
5481 | #ifdef SK_PHY_LP_MODE | ||
5482 | case OID_SKGE_PHY_LP_MODE: | ||
5483 | if (*pLen < Limit - LogPortIndex) { | ||
5484 | |||
5485 | *pLen = Limit - LogPortIndex; | ||
5486 | return (SK_PNMI_ERR_TOO_SHORT); | ||
5487 | } | ||
5488 | break; | ||
5489 | #endif | ||
5490 | |||
5491 | case OID_SKGE_MTU: | 5452 | case OID_SKGE_MTU: |
5492 | if (*pLen < sizeof(SK_U32)) { | 5453 | if (*pLen < sizeof(SK_U32)) { |
5493 | 5454 | ||
@@ -5845,116 +5806,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ | |||
5845 | Offset += sizeof(SK_U32); | 5806 | Offset += sizeof(SK_U32); |
5846 | break; | 5807 | break; |
5847 | 5808 | ||
5848 | #ifdef SK_PHY_LP_MODE | ||
5849 | case OID_SKGE_PHY_LP_MODE: | ||
5850 | /* The preset ends here */ | ||
5851 | if (Action == SK_PNMI_PRESET) { | ||
5852 | |||
5853 | return (SK_PNMI_ERR_OK); | ||
5854 | } | ||
5855 | |||
5856 | if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ | ||
5857 | if (LogPortIndex == 0) { | ||
5858 | Offset = 0; | ||
5859 | continue; | ||
5860 | } | ||
5861 | else { | ||
5862 | /* Set value for physical ports */ | ||
5863 | PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex); | ||
5864 | |||
5865 | switch (*(pBuf + Offset)) { | ||
5866 | case 0: | ||
5867 | /* If LowPowerMode is active, we can leave it. */ | ||
5868 | if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) { | ||
5869 | |||
5870 | Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex); | ||
5871 | |||
5872 | if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3) { | ||
5873 | |||
5874 | SkDrvInitAdapter(pAC); | ||
5875 | } | ||
5876 | break; | ||
5877 | } | ||
5878 | else { | ||
5879 | *pLen = 0; | ||
5880 | return (SK_PNMI_ERR_GENERAL); | ||
5881 | } | ||
5882 | case 1: | ||
5883 | case 2: | ||
5884 | case 3: | ||
5885 | case 4: | ||
5886 | /* If no LowPowerMode is active, we can enter it. */ | ||
5887 | if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) { | ||
5888 | |||
5889 | if ((*(pBuf + Offset)) < 3) { | ||
5890 | |||
5891 | SkDrvDeInitAdapter(pAC); | ||
5892 | } | ||
5893 | |||
5894 | Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf); | ||
5895 | break; | ||
5896 | } | ||
5897 | else { | ||
5898 | *pLen = 0; | ||
5899 | return (SK_PNMI_ERR_GENERAL); | ||
5900 | } | ||
5901 | default: | ||
5902 | *pLen = 0; | ||
5903 | return (SK_PNMI_ERR_BAD_VALUE); | ||
5904 | } | ||
5905 | } | ||
5906 | } | ||
5907 | else { /* DualNetMode */ | ||
5908 | |||
5909 | switch (*(pBuf + Offset)) { | ||
5910 | case 0: | ||
5911 | /* If we are in a LowPowerMode, we can leave it. */ | ||
5912 | if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) { | ||
5913 | |||
5914 | Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex); | ||
5915 | |||
5916 | if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3) { | ||
5917 | |||
5918 | SkDrvInitAdapter(pAC); | ||
5919 | } | ||
5920 | break; | ||
5921 | } | ||
5922 | else { | ||
5923 | *pLen = 0; | ||
5924 | return (SK_PNMI_ERR_GENERAL); | ||
5925 | } | ||
5926 | |||
5927 | case 1: | ||
5928 | case 2: | ||
5929 | case 3: | ||
5930 | case 4: | ||
5931 | /* If we are not already in LowPowerMode, we can enter it. */ | ||
5932 | if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) { | ||
5933 | |||
5934 | if ((*(pBuf + Offset)) < 3) { | ||
5935 | |||
5936 | SkDrvDeInitAdapter(pAC); | ||
5937 | } | ||
5938 | else { | ||
5939 | |||
5940 | Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf); | ||
5941 | } | ||
5942 | break; | ||
5943 | } | ||
5944 | else { | ||
5945 | *pLen = 0; | ||
5946 | return (SK_PNMI_ERR_GENERAL); | ||
5947 | } | ||
5948 | |||
5949 | default: | ||
5950 | *pLen = 0; | ||
5951 | return (SK_PNMI_ERR_BAD_VALUE); | ||
5952 | } | ||
5953 | } | ||
5954 | Offset += sizeof(SK_U8); | ||
5955 | break; | ||
5956 | #endif | ||
5957 | |||
5958 | default: | 5809 | default: |
5959 | SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, | 5810 | SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, |
5960 | ("MacPrivateConf: Unknown OID should be handled before set")); | 5811 | ("MacPrivateConf: Unknown OID should be handled before set")); |
diff --git a/drivers/net/sk98lin/skgesirq.c b/drivers/net/sk98lin/skgesirq.c index 87520f0057d7..ab66d80a4455 100644 --- a/drivers/net/sk98lin/skgesirq.c +++ b/drivers/net/sk98lin/skgesirq.c | |||
@@ -265,7 +265,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
265 | * | 265 | * |
266 | * Returns: N/A | 266 | * Returns: N/A |
267 | */ | 267 | */ |
268 | void SkHWLinkUp( | 268 | static void SkHWLinkUp( |
269 | SK_AC *pAC, /* adapter context */ | 269 | SK_AC *pAC, /* adapter context */ |
270 | SK_IOC IoC, /* IO context */ | 270 | SK_IOC IoC, /* IO context */ |
271 | int Port) /* Port Index (MAC_1 + n) */ | 271 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -612,14 +612,6 @@ SK_U32 Istatus) /* Interrupt status word */ | |||
612 | * we ignore those | 612 | * we ignore those |
613 | */ | 613 | */ |
614 | pPrt->HalfDupTimerActive = SK_TRUE; | 614 | pPrt->HalfDupTimerActive = SK_TRUE; |
615 | #ifdef XXX | ||
616 | Len = sizeof(SK_U64); | ||
617 | SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, | ||
618 | &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 0), | ||
619 | pAC->Rlmt.Port[0].Net->NetNumber); | ||
620 | |||
621 | pPrt->LastOctets = Octets; | ||
622 | #endif /* XXX */ | ||
623 | /* Snap statistic counters */ | 615 | /* Snap statistic counters */ |
624 | (void)SkXmUpdateStats(pAC, IoC, 0); | 616 | (void)SkXmUpdateStats(pAC, IoC, 0); |
625 | 617 | ||
@@ -653,14 +645,6 @@ SK_U32 Istatus) /* Interrupt status word */ | |||
653 | pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && | 645 | pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && |
654 | !pPrt->HalfDupTimerActive) { | 646 | !pPrt->HalfDupTimerActive) { |
655 | pPrt->HalfDupTimerActive = SK_TRUE; | 647 | pPrt->HalfDupTimerActive = SK_TRUE; |
656 | #ifdef XXX | ||
657 | Len = sizeof(SK_U64); | ||
658 | SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, | ||
659 | &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 1), | ||
660 | pAC->Rlmt.Port[1].Net->NetNumber); | ||
661 | |||
662 | pPrt->LastOctets = Octets; | ||
663 | #endif /* XXX */ | ||
664 | /* Snap statistic counters */ | 648 | /* Snap statistic counters */ |
665 | (void)SkXmUpdateStats(pAC, IoC, 1); | 649 | (void)SkXmUpdateStats(pAC, IoC, 1); |
666 | 650 | ||
@@ -2085,12 +2069,6 @@ SK_EVPARA Para) /* Event specific Parameter */ | |||
2085 | pPrt->HalfDupTimerActive = SK_FALSE; | 2069 | pPrt->HalfDupTimerActive = SK_FALSE; |
2086 | if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || | 2070 | if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || |
2087 | pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { | 2071 | pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { |
2088 | #ifdef XXX | ||
2089 | Len = sizeof(SK_U64); | ||
2090 | SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, | ||
2091 | &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), | ||
2092 | pAC->Rlmt.Port[Port].Net->NetNumber); | ||
2093 | #endif /* XXX */ | ||
2094 | /* Snap statistic counters */ | 2072 | /* Snap statistic counters */ |
2095 | (void)SkXmUpdateStats(pAC, IoC, Port); | 2073 | (void)SkXmUpdateStats(pAC, IoC, Port); |
2096 | 2074 | ||
diff --git a/drivers/net/sk98lin/ski2c.c b/drivers/net/sk98lin/ski2c.c index 075a0464e56b..79bf57cb5326 100644 --- a/drivers/net/sk98lin/ski2c.c +++ b/drivers/net/sk98lin/ski2c.c | |||
@@ -396,7 +396,7 @@ int Rw) /* Read / Write Flag */ | |||
396 | * 1: error, transfer does not complete, I2C transfer | 396 | * 1: error, transfer does not complete, I2C transfer |
397 | * killed, wait loop terminated. | 397 | * killed, wait loop terminated. |
398 | */ | 398 | */ |
399 | int SkI2cWait( | 399 | static int SkI2cWait( |
400 | SK_AC *pAC, /* Adapter Context */ | 400 | SK_AC *pAC, /* Adapter Context */ |
401 | SK_IOC IoC, /* I/O Context */ | 401 | SK_IOC IoC, /* I/O Context */ |
402 | int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */ | 402 | int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */ |
@@ -481,7 +481,7 @@ SK_IOC IoC) /* I/O Context */ | |||
481 | * returns 0: success | 481 | * returns 0: success |
482 | * 1: error | 482 | * 1: error |
483 | */ | 483 | */ |
484 | int SkI2cWrite( | 484 | static int SkI2cWrite( |
485 | SK_AC *pAC, /* Adapter Context */ | 485 | SK_AC *pAC, /* Adapter Context */ |
486 | SK_IOC IoC, /* I/O Context */ | 486 | SK_IOC IoC, /* I/O Context */ |
487 | SK_U32 I2cData, /* I2C Data to write */ | 487 | SK_U32 I2cData, /* I2C Data to write */ |
@@ -538,7 +538,7 @@ int I2cBurst) /* I2C Burst Flag */ | |||
538 | * 1 if the read is completed | 538 | * 1 if the read is completed |
539 | * 0 if the read must be continued (I2C Bus still allocated) | 539 | * 0 if the read must be continued (I2C Bus still allocated) |
540 | */ | 540 | */ |
541 | int SkI2cReadSensor( | 541 | static int SkI2cReadSensor( |
542 | SK_AC *pAC, /* Adapter Context */ | 542 | SK_AC *pAC, /* Adapter Context */ |
543 | SK_IOC IoC, /* I/O Context */ | 543 | SK_IOC IoC, /* I/O Context */ |
544 | SK_SENSOR *pSen) /* Sensor to be read */ | 544 | SK_SENSOR *pSen) /* Sensor to be read */ |
diff --git a/drivers/net/sk98lin/sklm80.c b/drivers/net/sk98lin/sklm80.c index 68292d18175b..a204f5bb55d4 100644 --- a/drivers/net/sk98lin/sklm80.c +++ b/drivers/net/sk98lin/sklm80.c | |||
@@ -34,79 +34,7 @@ static const char SysKonnectFileId[] = | |||
34 | #include "h/lm80.h" | 34 | #include "h/lm80.h" |
35 | #include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ | 35 | #include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ |
36 | 36 | ||
37 | #ifdef SK_DIAG | ||
38 | #define BREAK_OR_WAIT(pAC,IoC,Event) SkI2cWait(pAC,IoC,Event) | ||
39 | #else /* nSK_DIAG */ | ||
40 | #define BREAK_OR_WAIT(pAC,IoC,Event) break | 37 | #define BREAK_OR_WAIT(pAC,IoC,Event) break |
41 | #endif /* nSK_DIAG */ | ||
42 | |||
43 | #ifdef SK_DIAG | ||
44 | /* | ||
45 | * read the register 'Reg' from the device 'Dev' | ||
46 | * | ||
47 | * return read error -1 | ||
48 | * success the read value | ||
49 | */ | ||
50 | int SkLm80RcvReg( | ||
51 | SK_IOC IoC, /* Adapter Context */ | ||
52 | int Dev, /* I2C device address */ | ||
53 | int Reg) /* register to read */ | ||
54 | { | ||
55 | int Val = 0; | ||
56 | int TempExt; | ||
57 | |||
58 | /* Signal device number */ | ||
59 | if (SkI2cSndDev(IoC, Dev, I2C_WRITE)) { | ||
60 | return(-1); | ||
61 | } | ||
62 | |||
63 | if (SkI2cSndByte(IoC, Reg)) { | ||
64 | return(-1); | ||
65 | } | ||
66 | |||
67 | /* repeat start */ | ||
68 | if (SkI2cSndDev(IoC, Dev, I2C_READ)) { | ||
69 | return(-1); | ||
70 | } | ||
71 | |||
72 | switch (Reg) { | ||
73 | case LM80_TEMP_IN: | ||
74 | Val = (int)SkI2cRcvByte(IoC, 1); | ||
75 | |||
76 | /* First: correct the value: it might be negative */ | ||
77 | if ((Val & 0x80) != 0) { | ||
78 | /* Value is negative */ | ||
79 | Val = Val - 256; | ||
80 | } | ||
81 | Val = Val * SK_LM80_TEMP_LSB; | ||
82 | SkI2cStop(IoC); | ||
83 | |||
84 | TempExt = (int)SkLm80RcvReg(IoC, LM80_ADDR, LM80_TEMP_CTRL); | ||
85 | |||
86 | if (Val > 0) { | ||
87 | Val += ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB); | ||
88 | } | ||
89 | else { | ||
90 | Val -= ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB); | ||
91 | } | ||
92 | return(Val); | ||
93 | break; | ||
94 | case LM80_VT0_IN: | ||
95 | case LM80_VT1_IN: | ||
96 | case LM80_VT2_IN: | ||
97 | case LM80_VT3_IN: | ||
98 | Val = (int)SkI2cRcvByte(IoC, 1) * SK_LM80_VT_LSB; | ||
99 | break; | ||
100 | |||
101 | default: | ||
102 | Val = (int)SkI2cRcvByte(IoC, 1); | ||
103 | break; | ||
104 | } | ||
105 | |||
106 | SkI2cStop(IoC); | ||
107 | return(Val); | ||
108 | } | ||
109 | #endif /* SK_DIAG */ | ||
110 | 38 | ||
111 | /* | 39 | /* |
112 | * read a sensors value (LM80 specific) | 40 | * read a sensors value (LM80 specific) |
diff --git a/drivers/net/sk98lin/skrlmt.c b/drivers/net/sk98lin/skrlmt.c index 9ea11ab2296a..be8d1ccddf6d 100644 --- a/drivers/net/sk98lin/skrlmt.c +++ b/drivers/net/sk98lin/skrlmt.c | |||
@@ -282,7 +282,6 @@ typedef struct s_SpTreeRlmtPacket { | |||
282 | 282 | ||
283 | SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}}; | 283 | SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}}; |
284 | SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}}; | 284 | SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}}; |
285 | SK_MAC_ADDR BcAddr = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}; | ||
286 | 285 | ||
287 | /* local variables ************************************************************/ | 286 | /* local variables ************************************************************/ |
288 | 287 | ||
diff --git a/drivers/net/sk98lin/skvpd.c b/drivers/net/sk98lin/skvpd.c index eb3c8988ced1..17786056c66a 100644 --- a/drivers/net/sk98lin/skvpd.c +++ b/drivers/net/sk98lin/skvpd.c | |||
@@ -132,65 +132,6 @@ int addr) /* VPD address */ | |||
132 | 132 | ||
133 | #endif /* SKDIAG */ | 133 | #endif /* SKDIAG */ |
134 | 134 | ||
135 | #if 0 | ||
136 | |||
137 | /* | ||
138 | Write the dword 'data' at address 'addr' into the VPD EEPROM, and | ||
139 | verify that the data is written. | ||
140 | |||
141 | Needed Time: | ||
142 | |||
143 | . MIN MAX | ||
144 | . ------------------------------------------------------------------- | ||
145 | . write 1.8 ms 3.6 ms | ||
146 | . internal write cyles 0.7 ms 7.0 ms | ||
147 | . ------------------------------------------------------------------- | ||
148 | . over all program time 2.5 ms 10.6 ms | ||
149 | . read 1.3 ms 2.6 ms | ||
150 | . ------------------------------------------------------------------- | ||
151 | . over all 3.8 ms 13.2 ms | ||
152 | . | ||
153 | |||
154 | |||
155 | Returns 0: success | ||
156 | 1: error, I2C transfer does not terminate | ||
157 | 2: error, data verify error | ||
158 | |||
159 | */ | ||
160 | static int VpdWriteDWord( | ||
161 | SK_AC *pAC, /* pAC pointer */ | ||
162 | SK_IOC IoC, /* IO Context */ | ||
163 | int addr, /* VPD address */ | ||
164 | SK_U32 data) /* VPD data to write */ | ||
165 | { | ||
166 | /* start VPD write */ | ||
167 | /* Don't swap here, it's a data stream of bytes */ | ||
168 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, | ||
169 | ("VPD write dword at addr 0x%x, data = 0x%x\n",addr,data)); | ||
170 | VPD_OUT32(pAC, IoC, PCI_VPD_DAT_REG, (SK_U32)data); | ||
171 | /* But do it here */ | ||
172 | addr |= VPD_WRITE; | ||
173 | |||
174 | VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)(addr | VPD_WRITE)); | ||
175 | |||
176 | /* this may take up to 10,6 ms */ | ||
177 | if (VpdWait(pAC, IoC, VPD_WRITE)) { | ||
178 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, | ||
179 | ("Write Timed Out\n")); | ||
180 | return(1); | ||
181 | }; | ||
182 | |||
183 | /* verify data */ | ||
184 | if (VpdReadDWord(pAC, IoC, addr) != data) { | ||
185 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, | ||
186 | ("Data Verify Error\n")); | ||
187 | return(2); | ||
188 | } | ||
189 | return(0); | ||
190 | } /* VpdWriteDWord */ | ||
191 | |||
192 | #endif /* 0 */ | ||
193 | |||
194 | /* | 135 | /* |
195 | * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from | 136 | * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from |
196 | * or to the I2C EEPROM. | 137 | * or to the I2C EEPROM. |
@@ -728,7 +669,7 @@ char *etp) /* end pointer input position */ | |||
728 | * 6: fatal VPD error | 669 | * 6: fatal VPD error |
729 | * | 670 | * |
730 | */ | 671 | */ |
731 | int VpdSetupPara( | 672 | static int VpdSetupPara( |
732 | SK_AC *pAC, /* common data base */ | 673 | SK_AC *pAC, /* common data base */ |
733 | const char *key, /* keyword to insert */ | 674 | const char *key, /* keyword to insert */ |
734 | const char *buf, /* buffer with the keyword value */ | 675 | const char *buf, /* buffer with the keyword value */ |
@@ -1148,50 +1089,3 @@ SK_IOC IoC) /* IO Context */ | |||
1148 | return(0); | 1089 | return(0); |
1149 | } | 1090 | } |
1150 | 1091 | ||
1151 | |||
1152 | |||
1153 | /* | ||
1154 | * Read the contents of the VPD EEPROM and copy it to the VPD buffer | ||
1155 | * if not already done. If the keyword "VF" is not present it will be | ||
1156 | * created and the error log message will be stored to this keyword. | ||
1157 | * If "VF" is not present the error log message will be stored to the | ||
1158 | * keyword "VL". "VL" will created or overwritten if "VF" is present. | ||
1159 | * The VPD read/write area is saved to the VPD EEPROM. | ||
1160 | * | ||
1161 | * returns nothing, errors will be ignored. | ||
1162 | */ | ||
1163 | void VpdErrLog( | ||
1164 | SK_AC *pAC, /* common data base */ | ||
1165 | SK_IOC IoC, /* IO Context */ | ||
1166 | char *msg) /* error log message */ | ||
1167 | { | ||
1168 | SK_VPD_PARA *v, vf; /* VF */ | ||
1169 | int len; | ||
1170 | |||
1171 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, | ||
1172 | ("VPD error log msg %s\n", msg)); | ||
1173 | if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { | ||
1174 | if (VpdInit(pAC, IoC) != 0) { | ||
1175 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, | ||
1176 | ("VPD init error\n")); | ||
1177 | return; | ||
1178 | } | ||
1179 | } | ||
1180 | |||
1181 | len = strlen(msg); | ||
1182 | if (len > VPD_MAX_LEN) { | ||
1183 | /* cut it */ | ||
1184 | len = VPD_MAX_LEN; | ||
1185 | } | ||
1186 | if ((v = vpd_find_para(pAC, VPD_VF, &vf)) != NULL) { | ||
1187 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("overwrite VL\n")); | ||
1188 | (void)VpdSetupPara(pAC, VPD_VL, msg, len, VPD_RW_KEY, OWR_KEY); | ||
1189 | } | ||
1190 | else { | ||
1191 | SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("write VF\n")); | ||
1192 | (void)VpdSetupPara(pAC, VPD_VF, msg, len, VPD_RW_KEY, ADD_KEY); | ||
1193 | } | ||
1194 | |||
1195 | (void)VpdUpdate(pAC, IoC); | ||
1196 | } | ||
1197 | |||
diff --git a/drivers/net/sk98lin/skxmac2.c b/drivers/net/sk98lin/skxmac2.c index 42d2d963150a..b4e75022a657 100644 --- a/drivers/net/sk98lin/skxmac2.c +++ b/drivers/net/sk98lin/skxmac2.c | |||
@@ -41,13 +41,13 @@ static const char SysKonnectFileId[] = | |||
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #ifdef GENESIS | 43 | #ifdef GENESIS |
44 | BCOM_HACK BcomRegA1Hack[] = { | 44 | static BCOM_HACK BcomRegA1Hack[] = { |
45 | { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, | 45 | { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, |
46 | { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, | 46 | { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, |
47 | { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, | 47 | { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, |
48 | { 0, 0 } | 48 | { 0, 0 } |
49 | }; | 49 | }; |
50 | BCOM_HACK BcomRegC0Hack[] = { | 50 | static BCOM_HACK BcomRegC0Hack[] = { |
51 | { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 }, | 51 | { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 }, |
52 | { 0x15, 0x0A04 }, { 0x18, 0x0420 }, | 52 | { 0x15, 0x0A04 }, { 0x18, 0x0420 }, |
53 | { 0, 0 } | 53 | { 0, 0 } |
@@ -790,7 +790,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
790 | * Returns: | 790 | * Returns: |
791 | * nothing | 791 | * nothing |
792 | */ | 792 | */ |
793 | void SkMacFlushRxFifo( | 793 | static void SkMacFlushRxFifo( |
794 | SK_AC *pAC, /* adapter context */ | 794 | SK_AC *pAC, /* adapter context */ |
795 | SK_IOC IoC, /* IO context */ | 795 | SK_IOC IoC, /* IO context */ |
796 | int Port) /* Port Index (MAC_1 + n) */ | 796 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -1231,38 +1231,6 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
1231 | } /* SkMacHardRst */ | 1231 | } /* SkMacHardRst */ |
1232 | 1232 | ||
1233 | 1233 | ||
1234 | /****************************************************************************** | ||
1235 | * | ||
1236 | * SkMacClearRst() - Clear the MAC reset | ||
1237 | * | ||
1238 | * Description: calls a clear MAC reset routine dep. on board type | ||
1239 | * | ||
1240 | * Returns: | ||
1241 | * nothing | ||
1242 | */ | ||
1243 | void SkMacClearRst( | ||
1244 | SK_AC *pAC, /* adapter context */ | ||
1245 | SK_IOC IoC, /* IO context */ | ||
1246 | int Port) /* Port Index (MAC_1 + n) */ | ||
1247 | { | ||
1248 | |||
1249 | #ifdef GENESIS | ||
1250 | if (pAC->GIni.GIGenesis) { | ||
1251 | |||
1252 | SkXmClearRst(pAC, IoC, Port); | ||
1253 | } | ||
1254 | #endif /* GENESIS */ | ||
1255 | |||
1256 | #ifdef YUKON | ||
1257 | if (pAC->GIni.GIYukon) { | ||
1258 | |||
1259 | SkGmClearRst(pAC, IoC, Port); | ||
1260 | } | ||
1261 | #endif /* YUKON */ | ||
1262 | |||
1263 | } /* SkMacClearRst */ | ||
1264 | |||
1265 | |||
1266 | #ifdef GENESIS | 1234 | #ifdef GENESIS |
1267 | /****************************************************************************** | 1235 | /****************************************************************************** |
1268 | * | 1236 | * |
@@ -1713,7 +1681,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
1713 | * Returns: | 1681 | * Returns: |
1714 | * nothing | 1682 | * nothing |
1715 | */ | 1683 | */ |
1716 | void SkXmInitDupMd( | 1684 | static void SkXmInitDupMd( |
1717 | SK_AC *pAC, /* adapter context */ | 1685 | SK_AC *pAC, /* adapter context */ |
1718 | SK_IOC IoC, /* IO context */ | 1686 | SK_IOC IoC, /* IO context */ |
1719 | int Port) /* Port Index (MAC_1 + n) */ | 1687 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -1761,7 +1729,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
1761 | * Returns: | 1729 | * Returns: |
1762 | * nothing | 1730 | * nothing |
1763 | */ | 1731 | */ |
1764 | void SkXmInitPauseMd( | 1732 | static void SkXmInitPauseMd( |
1765 | SK_AC *pAC, /* adapter context */ | 1733 | SK_AC *pAC, /* adapter context */ |
1766 | SK_IOC IoC, /* IO context */ | 1734 | SK_IOC IoC, /* IO context */ |
1767 | int Port) /* Port Index (MAC_1 + n) */ | 1735 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -2076,283 +2044,7 @@ SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ | |||
2076 | } /* SkXmInitPhyBcom */ | 2044 | } /* SkXmInitPhyBcom */ |
2077 | #endif /* GENESIS */ | 2045 | #endif /* GENESIS */ |
2078 | 2046 | ||
2079 | |||
2080 | #ifdef YUKON | 2047 | #ifdef YUKON |
2081 | #ifndef SK_SLIM | ||
2082 | /****************************************************************************** | ||
2083 | * | ||
2084 | * SkGmEnterLowPowerMode() | ||
2085 | * | ||
2086 | * Description: | ||
2087 | * This function sets the Marvell Alaska PHY to the low power mode | ||
2088 | * given by parameter mode. | ||
2089 | * The following low power modes are available: | ||
2090 | * | ||
2091 | * - Coma Mode (Deep Sleep): | ||
2092 | * Power consumption: ~15 - 30 mW | ||
2093 | * The PHY cannot wake up on its own. | ||
2094 | * | ||
2095 | * - IEEE 22.2.4.1.5 compatible power down mode | ||
2096 | * Power consumption: ~240 mW | ||
2097 | * The PHY cannot wake up on its own. | ||
2098 | * | ||
2099 | * - energy detect mode | ||
2100 | * Power consumption: ~160 mW | ||
2101 | * The PHY can wake up on its own by detecting activity | ||
2102 | * on the CAT 5 cable. | ||
2103 | * | ||
2104 | * - energy detect plus mode | ||
2105 | * Power consumption: ~150 mW | ||
2106 | * The PHY can wake up on its own by detecting activity | ||
2107 | * on the CAT 5 cable. | ||
2108 | * Connected devices can be woken up by sending normal link | ||
2109 | * pulses every one second. | ||
2110 | * | ||
2111 | * Note: | ||
2112 | * | ||
2113 | * Returns: | ||
2114 | * 0: ok | ||
2115 | * 1: error | ||
2116 | */ | ||
2117 | int SkGmEnterLowPowerMode( | ||
2118 | SK_AC *pAC, /* adapter context */ | ||
2119 | SK_IOC IoC, /* IO context */ | ||
2120 | int Port, /* Port Index (e.g. MAC_1) */ | ||
2121 | SK_U8 Mode) /* low power mode */ | ||
2122 | { | ||
2123 | SK_U16 Word; | ||
2124 | SK_U32 DWord; | ||
2125 | SK_U8 LastMode; | ||
2126 | int Ret = 0; | ||
2127 | |||
2128 | if (pAC->GIni.GIYukonLite && | ||
2129 | pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) { | ||
2130 | |||
2131 | /* save current power mode */ | ||
2132 | LastMode = pAC->GIni.GP[Port].PPhyPowerState; | ||
2133 | pAC->GIni.GP[Port].PPhyPowerState = Mode; | ||
2134 | |||
2135 | switch (Mode) { | ||
2136 | /* coma mode (deep sleep) */ | ||
2137 | case PHY_PM_DEEP_SLEEP: | ||
2138 | /* setup General Purpose Control Register */ | ||
2139 | GM_OUT16(IoC, 0, GM_GP_CTRL, GM_GPCR_FL_PASS | | ||
2140 | GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS); | ||
2141 | |||
2142 | /* apply COMA mode workaround */ | ||
2143 | SkGmPhyWrite(pAC, IoC, Port, 29, 0x001f); | ||
2144 | SkGmPhyWrite(pAC, IoC, Port, 30, 0xfff3); | ||
2145 | |||
2146 | SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord); | ||
2147 | |||
2148 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2149 | |||
2150 | /* Set PHY to Coma Mode */ | ||
2151 | SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord | PCI_PHY_COMA); | ||
2152 | |||
2153 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2154 | |||
2155 | break; | ||
2156 | |||
2157 | /* IEEE 22.2.4.1.5 compatible power down mode */ | ||
2158 | case PHY_PM_IEEE_POWER_DOWN: | ||
2159 | /* | ||
2160 | * - disable MAC 125 MHz clock | ||
2161 | * - allow MAC power down | ||
2162 | */ | ||
2163 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2164 | Word |= PHY_M_PC_DIS_125CLK; | ||
2165 | Word &= ~PHY_M_PC_MAC_POW_UP; | ||
2166 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2167 | |||
2168 | /* | ||
2169 | * register changes must be followed by a software | ||
2170 | * reset to take effect | ||
2171 | */ | ||
2172 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word); | ||
2173 | Word |= PHY_CT_RESET; | ||
2174 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word); | ||
2175 | |||
2176 | /* switch IEEE compatible power down mode on */ | ||
2177 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word); | ||
2178 | Word |= PHY_CT_PDOWN; | ||
2179 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word); | ||
2180 | break; | ||
2181 | |||
2182 | /* energy detect and energy detect plus mode */ | ||
2183 | case PHY_PM_ENERGY_DETECT: | ||
2184 | case PHY_PM_ENERGY_DETECT_PLUS: | ||
2185 | /* | ||
2186 | * - disable MAC 125 MHz clock | ||
2187 | */ | ||
2188 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2189 | Word |= PHY_M_PC_DIS_125CLK; | ||
2190 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2191 | |||
2192 | /* activate energy detect mode 1 */ | ||
2193 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2194 | |||
2195 | /* energy detect mode */ | ||
2196 | if (Mode == PHY_PM_ENERGY_DETECT) { | ||
2197 | Word |= PHY_M_PC_EN_DET; | ||
2198 | } | ||
2199 | /* energy detect plus mode */ | ||
2200 | else { | ||
2201 | Word |= PHY_M_PC_EN_DET_PLUS; | ||
2202 | } | ||
2203 | |||
2204 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2205 | |||
2206 | /* | ||
2207 | * reinitialize the PHY to force a software reset | ||
2208 | * which is necessary after the register settings | ||
2209 | * for the energy detect modes. | ||
2210 | * Furthermore reinitialisation prevents that the | ||
2211 | * PHY is running out of a stable state. | ||
2212 | */ | ||
2213 | SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE); | ||
2214 | break; | ||
2215 | |||
2216 | /* don't change current power mode */ | ||
2217 | default: | ||
2218 | pAC->GIni.GP[Port].PPhyPowerState = LastMode; | ||
2219 | Ret = 1; | ||
2220 | break; | ||
2221 | } | ||
2222 | } | ||
2223 | /* low power modes are not supported by this chip */ | ||
2224 | else { | ||
2225 | Ret = 1; | ||
2226 | } | ||
2227 | |||
2228 | return(Ret); | ||
2229 | |||
2230 | } /* SkGmEnterLowPowerMode */ | ||
2231 | |||
2232 | /****************************************************************************** | ||
2233 | * | ||
2234 | * SkGmLeaveLowPowerMode() | ||
2235 | * | ||
2236 | * Description: | ||
2237 | * Leave the current low power mode and switch to normal mode | ||
2238 | * | ||
2239 | * Note: | ||
2240 | * | ||
2241 | * Returns: | ||
2242 | * 0: ok | ||
2243 | * 1: error | ||
2244 | */ | ||
2245 | int SkGmLeaveLowPowerMode( | ||
2246 | SK_AC *pAC, /* adapter context */ | ||
2247 | SK_IOC IoC, /* IO context */ | ||
2248 | int Port) /* Port Index (e.g. MAC_1) */ | ||
2249 | { | ||
2250 | SK_U32 DWord; | ||
2251 | SK_U16 Word; | ||
2252 | SK_U8 LastMode; | ||
2253 | int Ret = 0; | ||
2254 | |||
2255 | if (pAC->GIni.GIYukonLite && | ||
2256 | pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) { | ||
2257 | |||
2258 | /* save current power mode */ | ||
2259 | LastMode = pAC->GIni.GP[Port].PPhyPowerState; | ||
2260 | pAC->GIni.GP[Port].PPhyPowerState = PHY_PM_OPERATIONAL_MODE; | ||
2261 | |||
2262 | switch (LastMode) { | ||
2263 | /* coma mode (deep sleep) */ | ||
2264 | case PHY_PM_DEEP_SLEEP: | ||
2265 | SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord); | ||
2266 | |||
2267 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2268 | |||
2269 | /* Release PHY from Coma Mode */ | ||
2270 | SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord & ~PCI_PHY_COMA); | ||
2271 | |||
2272 | SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2273 | |||
2274 | SK_IN32(IoC, B2_GP_IO, &DWord); | ||
2275 | |||
2276 | /* set to output */ | ||
2277 | DWord |= (GP_DIR_9 | GP_IO_9); | ||
2278 | |||
2279 | /* set PHY reset */ | ||
2280 | SK_OUT32(IoC, B2_GP_IO, DWord); | ||
2281 | |||
2282 | DWord &= ~GP_IO_9; /* clear PHY reset (active high) */ | ||
2283 | |||
2284 | /* clear PHY reset */ | ||
2285 | SK_OUT32(IoC, B2_GP_IO, DWord); | ||
2286 | break; | ||
2287 | |||
2288 | /* IEEE 22.2.4.1.5 compatible power down mode */ | ||
2289 | case PHY_PM_IEEE_POWER_DOWN: | ||
2290 | /* | ||
2291 | * - enable MAC 125 MHz clock | ||
2292 | * - set MAC power up | ||
2293 | */ | ||
2294 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2295 | Word &= ~PHY_M_PC_DIS_125CLK; | ||
2296 | Word |= PHY_M_PC_MAC_POW_UP; | ||
2297 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2298 | |||
2299 | /* | ||
2300 | * register changes must be followed by a software | ||
2301 | * reset to take effect | ||
2302 | */ | ||
2303 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word); | ||
2304 | Word |= PHY_CT_RESET; | ||
2305 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word); | ||
2306 | |||
2307 | /* switch IEEE compatible power down mode off */ | ||
2308 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word); | ||
2309 | Word &= ~PHY_CT_PDOWN; | ||
2310 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word); | ||
2311 | break; | ||
2312 | |||
2313 | /* energy detect and energy detect plus mode */ | ||
2314 | case PHY_PM_ENERGY_DETECT: | ||
2315 | case PHY_PM_ENERGY_DETECT_PLUS: | ||
2316 | /* | ||
2317 | * - enable MAC 125 MHz clock | ||
2318 | */ | ||
2319 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2320 | Word &= ~PHY_M_PC_DIS_125CLK; | ||
2321 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2322 | |||
2323 | /* disable energy detect mode */ | ||
2324 | SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word); | ||
2325 | Word &= ~PHY_M_PC_EN_DET_MSK; | ||
2326 | SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word); | ||
2327 | |||
2328 | /* | ||
2329 | * reinitialize the PHY to force a software reset | ||
2330 | * which is necessary after the register settings | ||
2331 | * for the energy detect modes. | ||
2332 | * Furthermore reinitialisation prevents that the | ||
2333 | * PHY is running out of a stable state. | ||
2334 | */ | ||
2335 | SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE); | ||
2336 | break; | ||
2337 | |||
2338 | /* don't change current power mode */ | ||
2339 | default: | ||
2340 | pAC->GIni.GP[Port].PPhyPowerState = LastMode; | ||
2341 | Ret = 1; | ||
2342 | break; | ||
2343 | } | ||
2344 | } | ||
2345 | /* low power modes are not supported by this chip */ | ||
2346 | else { | ||
2347 | Ret = 1; | ||
2348 | } | ||
2349 | |||
2350 | return(Ret); | ||
2351 | |||
2352 | } /* SkGmLeaveLowPowerMode */ | ||
2353 | #endif /* !SK_SLIM */ | ||
2354 | |||
2355 | |||
2356 | /****************************************************************************** | 2048 | /****************************************************************************** |
2357 | * | 2049 | * |
2358 | * SkGmInitPhyMarv() - Initialize the Marvell Phy registers | 2050 | * SkGmInitPhyMarv() - Initialize the Marvell Phy registers |
@@ -3420,145 +3112,6 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
3420 | } /* SkMacAutoNegDone */ | 3112 | } /* SkMacAutoNegDone */ |
3421 | 3113 | ||
3422 | 3114 | ||
3423 | #ifdef GENESIS | ||
3424 | /****************************************************************************** | ||
3425 | * | ||
3426 | * SkXmSetRxTxEn() - Special Set Rx/Tx Enable and some features in XMAC | ||
3427 | * | ||
3428 | * Description: | ||
3429 | * sets MAC or PHY LoopBack and Duplex Mode in the MMU Command Reg. | ||
3430 | * enables Rx/Tx | ||
3431 | * | ||
3432 | * Returns: N/A | ||
3433 | */ | ||
3434 | static void SkXmSetRxTxEn( | ||
3435 | SK_AC *pAC, /* Adapter Context */ | ||
3436 | SK_IOC IoC, /* IO context */ | ||
3437 | int Port, /* Port Index (MAC_1 + n) */ | ||
3438 | int Para) /* Parameter to set: MAC or PHY LoopBack, Duplex Mode */ | ||
3439 | { | ||
3440 | SK_U16 Word; | ||
3441 | |||
3442 | XM_IN16(IoC, Port, XM_MMU_CMD, &Word); | ||
3443 | |||
3444 | switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) { | ||
3445 | case SK_MAC_LOOPB_ON: | ||
3446 | Word |= XM_MMU_MAC_LB; | ||
3447 | break; | ||
3448 | case SK_MAC_LOOPB_OFF: | ||
3449 | Word &= ~XM_MMU_MAC_LB; | ||
3450 | break; | ||
3451 | } | ||
3452 | |||
3453 | switch (Para & (SK_PHY_LOOPB_ON | SK_PHY_LOOPB_OFF)) { | ||
3454 | case SK_PHY_LOOPB_ON: | ||
3455 | Word |= XM_MMU_GMII_LOOP; | ||
3456 | break; | ||
3457 | case SK_PHY_LOOPB_OFF: | ||
3458 | Word &= ~XM_MMU_GMII_LOOP; | ||
3459 | break; | ||
3460 | } | ||
3461 | |||
3462 | switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) { | ||
3463 | case SK_PHY_FULLD_ON: | ||
3464 | Word |= XM_MMU_GMII_FD; | ||
3465 | break; | ||
3466 | case SK_PHY_FULLD_OFF: | ||
3467 | Word &= ~XM_MMU_GMII_FD; | ||
3468 | break; | ||
3469 | } | ||
3470 | |||
3471 | XM_OUT16(IoC, Port, XM_MMU_CMD, Word | XM_MMU_ENA_RX | XM_MMU_ENA_TX); | ||
3472 | |||
3473 | /* dummy read to ensure writing */ | ||
3474 | XM_IN16(IoC, Port, XM_MMU_CMD, &Word); | ||
3475 | |||
3476 | } /* SkXmSetRxTxEn */ | ||
3477 | #endif /* GENESIS */ | ||
3478 | |||
3479 | |||
3480 | #ifdef YUKON | ||
3481 | /****************************************************************************** | ||
3482 | * | ||
3483 | * SkGmSetRxTxEn() - Special Set Rx/Tx Enable and some features in GMAC | ||
3484 | * | ||
3485 | * Description: | ||
3486 | * sets MAC LoopBack and Duplex Mode in the General Purpose Control Reg. | ||
3487 | * enables Rx/Tx | ||
3488 | * | ||
3489 | * Returns: N/A | ||
3490 | */ | ||
3491 | static void SkGmSetRxTxEn( | ||
3492 | SK_AC *pAC, /* Adapter Context */ | ||
3493 | SK_IOC IoC, /* IO context */ | ||
3494 | int Port, /* Port Index (MAC_1 + n) */ | ||
3495 | int Para) /* Parameter to set: MAC LoopBack, Duplex Mode */ | ||
3496 | { | ||
3497 | SK_U16 Ctrl; | ||
3498 | |||
3499 | GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl); | ||
3500 | |||
3501 | switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) { | ||
3502 | case SK_MAC_LOOPB_ON: | ||
3503 | Ctrl |= GM_GPCR_LOOP_ENA; | ||
3504 | break; | ||
3505 | case SK_MAC_LOOPB_OFF: | ||
3506 | Ctrl &= ~GM_GPCR_LOOP_ENA; | ||
3507 | break; | ||
3508 | } | ||
3509 | |||
3510 | switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) { | ||
3511 | case SK_PHY_FULLD_ON: | ||
3512 | Ctrl |= GM_GPCR_DUP_FULL; | ||
3513 | break; | ||
3514 | case SK_PHY_FULLD_OFF: | ||
3515 | Ctrl &= ~GM_GPCR_DUP_FULL; | ||
3516 | break; | ||
3517 | } | ||
3518 | |||
3519 | GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Ctrl | GM_GPCR_RX_ENA | | ||
3520 | GM_GPCR_TX_ENA)); | ||
3521 | |||
3522 | /* dummy read to ensure writing */ | ||
3523 | GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl); | ||
3524 | |||
3525 | } /* SkGmSetRxTxEn */ | ||
3526 | #endif /* YUKON */ | ||
3527 | |||
3528 | |||
3529 | #ifndef SK_SLIM | ||
3530 | /****************************************************************************** | ||
3531 | * | ||
3532 | * SkMacSetRxTxEn() - Special Set Rx/Tx Enable and parameters | ||
3533 | * | ||
3534 | * Description: calls the Special Set Rx/Tx Enable routines dep. on board type | ||
3535 | * | ||
3536 | * Returns: N/A | ||
3537 | */ | ||
3538 | void SkMacSetRxTxEn( | ||
3539 | SK_AC *pAC, /* Adapter Context */ | ||
3540 | SK_IOC IoC, /* IO context */ | ||
3541 | int Port, /* Port Index (MAC_1 + n) */ | ||
3542 | int Para) | ||
3543 | { | ||
3544 | #ifdef GENESIS | ||
3545 | if (pAC->GIni.GIGenesis) { | ||
3546 | |||
3547 | SkXmSetRxTxEn(pAC, IoC, Port, Para); | ||
3548 | } | ||
3549 | #endif /* GENESIS */ | ||
3550 | |||
3551 | #ifdef YUKON | ||
3552 | if (pAC->GIni.GIYukon) { | ||
3553 | |||
3554 | SkGmSetRxTxEn(pAC, IoC, Port, Para); | ||
3555 | } | ||
3556 | #endif /* YUKON */ | ||
3557 | |||
3558 | } /* SkMacSetRxTxEn */ | ||
3559 | #endif /* !SK_SLIM */ | ||
3560 | |||
3561 | |||
3562 | /****************************************************************************** | 3115 | /****************************************************************************** |
3563 | * | 3116 | * |
3564 | * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up | 3117 | * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up |
@@ -3976,7 +3529,7 @@ SK_U16 PhyStat) /* PHY Status word to analyse */ | |||
3976 | * Returns: | 3529 | * Returns: |
3977 | * nothing | 3530 | * nothing |
3978 | */ | 3531 | */ |
3979 | void SkXmIrq( | 3532 | static void SkXmIrq( |
3980 | SK_AC *pAC, /* adapter context */ | 3533 | SK_AC *pAC, /* adapter context */ |
3981 | SK_IOC IoC, /* IO context */ | 3534 | SK_IOC IoC, /* IO context */ |
3982 | int Port) /* Port Index (MAC_1 + n) */ | 3535 | int Port) /* Port Index (MAC_1 + n) */ |
@@ -4112,7 +3665,7 @@ int Port) /* Port Index (MAC_1 + n) */ | |||
4112 | * Returns: | 3665 | * Returns: |
4113 | * nothing | 3666 | * nothing |
4114 | */ | 3667 | */ |
4115 | void SkGmIrq( | 3668 | static void SkGmIrq( |
4116 | SK_AC *pAC, /* adapter context */ | 3669 | SK_AC *pAC, /* adapter context */ |
4117 | SK_IOC IoC, /* IO context */ | 3670 | SK_IOC IoC, /* IO context */ |
4118 | int Port) /* Port Index (MAC_1 + n) */ | 3671 | int Port) /* Port Index (MAC_1 + n) */ |
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c index a2ed47f1cc70..a4b2b6975d6c 100644 --- a/drivers/net/skfp/fplustm.c +++ b/drivers/net/skfp/fplustm.c | |||
@@ -89,21 +89,21 @@ static const u_short my_sagp = 0xffff ; /* short group address (n.u.) */ | |||
89 | /* | 89 | /* |
90 | * useful interrupt bits | 90 | * useful interrupt bits |
91 | */ | 91 | */ |
92 | static int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ; | 92 | static const int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ; |
93 | static int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0| | 93 | static const int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0| |
94 | FM_STBURS | FM_STBURA0 ; | 94 | FM_STBURS | FM_STBURA0 ; |
95 | 95 | ||
96 | /* delete FM_SRBFL after tests */ | 96 | /* delete FM_SRBFL after tests */ |
97 | static int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL | | 97 | static const int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL | |
98 | FM_SMYCLM ; | 98 | FM_SMYCLM ; |
99 | static int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR | | 99 | static const int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR | |
100 | FM_SERRCTR | FM_SLSTCTR | | 100 | FM_SERRCTR | FM_SLSTCTR | |
101 | FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ; | 101 | FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ; |
102 | 102 | ||
103 | static int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ; | 103 | static const int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ; |
104 | static int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ; | 104 | static const int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ; |
105 | 105 | ||
106 | static int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC | | 106 | static const int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC | |
107 | FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ; | 107 | FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ; |
108 | 108 | ||
109 | 109 | ||
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c index cd0aa4c151b0..74e129f3ce92 100644 --- a/drivers/net/skfp/pcmplc.c +++ b/drivers/net/skfp/pcmplc.c | |||
@@ -186,7 +186,7 @@ static const struct plt { | |||
186 | * Do we need the EBUF error during signaling, too, to detect SUPERNET_3 | 186 | * Do we need the EBUF error during signaling, too, to detect SUPERNET_3 |
187 | * PLL bug? | 187 | * PLL bug? |
188 | */ | 188 | */ |
189 | static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | | 189 | static const int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | |
190 | PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; | 190 | PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; |
191 | #else /* SUPERNET_3 */ | 191 | #else /* SUPERNET_3 */ |
192 | /* | 192 | /* |
@@ -195,7 +195,7 @@ static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | | |||
195 | static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | | 195 | static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | |
196 | PL_PCM_ENABLED | PL_SELF_TEST ; | 196 | PL_PCM_ENABLED | PL_SELF_TEST ; |
197 | #endif /* SUPERNET_3 */ | 197 | #endif /* SUPERNET_3 */ |
198 | static int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | | 198 | static const int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | |
199 | PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; | 199 | PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; |
200 | 200 | ||
201 | /* external functions */ | 201 | /* external functions */ |
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 4b5ed2c63177..c7fb6133047e 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c | |||
@@ -67,7 +67,7 @@ | |||
67 | /* each new release!!! */ | 67 | /* each new release!!! */ |
68 | #define VERSION "2.07" | 68 | #define VERSION "2.07" |
69 | 69 | ||
70 | static const char *boot_msg = | 70 | static const char * const boot_msg = |
71 | "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n" | 71 | "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n" |
72 | " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)"; | 72 | " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)"; |
73 | 73 | ||
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index d167deda9a53..35b18057fbdd 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -201,7 +201,7 @@ static int max_interrupt_work = 20; | |||
201 | static int mtu; | 201 | static int mtu; |
202 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | 202 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). |
203 | The Starfire has a 512 element hash table based on the Ethernet CRC. */ | 203 | The Starfire has a 512 element hash table based on the Ethernet CRC. */ |
204 | static int multicast_filter_limit = 512; | 204 | static const int multicast_filter_limit = 512; |
205 | /* Whether to do TCP/UDP checksums in hardware */ | 205 | /* Whether to do TCP/UDP checksums in hardware */ |
206 | static int enable_hw_cksum = 1; | 206 | static int enable_hw_cksum = 1; |
207 | 207 | ||
@@ -463,7 +463,7 @@ static struct pci_device_id starfire_pci_tbl[] = { | |||
463 | MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); | 463 | MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); |
464 | 464 | ||
465 | /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */ | 465 | /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */ |
466 | static struct chip_info { | 466 | static const struct chip_info { |
467 | const char *name; | 467 | const char *name; |
468 | int drv_flags; | 468 | int drv_flags; |
469 | } netdrv_tbl[] __devinitdata = { | 469 | } netdrv_tbl[] __devinitdata = { |
@@ -2084,6 +2084,38 @@ static int netdev_close(struct net_device *dev) | |||
2084 | return 0; | 2084 | return 0; |
2085 | } | 2085 | } |
2086 | 2086 | ||
2087 | #ifdef CONFIG_PM | ||
2088 | static int starfire_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2089 | { | ||
2090 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2091 | |||
2092 | if (netif_running(dev)) { | ||
2093 | netif_device_detach(dev); | ||
2094 | netdev_close(dev); | ||
2095 | } | ||
2096 | |||
2097 | pci_save_state(pdev); | ||
2098 | pci_set_power_state(pdev, pci_choose_state(pdev,state)); | ||
2099 | |||
2100 | return 0; | ||
2101 | } | ||
2102 | |||
2103 | static int starfire_resume(struct pci_dev *pdev) | ||
2104 | { | ||
2105 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2106 | |||
2107 | pci_set_power_state(pdev, PCI_D0); | ||
2108 | pci_restore_state(pdev); | ||
2109 | |||
2110 | if (netif_running(dev)) { | ||
2111 | netdev_open(dev); | ||
2112 | netif_device_attach(dev); | ||
2113 | } | ||
2114 | |||
2115 | return 0; | ||
2116 | } | ||
2117 | #endif /* CONFIG_PM */ | ||
2118 | |||
2087 | 2119 | ||
2088 | static void __devexit starfire_remove_one (struct pci_dev *pdev) | 2120 | static void __devexit starfire_remove_one (struct pci_dev *pdev) |
2089 | { | 2121 | { |
@@ -2115,6 +2147,10 @@ static struct pci_driver starfire_driver = { | |||
2115 | .name = DRV_NAME, | 2147 | .name = DRV_NAME, |
2116 | .probe = starfire_init_one, | 2148 | .probe = starfire_init_one, |
2117 | .remove = __devexit_p(starfire_remove_one), | 2149 | .remove = __devexit_p(starfire_remove_one), |
2150 | #ifdef CONFIG_PM | ||
2151 | .suspend = starfire_suspend, | ||
2152 | .resume = starfire_resume, | ||
2153 | #endif /* CONFIG_PM */ | ||
2118 | .id_table = starfire_pci_tbl, | 2154 | .id_table = starfire_pci_tbl, |
2119 | }; | 2155 | }; |
2120 | 2156 | ||
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index 0ab9c38b4a34..61eec46cb111 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c | |||
@@ -106,7 +106,7 @@ | |||
106 | static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ | 106 | static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ |
107 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | 107 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). |
108 | Typical is a 64 element hash table based on the Ethernet CRC. */ | 108 | Typical is a 64 element hash table based on the Ethernet CRC. */ |
109 | static int multicast_filter_limit = 32; | 109 | static const int multicast_filter_limit = 32; |
110 | 110 | ||
111 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | 111 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. |
112 | Setting to > 1518 effectively disables this feature. | 112 | Setting to > 1518 effectively disables this feature. |
@@ -298,7 +298,7 @@ enum { | |||
298 | struct pci_id_info { | 298 | struct pci_id_info { |
299 | const char *name; | 299 | const char *name; |
300 | }; | 300 | }; |
301 | static struct pci_id_info pci_id_tbl[] = { | 301 | static const struct pci_id_info pci_id_tbl[] = { |
302 | {"D-Link DFE-550TX FAST Ethernet Adapter"}, | 302 | {"D-Link DFE-550TX FAST Ethernet Adapter"}, |
303 | {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, | 303 | {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, |
304 | {"D-Link DFE-580TX 4 port Server Adapter"}, | 304 | {"D-Link DFE-580TX 4 port Server Adapter"}, |
@@ -633,9 +633,13 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, | |||
633 | 633 | ||
634 | np->phys[0] = 1; /* Default setting */ | 634 | np->phys[0] = 1; /* Default setting */ |
635 | np->mii_preamble_required++; | 635 | np->mii_preamble_required++; |
636 | /* | ||
637 | * It seems some phys doesn't deal well with address 0 being accessed | ||
638 | * first, so leave address zero to the end of the loop (32 & 31). | ||
639 | */ | ||
636 | for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) { | 640 | for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) { |
637 | int mii_status = mdio_read(dev, phy, MII_BMSR); | ||
638 | int phyx = phy & 0x1f; | 641 | int phyx = phy & 0x1f; |
642 | int mii_status = mdio_read(dev, phyx, MII_BMSR); | ||
639 | if (mii_status != 0xffff && mii_status != 0x0000) { | 643 | if (mii_status != 0xffff && mii_status != 0x0000) { |
640 | np->phys[phy_idx++] = phyx; | 644 | np->phys[phy_idx++] = phyx; |
641 | np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); | 645 | np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); |
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index d3ddb41d6e5c..cb0aba95d4e3 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include "sungem_phy.h" | 39 | #include "sungem_phy.h" |
40 | 40 | ||
41 | /* Link modes of the BCM5400 PHY */ | 41 | /* Link modes of the BCM5400 PHY */ |
42 | static int phy_BCM5400_link_table[8][3] = { | 42 | static const int phy_BCM5400_link_table[8][3] = { |
43 | { 0, 0, 0 }, /* No link */ | 43 | { 0, 0, 0 }, /* No link */ |
44 | { 0, 0, 0 }, /* 10BT Half Duplex */ | 44 | { 0, 0, 0 }, /* 10BT Half Duplex */ |
45 | { 1, 0, 0 }, /* 10BT Full Duplex */ | 45 | { 1, 0, 0 }, /* 10BT Full Duplex */ |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index caf4102b54ce..6c6c5498899f 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -7802,7 +7802,7 @@ static int tg3_test_link(struct tg3 *tp) | |||
7802 | } | 7802 | } |
7803 | 7803 | ||
7804 | /* Only test the commonly used registers */ | 7804 | /* Only test the commonly used registers */ |
7805 | static int tg3_test_registers(struct tg3 *tp) | 7805 | static const int tg3_test_registers(struct tg3 *tp) |
7806 | { | 7806 | { |
7807 | int i, is_5705; | 7807 | int i, is_5705; |
7808 | u32 offset, read_mask, write_mask, val, save_val, read_val; | 7808 | u32 offset, read_mask, write_mask, val, save_val, read_val; |
@@ -8016,7 +8016,7 @@ out: | |||
8016 | 8016 | ||
8017 | static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) | 8017 | static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) |
8018 | { | 8018 | { |
8019 | static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; | 8019 | static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; |
8020 | int i; | 8020 | int i; |
8021 | u32 j; | 8021 | u32 j; |
8022 | 8022 | ||
@@ -9097,6 +9097,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
9097 | tp->phy_id = PHY_ID_INVALID; | 9097 | tp->phy_id = PHY_ID_INVALID; |
9098 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 9098 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; |
9099 | 9099 | ||
9100 | /* Do not even try poking around in here on Sun parts. */ | ||
9101 | if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) | ||
9102 | return; | ||
9103 | |||
9100 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); | 9104 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); |
9101 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { | 9105 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { |
9102 | u32 nic_cfg, led_cfg; | 9106 | u32 nic_cfg, led_cfg; |
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 97712c3c4e07..c58a4c31d0dd 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c | |||
@@ -122,6 +122,7 @@ | |||
122 | #include <linux/spinlock.h> | 122 | #include <linux/spinlock.h> |
123 | #include <linux/version.h> | 123 | #include <linux/version.h> |
124 | #include <linux/bitops.h> | 124 | #include <linux/bitops.h> |
125 | #include <linux/jiffies.h> | ||
125 | 126 | ||
126 | #include <net/checksum.h> | 127 | #include <net/checksum.h> |
127 | 128 | ||
@@ -512,7 +513,7 @@ static int streamer_reset(struct net_device *dev) | |||
512 | 513 | ||
513 | while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) { | 514 | while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) { |
514 | msleep_interruptible(100); | 515 | msleep_interruptible(100); |
515 | if (jiffies - t > 40 * HZ) { | 516 | if (time_after(jiffies, t + 40 * HZ)) { |
516 | printk(KERN_ERR | 517 | printk(KERN_ERR |
517 | "IBM PCI tokenring card not responding\n"); | 518 | "IBM PCI tokenring card not responding\n"); |
518 | release_region(dev->base_addr, STREAMER_IO_SPACE); | 519 | release_region(dev->base_addr, STREAMER_IO_SPACE); |
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 05477d24fd49..23032a7bc0a9 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c | |||
@@ -100,6 +100,7 @@ | |||
100 | #include <linux/pci.h> | 100 | #include <linux/pci.h> |
101 | #include <linux/spinlock.h> | 101 | #include <linux/spinlock.h> |
102 | #include <linux/bitops.h> | 102 | #include <linux/bitops.h> |
103 | #include <linux/jiffies.h> | ||
103 | 104 | ||
104 | #include <net/checksum.h> | 105 | #include <net/checksum.h> |
105 | 106 | ||
@@ -307,7 +308,7 @@ static int __devinit olympic_init(struct net_device *dev) | |||
307 | t=jiffies; | 308 | t=jiffies; |
308 | while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) { | 309 | while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) { |
309 | schedule(); | 310 | schedule(); |
310 | if(jiffies-t > 40*HZ) { | 311 | if(time_after(jiffies, t + 40*HZ)) { |
311 | printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); | 312 | printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); |
312 | return -ENODEV; | 313 | return -ENODEV; |
313 | } | 314 | } |
@@ -359,7 +360,7 @@ static int __devinit olympic_init(struct net_device *dev) | |||
359 | t=jiffies; | 360 | t=jiffies; |
360 | while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) { | 361 | while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) { |
361 | schedule() ; | 362 | schedule() ; |
362 | if(jiffies-t > 2*HZ) { | 363 | if(time_after(jiffies, t + 2*HZ)) { |
363 | printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ; | 364 | printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ; |
364 | return -ENODEV; | 365 | return -ENODEV; |
365 | } | 366 | } |
@@ -373,7 +374,7 @@ static int __devinit olympic_init(struct net_device *dev) | |||
373 | t=jiffies; | 374 | t=jiffies; |
374 | while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) { | 375 | while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) { |
375 | schedule(); | 376 | schedule(); |
376 | if(jiffies-t > 15*HZ) { | 377 | if(time_after(jiffies, t + 15*HZ)) { |
377 | printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); | 378 | printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); |
378 | return -ENODEV; | 379 | return -ENODEV; |
379 | } | 380 | } |
@@ -519,7 +520,7 @@ static int olympic_open(struct net_device *dev) | |||
519 | olympic_priv->srb_queued=0; | 520 | olympic_priv->srb_queued=0; |
520 | break; | 521 | break; |
521 | } | 522 | } |
522 | if ((jiffies-t) > 10*HZ) { | 523 | if (time_after(jiffies, t + 10*HZ)) { |
523 | printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ; | 524 | printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ; |
524 | olympic_priv->srb_queued=0; | 525 | olympic_priv->srb_queued=0; |
525 | break ; | 526 | break ; |
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 2d0cfbceee22..6299e186c73f 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -402,8 +402,7 @@ static void de_rx (struct de_private *de) | |||
402 | unsigned copying_skb, buflen; | 402 | unsigned copying_skb, buflen; |
403 | 403 | ||
404 | skb = de->rx_skb[rx_tail].skb; | 404 | skb = de->rx_skb[rx_tail].skb; |
405 | if (!skb) | 405 | BUG_ON(!skb); |
406 | BUG(); | ||
407 | rmb(); | 406 | rmb(); |
408 | status = le32_to_cpu(de->rx_ring[rx_tail].opts1); | 407 | status = le32_to_cpu(de->rx_ring[rx_tail].opts1); |
409 | if (status & DescOwn) | 408 | if (status & DescOwn) |
@@ -545,8 +544,7 @@ static void de_tx (struct de_private *de) | |||
545 | break; | 544 | break; |
546 | 545 | ||
547 | skb = de->tx_skb[tx_tail].skb; | 546 | skb = de->tx_skb[tx_tail].skb; |
548 | if (!skb) | 547 | BUG_ON(!skb); |
549 | BUG(); | ||
550 | if (unlikely(skb == DE_DUMMY_SKB)) | 548 | if (unlikely(skb == DE_DUMMY_SKB)) |
551 | goto next; | 549 | goto next; |
552 | 550 | ||
@@ -789,8 +787,7 @@ static void __de_set_rx_mode (struct net_device *dev) | |||
789 | 787 | ||
790 | de->tx_head = NEXT_TX(entry); | 788 | de->tx_head = NEXT_TX(entry); |
791 | 789 | ||
792 | if (TX_BUFFS_AVAIL(de) < 0) | 790 | BUG_ON(TX_BUFFS_AVAIL(de) < 0); |
793 | BUG(); | ||
794 | if (TX_BUFFS_AVAIL(de) == 0) | 791 | if (TX_BUFFS_AVAIL(de) == 0) |
795 | netif_stop_queue(dev); | 792 | netif_stop_queue(dev); |
796 | 793 | ||
@@ -916,8 +913,7 @@ static void de_set_media (struct de_private *de) | |||
916 | unsigned media = de->media_type; | 913 | unsigned media = de->media_type; |
917 | u32 macmode = dr32(MacMode); | 914 | u32 macmode = dr32(MacMode); |
918 | 915 | ||
919 | if (de_is_running(de)) | 916 | BUG_ON(de_is_running(de)); |
920 | BUG(); | ||
921 | 917 | ||
922 | if (de->de21040) | 918 | if (de->de21040) |
923 | dw32(CSR11, FULL_DUPLEX_MAGIC); | 919 | dw32(CSR11, FULL_DUPLEX_MAGIC); |
@@ -1153,8 +1149,7 @@ static void de_media_interrupt (struct de_private *de, u32 status) | |||
1153 | return; | 1149 | return; |
1154 | } | 1150 | } |
1155 | 1151 | ||
1156 | if (!(status & LinkFail)) | 1152 | BUG_ON(!(status & LinkFail)); |
1157 | BUG(); | ||
1158 | 1153 | ||
1159 | if (netif_carrier_ok(de->dev)) { | 1154 | if (netif_carrier_ok(de->dev)) { |
1160 | de_link_down(de); | 1155 | de_link_down(de); |
@@ -2092,8 +2087,7 @@ static void __exit de_remove_one (struct pci_dev *pdev) | |||
2092 | struct net_device *dev = pci_get_drvdata(pdev); | 2087 | struct net_device *dev = pci_get_drvdata(pdev); |
2093 | struct de_private *de = dev->priv; | 2088 | struct de_private *de = dev->priv; |
2094 | 2089 | ||
2095 | if (!dev) | 2090 | BUG_ON(!dev); |
2096 | BUG(); | ||
2097 | unregister_netdev(dev); | 2091 | unregister_netdev(dev); |
2098 | kfree(de->ee_data); | 2092 | kfree(de->ee_data); |
2099 | iounmap(de->regs); | 2093 | iounmap(de->regs); |
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c index d9980bde7508..ca7e53246adb 100644 --- a/drivers/net/tulip/pnic.c +++ b/drivers/net/tulip/pnic.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/jiffies.h> | ||
19 | #include "tulip.h" | 20 | #include "tulip.h" |
20 | 21 | ||
21 | 22 | ||
@@ -68,7 +69,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5) | |||
68 | */ | 69 | */ |
69 | if (tulip_media_cap[dev->if_port] & MediaIsMII) | 70 | if (tulip_media_cap[dev->if_port] & MediaIsMII) |
70 | return; | 71 | return; |
71 | if (! tp->nwayset || jiffies - dev->trans_start > 1*HZ) { | 72 | if (! tp->nwayset || time_after(jiffies, dev->trans_start + 1*HZ)) { |
72 | tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff); | 73 | tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff); |
73 | iowrite32(tp->csr6, ioaddr + CSR6); | 74 | iowrite32(tp->csr6, ioaddr + CSR6); |
74 | iowrite32(0x30, ioaddr + CSR12); | 75 | iowrite32(0x30, ioaddr + CSR12); |
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 5b1af3986abf..ba05dedf29d3 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -1645,7 +1645,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1645 | 1645 | ||
1646 | /* no more hardware accesses behind this line. */ | 1646 | /* no more hardware accesses behind this line. */ |
1647 | 1647 | ||
1648 | if (np->csr6) BUG(); | 1648 | BUG_ON(np->csr6); |
1649 | if (ioread32(ioaddr + IntrEnable)) BUG(); | 1649 | if (ioread32(ioaddr + IntrEnable)) BUG(); |
1650 | 1650 | ||
1651 | /* pci_power_off(pdev, -1); */ | 1651 | /* pci_power_off(pdev, -1); */ |
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 60d1e05ab732..56344103ac23 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c | |||
@@ -32,6 +32,9 @@ | |||
32 | 32 | ||
33 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
34 | #include <asm/io.h> | 34 | #include <asm/io.h> |
35 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
36 | #include <asm/irq.h> | ||
37 | #endif | ||
35 | 38 | ||
36 | #ifdef DEBUG | 39 | #ifdef DEBUG |
37 | #define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) | 40 | #define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) |
@@ -598,10 +601,8 @@ static void setup_descriptors(struct xircom_private *card) | |||
598 | enter("setup_descriptors"); | 601 | enter("setup_descriptors"); |
599 | 602 | ||
600 | 603 | ||
601 | if (card->rx_buffer == NULL) | 604 | BUG_ON(card->rx_buffer == NULL); |
602 | BUG(); | 605 | BUG_ON(card->tx_buffer == NULL); |
603 | if (card->tx_buffer == NULL) | ||
604 | BUG(); | ||
605 | 606 | ||
606 | /* Receive descriptors */ | 607 | /* Receive descriptors */ |
607 | memset(card->rx_buffer, 0, 128); /* clear the descriptors */ | 608 | memset(card->rx_buffer, 0, 128); /* clear the descriptors */ |
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 4c76cb794bfb..cde35dd87906 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -178,7 +178,7 @@ enum typhoon_cards { | |||
178 | }; | 178 | }; |
179 | 179 | ||
180 | /* directly indexed by enum typhoon_cards, above */ | 180 | /* directly indexed by enum typhoon_cards, above */ |
181 | static struct typhoon_card_info typhoon_card_info[] __devinitdata = { | 181 | static const struct typhoon_card_info typhoon_card_info[] __devinitdata = { |
182 | { "3Com Typhoon (3C990-TX)", | 182 | { "3Com Typhoon (3C990-TX)", |
183 | TYPHOON_CRYPTO_NONE}, | 183 | TYPHOON_CRYPTO_NONE}, |
184 | { "3Com Typhoon (3CR990-TX-95)", | 184 | { "3Com Typhoon (3CR990-TX-95)", |
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 18c27e1e7884..883cf7da10fc 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig | |||
@@ -459,7 +459,7 @@ config WANPIPE_FR | |||
459 | bool "WANPIPE Frame Relay support" | 459 | bool "WANPIPE Frame Relay support" |
460 | depends on VENDOR_SANGOMA | 460 | depends on VENDOR_SANGOMA |
461 | help | 461 | help |
462 | Connect a WANPIPE card to a Frame Relay network, or use Frame Felay | 462 | Connect a WANPIPE card to a Frame Relay network, or use Frame Relay |
463 | API to develop custom applications. | 463 | API to develop custom applications. |
464 | 464 | ||
465 | Contains the Ethernet Bridging over Frame Relay feature, where | 465 | Contains the Ethernet Bridging over Frame Relay feature, where |
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index 7db1d1d0bb34..cf5c805452a3 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/ioport.h> | 29 | #include <linux/ioport.h> |
30 | #include <net/arp.h> | 30 | #include <net/arp.h> |
31 | 31 | ||
32 | #include <asm/irq.h> | ||
32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
33 | #include <asm/dma.h> | 34 | #include <asm/dma.h> |
34 | #include <asm/byteorder.h> | 35 | #include <asm/byteorder.h> |
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 5380ddfcd7d5..050e854e7774 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <net/arp.h> | 24 | #include <net/arp.h> |
25 | 25 | ||
26 | #include <asm/irq.h> | ||
26 | #include <asm/io.h> | 27 | #include <asm/io.h> |
27 | #include <asm/dma.h> | 28 | #include <asm/dma.h> |
28 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index ef85d76575a2..5b0a19a5058d 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig | |||
@@ -6,7 +6,8 @@ menu "Wireless LAN (non-hamradio)" | |||
6 | depends on NETDEVICES | 6 | depends on NETDEVICES |
7 | 7 | ||
8 | config NET_RADIO | 8 | config NET_RADIO |
9 | bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions" | 9 | bool "Wireless LAN drivers (non-hamradio)" |
10 | select WIRELESS_EXT | ||
10 | ---help--- | 11 | ---help--- |
11 | Support for wireless LANs and everything having to do with radio, | 12 | Support for wireless LANs and everything having to do with radio, |
12 | but not with amateur radio or FM broadcasting. | 13 | but not with amateur radio or FM broadcasting. |
@@ -135,8 +136,9 @@ comment "Wireless 802.11b ISA/PCI cards support" | |||
135 | 136 | ||
136 | config IPW2100 | 137 | config IPW2100 |
137 | tristate "Intel PRO/Wireless 2100 Network Connection" | 138 | tristate "Intel PRO/Wireless 2100 Network Connection" |
138 | depends on NET_RADIO && PCI && IEEE80211 | 139 | depends on NET_RADIO && PCI |
139 | select FW_LOADER | 140 | select FW_LOADER |
141 | select IEEE80211 | ||
140 | ---help--- | 142 | ---help--- |
141 | A driver for the Intel PRO/Wireless 2100 Network | 143 | A driver for the Intel PRO/Wireless 2100 Network |
142 | Connection 802.11b wireless network adapter. | 144 | Connection 802.11b wireless network adapter. |
@@ -188,8 +190,9 @@ config IPW2100_DEBUG | |||
188 | 190 | ||
189 | config IPW2200 | 191 | config IPW2200 |
190 | tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" | 192 | tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" |
191 | depends on NET_RADIO && IEEE80211 && PCI | 193 | depends on NET_RADIO && PCI |
192 | select FW_LOADER | 194 | select FW_LOADER |
195 | select IEEE80211 | ||
193 | ---help--- | 196 | ---help--- |
194 | A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network | 197 | A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network |
195 | Connection adapters. | 198 | Connection adapters. |
@@ -201,7 +204,7 @@ config IPW2200 | |||
201 | In order to use this driver, you will need a firmware image for it. | 204 | In order to use this driver, you will need a firmware image for it. |
202 | You can obtain the firmware from | 205 | You can obtain the firmware from |
203 | <http://ipw2200.sf.net/>. See the above referenced README.ipw2200 | 206 | <http://ipw2200.sf.net/>. See the above referenced README.ipw2200 |
204 | for information on where to install the firmare images. | 207 | for information on where to install the firmware images. |
205 | 208 | ||
206 | You will also very likely need the Wireless Tools in order to | 209 | You will also very likely need the Wireless Tools in order to |
207 | configure your card: | 210 | configure your card: |
@@ -213,6 +216,19 @@ config IPW2200 | |||
213 | say M here and read <file:Documentation/modules.txt>. The module | 216 | say M here and read <file:Documentation/modules.txt>. The module |
214 | will be called ipw2200.ko. | 217 | will be called ipw2200.ko. |
215 | 218 | ||
219 | config IPW2200_MONITOR | ||
220 | bool "Enable promiscuous mode" | ||
221 | depends on IPW2200 | ||
222 | ---help--- | ||
223 | Enables promiscuous/monitor mode support for the ipw2200 driver. | ||
224 | With this feature compiled into the driver, you can switch to | ||
225 | promiscuous mode via the Wireless Tool's Monitor mode. While in this | ||
226 | mode, no packets can be sent. | ||
227 | |||
228 | config IPW_QOS | ||
229 | bool "Enable QoS support" | ||
230 | depends on IPW2200 && EXPERIMENTAL | ||
231 | |||
216 | config IPW2200_DEBUG | 232 | config IPW2200_DEBUG |
217 | bool "Enable full debugging output in IPW2200 module." | 233 | bool "Enable full debugging output in IPW2200 module." |
218 | depends on IPW2200 | 234 | depends on IPW2200 |
@@ -239,13 +255,14 @@ config IPW2200_DEBUG | |||
239 | 255 | ||
240 | config AIRO | 256 | config AIRO |
241 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" | 257 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" |
242 | depends on NET_RADIO && ISA_DMA_API && CRYPTO && (PCI || BROKEN) | 258 | depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN) |
259 | select CRYPTO | ||
243 | ---help--- | 260 | ---help--- |
244 | This is the standard Linux driver to support Cisco/Aironet ISA and | 261 | This is the standard Linux driver to support Cisco/Aironet ISA and |
245 | PCI 802.11 wireless cards. | 262 | PCI 802.11 wireless cards. |
246 | It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X | 263 | It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X |
247 | - with or without encryption) as well as card before the Cisco | 264 | - with or without encryption) as well as card before the Cisco |
248 | aquisition (Aironet 4500, Aironet 4800, Aironet 4800B). | 265 | acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). |
249 | 266 | ||
250 | This driver support both the standard Linux Wireless Extensions | 267 | This driver support both the standard Linux Wireless Extensions |
251 | and Cisco proprietary API, so both the Linux Wireless Tools and the | 268 | and Cisco proprietary API, so both the Linux Wireless Tools and the |
@@ -387,13 +404,14 @@ config PCMCIA_SPECTRUM | |||
387 | config AIRO_CS | 404 | config AIRO_CS |
388 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" | 405 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" |
389 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) | 406 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) |
407 | select CRYPTO | ||
390 | ---help--- | 408 | ---help--- |
391 | This is the standard Linux driver to support Cisco/Aironet PCMCIA | 409 | This is the standard Linux driver to support Cisco/Aironet PCMCIA |
392 | 802.11 wireless cards. This driver is the same as the Aironet | 410 | 802.11 wireless cards. This driver is the same as the Aironet |
393 | driver part of the Linux Pcmcia package. | 411 | driver part of the Linux Pcmcia package. |
394 | It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X | 412 | It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X |
395 | - with or without encryption) as well as card before the Cisco | 413 | - with or without encryption) as well as card before the Cisco |
396 | aquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also | 414 | acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also |
397 | supports OEM of Cisco such as the DELL TrueMobile 4800 and Xircom | 415 | supports OEM of Cisco such as the DELL TrueMobile 4800 and Xircom |
398 | 802.11b cards. | 416 | 802.11b cards. |
399 | 417 | ||
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index a4c7ae94614d..864937a409e5 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/in.h> | 36 | #include <linux/in.h> |
37 | #include <linux/bitops.h> | 37 | #include <linux/bitops.h> |
38 | #include <linux/scatterlist.h> | 38 | #include <linux/scatterlist.h> |
39 | #include <linux/crypto.h> | ||
39 | #include <asm/io.h> | 40 | #include <asm/io.h> |
40 | #include <asm/system.h> | 41 | #include <asm/system.h> |
41 | 42 | ||
@@ -87,14 +88,6 @@ static struct pci_driver airo_driver = { | |||
87 | #include <linux/delay.h> | 88 | #include <linux/delay.h> |
88 | #endif | 89 | #endif |
89 | 90 | ||
90 | /* Support Cisco MIC feature */ | ||
91 | #define MICSUPPORT | ||
92 | |||
93 | #if defined(MICSUPPORT) && !defined(CONFIG_CRYPTO) | ||
94 | #warning MIC support requires Crypto API | ||
95 | #undef MICSUPPORT | ||
96 | #endif | ||
97 | |||
98 | /* Hack to do some power saving */ | 91 | /* Hack to do some power saving */ |
99 | #define POWER_ON_DOWN | 92 | #define POWER_ON_DOWN |
100 | 93 | ||
@@ -1118,7 +1111,6 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp); | |||
1118 | static int writerids(struct net_device *dev, aironet_ioctl *comp); | 1111 | static int writerids(struct net_device *dev, aironet_ioctl *comp); |
1119 | static int flashcard(struct net_device *dev, aironet_ioctl *comp); | 1112 | static int flashcard(struct net_device *dev, aironet_ioctl *comp); |
1120 | #endif /* CISCO_EXT */ | 1113 | #endif /* CISCO_EXT */ |
1121 | #ifdef MICSUPPORT | ||
1122 | static void micinit(struct airo_info *ai); | 1114 | static void micinit(struct airo_info *ai); |
1123 | static int micsetup(struct airo_info *ai); | 1115 | static int micsetup(struct airo_info *ai); |
1124 | static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len); | 1116 | static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len); |
@@ -1127,9 +1119,6 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, | |||
1127 | static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi); | 1119 | static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi); |
1128 | static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm); | 1120 | static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm); |
1129 | 1121 | ||
1130 | #include <linux/crypto.h> | ||
1131 | #endif | ||
1132 | |||
1133 | struct airo_info { | 1122 | struct airo_info { |
1134 | struct net_device_stats stats; | 1123 | struct net_device_stats stats; |
1135 | struct net_device *dev; | 1124 | struct net_device *dev; |
@@ -1190,12 +1179,10 @@ struct airo_info { | |||
1190 | unsigned long scan_timestamp; /* Time started to scan */ | 1179 | unsigned long scan_timestamp; /* Time started to scan */ |
1191 | struct iw_spy_data spy_data; | 1180 | struct iw_spy_data spy_data; |
1192 | struct iw_public_data wireless_data; | 1181 | struct iw_public_data wireless_data; |
1193 | #ifdef MICSUPPORT | ||
1194 | /* MIC stuff */ | 1182 | /* MIC stuff */ |
1195 | struct crypto_tfm *tfm; | 1183 | struct crypto_tfm *tfm; |
1196 | mic_module mod[2]; | 1184 | mic_module mod[2]; |
1197 | mic_statistics micstats; | 1185 | mic_statistics micstats; |
1198 | #endif | ||
1199 | HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors | 1186 | HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors |
1200 | HostTxDesc txfids[MPI_MAX_FIDS]; | 1187 | HostTxDesc txfids[MPI_MAX_FIDS]; |
1201 | HostRidDesc config_desc; | 1188 | HostRidDesc config_desc; |
@@ -1229,7 +1216,6 @@ static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime); | |||
1229 | static int flashputbuf(struct airo_info *ai); | 1216 | static int flashputbuf(struct airo_info *ai); |
1230 | static int flashrestart(struct airo_info *ai,struct net_device *dev); | 1217 | static int flashrestart(struct airo_info *ai,struct net_device *dev); |
1231 | 1218 | ||
1232 | #ifdef MICSUPPORT | ||
1233 | /*********************************************************************** | 1219 | /*********************************************************************** |
1234 | * MIC ROUTINES * | 1220 | * MIC ROUTINES * |
1235 | *********************************************************************** | 1221 | *********************************************************************** |
@@ -1686,7 +1672,6 @@ static void emmh32_final(emmh32_context *context, u8 digest[4]) | |||
1686 | digest[2] = (val>>8) & 0xFF; | 1672 | digest[2] = (val>>8) & 0xFF; |
1687 | digest[3] = val & 0xFF; | 1673 | digest[3] = val & 0xFF; |
1688 | } | 1674 | } |
1689 | #endif | ||
1690 | 1675 | ||
1691 | static int readBSSListRid(struct airo_info *ai, int first, | 1676 | static int readBSSListRid(struct airo_info *ai, int first, |
1692 | BSSListRid *list) { | 1677 | BSSListRid *list) { |
@@ -2005,7 +1990,6 @@ static int mpi_send_packet (struct net_device *dev) | |||
2005 | * Firmware automaticly puts 802 header on so | 1990 | * Firmware automaticly puts 802 header on so |
2006 | * we don't need to account for it in the length | 1991 | * we don't need to account for it in the length |
2007 | */ | 1992 | */ |
2008 | #ifdef MICSUPPORT | ||
2009 | if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && | 1993 | if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && |
2010 | (ntohs(((u16 *)buffer)[6]) != 0x888E)) { | 1994 | (ntohs(((u16 *)buffer)[6]) != 0x888E)) { |
2011 | MICBuffer pMic; | 1995 | MICBuffer pMic; |
@@ -2022,9 +2006,7 @@ static int mpi_send_packet (struct net_device *dev) | |||
2022 | memcpy (sendbuf, &pMic, sizeof(pMic)); | 2006 | memcpy (sendbuf, &pMic, sizeof(pMic)); |
2023 | sendbuf += sizeof(pMic); | 2007 | sendbuf += sizeof(pMic); |
2024 | memcpy (sendbuf, buffer, len - sizeof(etherHead)); | 2008 | memcpy (sendbuf, buffer, len - sizeof(etherHead)); |
2025 | } else | 2009 | } else { |
2026 | #endif | ||
2027 | { | ||
2028 | *payloadLen = cpu_to_le16(len - sizeof(etherHead)); | 2010 | *payloadLen = cpu_to_le16(len - sizeof(etherHead)); |
2029 | 2011 | ||
2030 | dev->trans_start = jiffies; | 2012 | dev->trans_start = jiffies; |
@@ -2400,9 +2382,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) | |||
2400 | ai->shared, ai->shared_dma); | 2382 | ai->shared, ai->shared_dma); |
2401 | } | 2383 | } |
2402 | } | 2384 | } |
2403 | #ifdef MICSUPPORT | ||
2404 | crypto_free_tfm(ai->tfm); | 2385 | crypto_free_tfm(ai->tfm); |
2405 | #endif | ||
2406 | del_airo_dev( dev ); | 2386 | del_airo_dev( dev ); |
2407 | free_netdev( dev ); | 2387 | free_netdev( dev ); |
2408 | } | 2388 | } |
@@ -2726,9 +2706,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, | |||
2726 | ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES); | 2706 | ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES); |
2727 | if (ai->thr_pid < 0) | 2707 | if (ai->thr_pid < 0) |
2728 | goto err_out_free; | 2708 | goto err_out_free; |
2729 | #ifdef MICSUPPORT | ||
2730 | ai->tfm = NULL; | 2709 | ai->tfm = NULL; |
2731 | #endif | ||
2732 | rc = add_airo_dev( dev ); | 2710 | rc = add_airo_dev( dev ); |
2733 | if (rc) | 2711 | if (rc) |
2734 | goto err_out_thr; | 2712 | goto err_out_thr; |
@@ -2969,10 +2947,8 @@ static int airo_thread(void *data) { | |||
2969 | airo_read_wireless_stats(ai); | 2947 | airo_read_wireless_stats(ai); |
2970 | else if (test_bit(JOB_PROMISC, &ai->flags)) | 2948 | else if (test_bit(JOB_PROMISC, &ai->flags)) |
2971 | airo_set_promisc(ai); | 2949 | airo_set_promisc(ai); |
2972 | #ifdef MICSUPPORT | ||
2973 | else if (test_bit(JOB_MIC, &ai->flags)) | 2950 | else if (test_bit(JOB_MIC, &ai->flags)) |
2974 | micinit(ai); | 2951 | micinit(ai); |
2975 | #endif | ||
2976 | else if (test_bit(JOB_EVENT, &ai->flags)) | 2952 | else if (test_bit(JOB_EVENT, &ai->flags)) |
2977 | airo_send_event(dev); | 2953 | airo_send_event(dev); |
2978 | else if (test_bit(JOB_AUTOWEP, &ai->flags)) | 2954 | else if (test_bit(JOB_AUTOWEP, &ai->flags)) |
@@ -3010,12 +2986,10 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) | |||
3010 | 2986 | ||
3011 | if ( status & EV_MIC ) { | 2987 | if ( status & EV_MIC ) { |
3012 | OUT4500( apriv, EVACK, EV_MIC ); | 2988 | OUT4500( apriv, EVACK, EV_MIC ); |
3013 | #ifdef MICSUPPORT | ||
3014 | if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) { | 2989 | if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) { |
3015 | set_bit(JOB_MIC, &apriv->flags); | 2990 | set_bit(JOB_MIC, &apriv->flags); |
3016 | wake_up_interruptible(&apriv->thr_wait); | 2991 | wake_up_interruptible(&apriv->thr_wait); |
3017 | } | 2992 | } |
3018 | #endif | ||
3019 | } | 2993 | } |
3020 | if ( status & EV_LINK ) { | 2994 | if ( status & EV_LINK ) { |
3021 | union iwreq_data wrqu; | 2995 | union iwreq_data wrqu; |
@@ -3194,11 +3168,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) | |||
3194 | } | 3168 | } |
3195 | bap_read (apriv, buffer + hdrlen/2, len, BAP0); | 3169 | bap_read (apriv, buffer + hdrlen/2, len, BAP0); |
3196 | } else { | 3170 | } else { |
3197 | #ifdef MICSUPPORT | ||
3198 | MICBuffer micbuf; | 3171 | MICBuffer micbuf; |
3199 | #endif | ||
3200 | bap_read (apriv, buffer, ETH_ALEN*2, BAP0); | 3172 | bap_read (apriv, buffer, ETH_ALEN*2, BAP0); |
3201 | #ifdef MICSUPPORT | ||
3202 | if (apriv->micstats.enabled) { | 3173 | if (apriv->micstats.enabled) { |
3203 | bap_read (apriv,(u16*)&micbuf,sizeof(micbuf),BAP0); | 3174 | bap_read (apriv,(u16*)&micbuf,sizeof(micbuf),BAP0); |
3204 | if (ntohs(micbuf.typelen) > 0x05DC) | 3175 | if (ntohs(micbuf.typelen) > 0x05DC) |
@@ -3211,15 +3182,10 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) | |||
3211 | skb_trim (skb, len + hdrlen); | 3182 | skb_trim (skb, len + hdrlen); |
3212 | } | 3183 | } |
3213 | } | 3184 | } |
3214 | #endif | ||
3215 | bap_read(apriv,buffer+ETH_ALEN,len,BAP0); | 3185 | bap_read(apriv,buffer+ETH_ALEN,len,BAP0); |
3216 | #ifdef MICSUPPORT | ||
3217 | if (decapsulate(apriv,&micbuf,(etherHead*)buffer,len)) { | 3186 | if (decapsulate(apriv,&micbuf,(etherHead*)buffer,len)) { |
3218 | badmic: | 3187 | badmic: |
3219 | dev_kfree_skb_irq (skb); | 3188 | dev_kfree_skb_irq (skb); |
3220 | #else | ||
3221 | if (0) { | ||
3222 | #endif | ||
3223 | badrx: | 3189 | badrx: |
3224 | OUT4500( apriv, EVACK, EV_RX); | 3190 | OUT4500( apriv, EVACK, EV_RX); |
3225 | goto exitrx; | 3191 | goto exitrx; |
@@ -3430,10 +3396,8 @@ static void mpi_receive_802_3(struct airo_info *ai) | |||
3430 | int len = 0; | 3396 | int len = 0; |
3431 | struct sk_buff *skb; | 3397 | struct sk_buff *skb; |
3432 | char *buffer; | 3398 | char *buffer; |
3433 | #ifdef MICSUPPORT | ||
3434 | int off = 0; | 3399 | int off = 0; |
3435 | MICBuffer micbuf; | 3400 | MICBuffer micbuf; |
3436 | #endif | ||
3437 | 3401 | ||
3438 | memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); | 3402 | memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); |
3439 | /* Make sure we got something */ | 3403 | /* Make sure we got something */ |
@@ -3448,7 +3412,6 @@ static void mpi_receive_802_3(struct airo_info *ai) | |||
3448 | goto badrx; | 3412 | goto badrx; |
3449 | } | 3413 | } |
3450 | buffer = skb_put(skb,len); | 3414 | buffer = skb_put(skb,len); |
3451 | #ifdef MICSUPPORT | ||
3452 | memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2); | 3415 | memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2); |
3453 | if (ai->micstats.enabled) { | 3416 | if (ai->micstats.enabled) { |
3454 | memcpy(&micbuf, | 3417 | memcpy(&micbuf, |
@@ -3470,9 +3433,6 @@ badmic: | |||
3470 | dev_kfree_skb_irq (skb); | 3433 | dev_kfree_skb_irq (skb); |
3471 | goto badrx; | 3434 | goto badrx; |
3472 | } | 3435 | } |
3473 | #else | ||
3474 | memcpy(buffer, ai->rxfids[0].virtual_host_addr, len); | ||
3475 | #endif | ||
3476 | #ifdef WIRELESS_SPY | 3436 | #ifdef WIRELESS_SPY |
3477 | if (ai->spy_data.spy_number > 0) { | 3437 | if (ai->spy_data.spy_number > 0) { |
3478 | char *sa; | 3438 | char *sa; |
@@ -3689,13 +3649,11 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) | |||
3689 | ai->config.authType = AUTH_OPEN; | 3649 | ai->config.authType = AUTH_OPEN; |
3690 | ai->config.modulation = MOD_CCK; | 3650 | ai->config.modulation = MOD_CCK; |
3691 | 3651 | ||
3692 | #ifdef MICSUPPORT | ||
3693 | if ((cap_rid.len>=sizeof(cap_rid)) && (cap_rid.extSoftCap&1) && | 3652 | if ((cap_rid.len>=sizeof(cap_rid)) && (cap_rid.extSoftCap&1) && |
3694 | (micsetup(ai) == SUCCESS)) { | 3653 | (micsetup(ai) == SUCCESS)) { |
3695 | ai->config.opmode |= MODE_MIC; | 3654 | ai->config.opmode |= MODE_MIC; |
3696 | set_bit(FLAG_MIC_CAPABLE, &ai->flags); | 3655 | set_bit(FLAG_MIC_CAPABLE, &ai->flags); |
3697 | } | 3656 | } |
3698 | #endif | ||
3699 | 3657 | ||
3700 | /* Save off the MAC */ | 3658 | /* Save off the MAC */ |
3701 | for( i = 0; i < ETH_ALEN; i++ ) { | 3659 | for( i = 0; i < ETH_ALEN; i++ ) { |
@@ -4170,15 +4128,12 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket) | |||
4170 | } | 4128 | } |
4171 | len -= ETH_ALEN * 2; | 4129 | len -= ETH_ALEN * 2; |
4172 | 4130 | ||
4173 | #ifdef MICSUPPORT | ||
4174 | if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && | 4131 | if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && |
4175 | (ntohs(((u16 *)pPacket)[6]) != 0x888E)) { | 4132 | (ntohs(((u16 *)pPacket)[6]) != 0x888E)) { |
4176 | if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS) | 4133 | if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS) |
4177 | return ERROR; | 4134 | return ERROR; |
4178 | miclen = sizeof(pMic); | 4135 | miclen = sizeof(pMic); |
4179 | } | 4136 | } |
4180 | #endif | ||
4181 | |||
4182 | // packet is destination[6], source[6], payload[len-12] | 4137 | // packet is destination[6], source[6], payload[len-12] |
4183 | // write the payload length and dst/src/payload | 4138 | // write the payload length and dst/src/payload |
4184 | if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR; | 4139 | if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR; |
@@ -5081,7 +5036,6 @@ static int set_wep_key(struct airo_info *ai, u16 index, | |||
5081 | wkr.len = sizeof(wkr); | 5036 | wkr.len = sizeof(wkr); |
5082 | wkr.kindex = 0xffff; | 5037 | wkr.kindex = 0xffff; |
5083 | wkr.mac[0] = (char)index; | 5038 | wkr.mac[0] = (char)index; |
5084 | if (perm) printk(KERN_INFO "Setting transmit key to %d\n", index); | ||
5085 | if (perm) ai->defindex = (char)index; | 5039 | if (perm) ai->defindex = (char)index; |
5086 | } else { | 5040 | } else { |
5087 | // We are actually setting the key | 5041 | // We are actually setting the key |
@@ -5090,7 +5044,6 @@ static int set_wep_key(struct airo_info *ai, u16 index, | |||
5090 | wkr.klen = keylen; | 5044 | wkr.klen = keylen; |
5091 | memcpy( wkr.key, key, keylen ); | 5045 | memcpy( wkr.key, key, keylen ); |
5092 | memcpy( wkr.mac, macaddr, ETH_ALEN ); | 5046 | memcpy( wkr.mac, macaddr, ETH_ALEN ); |
5093 | printk(KERN_INFO "Setting key %d\n", index); | ||
5094 | } | 5047 | } |
5095 | 5048 | ||
5096 | if (perm) disable_MAC(ai, lock); | 5049 | if (perm) disable_MAC(ai, lock); |
@@ -5801,11 +5754,13 @@ static int airo_set_wap(struct net_device *dev, | |||
5801 | Cmd cmd; | 5754 | Cmd cmd; |
5802 | Resp rsp; | 5755 | Resp rsp; |
5803 | APListRid APList_rid; | 5756 | APListRid APList_rid; |
5804 | static const unsigned char bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 }; | 5757 | static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; |
5758 | static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; | ||
5805 | 5759 | ||
5806 | if (awrq->sa_family != ARPHRD_ETHER) | 5760 | if (awrq->sa_family != ARPHRD_ETHER) |
5807 | return -EINVAL; | 5761 | return -EINVAL; |
5808 | else if (!memcmp(bcast, awrq->sa_data, ETH_ALEN)) { | 5762 | else if (!memcmp(any, awrq->sa_data, ETH_ALEN) || |
5763 | !memcmp(off, awrq->sa_data, ETH_ALEN)) { | ||
5809 | memset(&cmd, 0, sizeof(cmd)); | 5764 | memset(&cmd, 0, sizeof(cmd)); |
5810 | cmd.cmd=CMD_LOSE_SYNC; | 5765 | cmd.cmd=CMD_LOSE_SYNC; |
5811 | if (down_interruptible(&local->sem)) | 5766 | if (down_interruptible(&local->sem)) |
@@ -6296,6 +6251,272 @@ static int airo_get_encode(struct net_device *dev, | |||
6296 | 6251 | ||
6297 | /*------------------------------------------------------------------*/ | 6252 | /*------------------------------------------------------------------*/ |
6298 | /* | 6253 | /* |
6254 | * Wireless Handler : set extended Encryption parameters | ||
6255 | */ | ||
6256 | static int airo_set_encodeext(struct net_device *dev, | ||
6257 | struct iw_request_info *info, | ||
6258 | union iwreq_data *wrqu, | ||
6259 | char *extra) | ||
6260 | { | ||
6261 | struct airo_info *local = dev->priv; | ||
6262 | struct iw_point *encoding = &wrqu->encoding; | ||
6263 | struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; | ||
6264 | CapabilityRid cap_rid; /* Card capability info */ | ||
6265 | int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 ); | ||
6266 | u16 currentAuthType = local->config.authType; | ||
6267 | int idx, key_len, alg = ext->alg, set_key = 1; | ||
6268 | wep_key_t key; | ||
6269 | |||
6270 | /* Is WEP supported ? */ | ||
6271 | readCapabilityRid(local, &cap_rid, 1); | ||
6272 | /* Older firmware doesn't support this... | ||
6273 | if(!(cap_rid.softCap & 2)) { | ||
6274 | return -EOPNOTSUPP; | ||
6275 | } */ | ||
6276 | readConfigRid(local, 1); | ||
6277 | |||
6278 | /* Determine and validate the key index */ | ||
6279 | idx = encoding->flags & IW_ENCODE_INDEX; | ||
6280 | if (idx) { | ||
6281 | if (idx < 1 || idx > ((cap_rid.softCap & 0x80) ? 4:1)) | ||
6282 | return -EINVAL; | ||
6283 | idx--; | ||
6284 | } else | ||
6285 | idx = get_wep_key(local, 0xffff); | ||
6286 | |||
6287 | if (encoding->flags & IW_ENCODE_DISABLED) | ||
6288 | alg = IW_ENCODE_ALG_NONE; | ||
6289 | |||
6290 | if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { | ||
6291 | /* Only set transmit key index here, actual | ||
6292 | * key is set below if needed. | ||
6293 | */ | ||
6294 | set_wep_key(local, idx, NULL, 0, perm, 1); | ||
6295 | set_key = ext->key_len > 0 ? 1 : 0; | ||
6296 | } | ||
6297 | |||
6298 | if (set_key) { | ||
6299 | /* Set the requested key first */ | ||
6300 | memset(key.key, 0, MAX_KEY_SIZE); | ||
6301 | switch (alg) { | ||
6302 | case IW_ENCODE_ALG_NONE: | ||
6303 | key.len = 0; | ||
6304 | break; | ||
6305 | case IW_ENCODE_ALG_WEP: | ||
6306 | if (ext->key_len > MIN_KEY_SIZE) { | ||
6307 | key.len = MAX_KEY_SIZE; | ||
6308 | } else if (ext->key_len > 0) { | ||
6309 | key.len = MIN_KEY_SIZE; | ||
6310 | } else { | ||
6311 | return -EINVAL; | ||
6312 | } | ||
6313 | key_len = min (ext->key_len, key.len); | ||
6314 | memcpy(key.key, ext->key, key_len); | ||
6315 | break; | ||
6316 | default: | ||
6317 | return -EINVAL; | ||
6318 | } | ||
6319 | /* Send the key to the card */ | ||
6320 | set_wep_key(local, idx, key.key, key.len, perm, 1); | ||
6321 | } | ||
6322 | |||
6323 | /* Read the flags */ | ||
6324 | if(encoding->flags & IW_ENCODE_DISABLED) | ||
6325 | local->config.authType = AUTH_OPEN; // disable encryption | ||
6326 | if(encoding->flags & IW_ENCODE_RESTRICTED) | ||
6327 | local->config.authType = AUTH_SHAREDKEY; // Only Both | ||
6328 | if(encoding->flags & IW_ENCODE_OPEN) | ||
6329 | local->config.authType = AUTH_ENCRYPT; // Only Wep | ||
6330 | /* Commit the changes to flags if needed */ | ||
6331 | if (local->config.authType != currentAuthType) | ||
6332 | set_bit (FLAG_COMMIT, &local->flags); | ||
6333 | |||
6334 | return -EINPROGRESS; | ||
6335 | } | ||
6336 | |||
6337 | |||
6338 | /*------------------------------------------------------------------*/ | ||
6339 | /* | ||
6340 | * Wireless Handler : get extended Encryption parameters | ||
6341 | */ | ||
6342 | static int airo_get_encodeext(struct net_device *dev, | ||
6343 | struct iw_request_info *info, | ||
6344 | union iwreq_data *wrqu, | ||
6345 | char *extra) | ||
6346 | { | ||
6347 | struct airo_info *local = dev->priv; | ||
6348 | struct iw_point *encoding = &wrqu->encoding; | ||
6349 | struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; | ||
6350 | CapabilityRid cap_rid; /* Card capability info */ | ||
6351 | int idx, max_key_len; | ||
6352 | |||
6353 | /* Is it supported ? */ | ||
6354 | readCapabilityRid(local, &cap_rid, 1); | ||
6355 | if(!(cap_rid.softCap & 2)) { | ||
6356 | return -EOPNOTSUPP; | ||
6357 | } | ||
6358 | readConfigRid(local, 1); | ||
6359 | |||
6360 | max_key_len = encoding->length - sizeof(*ext); | ||
6361 | if (max_key_len < 0) | ||
6362 | return -EINVAL; | ||
6363 | |||
6364 | idx = encoding->flags & IW_ENCODE_INDEX; | ||
6365 | if (idx) { | ||
6366 | if (idx < 1 || idx > ((cap_rid.softCap & 0x80) ? 4:1)) | ||
6367 | return -EINVAL; | ||
6368 | idx--; | ||
6369 | } else | ||
6370 | idx = get_wep_key(local, 0xffff); | ||
6371 | |||
6372 | encoding->flags = idx + 1; | ||
6373 | memset(ext, 0, sizeof(*ext)); | ||
6374 | |||
6375 | /* Check encryption mode */ | ||
6376 | switch(local->config.authType) { | ||
6377 | case AUTH_ENCRYPT: | ||
6378 | encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED; | ||
6379 | break; | ||
6380 | case AUTH_SHAREDKEY: | ||
6381 | encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED; | ||
6382 | break; | ||
6383 | default: | ||
6384 | case AUTH_OPEN: | ||
6385 | encoding->flags = IW_ENCODE_ALG_NONE | IW_ENCODE_DISABLED; | ||
6386 | break; | ||
6387 | } | ||
6388 | /* We can't return the key, so set the proper flag and return zero */ | ||
6389 | encoding->flags |= IW_ENCODE_NOKEY; | ||
6390 | memset(extra, 0, 16); | ||
6391 | |||
6392 | /* Copy the key to the user buffer */ | ||
6393 | ext->key_len = get_wep_key(local, idx); | ||
6394 | if (ext->key_len > 16) { | ||
6395 | ext->key_len=0; | ||
6396 | } | ||
6397 | |||
6398 | return 0; | ||
6399 | } | ||
6400 | |||
6401 | |||
6402 | /*------------------------------------------------------------------*/ | ||
6403 | /* | ||
6404 | * Wireless Handler : set extended authentication parameters | ||
6405 | */ | ||
6406 | static int airo_set_auth(struct net_device *dev, | ||
6407 | struct iw_request_info *info, | ||
6408 | union iwreq_data *wrqu, char *extra) | ||
6409 | { | ||
6410 | struct airo_info *local = dev->priv; | ||
6411 | struct iw_param *param = &wrqu->param; | ||
6412 | u16 currentAuthType = local->config.authType; | ||
6413 | |||
6414 | switch (param->flags & IW_AUTH_INDEX) { | ||
6415 | case IW_AUTH_WPA_VERSION: | ||
6416 | case IW_AUTH_CIPHER_PAIRWISE: | ||
6417 | case IW_AUTH_CIPHER_GROUP: | ||
6418 | case IW_AUTH_KEY_MGMT: | ||
6419 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
6420 | case IW_AUTH_PRIVACY_INVOKED: | ||
6421 | /* | ||
6422 | * airo does not use these parameters | ||
6423 | */ | ||
6424 | break; | ||
6425 | |||
6426 | case IW_AUTH_DROP_UNENCRYPTED: | ||
6427 | if (param->value) { | ||
6428 | /* Only change auth type if unencrypted */ | ||
6429 | if (currentAuthType == AUTH_OPEN) | ||
6430 | local->config.authType = AUTH_ENCRYPT; | ||
6431 | } else { | ||
6432 | local->config.authType = AUTH_OPEN; | ||
6433 | } | ||
6434 | |||
6435 | /* Commit the changes to flags if needed */ | ||
6436 | if (local->config.authType != currentAuthType) | ||
6437 | set_bit (FLAG_COMMIT, &local->flags); | ||
6438 | break; | ||
6439 | |||
6440 | case IW_AUTH_80211_AUTH_ALG: { | ||
6441 | /* FIXME: What about AUTH_OPEN? This API seems to | ||
6442 | * disallow setting our auth to AUTH_OPEN. | ||
6443 | */ | ||
6444 | if (param->value & IW_AUTH_ALG_SHARED_KEY) { | ||
6445 | local->config.authType = AUTH_SHAREDKEY; | ||
6446 | } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { | ||
6447 | local->config.authType = AUTH_ENCRYPT; | ||
6448 | } else | ||
6449 | return -EINVAL; | ||
6450 | break; | ||
6451 | |||
6452 | /* Commit the changes to flags if needed */ | ||
6453 | if (local->config.authType != currentAuthType) | ||
6454 | set_bit (FLAG_COMMIT, &local->flags); | ||
6455 | } | ||
6456 | |||
6457 | case IW_AUTH_WPA_ENABLED: | ||
6458 | /* Silently accept disable of WPA */ | ||
6459 | if (param->value > 0) | ||
6460 | return -EOPNOTSUPP; | ||
6461 | break; | ||
6462 | |||
6463 | default: | ||
6464 | return -EOPNOTSUPP; | ||
6465 | } | ||
6466 | return -EINPROGRESS; | ||
6467 | } | ||
6468 | |||
6469 | |||
6470 | /*------------------------------------------------------------------*/ | ||
6471 | /* | ||
6472 | * Wireless Handler : get extended authentication parameters | ||
6473 | */ | ||
6474 | static int airo_get_auth(struct net_device *dev, | ||
6475 | struct iw_request_info *info, | ||
6476 | union iwreq_data *wrqu, char *extra) | ||
6477 | { | ||
6478 | struct airo_info *local = dev->priv; | ||
6479 | struct iw_param *param = &wrqu->param; | ||
6480 | u16 currentAuthType = local->config.authType; | ||
6481 | |||
6482 | switch (param->flags & IW_AUTH_INDEX) { | ||
6483 | case IW_AUTH_DROP_UNENCRYPTED: | ||
6484 | switch (currentAuthType) { | ||
6485 | case AUTH_SHAREDKEY: | ||
6486 | case AUTH_ENCRYPT: | ||
6487 | param->value = 1; | ||
6488 | break; | ||
6489 | default: | ||
6490 | param->value = 0; | ||
6491 | break; | ||
6492 | } | ||
6493 | break; | ||
6494 | |||
6495 | case IW_AUTH_80211_AUTH_ALG: | ||
6496 | switch (currentAuthType) { | ||
6497 | case AUTH_SHAREDKEY: | ||
6498 | param->value = IW_AUTH_ALG_SHARED_KEY; | ||
6499 | break; | ||
6500 | case AUTH_ENCRYPT: | ||
6501 | default: | ||
6502 | param->value = IW_AUTH_ALG_OPEN_SYSTEM; | ||
6503 | break; | ||
6504 | } | ||
6505 | break; | ||
6506 | |||
6507 | case IW_AUTH_WPA_ENABLED: | ||
6508 | param->value = 0; | ||
6509 | break; | ||
6510 | |||
6511 | default: | ||
6512 | return -EOPNOTSUPP; | ||
6513 | } | ||
6514 | return 0; | ||
6515 | } | ||
6516 | |||
6517 | |||
6518 | /*------------------------------------------------------------------*/ | ||
6519 | /* | ||
6299 | * Wireless Handler : set Tx-Power | 6520 | * Wireless Handler : set Tx-Power |
6300 | */ | 6521 | */ |
6301 | static int airo_set_txpow(struct net_device *dev, | 6522 | static int airo_set_txpow(struct net_device *dev, |
@@ -7050,6 +7271,15 @@ static const iw_handler airo_handler[] = | |||
7050 | (iw_handler) airo_get_encode, /* SIOCGIWENCODE */ | 7271 | (iw_handler) airo_get_encode, /* SIOCGIWENCODE */ |
7051 | (iw_handler) airo_set_power, /* SIOCSIWPOWER */ | 7272 | (iw_handler) airo_set_power, /* SIOCSIWPOWER */ |
7052 | (iw_handler) airo_get_power, /* SIOCGIWPOWER */ | 7273 | (iw_handler) airo_get_power, /* SIOCGIWPOWER */ |
7274 | (iw_handler) NULL, /* -- hole -- */ | ||
7275 | (iw_handler) NULL, /* -- hole -- */ | ||
7276 | (iw_handler) NULL, /* SIOCSIWGENIE */ | ||
7277 | (iw_handler) NULL, /* SIOCGIWGENIE */ | ||
7278 | (iw_handler) airo_set_auth, /* SIOCSIWAUTH */ | ||
7279 | (iw_handler) airo_get_auth, /* SIOCGIWAUTH */ | ||
7280 | (iw_handler) airo_set_encodeext, /* SIOCSIWENCODEEXT */ | ||
7281 | (iw_handler) airo_get_encodeext, /* SIOCGIWENCODEEXT */ | ||
7282 | (iw_handler) NULL, /* SIOCSIWPMKSA */ | ||
7053 | }; | 7283 | }; |
7054 | 7284 | ||
7055 | /* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here. | 7285 | /* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here. |
@@ -7270,13 +7500,11 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { | |||
7270 | case AIROGSTAT: ridcode = RID_STATUS; break; | 7500 | case AIROGSTAT: ridcode = RID_STATUS; break; |
7271 | case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; | 7501 | case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; |
7272 | case AIROGSTATSC32: ridcode = RID_STATS; break; | 7502 | case AIROGSTATSC32: ridcode = RID_STATS; break; |
7273 | #ifdef MICSUPPORT | ||
7274 | case AIROGMICSTATS: | 7503 | case AIROGMICSTATS: |
7275 | if (copy_to_user(comp->data, &ai->micstats, | 7504 | if (copy_to_user(comp->data, &ai->micstats, |
7276 | min((int)comp->len,(int)sizeof(ai->micstats)))) | 7505 | min((int)comp->len,(int)sizeof(ai->micstats)))) |
7277 | return -EFAULT; | 7506 | return -EFAULT; |
7278 | return 0; | 7507 | return 0; |
7279 | #endif | ||
7280 | case AIRORRID: ridcode = comp->ridnum; break; | 7508 | case AIRORRID: ridcode = comp->ridnum; break; |
7281 | default: | 7509 | default: |
7282 | return -EINVAL; | 7510 | return -EINVAL; |
@@ -7308,9 +7536,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { | |||
7308 | static int writerids(struct net_device *dev, aironet_ioctl *comp) { | 7536 | static int writerids(struct net_device *dev, aironet_ioctl *comp) { |
7309 | struct airo_info *ai = dev->priv; | 7537 | struct airo_info *ai = dev->priv; |
7310 | int ridcode; | 7538 | int ridcode; |
7311 | #ifdef MICSUPPORT | ||
7312 | int enabled; | 7539 | int enabled; |
7313 | #endif | ||
7314 | Resp rsp; | 7540 | Resp rsp; |
7315 | static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); | 7541 | static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); |
7316 | unsigned char *iobuf; | 7542 | unsigned char *iobuf; |
@@ -7367,11 +7593,9 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { | |||
7367 | 7593 | ||
7368 | PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1); | 7594 | PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1); |
7369 | 7595 | ||
7370 | #ifdef MICSUPPORT | ||
7371 | enabled = ai->micstats.enabled; | 7596 | enabled = ai->micstats.enabled; |
7372 | memset(&ai->micstats,0,sizeof(ai->micstats)); | 7597 | memset(&ai->micstats,0,sizeof(ai->micstats)); |
7373 | ai->micstats.enabled = enabled; | 7598 | ai->micstats.enabled = enabled; |
7374 | #endif | ||
7375 | 7599 | ||
7376 | if (copy_to_user(comp->data, iobuf, | 7600 | if (copy_to_user(comp->data, iobuf, |
7377 | min((int)comp->len, (int)RIDSIZE))) { | 7601 | min((int)comp->len, (int)RIDSIZE))) { |
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index dfc24016ba81..87afa6878f26 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c | |||
@@ -137,44 +137,6 @@ static struct { | |||
137 | #define MAC_BOOT_COMPLETE 0x0010 // MAC boot has been completed | 137 | #define MAC_BOOT_COMPLETE 0x0010 // MAC boot has been completed |
138 | #define MAC_INIT_OK 0x0002 // MAC boot has been completed | 138 | #define MAC_INIT_OK 0x0002 // MAC boot has been completed |
139 | 139 | ||
140 | #define C80211_SUBTYPE_MGMT_ASS_REQUEST 0x00 | ||
141 | #define C80211_SUBTYPE_MGMT_ASS_RESPONSE 0x10 | ||
142 | #define C80211_SUBTYPE_MGMT_REASS_REQUEST 0x20 | ||
143 | #define C80211_SUBTYPE_MGMT_REASS_RESPONSE 0x30 | ||
144 | #define C80211_SUBTYPE_MGMT_ProbeRequest 0x40 | ||
145 | #define C80211_SUBTYPE_MGMT_ProbeResponse 0x50 | ||
146 | #define C80211_SUBTYPE_MGMT_BEACON 0x80 | ||
147 | #define C80211_SUBTYPE_MGMT_ATIM 0x90 | ||
148 | #define C80211_SUBTYPE_MGMT_DISASSOSIATION 0xA0 | ||
149 | #define C80211_SUBTYPE_MGMT_Authentication 0xB0 | ||
150 | #define C80211_SUBTYPE_MGMT_Deauthentication 0xC0 | ||
151 | |||
152 | #define C80211_MGMT_AAN_OPENSYSTEM 0x0000 | ||
153 | #define C80211_MGMT_AAN_SHAREDKEY 0x0001 | ||
154 | |||
155 | #define C80211_MGMT_CAPABILITY_ESS 0x0001 // see 802.11 p.58 | ||
156 | #define C80211_MGMT_CAPABILITY_IBSS 0x0002 // - " - | ||
157 | #define C80211_MGMT_CAPABILITY_CFPollable 0x0004 // - " - | ||
158 | #define C80211_MGMT_CAPABILITY_CFPollRequest 0x0008 // - " - | ||
159 | #define C80211_MGMT_CAPABILITY_Privacy 0x0010 // - " - | ||
160 | |||
161 | #define C80211_MGMT_SC_Success 0 | ||
162 | #define C80211_MGMT_SC_Unspecified 1 | ||
163 | #define C80211_MGMT_SC_SupportCapabilities 10 | ||
164 | #define C80211_MGMT_SC_ReassDenied 11 | ||
165 | #define C80211_MGMT_SC_AssDenied 12 | ||
166 | #define C80211_MGMT_SC_AuthAlgNotSupported 13 | ||
167 | #define C80211_MGMT_SC_AuthTransSeqNumError 14 | ||
168 | #define C80211_MGMT_SC_AuthRejectChallenge 15 | ||
169 | #define C80211_MGMT_SC_AuthRejectTimeout 16 | ||
170 | #define C80211_MGMT_SC_AssDeniedHandleAP 17 | ||
171 | #define C80211_MGMT_SC_AssDeniedBSSRate 18 | ||
172 | |||
173 | #define C80211_MGMT_ElementID_SSID 0 | ||
174 | #define C80211_MGMT_ElementID_SupportedRates 1 | ||
175 | #define C80211_MGMT_ElementID_ChallengeText 16 | ||
176 | #define C80211_MGMT_CAPABILITY_ShortPreamble 0x0020 | ||
177 | |||
178 | #define MIB_MAX_DATA_BYTES 212 | 140 | #define MIB_MAX_DATA_BYTES 212 |
179 | #define MIB_HEADER_SIZE 4 /* first four fields */ | 141 | #define MIB_HEADER_SIZE 4 /* first four fields */ |
180 | 142 | ||
@@ -2835,7 +2797,7 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, | |||
2835 | u8 channel) | 2797 | u8 channel) |
2836 | { | 2798 | { |
2837 | int rejoin = 0; | 2799 | int rejoin = 0; |
2838 | int new = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? | 2800 | int new = capability & MFIE_TYPE_POWER_CONSTRAINT ? |
2839 | SHORT_PREAMBLE : LONG_PREAMBLE; | 2801 | SHORT_PREAMBLE : LONG_PREAMBLE; |
2840 | 2802 | ||
2841 | if (priv->preamble != new) { | 2803 | if (priv->preamble != new) { |
@@ -2921,11 +2883,11 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc) | |||
2921 | memcpy(header.addr2, priv->dev->dev_addr, 6); | 2883 | memcpy(header.addr2, priv->dev->dev_addr, 6); |
2922 | memcpy(header.addr3, priv->CurrentBSSID, 6); | 2884 | memcpy(header.addr3, priv->CurrentBSSID, 6); |
2923 | 2885 | ||
2924 | body.capability = cpu_to_le16(C80211_MGMT_CAPABILITY_ESS); | 2886 | body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS); |
2925 | if (priv->wep_is_on) | 2887 | if (priv->wep_is_on) |
2926 | body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_Privacy); | 2888 | body.capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY); |
2927 | if (priv->preamble == SHORT_PREAMBLE) | 2889 | if (priv->preamble == SHORT_PREAMBLE) |
2928 | body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_ShortPreamble); | 2890 | body.capability |= cpu_to_le16(MFIE_TYPE_POWER_CONSTRAINT); |
2929 | 2891 | ||
2930 | body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period); | 2892 | body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period); |
2931 | 2893 | ||
@@ -2939,10 +2901,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc) | |||
2939 | bodysize = 12 + priv->SSID_size; | 2901 | bodysize = 12 + priv->SSID_size; |
2940 | } | 2902 | } |
2941 | 2903 | ||
2942 | ssid_el_p[0] = C80211_MGMT_ElementID_SSID; | 2904 | ssid_el_p[0] = MFIE_TYPE_SSID; |
2943 | ssid_el_p[1] = priv->SSID_size; | 2905 | ssid_el_p[1] = priv->SSID_size; |
2944 | memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size); | 2906 | memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size); |
2945 | ssid_el_p[2 + priv->SSID_size] = C80211_MGMT_ElementID_SupportedRates; | 2907 | ssid_el_p[2 + priv->SSID_size] = MFIE_TYPE_RATES; |
2946 | ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */ | 2908 | ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */ |
2947 | memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4); | 2909 | memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4); |
2948 | 2910 | ||
@@ -3004,7 +2966,7 @@ static void store_bss_info(struct atmel_private *priv, | |||
3004 | u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len, | 2966 | u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len, |
3005 | u8 *ssid, int is_beacon) | 2967 | u8 *ssid, int is_beacon) |
3006 | { | 2968 | { |
3007 | u8 *bss = capability & C80211_MGMT_CAPABILITY_ESS ? header->addr2 : header->addr3; | 2969 | u8 *bss = capability & WLAN_CAPABILITY_ESS ? header->addr2 : header->addr3; |
3008 | int i, index; | 2970 | int i, index; |
3009 | 2971 | ||
3010 | for (index = -1, i = 0; i < priv->BSS_list_entries; i++) | 2972 | for (index = -1, i = 0; i < priv->BSS_list_entries; i++) |
@@ -3030,16 +2992,16 @@ static void store_bss_info(struct atmel_private *priv, | |||
3030 | 2992 | ||
3031 | priv->BSSinfo[index].channel = channel; | 2993 | priv->BSSinfo[index].channel = channel; |
3032 | priv->BSSinfo[index].beacon_period = beacon_period; | 2994 | priv->BSSinfo[index].beacon_period = beacon_period; |
3033 | priv->BSSinfo[index].UsingWEP = capability & C80211_MGMT_CAPABILITY_Privacy; | 2995 | priv->BSSinfo[index].UsingWEP = capability & WLAN_CAPABILITY_PRIVACY; |
3034 | memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len); | 2996 | memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len); |
3035 | priv->BSSinfo[index].SSIDsize = ssid_len; | 2997 | priv->BSSinfo[index].SSIDsize = ssid_len; |
3036 | 2998 | ||
3037 | if (capability & C80211_MGMT_CAPABILITY_IBSS) | 2999 | if (capability & WLAN_CAPABILITY_IBSS) |
3038 | priv->BSSinfo[index].BSStype = IW_MODE_ADHOC; | 3000 | priv->BSSinfo[index].BSStype = IW_MODE_ADHOC; |
3039 | else if (capability & C80211_MGMT_CAPABILITY_ESS) | 3001 | else if (capability & WLAN_CAPABILITY_ESS) |
3040 | priv->BSSinfo[index].BSStype =IW_MODE_INFRA; | 3002 | priv->BSSinfo[index].BSStype =IW_MODE_INFRA; |
3041 | 3003 | ||
3042 | priv->BSSinfo[index].preamble = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? | 3004 | priv->BSSinfo[index].preamble = capability & MFIE_TYPE_POWER_CONSTRAINT ? |
3043 | SHORT_PREAMBLE : LONG_PREAMBLE; | 3005 | SHORT_PREAMBLE : LONG_PREAMBLE; |
3044 | } | 3006 | } |
3045 | 3007 | ||
@@ -3050,7 +3012,7 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) | |||
3050 | u16 trans_seq_no = le16_to_cpu(auth->trans_seq); | 3012 | u16 trans_seq_no = le16_to_cpu(auth->trans_seq); |
3051 | u16 system = le16_to_cpu(auth->alg); | 3013 | u16 system = le16_to_cpu(auth->alg); |
3052 | 3014 | ||
3053 | if (status == C80211_MGMT_SC_Success && !priv->wep_is_on) { | 3015 | if (status == WLAN_STATUS_SUCCESS && !priv->wep_is_on) { |
3054 | /* no WEP */ | 3016 | /* no WEP */ |
3055 | if (priv->station_was_associated) { | 3017 | if (priv->station_was_associated) { |
3056 | atmel_enter_state(priv, STATION_STATE_REASSOCIATING); | 3018 | atmel_enter_state(priv, STATION_STATE_REASSOCIATING); |
@@ -3063,19 +3025,19 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) | |||
3063 | } | 3025 | } |
3064 | } | 3026 | } |
3065 | 3027 | ||
3066 | if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { | 3028 | if (status == WLAN_STATUS_SUCCESS && priv->wep_is_on) { |
3067 | int should_associate = 0; | 3029 | int should_associate = 0; |
3068 | /* WEP */ | 3030 | /* WEP */ |
3069 | if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) | 3031 | if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) |
3070 | return; | 3032 | return; |
3071 | 3033 | ||
3072 | if (system == C80211_MGMT_AAN_OPENSYSTEM) { | 3034 | if (system == WLAN_AUTH_OPEN) { |
3073 | if (trans_seq_no == 0x0002) { | 3035 | if (trans_seq_no == 0x0002) { |
3074 | should_associate = 1; | 3036 | should_associate = 1; |
3075 | } | 3037 | } |
3076 | } else if (system == C80211_MGMT_AAN_SHAREDKEY) { | 3038 | } else if (system == WLAN_AUTH_SHARED_KEY) { |
3077 | if (trans_seq_no == 0x0002 && | 3039 | if (trans_seq_no == 0x0002 && |
3078 | auth->el_id == C80211_MGMT_ElementID_ChallengeText) { | 3040 | auth->el_id == MFIE_TYPE_CHALLENGE) { |
3079 | send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); | 3041 | send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); |
3080 | return; | 3042 | return; |
3081 | } else if (trans_seq_no == 0x0004) { | 3043 | } else if (trans_seq_no == 0x0004) { |
@@ -3140,8 +3102,8 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype) | |||
3140 | if (frame_len < 8 + rates_len) | 3102 | if (frame_len < 8 + rates_len) |
3141 | return; | 3103 | return; |
3142 | 3104 | ||
3143 | if (status == C80211_MGMT_SC_Success) { | 3105 | if (status == WLAN_STATUS_SUCCESS) { |
3144 | if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE) | 3106 | if (subtype == IEEE80211_STYPE_ASSOC_RESP) |
3145 | priv->AssociationRequestRetryCnt = 0; | 3107 | priv->AssociationRequestRetryCnt = 0; |
3146 | else | 3108 | else |
3147 | priv->ReAssociationRequestRetryCnt = 0; | 3109 | priv->ReAssociationRequestRetryCnt = 0; |
@@ -3178,9 +3140,9 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype) | |||
3178 | return; | 3140 | return; |
3179 | } | 3141 | } |
3180 | 3142 | ||
3181 | if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE && | 3143 | if (subtype == IEEE80211_STYPE_ASSOC_RESP && |
3182 | status != C80211_MGMT_SC_AssDeniedBSSRate && | 3144 | status != WLAN_STATUS_ASSOC_DENIED_RATES && |
3183 | status != C80211_MGMT_SC_SupportCapabilities && | 3145 | status != WLAN_STATUS_CAPS_UNSUPPORTED && |
3184 | priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { | 3146 | priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { |
3185 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3147 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3186 | priv->AssociationRequestRetryCnt++; | 3148 | priv->AssociationRequestRetryCnt++; |
@@ -3188,9 +3150,9 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype) | |||
3188 | return; | 3150 | return; |
3189 | } | 3151 | } |
3190 | 3152 | ||
3191 | if (subtype == C80211_SUBTYPE_MGMT_REASS_RESPONSE && | 3153 | if (subtype == IEEE80211_STYPE_REASSOC_RESP && |
3192 | status != C80211_MGMT_SC_AssDeniedBSSRate && | 3154 | status != WLAN_STATUS_ASSOC_DENIED_RATES && |
3193 | status != C80211_MGMT_SC_SupportCapabilities && | 3155 | status != WLAN_STATUS_CAPS_UNSUPPORTED && |
3194 | priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { | 3156 | priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { |
3195 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3157 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3196 | priv->ReAssociationRequestRetryCnt++; | 3158 | priv->ReAssociationRequestRetryCnt++; |
@@ -3325,8 +3287,8 @@ static void atmel_management_frame(struct atmel_private *priv, | |||
3325 | 3287 | ||
3326 | subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE; | 3288 | subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE; |
3327 | switch (subtype) { | 3289 | switch (subtype) { |
3328 | case C80211_SUBTYPE_MGMT_BEACON: | 3290 | case IEEE80211_STYPE_BEACON: |
3329 | case C80211_SUBTYPE_MGMT_ProbeResponse: | 3291 | case IEEE80211_STYPE_PROBE_RESP: |
3330 | 3292 | ||
3331 | /* beacon frame has multiple variable-length fields - | 3293 | /* beacon frame has multiple variable-length fields - |
3332 | never let an engineer loose with a data structure design. */ | 3294 | never let an engineer loose with a data structure design. */ |
@@ -3384,19 +3346,19 @@ static void atmel_management_frame(struct atmel_private *priv, | |||
3384 | beacon_interval, channel, rssi, | 3346 | beacon_interval, channel, rssi, |
3385 | ssid_length, | 3347 | ssid_length, |
3386 | &beacon->rates_el_id, | 3348 | &beacon->rates_el_id, |
3387 | subtype == C80211_SUBTYPE_MGMT_BEACON); | 3349 | subtype == IEEE80211_STYPE_BEACON); |
3388 | } | 3350 | } |
3389 | break; | 3351 | break; |
3390 | 3352 | ||
3391 | case C80211_SUBTYPE_MGMT_Authentication: | 3353 | case IEEE80211_STYPE_AUTH: |
3392 | 3354 | ||
3393 | if (priv->station_state == STATION_STATE_AUTHENTICATING) | 3355 | if (priv->station_state == STATION_STATE_AUTHENTICATING) |
3394 | authenticate(priv, frame_len); | 3356 | authenticate(priv, frame_len); |
3395 | 3357 | ||
3396 | break; | 3358 | break; |
3397 | 3359 | ||
3398 | case C80211_SUBTYPE_MGMT_ASS_RESPONSE: | 3360 | case IEEE80211_STYPE_ASSOC_RESP: |
3399 | case C80211_SUBTYPE_MGMT_REASS_RESPONSE: | 3361 | case IEEE80211_STYPE_REASSOC_RESP: |
3400 | 3362 | ||
3401 | if (priv->station_state == STATION_STATE_ASSOCIATING || | 3363 | if (priv->station_state == STATION_STATE_ASSOCIATING || |
3402 | priv->station_state == STATION_STATE_REASSOCIATING) | 3364 | priv->station_state == STATION_STATE_REASSOCIATING) |
@@ -3404,7 +3366,7 @@ static void atmel_management_frame(struct atmel_private *priv, | |||
3404 | 3366 | ||
3405 | break; | 3367 | break; |
3406 | 3368 | ||
3407 | case C80211_SUBTYPE_MGMT_DISASSOSIATION: | 3369 | case IEEE80211_STYPE_DISASSOC: |
3408 | if (priv->station_is_associated && | 3370 | if (priv->station_is_associated && |
3409 | priv->operating_mode == IW_MODE_INFRA && | 3371 | priv->operating_mode == IW_MODE_INFRA && |
3410 | is_frame_from_current_bss(priv, header)) { | 3372 | is_frame_from_current_bss(priv, header)) { |
@@ -3417,7 +3379,7 @@ static void atmel_management_frame(struct atmel_private *priv, | |||
3417 | 3379 | ||
3418 | break; | 3380 | break; |
3419 | 3381 | ||
3420 | case C80211_SUBTYPE_MGMT_Deauthentication: | 3382 | case IEEE80211_STYPE_DEAUTH: |
3421 | if (priv->operating_mode == IW_MODE_INFRA && | 3383 | if (priv->operating_mode == IW_MODE_INFRA && |
3422 | is_frame_from_current_bss(priv, header)) { | 3384 | is_frame_from_current_bss(priv, header)) { |
3423 | priv->station_was_associated = 0; | 3385 | priv->station_was_associated = 0; |
@@ -3453,12 +3415,12 @@ static void atmel_management_timer(u_long a) | |||
3453 | priv->AuthenticationRequestRetryCnt = 0; | 3415 | priv->AuthenticationRequestRetryCnt = 0; |
3454 | restart_search(priv); | 3416 | restart_search(priv); |
3455 | } else { | 3417 | } else { |
3456 | int auth = C80211_MGMT_AAN_OPENSYSTEM; | 3418 | int auth = WLAN_AUTH_OPEN; |
3457 | priv->AuthenticationRequestRetryCnt++; | 3419 | priv->AuthenticationRequestRetryCnt++; |
3458 | priv->CurrentAuthentTransactionSeqNum = 0x0001; | 3420 | priv->CurrentAuthentTransactionSeqNum = 0x0001; |
3459 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3421 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3460 | if (priv->wep_is_on && priv->exclude_unencrypted) | 3422 | if (priv->wep_is_on && priv->exclude_unencrypted) |
3461 | auth = C80211_MGMT_AAN_SHAREDKEY; | 3423 | auth = WLAN_AUTH_SHARED_KEY; |
3462 | send_authentication_request(priv, auth, NULL, 0); | 3424 | send_authentication_request(priv, auth, NULL, 0); |
3463 | } | 3425 | } |
3464 | break; | 3426 | break; |
@@ -3558,14 +3520,14 @@ static void atmel_command_irq(struct atmel_private *priv) | |||
3558 | priv->station_was_associated = priv->station_is_associated; | 3520 | priv->station_was_associated = priv->station_is_associated; |
3559 | atmel_enter_state(priv, STATION_STATE_READY); | 3521 | atmel_enter_state(priv, STATION_STATE_READY); |
3560 | } else { | 3522 | } else { |
3561 | int auth = C80211_MGMT_AAN_OPENSYSTEM; | 3523 | int auth = WLAN_AUTH_OPEN; |
3562 | priv->AuthenticationRequestRetryCnt = 0; | 3524 | priv->AuthenticationRequestRetryCnt = 0; |
3563 | atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); | 3525 | atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); |
3564 | 3526 | ||
3565 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3527 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3566 | priv->CurrentAuthentTransactionSeqNum = 0x0001; | 3528 | priv->CurrentAuthentTransactionSeqNum = 0x0001; |
3567 | if (priv->wep_is_on && priv->exclude_unencrypted) | 3529 | if (priv->wep_is_on && priv->exclude_unencrypted) |
3568 | auth = C80211_MGMT_AAN_SHAREDKEY; | 3530 | auth = WLAN_AUTH_SHARED_KEY; |
3569 | send_authentication_request(priv, auth, NULL, 0); | 3531 | send_authentication_request(priv, auth, NULL, 0); |
3570 | } | 3532 | } |
3571 | return; | 3533 | return; |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 6290c9f7e939..72335c8eb97f 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify it | 5 | This program is free software; you can redistribute it and/or modify it |
6 | under the terms of version 2 of the GNU General Public License as | 6 | under the terms of version 2 of the GNU General Public License as |
@@ -167,12 +167,12 @@ that only one external action is invoked at a time. | |||
167 | 167 | ||
168 | #include "ipw2100.h" | 168 | #include "ipw2100.h" |
169 | 169 | ||
170 | #define IPW2100_VERSION "1.1.3" | 170 | #define IPW2100_VERSION "git-1.2.2" |
171 | 171 | ||
172 | #define DRV_NAME "ipw2100" | 172 | #define DRV_NAME "ipw2100" |
173 | #define DRV_VERSION IPW2100_VERSION | 173 | #define DRV_VERSION IPW2100_VERSION |
174 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" | 174 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" |
175 | #define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation" | 175 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" |
176 | 176 | ||
177 | /* Debugging stuff */ | 177 | /* Debugging stuff */ |
178 | #ifdef CONFIG_IPW2100_DEBUG | 178 | #ifdef CONFIG_IPW2100_DEBUG |
@@ -1418,7 +1418,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv) | |||
1418 | if (priv->status & STATUS_ENABLED) | 1418 | if (priv->status & STATUS_ENABLED) |
1419 | return 0; | 1419 | return 0; |
1420 | 1420 | ||
1421 | down(&priv->adapter_sem); | 1421 | mutex_lock(&priv->adapter_mutex); |
1422 | 1422 | ||
1423 | if (rf_kill_active(priv)) { | 1423 | if (rf_kill_active(priv)) { |
1424 | IPW_DEBUG_HC("Command aborted due to RF kill active.\n"); | 1424 | IPW_DEBUG_HC("Command aborted due to RF kill active.\n"); |
@@ -1444,7 +1444,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv) | |||
1444 | } | 1444 | } |
1445 | 1445 | ||
1446 | fail_up: | 1446 | fail_up: |
1447 | up(&priv->adapter_sem); | 1447 | mutex_unlock(&priv->adapter_mutex); |
1448 | return err; | 1448 | return err; |
1449 | } | 1449 | } |
1450 | 1450 | ||
@@ -1576,7 +1576,7 @@ static int ipw2100_disable_adapter(struct ipw2100_priv *priv) | |||
1576 | cancel_delayed_work(&priv->hang_check); | 1576 | cancel_delayed_work(&priv->hang_check); |
1577 | } | 1577 | } |
1578 | 1578 | ||
1579 | down(&priv->adapter_sem); | 1579 | mutex_lock(&priv->adapter_mutex); |
1580 | 1580 | ||
1581 | err = ipw2100_hw_send_command(priv, &cmd); | 1581 | err = ipw2100_hw_send_command(priv, &cmd); |
1582 | if (err) { | 1582 | if (err) { |
@@ -1595,7 +1595,7 @@ static int ipw2100_disable_adapter(struct ipw2100_priv *priv) | |||
1595 | IPW_DEBUG_INFO("TODO: implement scan state machine\n"); | 1595 | IPW_DEBUG_INFO("TODO: implement scan state machine\n"); |
1596 | 1596 | ||
1597 | fail_up: | 1597 | fail_up: |
1598 | up(&priv->adapter_sem); | 1598 | mutex_unlock(&priv->adapter_mutex); |
1599 | return err; | 1599 | return err; |
1600 | } | 1600 | } |
1601 | 1601 | ||
@@ -1672,6 +1672,18 @@ static int ipw2100_start_scan(struct ipw2100_priv *priv) | |||
1672 | return err; | 1672 | return err; |
1673 | } | 1673 | } |
1674 | 1674 | ||
1675 | static const struct ieee80211_geo ipw_geos[] = { | ||
1676 | { /* Restricted */ | ||
1677 | "---", | ||
1678 | .bg_channels = 14, | ||
1679 | .bg = {{2412, 1}, {2417, 2}, {2422, 3}, | ||
1680 | {2427, 4}, {2432, 5}, {2437, 6}, | ||
1681 | {2442, 7}, {2447, 8}, {2452, 9}, | ||
1682 | {2457, 10}, {2462, 11}, {2467, 12}, | ||
1683 | {2472, 13}, {2484, 14}}, | ||
1684 | }, | ||
1685 | }; | ||
1686 | |||
1675 | static int ipw2100_up(struct ipw2100_priv *priv, int deferred) | 1687 | static int ipw2100_up(struct ipw2100_priv *priv, int deferred) |
1676 | { | 1688 | { |
1677 | unsigned long flags; | 1689 | unsigned long flags; |
@@ -1727,6 +1739,13 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) | |||
1727 | goto exit; | 1739 | goto exit; |
1728 | } | 1740 | } |
1729 | 1741 | ||
1742 | /* Initialize the geo */ | ||
1743 | if (ieee80211_set_geo(priv->ieee, &ipw_geos[0])) { | ||
1744 | printk(KERN_WARNING DRV_NAME "Could not set geo\n"); | ||
1745 | return 0; | ||
1746 | } | ||
1747 | priv->ieee->freq_band = IEEE80211_24GHZ_BAND; | ||
1748 | |||
1730 | lock = LOCK_NONE; | 1749 | lock = LOCK_NONE; |
1731 | if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { | 1750 | if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { |
1732 | printk(KERN_ERR DRV_NAME | 1751 | printk(KERN_ERR DRV_NAME |
@@ -1869,7 +1888,7 @@ static void ipw2100_reset_adapter(struct ipw2100_priv *priv) | |||
1869 | priv->status |= STATUS_RESET_PENDING; | 1888 | priv->status |= STATUS_RESET_PENDING; |
1870 | spin_unlock_irqrestore(&priv->low_lock, flags); | 1889 | spin_unlock_irqrestore(&priv->low_lock, flags); |
1871 | 1890 | ||
1872 | down(&priv->action_sem); | 1891 | mutex_lock(&priv->action_mutex); |
1873 | /* stop timed checks so that they don't interfere with reset */ | 1892 | /* stop timed checks so that they don't interfere with reset */ |
1874 | priv->stop_hang_check = 1; | 1893 | priv->stop_hang_check = 1; |
1875 | cancel_delayed_work(&priv->hang_check); | 1894 | cancel_delayed_work(&priv->hang_check); |
@@ -1879,7 +1898,7 @@ static void ipw2100_reset_adapter(struct ipw2100_priv *priv) | |||
1879 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); | 1898 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); |
1880 | 1899 | ||
1881 | ipw2100_up(priv, 0); | 1900 | ipw2100_up(priv, 0); |
1882 | up(&priv->action_sem); | 1901 | mutex_unlock(&priv->action_mutex); |
1883 | 1902 | ||
1884 | } | 1903 | } |
1885 | 1904 | ||
@@ -2371,15 +2390,6 @@ static void isr_rx(struct ipw2100_priv *priv, int i, | |||
2371 | IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); | 2390 | IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); |
2372 | return; | 2391 | return; |
2373 | } | 2392 | } |
2374 | #ifdef CONFIG_IPW2100_MONITOR | ||
2375 | if (unlikely(priv->ieee->iw_mode == IW_MODE_MONITOR && | ||
2376 | priv->config & CFG_CRC_CHECK && | ||
2377 | status->flags & IPW_STATUS_FLAG_CRC_ERROR)) { | ||
2378 | IPW_DEBUG_RX("CRC error in packet. Dropping.\n"); | ||
2379 | priv->ieee->stats.rx_errors++; | ||
2380 | return; | ||
2381 | } | ||
2382 | #endif | ||
2383 | 2393 | ||
2384 | if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR && | 2394 | if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR && |
2385 | !(priv->status & STATUS_ASSOCIATED))) { | 2395 | !(priv->status & STATUS_ASSOCIATED))) { |
@@ -2427,6 +2437,89 @@ static void isr_rx(struct ipw2100_priv *priv, int i, | |||
2427 | priv->rx_queue.drv[i].host_addr = packet->dma_addr; | 2437 | priv->rx_queue.drv[i].host_addr = packet->dma_addr; |
2428 | } | 2438 | } |
2429 | 2439 | ||
2440 | #ifdef CONFIG_IPW2100_MONITOR | ||
2441 | |||
2442 | static void isr_rx_monitor(struct ipw2100_priv *priv, int i, | ||
2443 | struct ieee80211_rx_stats *stats) | ||
2444 | { | ||
2445 | struct ipw2100_status *status = &priv->status_queue.drv[i]; | ||
2446 | struct ipw2100_rx_packet *packet = &priv->rx_buffers[i]; | ||
2447 | |||
2448 | /* Magic struct that slots into the radiotap header -- no reason | ||
2449 | * to build this manually element by element, we can write it much | ||
2450 | * more efficiently than we can parse it. ORDER MATTERS HERE */ | ||
2451 | struct ipw_rt_hdr { | ||
2452 | struct ieee80211_radiotap_header rt_hdr; | ||
2453 | s8 rt_dbmsignal; /* signal in dbM, kluged to signed */ | ||
2454 | } *ipw_rt; | ||
2455 | |||
2456 | IPW_DEBUG_RX("Handler...\n"); | ||
2457 | |||
2458 | if (unlikely(status->frame_size > skb_tailroom(packet->skb) - | ||
2459 | sizeof(struct ipw_rt_hdr))) { | ||
2460 | IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!" | ||
2461 | " Dropping.\n", | ||
2462 | priv->net_dev->name, | ||
2463 | status->frame_size, | ||
2464 | skb_tailroom(packet->skb)); | ||
2465 | priv->ieee->stats.rx_errors++; | ||
2466 | return; | ||
2467 | } | ||
2468 | |||
2469 | if (unlikely(!netif_running(priv->net_dev))) { | ||
2470 | priv->ieee->stats.rx_errors++; | ||
2471 | priv->wstats.discard.misc++; | ||
2472 | IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); | ||
2473 | return; | ||
2474 | } | ||
2475 | |||
2476 | if (unlikely(priv->config & CFG_CRC_CHECK && | ||
2477 | status->flags & IPW_STATUS_FLAG_CRC_ERROR)) { | ||
2478 | IPW_DEBUG_RX("CRC error in packet. Dropping.\n"); | ||
2479 | priv->ieee->stats.rx_errors++; | ||
2480 | return; | ||
2481 | } | ||
2482 | |||
2483 | pci_unmap_single(priv->pci_dev, packet->dma_addr, | ||
2484 | sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); | ||
2485 | memmove(packet->skb->data + sizeof(struct ipw_rt_hdr), | ||
2486 | packet->skb->data, status->frame_size); | ||
2487 | |||
2488 | ipw_rt = (struct ipw_rt_hdr *) packet->skb->data; | ||
2489 | |||
2490 | ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; | ||
2491 | ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ | ||
2492 | ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total hdr+data */ | ||
2493 | |||
2494 | ipw_rt->rt_hdr.it_present = 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL; | ||
2495 | |||
2496 | ipw_rt->rt_dbmsignal = status->rssi + IPW2100_RSSI_TO_DBM; | ||
2497 | |||
2498 | skb_put(packet->skb, status->frame_size + sizeof(struct ipw_rt_hdr)); | ||
2499 | |||
2500 | if (!ieee80211_rx(priv->ieee, packet->skb, stats)) { | ||
2501 | priv->ieee->stats.rx_errors++; | ||
2502 | |||
2503 | /* ieee80211_rx failed, so it didn't free the SKB */ | ||
2504 | dev_kfree_skb_any(packet->skb); | ||
2505 | packet->skb = NULL; | ||
2506 | } | ||
2507 | |||
2508 | /* We need to allocate a new SKB and attach it to the RDB. */ | ||
2509 | if (unlikely(ipw2100_alloc_skb(priv, packet))) { | ||
2510 | IPW_DEBUG_WARNING( | ||
2511 | "%s: Unable to allocate SKB onto RBD ring - disabling " | ||
2512 | "adapter.\n", priv->net_dev->name); | ||
2513 | /* TODO: schedule adapter shutdown */ | ||
2514 | IPW_DEBUG_INFO("TODO: Shutdown adapter...\n"); | ||
2515 | } | ||
2516 | |||
2517 | /* Update the RDB entry */ | ||
2518 | priv->rx_queue.drv[i].host_addr = packet->dma_addr; | ||
2519 | } | ||
2520 | |||
2521 | #endif | ||
2522 | |||
2430 | static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) | 2523 | static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) |
2431 | { | 2524 | { |
2432 | struct ipw2100_status *status = &priv->status_queue.drv[i]; | 2525 | struct ipw2100_status *status = &priv->status_queue.drv[i]; |
@@ -2558,7 +2651,7 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv) | |||
2558 | case P8023_DATA_VAL: | 2651 | case P8023_DATA_VAL: |
2559 | #ifdef CONFIG_IPW2100_MONITOR | 2652 | #ifdef CONFIG_IPW2100_MONITOR |
2560 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | 2653 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { |
2561 | isr_rx(priv, i, &stats); | 2654 | isr_rx_monitor(priv, i, &stats); |
2562 | break; | 2655 | break; |
2563 | } | 2656 | } |
2564 | #endif | 2657 | #endif |
@@ -3750,7 +3843,7 @@ static ssize_t store_memory(struct device *d, struct device_attribute *attr, | |||
3750 | struct net_device *dev = priv->net_dev; | 3843 | struct net_device *dev = priv->net_dev; |
3751 | const char *p = buf; | 3844 | const char *p = buf; |
3752 | 3845 | ||
3753 | (void) dev; /* kill unused-var warning for debug-only code */ | 3846 | (void)dev; /* kill unused-var warning for debug-only code */ |
3754 | 3847 | ||
3755 | if (count < 1) | 3848 | if (count < 1) |
3756 | return count; | 3849 | return count; |
@@ -3863,7 +3956,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode) | |||
3863 | #ifdef CONFIG_IPW2100_MONITOR | 3956 | #ifdef CONFIG_IPW2100_MONITOR |
3864 | case IW_MODE_MONITOR: | 3957 | case IW_MODE_MONITOR: |
3865 | priv->last_mode = priv->ieee->iw_mode; | 3958 | priv->last_mode = priv->ieee->iw_mode; |
3866 | priv->net_dev->type = ARPHRD_IEEE80211; | 3959 | priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; |
3867 | break; | 3960 | break; |
3868 | #endif /* CONFIG_IPW2100_MONITOR */ | 3961 | #endif /* CONFIG_IPW2100_MONITOR */ |
3869 | } | 3962 | } |
@@ -4070,7 +4163,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, | |||
4070 | unsigned long val; | 4163 | unsigned long val; |
4071 | char *p = buffer; | 4164 | char *p = buffer; |
4072 | 4165 | ||
4073 | (void) dev; /* kill unused-var warning for debug-only code */ | 4166 | (void)dev; /* kill unused-var warning for debug-only code */ |
4074 | 4167 | ||
4075 | IPW_DEBUG_INFO("enter\n"); | 4168 | IPW_DEBUG_INFO("enter\n"); |
4076 | 4169 | ||
@@ -4119,7 +4212,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) | |||
4119 | IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", | 4212 | IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", |
4120 | disable_radio ? "OFF" : "ON"); | 4213 | disable_radio ? "OFF" : "ON"); |
4121 | 4214 | ||
4122 | down(&priv->action_sem); | 4215 | mutex_lock(&priv->action_mutex); |
4123 | 4216 | ||
4124 | if (disable_radio) { | 4217 | if (disable_radio) { |
4125 | priv->status |= STATUS_RF_KILL_SW; | 4218 | priv->status |= STATUS_RF_KILL_SW; |
@@ -4137,7 +4230,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) | |||
4137 | schedule_reset(priv); | 4230 | schedule_reset(priv); |
4138 | } | 4231 | } |
4139 | 4232 | ||
4140 | up(&priv->action_sem); | 4233 | mutex_unlock(&priv->action_mutex); |
4141 | return 1; | 4234 | return 1; |
4142 | } | 4235 | } |
4143 | 4236 | ||
@@ -5107,12 +5200,13 @@ static int ipw2100_set_tx_power(struct ipw2100_priv *priv, u32 tx_power) | |||
5107 | .host_command_length = 4 | 5200 | .host_command_length = 4 |
5108 | }; | 5201 | }; |
5109 | int err = 0; | 5202 | int err = 0; |
5203 | u32 tmp = tx_power; | ||
5110 | 5204 | ||
5111 | if (tx_power != IPW_TX_POWER_DEFAULT) | 5205 | if (tx_power != IPW_TX_POWER_DEFAULT) |
5112 | tx_power = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 / | 5206 | tmp = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 / |
5113 | (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM); | 5207 | (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM); |
5114 | 5208 | ||
5115 | cmd.host_command_parameters[0] = tx_power; | 5209 | cmd.host_command_parameters[0] = tmp; |
5116 | 5210 | ||
5117 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) | 5211 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) |
5118 | err = ipw2100_hw_send_command(priv, &cmd); | 5212 | err = ipw2100_hw_send_command(priv, &cmd); |
@@ -5365,9 +5459,12 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode) | |||
5365 | SEC_LEVEL_0, 0, 1); | 5459 | SEC_LEVEL_0, 0, 1); |
5366 | } else { | 5460 | } else { |
5367 | auth_mode = IPW_AUTH_OPEN; | 5461 | auth_mode = IPW_AUTH_OPEN; |
5368 | if ((priv->ieee->sec.flags & SEC_AUTH_MODE) && | 5462 | if (priv->ieee->sec.flags & SEC_AUTH_MODE) { |
5369 | (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) | 5463 | if (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY) |
5370 | auth_mode = IPW_AUTH_SHARED; | 5464 | auth_mode = IPW_AUTH_SHARED; |
5465 | else if (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP) | ||
5466 | auth_mode = IPW_AUTH_LEAP_CISCO_ID; | ||
5467 | } | ||
5371 | 5468 | ||
5372 | sec_level = SEC_LEVEL_0; | 5469 | sec_level = SEC_LEVEL_0; |
5373 | if (priv->ieee->sec.flags & SEC_LEVEL) | 5470 | if (priv->ieee->sec.flags & SEC_LEVEL) |
@@ -5437,7 +5534,7 @@ static void shim__set_security(struct net_device *dev, | |||
5437 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 5534 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
5438 | int i, force_update = 0; | 5535 | int i, force_update = 0; |
5439 | 5536 | ||
5440 | down(&priv->action_sem); | 5537 | mutex_lock(&priv->action_mutex); |
5441 | if (!(priv->status & STATUS_INITIALIZED)) | 5538 | if (!(priv->status & STATUS_INITIALIZED)) |
5442 | goto done; | 5539 | goto done; |
5443 | 5540 | ||
@@ -5510,7 +5607,7 @@ static void shim__set_security(struct net_device *dev, | |||
5510 | if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) | 5607 | if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) |
5511 | ipw2100_configure_security(priv, 0); | 5608 | ipw2100_configure_security(priv, 0); |
5512 | done: | 5609 | done: |
5513 | up(&priv->action_sem); | 5610 | mutex_unlock(&priv->action_mutex); |
5514 | } | 5611 | } |
5515 | 5612 | ||
5516 | static int ipw2100_adapter_setup(struct ipw2100_priv *priv) | 5613 | static int ipw2100_adapter_setup(struct ipw2100_priv *priv) |
@@ -5634,7 +5731,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p) | |||
5634 | if (!is_valid_ether_addr(addr->sa_data)) | 5731 | if (!is_valid_ether_addr(addr->sa_data)) |
5635 | return -EADDRNOTAVAIL; | 5732 | return -EADDRNOTAVAIL; |
5636 | 5733 | ||
5637 | down(&priv->action_sem); | 5734 | mutex_lock(&priv->action_mutex); |
5638 | 5735 | ||
5639 | priv->config |= CFG_CUSTOM_MAC; | 5736 | priv->config |= CFG_CUSTOM_MAC; |
5640 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); | 5737 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); |
@@ -5644,12 +5741,12 @@ static int ipw2100_set_address(struct net_device *dev, void *p) | |||
5644 | goto done; | 5741 | goto done; |
5645 | 5742 | ||
5646 | priv->reset_backoff = 0; | 5743 | priv->reset_backoff = 0; |
5647 | up(&priv->action_sem); | 5744 | mutex_unlock(&priv->action_mutex); |
5648 | ipw2100_reset_adapter(priv); | 5745 | ipw2100_reset_adapter(priv); |
5649 | return 0; | 5746 | return 0; |
5650 | 5747 | ||
5651 | done: | 5748 | done: |
5652 | up(&priv->action_sem); | 5749 | mutex_unlock(&priv->action_mutex); |
5653 | return err; | 5750 | return err; |
5654 | } | 5751 | } |
5655 | 5752 | ||
@@ -5760,6 +5857,9 @@ static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) | |||
5760 | } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { | 5857 | } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { |
5761 | sec.auth_mode = WLAN_AUTH_OPEN; | 5858 | sec.auth_mode = WLAN_AUTH_OPEN; |
5762 | ieee->open_wep = 1; | 5859 | ieee->open_wep = 1; |
5860 | } else if (value & IW_AUTH_ALG_LEAP) { | ||
5861 | sec.auth_mode = WLAN_AUTH_LEAP; | ||
5862 | ieee->open_wep = 1; | ||
5763 | } else | 5863 | } else |
5764 | return -EINVAL; | 5864 | return -EINVAL; |
5765 | 5865 | ||
@@ -5771,8 +5871,8 @@ static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) | |||
5771 | return ret; | 5871 | return ret; |
5772 | } | 5872 | } |
5773 | 5873 | ||
5774 | void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, | 5874 | static void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, |
5775 | char *wpa_ie, int wpa_ie_len) | 5875 | char *wpa_ie, int wpa_ie_len) |
5776 | { | 5876 | { |
5777 | 5877 | ||
5778 | struct ipw2100_wpa_assoc_frame frame; | 5878 | struct ipw2100_wpa_assoc_frame frame; |
@@ -5989,8 +6089,8 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, | |||
5989 | strcpy(priv->nick, "ipw2100"); | 6089 | strcpy(priv->nick, "ipw2100"); |
5990 | 6090 | ||
5991 | spin_lock_init(&priv->low_lock); | 6091 | spin_lock_init(&priv->low_lock); |
5992 | sema_init(&priv->action_sem, 1); | 6092 | mutex_init(&priv->action_mutex); |
5993 | sema_init(&priv->adapter_sem, 1); | 6093 | mutex_init(&priv->adapter_mutex); |
5994 | 6094 | ||
5995 | init_waitqueue_head(&priv->wait_command_queue); | 6095 | init_waitqueue_head(&priv->wait_command_queue); |
5996 | 6096 | ||
@@ -6155,7 +6255,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6155 | * member to call a function that then just turns and calls ipw2100_up. | 6255 | * member to call a function that then just turns and calls ipw2100_up. |
6156 | * net_dev->init is called after name allocation but before the | 6256 | * net_dev->init is called after name allocation but before the |
6157 | * notifier chain is called */ | 6257 | * notifier chain is called */ |
6158 | down(&priv->action_sem); | 6258 | mutex_lock(&priv->action_mutex); |
6159 | err = register_netdev(dev); | 6259 | err = register_netdev(dev); |
6160 | if (err) { | 6260 | if (err) { |
6161 | printk(KERN_WARNING DRV_NAME | 6261 | printk(KERN_WARNING DRV_NAME |
@@ -6191,12 +6291,12 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6191 | 6291 | ||
6192 | priv->status |= STATUS_INITIALIZED; | 6292 | priv->status |= STATUS_INITIALIZED; |
6193 | 6293 | ||
6194 | up(&priv->action_sem); | 6294 | mutex_unlock(&priv->action_mutex); |
6195 | 6295 | ||
6196 | return 0; | 6296 | return 0; |
6197 | 6297 | ||
6198 | fail_unlock: | 6298 | fail_unlock: |
6199 | up(&priv->action_sem); | 6299 | mutex_unlock(&priv->action_mutex); |
6200 | 6300 | ||
6201 | fail: | 6301 | fail: |
6202 | if (dev) { | 6302 | if (dev) { |
@@ -6236,7 +6336,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) | |||
6236 | struct net_device *dev; | 6336 | struct net_device *dev; |
6237 | 6337 | ||
6238 | if (priv) { | 6338 | if (priv) { |
6239 | down(&priv->action_sem); | 6339 | mutex_lock(&priv->action_mutex); |
6240 | 6340 | ||
6241 | priv->status &= ~STATUS_INITIALIZED; | 6341 | priv->status &= ~STATUS_INITIALIZED; |
6242 | 6342 | ||
@@ -6251,9 +6351,9 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) | |||
6251 | /* Take down the hardware */ | 6351 | /* Take down the hardware */ |
6252 | ipw2100_down(priv); | 6352 | ipw2100_down(priv); |
6253 | 6353 | ||
6254 | /* Release the semaphore so that the network subsystem can | 6354 | /* Release the mutex so that the network subsystem can |
6255 | * complete any needed calls into the driver... */ | 6355 | * complete any needed calls into the driver... */ |
6256 | up(&priv->action_sem); | 6356 | mutex_unlock(&priv->action_mutex); |
6257 | 6357 | ||
6258 | /* Unregister the device first - this results in close() | 6358 | /* Unregister the device first - this results in close() |
6259 | * being called if the device is open. If we free storage | 6359 | * being called if the device is open. If we free storage |
@@ -6292,7 +6392,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) | |||
6292 | 6392 | ||
6293 | IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name); | 6393 | IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name); |
6294 | 6394 | ||
6295 | down(&priv->action_sem); | 6395 | mutex_lock(&priv->action_mutex); |
6296 | if (priv->status & STATUS_INITIALIZED) { | 6396 | if (priv->status & STATUS_INITIALIZED) { |
6297 | /* Take down the device; powers it off, etc. */ | 6397 | /* Take down the device; powers it off, etc. */ |
6298 | ipw2100_down(priv); | 6398 | ipw2100_down(priv); |
@@ -6305,7 +6405,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) | |||
6305 | pci_disable_device(pci_dev); | 6405 | pci_disable_device(pci_dev); |
6306 | pci_set_power_state(pci_dev, PCI_D3hot); | 6406 | pci_set_power_state(pci_dev, PCI_D3hot); |
6307 | 6407 | ||
6308 | up(&priv->action_sem); | 6408 | mutex_unlock(&priv->action_mutex); |
6309 | 6409 | ||
6310 | return 0; | 6410 | return 0; |
6311 | } | 6411 | } |
@@ -6319,7 +6419,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev) | |||
6319 | if (IPW2100_PM_DISABLED) | 6419 | if (IPW2100_PM_DISABLED) |
6320 | return 0; | 6420 | return 0; |
6321 | 6421 | ||
6322 | down(&priv->action_sem); | 6422 | mutex_lock(&priv->action_mutex); |
6323 | 6423 | ||
6324 | IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); | 6424 | IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); |
6325 | 6425 | ||
@@ -6345,7 +6445,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev) | |||
6345 | if (!(priv->status & STATUS_RF_KILL_SW)) | 6445 | if (!(priv->status & STATUS_RF_KILL_SW)) |
6346 | ipw2100_up(priv, 0); | 6446 | ipw2100_up(priv, 0); |
6347 | 6447 | ||
6348 | up(&priv->action_sem); | 6448 | mutex_unlock(&priv->action_mutex); |
6349 | 6449 | ||
6350 | return 0; | 6450 | return 0; |
6351 | } | 6451 | } |
@@ -6509,7 +6609,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev, | |||
6509 | if (priv->ieee->iw_mode == IW_MODE_INFRA) | 6609 | if (priv->ieee->iw_mode == IW_MODE_INFRA) |
6510 | return -EOPNOTSUPP; | 6610 | return -EOPNOTSUPP; |
6511 | 6611 | ||
6512 | down(&priv->action_sem); | 6612 | mutex_lock(&priv->action_mutex); |
6513 | if (!(priv->status & STATUS_INITIALIZED)) { | 6613 | if (!(priv->status & STATUS_INITIALIZED)) { |
6514 | err = -EIO; | 6614 | err = -EIO; |
6515 | goto done; | 6615 | goto done; |
@@ -6540,7 +6640,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev, | |||
6540 | } | 6640 | } |
6541 | 6641 | ||
6542 | done: | 6642 | done: |
6543 | up(&priv->action_sem); | 6643 | mutex_unlock(&priv->action_mutex); |
6544 | return err; | 6644 | return err; |
6545 | } | 6645 | } |
6546 | 6646 | ||
@@ -6581,7 +6681,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev, | |||
6581 | if (wrqu->mode == priv->ieee->iw_mode) | 6681 | if (wrqu->mode == priv->ieee->iw_mode) |
6582 | return 0; | 6682 | return 0; |
6583 | 6683 | ||
6584 | down(&priv->action_sem); | 6684 | mutex_lock(&priv->action_mutex); |
6585 | if (!(priv->status & STATUS_INITIALIZED)) { | 6685 | if (!(priv->status & STATUS_INITIALIZED)) { |
6586 | err = -EIO; | 6686 | err = -EIO; |
6587 | goto done; | 6687 | goto done; |
@@ -6604,7 +6704,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev, | |||
6604 | } | 6704 | } |
6605 | 6705 | ||
6606 | done: | 6706 | done: |
6607 | up(&priv->action_sem); | 6707 | mutex_unlock(&priv->action_mutex); |
6608 | return err; | 6708 | return err; |
6609 | } | 6709 | } |
6610 | 6710 | ||
@@ -6786,7 +6886,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev, | |||
6786 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) | 6886 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) |
6787 | return -EINVAL; | 6887 | return -EINVAL; |
6788 | 6888 | ||
6789 | down(&priv->action_sem); | 6889 | mutex_lock(&priv->action_mutex); |
6790 | if (!(priv->status & STATUS_INITIALIZED)) { | 6890 | if (!(priv->status & STATUS_INITIALIZED)) { |
6791 | err = -EIO; | 6891 | err = -EIO; |
6792 | goto done; | 6892 | goto done; |
@@ -6815,7 +6915,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev, | |||
6815 | wrqu->ap_addr.sa_data[5] & 0xff); | 6915 | wrqu->ap_addr.sa_data[5] & 0xff); |
6816 | 6916 | ||
6817 | done: | 6917 | done: |
6818 | up(&priv->action_sem); | 6918 | mutex_unlock(&priv->action_mutex); |
6819 | return err; | 6919 | return err; |
6820 | } | 6920 | } |
6821 | 6921 | ||
@@ -6851,7 +6951,7 @@ static int ipw2100_wx_set_essid(struct net_device *dev, | |||
6851 | int length = 0; | 6951 | int length = 0; |
6852 | int err = 0; | 6952 | int err = 0; |
6853 | 6953 | ||
6854 | down(&priv->action_sem); | 6954 | mutex_lock(&priv->action_mutex); |
6855 | if (!(priv->status & STATUS_INITIALIZED)) { | 6955 | if (!(priv->status & STATUS_INITIALIZED)) { |
6856 | err = -EIO; | 6956 | err = -EIO; |
6857 | goto done; | 6957 | goto done; |
@@ -6888,7 +6988,7 @@ static int ipw2100_wx_set_essid(struct net_device *dev, | |||
6888 | err = ipw2100_set_essid(priv, essid, length, 0); | 6988 | err = ipw2100_set_essid(priv, essid, length, 0); |
6889 | 6989 | ||
6890 | done: | 6990 | done: |
6891 | up(&priv->action_sem); | 6991 | mutex_unlock(&priv->action_mutex); |
6892 | return err; | 6992 | return err; |
6893 | } | 6993 | } |
6894 | 6994 | ||
@@ -6969,7 +7069,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev, | |||
6969 | u32 rate; | 7069 | u32 rate; |
6970 | int err = 0; | 7070 | int err = 0; |
6971 | 7071 | ||
6972 | down(&priv->action_sem); | 7072 | mutex_lock(&priv->action_mutex); |
6973 | if (!(priv->status & STATUS_INITIALIZED)) { | 7073 | if (!(priv->status & STATUS_INITIALIZED)) { |
6974 | err = -EIO; | 7074 | err = -EIO; |
6975 | goto done; | 7075 | goto done; |
@@ -6996,7 +7096,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev, | |||
6996 | 7096 | ||
6997 | IPW_DEBUG_WX("SET Rate -> %04X \n", rate); | 7097 | IPW_DEBUG_WX("SET Rate -> %04X \n", rate); |
6998 | done: | 7098 | done: |
6999 | up(&priv->action_sem); | 7099 | mutex_unlock(&priv->action_mutex); |
7000 | return err; | 7100 | return err; |
7001 | } | 7101 | } |
7002 | 7102 | ||
@@ -7016,7 +7116,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev, | |||
7016 | return 0; | 7116 | return 0; |
7017 | } | 7117 | } |
7018 | 7118 | ||
7019 | down(&priv->action_sem); | 7119 | mutex_lock(&priv->action_mutex); |
7020 | if (!(priv->status & STATUS_INITIALIZED)) { | 7120 | if (!(priv->status & STATUS_INITIALIZED)) { |
7021 | err = -EIO; | 7121 | err = -EIO; |
7022 | goto done; | 7122 | goto done; |
@@ -7048,7 +7148,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev, | |||
7048 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); | 7148 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); |
7049 | 7149 | ||
7050 | done: | 7150 | done: |
7051 | up(&priv->action_sem); | 7151 | mutex_unlock(&priv->action_mutex); |
7052 | return err; | 7152 | return err; |
7053 | } | 7153 | } |
7054 | 7154 | ||
@@ -7063,7 +7163,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev, | |||
7063 | if (wrqu->rts.fixed == 0) | 7163 | if (wrqu->rts.fixed == 0) |
7064 | return -EINVAL; | 7164 | return -EINVAL; |
7065 | 7165 | ||
7066 | down(&priv->action_sem); | 7166 | mutex_lock(&priv->action_mutex); |
7067 | if (!(priv->status & STATUS_INITIALIZED)) { | 7167 | if (!(priv->status & STATUS_INITIALIZED)) { |
7068 | err = -EIO; | 7168 | err = -EIO; |
7069 | goto done; | 7169 | goto done; |
@@ -7083,7 +7183,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev, | |||
7083 | 7183 | ||
7084 | IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value); | 7184 | IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value); |
7085 | done: | 7185 | done: |
7086 | up(&priv->action_sem); | 7186 | mutex_unlock(&priv->action_mutex); |
7087 | return err; | 7187 | return err; |
7088 | } | 7188 | } |
7089 | 7189 | ||
@@ -7134,7 +7234,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev, | |||
7134 | value = wrqu->txpower.value; | 7234 | value = wrqu->txpower.value; |
7135 | } | 7235 | } |
7136 | 7236 | ||
7137 | down(&priv->action_sem); | 7237 | mutex_lock(&priv->action_mutex); |
7138 | if (!(priv->status & STATUS_INITIALIZED)) { | 7238 | if (!(priv->status & STATUS_INITIALIZED)) { |
7139 | err = -EIO; | 7239 | err = -EIO; |
7140 | goto done; | 7240 | goto done; |
@@ -7145,7 +7245,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev, | |||
7145 | IPW_DEBUG_WX("SET TX Power -> %d \n", value); | 7245 | IPW_DEBUG_WX("SET TX Power -> %d \n", value); |
7146 | 7246 | ||
7147 | done: | 7247 | done: |
7148 | up(&priv->action_sem); | 7248 | mutex_unlock(&priv->action_mutex); |
7149 | return err; | 7249 | return err; |
7150 | } | 7250 | } |
7151 | 7251 | ||
@@ -7237,7 +7337,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev, | |||
7237 | if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) | 7337 | if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) |
7238 | return 0; | 7338 | return 0; |
7239 | 7339 | ||
7240 | down(&priv->action_sem); | 7340 | mutex_lock(&priv->action_mutex); |
7241 | if (!(priv->status & STATUS_INITIALIZED)) { | 7341 | if (!(priv->status & STATUS_INITIALIZED)) { |
7242 | err = -EIO; | 7342 | err = -EIO; |
7243 | goto done; | 7343 | goto done; |
@@ -7264,7 +7364,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev, | |||
7264 | IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value); | 7364 | IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value); |
7265 | 7365 | ||
7266 | done: | 7366 | done: |
7267 | up(&priv->action_sem); | 7367 | mutex_unlock(&priv->action_mutex); |
7268 | return err; | 7368 | return err; |
7269 | } | 7369 | } |
7270 | 7370 | ||
@@ -7307,7 +7407,7 @@ static int ipw2100_wx_set_scan(struct net_device *dev, | |||
7307 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7407 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7308 | int err = 0; | 7408 | int err = 0; |
7309 | 7409 | ||
7310 | down(&priv->action_sem); | 7410 | mutex_lock(&priv->action_mutex); |
7311 | if (!(priv->status & STATUS_INITIALIZED)) { | 7411 | if (!(priv->status & STATUS_INITIALIZED)) { |
7312 | err = -EIO; | 7412 | err = -EIO; |
7313 | goto done; | 7413 | goto done; |
@@ -7322,7 +7422,7 @@ static int ipw2100_wx_set_scan(struct net_device *dev, | |||
7322 | } | 7422 | } |
7323 | 7423 | ||
7324 | done: | 7424 | done: |
7325 | up(&priv->action_sem); | 7425 | mutex_unlock(&priv->action_mutex); |
7326 | return err; | 7426 | return err; |
7327 | } | 7427 | } |
7328 | 7428 | ||
@@ -7372,7 +7472,7 @@ static int ipw2100_wx_set_power(struct net_device *dev, | |||
7372 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7472 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7373 | int err = 0; | 7473 | int err = 0; |
7374 | 7474 | ||
7375 | down(&priv->action_sem); | 7475 | mutex_lock(&priv->action_mutex); |
7376 | if (!(priv->status & STATUS_INITIALIZED)) { | 7476 | if (!(priv->status & STATUS_INITIALIZED)) { |
7377 | err = -EIO; | 7477 | err = -EIO; |
7378 | goto done; | 7478 | goto done; |
@@ -7405,7 +7505,7 @@ static int ipw2100_wx_set_power(struct net_device *dev, | |||
7405 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); | 7505 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); |
7406 | 7506 | ||
7407 | done: | 7507 | done: |
7408 | up(&priv->action_sem); | 7508 | mutex_unlock(&priv->action_mutex); |
7409 | return err; | 7509 | return err; |
7410 | 7510 | ||
7411 | } | 7511 | } |
@@ -7709,7 +7809,7 @@ static int ipw2100_wx_set_promisc(struct net_device *dev, | |||
7709 | int enable = (parms[0] > 0); | 7809 | int enable = (parms[0] > 0); |
7710 | int err = 0; | 7810 | int err = 0; |
7711 | 7811 | ||
7712 | down(&priv->action_sem); | 7812 | mutex_lock(&priv->action_mutex); |
7713 | if (!(priv->status & STATUS_INITIALIZED)) { | 7813 | if (!(priv->status & STATUS_INITIALIZED)) { |
7714 | err = -EIO; | 7814 | err = -EIO; |
7715 | goto done; | 7815 | goto done; |
@@ -7727,7 +7827,7 @@ static int ipw2100_wx_set_promisc(struct net_device *dev, | |||
7727 | err = ipw2100_switch_mode(priv, priv->last_mode); | 7827 | err = ipw2100_switch_mode(priv, priv->last_mode); |
7728 | } | 7828 | } |
7729 | done: | 7829 | done: |
7730 | up(&priv->action_sem); | 7830 | mutex_unlock(&priv->action_mutex); |
7731 | return err; | 7831 | return err; |
7732 | } | 7832 | } |
7733 | 7833 | ||
@@ -7750,7 +7850,7 @@ static int ipw2100_wx_set_powermode(struct net_device *dev, | |||
7750 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7850 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7751 | int err = 0, mode = *(int *)extra; | 7851 | int err = 0, mode = *(int *)extra; |
7752 | 7852 | ||
7753 | down(&priv->action_sem); | 7853 | mutex_lock(&priv->action_mutex); |
7754 | if (!(priv->status & STATUS_INITIALIZED)) { | 7854 | if (!(priv->status & STATUS_INITIALIZED)) { |
7755 | err = -EIO; | 7855 | err = -EIO; |
7756 | goto done; | 7856 | goto done; |
@@ -7762,7 +7862,7 @@ static int ipw2100_wx_set_powermode(struct net_device *dev, | |||
7762 | if (priv->power_mode != mode) | 7862 | if (priv->power_mode != mode) |
7763 | err = ipw2100_set_power_mode(priv, mode); | 7863 | err = ipw2100_set_power_mode(priv, mode); |
7764 | done: | 7864 | done: |
7765 | up(&priv->action_sem); | 7865 | mutex_unlock(&priv->action_mutex); |
7766 | return err; | 7866 | return err; |
7767 | } | 7867 | } |
7768 | 7868 | ||
@@ -7814,7 +7914,7 @@ static int ipw2100_wx_set_preamble(struct net_device *dev, | |||
7814 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7914 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7815 | int err, mode = *(int *)extra; | 7915 | int err, mode = *(int *)extra; |
7816 | 7916 | ||
7817 | down(&priv->action_sem); | 7917 | mutex_lock(&priv->action_mutex); |
7818 | if (!(priv->status & STATUS_INITIALIZED)) { | 7918 | if (!(priv->status & STATUS_INITIALIZED)) { |
7819 | err = -EIO; | 7919 | err = -EIO; |
7820 | goto done; | 7920 | goto done; |
@@ -7832,7 +7932,7 @@ static int ipw2100_wx_set_preamble(struct net_device *dev, | |||
7832 | err = ipw2100_system_config(priv, 0); | 7932 | err = ipw2100_system_config(priv, 0); |
7833 | 7933 | ||
7834 | done: | 7934 | done: |
7835 | up(&priv->action_sem); | 7935 | mutex_unlock(&priv->action_mutex); |
7836 | return err; | 7936 | return err; |
7837 | } | 7937 | } |
7838 | 7938 | ||
@@ -7862,7 +7962,7 @@ static int ipw2100_wx_set_crc_check(struct net_device *dev, | |||
7862 | struct ipw2100_priv *priv = ieee80211_priv(dev); | 7962 | struct ipw2100_priv *priv = ieee80211_priv(dev); |
7863 | int err, mode = *(int *)extra; | 7963 | int err, mode = *(int *)extra; |
7864 | 7964 | ||
7865 | down(&priv->action_sem); | 7965 | mutex_lock(&priv->action_mutex); |
7866 | if (!(priv->status & STATUS_INITIALIZED)) { | 7966 | if (!(priv->status & STATUS_INITIALIZED)) { |
7867 | err = -EIO; | 7967 | err = -EIO; |
7868 | goto done; | 7968 | goto done; |
@@ -7879,7 +7979,7 @@ static int ipw2100_wx_set_crc_check(struct net_device *dev, | |||
7879 | err = 0; | 7979 | err = 0; |
7880 | 7980 | ||
7881 | done: | 7981 | done: |
7882 | up(&priv->action_sem); | 7982 | mutex_unlock(&priv->action_mutex); |
7883 | return err; | 7983 | return err; |
7884 | } | 7984 | } |
7885 | 7985 | ||
@@ -8184,11 +8284,11 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv) | |||
8184 | if (priv->status & STATUS_STOPPING) | 8284 | if (priv->status & STATUS_STOPPING) |
8185 | return; | 8285 | return; |
8186 | 8286 | ||
8187 | down(&priv->action_sem); | 8287 | mutex_lock(&priv->action_mutex); |
8188 | 8288 | ||
8189 | IPW_DEBUG_WX("enter\n"); | 8289 | IPW_DEBUG_WX("enter\n"); |
8190 | 8290 | ||
8191 | up(&priv->action_sem); | 8291 | mutex_unlock(&priv->action_mutex); |
8192 | 8292 | ||
8193 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 8293 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
8194 | 8294 | ||
@@ -8211,7 +8311,7 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv) | |||
8211 | 8311 | ||
8212 | if (!(priv->status & STATUS_ASSOCIATED)) { | 8312 | if (!(priv->status & STATUS_ASSOCIATED)) { |
8213 | IPW_DEBUG_WX("Configuring ESSID\n"); | 8313 | IPW_DEBUG_WX("Configuring ESSID\n"); |
8214 | down(&priv->action_sem); | 8314 | mutex_lock(&priv->action_mutex); |
8215 | /* This is a disassociation event, so kick the firmware to | 8315 | /* This is a disassociation event, so kick the firmware to |
8216 | * look for another AP */ | 8316 | * look for another AP */ |
8217 | if (priv->config & CFG_STATIC_ESSID) | 8317 | if (priv->config & CFG_STATIC_ESSID) |
@@ -8219,7 +8319,7 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv) | |||
8219 | 0); | 8319 | 0); |
8220 | else | 8320 | else |
8221 | ipw2100_set_essid(priv, NULL, 0, 0); | 8321 | ipw2100_set_essid(priv, NULL, 0, 0); |
8222 | up(&priv->action_sem); | 8322 | mutex_unlock(&priv->action_mutex); |
8223 | } | 8323 | } |
8224 | 8324 | ||
8225 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); | 8325 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); |
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h index f6c51441fa87..55b7227198df 100644 --- a/drivers/net/wireless/ipw2100.h +++ b/drivers/net/wireless/ipw2100.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify it | 5 | This program is free software; you can redistribute it and/or modify it |
6 | under the terms of version 2 of the GNU General Public License as | 6 | under the terms of version 2 of the GNU General Public License as |
@@ -41,7 +41,12 @@ | |||
41 | 41 | ||
42 | #include <net/ieee80211.h> | 42 | #include <net/ieee80211.h> |
43 | 43 | ||
44 | #ifdef CONFIG_IPW2100_MONITOR | ||
45 | #include <net/ieee80211_radiotap.h> | ||
46 | #endif | ||
47 | |||
44 | #include <linux/workqueue.h> | 48 | #include <linux/workqueue.h> |
49 | #include <linux/mutex.h> | ||
45 | 50 | ||
46 | struct ipw2100_priv; | 51 | struct ipw2100_priv; |
47 | struct ipw2100_tx_packet; | 52 | struct ipw2100_tx_packet; |
@@ -392,8 +397,10 @@ struct ipw2100_notification { | |||
392 | #define IPW_WEP104_CIPHER (1<<5) | 397 | #define IPW_WEP104_CIPHER (1<<5) |
393 | #define IPW_CKIP_CIPHER (1<<6) | 398 | #define IPW_CKIP_CIPHER (1<<6) |
394 | 399 | ||
395 | #define IPW_AUTH_OPEN 0 | 400 | #define IPW_AUTH_OPEN 0 |
396 | #define IPW_AUTH_SHARED 1 | 401 | #define IPW_AUTH_SHARED 1 |
402 | #define IPW_AUTH_LEAP 2 | ||
403 | #define IPW_AUTH_LEAP_CISCO_ID 0x80 | ||
397 | 404 | ||
398 | struct statistic { | 405 | struct statistic { |
399 | int value; | 406 | int value; |
@@ -588,8 +595,8 @@ struct ipw2100_priv { | |||
588 | int inta_other; | 595 | int inta_other; |
589 | 596 | ||
590 | spinlock_t low_lock; | 597 | spinlock_t low_lock; |
591 | struct semaphore action_sem; | 598 | struct mutex action_mutex; |
592 | struct semaphore adapter_sem; | 599 | struct mutex adapter_mutex; |
593 | 600 | ||
594 | wait_queue_head_t wait_command_queue; | 601 | wait_queue_head_t wait_command_queue; |
595 | }; | 602 | }; |
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 287676ad80df..9dce522526c5 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | 802.11 status code portion of this file from ethereal-0.10.6: | 5 | 802.11 status code portion of this file from ethereal-0.10.6: |
6 | Copyright 2000, Axis Communications AB | 6 | Copyright 2000, Axis Communications AB |
@@ -33,9 +33,9 @@ | |||
33 | #include "ipw2200.h" | 33 | #include "ipw2200.h" |
34 | #include <linux/version.h> | 34 | #include <linux/version.h> |
35 | 35 | ||
36 | #define IPW2200_VERSION "git-1.0.8" | 36 | #define IPW2200_VERSION "git-1.1.1" |
37 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" | 37 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" |
38 | #define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation" | 38 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" |
39 | #define DRV_VERSION IPW2200_VERSION | 39 | #define DRV_VERSION IPW2200_VERSION |
40 | 40 | ||
41 | #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1) | 41 | #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1) |
@@ -55,7 +55,9 @@ static int associate = 1; | |||
55 | static int auto_create = 1; | 55 | static int auto_create = 1; |
56 | static int led = 0; | 56 | static int led = 0; |
57 | static int disable = 0; | 57 | static int disable = 0; |
58 | static int hwcrypto = 1; | 58 | static int bt_coexist = 0; |
59 | static int hwcrypto = 0; | ||
60 | static int roaming = 1; | ||
59 | static const char ipw_modes[] = { | 61 | static const char ipw_modes[] = { |
60 | 'a', 'b', 'g', '?' | 62 | 'a', 'b', 'g', '?' |
61 | }; | 63 | }; |
@@ -151,12 +153,6 @@ static int init_supported_rates(struct ipw_priv *priv, | |||
151 | static void ipw_set_hwcrypto_keys(struct ipw_priv *); | 153 | static void ipw_set_hwcrypto_keys(struct ipw_priv *); |
152 | static void ipw_send_wep_keys(struct ipw_priv *, int); | 154 | static void ipw_send_wep_keys(struct ipw_priv *, int); |
153 | 155 | ||
154 | static int ipw_is_valid_channel(struct ieee80211_device *, u8); | ||
155 | static int ipw_channel_to_index(struct ieee80211_device *, u8); | ||
156 | static u8 ipw_freq_to_channel(struct ieee80211_device *, u32); | ||
157 | static int ipw_set_geo(struct ieee80211_device *, const struct ieee80211_geo *); | ||
158 | static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *); | ||
159 | |||
160 | static int snprint_line(char *buf, size_t count, | 156 | static int snprint_line(char *buf, size_t count, |
161 | const u8 * data, u32 len, u32 ofs) | 157 | const u8 * data, u32 len, u32 ofs) |
162 | { | 158 | { |
@@ -227,12 +223,15 @@ static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len) | |||
227 | return total; | 223 | return total; |
228 | } | 224 | } |
229 | 225 | ||
226 | /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ | ||
230 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); | 227 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); |
231 | #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) | 228 | #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) |
232 | 229 | ||
230 | /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ | ||
233 | static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); | 231 | static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); |
234 | #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) | 232 | #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) |
235 | 233 | ||
234 | /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ | ||
236 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); | 235 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); |
237 | static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) | 236 | static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) |
238 | { | 237 | { |
@@ -241,6 +240,7 @@ static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) | |||
241 | _ipw_write_reg8(a, b, c); | 240 | _ipw_write_reg8(a, b, c); |
242 | } | 241 | } |
243 | 242 | ||
243 | /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ | ||
244 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); | 244 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); |
245 | static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) | 245 | static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) |
246 | { | 246 | { |
@@ -249,6 +249,7 @@ static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) | |||
249 | _ipw_write_reg16(a, b, c); | 249 | _ipw_write_reg16(a, b, c); |
250 | } | 250 | } |
251 | 251 | ||
252 | /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ | ||
252 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); | 253 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); |
253 | static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) | 254 | static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) |
254 | { | 255 | { |
@@ -257,48 +258,70 @@ static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) | |||
257 | _ipw_write_reg32(a, b, c); | 258 | _ipw_write_reg32(a, b, c); |
258 | } | 259 | } |
259 | 260 | ||
261 | /* 8-bit direct write (low 4K) */ | ||
260 | #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs)) | 262 | #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs)) |
263 | |||
264 | /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ | ||
261 | #define ipw_write8(ipw, ofs, val) \ | 265 | #define ipw_write8(ipw, ofs, val) \ |
262 | IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | 266 | IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ |
263 | _ipw_write8(ipw, ofs, val) | 267 | _ipw_write8(ipw, ofs, val) |
264 | 268 | ||
269 | /* 16-bit direct write (low 4K) */ | ||
265 | #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs)) | 270 | #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs)) |
271 | |||
272 | /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ | ||
266 | #define ipw_write16(ipw, ofs, val) \ | 273 | #define ipw_write16(ipw, ofs, val) \ |
267 | IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | 274 | IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ |
268 | _ipw_write16(ipw, ofs, val) | 275 | _ipw_write16(ipw, ofs, val) |
269 | 276 | ||
277 | /* 32-bit direct write (low 4K) */ | ||
270 | #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs)) | 278 | #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs)) |
279 | |||
280 | /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ | ||
271 | #define ipw_write32(ipw, ofs, val) \ | 281 | #define ipw_write32(ipw, ofs, val) \ |
272 | IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | 282 | IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ |
273 | _ipw_write32(ipw, ofs, val) | 283 | _ipw_write32(ipw, ofs, val) |
274 | 284 | ||
285 | /* 8-bit direct read (low 4K) */ | ||
275 | #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs)) | 286 | #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs)) |
287 | |||
288 | /* 8-bit direct read (low 4K), with debug wrapper */ | ||
276 | static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) | 289 | static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) |
277 | { | 290 | { |
278 | IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs)); | 291 | IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs)); |
279 | return _ipw_read8(ipw, ofs); | 292 | return _ipw_read8(ipw, ofs); |
280 | } | 293 | } |
281 | 294 | ||
295 | /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */ | ||
282 | #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs) | 296 | #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs) |
283 | 297 | ||
298 | /* 16-bit direct read (low 4K) */ | ||
284 | #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs)) | 299 | #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs)) |
300 | |||
301 | /* 16-bit direct read (low 4K), with debug wrapper */ | ||
285 | static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) | 302 | static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) |
286 | { | 303 | { |
287 | IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs)); | 304 | IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs)); |
288 | return _ipw_read16(ipw, ofs); | 305 | return _ipw_read16(ipw, ofs); |
289 | } | 306 | } |
290 | 307 | ||
308 | /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */ | ||
291 | #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs) | 309 | #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs) |
292 | 310 | ||
311 | /* 32-bit direct read (low 4K) */ | ||
293 | #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs)) | 312 | #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs)) |
313 | |||
314 | /* 32-bit direct read (low 4K), with debug wrapper */ | ||
294 | static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) | 315 | static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) |
295 | { | 316 | { |
296 | IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs)); | 317 | IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs)); |
297 | return _ipw_read32(ipw, ofs); | 318 | return _ipw_read32(ipw, ofs); |
298 | } | 319 | } |
299 | 320 | ||
321 | /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */ | ||
300 | #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs) | 322 | #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs) |
301 | 323 | ||
324 | /* multi-byte read (above 4K), with debug wrapper */ | ||
302 | static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); | 325 | static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); |
303 | static inline void __ipw_read_indirect(const char *f, int l, | 326 | static inline void __ipw_read_indirect(const char *f, int l, |
304 | struct ipw_priv *a, u32 b, u8 * c, int d) | 327 | struct ipw_priv *a, u32 b, u8 * c, int d) |
@@ -308,15 +331,17 @@ static inline void __ipw_read_indirect(const char *f, int l, | |||
308 | _ipw_read_indirect(a, b, c, d); | 331 | _ipw_read_indirect(a, b, c, d); |
309 | } | 332 | } |
310 | 333 | ||
334 | /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ | ||
311 | #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d) | 335 | #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d) |
312 | 336 | ||
337 | /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ | ||
313 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, | 338 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, |
314 | int num); | 339 | int num); |
315 | #define ipw_write_indirect(a, b, c, d) \ | 340 | #define ipw_write_indirect(a, b, c, d) \ |
316 | IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ | 341 | IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ |
317 | _ipw_write_indirect(a, b, c, d) | 342 | _ipw_write_indirect(a, b, c, d) |
318 | 343 | ||
319 | /* indirect write s */ | 344 | /* 32-bit indirect write (above 4K) */ |
320 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) | 345 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) |
321 | { | 346 | { |
322 | IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); | 347 | IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); |
@@ -324,22 +349,29 @@ static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) | |||
324 | _ipw_write32(priv, IPW_INDIRECT_DATA, value); | 349 | _ipw_write32(priv, IPW_INDIRECT_DATA, value); |
325 | } | 350 | } |
326 | 351 | ||
352 | /* 8-bit indirect write (above 4K) */ | ||
327 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) | 353 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) |
328 | { | 354 | { |
355 | u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ | ||
356 | u32 dif_len = reg - aligned_addr; | ||
357 | |||
329 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); | 358 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); |
330 | _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); | 359 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
331 | _ipw_write8(priv, IPW_INDIRECT_DATA, value); | 360 | _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value); |
332 | } | 361 | } |
333 | 362 | ||
363 | /* 16-bit indirect write (above 4K) */ | ||
334 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) | 364 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) |
335 | { | 365 | { |
366 | u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ | ||
367 | u32 dif_len = (reg - aligned_addr) & (~0x1ul); | ||
368 | |||
336 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); | 369 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); |
337 | _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); | 370 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
338 | _ipw_write16(priv, IPW_INDIRECT_DATA, value); | 371 | _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value); |
339 | } | 372 | } |
340 | 373 | ||
341 | /* indirect read s */ | 374 | /* 8-bit indirect read (above 4K) */ |
342 | |||
343 | static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) | 375 | static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) |
344 | { | 376 | { |
345 | u32 word; | 377 | u32 word; |
@@ -349,6 +381,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) | |||
349 | return (word >> ((reg & 0x3) * 8)) & 0xff; | 381 | return (word >> ((reg & 0x3) * 8)) & 0xff; |
350 | } | 382 | } |
351 | 383 | ||
384 | /* 32-bit indirect read (above 4K) */ | ||
352 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) | 385 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) |
353 | { | 386 | { |
354 | u32 value; | 387 | u32 value; |
@@ -361,11 +394,12 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) | |||
361 | return value; | 394 | return value; |
362 | } | 395 | } |
363 | 396 | ||
364 | /* iterative/auto-increment 32 bit reads and writes */ | 397 | /* General purpose, no alignment requirement, iterative (multi-byte) read, */ |
398 | /* for area above 1st 4K of SRAM/reg space */ | ||
365 | static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | 399 | static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, |
366 | int num) | 400 | int num) |
367 | { | 401 | { |
368 | u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; | 402 | u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ |
369 | u32 dif_len = addr - aligned_addr; | 403 | u32 dif_len = addr - aligned_addr; |
370 | u32 i; | 404 | u32 i; |
371 | 405 | ||
@@ -375,7 +409,7 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
375 | return; | 409 | return; |
376 | } | 410 | } |
377 | 411 | ||
378 | /* Read the first nibble byte by byte */ | 412 | /* Read the first dword (or portion) byte by byte */ |
379 | if (unlikely(dif_len)) { | 413 | if (unlikely(dif_len)) { |
380 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); | 414 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
381 | /* Start reading at aligned_addr + dif_len */ | 415 | /* Start reading at aligned_addr + dif_len */ |
@@ -384,11 +418,12 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
384 | aligned_addr += 4; | 418 | aligned_addr += 4; |
385 | } | 419 | } |
386 | 420 | ||
421 | /* Read all of the middle dwords as dwords, with auto-increment */ | ||
387 | _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); | 422 | _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); |
388 | for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) | 423 | for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) |
389 | *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); | 424 | *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); |
390 | 425 | ||
391 | /* Copy the last nibble */ | 426 | /* Read the last dword (or portion) byte by byte */ |
392 | if (unlikely(num)) { | 427 | if (unlikely(num)) { |
393 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); | 428 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
394 | for (i = 0; num > 0; i++, num--) | 429 | for (i = 0; num > 0; i++, num--) |
@@ -396,10 +431,12 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
396 | } | 431 | } |
397 | } | 432 | } |
398 | 433 | ||
434 | /* General purpose, no alignment requirement, iterative (multi-byte) write, */ | ||
435 | /* for area above 1st 4K of SRAM/reg space */ | ||
399 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | 436 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, |
400 | int num) | 437 | int num) |
401 | { | 438 | { |
402 | u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; | 439 | u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ |
403 | u32 dif_len = addr - aligned_addr; | 440 | u32 dif_len = addr - aligned_addr; |
404 | u32 i; | 441 | u32 i; |
405 | 442 | ||
@@ -409,20 +446,21 @@ static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
409 | return; | 446 | return; |
410 | } | 447 | } |
411 | 448 | ||
412 | /* Write the first nibble byte by byte */ | 449 | /* Write the first dword (or portion) byte by byte */ |
413 | if (unlikely(dif_len)) { | 450 | if (unlikely(dif_len)) { |
414 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); | 451 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
415 | /* Start reading at aligned_addr + dif_len */ | 452 | /* Start writing at aligned_addr + dif_len */ |
416 | for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) | 453 | for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) |
417 | _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); | 454 | _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); |
418 | aligned_addr += 4; | 455 | aligned_addr += 4; |
419 | } | 456 | } |
420 | 457 | ||
458 | /* Write all of the middle dwords as dwords, with auto-increment */ | ||
421 | _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); | 459 | _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); |
422 | for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) | 460 | for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) |
423 | _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); | 461 | _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); |
424 | 462 | ||
425 | /* Copy the last nibble */ | 463 | /* Write the last dword (or portion) byte by byte */ |
426 | if (unlikely(num)) { | 464 | if (unlikely(num)) { |
427 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); | 465 | _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); |
428 | for (i = 0; num > 0; i++, num--, buf++) | 466 | for (i = 0; num > 0; i++, num--, buf++) |
@@ -430,17 +468,21 @@ static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |||
430 | } | 468 | } |
431 | } | 469 | } |
432 | 470 | ||
471 | /* General purpose, no alignment requirement, iterative (multi-byte) write, */ | ||
472 | /* for 1st 4K of SRAM/regs space */ | ||
433 | static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, | 473 | static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, |
434 | int num) | 474 | int num) |
435 | { | 475 | { |
436 | memcpy_toio((priv->hw_base + addr), buf, num); | 476 | memcpy_toio((priv->hw_base + addr), buf, num); |
437 | } | 477 | } |
438 | 478 | ||
479 | /* Set bit(s) in low 4K of SRAM/regs */ | ||
439 | static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) | 480 | static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) |
440 | { | 481 | { |
441 | ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); | 482 | ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); |
442 | } | 483 | } |
443 | 484 | ||
485 | /* Clear bit(s) in low 4K of SRAM/regs */ | ||
444 | static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) | 486 | static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) |
445 | { | 487 | { |
446 | ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); | 488 | ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); |
@@ -701,7 +743,7 @@ static void ipw_init_ordinals(struct ipw_priv *priv) | |||
701 | 743 | ||
702 | } | 744 | } |
703 | 745 | ||
704 | u32 ipw_register_toggle(u32 reg) | 746 | static u32 ipw_register_toggle(u32 reg) |
705 | { | 747 | { |
706 | reg &= ~IPW_START_STANDBY; | 748 | reg &= ~IPW_START_STANDBY; |
707 | if (reg & IPW_GATE_ODMA) | 749 | if (reg & IPW_GATE_ODMA) |
@@ -722,11 +764,11 @@ u32 ipw_register_toggle(u32 reg) | |||
722 | * - On radio OFF, turn off any LEDs started during radio on | 764 | * - On radio OFF, turn off any LEDs started during radio on |
723 | * | 765 | * |
724 | */ | 766 | */ |
725 | #define LD_TIME_LINK_ON 300 | 767 | #define LD_TIME_LINK_ON msecs_to_jiffies(300) |
726 | #define LD_TIME_LINK_OFF 2700 | 768 | #define LD_TIME_LINK_OFF msecs_to_jiffies(2700) |
727 | #define LD_TIME_ACT_ON 250 | 769 | #define LD_TIME_ACT_ON msecs_to_jiffies(250) |
728 | 770 | ||
729 | void ipw_led_link_on(struct ipw_priv *priv) | 771 | static void ipw_led_link_on(struct ipw_priv *priv) |
730 | { | 772 | { |
731 | unsigned long flags; | 773 | unsigned long flags; |
732 | u32 led; | 774 | u32 led; |
@@ -764,12 +806,12 @@ void ipw_led_link_on(struct ipw_priv *priv) | |||
764 | static void ipw_bg_led_link_on(void *data) | 806 | static void ipw_bg_led_link_on(void *data) |
765 | { | 807 | { |
766 | struct ipw_priv *priv = data; | 808 | struct ipw_priv *priv = data; |
767 | down(&priv->sem); | 809 | mutex_lock(&priv->mutex); |
768 | ipw_led_link_on(data); | 810 | ipw_led_link_on(data); |
769 | up(&priv->sem); | 811 | mutex_unlock(&priv->mutex); |
770 | } | 812 | } |
771 | 813 | ||
772 | void ipw_led_link_off(struct ipw_priv *priv) | 814 | static void ipw_led_link_off(struct ipw_priv *priv) |
773 | { | 815 | { |
774 | unsigned long flags; | 816 | unsigned long flags; |
775 | u32 led; | 817 | u32 led; |
@@ -808,9 +850,9 @@ void ipw_led_link_off(struct ipw_priv *priv) | |||
808 | static void ipw_bg_led_link_off(void *data) | 850 | static void ipw_bg_led_link_off(void *data) |
809 | { | 851 | { |
810 | struct ipw_priv *priv = data; | 852 | struct ipw_priv *priv = data; |
811 | down(&priv->sem); | 853 | mutex_lock(&priv->mutex); |
812 | ipw_led_link_off(data); | 854 | ipw_led_link_off(data); |
813 | up(&priv->sem); | 855 | mutex_unlock(&priv->mutex); |
814 | } | 856 | } |
815 | 857 | ||
816 | static void __ipw_led_activity_on(struct ipw_priv *priv) | 858 | static void __ipw_led_activity_on(struct ipw_priv *priv) |
@@ -847,6 +889,7 @@ static void __ipw_led_activity_on(struct ipw_priv *priv) | |||
847 | } | 889 | } |
848 | } | 890 | } |
849 | 891 | ||
892 | #if 0 | ||
850 | void ipw_led_activity_on(struct ipw_priv *priv) | 893 | void ipw_led_activity_on(struct ipw_priv *priv) |
851 | { | 894 | { |
852 | unsigned long flags; | 895 | unsigned long flags; |
@@ -854,8 +897,9 @@ void ipw_led_activity_on(struct ipw_priv *priv) | |||
854 | __ipw_led_activity_on(priv); | 897 | __ipw_led_activity_on(priv); |
855 | spin_unlock_irqrestore(&priv->lock, flags); | 898 | spin_unlock_irqrestore(&priv->lock, flags); |
856 | } | 899 | } |
900 | #endif /* 0 */ | ||
857 | 901 | ||
858 | void ipw_led_activity_off(struct ipw_priv *priv) | 902 | static void ipw_led_activity_off(struct ipw_priv *priv) |
859 | { | 903 | { |
860 | unsigned long flags; | 904 | unsigned long flags; |
861 | u32 led; | 905 | u32 led; |
@@ -885,12 +929,12 @@ void ipw_led_activity_off(struct ipw_priv *priv) | |||
885 | static void ipw_bg_led_activity_off(void *data) | 929 | static void ipw_bg_led_activity_off(void *data) |
886 | { | 930 | { |
887 | struct ipw_priv *priv = data; | 931 | struct ipw_priv *priv = data; |
888 | down(&priv->sem); | 932 | mutex_lock(&priv->mutex); |
889 | ipw_led_activity_off(data); | 933 | ipw_led_activity_off(data); |
890 | up(&priv->sem); | 934 | mutex_unlock(&priv->mutex); |
891 | } | 935 | } |
892 | 936 | ||
893 | void ipw_led_band_on(struct ipw_priv *priv) | 937 | static void ipw_led_band_on(struct ipw_priv *priv) |
894 | { | 938 | { |
895 | unsigned long flags; | 939 | unsigned long flags; |
896 | u32 led; | 940 | u32 led; |
@@ -925,7 +969,7 @@ void ipw_led_band_on(struct ipw_priv *priv) | |||
925 | spin_unlock_irqrestore(&priv->lock, flags); | 969 | spin_unlock_irqrestore(&priv->lock, flags); |
926 | } | 970 | } |
927 | 971 | ||
928 | void ipw_led_band_off(struct ipw_priv *priv) | 972 | static void ipw_led_band_off(struct ipw_priv *priv) |
929 | { | 973 | { |
930 | unsigned long flags; | 974 | unsigned long flags; |
931 | u32 led; | 975 | u32 led; |
@@ -948,24 +992,24 @@ void ipw_led_band_off(struct ipw_priv *priv) | |||
948 | spin_unlock_irqrestore(&priv->lock, flags); | 992 | spin_unlock_irqrestore(&priv->lock, flags); |
949 | } | 993 | } |
950 | 994 | ||
951 | void ipw_led_radio_on(struct ipw_priv *priv) | 995 | static void ipw_led_radio_on(struct ipw_priv *priv) |
952 | { | 996 | { |
953 | ipw_led_link_on(priv); | 997 | ipw_led_link_on(priv); |
954 | } | 998 | } |
955 | 999 | ||
956 | void ipw_led_radio_off(struct ipw_priv *priv) | 1000 | static void ipw_led_radio_off(struct ipw_priv *priv) |
957 | { | 1001 | { |
958 | ipw_led_activity_off(priv); | 1002 | ipw_led_activity_off(priv); |
959 | ipw_led_link_off(priv); | 1003 | ipw_led_link_off(priv); |
960 | } | 1004 | } |
961 | 1005 | ||
962 | void ipw_led_link_up(struct ipw_priv *priv) | 1006 | static void ipw_led_link_up(struct ipw_priv *priv) |
963 | { | 1007 | { |
964 | /* Set the Link Led on for all nic types */ | 1008 | /* Set the Link Led on for all nic types */ |
965 | ipw_led_link_on(priv); | 1009 | ipw_led_link_on(priv); |
966 | } | 1010 | } |
967 | 1011 | ||
968 | void ipw_led_link_down(struct ipw_priv *priv) | 1012 | static void ipw_led_link_down(struct ipw_priv *priv) |
969 | { | 1013 | { |
970 | ipw_led_activity_off(priv); | 1014 | ipw_led_activity_off(priv); |
971 | ipw_led_link_off(priv); | 1015 | ipw_led_link_off(priv); |
@@ -974,7 +1018,7 @@ void ipw_led_link_down(struct ipw_priv *priv) | |||
974 | ipw_led_radio_off(priv); | 1018 | ipw_led_radio_off(priv); |
975 | } | 1019 | } |
976 | 1020 | ||
977 | void ipw_led_init(struct ipw_priv *priv) | 1021 | static void ipw_led_init(struct ipw_priv *priv) |
978 | { | 1022 | { |
979 | priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; | 1023 | priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; |
980 | 1024 | ||
@@ -1025,7 +1069,7 @@ void ipw_led_init(struct ipw_priv *priv) | |||
1025 | } | 1069 | } |
1026 | } | 1070 | } |
1027 | 1071 | ||
1028 | void ipw_led_shutdown(struct ipw_priv *priv) | 1072 | static void ipw_led_shutdown(struct ipw_priv *priv) |
1029 | { | 1073 | { |
1030 | ipw_led_activity_off(priv); | 1074 | ipw_led_activity_off(priv); |
1031 | ipw_led_link_off(priv); | 1075 | ipw_led_link_off(priv); |
@@ -1074,6 +1118,7 @@ static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, | |||
1074 | 1118 | ||
1075 | static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) | 1119 | static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) |
1076 | { | 1120 | { |
1121 | /* length = 1st dword in log */ | ||
1077 | return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); | 1122 | return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); |
1078 | } | 1123 | } |
1079 | 1124 | ||
@@ -1603,7 +1648,7 @@ static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, | |||
1603 | break; | 1648 | break; |
1604 | } | 1649 | } |
1605 | 1650 | ||
1606 | if (ipw_is_valid_channel(priv->ieee, channel)) | 1651 | if (ieee80211_is_valid_channel(priv->ieee, channel)) |
1607 | priv->speed_scan[pos++] = channel; | 1652 | priv->speed_scan[pos++] = channel; |
1608 | else | 1653 | else |
1609 | IPW_WARNING("Skipping invalid channel request: %d\n", | 1654 | IPW_WARNING("Skipping invalid channel request: %d\n", |
@@ -1751,9 +1796,9 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
1751 | } | 1796 | } |
1752 | 1797 | ||
1753 | if (inta & IPW_INTA_BIT_FATAL_ERROR) { | 1798 | if (inta & IPW_INTA_BIT_FATAL_ERROR) { |
1754 | IPW_ERROR("Firmware error detected. Restarting.\n"); | 1799 | IPW_WARNING("Firmware error detected. Restarting.\n"); |
1755 | if (priv->error) { | 1800 | if (priv->error) { |
1756 | IPW_ERROR("Sysfs 'error' log already exists.\n"); | 1801 | IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); |
1757 | #ifdef CONFIG_IPW2200_DEBUG | 1802 | #ifdef CONFIG_IPW2200_DEBUG |
1758 | if (ipw_debug_level & IPW_DL_FW_ERRORS) { | 1803 | if (ipw_debug_level & IPW_DL_FW_ERRORS) { |
1759 | struct ipw_fw_error *error = | 1804 | struct ipw_fw_error *error = |
@@ -1766,10 +1811,10 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
1766 | } else { | 1811 | } else { |
1767 | priv->error = ipw_alloc_error_log(priv); | 1812 | priv->error = ipw_alloc_error_log(priv); |
1768 | if (priv->error) | 1813 | if (priv->error) |
1769 | IPW_ERROR("Sysfs 'error' log captured.\n"); | 1814 | IPW_DEBUG_FW("Sysfs 'error' log captured.\n"); |
1770 | else | 1815 | else |
1771 | IPW_ERROR("Error allocating sysfs 'error' " | 1816 | IPW_DEBUG_FW("Error allocating sysfs 'error' " |
1772 | "log.\n"); | 1817 | "log.\n"); |
1773 | #ifdef CONFIG_IPW2200_DEBUG | 1818 | #ifdef CONFIG_IPW2200_DEBUG |
1774 | if (ipw_debug_level & IPW_DL_FW_ERRORS) | 1819 | if (ipw_debug_level & IPW_DL_FW_ERRORS) |
1775 | ipw_dump_error_log(priv, priv->error); | 1820 | ipw_dump_error_log(priv, priv->error); |
@@ -1870,7 +1915,8 @@ static char *get_cmd_string(u8 cmd) | |||
1870 | } | 1915 | } |
1871 | 1916 | ||
1872 | #define HOST_COMPLETE_TIMEOUT HZ | 1917 | #define HOST_COMPLETE_TIMEOUT HZ |
1873 | static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | 1918 | |
1919 | static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | ||
1874 | { | 1920 | { |
1875 | int rc = 0; | 1921 | int rc = 0; |
1876 | unsigned long flags; | 1922 | unsigned long flags; |
@@ -1897,9 +1943,15 @@ static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | |||
1897 | IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", | 1943 | IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", |
1898 | get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, | 1944 | get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, |
1899 | priv->status); | 1945 | priv->status); |
1900 | printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); | ||
1901 | 1946 | ||
1902 | rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0); | 1947 | #ifndef DEBUG_CMD_WEP_KEY |
1948 | if (cmd->cmd == IPW_CMD_WEP_KEY) | ||
1949 | IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n"); | ||
1950 | else | ||
1951 | #endif | ||
1952 | printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); | ||
1953 | |||
1954 | rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0); | ||
1903 | if (rc) { | 1955 | if (rc) { |
1904 | priv->status &= ~STATUS_HCMD_ACTIVE; | 1956 | priv->status &= ~STATUS_HCMD_ACTIVE; |
1905 | IPW_ERROR("Failed to send %s: Reason %d\n", | 1957 | IPW_ERROR("Failed to send %s: Reason %d\n", |
@@ -1942,61 +1994,62 @@ static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | |||
1942 | return rc; | 1994 | return rc; |
1943 | } | 1995 | } |
1944 | 1996 | ||
1945 | static int ipw_send_host_complete(struct ipw_priv *priv) | 1997 | static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command) |
1998 | { | ||
1999 | struct host_cmd cmd = { | ||
2000 | .cmd = command, | ||
2001 | }; | ||
2002 | |||
2003 | return __ipw_send_cmd(priv, &cmd); | ||
2004 | } | ||
2005 | |||
2006 | static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len, | ||
2007 | void *data) | ||
1946 | { | 2008 | { |
1947 | struct host_cmd cmd = { | 2009 | struct host_cmd cmd = { |
1948 | .cmd = IPW_CMD_HOST_COMPLETE, | 2010 | .cmd = command, |
1949 | .len = 0 | 2011 | .len = len, |
2012 | .param = data, | ||
1950 | }; | 2013 | }; |
1951 | 2014 | ||
2015 | return __ipw_send_cmd(priv, &cmd); | ||
2016 | } | ||
2017 | |||
2018 | static int ipw_send_host_complete(struct ipw_priv *priv) | ||
2019 | { | ||
1952 | if (!priv) { | 2020 | if (!priv) { |
1953 | IPW_ERROR("Invalid args\n"); | 2021 | IPW_ERROR("Invalid args\n"); |
1954 | return -1; | 2022 | return -1; |
1955 | } | 2023 | } |
1956 | 2024 | ||
1957 | return ipw_send_cmd(priv, &cmd); | 2025 | return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); |
1958 | } | 2026 | } |
1959 | 2027 | ||
1960 | static int ipw_send_system_config(struct ipw_priv *priv, | 2028 | static int ipw_send_system_config(struct ipw_priv *priv, |
1961 | struct ipw_sys_config *config) | 2029 | struct ipw_sys_config *config) |
1962 | { | 2030 | { |
1963 | struct host_cmd cmd = { | ||
1964 | .cmd = IPW_CMD_SYSTEM_CONFIG, | ||
1965 | .len = sizeof(*config) | ||
1966 | }; | ||
1967 | |||
1968 | if (!priv || !config) { | 2031 | if (!priv || !config) { |
1969 | IPW_ERROR("Invalid args\n"); | 2032 | IPW_ERROR("Invalid args\n"); |
1970 | return -1; | 2033 | return -1; |
1971 | } | 2034 | } |
1972 | 2035 | ||
1973 | memcpy(cmd.param, config, sizeof(*config)); | 2036 | return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config), |
1974 | return ipw_send_cmd(priv, &cmd); | 2037 | config); |
1975 | } | 2038 | } |
1976 | 2039 | ||
1977 | static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) | 2040 | static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) |
1978 | { | 2041 | { |
1979 | struct host_cmd cmd = { | ||
1980 | .cmd = IPW_CMD_SSID, | ||
1981 | .len = min(len, IW_ESSID_MAX_SIZE) | ||
1982 | }; | ||
1983 | |||
1984 | if (!priv || !ssid) { | 2042 | if (!priv || !ssid) { |
1985 | IPW_ERROR("Invalid args\n"); | 2043 | IPW_ERROR("Invalid args\n"); |
1986 | return -1; | 2044 | return -1; |
1987 | } | 2045 | } |
1988 | 2046 | ||
1989 | memcpy(cmd.param, ssid, cmd.len); | 2047 | return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE), |
1990 | return ipw_send_cmd(priv, &cmd); | 2048 | ssid); |
1991 | } | 2049 | } |
1992 | 2050 | ||
1993 | static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) | 2051 | static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) |
1994 | { | 2052 | { |
1995 | struct host_cmd cmd = { | ||
1996 | .cmd = IPW_CMD_ADAPTER_ADDRESS, | ||
1997 | .len = ETH_ALEN | ||
1998 | }; | ||
1999 | |||
2000 | if (!priv || !mac) { | 2053 | if (!priv || !mac) { |
2001 | IPW_ERROR("Invalid args\n"); | 2054 | IPW_ERROR("Invalid args\n"); |
2002 | return -1; | 2055 | return -1; |
@@ -2005,8 +2058,7 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) | |||
2005 | IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n", | 2058 | IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n", |
2006 | priv->net_dev->name, MAC_ARG(mac)); | 2059 | priv->net_dev->name, MAC_ARG(mac)); |
2007 | 2060 | ||
2008 | memcpy(cmd.param, mac, ETH_ALEN); | 2061 | return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); |
2009 | return ipw_send_cmd(priv, &cmd); | ||
2010 | } | 2062 | } |
2011 | 2063 | ||
2012 | /* | 2064 | /* |
@@ -2036,9 +2088,9 @@ static void ipw_adapter_restart(void *adapter) | |||
2036 | static void ipw_bg_adapter_restart(void *data) | 2088 | static void ipw_bg_adapter_restart(void *data) |
2037 | { | 2089 | { |
2038 | struct ipw_priv *priv = data; | 2090 | struct ipw_priv *priv = data; |
2039 | down(&priv->sem); | 2091 | mutex_lock(&priv->mutex); |
2040 | ipw_adapter_restart(data); | 2092 | ipw_adapter_restart(data); |
2041 | up(&priv->sem); | 2093 | mutex_unlock(&priv->mutex); |
2042 | } | 2094 | } |
2043 | 2095 | ||
2044 | #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) | 2096 | #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) |
@@ -2048,8 +2100,8 @@ static void ipw_scan_check(void *data) | |||
2048 | struct ipw_priv *priv = data; | 2100 | struct ipw_priv *priv = data; |
2049 | if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { | 2101 | if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { |
2050 | IPW_DEBUG_SCAN("Scan completion watchdog resetting " | 2102 | IPW_DEBUG_SCAN("Scan completion watchdog resetting " |
2051 | "adapter (%dms).\n", | 2103 | "adapter after (%dms).\n", |
2052 | IPW_SCAN_CHECK_WATCHDOG / 100); | 2104 | jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); |
2053 | queue_work(priv->workqueue, &priv->adapter_restart); | 2105 | queue_work(priv->workqueue, &priv->adapter_restart); |
2054 | } | 2106 | } |
2055 | } | 2107 | } |
@@ -2057,59 +2109,48 @@ static void ipw_scan_check(void *data) | |||
2057 | static void ipw_bg_scan_check(void *data) | 2109 | static void ipw_bg_scan_check(void *data) |
2058 | { | 2110 | { |
2059 | struct ipw_priv *priv = data; | 2111 | struct ipw_priv *priv = data; |
2060 | down(&priv->sem); | 2112 | mutex_lock(&priv->mutex); |
2061 | ipw_scan_check(data); | 2113 | ipw_scan_check(data); |
2062 | up(&priv->sem); | 2114 | mutex_unlock(&priv->mutex); |
2063 | } | 2115 | } |
2064 | 2116 | ||
2065 | static int ipw_send_scan_request_ext(struct ipw_priv *priv, | 2117 | static int ipw_send_scan_request_ext(struct ipw_priv *priv, |
2066 | struct ipw_scan_request_ext *request) | 2118 | struct ipw_scan_request_ext *request) |
2067 | { | 2119 | { |
2068 | struct host_cmd cmd = { | 2120 | return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT, |
2069 | .cmd = IPW_CMD_SCAN_REQUEST_EXT, | 2121 | sizeof(*request), request); |
2070 | .len = sizeof(*request) | ||
2071 | }; | ||
2072 | |||
2073 | memcpy(cmd.param, request, sizeof(*request)); | ||
2074 | return ipw_send_cmd(priv, &cmd); | ||
2075 | } | 2122 | } |
2076 | 2123 | ||
2077 | static int ipw_send_scan_abort(struct ipw_priv *priv) | 2124 | static int ipw_send_scan_abort(struct ipw_priv *priv) |
2078 | { | 2125 | { |
2079 | struct host_cmd cmd = { | ||
2080 | .cmd = IPW_CMD_SCAN_ABORT, | ||
2081 | .len = 0 | ||
2082 | }; | ||
2083 | |||
2084 | if (!priv) { | 2126 | if (!priv) { |
2085 | IPW_ERROR("Invalid args\n"); | 2127 | IPW_ERROR("Invalid args\n"); |
2086 | return -1; | 2128 | return -1; |
2087 | } | 2129 | } |
2088 | 2130 | ||
2089 | return ipw_send_cmd(priv, &cmd); | 2131 | return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT); |
2090 | } | 2132 | } |
2091 | 2133 | ||
2092 | static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) | 2134 | static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) |
2093 | { | 2135 | { |
2094 | struct host_cmd cmd = { | 2136 | struct ipw_sensitivity_calib calib = { |
2095 | .cmd = IPW_CMD_SENSITIVITY_CALIB, | 2137 | .beacon_rssi_raw = sens, |
2096 | .len = sizeof(struct ipw_sensitivity_calib) | ||
2097 | }; | 2138 | }; |
2098 | struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *) | 2139 | |
2099 | &cmd.param; | 2140 | return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), |
2100 | calib->beacon_rssi_raw = sens; | 2141 | &calib); |
2101 | return ipw_send_cmd(priv, &cmd); | ||
2102 | } | 2142 | } |
2103 | 2143 | ||
2104 | static int ipw_send_associate(struct ipw_priv *priv, | 2144 | static int ipw_send_associate(struct ipw_priv *priv, |
2105 | struct ipw_associate *associate) | 2145 | struct ipw_associate *associate) |
2106 | { | 2146 | { |
2107 | struct host_cmd cmd = { | ||
2108 | .cmd = IPW_CMD_ASSOCIATE, | ||
2109 | .len = sizeof(*associate) | ||
2110 | }; | ||
2111 | |||
2112 | struct ipw_associate tmp_associate; | 2147 | struct ipw_associate tmp_associate; |
2148 | |||
2149 | if (!priv || !associate) { | ||
2150 | IPW_ERROR("Invalid args\n"); | ||
2151 | return -1; | ||
2152 | } | ||
2153 | |||
2113 | memcpy(&tmp_associate, associate, sizeof(*associate)); | 2154 | memcpy(&tmp_associate, associate, sizeof(*associate)); |
2114 | tmp_associate.policy_support = | 2155 | tmp_associate.policy_support = |
2115 | cpu_to_le16(tmp_associate.policy_support); | 2156 | cpu_to_le16(tmp_associate.policy_support); |
@@ -2122,85 +2163,60 @@ static int ipw_send_associate(struct ipw_priv *priv, | |||
2122 | cpu_to_le16(tmp_associate.beacon_interval); | 2163 | cpu_to_le16(tmp_associate.beacon_interval); |
2123 | tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window); | 2164 | tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window); |
2124 | 2165 | ||
2125 | if (!priv || !associate) { | 2166 | return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate), |
2126 | IPW_ERROR("Invalid args\n"); | 2167 | &tmp_associate); |
2127 | return -1; | ||
2128 | } | ||
2129 | |||
2130 | memcpy(cmd.param, &tmp_associate, sizeof(*associate)); | ||
2131 | return ipw_send_cmd(priv, &cmd); | ||
2132 | } | 2168 | } |
2133 | 2169 | ||
2134 | static int ipw_send_supported_rates(struct ipw_priv *priv, | 2170 | static int ipw_send_supported_rates(struct ipw_priv *priv, |
2135 | struct ipw_supported_rates *rates) | 2171 | struct ipw_supported_rates *rates) |
2136 | { | 2172 | { |
2137 | struct host_cmd cmd = { | ||
2138 | .cmd = IPW_CMD_SUPPORTED_RATES, | ||
2139 | .len = sizeof(*rates) | ||
2140 | }; | ||
2141 | |||
2142 | if (!priv || !rates) { | 2173 | if (!priv || !rates) { |
2143 | IPW_ERROR("Invalid args\n"); | 2174 | IPW_ERROR("Invalid args\n"); |
2144 | return -1; | 2175 | return -1; |
2145 | } | 2176 | } |
2146 | 2177 | ||
2147 | memcpy(cmd.param, rates, sizeof(*rates)); | 2178 | return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates), |
2148 | return ipw_send_cmd(priv, &cmd); | 2179 | rates); |
2149 | } | 2180 | } |
2150 | 2181 | ||
2151 | static int ipw_set_random_seed(struct ipw_priv *priv) | 2182 | static int ipw_set_random_seed(struct ipw_priv *priv) |
2152 | { | 2183 | { |
2153 | struct host_cmd cmd = { | 2184 | u32 val; |
2154 | .cmd = IPW_CMD_SEED_NUMBER, | ||
2155 | .len = sizeof(u32) | ||
2156 | }; | ||
2157 | 2185 | ||
2158 | if (!priv) { | 2186 | if (!priv) { |
2159 | IPW_ERROR("Invalid args\n"); | 2187 | IPW_ERROR("Invalid args\n"); |
2160 | return -1; | 2188 | return -1; |
2161 | } | 2189 | } |
2162 | 2190 | ||
2163 | get_random_bytes(&cmd.param, sizeof(u32)); | 2191 | get_random_bytes(&val, sizeof(val)); |
2164 | 2192 | ||
2165 | return ipw_send_cmd(priv, &cmd); | 2193 | return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val); |
2166 | } | 2194 | } |
2167 | 2195 | ||
2168 | static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) | 2196 | static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) |
2169 | { | 2197 | { |
2170 | struct host_cmd cmd = { | ||
2171 | .cmd = IPW_CMD_CARD_DISABLE, | ||
2172 | .len = sizeof(u32) | ||
2173 | }; | ||
2174 | |||
2175 | if (!priv) { | 2198 | if (!priv) { |
2176 | IPW_ERROR("Invalid args\n"); | 2199 | IPW_ERROR("Invalid args\n"); |
2177 | return -1; | 2200 | return -1; |
2178 | } | 2201 | } |
2179 | 2202 | ||
2180 | *((u32 *) & cmd.param) = phy_off; | 2203 | return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off), |
2181 | 2204 | &phy_off); | |
2182 | return ipw_send_cmd(priv, &cmd); | ||
2183 | } | 2205 | } |
2184 | 2206 | ||
2185 | static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) | 2207 | static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) |
2186 | { | 2208 | { |
2187 | struct host_cmd cmd = { | ||
2188 | .cmd = IPW_CMD_TX_POWER, | ||
2189 | .len = sizeof(*power) | ||
2190 | }; | ||
2191 | |||
2192 | if (!priv || !power) { | 2209 | if (!priv || !power) { |
2193 | IPW_ERROR("Invalid args\n"); | 2210 | IPW_ERROR("Invalid args\n"); |
2194 | return -1; | 2211 | return -1; |
2195 | } | 2212 | } |
2196 | 2213 | ||
2197 | memcpy(cmd.param, power, sizeof(*power)); | 2214 | return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power); |
2198 | return ipw_send_cmd(priv, &cmd); | ||
2199 | } | 2215 | } |
2200 | 2216 | ||
2201 | static int ipw_set_tx_power(struct ipw_priv *priv) | 2217 | static int ipw_set_tx_power(struct ipw_priv *priv) |
2202 | { | 2218 | { |
2203 | const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); | 2219 | const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee); |
2204 | struct ipw_tx_power tx_power; | 2220 | struct ipw_tx_power tx_power; |
2205 | s8 max_power; | 2221 | s8 max_power; |
2206 | int i; | 2222 | int i; |
@@ -2247,18 +2263,14 @@ static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) | |||
2247 | struct ipw_rts_threshold rts_threshold = { | 2263 | struct ipw_rts_threshold rts_threshold = { |
2248 | .rts_threshold = rts, | 2264 | .rts_threshold = rts, |
2249 | }; | 2265 | }; |
2250 | struct host_cmd cmd = { | ||
2251 | .cmd = IPW_CMD_RTS_THRESHOLD, | ||
2252 | .len = sizeof(rts_threshold) | ||
2253 | }; | ||
2254 | 2266 | ||
2255 | if (!priv) { | 2267 | if (!priv) { |
2256 | IPW_ERROR("Invalid args\n"); | 2268 | IPW_ERROR("Invalid args\n"); |
2257 | return -1; | 2269 | return -1; |
2258 | } | 2270 | } |
2259 | 2271 | ||
2260 | memcpy(cmd.param, &rts_threshold, sizeof(rts_threshold)); | 2272 | return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD, |
2261 | return ipw_send_cmd(priv, &cmd); | 2273 | sizeof(rts_threshold), &rts_threshold); |
2262 | } | 2274 | } |
2263 | 2275 | ||
2264 | static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) | 2276 | static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) |
@@ -2266,27 +2278,19 @@ static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) | |||
2266 | struct ipw_frag_threshold frag_threshold = { | 2278 | struct ipw_frag_threshold frag_threshold = { |
2267 | .frag_threshold = frag, | 2279 | .frag_threshold = frag, |
2268 | }; | 2280 | }; |
2269 | struct host_cmd cmd = { | ||
2270 | .cmd = IPW_CMD_FRAG_THRESHOLD, | ||
2271 | .len = sizeof(frag_threshold) | ||
2272 | }; | ||
2273 | 2281 | ||
2274 | if (!priv) { | 2282 | if (!priv) { |
2275 | IPW_ERROR("Invalid args\n"); | 2283 | IPW_ERROR("Invalid args\n"); |
2276 | return -1; | 2284 | return -1; |
2277 | } | 2285 | } |
2278 | 2286 | ||
2279 | memcpy(cmd.param, &frag_threshold, sizeof(frag_threshold)); | 2287 | return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD, |
2280 | return ipw_send_cmd(priv, &cmd); | 2288 | sizeof(frag_threshold), &frag_threshold); |
2281 | } | 2289 | } |
2282 | 2290 | ||
2283 | static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) | 2291 | static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) |
2284 | { | 2292 | { |
2285 | struct host_cmd cmd = { | 2293 | u32 param; |
2286 | .cmd = IPW_CMD_POWER_MODE, | ||
2287 | .len = sizeof(u32) | ||
2288 | }; | ||
2289 | u32 *param = (u32 *) (&cmd.param); | ||
2290 | 2294 | ||
2291 | if (!priv) { | 2295 | if (!priv) { |
2292 | IPW_ERROR("Invalid args\n"); | 2296 | IPW_ERROR("Invalid args\n"); |
@@ -2297,17 +2301,18 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) | |||
2297 | * level */ | 2301 | * level */ |
2298 | switch (mode) { | 2302 | switch (mode) { |
2299 | case IPW_POWER_BATTERY: | 2303 | case IPW_POWER_BATTERY: |
2300 | *param = IPW_POWER_INDEX_3; | 2304 | param = IPW_POWER_INDEX_3; |
2301 | break; | 2305 | break; |
2302 | case IPW_POWER_AC: | 2306 | case IPW_POWER_AC: |
2303 | *param = IPW_POWER_MODE_CAM; | 2307 | param = IPW_POWER_MODE_CAM; |
2304 | break; | 2308 | break; |
2305 | default: | 2309 | default: |
2306 | *param = mode; | 2310 | param = mode; |
2307 | break; | 2311 | break; |
2308 | } | 2312 | } |
2309 | 2313 | ||
2310 | return ipw_send_cmd(priv, &cmd); | 2314 | return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), |
2315 | ¶m); | ||
2311 | } | 2316 | } |
2312 | 2317 | ||
2313 | static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) | 2318 | static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) |
@@ -2316,18 +2321,14 @@ static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) | |||
2316 | .short_retry_limit = slimit, | 2321 | .short_retry_limit = slimit, |
2317 | .long_retry_limit = llimit | 2322 | .long_retry_limit = llimit |
2318 | }; | 2323 | }; |
2319 | struct host_cmd cmd = { | ||
2320 | .cmd = IPW_CMD_RETRY_LIMIT, | ||
2321 | .len = sizeof(retry_limit) | ||
2322 | }; | ||
2323 | 2324 | ||
2324 | if (!priv) { | 2325 | if (!priv) { |
2325 | IPW_ERROR("Invalid args\n"); | 2326 | IPW_ERROR("Invalid args\n"); |
2326 | return -1; | 2327 | return -1; |
2327 | } | 2328 | } |
2328 | 2329 | ||
2329 | memcpy(cmd.param, &retry_limit, sizeof(retry_limit)); | 2330 | return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit), |
2330 | return ipw_send_cmd(priv, &cmd); | 2331 | &retry_limit); |
2331 | } | 2332 | } |
2332 | 2333 | ||
2333 | /* | 2334 | /* |
@@ -2454,7 +2455,7 @@ static void ipw_eeprom_init_sram(struct ipw_priv *priv) | |||
2454 | /* | 2455 | /* |
2455 | If the data looks correct, then copy it to our private | 2456 | If the data looks correct, then copy it to our private |
2456 | copy. Otherwise let the firmware know to perform the operation | 2457 | copy. Otherwise let the firmware know to perform the operation |
2457 | on it's own | 2458 | on its own. |
2458 | */ | 2459 | */ |
2459 | if (priv->eeprom[EEPROM_VERSION] != 0) { | 2460 | if (priv->eeprom[EEPROM_VERSION] != 0) { |
2460 | IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); | 2461 | IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); |
@@ -2707,22 +2708,25 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, | |||
2707 | 2708 | ||
2708 | static int ipw_fw_dma_wait(struct ipw_priv *priv) | 2709 | static int ipw_fw_dma_wait(struct ipw_priv *priv) |
2709 | { | 2710 | { |
2710 | u32 current_index = 0; | 2711 | u32 current_index = 0, previous_index; |
2711 | u32 watchdog = 0; | 2712 | u32 watchdog = 0; |
2712 | 2713 | ||
2713 | IPW_DEBUG_FW(">> : \n"); | 2714 | IPW_DEBUG_FW(">> : \n"); |
2714 | 2715 | ||
2715 | current_index = ipw_fw_dma_command_block_index(priv); | 2716 | current_index = ipw_fw_dma_command_block_index(priv); |
2716 | IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n", | 2717 | IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", |
2717 | (int)priv->sram_desc.last_cb_index); | 2718 | (int)priv->sram_desc.last_cb_index); |
2718 | 2719 | ||
2719 | while (current_index < priv->sram_desc.last_cb_index) { | 2720 | while (current_index < priv->sram_desc.last_cb_index) { |
2720 | udelay(50); | 2721 | udelay(50); |
2722 | previous_index = current_index; | ||
2721 | current_index = ipw_fw_dma_command_block_index(priv); | 2723 | current_index = ipw_fw_dma_command_block_index(priv); |
2722 | 2724 | ||
2723 | watchdog++; | 2725 | if (previous_index < current_index) { |
2724 | 2726 | watchdog = 0; | |
2725 | if (watchdog > 400) { | 2727 | continue; |
2728 | } | ||
2729 | if (++watchdog > 400) { | ||
2726 | IPW_DEBUG_FW_INFO("Timeout\n"); | 2730 | IPW_DEBUG_FW_INFO("Timeout\n"); |
2727 | ipw_fw_dma_dump_command_block(priv); | 2731 | ipw_fw_dma_dump_command_block(priv); |
2728 | ipw_fw_dma_abort(priv); | 2732 | ipw_fw_dma_abort(priv); |
@@ -2772,6 +2776,7 @@ static inline int ipw_alive(struct ipw_priv *priv) | |||
2772 | return ipw_read32(priv, 0x90) == 0xd55555d5; | 2776 | return ipw_read32(priv, 0x90) == 0xd55555d5; |
2773 | } | 2777 | } |
2774 | 2778 | ||
2779 | /* timeout in msec, attempted in 10-msec quanta */ | ||
2775 | static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, | 2780 | static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, |
2776 | int timeout) | 2781 | int timeout) |
2777 | { | 2782 | { |
@@ -2800,10 +2805,11 @@ static int ipw_stop_master(struct ipw_priv *priv) | |||
2800 | /* stop master. typical delay - 0 */ | 2805 | /* stop master. typical delay - 0 */ |
2801 | ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); | 2806 | ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); |
2802 | 2807 | ||
2808 | /* timeout is in msec, polled in 10-msec quanta */ | ||
2803 | rc = ipw_poll_bit(priv, IPW_RESET_REG, | 2809 | rc = ipw_poll_bit(priv, IPW_RESET_REG, |
2804 | IPW_RESET_REG_MASTER_DISABLED, 100); | 2810 | IPW_RESET_REG_MASTER_DISABLED, 100); |
2805 | if (rc < 0) { | 2811 | if (rc < 0) { |
2806 | IPW_ERROR("stop master failed in 10ms\n"); | 2812 | IPW_ERROR("wait for stop master failed after 100ms\n"); |
2807 | return -1; | 2813 | return -1; |
2808 | } | 2814 | } |
2809 | 2815 | ||
@@ -2823,33 +2829,11 @@ static void ipw_arc_release(struct ipw_priv *priv) | |||
2823 | mdelay(5); | 2829 | mdelay(5); |
2824 | } | 2830 | } |
2825 | 2831 | ||
2826 | struct fw_header { | ||
2827 | u32 version; | ||
2828 | u32 mode; | ||
2829 | }; | ||
2830 | |||
2831 | struct fw_chunk { | 2832 | struct fw_chunk { |
2832 | u32 address; | 2833 | u32 address; |
2833 | u32 length; | 2834 | u32 length; |
2834 | }; | 2835 | }; |
2835 | 2836 | ||
2836 | #define IPW_FW_MAJOR_VERSION 2 | ||
2837 | #define IPW_FW_MINOR_VERSION 4 | ||
2838 | |||
2839 | #define IPW_FW_MINOR(x) ((x & 0xff) >> 8) | ||
2840 | #define IPW_FW_MAJOR(x) (x & 0xff) | ||
2841 | |||
2842 | #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | IPW_FW_MAJOR_VERSION) | ||
2843 | |||
2844 | #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \ | ||
2845 | "." __stringify(IPW_FW_MINOR_VERSION) "-" | ||
2846 | |||
2847 | #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0 | ||
2848 | #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw" | ||
2849 | #else | ||
2850 | #define IPW_FW_NAME(x) "ipw2200_" x ".fw" | ||
2851 | #endif | ||
2852 | |||
2853 | static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) | 2837 | static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) |
2854 | { | 2838 | { |
2855 | int rc = 0, i, addr; | 2839 | int rc = 0, i, addr; |
@@ -2890,8 +2874,8 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) | |||
2890 | mdelay(1); | 2874 | mdelay(1); |
2891 | 2875 | ||
2892 | /* enable ucode store */ | 2876 | /* enable ucode store */ |
2893 | ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0); | 2877 | ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0); |
2894 | ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS); | 2878 | ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS); |
2895 | mdelay(1); | 2879 | mdelay(1); |
2896 | 2880 | ||
2897 | /* write ucode */ | 2881 | /* write ucode */ |
@@ -3036,7 +3020,7 @@ static int ipw_stop_nic(struct ipw_priv *priv) | |||
3036 | rc = ipw_poll_bit(priv, IPW_RESET_REG, | 3020 | rc = ipw_poll_bit(priv, IPW_RESET_REG, |
3037 | IPW_RESET_REG_MASTER_DISABLED, 500); | 3021 | IPW_RESET_REG_MASTER_DISABLED, 500); |
3038 | if (rc < 0) { | 3022 | if (rc < 0) { |
3039 | IPW_ERROR("wait for reg master disabled failed\n"); | 3023 | IPW_ERROR("wait for reg master disabled failed after 500ms\n"); |
3040 | return rc; | 3024 | return rc; |
3041 | } | 3025 | } |
3042 | 3026 | ||
@@ -3118,33 +3102,47 @@ static int ipw_reset_nic(struct ipw_priv *priv) | |||
3118 | return rc; | 3102 | return rc; |
3119 | } | 3103 | } |
3120 | 3104 | ||
3105 | |||
3106 | struct ipw_fw { | ||
3107 | u32 ver; | ||
3108 | u32 boot_size; | ||
3109 | u32 ucode_size; | ||
3110 | u32 fw_size; | ||
3111 | u8 data[0]; | ||
3112 | }; | ||
3113 | |||
3121 | static int ipw_get_fw(struct ipw_priv *priv, | 3114 | static int ipw_get_fw(struct ipw_priv *priv, |
3122 | const struct firmware **fw, const char *name) | 3115 | const struct firmware **raw, const char *name) |
3123 | { | 3116 | { |
3124 | struct fw_header *header; | 3117 | struct ipw_fw *fw; |
3125 | int rc; | 3118 | int rc; |
3126 | 3119 | ||
3127 | /* ask firmware_class module to get the boot firmware off disk */ | 3120 | /* ask firmware_class module to get the boot firmware off disk */ |
3128 | rc = request_firmware(fw, name, &priv->pci_dev->dev); | 3121 | rc = request_firmware(raw, name, &priv->pci_dev->dev); |
3129 | if (rc < 0) { | 3122 | if (rc < 0) { |
3130 | IPW_ERROR("%s load failed: Reason %d\n", name, rc); | 3123 | IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc); |
3131 | return rc; | 3124 | return rc; |
3132 | } | 3125 | } |
3133 | 3126 | ||
3134 | header = (struct fw_header *)(*fw)->data; | 3127 | if ((*raw)->size < sizeof(*fw)) { |
3135 | if (IPW_FW_MAJOR(le32_to_cpu(header->version)) != IPW_FW_MAJOR_VERSION) { | 3128 | IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size); |
3136 | IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n", | 3129 | return -EINVAL; |
3137 | name, | 3130 | } |
3138 | IPW_FW_MAJOR(le32_to_cpu(header->version)), | 3131 | |
3139 | IPW_FW_MAJOR_VERSION); | 3132 | fw = (void *)(*raw)->data; |
3133 | |||
3134 | if ((*raw)->size < sizeof(*fw) + | ||
3135 | fw->boot_size + fw->ucode_size + fw->fw_size) { | ||
3136 | IPW_ERROR("%s is too small or corrupt (%zd)\n", | ||
3137 | name, (*raw)->size); | ||
3140 | return -EINVAL; | 3138 | return -EINVAL; |
3141 | } | 3139 | } |
3142 | 3140 | ||
3143 | IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n", | 3141 | IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n", |
3144 | name, | 3142 | name, |
3145 | IPW_FW_MAJOR(le32_to_cpu(header->version)), | 3143 | le32_to_cpu(fw->ver) >> 16, |
3146 | IPW_FW_MINOR(le32_to_cpu(header->version)), | 3144 | le32_to_cpu(fw->ver) & 0xff, |
3147 | (*fw)->size - sizeof(struct fw_header)); | 3145 | (*raw)->size - sizeof(*fw)); |
3148 | return 0; | 3146 | return 0; |
3149 | } | 3147 | } |
3150 | 3148 | ||
@@ -3184,17 +3182,13 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv, | |||
3184 | 3182 | ||
3185 | #ifdef CONFIG_PM | 3183 | #ifdef CONFIG_PM |
3186 | static int fw_loaded = 0; | 3184 | static int fw_loaded = 0; |
3187 | static const struct firmware *bootfw = NULL; | 3185 | static const struct firmware *raw = NULL; |
3188 | static const struct firmware *firmware = NULL; | ||
3189 | static const struct firmware *ucode = NULL; | ||
3190 | 3186 | ||
3191 | static void free_firmware(void) | 3187 | static void free_firmware(void) |
3192 | { | 3188 | { |
3193 | if (fw_loaded) { | 3189 | if (fw_loaded) { |
3194 | release_firmware(bootfw); | 3190 | release_firmware(raw); |
3195 | release_firmware(ucode); | 3191 | raw = NULL; |
3196 | release_firmware(firmware); | ||
3197 | bootfw = ucode = firmware = NULL; | ||
3198 | fw_loaded = 0; | 3192 | fw_loaded = 0; |
3199 | } | 3193 | } |
3200 | } | 3194 | } |
@@ -3205,60 +3199,50 @@ static void free_firmware(void) | |||
3205 | static int ipw_load(struct ipw_priv *priv) | 3199 | static int ipw_load(struct ipw_priv *priv) |
3206 | { | 3200 | { |
3207 | #ifndef CONFIG_PM | 3201 | #ifndef CONFIG_PM |
3208 | const struct firmware *bootfw = NULL; | 3202 | const struct firmware *raw = NULL; |
3209 | const struct firmware *firmware = NULL; | ||
3210 | const struct firmware *ucode = NULL; | ||
3211 | #endif | 3203 | #endif |
3204 | struct ipw_fw *fw; | ||
3205 | u8 *boot_img, *ucode_img, *fw_img; | ||
3206 | u8 *name = NULL; | ||
3212 | int rc = 0, retries = 3; | 3207 | int rc = 0, retries = 3; |
3213 | 3208 | ||
3214 | #ifdef CONFIG_PM | 3209 | switch (priv->ieee->iw_mode) { |
3215 | if (!fw_loaded) { | 3210 | case IW_MODE_ADHOC: |
3216 | #endif | 3211 | name = "ipw2200-ibss.fw"; |
3217 | rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot")); | 3212 | break; |
3218 | if (rc) | ||
3219 | goto error; | ||
3220 | |||
3221 | switch (priv->ieee->iw_mode) { | ||
3222 | case IW_MODE_ADHOC: | ||
3223 | rc = ipw_get_fw(priv, &ucode, | ||
3224 | IPW_FW_NAME("ibss_ucode")); | ||
3225 | if (rc) | ||
3226 | goto error; | ||
3227 | |||
3228 | rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss")); | ||
3229 | break; | ||
3230 | |||
3231 | #ifdef CONFIG_IPW2200_MONITOR | 3213 | #ifdef CONFIG_IPW2200_MONITOR |
3232 | case IW_MODE_MONITOR: | 3214 | case IW_MODE_MONITOR: |
3233 | rc = ipw_get_fw(priv, &ucode, | 3215 | name = "ipw2200-sniffer.fw"; |
3234 | IPW_FW_NAME("sniffer_ucode")); | 3216 | break; |
3235 | if (rc) | ||
3236 | goto error; | ||
3237 | |||
3238 | rc = ipw_get_fw(priv, &firmware, | ||
3239 | IPW_FW_NAME("sniffer")); | ||
3240 | break; | ||
3241 | #endif | 3217 | #endif |
3242 | case IW_MODE_INFRA: | 3218 | case IW_MODE_INFRA: |
3243 | rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode")); | 3219 | name = "ipw2200-bss.fw"; |
3244 | if (rc) | 3220 | break; |
3245 | goto error; | 3221 | } |
3246 | |||
3247 | rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss")); | ||
3248 | break; | ||
3249 | 3222 | ||
3250 | default: | 3223 | if (!name) { |
3251 | rc = -EINVAL; | 3224 | rc = -EINVAL; |
3252 | } | 3225 | goto error; |
3226 | } | ||
3253 | 3227 | ||
3254 | if (rc) | 3228 | #ifdef CONFIG_PM |
3229 | if (!fw_loaded) { | ||
3230 | #endif | ||
3231 | rc = ipw_get_fw(priv, &raw, name); | ||
3232 | if (rc < 0) | ||
3255 | goto error; | 3233 | goto error; |
3256 | |||
3257 | #ifdef CONFIG_PM | 3234 | #ifdef CONFIG_PM |
3258 | fw_loaded = 1; | ||
3259 | } | 3235 | } |
3260 | #endif | 3236 | #endif |
3261 | 3237 | ||
3238 | fw = (void *)raw->data; | ||
3239 | boot_img = &fw->data[0]; | ||
3240 | ucode_img = &fw->data[fw->boot_size]; | ||
3241 | fw_img = &fw->data[fw->boot_size + fw->ucode_size]; | ||
3242 | |||
3243 | if (rc < 0) | ||
3244 | goto error; | ||
3245 | |||
3262 | if (!priv->rxq) | 3246 | if (!priv->rxq) |
3263 | priv->rxq = ipw_rx_queue_alloc(priv); | 3247 | priv->rxq = ipw_rx_queue_alloc(priv); |
3264 | else | 3248 | else |
@@ -3279,7 +3263,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3279 | ipw_stop_nic(priv); | 3263 | ipw_stop_nic(priv); |
3280 | 3264 | ||
3281 | rc = ipw_reset_nic(priv); | 3265 | rc = ipw_reset_nic(priv); |
3282 | if (rc) { | 3266 | if (rc < 0) { |
3283 | IPW_ERROR("Unable to reset NIC\n"); | 3267 | IPW_ERROR("Unable to reset NIC\n"); |
3284 | goto error; | 3268 | goto error; |
3285 | } | 3269 | } |
@@ -3288,8 +3272,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3288 | IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); | 3272 | IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); |
3289 | 3273 | ||
3290 | /* DMA the initial boot firmware into the device */ | 3274 | /* DMA the initial boot firmware into the device */ |
3291 | rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header), | 3275 | rc = ipw_load_firmware(priv, boot_img, fw->boot_size); |
3292 | bootfw->size - sizeof(struct fw_header)); | ||
3293 | if (rc < 0) { | 3276 | if (rc < 0) { |
3294 | IPW_ERROR("Unable to load boot firmware: %d\n", rc); | 3277 | IPW_ERROR("Unable to load boot firmware: %d\n", rc); |
3295 | goto error; | 3278 | goto error; |
@@ -3298,7 +3281,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3298 | /* kick start the device */ | 3281 | /* kick start the device */ |
3299 | ipw_start_nic(priv); | 3282 | ipw_start_nic(priv); |
3300 | 3283 | ||
3301 | /* wait for the device to finish it's initial startup sequence */ | 3284 | /* wait for the device to finish its initial startup sequence */ |
3302 | rc = ipw_poll_bit(priv, IPW_INTA_RW, | 3285 | rc = ipw_poll_bit(priv, IPW_INTA_RW, |
3303 | IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); | 3286 | IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); |
3304 | if (rc < 0) { | 3287 | if (rc < 0) { |
@@ -3311,8 +3294,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3311 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); | 3294 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); |
3312 | 3295 | ||
3313 | /* DMA the ucode into the device */ | 3296 | /* DMA the ucode into the device */ |
3314 | rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header), | 3297 | rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size); |
3315 | ucode->size - sizeof(struct fw_header)); | ||
3316 | if (rc < 0) { | 3298 | if (rc < 0) { |
3317 | IPW_ERROR("Unable to load ucode: %d\n", rc); | 3299 | IPW_ERROR("Unable to load ucode: %d\n", rc); |
3318 | goto error; | 3300 | goto error; |
@@ -3322,18 +3304,19 @@ static int ipw_load(struct ipw_priv *priv) | |||
3322 | ipw_stop_nic(priv); | 3304 | ipw_stop_nic(priv); |
3323 | 3305 | ||
3324 | /* DMA bss firmware into the device */ | 3306 | /* DMA bss firmware into the device */ |
3325 | rc = ipw_load_firmware(priv, firmware->data + | 3307 | rc = ipw_load_firmware(priv, fw_img, fw->fw_size); |
3326 | sizeof(struct fw_header), | ||
3327 | firmware->size - sizeof(struct fw_header)); | ||
3328 | if (rc < 0) { | 3308 | if (rc < 0) { |
3329 | IPW_ERROR("Unable to load firmware: %d\n", rc); | 3309 | IPW_ERROR("Unable to load firmware: %d\n", rc); |
3330 | goto error; | 3310 | goto error; |
3331 | } | 3311 | } |
3312 | #ifdef CONFIG_PM | ||
3313 | fw_loaded = 1; | ||
3314 | #endif | ||
3332 | 3315 | ||
3333 | ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); | 3316 | ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); |
3334 | 3317 | ||
3335 | rc = ipw_queue_reset(priv); | 3318 | rc = ipw_queue_reset(priv); |
3336 | if (rc) { | 3319 | if (rc < 0) { |
3337 | IPW_ERROR("Unable to initialize queues\n"); | 3320 | IPW_ERROR("Unable to initialize queues\n"); |
3338 | goto error; | 3321 | goto error; |
3339 | } | 3322 | } |
@@ -3362,7 +3345,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3362 | rc = ipw_poll_bit(priv, IPW_INTA_RW, | 3345 | rc = ipw_poll_bit(priv, IPW_INTA_RW, |
3363 | IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); | 3346 | IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); |
3364 | if (rc < 0) { | 3347 | if (rc < 0) { |
3365 | IPW_ERROR("device failed to start after 500ms\n"); | 3348 | IPW_ERROR("device failed to start within 500ms\n"); |
3366 | goto error; | 3349 | goto error; |
3367 | } | 3350 | } |
3368 | IPW_DEBUG_INFO("device response after %dms\n", rc); | 3351 | IPW_DEBUG_INFO("device response after %dms\n", rc); |
@@ -3386,9 +3369,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3386 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); | 3369 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); |
3387 | 3370 | ||
3388 | #ifndef CONFIG_PM | 3371 | #ifndef CONFIG_PM |
3389 | release_firmware(bootfw); | 3372 | release_firmware(raw); |
3390 | release_firmware(ucode); | ||
3391 | release_firmware(firmware); | ||
3392 | #endif | 3373 | #endif |
3393 | return 0; | 3374 | return 0; |
3394 | 3375 | ||
@@ -3398,15 +3379,11 @@ static int ipw_load(struct ipw_priv *priv) | |||
3398 | priv->rxq = NULL; | 3379 | priv->rxq = NULL; |
3399 | } | 3380 | } |
3400 | ipw_tx_queue_free(priv); | 3381 | ipw_tx_queue_free(priv); |
3401 | if (bootfw) | 3382 | if (raw) |
3402 | release_firmware(bootfw); | 3383 | release_firmware(raw); |
3403 | if (ucode) | ||
3404 | release_firmware(ucode); | ||
3405 | if (firmware) | ||
3406 | release_firmware(firmware); | ||
3407 | #ifdef CONFIG_PM | 3384 | #ifdef CONFIG_PM |
3408 | fw_loaded = 0; | 3385 | fw_loaded = 0; |
3409 | bootfw = ucode = firmware = NULL; | 3386 | raw = NULL; |
3410 | #endif | 3387 | #endif |
3411 | 3388 | ||
3412 | return rc; | 3389 | return rc; |
@@ -3715,9 +3692,9 @@ static int ipw_disassociate(void *data) | |||
3715 | static void ipw_bg_disassociate(void *data) | 3692 | static void ipw_bg_disassociate(void *data) |
3716 | { | 3693 | { |
3717 | struct ipw_priv *priv = data; | 3694 | struct ipw_priv *priv = data; |
3718 | down(&priv->sem); | 3695 | mutex_lock(&priv->mutex); |
3719 | ipw_disassociate(data); | 3696 | ipw_disassociate(data); |
3720 | up(&priv->sem); | 3697 | mutex_unlock(&priv->mutex); |
3721 | } | 3698 | } |
3722 | 3699 | ||
3723 | static void ipw_system_config(void *data) | 3700 | static void ipw_system_config(void *data) |
@@ -4077,9 +4054,9 @@ static void ipw_gather_stats(struct ipw_priv *priv) | |||
4077 | static void ipw_bg_gather_stats(void *data) | 4054 | static void ipw_bg_gather_stats(void *data) |
4078 | { | 4055 | { |
4079 | struct ipw_priv *priv = data; | 4056 | struct ipw_priv *priv = data; |
4080 | down(&priv->sem); | 4057 | mutex_lock(&priv->mutex); |
4081 | ipw_gather_stats(data); | 4058 | ipw_gather_stats(data); |
4082 | up(&priv->sem); | 4059 | mutex_unlock(&priv->mutex); |
4083 | } | 4060 | } |
4084 | 4061 | ||
4085 | /* Missed beacon behavior: | 4062 | /* Missed beacon behavior: |
@@ -4121,8 +4098,9 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, | |||
4121 | return; | 4098 | return; |
4122 | } | 4099 | } |
4123 | 4100 | ||
4124 | if (missed_count > priv->roaming_threshold && | 4101 | if (roaming && |
4125 | missed_count <= priv->disassociate_threshold) { | 4102 | (missed_count > priv->roaming_threshold && |
4103 | missed_count <= priv->disassociate_threshold)) { | ||
4126 | /* If we are not already roaming, set the ROAM | 4104 | /* If we are not already roaming, set the ROAM |
4127 | * bit in the status and kick off a scan. | 4105 | * bit in the status and kick off a scan. |
4128 | * This can happen several times before we reach | 4106 | * This can happen several times before we reach |
@@ -4150,7 +4128,6 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, | |||
4150 | } | 4128 | } |
4151 | 4129 | ||
4152 | IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); | 4130 | IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); |
4153 | |||
4154 | } | 4131 | } |
4155 | 4132 | ||
4156 | /** | 4133 | /** |
@@ -4527,10 +4504,9 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4527 | 4504 | ||
4528 | if (notif->size == sizeof(*x)) { | 4505 | if (notif->size == sizeof(*x)) { |
4529 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, | 4506 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, |
4530 | "link deterioration: '%s' " MAC_FMT | 4507 | "link deterioration: type %d, cnt %d\n", |
4531 | " \n", escape_essid(priv->essid, | 4508 | x->silence_notification_type, |
4532 | priv->essid_len), | 4509 | x->silence_count); |
4533 | MAC_ARG(priv->bssid)); | ||
4534 | memcpy(&priv->last_link_deterioration, x, | 4510 | memcpy(&priv->last_link_deterioration, x, |
4535 | sizeof(*x)); | 4511 | sizeof(*x)); |
4536 | } else { | 4512 | } else { |
@@ -4911,13 +4887,13 @@ static void ipw_rx_queue_replenish(void *data) | |||
4911 | static void ipw_bg_rx_queue_replenish(void *data) | 4887 | static void ipw_bg_rx_queue_replenish(void *data) |
4912 | { | 4888 | { |
4913 | struct ipw_priv *priv = data; | 4889 | struct ipw_priv *priv = data; |
4914 | down(&priv->sem); | 4890 | mutex_lock(&priv->mutex); |
4915 | ipw_rx_queue_replenish(data); | 4891 | ipw_rx_queue_replenish(data); |
4916 | up(&priv->sem); | 4892 | mutex_unlock(&priv->mutex); |
4917 | } | 4893 | } |
4918 | 4894 | ||
4919 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | 4895 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. |
4920 | * If an SKB has been detached, the POOL needs to have it's SKB set to NULL | 4896 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL |
4921 | * This free routine walks the list of POOL entries and if SKB is set to | 4897 | * This free routine walks the list of POOL entries and if SKB is set to |
4922 | * non NULL it is unmapped and freed | 4898 | * non NULL it is unmapped and freed |
4923 | */ | 4899 | */ |
@@ -5257,10 +5233,11 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, | |||
5257 | if (priv->ieee->scan_age != 0 && | 5233 | if (priv->ieee->scan_age != 0 && |
5258 | time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { | 5234 | time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { |
5259 | IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " | 5235 | IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " |
5260 | "because of age: %lums.\n", | 5236 | "because of age: %ums.\n", |
5261 | escape_essid(network->ssid, network->ssid_len), | 5237 | escape_essid(network->ssid, network->ssid_len), |
5262 | MAC_ARG(network->bssid), | 5238 | MAC_ARG(network->bssid), |
5263 | 1000 * (jiffies - network->last_scanned) / HZ); | 5239 | jiffies_to_msecs(jiffies - |
5240 | network->last_scanned)); | ||
5264 | return 0; | 5241 | return 0; |
5265 | } | 5242 | } |
5266 | 5243 | ||
@@ -5369,7 +5346,7 @@ static void ipw_merge_adhoc_network(void *data) | |||
5369 | return; | 5346 | return; |
5370 | } | 5347 | } |
5371 | 5348 | ||
5372 | down(&priv->sem); | 5349 | mutex_lock(&priv->mutex); |
5373 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { | 5350 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { |
5374 | IPW_DEBUG_MERGE("remove network %s\n", | 5351 | IPW_DEBUG_MERGE("remove network %s\n", |
5375 | escape_essid(priv->essid, | 5352 | escape_essid(priv->essid, |
@@ -5379,7 +5356,7 @@ static void ipw_merge_adhoc_network(void *data) | |||
5379 | 5356 | ||
5380 | ipw_disassociate(priv); | 5357 | ipw_disassociate(priv); |
5381 | priv->assoc_network = match.network; | 5358 | priv->assoc_network = match.network; |
5382 | up(&priv->sem); | 5359 | mutex_unlock(&priv->mutex); |
5383 | return; | 5360 | return; |
5384 | } | 5361 | } |
5385 | } | 5362 | } |
@@ -5467,11 +5444,12 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5467 | if (network->last_associate && | 5444 | if (network->last_associate && |
5468 | time_after(network->last_associate + (HZ * 3UL), jiffies)) { | 5445 | time_after(network->last_associate + (HZ * 3UL), jiffies)) { |
5469 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | 5446 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " |
5470 | "because of storming (%lus since last " | 5447 | "because of storming (%ums since last " |
5471 | "assoc attempt).\n", | 5448 | "assoc attempt).\n", |
5472 | escape_essid(network->ssid, network->ssid_len), | 5449 | escape_essid(network->ssid, network->ssid_len), |
5473 | MAC_ARG(network->bssid), | 5450 | MAC_ARG(network->bssid), |
5474 | (jiffies - network->last_associate) / HZ); | 5451 | jiffies_to_msecs(jiffies - |
5452 | network->last_associate)); | ||
5475 | return 0; | 5453 | return 0; |
5476 | } | 5454 | } |
5477 | 5455 | ||
@@ -5479,10 +5457,11 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5479 | if (priv->ieee->scan_age != 0 && | 5457 | if (priv->ieee->scan_age != 0 && |
5480 | time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { | 5458 | time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { |
5481 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | 5459 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " |
5482 | "because of age: %lums.\n", | 5460 | "because of age: %ums.\n", |
5483 | escape_essid(network->ssid, network->ssid_len), | 5461 | escape_essid(network->ssid, network->ssid_len), |
5484 | MAC_ARG(network->bssid), | 5462 | MAC_ARG(network->bssid), |
5485 | 1000 * (jiffies - network->last_scanned) / HZ); | 5463 | jiffies_to_msecs(jiffies - |
5464 | network->last_scanned)); | ||
5486 | return 0; | 5465 | return 0; |
5487 | } | 5466 | } |
5488 | 5467 | ||
@@ -5510,15 +5489,6 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5510 | return 0; | 5489 | return 0; |
5511 | } | 5490 | } |
5512 | 5491 | ||
5513 | if (!priv->ieee->wpa_enabled && (network->wpa_ie_len > 0 || | ||
5514 | network->rsn_ie_len > 0)) { | ||
5515 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | ||
5516 | "because of WPA capability mismatch.\n", | ||
5517 | escape_essid(network->ssid, network->ssid_len), | ||
5518 | MAC_ARG(network->bssid)); | ||
5519 | return 0; | ||
5520 | } | ||
5521 | |||
5522 | if ((priv->config & CFG_STATIC_BSSID) && | 5492 | if ((priv->config & CFG_STATIC_BSSID) && |
5523 | memcmp(network->bssid, priv->bssid, ETH_ALEN)) { | 5493 | memcmp(network->bssid, priv->bssid, ETH_ALEN)) { |
5524 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | 5494 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " |
@@ -5539,7 +5509,7 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5539 | } | 5509 | } |
5540 | 5510 | ||
5541 | /* Filter out invalid channel in current GEO */ | 5511 | /* Filter out invalid channel in current GEO */ |
5542 | if (!ipw_is_valid_channel(priv->ieee, network->channel)) { | 5512 | if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) { |
5543 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | 5513 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " |
5544 | "because of invalid channel in current GEO\n", | 5514 | "because of invalid channel in current GEO\n", |
5545 | escape_essid(network->ssid, network->ssid_len), | 5515 | escape_essid(network->ssid, network->ssid_len), |
@@ -5584,7 +5554,7 @@ static int ipw_best_network(struct ipw_priv *priv, | |||
5584 | static void ipw_adhoc_create(struct ipw_priv *priv, | 5554 | static void ipw_adhoc_create(struct ipw_priv *priv, |
5585 | struct ieee80211_network *network) | 5555 | struct ieee80211_network *network) |
5586 | { | 5556 | { |
5587 | const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); | 5557 | const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee); |
5588 | int i; | 5558 | int i; |
5589 | 5559 | ||
5590 | /* | 5560 | /* |
@@ -5599,10 +5569,10 @@ static void ipw_adhoc_create(struct ipw_priv *priv, | |||
5599 | * FW fatal error. | 5569 | * FW fatal error. |
5600 | * | 5570 | * |
5601 | */ | 5571 | */ |
5602 | switch (ipw_is_valid_channel(priv->ieee, priv->channel)) { | 5572 | switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) { |
5603 | case IEEE80211_52GHZ_BAND: | 5573 | case IEEE80211_52GHZ_BAND: |
5604 | network->mode = IEEE_A; | 5574 | network->mode = IEEE_A; |
5605 | i = ipw_channel_to_index(priv->ieee, priv->channel); | 5575 | i = ieee80211_channel_to_index(priv->ieee, priv->channel); |
5606 | if (i == -1) | 5576 | if (i == -1) |
5607 | BUG(); | 5577 | BUG(); |
5608 | if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) { | 5578 | if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) { |
@@ -5616,7 +5586,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv, | |||
5616 | network->mode = IEEE_G; | 5586 | network->mode = IEEE_G; |
5617 | else | 5587 | else |
5618 | network->mode = IEEE_B; | 5588 | network->mode = IEEE_B; |
5619 | i = ipw_channel_to_index(priv->ieee, priv->channel); | 5589 | i = ieee80211_channel_to_index(priv->ieee, priv->channel); |
5620 | if (i == -1) | 5590 | if (i == -1) |
5621 | BUG(); | 5591 | BUG(); |
5622 | if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) { | 5592 | if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) { |
@@ -5671,54 +5641,44 @@ static void ipw_adhoc_create(struct ipw_priv *priv, | |||
5671 | 5641 | ||
5672 | static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) | 5642 | static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) |
5673 | { | 5643 | { |
5674 | struct ipw_tgi_tx_key *key; | 5644 | struct ipw_tgi_tx_key key; |
5675 | struct host_cmd cmd = { | ||
5676 | .cmd = IPW_CMD_TGI_TX_KEY, | ||
5677 | .len = sizeof(*key) | ||
5678 | }; | ||
5679 | 5645 | ||
5680 | if (!(priv->ieee->sec.flags & (1 << index))) | 5646 | if (!(priv->ieee->sec.flags & (1 << index))) |
5681 | return; | 5647 | return; |
5682 | 5648 | ||
5683 | key = (struct ipw_tgi_tx_key *)&cmd.param; | 5649 | key.key_id = index; |
5684 | key->key_id = index; | 5650 | memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); |
5685 | memcpy(key->key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); | 5651 | key.security_type = type; |
5686 | key->security_type = type; | 5652 | key.station_index = 0; /* always 0 for BSS */ |
5687 | key->station_index = 0; /* always 0 for BSS */ | 5653 | key.flags = 0; |
5688 | key->flags = 0; | ||
5689 | /* 0 for new key; previous value of counter (after fatal error) */ | 5654 | /* 0 for new key; previous value of counter (after fatal error) */ |
5690 | key->tx_counter[0] = 0; | 5655 | key.tx_counter[0] = 0; |
5691 | key->tx_counter[1] = 0; | 5656 | key.tx_counter[1] = 0; |
5692 | 5657 | ||
5693 | ipw_send_cmd(priv, &cmd); | 5658 | ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); |
5694 | } | 5659 | } |
5695 | 5660 | ||
5696 | static void ipw_send_wep_keys(struct ipw_priv *priv, int type) | 5661 | static void ipw_send_wep_keys(struct ipw_priv *priv, int type) |
5697 | { | 5662 | { |
5698 | struct ipw_wep_key *key; | 5663 | struct ipw_wep_key key; |
5699 | int i; | 5664 | int i; |
5700 | struct host_cmd cmd = { | ||
5701 | .cmd = IPW_CMD_WEP_KEY, | ||
5702 | .len = sizeof(*key) | ||
5703 | }; | ||
5704 | 5665 | ||
5705 | key = (struct ipw_wep_key *)&cmd.param; | 5666 | key.cmd_id = DINO_CMD_WEP_KEY; |
5706 | key->cmd_id = DINO_CMD_WEP_KEY; | 5667 | key.seq_num = 0; |
5707 | key->seq_num = 0; | ||
5708 | 5668 | ||
5709 | /* Note: AES keys cannot be set for multiple times. | 5669 | /* Note: AES keys cannot be set for multiple times. |
5710 | * Only set it at the first time. */ | 5670 | * Only set it at the first time. */ |
5711 | for (i = 0; i < 4; i++) { | 5671 | for (i = 0; i < 4; i++) { |
5712 | key->key_index = i | type; | 5672 | key.key_index = i | type; |
5713 | if (!(priv->ieee->sec.flags & (1 << i))) { | 5673 | if (!(priv->ieee->sec.flags & (1 << i))) { |
5714 | key->key_size = 0; | 5674 | key.key_size = 0; |
5715 | continue; | 5675 | continue; |
5716 | } | 5676 | } |
5717 | 5677 | ||
5718 | key->key_size = priv->ieee->sec.key_sizes[i]; | 5678 | key.key_size = priv->ieee->sec.key_sizes[i]; |
5719 | memcpy(key->key, priv->ieee->sec.keys[i], key->key_size); | 5679 | memcpy(key.key, priv->ieee->sec.keys[i], key.key_size); |
5720 | 5680 | ||
5721 | ipw_send_cmd(priv, &cmd); | 5681 | ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key); |
5722 | } | 5682 | } |
5723 | } | 5683 | } |
5724 | 5684 | ||
@@ -5822,9 +5782,9 @@ static void ipw_adhoc_check(void *data) | |||
5822 | static void ipw_bg_adhoc_check(void *data) | 5782 | static void ipw_bg_adhoc_check(void *data) |
5823 | { | 5783 | { |
5824 | struct ipw_priv *priv = data; | 5784 | struct ipw_priv *priv = data; |
5825 | down(&priv->sem); | 5785 | mutex_lock(&priv->mutex); |
5826 | ipw_adhoc_check(data); | 5786 | ipw_adhoc_check(data); |
5827 | up(&priv->sem); | 5787 | mutex_unlock(&priv->mutex); |
5828 | } | 5788 | } |
5829 | 5789 | ||
5830 | #ifdef CONFIG_IPW2200_DEBUG | 5790 | #ifdef CONFIG_IPW2200_DEBUG |
@@ -5950,7 +5910,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, | |||
5950 | const struct ieee80211_geo *geo; | 5910 | const struct ieee80211_geo *geo; |
5951 | int i; | 5911 | int i; |
5952 | 5912 | ||
5953 | geo = ipw_get_geo(priv->ieee); | 5913 | geo = ieee80211_get_geo(priv->ieee); |
5954 | 5914 | ||
5955 | if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) { | 5915 | if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) { |
5956 | int start = channel_index; | 5916 | int start = channel_index; |
@@ -6010,7 +5970,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, | |||
6010 | channel_index++; | 5970 | channel_index++; |
6011 | scan->channels_list[channel_index] = channel; | 5971 | scan->channels_list[channel_index] = channel; |
6012 | index = | 5972 | index = |
6013 | ipw_channel_to_index(priv->ieee, channel); | 5973 | ieee80211_channel_to_index(priv->ieee, channel); |
6014 | ipw_set_scan_type(scan, channel_index, | 5974 | ipw_set_scan_type(scan, channel_index, |
6015 | geo->bg[index]. | 5975 | geo->bg[index]. |
6016 | flags & | 5976 | flags & |
@@ -6051,7 +6011,7 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6051 | (priv->status & STATUS_EXIT_PENDING)) | 6011 | (priv->status & STATUS_EXIT_PENDING)) |
6052 | return 0; | 6012 | return 0; |
6053 | 6013 | ||
6054 | down(&priv->sem); | 6014 | mutex_lock(&priv->mutex); |
6055 | 6015 | ||
6056 | if (priv->status & STATUS_SCANNING) { | 6016 | if (priv->status & STATUS_SCANNING) { |
6057 | IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n"); | 6017 | IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n"); |
@@ -6092,7 +6052,7 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6092 | u8 channel; | 6052 | u8 channel; |
6093 | u8 band = 0; | 6053 | u8 band = 0; |
6094 | 6054 | ||
6095 | switch (ipw_is_valid_channel(priv->ieee, priv->channel)) { | 6055 | switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) { |
6096 | case IEEE80211_52GHZ_BAND: | 6056 | case IEEE80211_52GHZ_BAND: |
6097 | band = (u8) (IPW_A_MODE << 6) | 1; | 6057 | band = (u8) (IPW_A_MODE << 6) | 1; |
6098 | channel = priv->channel; | 6058 | channel = priv->channel; |
@@ -6159,16 +6119,16 @@ static int ipw_request_scan(struct ipw_priv *priv) | |||
6159 | queue_delayed_work(priv->workqueue, &priv->scan_check, | 6119 | queue_delayed_work(priv->workqueue, &priv->scan_check, |
6160 | IPW_SCAN_CHECK_WATCHDOG); | 6120 | IPW_SCAN_CHECK_WATCHDOG); |
6161 | done: | 6121 | done: |
6162 | up(&priv->sem); | 6122 | mutex_unlock(&priv->mutex); |
6163 | return err; | 6123 | return err; |
6164 | } | 6124 | } |
6165 | 6125 | ||
6166 | static void ipw_bg_abort_scan(void *data) | 6126 | static void ipw_bg_abort_scan(void *data) |
6167 | { | 6127 | { |
6168 | struct ipw_priv *priv = data; | 6128 | struct ipw_priv *priv = data; |
6169 | down(&priv->sem); | 6129 | mutex_lock(&priv->mutex); |
6170 | ipw_abort_scan(data); | 6130 | ipw_abort_scan(data); |
6171 | up(&priv->sem); | 6131 | mutex_unlock(&priv->mutex); |
6172 | } | 6132 | } |
6173 | 6133 | ||
6174 | static int ipw_wpa_enable(struct ipw_priv *priv, int value) | 6134 | static int ipw_wpa_enable(struct ipw_priv *priv, int value) |
@@ -6193,6 +6153,9 @@ static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) | |||
6193 | } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { | 6153 | } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { |
6194 | sec.auth_mode = WLAN_AUTH_OPEN; | 6154 | sec.auth_mode = WLAN_AUTH_OPEN; |
6195 | ieee->open_wep = 1; | 6155 | ieee->open_wep = 1; |
6156 | } else if (value & IW_AUTH_ALG_LEAP) { | ||
6157 | sec.auth_mode = WLAN_AUTH_LEAP; | ||
6158 | ieee->open_wep = 1; | ||
6196 | } else | 6159 | } else |
6197 | return -EINVAL; | 6160 | return -EINVAL; |
6198 | 6161 | ||
@@ -6204,7 +6167,8 @@ static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) | |||
6204 | return ret; | 6167 | return ret; |
6205 | } | 6168 | } |
6206 | 6169 | ||
6207 | void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len) | 6170 | static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, |
6171 | int wpa_ie_len) | ||
6208 | { | 6172 | { |
6209 | /* make sure WPA is enabled */ | 6173 | /* make sure WPA is enabled */ |
6210 | ipw_wpa_enable(priv, 1); | 6174 | ipw_wpa_enable(priv, 1); |
@@ -6215,15 +6179,10 @@ void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len) | |||
6215 | static int ipw_set_rsn_capa(struct ipw_priv *priv, | 6179 | static int ipw_set_rsn_capa(struct ipw_priv *priv, |
6216 | char *capabilities, int length) | 6180 | char *capabilities, int length) |
6217 | { | 6181 | { |
6218 | struct host_cmd cmd = { | ||
6219 | .cmd = IPW_CMD_RSN_CAPABILITIES, | ||
6220 | .len = length, | ||
6221 | }; | ||
6222 | |||
6223 | IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); | 6182 | IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); |
6224 | 6183 | ||
6225 | memcpy(cmd.param, capabilities, length); | 6184 | return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length, |
6226 | return ipw_send_cmd(priv, &cmd); | 6185 | capabilities); |
6227 | } | 6186 | } |
6228 | 6187 | ||
6229 | /* | 6188 | /* |
@@ -6244,7 +6203,7 @@ static int ipw_wx_set_genie(struct net_device *dev, | |||
6244 | (wrqu->data.length && extra == NULL)) | 6203 | (wrqu->data.length && extra == NULL)) |
6245 | return -EINVAL; | 6204 | return -EINVAL; |
6246 | 6205 | ||
6247 | //down(&priv->sem); | 6206 | //mutex_lock(&priv->mutex); |
6248 | 6207 | ||
6249 | //if (!ieee->wpa_enabled) { | 6208 | //if (!ieee->wpa_enabled) { |
6250 | // err = -EOPNOTSUPP; | 6209 | // err = -EOPNOTSUPP; |
@@ -6270,7 +6229,7 @@ static int ipw_wx_set_genie(struct net_device *dev, | |||
6270 | 6229 | ||
6271 | ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); | 6230 | ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); |
6272 | out: | 6231 | out: |
6273 | //up(&priv->sem); | 6232 | //mutex_unlock(&priv->mutex); |
6274 | return err; | 6233 | return err; |
6275 | } | 6234 | } |
6276 | 6235 | ||
@@ -6283,7 +6242,7 @@ static int ipw_wx_get_genie(struct net_device *dev, | |||
6283 | struct ieee80211_device *ieee = priv->ieee; | 6242 | struct ieee80211_device *ieee = priv->ieee; |
6284 | int err = 0; | 6243 | int err = 0; |
6285 | 6244 | ||
6286 | //down(&priv->sem); | 6245 | //mutex_lock(&priv->mutex); |
6287 | 6246 | ||
6288 | //if (!ieee->wpa_enabled) { | 6247 | //if (!ieee->wpa_enabled) { |
6289 | // err = -EOPNOTSUPP; | 6248 | // err = -EOPNOTSUPP; |
@@ -6304,7 +6263,7 @@ static int ipw_wx_get_genie(struct net_device *dev, | |||
6304 | memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); | 6263 | memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); |
6305 | 6264 | ||
6306 | out: | 6265 | out: |
6307 | //up(&priv->sem); | 6266 | //mutex_unlock(&priv->mutex); |
6308 | return err; | 6267 | return err; |
6309 | } | 6268 | } |
6310 | 6269 | ||
@@ -6556,7 +6515,7 @@ static int ipw_wx_set_mlme(struct net_device *dev, | |||
6556 | * get the modulation type of the current network or | 6515 | * get the modulation type of the current network or |
6557 | * the card current mode | 6516 | * the card current mode |
6558 | */ | 6517 | */ |
6559 | u8 ipw_qos_current_mode(struct ipw_priv * priv) | 6518 | static u8 ipw_qos_current_mode(struct ipw_priv * priv) |
6560 | { | 6519 | { |
6561 | u8 mode = 0; | 6520 | u8 mode = 0; |
6562 | 6521 | ||
@@ -6964,12 +6923,12 @@ static void ipw_bg_qos_activate(void *data) | |||
6964 | if (priv == NULL) | 6923 | if (priv == NULL) |
6965 | return; | 6924 | return; |
6966 | 6925 | ||
6967 | down(&priv->sem); | 6926 | mutex_lock(&priv->mutex); |
6968 | 6927 | ||
6969 | if (priv->status & STATUS_ASSOCIATED) | 6928 | if (priv->status & STATUS_ASSOCIATED) |
6970 | ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); | 6929 | ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); |
6971 | 6930 | ||
6972 | up(&priv->sem); | 6931 | mutex_unlock(&priv->mutex); |
6973 | } | 6932 | } |
6974 | 6933 | ||
6975 | static int ipw_handle_probe_response(struct net_device *dev, | 6934 | static int ipw_handle_probe_response(struct net_device *dev, |
@@ -7010,25 +6969,15 @@ static int ipw_handle_assoc_response(struct net_device *dev, | |||
7010 | static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters | 6969 | static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters |
7011 | *qos_param) | 6970 | *qos_param) |
7012 | { | 6971 | { |
7013 | struct host_cmd cmd = { | 6972 | return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS, |
7014 | .cmd = IPW_CMD_QOS_PARAMETERS, | 6973 | sizeof(*qos_param) * 3, qos_param); |
7015 | .len = (sizeof(struct ieee80211_qos_parameters) * 3) | ||
7016 | }; | ||
7017 | |||
7018 | memcpy(cmd.param, qos_param, sizeof(*qos_param) * 3); | ||
7019 | return ipw_send_cmd(priv, &cmd); | ||
7020 | } | 6974 | } |
7021 | 6975 | ||
7022 | static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element | 6976 | static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element |
7023 | *qos_param) | 6977 | *qos_param) |
7024 | { | 6978 | { |
7025 | struct host_cmd cmd = { | 6979 | return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param), |
7026 | .cmd = IPW_CMD_WME_INFO, | 6980 | qos_param); |
7027 | .len = sizeof(*qos_param) | ||
7028 | }; | ||
7029 | |||
7030 | memcpy(cmd.param, qos_param, sizeof(*qos_param)); | ||
7031 | return ipw_send_cmd(priv, &cmd); | ||
7032 | } | 6981 | } |
7033 | 6982 | ||
7034 | #endif /* CONFIG_IPW_QOS */ | 6983 | #endif /* CONFIG_IPW_QOS */ |
@@ -7052,19 +7001,21 @@ static int ipw_associate_network(struct ipw_priv *priv, | |||
7052 | 7001 | ||
7053 | memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); | 7002 | memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); |
7054 | priv->assoc_request.channel = network->channel; | 7003 | priv->assoc_request.channel = network->channel; |
7004 | priv->assoc_request.auth_key = 0; | ||
7005 | |||
7055 | if ((priv->capability & CAP_PRIVACY_ON) && | 7006 | if ((priv->capability & CAP_PRIVACY_ON) && |
7056 | (priv->capability & CAP_SHARED_KEY)) { | 7007 | (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) { |
7057 | priv->assoc_request.auth_type = AUTH_SHARED_KEY; | 7008 | priv->assoc_request.auth_type = AUTH_SHARED_KEY; |
7058 | priv->assoc_request.auth_key = priv->ieee->sec.active_key; | 7009 | priv->assoc_request.auth_key = priv->ieee->sec.active_key; |
7059 | 7010 | ||
7060 | if ((priv->capability & CAP_PRIVACY_ON) && | 7011 | if (priv->ieee->sec.level == SEC_LEVEL_1) |
7061 | (priv->ieee->sec.level == SEC_LEVEL_1) && | ||
7062 | !(priv->ieee->host_encrypt || priv->ieee->host_decrypt)) | ||
7063 | ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); | 7012 | ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); |
7064 | } else { | 7013 | |
7014 | } else if ((priv->capability & CAP_PRIVACY_ON) && | ||
7015 | (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)) | ||
7016 | priv->assoc_request.auth_type = AUTH_LEAP; | ||
7017 | else | ||
7065 | priv->assoc_request.auth_type = AUTH_OPEN; | 7018 | priv->assoc_request.auth_type = AUTH_OPEN; |
7066 | priv->assoc_request.auth_key = 0; | ||
7067 | } | ||
7068 | 7019 | ||
7069 | if (priv->ieee->wpa_ie_len) { | 7020 | if (priv->ieee->wpa_ie_len) { |
7070 | priv->assoc_request.policy_support = 0x02; /* RSN active */ | 7021 | priv->assoc_request.policy_support = 0x02; /* RSN active */ |
@@ -7278,9 +7229,9 @@ static void ipw_roam(void *data) | |||
7278 | static void ipw_bg_roam(void *data) | 7229 | static void ipw_bg_roam(void *data) |
7279 | { | 7230 | { |
7280 | struct ipw_priv *priv = data; | 7231 | struct ipw_priv *priv = data; |
7281 | down(&priv->sem); | 7232 | mutex_lock(&priv->mutex); |
7282 | ipw_roam(data); | 7233 | ipw_roam(data); |
7283 | up(&priv->sem); | 7234 | mutex_unlock(&priv->mutex); |
7284 | } | 7235 | } |
7285 | 7236 | ||
7286 | static int ipw_associate(void *data) | 7237 | static int ipw_associate(void *data) |
@@ -7375,9 +7326,9 @@ static int ipw_associate(void *data) | |||
7375 | static void ipw_bg_associate(void *data) | 7326 | static void ipw_bg_associate(void *data) |
7376 | { | 7327 | { |
7377 | struct ipw_priv *priv = data; | 7328 | struct ipw_priv *priv = data; |
7378 | down(&priv->sem); | 7329 | mutex_lock(&priv->mutex); |
7379 | ipw_associate(data); | 7330 | ipw_associate(data); |
7380 | up(&priv->sem); | 7331 | mutex_unlock(&priv->mutex); |
7381 | } | 7332 | } |
7382 | 7333 | ||
7383 | static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, | 7334 | static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, |
@@ -7811,12 +7762,10 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7811 | 7762 | ||
7812 | while (i != r) { | 7763 | while (i != r) { |
7813 | rxb = priv->rxq->queue[i]; | 7764 | rxb = priv->rxq->queue[i]; |
7814 | #ifdef CONFIG_IPW2200_DEBUG | ||
7815 | if (unlikely(rxb == NULL)) { | 7765 | if (unlikely(rxb == NULL)) { |
7816 | printk(KERN_CRIT "Queue not allocated!\n"); | 7766 | printk(KERN_CRIT "Queue not allocated!\n"); |
7817 | break; | 7767 | break; |
7818 | } | 7768 | } |
7819 | #endif | ||
7820 | priv->rxq->queue[i] = NULL; | 7769 | priv->rxq->queue[i] = NULL; |
7821 | 7770 | ||
7822 | pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, | 7771 | pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, |
@@ -7835,7 +7784,8 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7835 | le16_to_cpu(pkt->u.frame.rssi_dbm) - | 7784 | le16_to_cpu(pkt->u.frame.rssi_dbm) - |
7836 | IPW_RSSI_TO_DBM, | 7785 | IPW_RSSI_TO_DBM, |
7837 | .signal = | 7786 | .signal = |
7838 | le16_to_cpu(pkt->u.frame.signal), | 7787 | le16_to_cpu(pkt->u.frame.rssi_dbm) - |
7788 | IPW_RSSI_TO_DBM + 0x100, | ||
7839 | .noise = | 7789 | .noise = |
7840 | le16_to_cpu(pkt->u.frame.noise), | 7790 | le16_to_cpu(pkt->u.frame.noise), |
7841 | .rate = pkt->u.frame.rate, | 7791 | .rate = pkt->u.frame.rate, |
@@ -7899,7 +7849,8 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7899 | le16_to_cpu(pkt->u.frame.length)); | 7849 | le16_to_cpu(pkt->u.frame.length)); |
7900 | 7850 | ||
7901 | if (le16_to_cpu(pkt->u.frame.length) < | 7851 | if (le16_to_cpu(pkt->u.frame.length) < |
7902 | frame_hdr_len(header)) { | 7852 | ieee80211_get_hdrlen(le16_to_cpu( |
7853 | header->frame_ctl))) { | ||
7903 | IPW_DEBUG_DROP | 7854 | IPW_DEBUG_DROP |
7904 | ("Received packet is too small. " | 7855 | ("Received packet is too small. " |
7905 | "Dropping.\n"); | 7856 | "Dropping.\n"); |
@@ -7989,7 +7940,14 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7989 | #define DEFAULT_SHORT_RETRY_LIMIT 7U | 7940 | #define DEFAULT_SHORT_RETRY_LIMIT 7U |
7990 | #define DEFAULT_LONG_RETRY_LIMIT 4U | 7941 | #define DEFAULT_LONG_RETRY_LIMIT 4U |
7991 | 7942 | ||
7992 | static int ipw_sw_reset(struct ipw_priv *priv, int init) | 7943 | /** |
7944 | * ipw_sw_reset | ||
7945 | * @option: options to control different reset behaviour | ||
7946 | * 0 = reset everything except the 'disable' module_param | ||
7947 | * 1 = reset everything and print out driver info (for probe only) | ||
7948 | * 2 = reset everything | ||
7949 | */ | ||
7950 | static int ipw_sw_reset(struct ipw_priv *priv, int option) | ||
7993 | { | 7951 | { |
7994 | int band, modulation; | 7952 | int band, modulation; |
7995 | int old_mode = priv->ieee->iw_mode; | 7953 | int old_mode = priv->ieee->iw_mode; |
@@ -8016,7 +7974,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init) | |||
8016 | priv->essid_len = 0; | 7974 | priv->essid_len = 0; |
8017 | memset(priv->essid, 0, IW_ESSID_MAX_SIZE); | 7975 | memset(priv->essid, 0, IW_ESSID_MAX_SIZE); |
8018 | 7976 | ||
8019 | if (disable) { | 7977 | if (disable && option) { |
8020 | priv->status |= STATUS_RF_KILL_SW; | 7978 | priv->status |= STATUS_RF_KILL_SW; |
8021 | IPW_DEBUG_INFO("Radio disabled.\n"); | 7979 | IPW_DEBUG_INFO("Radio disabled.\n"); |
8022 | } | 7980 | } |
@@ -8068,7 +8026,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init) | |||
8068 | 8026 | ||
8069 | if ((priv->pci_dev->device == 0x4223) || | 8027 | if ((priv->pci_dev->device == 0x4223) || |
8070 | (priv->pci_dev->device == 0x4224)) { | 8028 | (priv->pci_dev->device == 0x4224)) { |
8071 | if (init) | 8029 | if (option == 1) |
8072 | printk(KERN_INFO DRV_NAME | 8030 | printk(KERN_INFO DRV_NAME |
8073 | ": Detected Intel PRO/Wireless 2915ABG Network " | 8031 | ": Detected Intel PRO/Wireless 2915ABG Network " |
8074 | "Connection\n"); | 8032 | "Connection\n"); |
@@ -8079,7 +8037,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init) | |||
8079 | priv->adapter = IPW_2915ABG; | 8037 | priv->adapter = IPW_2915ABG; |
8080 | priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; | 8038 | priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; |
8081 | } else { | 8039 | } else { |
8082 | if (init) | 8040 | if (option == 1) |
8083 | printk(KERN_INFO DRV_NAME | 8041 | printk(KERN_INFO DRV_NAME |
8084 | ": Detected Intel PRO/Wireless 2200BG Network " | 8042 | ": Detected Intel PRO/Wireless 2200BG Network " |
8085 | "Connection\n"); | 8043 | "Connection\n"); |
@@ -8126,7 +8084,7 @@ static int ipw_wx_get_name(struct net_device *dev, | |||
8126 | union iwreq_data *wrqu, char *extra) | 8084 | union iwreq_data *wrqu, char *extra) |
8127 | { | 8085 | { |
8128 | struct ipw_priv *priv = ieee80211_priv(dev); | 8086 | struct ipw_priv *priv = ieee80211_priv(dev); |
8129 | down(&priv->sem); | 8087 | mutex_lock(&priv->mutex); |
8130 | if (priv->status & STATUS_RF_KILL_MASK) | 8088 | if (priv->status & STATUS_RF_KILL_MASK) |
8131 | strcpy(wrqu->name, "radio off"); | 8089 | strcpy(wrqu->name, "radio off"); |
8132 | else if (!(priv->status & STATUS_ASSOCIATED)) | 8090 | else if (!(priv->status & STATUS_ASSOCIATED)) |
@@ -8135,7 +8093,7 @@ static int ipw_wx_get_name(struct net_device *dev, | |||
8135 | snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c", | 8093 | snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c", |
8136 | ipw_modes[priv->assoc_request.ieee_mode]); | 8094 | ipw_modes[priv->assoc_request.ieee_mode]); |
8137 | IPW_DEBUG_WX("Name: %s\n", wrqu->name); | 8095 | IPW_DEBUG_WX("Name: %s\n", wrqu->name); |
8138 | up(&priv->sem); | 8096 | mutex_unlock(&priv->mutex); |
8139 | return 0; | 8097 | return 0; |
8140 | } | 8098 | } |
8141 | 8099 | ||
@@ -8196,7 +8154,7 @@ static int ipw_wx_set_freq(struct net_device *dev, | |||
8196 | union iwreq_data *wrqu, char *extra) | 8154 | union iwreq_data *wrqu, char *extra) |
8197 | { | 8155 | { |
8198 | struct ipw_priv *priv = ieee80211_priv(dev); | 8156 | struct ipw_priv *priv = ieee80211_priv(dev); |
8199 | const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); | 8157 | const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee); |
8200 | struct iw_freq *fwrq = &wrqu->freq; | 8158 | struct iw_freq *fwrq = &wrqu->freq; |
8201 | int ret = 0, i; | 8159 | int ret = 0, i; |
8202 | u8 channel, flags; | 8160 | u8 channel, flags; |
@@ -8204,24 +8162,24 @@ static int ipw_wx_set_freq(struct net_device *dev, | |||
8204 | 8162 | ||
8205 | if (fwrq->m == 0) { | 8163 | if (fwrq->m == 0) { |
8206 | IPW_DEBUG_WX("SET Freq/Channel -> any\n"); | 8164 | IPW_DEBUG_WX("SET Freq/Channel -> any\n"); |
8207 | down(&priv->sem); | 8165 | mutex_lock(&priv->mutex); |
8208 | ret = ipw_set_channel(priv, 0); | 8166 | ret = ipw_set_channel(priv, 0); |
8209 | up(&priv->sem); | 8167 | mutex_unlock(&priv->mutex); |
8210 | return ret; | 8168 | return ret; |
8211 | } | 8169 | } |
8212 | /* if setting by freq convert to channel */ | 8170 | /* if setting by freq convert to channel */ |
8213 | if (fwrq->e == 1) { | 8171 | if (fwrq->e == 1) { |
8214 | channel = ipw_freq_to_channel(priv->ieee, fwrq->m); | 8172 | channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m); |
8215 | if (channel == 0) | 8173 | if (channel == 0) |
8216 | return -EINVAL; | 8174 | return -EINVAL; |
8217 | } else | 8175 | } else |
8218 | channel = fwrq->m; | 8176 | channel = fwrq->m; |
8219 | 8177 | ||
8220 | if (!(band = ipw_is_valid_channel(priv->ieee, channel))) | 8178 | if (!(band = ieee80211_is_valid_channel(priv->ieee, channel))) |
8221 | return -EINVAL; | 8179 | return -EINVAL; |
8222 | 8180 | ||
8223 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { | 8181 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { |
8224 | i = ipw_channel_to_index(priv->ieee, channel); | 8182 | i = ieee80211_channel_to_index(priv->ieee, channel); |
8225 | if (i == -1) | 8183 | if (i == -1) |
8226 | return -EINVAL; | 8184 | return -EINVAL; |
8227 | 8185 | ||
@@ -8234,9 +8192,9 @@ static int ipw_wx_set_freq(struct net_device *dev, | |||
8234 | } | 8192 | } |
8235 | 8193 | ||
8236 | IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); | 8194 | IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); |
8237 | down(&priv->sem); | 8195 | mutex_lock(&priv->mutex); |
8238 | ret = ipw_set_channel(priv, channel); | 8196 | ret = ipw_set_channel(priv, channel); |
8239 | up(&priv->sem); | 8197 | mutex_unlock(&priv->mutex); |
8240 | return ret; | 8198 | return ret; |
8241 | } | 8199 | } |
8242 | 8200 | ||
@@ -8250,14 +8208,14 @@ static int ipw_wx_get_freq(struct net_device *dev, | |||
8250 | 8208 | ||
8251 | /* If we are associated, trying to associate, or have a statically | 8209 | /* If we are associated, trying to associate, or have a statically |
8252 | * configured CHANNEL then return that; otherwise return ANY */ | 8210 | * configured CHANNEL then return that; otherwise return ANY */ |
8253 | down(&priv->sem); | 8211 | mutex_lock(&priv->mutex); |
8254 | if (priv->config & CFG_STATIC_CHANNEL || | 8212 | if (priv->config & CFG_STATIC_CHANNEL || |
8255 | priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) | 8213 | priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) |
8256 | wrqu->freq.m = priv->channel; | 8214 | wrqu->freq.m = priv->channel; |
8257 | else | 8215 | else |
8258 | wrqu->freq.m = 0; | 8216 | wrqu->freq.m = 0; |
8259 | 8217 | ||
8260 | up(&priv->sem); | 8218 | mutex_unlock(&priv->mutex); |
8261 | IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); | 8219 | IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); |
8262 | return 0; | 8220 | return 0; |
8263 | } | 8221 | } |
@@ -8287,7 +8245,7 @@ static int ipw_wx_set_mode(struct net_device *dev, | |||
8287 | if (wrqu->mode == priv->ieee->iw_mode) | 8245 | if (wrqu->mode == priv->ieee->iw_mode) |
8288 | return 0; | 8246 | return 0; |
8289 | 8247 | ||
8290 | down(&priv->sem); | 8248 | mutex_lock(&priv->mutex); |
8291 | 8249 | ||
8292 | ipw_sw_reset(priv, 0); | 8250 | ipw_sw_reset(priv, 0); |
8293 | 8251 | ||
@@ -8310,7 +8268,7 @@ static int ipw_wx_set_mode(struct net_device *dev, | |||
8310 | priv->ieee->iw_mode = wrqu->mode; | 8268 | priv->ieee->iw_mode = wrqu->mode; |
8311 | 8269 | ||
8312 | queue_work(priv->workqueue, &priv->adapter_restart); | 8270 | queue_work(priv->workqueue, &priv->adapter_restart); |
8313 | up(&priv->sem); | 8271 | mutex_unlock(&priv->mutex); |
8314 | return err; | 8272 | return err; |
8315 | } | 8273 | } |
8316 | 8274 | ||
@@ -8319,10 +8277,10 @@ static int ipw_wx_get_mode(struct net_device *dev, | |||
8319 | union iwreq_data *wrqu, char *extra) | 8277 | union iwreq_data *wrqu, char *extra) |
8320 | { | 8278 | { |
8321 | struct ipw_priv *priv = ieee80211_priv(dev); | 8279 | struct ipw_priv *priv = ieee80211_priv(dev); |
8322 | down(&priv->sem); | 8280 | mutex_lock(&priv->mutex); |
8323 | wrqu->mode = priv->ieee->iw_mode; | 8281 | wrqu->mode = priv->ieee->iw_mode; |
8324 | IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); | 8282 | IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); |
8325 | up(&priv->sem); | 8283 | mutex_unlock(&priv->mutex); |
8326 | return 0; | 8284 | return 0; |
8327 | } | 8285 | } |
8328 | 8286 | ||
@@ -8349,7 +8307,7 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8349 | { | 8307 | { |
8350 | struct ipw_priv *priv = ieee80211_priv(dev); | 8308 | struct ipw_priv *priv = ieee80211_priv(dev); |
8351 | struct iw_range *range = (struct iw_range *)extra; | 8309 | struct iw_range *range = (struct iw_range *)extra; |
8352 | const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); | 8310 | const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee); |
8353 | int i = 0, j; | 8311 | int i = 0, j; |
8354 | 8312 | ||
8355 | wrqu->data.length = sizeof(*range); | 8313 | wrqu->data.length = sizeof(*range); |
@@ -8361,7 +8319,7 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8361 | range->max_qual.qual = 100; | 8319 | range->max_qual.qual = 100; |
8362 | /* TODO: Find real max RSSI and stick here */ | 8320 | /* TODO: Find real max RSSI and stick here */ |
8363 | range->max_qual.level = 0; | 8321 | range->max_qual.level = 0; |
8364 | range->max_qual.noise = priv->ieee->worst_rssi + 0x100; | 8322 | range->max_qual.noise = 0; |
8365 | range->max_qual.updated = 7; /* Updated all three */ | 8323 | range->max_qual.updated = 7; /* Updated all three */ |
8366 | 8324 | ||
8367 | range->avg_qual.qual = 70; | 8325 | range->avg_qual.qual = 70; |
@@ -8369,7 +8327,7 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8369 | range->avg_qual.level = 0; /* FIXME to real average level */ | 8327 | range->avg_qual.level = 0; /* FIXME to real average level */ |
8370 | range->avg_qual.noise = 0; | 8328 | range->avg_qual.noise = 0; |
8371 | range->avg_qual.updated = 7; /* Updated all three */ | 8329 | range->avg_qual.updated = 7; /* Updated all three */ |
8372 | down(&priv->sem); | 8330 | mutex_lock(&priv->mutex); |
8373 | range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); | 8331 | range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); |
8374 | 8332 | ||
8375 | for (i = 0; i < range->num_bitrates; i++) | 8333 | for (i = 0; i < range->num_bitrates; i++) |
@@ -8387,31 +8345,39 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8387 | 8345 | ||
8388 | /* Set the Wireless Extension versions */ | 8346 | /* Set the Wireless Extension versions */ |
8389 | range->we_version_compiled = WIRELESS_EXT; | 8347 | range->we_version_compiled = WIRELESS_EXT; |
8390 | range->we_version_source = 16; | 8348 | range->we_version_source = 18; |
8391 | 8349 | ||
8392 | i = 0; | 8350 | i = 0; |
8393 | if (priv->ieee->mode & (IEEE_B | IEEE_G)) { | 8351 | if (priv->ieee->mode & (IEEE_B | IEEE_G)) { |
8394 | for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; | 8352 | for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) { |
8395 | i++, j++) { | 8353 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && |
8354 | (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY)) | ||
8355 | continue; | ||
8356 | |||
8396 | range->freq[i].i = geo->bg[j].channel; | 8357 | range->freq[i].i = geo->bg[j].channel; |
8397 | range->freq[i].m = geo->bg[j].freq * 100000; | 8358 | range->freq[i].m = geo->bg[j].freq * 100000; |
8398 | range->freq[i].e = 1; | 8359 | range->freq[i].e = 1; |
8360 | i++; | ||
8399 | } | 8361 | } |
8400 | } | 8362 | } |
8401 | 8363 | ||
8402 | if (priv->ieee->mode & IEEE_A) { | 8364 | if (priv->ieee->mode & IEEE_A) { |
8403 | for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; | 8365 | for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) { |
8404 | i++, j++) { | 8366 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && |
8367 | (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY)) | ||
8368 | continue; | ||
8369 | |||
8405 | range->freq[i].i = geo->a[j].channel; | 8370 | range->freq[i].i = geo->a[j].channel; |
8406 | range->freq[i].m = geo->a[j].freq * 100000; | 8371 | range->freq[i].m = geo->a[j].freq * 100000; |
8407 | range->freq[i].e = 1; | 8372 | range->freq[i].e = 1; |
8373 | i++; | ||
8408 | } | 8374 | } |
8409 | } | 8375 | } |
8410 | 8376 | ||
8411 | range->num_channels = i; | 8377 | range->num_channels = i; |
8412 | range->num_frequency = i; | 8378 | range->num_frequency = i; |
8413 | 8379 | ||
8414 | up(&priv->sem); | 8380 | mutex_unlock(&priv->mutex); |
8415 | 8381 | ||
8416 | /* Event capability (kernel + driver) */ | 8382 | /* Event capability (kernel + driver) */ |
8417 | range->event_capa[0] = (IW_EVENT_CAPA_K_0 | | 8383 | range->event_capa[0] = (IW_EVENT_CAPA_K_0 | |
@@ -8419,6 +8385,9 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8419 | IW_EVENT_CAPA_MASK(SIOCGIWAP)); | 8385 | IW_EVENT_CAPA_MASK(SIOCGIWAP)); |
8420 | range->event_capa[1] = IW_EVENT_CAPA_K_1; | 8386 | range->event_capa[1] = IW_EVENT_CAPA_K_1; |
8421 | 8387 | ||
8388 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | | ||
8389 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; | ||
8390 | |||
8422 | IPW_DEBUG_WX("GET Range\n"); | 8391 | IPW_DEBUG_WX("GET Range\n"); |
8423 | return 0; | 8392 | return 0; |
8424 | } | 8393 | } |
@@ -8438,7 +8407,7 @@ static int ipw_wx_set_wap(struct net_device *dev, | |||
8438 | 8407 | ||
8439 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) | 8408 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) |
8440 | return -EINVAL; | 8409 | return -EINVAL; |
8441 | down(&priv->sem); | 8410 | mutex_lock(&priv->mutex); |
8442 | if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || | 8411 | if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || |
8443 | !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { | 8412 | !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { |
8444 | /* we disable mandatory BSSID association */ | 8413 | /* we disable mandatory BSSID association */ |
@@ -8447,14 +8416,14 @@ static int ipw_wx_set_wap(struct net_device *dev, | |||
8447 | IPW_DEBUG_ASSOC("Attempting to associate with new " | 8416 | IPW_DEBUG_ASSOC("Attempting to associate with new " |
8448 | "parameters.\n"); | 8417 | "parameters.\n"); |
8449 | ipw_associate(priv); | 8418 | ipw_associate(priv); |
8450 | up(&priv->sem); | 8419 | mutex_unlock(&priv->mutex); |
8451 | return 0; | 8420 | return 0; |
8452 | } | 8421 | } |
8453 | 8422 | ||
8454 | priv->config |= CFG_STATIC_BSSID; | 8423 | priv->config |= CFG_STATIC_BSSID; |
8455 | if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) { | 8424 | if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) { |
8456 | IPW_DEBUG_WX("BSSID set to current BSSID.\n"); | 8425 | IPW_DEBUG_WX("BSSID set to current BSSID.\n"); |
8457 | up(&priv->sem); | 8426 | mutex_unlock(&priv->mutex); |
8458 | return 0; | 8427 | return 0; |
8459 | } | 8428 | } |
8460 | 8429 | ||
@@ -8468,7 +8437,7 @@ static int ipw_wx_set_wap(struct net_device *dev, | |||
8468 | if (!ipw_disassociate(priv)) | 8437 | if (!ipw_disassociate(priv)) |
8469 | ipw_associate(priv); | 8438 | ipw_associate(priv); |
8470 | 8439 | ||
8471 | up(&priv->sem); | 8440 | mutex_unlock(&priv->mutex); |
8472 | return 0; | 8441 | return 0; |
8473 | } | 8442 | } |
8474 | 8443 | ||
@@ -8479,7 +8448,7 @@ static int ipw_wx_get_wap(struct net_device *dev, | |||
8479 | struct ipw_priv *priv = ieee80211_priv(dev); | 8448 | struct ipw_priv *priv = ieee80211_priv(dev); |
8480 | /* If we are associated, trying to associate, or have a statically | 8449 | /* If we are associated, trying to associate, or have a statically |
8481 | * configured BSSID then return that; otherwise return ANY */ | 8450 | * configured BSSID then return that; otherwise return ANY */ |
8482 | down(&priv->sem); | 8451 | mutex_lock(&priv->mutex); |
8483 | if (priv->config & CFG_STATIC_BSSID || | 8452 | if (priv->config & CFG_STATIC_BSSID || |
8484 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | 8453 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { |
8485 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; | 8454 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; |
@@ -8489,7 +8458,7 @@ static int ipw_wx_get_wap(struct net_device *dev, | |||
8489 | 8458 | ||
8490 | IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", | 8459 | IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", |
8491 | MAC_ARG(wrqu->ap_addr.sa_data)); | 8460 | MAC_ARG(wrqu->ap_addr.sa_data)); |
8492 | up(&priv->sem); | 8461 | mutex_unlock(&priv->mutex); |
8493 | return 0; | 8462 | return 0; |
8494 | } | 8463 | } |
8495 | 8464 | ||
@@ -8500,7 +8469,7 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8500 | struct ipw_priv *priv = ieee80211_priv(dev); | 8469 | struct ipw_priv *priv = ieee80211_priv(dev); |
8501 | char *essid = ""; /* ANY */ | 8470 | char *essid = ""; /* ANY */ |
8502 | int length = 0; | 8471 | int length = 0; |
8503 | down(&priv->sem); | 8472 | mutex_lock(&priv->mutex); |
8504 | if (wrqu->essid.flags && wrqu->essid.length) { | 8473 | if (wrqu->essid.flags && wrqu->essid.length) { |
8505 | length = wrqu->essid.length - 1; | 8474 | length = wrqu->essid.length - 1; |
8506 | essid = extra; | 8475 | essid = extra; |
@@ -8515,7 +8484,7 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8515 | priv->config &= ~CFG_STATIC_ESSID; | 8484 | priv->config &= ~CFG_STATIC_ESSID; |
8516 | ipw_associate(priv); | 8485 | ipw_associate(priv); |
8517 | } | 8486 | } |
8518 | up(&priv->sem); | 8487 | mutex_unlock(&priv->mutex); |
8519 | return 0; | 8488 | return 0; |
8520 | } | 8489 | } |
8521 | 8490 | ||
@@ -8525,7 +8494,7 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8525 | 8494 | ||
8526 | if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { | 8495 | if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { |
8527 | IPW_DEBUG_WX("ESSID set to current ESSID.\n"); | 8496 | IPW_DEBUG_WX("ESSID set to current ESSID.\n"); |
8528 | up(&priv->sem); | 8497 | mutex_unlock(&priv->mutex); |
8529 | return 0; | 8498 | return 0; |
8530 | } | 8499 | } |
8531 | 8500 | ||
@@ -8540,7 +8509,7 @@ static int ipw_wx_set_essid(struct net_device *dev, | |||
8540 | if (!ipw_disassociate(priv)) | 8509 | if (!ipw_disassociate(priv)) |
8541 | ipw_associate(priv); | 8510 | ipw_associate(priv); |
8542 | 8511 | ||
8543 | up(&priv->sem); | 8512 | mutex_unlock(&priv->mutex); |
8544 | return 0; | 8513 | return 0; |
8545 | } | 8514 | } |
8546 | 8515 | ||
@@ -8552,7 +8521,7 @@ static int ipw_wx_get_essid(struct net_device *dev, | |||
8552 | 8521 | ||
8553 | /* If we are associated, trying to associate, or have a statically | 8522 | /* If we are associated, trying to associate, or have a statically |
8554 | * configured ESSID then return that; otherwise return ANY */ | 8523 | * configured ESSID then return that; otherwise return ANY */ |
8555 | down(&priv->sem); | 8524 | mutex_lock(&priv->mutex); |
8556 | if (priv->config & CFG_STATIC_ESSID || | 8525 | if (priv->config & CFG_STATIC_ESSID || |
8557 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | 8526 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { |
8558 | IPW_DEBUG_WX("Getting essid: '%s'\n", | 8527 | IPW_DEBUG_WX("Getting essid: '%s'\n", |
@@ -8565,7 +8534,7 @@ static int ipw_wx_get_essid(struct net_device *dev, | |||
8565 | wrqu->essid.length = 0; | 8534 | wrqu->essid.length = 0; |
8566 | wrqu->essid.flags = 0; /* active */ | 8535 | wrqu->essid.flags = 0; /* active */ |
8567 | } | 8536 | } |
8568 | up(&priv->sem); | 8537 | mutex_unlock(&priv->mutex); |
8569 | return 0; | 8538 | return 0; |
8570 | } | 8539 | } |
8571 | 8540 | ||
@@ -8578,12 +8547,12 @@ static int ipw_wx_set_nick(struct net_device *dev, | |||
8578 | IPW_DEBUG_WX("Setting nick to '%s'\n", extra); | 8547 | IPW_DEBUG_WX("Setting nick to '%s'\n", extra); |
8579 | if (wrqu->data.length > IW_ESSID_MAX_SIZE) | 8548 | if (wrqu->data.length > IW_ESSID_MAX_SIZE) |
8580 | return -E2BIG; | 8549 | return -E2BIG; |
8581 | down(&priv->sem); | 8550 | mutex_lock(&priv->mutex); |
8582 | wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); | 8551 | wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); |
8583 | memset(priv->nick, 0, sizeof(priv->nick)); | 8552 | memset(priv->nick, 0, sizeof(priv->nick)); |
8584 | memcpy(priv->nick, extra, wrqu->data.length); | 8553 | memcpy(priv->nick, extra, wrqu->data.length); |
8585 | IPW_DEBUG_TRACE("<<\n"); | 8554 | IPW_DEBUG_TRACE("<<\n"); |
8586 | up(&priv->sem); | 8555 | mutex_unlock(&priv->mutex); |
8587 | return 0; | 8556 | return 0; |
8588 | 8557 | ||
8589 | } | 8558 | } |
@@ -8594,11 +8563,57 @@ static int ipw_wx_get_nick(struct net_device *dev, | |||
8594 | { | 8563 | { |
8595 | struct ipw_priv *priv = ieee80211_priv(dev); | 8564 | struct ipw_priv *priv = ieee80211_priv(dev); |
8596 | IPW_DEBUG_WX("Getting nick\n"); | 8565 | IPW_DEBUG_WX("Getting nick\n"); |
8597 | down(&priv->sem); | 8566 | mutex_lock(&priv->mutex); |
8598 | wrqu->data.length = strlen(priv->nick) + 1; | 8567 | wrqu->data.length = strlen(priv->nick) + 1; |
8599 | memcpy(extra, priv->nick, wrqu->data.length); | 8568 | memcpy(extra, priv->nick, wrqu->data.length); |
8600 | wrqu->data.flags = 1; /* active */ | 8569 | wrqu->data.flags = 1; /* active */ |
8601 | up(&priv->sem); | 8570 | mutex_unlock(&priv->mutex); |
8571 | return 0; | ||
8572 | } | ||
8573 | |||
8574 | static int ipw_wx_set_sens(struct net_device *dev, | ||
8575 | struct iw_request_info *info, | ||
8576 | union iwreq_data *wrqu, char *extra) | ||
8577 | { | ||
8578 | struct ipw_priv *priv = ieee80211_priv(dev); | ||
8579 | int err = 0; | ||
8580 | |||
8581 | IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value); | ||
8582 | IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value); | ||
8583 | mutex_lock(&priv->mutex); | ||
8584 | |||
8585 | if (wrqu->sens.fixed == 0) | ||
8586 | { | ||
8587 | priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; | ||
8588 | priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; | ||
8589 | goto out; | ||
8590 | } | ||
8591 | if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) || | ||
8592 | (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) { | ||
8593 | err = -EINVAL; | ||
8594 | goto out; | ||
8595 | } | ||
8596 | |||
8597 | priv->roaming_threshold = wrqu->sens.value; | ||
8598 | priv->disassociate_threshold = 3*wrqu->sens.value; | ||
8599 | out: | ||
8600 | mutex_unlock(&priv->mutex); | ||
8601 | return err; | ||
8602 | } | ||
8603 | |||
8604 | static int ipw_wx_get_sens(struct net_device *dev, | ||
8605 | struct iw_request_info *info, | ||
8606 | union iwreq_data *wrqu, char *extra) | ||
8607 | { | ||
8608 | struct ipw_priv *priv = ieee80211_priv(dev); | ||
8609 | mutex_lock(&priv->mutex); | ||
8610 | wrqu->sens.fixed = 1; | ||
8611 | wrqu->sens.value = priv->roaming_threshold; | ||
8612 | mutex_unlock(&priv->mutex); | ||
8613 | |||
8614 | IPW_DEBUG_WX("GET roaming threshold -> %s %d \n", | ||
8615 | wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); | ||
8616 | |||
8602 | return 0; | 8617 | return 0; |
8603 | } | 8618 | } |
8604 | 8619 | ||
@@ -8691,7 +8706,7 @@ static int ipw_wx_set_rate(struct net_device *dev, | |||
8691 | apply: | 8706 | apply: |
8692 | IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", | 8707 | IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", |
8693 | mask, fixed ? "fixed" : "sub-rates"); | 8708 | mask, fixed ? "fixed" : "sub-rates"); |
8694 | down(&priv->sem); | 8709 | mutex_lock(&priv->mutex); |
8695 | if (mask == IEEE80211_DEFAULT_RATES_MASK) { | 8710 | if (mask == IEEE80211_DEFAULT_RATES_MASK) { |
8696 | priv->config &= ~CFG_FIXED_RATE; | 8711 | priv->config &= ~CFG_FIXED_RATE; |
8697 | ipw_set_fixed_rate(priv, priv->ieee->mode); | 8712 | ipw_set_fixed_rate(priv, priv->ieee->mode); |
@@ -8700,7 +8715,7 @@ static int ipw_wx_set_rate(struct net_device *dev, | |||
8700 | 8715 | ||
8701 | if (priv->rates_mask == mask) { | 8716 | if (priv->rates_mask == mask) { |
8702 | IPW_DEBUG_WX("Mask set to current mask.\n"); | 8717 | IPW_DEBUG_WX("Mask set to current mask.\n"); |
8703 | up(&priv->sem); | 8718 | mutex_unlock(&priv->mutex); |
8704 | return 0; | 8719 | return 0; |
8705 | } | 8720 | } |
8706 | 8721 | ||
@@ -8711,7 +8726,7 @@ static int ipw_wx_set_rate(struct net_device *dev, | |||
8711 | if (!ipw_disassociate(priv)) | 8726 | if (!ipw_disassociate(priv)) |
8712 | ipw_associate(priv); | 8727 | ipw_associate(priv); |
8713 | 8728 | ||
8714 | up(&priv->sem); | 8729 | mutex_unlock(&priv->mutex); |
8715 | return 0; | 8730 | return 0; |
8716 | } | 8731 | } |
8717 | 8732 | ||
@@ -8720,9 +8735,9 @@ static int ipw_wx_get_rate(struct net_device *dev, | |||
8720 | union iwreq_data *wrqu, char *extra) | 8735 | union iwreq_data *wrqu, char *extra) |
8721 | { | 8736 | { |
8722 | struct ipw_priv *priv = ieee80211_priv(dev); | 8737 | struct ipw_priv *priv = ieee80211_priv(dev); |
8723 | down(&priv->sem); | 8738 | mutex_lock(&priv->mutex); |
8724 | wrqu->bitrate.value = priv->last_rate; | 8739 | wrqu->bitrate.value = priv->last_rate; |
8725 | up(&priv->sem); | 8740 | mutex_unlock(&priv->mutex); |
8726 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); | 8741 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); |
8727 | return 0; | 8742 | return 0; |
8728 | } | 8743 | } |
@@ -8732,20 +8747,20 @@ static int ipw_wx_set_rts(struct net_device *dev, | |||
8732 | union iwreq_data *wrqu, char *extra) | 8747 | union iwreq_data *wrqu, char *extra) |
8733 | { | 8748 | { |
8734 | struct ipw_priv *priv = ieee80211_priv(dev); | 8749 | struct ipw_priv *priv = ieee80211_priv(dev); |
8735 | down(&priv->sem); | 8750 | mutex_lock(&priv->mutex); |
8736 | if (wrqu->rts.disabled) | 8751 | if (wrqu->rts.disabled) |
8737 | priv->rts_threshold = DEFAULT_RTS_THRESHOLD; | 8752 | priv->rts_threshold = DEFAULT_RTS_THRESHOLD; |
8738 | else { | 8753 | else { |
8739 | if (wrqu->rts.value < MIN_RTS_THRESHOLD || | 8754 | if (wrqu->rts.value < MIN_RTS_THRESHOLD || |
8740 | wrqu->rts.value > MAX_RTS_THRESHOLD) { | 8755 | wrqu->rts.value > MAX_RTS_THRESHOLD) { |
8741 | up(&priv->sem); | 8756 | mutex_unlock(&priv->mutex); |
8742 | return -EINVAL; | 8757 | return -EINVAL; |
8743 | } | 8758 | } |
8744 | priv->rts_threshold = wrqu->rts.value; | 8759 | priv->rts_threshold = wrqu->rts.value; |
8745 | } | 8760 | } |
8746 | 8761 | ||
8747 | ipw_send_rts_threshold(priv, priv->rts_threshold); | 8762 | ipw_send_rts_threshold(priv, priv->rts_threshold); |
8748 | up(&priv->sem); | 8763 | mutex_unlock(&priv->mutex); |
8749 | IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); | 8764 | IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); |
8750 | return 0; | 8765 | return 0; |
8751 | } | 8766 | } |
@@ -8755,11 +8770,11 @@ static int ipw_wx_get_rts(struct net_device *dev, | |||
8755 | union iwreq_data *wrqu, char *extra) | 8770 | union iwreq_data *wrqu, char *extra) |
8756 | { | 8771 | { |
8757 | struct ipw_priv *priv = ieee80211_priv(dev); | 8772 | struct ipw_priv *priv = ieee80211_priv(dev); |
8758 | down(&priv->sem); | 8773 | mutex_lock(&priv->mutex); |
8759 | wrqu->rts.value = priv->rts_threshold; | 8774 | wrqu->rts.value = priv->rts_threshold; |
8760 | wrqu->rts.fixed = 0; /* no auto select */ | 8775 | wrqu->rts.fixed = 0; /* no auto select */ |
8761 | wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); | 8776 | wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); |
8762 | up(&priv->sem); | 8777 | mutex_unlock(&priv->mutex); |
8763 | IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); | 8778 | IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); |
8764 | return 0; | 8779 | return 0; |
8765 | } | 8780 | } |
@@ -8771,7 +8786,7 @@ static int ipw_wx_set_txpow(struct net_device *dev, | |||
8771 | struct ipw_priv *priv = ieee80211_priv(dev); | 8786 | struct ipw_priv *priv = ieee80211_priv(dev); |
8772 | int err = 0; | 8787 | int err = 0; |
8773 | 8788 | ||
8774 | down(&priv->sem); | 8789 | mutex_lock(&priv->mutex); |
8775 | if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { | 8790 | if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { |
8776 | err = -EINPROGRESS; | 8791 | err = -EINPROGRESS; |
8777 | goto out; | 8792 | goto out; |
@@ -8794,7 +8809,7 @@ static int ipw_wx_set_txpow(struct net_device *dev, | |||
8794 | priv->tx_power = wrqu->power.value; | 8809 | priv->tx_power = wrqu->power.value; |
8795 | err = ipw_set_tx_power(priv); | 8810 | err = ipw_set_tx_power(priv); |
8796 | out: | 8811 | out: |
8797 | up(&priv->sem); | 8812 | mutex_unlock(&priv->mutex); |
8798 | return err; | 8813 | return err; |
8799 | } | 8814 | } |
8800 | 8815 | ||
@@ -8803,12 +8818,12 @@ static int ipw_wx_get_txpow(struct net_device *dev, | |||
8803 | union iwreq_data *wrqu, char *extra) | 8818 | union iwreq_data *wrqu, char *extra) |
8804 | { | 8819 | { |
8805 | struct ipw_priv *priv = ieee80211_priv(dev); | 8820 | struct ipw_priv *priv = ieee80211_priv(dev); |
8806 | down(&priv->sem); | 8821 | mutex_lock(&priv->mutex); |
8807 | wrqu->power.value = priv->tx_power; | 8822 | wrqu->power.value = priv->tx_power; |
8808 | wrqu->power.fixed = 1; | 8823 | wrqu->power.fixed = 1; |
8809 | wrqu->power.flags = IW_TXPOW_DBM; | 8824 | wrqu->power.flags = IW_TXPOW_DBM; |
8810 | wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; | 8825 | wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; |
8811 | up(&priv->sem); | 8826 | mutex_unlock(&priv->mutex); |
8812 | 8827 | ||
8813 | IPW_DEBUG_WX("GET TX Power -> %s %d \n", | 8828 | IPW_DEBUG_WX("GET TX Power -> %s %d \n", |
8814 | wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); | 8829 | wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); |
@@ -8821,13 +8836,13 @@ static int ipw_wx_set_frag(struct net_device *dev, | |||
8821 | union iwreq_data *wrqu, char *extra) | 8836 | union iwreq_data *wrqu, char *extra) |
8822 | { | 8837 | { |
8823 | struct ipw_priv *priv = ieee80211_priv(dev); | 8838 | struct ipw_priv *priv = ieee80211_priv(dev); |
8824 | down(&priv->sem); | 8839 | mutex_lock(&priv->mutex); |
8825 | if (wrqu->frag.disabled) | 8840 | if (wrqu->frag.disabled) |
8826 | priv->ieee->fts = DEFAULT_FTS; | 8841 | priv->ieee->fts = DEFAULT_FTS; |
8827 | else { | 8842 | else { |
8828 | if (wrqu->frag.value < MIN_FRAG_THRESHOLD || | 8843 | if (wrqu->frag.value < MIN_FRAG_THRESHOLD || |
8829 | wrqu->frag.value > MAX_FRAG_THRESHOLD) { | 8844 | wrqu->frag.value > MAX_FRAG_THRESHOLD) { |
8830 | up(&priv->sem); | 8845 | mutex_unlock(&priv->mutex); |
8831 | return -EINVAL; | 8846 | return -EINVAL; |
8832 | } | 8847 | } |
8833 | 8848 | ||
@@ -8835,7 +8850,7 @@ static int ipw_wx_set_frag(struct net_device *dev, | |||
8835 | } | 8850 | } |
8836 | 8851 | ||
8837 | ipw_send_frag_threshold(priv, wrqu->frag.value); | 8852 | ipw_send_frag_threshold(priv, wrqu->frag.value); |
8838 | up(&priv->sem); | 8853 | mutex_unlock(&priv->mutex); |
8839 | IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); | 8854 | IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); |
8840 | return 0; | 8855 | return 0; |
8841 | } | 8856 | } |
@@ -8845,11 +8860,11 @@ static int ipw_wx_get_frag(struct net_device *dev, | |||
8845 | union iwreq_data *wrqu, char *extra) | 8860 | union iwreq_data *wrqu, char *extra) |
8846 | { | 8861 | { |
8847 | struct ipw_priv *priv = ieee80211_priv(dev); | 8862 | struct ipw_priv *priv = ieee80211_priv(dev); |
8848 | down(&priv->sem); | 8863 | mutex_lock(&priv->mutex); |
8849 | wrqu->frag.value = priv->ieee->fts; | 8864 | wrqu->frag.value = priv->ieee->fts; |
8850 | wrqu->frag.fixed = 0; /* no auto select */ | 8865 | wrqu->frag.fixed = 0; /* no auto select */ |
8851 | wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); | 8866 | wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); |
8852 | up(&priv->sem); | 8867 | mutex_unlock(&priv->mutex); |
8853 | IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); | 8868 | IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); |
8854 | 8869 | ||
8855 | return 0; | 8870 | return 0; |
@@ -8870,7 +8885,7 @@ static int ipw_wx_set_retry(struct net_device *dev, | |||
8870 | if (wrqu->retry.value < 0 || wrqu->retry.value > 255) | 8885 | if (wrqu->retry.value < 0 || wrqu->retry.value > 255) |
8871 | return -EINVAL; | 8886 | return -EINVAL; |
8872 | 8887 | ||
8873 | down(&priv->sem); | 8888 | mutex_lock(&priv->mutex); |
8874 | if (wrqu->retry.flags & IW_RETRY_MIN) | 8889 | if (wrqu->retry.flags & IW_RETRY_MIN) |
8875 | priv->short_retry_limit = (u8) wrqu->retry.value; | 8890 | priv->short_retry_limit = (u8) wrqu->retry.value; |
8876 | else if (wrqu->retry.flags & IW_RETRY_MAX) | 8891 | else if (wrqu->retry.flags & IW_RETRY_MAX) |
@@ -8882,7 +8897,7 @@ static int ipw_wx_set_retry(struct net_device *dev, | |||
8882 | 8897 | ||
8883 | ipw_send_retry_limit(priv, priv->short_retry_limit, | 8898 | ipw_send_retry_limit(priv, priv->short_retry_limit, |
8884 | priv->long_retry_limit); | 8899 | priv->long_retry_limit); |
8885 | up(&priv->sem); | 8900 | mutex_unlock(&priv->mutex); |
8886 | IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", | 8901 | IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", |
8887 | priv->short_retry_limit, priv->long_retry_limit); | 8902 | priv->short_retry_limit, priv->long_retry_limit); |
8888 | return 0; | 8903 | return 0; |
@@ -8894,11 +8909,11 @@ static int ipw_wx_get_retry(struct net_device *dev, | |||
8894 | { | 8909 | { |
8895 | struct ipw_priv *priv = ieee80211_priv(dev); | 8910 | struct ipw_priv *priv = ieee80211_priv(dev); |
8896 | 8911 | ||
8897 | down(&priv->sem); | 8912 | mutex_lock(&priv->mutex); |
8898 | wrqu->retry.disabled = 0; | 8913 | wrqu->retry.disabled = 0; |
8899 | 8914 | ||
8900 | if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { | 8915 | if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { |
8901 | up(&priv->sem); | 8916 | mutex_unlock(&priv->mutex); |
8902 | return -EINVAL; | 8917 | return -EINVAL; |
8903 | } | 8918 | } |
8904 | 8919 | ||
@@ -8912,7 +8927,7 @@ static int ipw_wx_get_retry(struct net_device *dev, | |||
8912 | wrqu->retry.flags = IW_RETRY_LIMIT; | 8927 | wrqu->retry.flags = IW_RETRY_LIMIT; |
8913 | wrqu->retry.value = priv->short_retry_limit; | 8928 | wrqu->retry.value = priv->short_retry_limit; |
8914 | } | 8929 | } |
8915 | up(&priv->sem); | 8930 | mutex_unlock(&priv->mutex); |
8916 | 8931 | ||
8917 | IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); | 8932 | IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); |
8918 | 8933 | ||
@@ -8929,7 +8944,7 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, | |||
8929 | (priv->status & STATUS_EXIT_PENDING)) | 8944 | (priv->status & STATUS_EXIT_PENDING)) |
8930 | return 0; | 8945 | return 0; |
8931 | 8946 | ||
8932 | down(&priv->sem); | 8947 | mutex_lock(&priv->mutex); |
8933 | 8948 | ||
8934 | if (priv->status & STATUS_RF_KILL_MASK) { | 8949 | if (priv->status & STATUS_RF_KILL_MASK) { |
8935 | IPW_DEBUG_HC("Aborting scan due to RF kill activation\n"); | 8950 | IPW_DEBUG_HC("Aborting scan due to RF kill activation\n"); |
@@ -8981,7 +8996,7 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, | |||
8981 | priv->status |= STATUS_SCANNING; | 8996 | priv->status |= STATUS_SCANNING; |
8982 | 8997 | ||
8983 | done: | 8998 | done: |
8984 | up(&priv->sem); | 8999 | mutex_unlock(&priv->mutex); |
8985 | return err; | 9000 | return err; |
8986 | } | 9001 | } |
8987 | 9002 | ||
@@ -9024,7 +9039,7 @@ static int ipw_wx_set_encode(struct net_device *dev, | |||
9024 | int ret; | 9039 | int ret; |
9025 | u32 cap = priv->capability; | 9040 | u32 cap = priv->capability; |
9026 | 9041 | ||
9027 | down(&priv->sem); | 9042 | mutex_lock(&priv->mutex); |
9028 | ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key); | 9043 | ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key); |
9029 | 9044 | ||
9030 | /* In IBSS mode, we need to notify the firmware to update | 9045 | /* In IBSS mode, we need to notify the firmware to update |
@@ -9034,7 +9049,7 @@ static int ipw_wx_set_encode(struct net_device *dev, | |||
9034 | priv->status & STATUS_ASSOCIATED) | 9049 | priv->status & STATUS_ASSOCIATED) |
9035 | ipw_disassociate(priv); | 9050 | ipw_disassociate(priv); |
9036 | 9051 | ||
9037 | up(&priv->sem); | 9052 | mutex_unlock(&priv->mutex); |
9038 | return ret; | 9053 | return ret; |
9039 | } | 9054 | } |
9040 | 9055 | ||
@@ -9052,17 +9067,17 @@ static int ipw_wx_set_power(struct net_device *dev, | |||
9052 | { | 9067 | { |
9053 | struct ipw_priv *priv = ieee80211_priv(dev); | 9068 | struct ipw_priv *priv = ieee80211_priv(dev); |
9054 | int err; | 9069 | int err; |
9055 | down(&priv->sem); | 9070 | mutex_lock(&priv->mutex); |
9056 | if (wrqu->power.disabled) { | 9071 | if (wrqu->power.disabled) { |
9057 | priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); | 9072 | priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); |
9058 | err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); | 9073 | err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); |
9059 | if (err) { | 9074 | if (err) { |
9060 | IPW_DEBUG_WX("failed setting power mode.\n"); | 9075 | IPW_DEBUG_WX("failed setting power mode.\n"); |
9061 | up(&priv->sem); | 9076 | mutex_unlock(&priv->mutex); |
9062 | return err; | 9077 | return err; |
9063 | } | 9078 | } |
9064 | IPW_DEBUG_WX("SET Power Management Mode -> off\n"); | 9079 | IPW_DEBUG_WX("SET Power Management Mode -> off\n"); |
9065 | up(&priv->sem); | 9080 | mutex_unlock(&priv->mutex); |
9066 | return 0; | 9081 | return 0; |
9067 | } | 9082 | } |
9068 | 9083 | ||
@@ -9074,7 +9089,7 @@ static int ipw_wx_set_power(struct net_device *dev, | |||
9074 | default: /* Otherwise we don't support it */ | 9089 | default: /* Otherwise we don't support it */ |
9075 | IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", | 9090 | IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", |
9076 | wrqu->power.flags); | 9091 | wrqu->power.flags); |
9077 | up(&priv->sem); | 9092 | mutex_unlock(&priv->mutex); |
9078 | return -EOPNOTSUPP; | 9093 | return -EOPNOTSUPP; |
9079 | } | 9094 | } |
9080 | 9095 | ||
@@ -9087,12 +9102,12 @@ static int ipw_wx_set_power(struct net_device *dev, | |||
9087 | err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); | 9102 | err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); |
9088 | if (err) { | 9103 | if (err) { |
9089 | IPW_DEBUG_WX("failed setting power mode.\n"); | 9104 | IPW_DEBUG_WX("failed setting power mode.\n"); |
9090 | up(&priv->sem); | 9105 | mutex_unlock(&priv->mutex); |
9091 | return err; | 9106 | return err; |
9092 | } | 9107 | } |
9093 | 9108 | ||
9094 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); | 9109 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); |
9095 | up(&priv->sem); | 9110 | mutex_unlock(&priv->mutex); |
9096 | return 0; | 9111 | return 0; |
9097 | } | 9112 | } |
9098 | 9113 | ||
@@ -9101,13 +9116,13 @@ static int ipw_wx_get_power(struct net_device *dev, | |||
9101 | union iwreq_data *wrqu, char *extra) | 9116 | union iwreq_data *wrqu, char *extra) |
9102 | { | 9117 | { |
9103 | struct ipw_priv *priv = ieee80211_priv(dev); | 9118 | struct ipw_priv *priv = ieee80211_priv(dev); |
9104 | down(&priv->sem); | 9119 | mutex_lock(&priv->mutex); |
9105 | if (!(priv->power_mode & IPW_POWER_ENABLED)) | 9120 | if (!(priv->power_mode & IPW_POWER_ENABLED)) |
9106 | wrqu->power.disabled = 1; | 9121 | wrqu->power.disabled = 1; |
9107 | else | 9122 | else |
9108 | wrqu->power.disabled = 0; | 9123 | wrqu->power.disabled = 0; |
9109 | 9124 | ||
9110 | up(&priv->sem); | 9125 | mutex_unlock(&priv->mutex); |
9111 | IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); | 9126 | IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); |
9112 | 9127 | ||
9113 | return 0; | 9128 | return 0; |
@@ -9120,7 +9135,7 @@ static int ipw_wx_set_powermode(struct net_device *dev, | |||
9120 | struct ipw_priv *priv = ieee80211_priv(dev); | 9135 | struct ipw_priv *priv = ieee80211_priv(dev); |
9121 | int mode = *(int *)extra; | 9136 | int mode = *(int *)extra; |
9122 | int err; | 9137 | int err; |
9123 | down(&priv->sem); | 9138 | mutex_lock(&priv->mutex); |
9124 | if ((mode < 1) || (mode > IPW_POWER_LIMIT)) { | 9139 | if ((mode < 1) || (mode > IPW_POWER_LIMIT)) { |
9125 | mode = IPW_POWER_AC; | 9140 | mode = IPW_POWER_AC; |
9126 | priv->power_mode = mode; | 9141 | priv->power_mode = mode; |
@@ -9133,11 +9148,11 @@ static int ipw_wx_set_powermode(struct net_device *dev, | |||
9133 | 9148 | ||
9134 | if (err) { | 9149 | if (err) { |
9135 | IPW_DEBUG_WX("failed setting power mode.\n"); | 9150 | IPW_DEBUG_WX("failed setting power mode.\n"); |
9136 | up(&priv->sem); | 9151 | mutex_unlock(&priv->mutex); |
9137 | return err; | 9152 | return err; |
9138 | } | 9153 | } |
9139 | } | 9154 | } |
9140 | up(&priv->sem); | 9155 | mutex_unlock(&priv->mutex); |
9141 | return 0; | 9156 | return 0; |
9142 | } | 9157 | } |
9143 | 9158 | ||
@@ -9186,7 +9201,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev, | |||
9186 | IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); | 9201 | IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); |
9187 | return -EINVAL; | 9202 | return -EINVAL; |
9188 | } | 9203 | } |
9189 | down(&priv->sem); | 9204 | mutex_lock(&priv->mutex); |
9190 | if (priv->adapter == IPW_2915ABG) { | 9205 | if (priv->adapter == IPW_2915ABG) { |
9191 | priv->ieee->abg_true = 1; | 9206 | priv->ieee->abg_true = 1; |
9192 | if (mode & IEEE_A) { | 9207 | if (mode & IEEE_A) { |
@@ -9198,7 +9213,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev, | |||
9198 | if (mode & IEEE_A) { | 9213 | if (mode & IEEE_A) { |
9199 | IPW_WARNING("Attempt to set 2200BG into " | 9214 | IPW_WARNING("Attempt to set 2200BG into " |
9200 | "802.11a mode\n"); | 9215 | "802.11a mode\n"); |
9201 | up(&priv->sem); | 9216 | mutex_unlock(&priv->mutex); |
9202 | return -EINVAL; | 9217 | return -EINVAL; |
9203 | } | 9218 | } |
9204 | 9219 | ||
@@ -9235,7 +9250,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev, | |||
9235 | IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", | 9250 | IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", |
9236 | mode & IEEE_A ? 'a' : '.', | 9251 | mode & IEEE_A ? 'a' : '.', |
9237 | mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); | 9252 | mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); |
9238 | up(&priv->sem); | 9253 | mutex_unlock(&priv->mutex); |
9239 | return 0; | 9254 | return 0; |
9240 | } | 9255 | } |
9241 | 9256 | ||
@@ -9244,7 +9259,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev, | |||
9244 | union iwreq_data *wrqu, char *extra) | 9259 | union iwreq_data *wrqu, char *extra) |
9245 | { | 9260 | { |
9246 | struct ipw_priv *priv = ieee80211_priv(dev); | 9261 | struct ipw_priv *priv = ieee80211_priv(dev); |
9247 | down(&priv->sem); | 9262 | mutex_lock(&priv->mutex); |
9248 | switch (priv->ieee->mode) { | 9263 | switch (priv->ieee->mode) { |
9249 | case IEEE_A: | 9264 | case IEEE_A: |
9250 | strncpy(extra, "802.11a (1)", MAX_WX_STRING); | 9265 | strncpy(extra, "802.11a (1)", MAX_WX_STRING); |
@@ -9275,7 +9290,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev, | |||
9275 | IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); | 9290 | IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); |
9276 | 9291 | ||
9277 | wrqu->data.length = strlen(extra) + 1; | 9292 | wrqu->data.length = strlen(extra) + 1; |
9278 | up(&priv->sem); | 9293 | mutex_unlock(&priv->mutex); |
9279 | 9294 | ||
9280 | return 0; | 9295 | return 0; |
9281 | } | 9296 | } |
@@ -9286,7 +9301,7 @@ static int ipw_wx_set_preamble(struct net_device *dev, | |||
9286 | { | 9301 | { |
9287 | struct ipw_priv *priv = ieee80211_priv(dev); | 9302 | struct ipw_priv *priv = ieee80211_priv(dev); |
9288 | int mode = *(int *)extra; | 9303 | int mode = *(int *)extra; |
9289 | down(&priv->sem); | 9304 | mutex_lock(&priv->mutex); |
9290 | /* Switching from SHORT -> LONG requires a disassociation */ | 9305 | /* Switching from SHORT -> LONG requires a disassociation */ |
9291 | if (mode == 1) { | 9306 | if (mode == 1) { |
9292 | if (!(priv->config & CFG_PREAMBLE_LONG)) { | 9307 | if (!(priv->config & CFG_PREAMBLE_LONG)) { |
@@ -9305,11 +9320,11 @@ static int ipw_wx_set_preamble(struct net_device *dev, | |||
9305 | priv->config &= ~CFG_PREAMBLE_LONG; | 9320 | priv->config &= ~CFG_PREAMBLE_LONG; |
9306 | goto done; | 9321 | goto done; |
9307 | } | 9322 | } |
9308 | up(&priv->sem); | 9323 | mutex_unlock(&priv->mutex); |
9309 | return -EINVAL; | 9324 | return -EINVAL; |
9310 | 9325 | ||
9311 | done: | 9326 | done: |
9312 | up(&priv->sem); | 9327 | mutex_unlock(&priv->mutex); |
9313 | return 0; | 9328 | return 0; |
9314 | } | 9329 | } |
9315 | 9330 | ||
@@ -9318,12 +9333,12 @@ static int ipw_wx_get_preamble(struct net_device *dev, | |||
9318 | union iwreq_data *wrqu, char *extra) | 9333 | union iwreq_data *wrqu, char *extra) |
9319 | { | 9334 | { |
9320 | struct ipw_priv *priv = ieee80211_priv(dev); | 9335 | struct ipw_priv *priv = ieee80211_priv(dev); |
9321 | down(&priv->sem); | 9336 | mutex_lock(&priv->mutex); |
9322 | if (priv->config & CFG_PREAMBLE_LONG) | 9337 | if (priv->config & CFG_PREAMBLE_LONG) |
9323 | snprintf(wrqu->name, IFNAMSIZ, "long (1)"); | 9338 | snprintf(wrqu->name, IFNAMSIZ, "long (1)"); |
9324 | else | 9339 | else |
9325 | snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); | 9340 | snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); |
9326 | up(&priv->sem); | 9341 | mutex_unlock(&priv->mutex); |
9327 | return 0; | 9342 | return 0; |
9328 | } | 9343 | } |
9329 | 9344 | ||
@@ -9335,7 +9350,7 @@ static int ipw_wx_set_monitor(struct net_device *dev, | |||
9335 | struct ipw_priv *priv = ieee80211_priv(dev); | 9350 | struct ipw_priv *priv = ieee80211_priv(dev); |
9336 | int *parms = (int *)extra; | 9351 | int *parms = (int *)extra; |
9337 | int enable = (parms[0] > 0); | 9352 | int enable = (parms[0] > 0); |
9338 | down(&priv->sem); | 9353 | mutex_lock(&priv->mutex); |
9339 | IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); | 9354 | IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); |
9340 | if (enable) { | 9355 | if (enable) { |
9341 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | 9356 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { |
@@ -9350,13 +9365,13 @@ static int ipw_wx_set_monitor(struct net_device *dev, | |||
9350 | ipw_set_channel(priv, parms[1]); | 9365 | ipw_set_channel(priv, parms[1]); |
9351 | } else { | 9366 | } else { |
9352 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | 9367 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { |
9353 | up(&priv->sem); | 9368 | mutex_unlock(&priv->mutex); |
9354 | return 0; | 9369 | return 0; |
9355 | } | 9370 | } |
9356 | priv->net_dev->type = ARPHRD_ETHER; | 9371 | priv->net_dev->type = ARPHRD_ETHER; |
9357 | queue_work(priv->workqueue, &priv->adapter_restart); | 9372 | queue_work(priv->workqueue, &priv->adapter_restart); |
9358 | } | 9373 | } |
9359 | up(&priv->sem); | 9374 | mutex_unlock(&priv->mutex); |
9360 | return 0; | 9375 | return 0; |
9361 | } | 9376 | } |
9362 | 9377 | ||
@@ -9386,9 +9401,9 @@ static int ipw_wx_sw_reset(struct net_device *dev, | |||
9386 | 9401 | ||
9387 | IPW_DEBUG_WX("SW_RESET\n"); | 9402 | IPW_DEBUG_WX("SW_RESET\n"); |
9388 | 9403 | ||
9389 | down(&priv->sem); | 9404 | mutex_lock(&priv->mutex); |
9390 | 9405 | ||
9391 | ret = ipw_sw_reset(priv, 0); | 9406 | ret = ipw_sw_reset(priv, 2); |
9392 | if (!ret) { | 9407 | if (!ret) { |
9393 | free_firmware(); | 9408 | free_firmware(); |
9394 | ipw_adapter_restart(priv); | 9409 | ipw_adapter_restart(priv); |
@@ -9398,9 +9413,9 @@ static int ipw_wx_sw_reset(struct net_device *dev, | |||
9398 | * module parameter, so take appropriate action */ | 9413 | * module parameter, so take appropriate action */ |
9399 | ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); | 9414 | ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); |
9400 | 9415 | ||
9401 | up(&priv->sem); | 9416 | mutex_unlock(&priv->mutex); |
9402 | ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); | 9417 | ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); |
9403 | down(&priv->sem); | 9418 | mutex_lock(&priv->mutex); |
9404 | 9419 | ||
9405 | if (!(priv->status & STATUS_RF_KILL_MASK)) { | 9420 | if (!(priv->status & STATUS_RF_KILL_MASK)) { |
9406 | /* Configuration likely changed -- force [re]association */ | 9421 | /* Configuration likely changed -- force [re]association */ |
@@ -9410,7 +9425,7 @@ static int ipw_wx_sw_reset(struct net_device *dev, | |||
9410 | ipw_associate(priv); | 9425 | ipw_associate(priv); |
9411 | } | 9426 | } |
9412 | 9427 | ||
9413 | up(&priv->sem); | 9428 | mutex_unlock(&priv->mutex); |
9414 | 9429 | ||
9415 | return 0; | 9430 | return 0; |
9416 | } | 9431 | } |
@@ -9423,6 +9438,8 @@ static iw_handler ipw_wx_handlers[] = { | |||
9423 | IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, | 9438 | IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, |
9424 | IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, | 9439 | IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, |
9425 | IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, | 9440 | IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, |
9441 | IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens, | ||
9442 | IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens, | ||
9426 | IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, | 9443 | IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, |
9427 | IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, | 9444 | IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, |
9428 | IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, | 9445 | IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, |
@@ -9568,7 +9585,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) | |||
9568 | wstats->qual.level = average_value(&priv->average_rssi); | 9585 | wstats->qual.level = average_value(&priv->average_rssi); |
9569 | wstats->qual.noise = average_value(&priv->average_noise); | 9586 | wstats->qual.noise = average_value(&priv->average_noise); |
9570 | wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | | 9587 | wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | |
9571 | IW_QUAL_NOISE_UPDATED; | 9588 | IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM; |
9572 | 9589 | ||
9573 | wstats->miss.beacon = average_value(&priv->average_missed_beacons); | 9590 | wstats->miss.beacon = average_value(&priv->average_missed_beacons); |
9574 | wstats->discard.retries = priv->last_tx_failures; | 9591 | wstats->discard.retries = priv->last_tx_failures; |
@@ -9586,7 +9603,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) | |||
9586 | static void init_sys_config(struct ipw_sys_config *sys_config) | 9603 | static void init_sys_config(struct ipw_sys_config *sys_config) |
9587 | { | 9604 | { |
9588 | memset(sys_config, 0, sizeof(struct ipw_sys_config)); | 9605 | memset(sys_config, 0, sizeof(struct ipw_sys_config)); |
9589 | sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */ | 9606 | sys_config->bt_coexistence = 0; |
9590 | sys_config->answer_broadcast_ssid_probe = 0; | 9607 | sys_config->answer_broadcast_ssid_probe = 0; |
9591 | sys_config->accept_all_data_frames = 0; | 9608 | sys_config->accept_all_data_frames = 0; |
9592 | sys_config->accept_non_directed_frames = 1; | 9609 | sys_config->accept_non_directed_frames = 1; |
@@ -9594,12 +9611,13 @@ static void init_sys_config(struct ipw_sys_config *sys_config) | |||
9594 | sys_config->disable_unicast_decryption = 1; | 9611 | sys_config->disable_unicast_decryption = 1; |
9595 | sys_config->exclude_multicast_unencrypted = 0; | 9612 | sys_config->exclude_multicast_unencrypted = 0; |
9596 | sys_config->disable_multicast_decryption = 1; | 9613 | sys_config->disable_multicast_decryption = 1; |
9597 | sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH; | 9614 | sys_config->antenna_diversity = CFG_SYS_ANTENNA_SLOW_DIV; |
9598 | sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ | 9615 | sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ |
9599 | sys_config->dot11g_auto_detection = 0; | 9616 | sys_config->dot11g_auto_detection = 0; |
9600 | sys_config->enable_cts_to_self = 0; | 9617 | sys_config->enable_cts_to_self = 0; |
9601 | sys_config->bt_coexist_collision_thr = 0; | 9618 | sys_config->bt_coexist_collision_thr = 0; |
9602 | sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256 | 9619 | sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256 |
9620 | sys_config->silence_threshold = 0x1e; | ||
9603 | } | 9621 | } |
9604 | 9622 | ||
9605 | static int ipw_net_open(struct net_device *dev) | 9623 | static int ipw_net_open(struct net_device *dev) |
@@ -9607,11 +9625,11 @@ static int ipw_net_open(struct net_device *dev) | |||
9607 | struct ipw_priv *priv = ieee80211_priv(dev); | 9625 | struct ipw_priv *priv = ieee80211_priv(dev); |
9608 | IPW_DEBUG_INFO("dev->open\n"); | 9626 | IPW_DEBUG_INFO("dev->open\n"); |
9609 | /* we should be verifying the device is ready to be opened */ | 9627 | /* we should be verifying the device is ready to be opened */ |
9610 | down(&priv->sem); | 9628 | mutex_lock(&priv->mutex); |
9611 | if (!(priv->status & STATUS_RF_KILL_MASK) && | 9629 | if (!(priv->status & STATUS_RF_KILL_MASK) && |
9612 | (priv->status & STATUS_ASSOCIATED)) | 9630 | (priv->status & STATUS_ASSOCIATED)) |
9613 | netif_start_queue(dev); | 9631 | netif_start_queue(dev); |
9614 | up(&priv->sem); | 9632 | mutex_unlock(&priv->mutex); |
9615 | return 0; | 9633 | return 0; |
9616 | } | 9634 | } |
9617 | 9635 | ||
@@ -9647,11 +9665,6 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
9647 | u16 remaining_bytes; | 9665 | u16 remaining_bytes; |
9648 | int fc; | 9666 | int fc; |
9649 | 9667 | ||
9650 | /* If there isn't room in the queue, we return busy and let the | ||
9651 | * network stack requeue the packet for us */ | ||
9652 | if (ipw_queue_space(q) < q->high_mark) | ||
9653 | return NETDEV_TX_BUSY; | ||
9654 | |||
9655 | switch (priv->ieee->iw_mode) { | 9668 | switch (priv->ieee->iw_mode) { |
9656 | case IW_MODE_ADHOC: | 9669 | case IW_MODE_ADHOC: |
9657 | hdr_len = IEEE80211_3ADDR_LEN; | 9670 | hdr_len = IEEE80211_3ADDR_LEN; |
@@ -9817,6 +9830,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
9817 | q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); | 9830 | q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); |
9818 | ipw_write32(priv, q->reg_w, q->first_empty); | 9831 | ipw_write32(priv, q->reg_w, q->first_empty); |
9819 | 9832 | ||
9833 | if (ipw_queue_space(q) < q->high_mark) | ||
9834 | netif_stop_queue(priv->net_dev); | ||
9835 | |||
9820 | return NETDEV_TX_OK; | 9836 | return NETDEV_TX_OK; |
9821 | 9837 | ||
9822 | drop: | 9838 | drop: |
@@ -9890,13 +9906,13 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p) | |||
9890 | struct sockaddr *addr = p; | 9906 | struct sockaddr *addr = p; |
9891 | if (!is_valid_ether_addr(addr->sa_data)) | 9907 | if (!is_valid_ether_addr(addr->sa_data)) |
9892 | return -EADDRNOTAVAIL; | 9908 | return -EADDRNOTAVAIL; |
9893 | down(&priv->sem); | 9909 | mutex_lock(&priv->mutex); |
9894 | priv->config |= CFG_CUSTOM_MAC; | 9910 | priv->config |= CFG_CUSTOM_MAC; |
9895 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); | 9911 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); |
9896 | printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n", | 9912 | printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n", |
9897 | priv->net_dev->name, MAC_ARG(priv->mac_addr)); | 9913 | priv->net_dev->name, MAC_ARG(priv->mac_addr)); |
9898 | queue_work(priv->workqueue, &priv->adapter_restart); | 9914 | queue_work(priv->workqueue, &priv->adapter_restart); |
9899 | up(&priv->sem); | 9915 | mutex_unlock(&priv->mutex); |
9900 | return 0; | 9916 | return 0; |
9901 | } | 9917 | } |
9902 | 9918 | ||
@@ -9940,9 +9956,9 @@ static int ipw_ethtool_get_eeprom(struct net_device *dev, | |||
9940 | 9956 | ||
9941 | if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) | 9957 | if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) |
9942 | return -EINVAL; | 9958 | return -EINVAL; |
9943 | down(&p->sem); | 9959 | mutex_lock(&p->mutex); |
9944 | memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); | 9960 | memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); |
9945 | up(&p->sem); | 9961 | mutex_unlock(&p->mutex); |
9946 | return 0; | 9962 | return 0; |
9947 | } | 9963 | } |
9948 | 9964 | ||
@@ -9954,12 +9970,11 @@ static int ipw_ethtool_set_eeprom(struct net_device *dev, | |||
9954 | 9970 | ||
9955 | if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) | 9971 | if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) |
9956 | return -EINVAL; | 9972 | return -EINVAL; |
9957 | down(&p->sem); | 9973 | mutex_lock(&p->mutex); |
9958 | memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); | 9974 | memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); |
9959 | for (i = IPW_EEPROM_DATA; | 9975 | for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) |
9960 | i < IPW_EEPROM_DATA + IPW_EEPROM_IMAGE_SIZE; i++) | 9976 | ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]); |
9961 | ipw_write8(p, i, p->eeprom[i]); | 9977 | mutex_unlock(&p->mutex); |
9962 | up(&p->sem); | ||
9963 | return 0; | 9978 | return 0; |
9964 | } | 9979 | } |
9965 | 9980 | ||
@@ -10054,12 +10069,12 @@ static void ipw_rf_kill(void *adapter) | |||
10054 | static void ipw_bg_rf_kill(void *data) | 10069 | static void ipw_bg_rf_kill(void *data) |
10055 | { | 10070 | { |
10056 | struct ipw_priv *priv = data; | 10071 | struct ipw_priv *priv = data; |
10057 | down(&priv->sem); | 10072 | mutex_lock(&priv->mutex); |
10058 | ipw_rf_kill(data); | 10073 | ipw_rf_kill(data); |
10059 | up(&priv->sem); | 10074 | mutex_unlock(&priv->mutex); |
10060 | } | 10075 | } |
10061 | 10076 | ||
10062 | void ipw_link_up(struct ipw_priv *priv) | 10077 | static void ipw_link_up(struct ipw_priv *priv) |
10063 | { | 10078 | { |
10064 | priv->last_seq_num = -1; | 10079 | priv->last_seq_num = -1; |
10065 | priv->last_frag_num = -1; | 10080 | priv->last_frag_num = -1; |
@@ -10089,12 +10104,12 @@ void ipw_link_up(struct ipw_priv *priv) | |||
10089 | static void ipw_bg_link_up(void *data) | 10104 | static void ipw_bg_link_up(void *data) |
10090 | { | 10105 | { |
10091 | struct ipw_priv *priv = data; | 10106 | struct ipw_priv *priv = data; |
10092 | down(&priv->sem); | 10107 | mutex_lock(&priv->mutex); |
10093 | ipw_link_up(data); | 10108 | ipw_link_up(data); |
10094 | up(&priv->sem); | 10109 | mutex_unlock(&priv->mutex); |
10095 | } | 10110 | } |
10096 | 10111 | ||
10097 | void ipw_link_down(struct ipw_priv *priv) | 10112 | static void ipw_link_down(struct ipw_priv *priv) |
10098 | { | 10113 | { |
10099 | ipw_led_link_down(priv); | 10114 | ipw_led_link_down(priv); |
10100 | netif_carrier_off(priv->net_dev); | 10115 | netif_carrier_off(priv->net_dev); |
@@ -10117,9 +10132,9 @@ void ipw_link_down(struct ipw_priv *priv) | |||
10117 | static void ipw_bg_link_down(void *data) | 10132 | static void ipw_bg_link_down(void *data) |
10118 | { | 10133 | { |
10119 | struct ipw_priv *priv = data; | 10134 | struct ipw_priv *priv = data; |
10120 | down(&priv->sem); | 10135 | mutex_lock(&priv->mutex); |
10121 | ipw_link_down(data); | 10136 | ipw_link_down(data); |
10122 | up(&priv->sem); | 10137 | mutex_unlock(&priv->mutex); |
10123 | } | 10138 | } |
10124 | 10139 | ||
10125 | static int ipw_setup_deferred_work(struct ipw_priv *priv) | 10140 | static int ipw_setup_deferred_work(struct ipw_priv *priv) |
@@ -10292,6 +10307,20 @@ static int ipw_config(struct ipw_priv *priv) | |||
10292 | 10307 | ||
10293 | /* set basic system config settings */ | 10308 | /* set basic system config settings */ |
10294 | init_sys_config(&priv->sys_config); | 10309 | init_sys_config(&priv->sys_config); |
10310 | |||
10311 | /* Support Bluetooth if we have BT h/w on board, and user wants to. | ||
10312 | * Does not support BT priority yet (don't abort or defer our Tx) */ | ||
10313 | if (bt_coexist) { | ||
10314 | unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY]; | ||
10315 | |||
10316 | if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG) | ||
10317 | priv->sys_config.bt_coexistence | ||
10318 | |= CFG_BT_COEXISTENCE_SIGNAL_CHNL; | ||
10319 | if (bt_caps & EEPROM_SKU_CAP_BT_OOB) | ||
10320 | priv->sys_config.bt_coexistence | ||
10321 | |= CFG_BT_COEXISTENCE_OOB; | ||
10322 | } | ||
10323 | |||
10295 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) | 10324 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) |
10296 | priv->sys_config.answer_broadcast_ssid_probe = 1; | 10325 | priv->sys_config.answer_broadcast_ssid_probe = 1; |
10297 | else | 10326 | else |
@@ -10349,6 +10378,9 @@ static int ipw_config(struct ipw_priv *priv) | |||
10349 | * not intended for resale of the above mentioned Intel adapters has | 10378 | * not intended for resale of the above mentioned Intel adapters has |
10350 | * not been tested. | 10379 | * not been tested. |
10351 | * | 10380 | * |
10381 | * Remember to update the table in README.ipw2200 when changing this | ||
10382 | * table. | ||
10383 | * | ||
10352 | */ | 10384 | */ |
10353 | static const struct ieee80211_geo ipw_geos[] = { | 10385 | static const struct ieee80211_geo ipw_geos[] = { |
10354 | { /* Restricted */ | 10386 | { /* Restricted */ |
@@ -10596,96 +10628,6 @@ static const struct ieee80211_geo ipw_geos[] = { | |||
10596 | } | 10628 | } |
10597 | }; | 10629 | }; |
10598 | 10630 | ||
10599 | /* GEO code borrowed from ieee80211_geo.c */ | ||
10600 | static int ipw_is_valid_channel(struct ieee80211_device *ieee, u8 channel) | ||
10601 | { | ||
10602 | int i; | ||
10603 | |||
10604 | /* Driver needs to initialize the geography map before using | ||
10605 | * these helper functions */ | ||
10606 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
10607 | |||
10608 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
10609 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
10610 | /* NOTE: If G mode is currently supported but | ||
10611 | * this is a B only channel, we don't see it | ||
10612 | * as valid. */ | ||
10613 | if ((ieee->geo.bg[i].channel == channel) && | ||
10614 | (!(ieee->mode & IEEE_G) || | ||
10615 | !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) | ||
10616 | return IEEE80211_24GHZ_BAND; | ||
10617 | |||
10618 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
10619 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
10620 | if (ieee->geo.a[i].channel == channel) | ||
10621 | return IEEE80211_52GHZ_BAND; | ||
10622 | |||
10623 | return 0; | ||
10624 | } | ||
10625 | |||
10626 | static int ipw_channel_to_index(struct ieee80211_device *ieee, u8 channel) | ||
10627 | { | ||
10628 | int i; | ||
10629 | |||
10630 | /* Driver needs to initialize the geography map before using | ||
10631 | * these helper functions */ | ||
10632 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
10633 | |||
10634 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
10635 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
10636 | if (ieee->geo.bg[i].channel == channel) | ||
10637 | return i; | ||
10638 | |||
10639 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
10640 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
10641 | if (ieee->geo.a[i].channel == channel) | ||
10642 | return i; | ||
10643 | |||
10644 | return -1; | ||
10645 | } | ||
10646 | |||
10647 | static u8 ipw_freq_to_channel(struct ieee80211_device *ieee, u32 freq) | ||
10648 | { | ||
10649 | int i; | ||
10650 | |||
10651 | /* Driver needs to initialize the geography map before using | ||
10652 | * these helper functions */ | ||
10653 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
10654 | |||
10655 | freq /= 100000; | ||
10656 | |||
10657 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
10658 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
10659 | if (ieee->geo.bg[i].freq == freq) | ||
10660 | return ieee->geo.bg[i].channel; | ||
10661 | |||
10662 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
10663 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
10664 | if (ieee->geo.a[i].freq == freq) | ||
10665 | return ieee->geo.a[i].channel; | ||
10666 | |||
10667 | return 0; | ||
10668 | } | ||
10669 | |||
10670 | static int ipw_set_geo(struct ieee80211_device *ieee, | ||
10671 | const struct ieee80211_geo *geo) | ||
10672 | { | ||
10673 | memcpy(ieee->geo.name, geo->name, 3); | ||
10674 | ieee->geo.name[3] = '\0'; | ||
10675 | ieee->geo.bg_channels = geo->bg_channels; | ||
10676 | ieee->geo.a_channels = geo->a_channels; | ||
10677 | memcpy(ieee->geo.bg, geo->bg, geo->bg_channels * | ||
10678 | sizeof(struct ieee80211_channel)); | ||
10679 | memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * | ||
10680 | sizeof(struct ieee80211_channel)); | ||
10681 | return 0; | ||
10682 | } | ||
10683 | |||
10684 | static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *ieee) | ||
10685 | { | ||
10686 | return &ieee->geo; | ||
10687 | } | ||
10688 | |||
10689 | #define MAX_HW_RESTARTS 5 | 10631 | #define MAX_HW_RESTARTS 5 |
10690 | static int ipw_up(struct ipw_priv *priv) | 10632 | static int ipw_up(struct ipw_priv *priv) |
10691 | { | 10633 | { |
@@ -10732,14 +10674,11 @@ static int ipw_up(struct ipw_priv *priv) | |||
10732 | priv->eeprom[EEPROM_COUNTRY_CODE + 2]); | 10674 | priv->eeprom[EEPROM_COUNTRY_CODE + 2]); |
10733 | j = 0; | 10675 | j = 0; |
10734 | } | 10676 | } |
10735 | if (ipw_set_geo(priv->ieee, &ipw_geos[j])) { | 10677 | if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) { |
10736 | IPW_WARNING("Could not set geography."); | 10678 | IPW_WARNING("Could not set geography."); |
10737 | return 0; | 10679 | return 0; |
10738 | } | 10680 | } |
10739 | 10681 | ||
10740 | IPW_DEBUG_INFO("Geography %03d [%s] detected.\n", | ||
10741 | j, priv->ieee->geo.name); | ||
10742 | |||
10743 | if (priv->status & STATUS_RF_KILL_SW) { | 10682 | if (priv->status & STATUS_RF_KILL_SW) { |
10744 | IPW_WARNING("Radio disabled by module parameter.\n"); | 10683 | IPW_WARNING("Radio disabled by module parameter.\n"); |
10745 | return 0; | 10684 | return 0; |
@@ -10782,9 +10721,9 @@ static int ipw_up(struct ipw_priv *priv) | |||
10782 | static void ipw_bg_up(void *data) | 10721 | static void ipw_bg_up(void *data) |
10783 | { | 10722 | { |
10784 | struct ipw_priv *priv = data; | 10723 | struct ipw_priv *priv = data; |
10785 | down(&priv->sem); | 10724 | mutex_lock(&priv->mutex); |
10786 | ipw_up(data); | 10725 | ipw_up(data); |
10787 | up(&priv->sem); | 10726 | mutex_unlock(&priv->mutex); |
10788 | } | 10727 | } |
10789 | 10728 | ||
10790 | static void ipw_deinit(struct ipw_priv *priv) | 10729 | static void ipw_deinit(struct ipw_priv *priv) |
@@ -10853,23 +10792,23 @@ static void ipw_down(struct ipw_priv *priv) | |||
10853 | static void ipw_bg_down(void *data) | 10792 | static void ipw_bg_down(void *data) |
10854 | { | 10793 | { |
10855 | struct ipw_priv *priv = data; | 10794 | struct ipw_priv *priv = data; |
10856 | down(&priv->sem); | 10795 | mutex_lock(&priv->mutex); |
10857 | ipw_down(data); | 10796 | ipw_down(data); |
10858 | up(&priv->sem); | 10797 | mutex_unlock(&priv->mutex); |
10859 | } | 10798 | } |
10860 | 10799 | ||
10861 | /* Called by register_netdev() */ | 10800 | /* Called by register_netdev() */ |
10862 | static int ipw_net_init(struct net_device *dev) | 10801 | static int ipw_net_init(struct net_device *dev) |
10863 | { | 10802 | { |
10864 | struct ipw_priv *priv = ieee80211_priv(dev); | 10803 | struct ipw_priv *priv = ieee80211_priv(dev); |
10865 | down(&priv->sem); | 10804 | mutex_lock(&priv->mutex); |
10866 | 10805 | ||
10867 | if (ipw_up(priv)) { | 10806 | if (ipw_up(priv)) { |
10868 | up(&priv->sem); | 10807 | mutex_unlock(&priv->mutex); |
10869 | return -EIO; | 10808 | return -EIO; |
10870 | } | 10809 | } |
10871 | 10810 | ||
10872 | up(&priv->sem); | 10811 | mutex_unlock(&priv->mutex); |
10873 | return 0; | 10812 | return 0; |
10874 | } | 10813 | } |
10875 | 10814 | ||
@@ -10959,7 +10898,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
10959 | for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) | 10898 | for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) |
10960 | INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); | 10899 | INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); |
10961 | 10900 | ||
10962 | init_MUTEX(&priv->sem); | 10901 | mutex_init(&priv->mutex); |
10963 | if (pci_enable_device(pdev)) { | 10902 | if (pci_enable_device(pdev)) { |
10964 | err = -ENODEV; | 10903 | err = -ENODEV; |
10965 | goto out_free_ieee80211; | 10904 | goto out_free_ieee80211; |
@@ -11017,7 +10956,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
11017 | SET_MODULE_OWNER(net_dev); | 10956 | SET_MODULE_OWNER(net_dev); |
11018 | SET_NETDEV_DEV(net_dev, &pdev->dev); | 10957 | SET_NETDEV_DEV(net_dev, &pdev->dev); |
11019 | 10958 | ||
11020 | down(&priv->sem); | 10959 | mutex_lock(&priv->mutex); |
11021 | 10960 | ||
11022 | priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; | 10961 | priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; |
11023 | priv->ieee->set_security = shim__set_security; | 10962 | priv->ieee->set_security = shim__set_security; |
@@ -11050,16 +10989,22 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
11050 | err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); | 10989 | err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); |
11051 | if (err) { | 10990 | if (err) { |
11052 | IPW_ERROR("failed to create sysfs device attributes\n"); | 10991 | IPW_ERROR("failed to create sysfs device attributes\n"); |
11053 | up(&priv->sem); | 10992 | mutex_unlock(&priv->mutex); |
11054 | goto out_release_irq; | 10993 | goto out_release_irq; |
11055 | } | 10994 | } |
11056 | 10995 | ||
11057 | up(&priv->sem); | 10996 | mutex_unlock(&priv->mutex); |
11058 | err = register_netdev(net_dev); | 10997 | err = register_netdev(net_dev); |
11059 | if (err) { | 10998 | if (err) { |
11060 | IPW_ERROR("failed to register network device\n"); | 10999 | IPW_ERROR("failed to register network device\n"); |
11061 | goto out_remove_sysfs; | 11000 | goto out_remove_sysfs; |
11062 | } | 11001 | } |
11002 | |||
11003 | printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg " | ||
11004 | "channels, %d 802.11a channels)\n", | ||
11005 | priv->ieee->geo.name, priv->ieee->geo.bg_channels, | ||
11006 | priv->ieee->geo.a_channels); | ||
11007 | |||
11063 | return 0; | 11008 | return 0; |
11064 | 11009 | ||
11065 | out_remove_sysfs: | 11010 | out_remove_sysfs: |
@@ -11091,13 +11036,13 @@ static void ipw_pci_remove(struct pci_dev *pdev) | |||
11091 | if (!priv) | 11036 | if (!priv) |
11092 | return; | 11037 | return; |
11093 | 11038 | ||
11094 | down(&priv->sem); | 11039 | mutex_lock(&priv->mutex); |
11095 | 11040 | ||
11096 | priv->status |= STATUS_EXIT_PENDING; | 11041 | priv->status |= STATUS_EXIT_PENDING; |
11097 | ipw_down(priv); | 11042 | ipw_down(priv); |
11098 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); | 11043 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); |
11099 | 11044 | ||
11100 | up(&priv->sem); | 11045 | mutex_unlock(&priv->mutex); |
11101 | 11046 | ||
11102 | unregister_netdev(priv->net_dev); | 11047 | unregister_netdev(priv->net_dev); |
11103 | 11048 | ||
@@ -11250,8 +11195,10 @@ MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); | |||
11250 | module_param(led, int, 0444); | 11195 | module_param(led, int, 0444); |
11251 | MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); | 11196 | MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); |
11252 | 11197 | ||
11198 | #ifdef CONFIG_IPW2200_DEBUG | ||
11253 | module_param(debug, int, 0444); | 11199 | module_param(debug, int, 0444); |
11254 | MODULE_PARM_DESC(debug, "debug output mask"); | 11200 | MODULE_PARM_DESC(debug, "debug output mask"); |
11201 | #endif | ||
11255 | 11202 | ||
11256 | module_param(channel, int, 0444); | 11203 | module_param(channel, int, 0444); |
11257 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); | 11204 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); |
@@ -11281,12 +11228,18 @@ module_param(mode, int, 0444); | |||
11281 | MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)"); | 11228 | MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)"); |
11282 | #endif | 11229 | #endif |
11283 | 11230 | ||
11231 | module_param(bt_coexist, int, 0444); | ||
11232 | MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)"); | ||
11233 | |||
11284 | module_param(hwcrypto, int, 0444); | 11234 | module_param(hwcrypto, int, 0444); |
11285 | MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default on)"); | 11235 | MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)"); |
11286 | 11236 | ||
11287 | module_param(cmdlog, int, 0444); | 11237 | module_param(cmdlog, int, 0444); |
11288 | MODULE_PARM_DESC(cmdlog, | 11238 | MODULE_PARM_DESC(cmdlog, |
11289 | "allocate a ring buffer for logging firmware commands"); | 11239 | "allocate a ring buffer for logging firmware commands"); |
11290 | 11240 | ||
11241 | module_param(roaming, int, 0444); | ||
11242 | MODULE_PARM_DESC(roaming, "enable roaming support (default on)"); | ||
11243 | |||
11291 | module_exit(ipw_exit); | 11244 | module_exit(ipw_exit); |
11292 | module_init(ipw_init); | 11245 | module_init(ipw_init); |
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index e65620a4d79e..4b9804900702 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify it | 5 | This program is free software; you can redistribute it and/or modify it |
6 | under the terms of version 2 of the GNU General Public License as | 6 | under the terms of version 2 of the GNU General Public License as |
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/moduleparam.h> | 33 | #include <linux/moduleparam.h> |
34 | #include <linux/config.h> | 34 | #include <linux/config.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/mutex.h> | ||
36 | 37 | ||
37 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
38 | #include <linux/netdevice.h> | 39 | #include <linux/netdevice.h> |
@@ -46,6 +47,7 @@ | |||
46 | #include <linux/firmware.h> | 47 | #include <linux/firmware.h> |
47 | #include <linux/wireless.h> | 48 | #include <linux/wireless.h> |
48 | #include <linux/dma-mapping.h> | 49 | #include <linux/dma-mapping.h> |
50 | #include <linux/jiffies.h> | ||
49 | #include <asm/io.h> | 51 | #include <asm/io.h> |
50 | 52 | ||
51 | #include <net/ieee80211.h> | 53 | #include <net/ieee80211.h> |
@@ -244,8 +246,10 @@ enum connection_manager_assoc_states { | |||
244 | #define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31 | 246 | #define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31 |
245 | 247 | ||
246 | #define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1 | 248 | #define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1 |
247 | #define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 24 | 249 | #define IPW_MB_ROAMING_THRESHOLD_MIN 1 |
248 | #define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8 | 250 | #define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8 |
251 | #define IPW_MB_ROAMING_THRESHOLD_MAX 30 | ||
252 | #define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 3*IPW_MB_ROAMING_THRESHOLD_DEFAULT | ||
249 | #define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300 | 253 | #define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300 |
250 | 254 | ||
251 | #define MACADRR_BYTE_LEN 6 | 255 | #define MACADRR_BYTE_LEN 6 |
@@ -616,13 +620,16 @@ struct notif_tgi_tx_key { | |||
616 | u8 reserved; | 620 | u8 reserved; |
617 | } __attribute__ ((packed)); | 621 | } __attribute__ ((packed)); |
618 | 622 | ||
623 | #define SILENCE_OVER_THRESH (1) | ||
624 | #define SILENCE_UNDER_THRESH (2) | ||
625 | |||
619 | struct notif_link_deterioration { | 626 | struct notif_link_deterioration { |
620 | struct ipw_cmd_stats stats; | 627 | struct ipw_cmd_stats stats; |
621 | u8 rate; | 628 | u8 rate; |
622 | u8 modulation; | 629 | u8 modulation; |
623 | struct rate_histogram histogram; | 630 | struct rate_histogram histogram; |
624 | u8 reserved1; | 631 | u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */ |
625 | u16 reserved2; | 632 | u16 silence_count; |
626 | } __attribute__ ((packed)); | 633 | } __attribute__ ((packed)); |
627 | 634 | ||
628 | struct notif_association { | 635 | struct notif_association { |
@@ -780,7 +787,7 @@ struct ipw_sys_config { | |||
780 | u8 enable_cts_to_self; | 787 | u8 enable_cts_to_self; |
781 | u8 enable_multicast_filtering; | 788 | u8 enable_multicast_filtering; |
782 | u8 bt_coexist_collision_thr; | 789 | u8 bt_coexist_collision_thr; |
783 | u8 reserved2; | 790 | u8 silence_threshold; |
784 | u8 accept_all_mgmt_bcpr; | 791 | u8 accept_all_mgmt_bcpr; |
785 | u8 accept_all_mgtm_frames; | 792 | u8 accept_all_mgtm_frames; |
786 | u8 pass_noise_stats_to_host; | 793 | u8 pass_noise_stats_to_host; |
@@ -852,7 +859,7 @@ struct ipw_scan_request_ext { | |||
852 | u16 dwell_time[IPW_SCAN_TYPES]; | 859 | u16 dwell_time[IPW_SCAN_TYPES]; |
853 | } __attribute__ ((packed)); | 860 | } __attribute__ ((packed)); |
854 | 861 | ||
855 | extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) | 862 | static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) |
856 | { | 863 | { |
857 | if (index % 2) | 864 | if (index % 2) |
858 | return scan->scan_type[index / 2] & 0x0F; | 865 | return scan->scan_type[index / 2] & 0x0F; |
@@ -860,7 +867,7 @@ extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) | |||
860 | return (scan->scan_type[index / 2] & 0xF0) >> 4; | 867 | return (scan->scan_type[index / 2] & 0xF0) >> 4; |
861 | } | 868 | } |
862 | 869 | ||
863 | extern inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan, | 870 | static inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan, |
864 | u8 index, u8 scan_type) | 871 | u8 index, u8 scan_type) |
865 | { | 872 | { |
866 | if (index % 2) | 873 | if (index % 2) |
@@ -1120,7 +1127,7 @@ struct ipw_priv { | |||
1120 | struct ieee80211_device *ieee; | 1127 | struct ieee80211_device *ieee; |
1121 | 1128 | ||
1122 | spinlock_t lock; | 1129 | spinlock_t lock; |
1123 | struct semaphore sem; | 1130 | struct mutex mutex; |
1124 | 1131 | ||
1125 | /* basic pci-network driver stuff */ | 1132 | /* basic pci-network driver stuff */ |
1126 | struct pci_dev *pci_dev; | 1133 | struct pci_dev *pci_dev; |
@@ -1406,13 +1413,6 @@ do { if (ipw_debug_level & (level)) \ | |||
1406 | * Register bit definitions | 1413 | * Register bit definitions |
1407 | */ | 1414 | */ |
1408 | 1415 | ||
1409 | /* Dino control registers bits */ | ||
1410 | |||
1411 | #define DINO_ENABLE_SYSTEM 0x80 | ||
1412 | #define DINO_ENABLE_CS 0x40 | ||
1413 | #define DINO_RXFIFO_DATA 0x01 | ||
1414 | #define DINO_CONTROL_REG 0x00200000 | ||
1415 | |||
1416 | #define IPW_INTA_RW 0x00000008 | 1416 | #define IPW_INTA_RW 0x00000008 |
1417 | #define IPW_INTA_MASK_R 0x0000000C | 1417 | #define IPW_INTA_MASK_R 0x0000000C |
1418 | #define IPW_INDIRECT_ADDR 0x00000010 | 1418 | #define IPW_INDIRECT_ADDR 0x00000010 |
@@ -1459,6 +1459,11 @@ do { if (ipw_debug_level & (level)) \ | |||
1459 | #define IPW_DOMAIN_0_END 0x1000 | 1459 | #define IPW_DOMAIN_0_END 0x1000 |
1460 | #define CLX_MEM_BAR_SIZE 0x1000 | 1460 | #define CLX_MEM_BAR_SIZE 0x1000 |
1461 | 1461 | ||
1462 | /* Dino/baseband control registers bits */ | ||
1463 | |||
1464 | #define DINO_ENABLE_SYSTEM 0x80 /* 1 = baseband processor on, 0 = reset */ | ||
1465 | #define DINO_ENABLE_CS 0x40 /* 1 = enable ucode load */ | ||
1466 | #define DINO_RXFIFO_DATA 0x01 /* 1 = data available */ | ||
1462 | #define IPW_BASEBAND_CONTROL_STATUS 0X00200000 | 1467 | #define IPW_BASEBAND_CONTROL_STATUS 0X00200000 |
1463 | #define IPW_BASEBAND_TX_FIFO_WRITE 0X00200004 | 1468 | #define IPW_BASEBAND_TX_FIFO_WRITE 0X00200004 |
1464 | #define IPW_BASEBAND_RX_FIFO_READ 0X00200004 | 1469 | #define IPW_BASEBAND_RX_FIFO_READ 0X00200004 |
@@ -1567,13 +1572,18 @@ do { if (ipw_debug_level & (level)) \ | |||
1567 | #define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */ | 1572 | #define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */ |
1568 | #define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */ | 1573 | #define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */ |
1569 | 1574 | ||
1570 | /* NIC type as found in the one byte EEPROM_NIC_TYPE offset*/ | 1575 | /* NIC type as found in the one byte EEPROM_NIC_TYPE offset */ |
1571 | #define EEPROM_NIC_TYPE_0 0 | 1576 | #define EEPROM_NIC_TYPE_0 0 |
1572 | #define EEPROM_NIC_TYPE_1 1 | 1577 | #define EEPROM_NIC_TYPE_1 1 |
1573 | #define EEPROM_NIC_TYPE_2 2 | 1578 | #define EEPROM_NIC_TYPE_2 2 |
1574 | #define EEPROM_NIC_TYPE_3 3 | 1579 | #define EEPROM_NIC_TYPE_3 3 |
1575 | #define EEPROM_NIC_TYPE_4 4 | 1580 | #define EEPROM_NIC_TYPE_4 4 |
1576 | 1581 | ||
1582 | /* Bluetooth Coexistence capabilities as found in EEPROM_SKU_CAPABILITY */ | ||
1583 | #define EEPROM_SKU_CAP_BT_CHANNEL_SIG 0x01 /* we can tell BT our channel # */ | ||
1584 | #define EEPROM_SKU_CAP_BT_PRIORITY 0x02 /* BT can take priority over us */ | ||
1585 | #define EEPROM_SKU_CAP_BT_OOB 0x04 /* we can signal BT out-of-band */ | ||
1586 | |||
1577 | #define FW_MEM_REG_LOWER_BOUND 0x00300000 | 1587 | #define FW_MEM_REG_LOWER_BOUND 0x00300000 |
1578 | #define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40) | 1588 | #define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40) |
1579 | #define IPW_EVENT_REG (FW_MEM_REG_LOWER_BOUND + 0x04) | 1589 | #define IPW_EVENT_REG (FW_MEM_REG_LOWER_BOUND + 0x04) |
@@ -1658,9 +1668,10 @@ enum { | |||
1658 | IPW_FW_ERROR_FATAL_ERROR | 1668 | IPW_FW_ERROR_FATAL_ERROR |
1659 | }; | 1669 | }; |
1660 | 1670 | ||
1661 | #define AUTH_OPEN 0 | 1671 | #define AUTH_OPEN 0 |
1662 | #define AUTH_SHARED_KEY 1 | 1672 | #define AUTH_SHARED_KEY 1 |
1663 | #define AUTH_IGNORE 3 | 1673 | #define AUTH_LEAP 2 |
1674 | #define AUTH_IGNORE 3 | ||
1664 | 1675 | ||
1665 | #define HC_ASSOCIATE 0 | 1676 | #define HC_ASSOCIATE 0 |
1666 | #define HC_REASSOCIATE 1 | 1677 | #define HC_REASSOCIATE 1 |
@@ -1860,7 +1871,7 @@ struct host_cmd { | |||
1860 | u8 cmd; | 1871 | u8 cmd; |
1861 | u8 len; | 1872 | u8 len; |
1862 | u16 reserved; | 1873 | u16 reserved; |
1863 | u32 param[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; | 1874 | u32 *param; |
1864 | } __attribute__ ((packed)); | 1875 | } __attribute__ ((packed)); |
1865 | 1876 | ||
1866 | struct ipw_cmd_log { | 1877 | struct ipw_cmd_log { |
@@ -1869,21 +1880,24 @@ struct ipw_cmd_log { | |||
1869 | struct host_cmd cmd; | 1880 | struct host_cmd cmd; |
1870 | }; | 1881 | }; |
1871 | 1882 | ||
1872 | #define CFG_BT_COEXISTENCE_MIN 0x00 | 1883 | /* SysConfig command parameters ... */ |
1873 | #define CFG_BT_COEXISTENCE_DEFER 0x02 | 1884 | /* bt_coexistence param */ |
1874 | #define CFG_BT_COEXISTENCE_KILL 0x04 | 1885 | #define CFG_BT_COEXISTENCE_SIGNAL_CHNL 0x01 /* tell BT our chnl # */ |
1875 | #define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08 | 1886 | #define CFG_BT_COEXISTENCE_DEFER 0x02 /* defer our Tx if BT traffic */ |
1876 | #define CFG_BT_COEXISTENCE_OOB 0x10 | 1887 | #define CFG_BT_COEXISTENCE_KILL 0x04 /* kill our Tx if BT traffic */ |
1877 | #define CFG_BT_COEXISTENCE_MAX 0xFF | 1888 | #define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08 /* multimedia extensions */ |
1878 | #define CFG_BT_COEXISTENCE_DEF 0x80 /* read Bt from EEPROM */ | 1889 | #define CFG_BT_COEXISTENCE_OOB 0x10 /* signal BT via out-of-band */ |
1879 | 1890 | ||
1880 | #define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x0 | 1891 | /* clear-to-send to self param */ |
1881 | #define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x1 | 1892 | #define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x00 |
1893 | #define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x01 | ||
1882 | #define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN | 1894 | #define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN |
1883 | 1895 | ||
1884 | #define CFG_SYS_ANTENNA_BOTH 0x000 | 1896 | /* Antenna diversity param (h/w can select best antenna, based on signal) */ |
1885 | #define CFG_SYS_ANTENNA_A 0x001 | 1897 | #define CFG_SYS_ANTENNA_BOTH 0x00 /* NIC selects best antenna */ |
1886 | #define CFG_SYS_ANTENNA_B 0x003 | 1898 | #define CFG_SYS_ANTENNA_A 0x01 /* force antenna A */ |
1899 | #define CFG_SYS_ANTENNA_B 0x03 /* force antenna B */ | ||
1900 | #define CFG_SYS_ANTENNA_SLOW_DIV 0x02 /* consider background noise */ | ||
1887 | 1901 | ||
1888 | /* | 1902 | /* |
1889 | * The definitions below were lifted off the ipw2100 driver, which only | 1903 | * The definitions below were lifted off the ipw2100 driver, which only |
@@ -1899,27 +1913,4 @@ struct ipw_cmd_log { | |||
1899 | 1913 | ||
1900 | #define IPW_MAX_CONFIG_RETRIES 10 | 1914 | #define IPW_MAX_CONFIG_RETRIES 10 |
1901 | 1915 | ||
1902 | static inline u32 frame_hdr_len(struct ieee80211_hdr_4addr *hdr) | ||
1903 | { | ||
1904 | u32 retval; | ||
1905 | u16 fc; | ||
1906 | |||
1907 | retval = sizeof(struct ieee80211_hdr_3addr); | ||
1908 | fc = le16_to_cpu(hdr->frame_ctl); | ||
1909 | |||
1910 | /* | ||
1911 | * Function ToDS FromDS | ||
1912 | * IBSS 0 0 | ||
1913 | * To AP 1 0 | ||
1914 | * From AP 0 1 | ||
1915 | * WDS (bridge) 1 1 | ||
1916 | * | ||
1917 | * Only WDS frames use Address4 among them. --YZ | ||
1918 | */ | ||
1919 | if (!(fc & IEEE80211_FCTL_TODS) || !(fc & IEEE80211_FCTL_FROMDS)) | ||
1920 | retval -= ETH_ALEN; | ||
1921 | |||
1922 | return retval; | ||
1923 | } | ||
1924 | |||
1925 | #endif /* __ipw2200_h__ */ | 1916 | #endif /* __ipw2200_h__ */ |
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index bf6271ee387a..75ce6ddb0cf5 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c | |||
@@ -55,10 +55,8 @@ | |||
55 | #include <linux/etherdevice.h> | 55 | #include <linux/etherdevice.h> |
56 | #include <linux/skbuff.h> | 56 | #include <linux/skbuff.h> |
57 | #include <linux/bitops.h> | 57 | #include <linux/bitops.h> |
58 | #ifdef CONFIG_NET_RADIO | ||
59 | #include <linux/wireless.h> | 58 | #include <linux/wireless.h> |
60 | #include <net/iw_handler.h> | 59 | #include <net/iw_handler.h> |
61 | #endif | ||
62 | 60 | ||
63 | #include <pcmcia/cs_types.h> | 61 | #include <pcmcia/cs_types.h> |
64 | #include <pcmcia/cs.h> | 62 | #include <pcmcia/cs.h> |
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index 18baacfc5a2c..18a44580b53b 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c | |||
@@ -112,7 +112,7 @@ static const char StripVersion[] = "1.3A-STUART.CHESHIRE"; | |||
112 | #include <linux/ip.h> | 112 | #include <linux/ip.h> |
113 | #include <linux/tcp.h> | 113 | #include <linux/tcp.h> |
114 | #include <linux/time.h> | 114 | #include <linux/time.h> |
115 | 115 | #include <linux/jiffies.h> | |
116 | 116 | ||
117 | /************************************************************************/ | 117 | /************************************************************************/ |
118 | /* Useful structures and definitions */ | 118 | /* Useful structures and definitions */ |
@@ -1569,7 +1569,7 @@ static int strip_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1569 | del_timer(&strip_info->idle_timer); | 1569 | del_timer(&strip_info->idle_timer); |
1570 | 1570 | ||
1571 | 1571 | ||
1572 | if (jiffies - strip_info->pps_timer > HZ) { | 1572 | if (time_after(jiffies, strip_info->pps_timer + HZ)) { |
1573 | unsigned long t = jiffies - strip_info->pps_timer; | 1573 | unsigned long t = jiffies - strip_info->pps_timer; |
1574 | unsigned long rx_pps_count = (strip_info->rx_pps_count * HZ * 8 + t / 2) / t; | 1574 | unsigned long rx_pps_count = (strip_info->rx_pps_count * HZ * 8 + t / 2) / t; |
1575 | unsigned long tx_pps_count = (strip_info->tx_pps_count * HZ * 8 + t / 2) / t; | 1575 | unsigned long tx_pps_count = (strip_info->tx_pps_count * HZ * 8 + t / 2) / t; |
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h index 166e28b9a4f7..5cb0bc8bb128 100644 --- a/drivers/net/wireless/wavelan.p.h +++ b/drivers/net/wireless/wavelan.p.h | |||
@@ -98,11 +98,7 @@ | |||
98 | * characteristics of the hardware. Applications such as mobile IP may | 98 | * characteristics of the hardware. Applications such as mobile IP may |
99 | * take advantage of it. | 99 | * take advantage of it. |
100 | * | 100 | * |
101 | * You will need to enable the CONFIG_NET_RADIO define in the kernel | 101 | * It might be a good idea as well to fetch the wireless tools to |
102 | * configuration to enable the wireless extensions (this is the one | ||
103 | * giving access to the radio network device choice). | ||
104 | * | ||
105 | * It might also be a good idea as well to fetch the wireless tools to | ||
106 | * configure the device and play a bit. | 102 | * configure the device and play a bit. |
107 | */ | 103 | */ |
108 | 104 | ||
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h index f2d597568151..451f6271dcbc 100644 --- a/drivers/net/wireless/wavelan_cs.p.h +++ b/drivers/net/wireless/wavelan_cs.p.h | |||
@@ -99,11 +99,7 @@ | |||
99 | * caracteristics of the hardware in a standard way and support for | 99 | * caracteristics of the hardware in a standard way and support for |
100 | * applications for taking advantage of it (like Mobile IP). | 100 | * applications for taking advantage of it (like Mobile IP). |
101 | * | 101 | * |
102 | * You will need to enable the CONFIG_NET_RADIO define in the kernel | 102 | * It might be a good idea as well to fetch the wireless tools to |
103 | * configuration to enable the wireless extensions (this is the one | ||
104 | * giving access to the radio network device choice). | ||
105 | * | ||
106 | * It might also be a good idea as well to fetch the wireless tools to | ||
107 | * configure the device and play a bit. | 103 | * configure the device and play a bit. |
108 | */ | 104 | */ |
109 | 105 | ||
@@ -440,11 +436,8 @@ | |||
440 | #include <linux/ioport.h> | 436 | #include <linux/ioport.h> |
441 | #include <linux/fcntl.h> | 437 | #include <linux/fcntl.h> |
442 | #include <linux/ethtool.h> | 438 | #include <linux/ethtool.h> |
443 | |||
444 | #ifdef CONFIG_NET_RADIO | ||
445 | #include <linux/wireless.h> /* Wireless extensions */ | 439 | #include <linux/wireless.h> /* Wireless extensions */ |
446 | #include <net/iw_handler.h> /* New driver API */ | 440 | #include <net/iw_handler.h> /* New driver API */ |
447 | #endif | ||
448 | 441 | ||
449 | /* Pcmcia headers that we need */ | 442 | /* Pcmcia headers that we need */ |
450 | #include <pcmcia/cs_types.h> | 443 | #include <pcmcia/cs_types.h> |
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 1c2506535f7e..75d56bfef0ee 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -69,8 +69,8 @@ static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ | |||
69 | static int dma_ctrl = 0x00CAC277; /* Override when loading module! */ | 69 | static int dma_ctrl = 0x00CAC277; /* Override when loading module! */ |
70 | static int fifo_cfg = 0x0028; | 70 | static int fifo_cfg = 0x0028; |
71 | #else | 71 | #else |
72 | static int dma_ctrl = 0x004A0263; /* Constrained by errata */ | 72 | static const int dma_ctrl = 0x004A0263; /* Constrained by errata */ |
73 | static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ | 73 | static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ |
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | 76 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. |
@@ -266,7 +266,7 @@ struct pci_id_info { | |||
266 | int drv_flags; /* Driver use, intended as capability flags. */ | 266 | int drv_flags; /* Driver use, intended as capability flags. */ |
267 | }; | 267 | }; |
268 | 268 | ||
269 | static struct pci_id_info pci_id_tbl[] = { | 269 | static const struct pci_id_info pci_id_tbl[] = { |
270 | {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, | 270 | {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, |
271 | PCI_IOTYPE, YELLOWFIN_SIZE, | 271 | PCI_IOTYPE, YELLOWFIN_SIZE, |
272 | FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, | 272 | FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, |
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index 8ab6e12153ba..761021603597 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/netdevice.h> | 27 | #include <linux/netdevice.h> |
28 | #include <linux/etherdevice.h> | 28 | #include <linux/etherdevice.h> |
29 | #include <linux/zorro.h> | 29 | #include <linux/zorro.h> |
30 | #include <linux/jiffies.h> | ||
30 | 31 | ||
31 | #include <asm/system.h> | 32 | #include <asm/system.h> |
32 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
@@ -151,7 +152,7 @@ static int __devinit zorro8390_init(struct net_device *dev, | |||
151 | z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); | 152 | z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); |
152 | 153 | ||
153 | while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) | 154 | while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) |
154 | if (jiffies - reset_start_time > 2*HZ/100) { | 155 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
155 | printk(KERN_WARNING " not found (no reset ack).\n"); | 156 | printk(KERN_WARNING " not found (no reset ack).\n"); |
156 | return -ENODEV; | 157 | return -ENODEV; |
157 | } | 158 | } |
@@ -273,7 +274,7 @@ static void zorro8390_reset_8390(struct net_device *dev) | |||
273 | 274 | ||
274 | /* This check _should_not_ be necessary, omit eventually. */ | 275 | /* This check _should_not_ be necessary, omit eventually. */ |
275 | while ((z_readb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) | 276 | while ((z_readb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) |
276 | if (jiffies - reset_start_time > 2*HZ/100) { | 277 | if (time_after(jiffies, reset_start_time + 2*HZ/100)) { |
277 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", | 278 | printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", |
278 | dev->name); | 279 | dev->name); |
279 | break; | 280 | break; |
@@ -400,7 +401,7 @@ static void zorro8390_block_output(struct net_device *dev, int count, | |||
400 | dma_start = jiffies; | 401 | dma_start = jiffies; |
401 | 402 | ||
402 | while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) | 403 | while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) |
403 | if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ | 404 | if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ |
404 | printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n", | 405 | printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n", |
405 | dev->name); | 406 | dev->name); |
406 | zorro8390_reset_8390(dev); | 407 | zorro8390_reset_8390(dev); |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 2e727f49ad19..44133250da2e 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -273,7 +273,7 @@ removeseg: | |||
273 | list_del(&dev_info->lh); | 273 | list_del(&dev_info->lh); |
274 | 274 | ||
275 | del_gendisk(dev_info->gd); | 275 | del_gendisk(dev_info->gd); |
276 | blk_put_queue(dev_info->dcssblk_queue); | 276 | blk_cleanup_queue(dev_info->dcssblk_queue); |
277 | dev_info->gd->queue = NULL; | 277 | dev_info->gd->queue = NULL; |
278 | put_disk(dev_info->gd); | 278 | put_disk(dev_info->gd); |
279 | device_unregister(dev); | 279 | device_unregister(dev); |
@@ -491,7 +491,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
491 | unregister_dev: | 491 | unregister_dev: |
492 | PRINT_ERR("device_create_file() failed!\n"); | 492 | PRINT_ERR("device_create_file() failed!\n"); |
493 | list_del(&dev_info->lh); | 493 | list_del(&dev_info->lh); |
494 | blk_put_queue(dev_info->dcssblk_queue); | 494 | blk_cleanup_queue(dev_info->dcssblk_queue); |
495 | dev_info->gd->queue = NULL; | 495 | dev_info->gd->queue = NULL; |
496 | put_disk(dev_info->gd); | 496 | put_disk(dev_info->gd); |
497 | device_unregister(&dev_info->dev); | 497 | device_unregister(&dev_info->dev); |
@@ -505,7 +505,7 @@ list_del: | |||
505 | unload_seg: | 505 | unload_seg: |
506 | segment_unload(local_buf); | 506 | segment_unload(local_buf); |
507 | dealloc_gendisk: | 507 | dealloc_gendisk: |
508 | blk_put_queue(dev_info->dcssblk_queue); | 508 | blk_cleanup_queue(dev_info->dcssblk_queue); |
509 | dev_info->gd->queue = NULL; | 509 | dev_info->gd->queue = NULL; |
510 | put_disk(dev_info->gd); | 510 | put_disk(dev_info->gd); |
511 | free_dev_info: | 511 | free_dev_info: |
@@ -562,7 +562,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch | |||
562 | list_del(&dev_info->lh); | 562 | list_del(&dev_info->lh); |
563 | 563 | ||
564 | del_gendisk(dev_info->gd); | 564 | del_gendisk(dev_info->gd); |
565 | blk_put_queue(dev_info->dcssblk_queue); | 565 | blk_cleanup_queue(dev_info->dcssblk_queue); |
566 | dev_info->gd->queue = NULL; | 566 | dev_info->gd->queue = NULL; |
567 | put_disk(dev_info->gd); | 567 | put_disk(dev_info->gd); |
568 | device_unregister(&dev_info->dev); | 568 | device_unregister(&dev_info->dev); |
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 1c8b612d8234..3e156e005f2e 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c | |||
@@ -440,7 +440,8 @@ static int __init bbc_i2c_init(void) | |||
440 | struct linux_ebus_device *edev = NULL; | 440 | struct linux_ebus_device *edev = NULL; |
441 | int err, index = 0; | 441 | int err, index = 0; |
442 | 442 | ||
443 | if (tlb_type != cheetah || !bbc_present()) | 443 | if ((tlb_type != cheetah && tlb_type != cheetah_plus) || |
444 | !bbc_present()) | ||
444 | return -ENODEV; | 445 | return -ENODEV; |
445 | 446 | ||
446 | for_each_ebus(ebus) { | 447 | for_each_ebus(ebus) { |
@@ -486,3 +487,4 @@ static void bbc_i2c_cleanup(void) | |||
486 | 487 | ||
487 | module_init(bbc_i2c_init); | 488 | module_init(bbc_i2c_init); |
488 | module_exit(bbc_i2c_cleanup); | 489 | module_exit(bbc_i2c_cleanup); |
490 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index b3c561abe3f6..89e5413cc2a3 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -582,6 +582,13 @@ config SERIAL_SUNSAB_CONSOLE | |||
582 | on your Sparc system as the console, you can do so by answering | 582 | on your Sparc system as the console, you can do so by answering |
583 | Y to this option. | 583 | Y to this option. |
584 | 584 | ||
585 | config SERIAL_SUNHV | ||
586 | bool "Sun4v Hypervisor Console support" | ||
587 | depends on SPARC64 | ||
588 | help | ||
589 | This driver supports the console device found on SUN4V Sparc | ||
590 | systems. Say Y if you want to be able to use this device. | ||
591 | |||
585 | config SERIAL_IP22_ZILOG | 592 | config SERIAL_IP22_ZILOG |
586 | tristate "IP22 Zilog8530 serial support" | 593 | tristate "IP22 Zilog8530 serial support" |
587 | depends on SGI_IP22 | 594 | depends on SGI_IP22 |
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile index eaf8e01db198..50c221af9e6d 100644 --- a/drivers/serial/Makefile +++ b/drivers/serial/Makefile | |||
@@ -30,6 +30,7 @@ obj-$(CONFIG_SERIAL_PXA) += pxa.o | |||
30 | obj-$(CONFIG_SERIAL_SA1100) += sa1100.o | 30 | obj-$(CONFIG_SERIAL_SA1100) += sa1100.o |
31 | obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o | 31 | obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o |
32 | obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o | 32 | obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o |
33 | obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o | ||
33 | obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o | 34 | obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o |
34 | obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o | 35 | obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o |
35 | obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o | 36 | obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o |
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c new file mode 100644 index 000000000000..f137804b3133 --- /dev/null +++ b/drivers/serial/sunhv.c | |||
@@ -0,0 +1,550 @@ | |||
1 | /* sunhv.c: Serial driver for SUN4V hypervisor console. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/module.h> | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/tty.h> | ||
10 | #include <linux/tty_flip.h> | ||
11 | #include <linux/major.h> | ||
12 | #include <linux/circ_buf.h> | ||
13 | #include <linux/serial.h> | ||
14 | #include <linux/sysrq.h> | ||
15 | #include <linux/console.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/init.h> | ||
20 | |||
21 | #include <asm/hypervisor.h> | ||
22 | #include <asm/spitfire.h> | ||
23 | #include <asm/vdev.h> | ||
24 | #include <asm/oplib.h> | ||
25 | #include <asm/irq.h> | ||
26 | |||
27 | #if defined(CONFIG_MAGIC_SYSRQ) | ||
28 | #define SUPPORT_SYSRQ | ||
29 | #endif | ||
30 | |||
31 | #include <linux/serial_core.h> | ||
32 | |||
33 | #include "suncore.h" | ||
34 | |||
35 | #define CON_BREAK ((long)-1) | ||
36 | #define CON_HUP ((long)-2) | ||
37 | |||
38 | static inline long hypervisor_con_getchar(long *status) | ||
39 | { | ||
40 | register unsigned long func asm("%o5"); | ||
41 | register unsigned long arg0 asm("%o0"); | ||
42 | register unsigned long arg1 asm("%o1"); | ||
43 | |||
44 | func = HV_FAST_CONS_GETCHAR; | ||
45 | arg0 = 0; | ||
46 | arg1 = 0; | ||
47 | __asm__ __volatile__("ta %6" | ||
48 | : "=&r" (func), "=&r" (arg0), "=&r" (arg1) | ||
49 | : "0" (func), "1" (arg0), "2" (arg1), | ||
50 | "i" (HV_FAST_TRAP)); | ||
51 | |||
52 | *status = arg0; | ||
53 | |||
54 | return (long) arg1; | ||
55 | } | ||
56 | |||
57 | static inline long hypervisor_con_putchar(long ch) | ||
58 | { | ||
59 | register unsigned long func asm("%o5"); | ||
60 | register unsigned long arg0 asm("%o0"); | ||
61 | |||
62 | func = HV_FAST_CONS_PUTCHAR; | ||
63 | arg0 = ch; | ||
64 | __asm__ __volatile__("ta %4" | ||
65 | : "=&r" (func), "=&r" (arg0) | ||
66 | : "0" (func), "1" (arg0), "i" (HV_FAST_TRAP)); | ||
67 | |||
68 | return (long) arg0; | ||
69 | } | ||
70 | |||
71 | #define IGNORE_BREAK 0x1 | ||
72 | #define IGNORE_ALL 0x2 | ||
73 | |||
74 | static int hung_up = 0; | ||
75 | |||
76 | static struct tty_struct *receive_chars(struct uart_port *port, struct pt_regs *regs) | ||
77 | { | ||
78 | struct tty_struct *tty = NULL; | ||
79 | int saw_console_brk = 0; | ||
80 | int limit = 10000; | ||
81 | |||
82 | if (port->info != NULL) /* Unopened serial console */ | ||
83 | tty = port->info->tty; | ||
84 | |||
85 | while (limit-- > 0) { | ||
86 | long status; | ||
87 | long c = hypervisor_con_getchar(&status); | ||
88 | unsigned char flag; | ||
89 | |||
90 | if (status == HV_EWOULDBLOCK) | ||
91 | break; | ||
92 | |||
93 | if (c == CON_BREAK) { | ||
94 | if (uart_handle_break(port)) | ||
95 | continue; | ||
96 | saw_console_brk = 1; | ||
97 | c = 0; | ||
98 | } | ||
99 | |||
100 | if (c == CON_HUP) { | ||
101 | hung_up = 1; | ||
102 | uart_handle_dcd_change(port, 0); | ||
103 | } else if (hung_up) { | ||
104 | hung_up = 0; | ||
105 | uart_handle_dcd_change(port, 1); | ||
106 | } | ||
107 | |||
108 | if (tty == NULL) { | ||
109 | uart_handle_sysrq_char(port, c, regs); | ||
110 | continue; | ||
111 | } | ||
112 | |||
113 | flag = TTY_NORMAL; | ||
114 | port->icount.rx++; | ||
115 | if (c == CON_BREAK) { | ||
116 | port->icount.brk++; | ||
117 | if (uart_handle_break(port)) | ||
118 | continue; | ||
119 | flag = TTY_BREAK; | ||
120 | } | ||
121 | |||
122 | if (uart_handle_sysrq_char(port, c, regs)) | ||
123 | continue; | ||
124 | |||
125 | if ((port->ignore_status_mask & IGNORE_ALL) || | ||
126 | ((port->ignore_status_mask & IGNORE_BREAK) && | ||
127 | (c == CON_BREAK))) | ||
128 | continue; | ||
129 | |||
130 | tty_insert_flip_char(tty, c, flag); | ||
131 | } | ||
132 | |||
133 | if (saw_console_brk) | ||
134 | sun_do_break(); | ||
135 | |||
136 | return tty; | ||
137 | } | ||
138 | |||
139 | static void transmit_chars(struct uart_port *port) | ||
140 | { | ||
141 | struct circ_buf *xmit; | ||
142 | |||
143 | if (!port->info) | ||
144 | return; | ||
145 | |||
146 | xmit = &port->info->xmit; | ||
147 | if (uart_circ_empty(xmit) || uart_tx_stopped(port)) | ||
148 | return; | ||
149 | |||
150 | while (!uart_circ_empty(xmit)) { | ||
151 | long status = hypervisor_con_putchar(xmit->buf[xmit->tail]); | ||
152 | |||
153 | if (status != HV_EOK) | ||
154 | break; | ||
155 | |||
156 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | ||
157 | port->icount.tx++; | ||
158 | } | ||
159 | |||
160 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
161 | uart_write_wakeup(port); | ||
162 | } | ||
163 | |||
164 | static irqreturn_t sunhv_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
165 | { | ||
166 | struct uart_port *port = dev_id; | ||
167 | struct tty_struct *tty; | ||
168 | unsigned long flags; | ||
169 | |||
170 | spin_lock_irqsave(&port->lock, flags); | ||
171 | tty = receive_chars(port, regs); | ||
172 | transmit_chars(port); | ||
173 | spin_unlock_irqrestore(&port->lock, flags); | ||
174 | |||
175 | if (tty) | ||
176 | tty_flip_buffer_push(tty); | ||
177 | |||
178 | return IRQ_HANDLED; | ||
179 | } | ||
180 | |||
181 | /* port->lock is not held. */ | ||
182 | static unsigned int sunhv_tx_empty(struct uart_port *port) | ||
183 | { | ||
184 | /* Transmitter is always empty for us. If the circ buffer | ||
185 | * is non-empty or there is an x_char pending, our caller | ||
186 | * will do the right thing and ignore what we return here. | ||
187 | */ | ||
188 | return TIOCSER_TEMT; | ||
189 | } | ||
190 | |||
191 | /* port->lock held by caller. */ | ||
192 | static void sunhv_set_mctrl(struct uart_port *port, unsigned int mctrl) | ||
193 | { | ||
194 | return; | ||
195 | } | ||
196 | |||
197 | /* port->lock is held by caller and interrupts are disabled. */ | ||
198 | static unsigned int sunhv_get_mctrl(struct uart_port *port) | ||
199 | { | ||
200 | return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS; | ||
201 | } | ||
202 | |||
203 | /* port->lock held by caller. */ | ||
204 | static void sunhv_stop_tx(struct uart_port *port) | ||
205 | { | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | /* port->lock held by caller. */ | ||
210 | static void sunhv_start_tx(struct uart_port *port) | ||
211 | { | ||
212 | struct circ_buf *xmit = &port->info->xmit; | ||
213 | |||
214 | while (!uart_circ_empty(xmit)) { | ||
215 | long status = hypervisor_con_putchar(xmit->buf[xmit->tail]); | ||
216 | |||
217 | if (status != HV_EOK) | ||
218 | break; | ||
219 | |||
220 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | ||
221 | port->icount.tx++; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | /* port->lock is not held. */ | ||
226 | static void sunhv_send_xchar(struct uart_port *port, char ch) | ||
227 | { | ||
228 | unsigned long flags; | ||
229 | int limit = 10000; | ||
230 | |||
231 | spin_lock_irqsave(&port->lock, flags); | ||
232 | |||
233 | while (limit-- > 0) { | ||
234 | long status = hypervisor_con_putchar(ch); | ||
235 | if (status == HV_EOK) | ||
236 | break; | ||
237 | } | ||
238 | |||
239 | spin_unlock_irqrestore(&port->lock, flags); | ||
240 | } | ||
241 | |||
242 | /* port->lock held by caller. */ | ||
243 | static void sunhv_stop_rx(struct uart_port *port) | ||
244 | { | ||
245 | } | ||
246 | |||
247 | /* port->lock held by caller. */ | ||
248 | static void sunhv_enable_ms(struct uart_port *port) | ||
249 | { | ||
250 | } | ||
251 | |||
252 | /* port->lock is not held. */ | ||
253 | static void sunhv_break_ctl(struct uart_port *port, int break_state) | ||
254 | { | ||
255 | if (break_state) { | ||
256 | unsigned long flags; | ||
257 | int limit = 1000000; | ||
258 | |||
259 | spin_lock_irqsave(&port->lock, flags); | ||
260 | |||
261 | while (limit-- > 0) { | ||
262 | long status = hypervisor_con_putchar(CON_BREAK); | ||
263 | if (status == HV_EOK) | ||
264 | break; | ||
265 | udelay(2); | ||
266 | } | ||
267 | |||
268 | spin_unlock_irqrestore(&port->lock, flags); | ||
269 | } | ||
270 | } | ||
271 | |||
272 | /* port->lock is not held. */ | ||
273 | static int sunhv_startup(struct uart_port *port) | ||
274 | { | ||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | /* port->lock is not held. */ | ||
279 | static void sunhv_shutdown(struct uart_port *port) | ||
280 | { | ||
281 | } | ||
282 | |||
283 | /* port->lock is not held. */ | ||
284 | static void sunhv_set_termios(struct uart_port *port, struct termios *termios, | ||
285 | struct termios *old) | ||
286 | { | ||
287 | unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000); | ||
288 | unsigned int quot = uart_get_divisor(port, baud); | ||
289 | unsigned int iflag, cflag; | ||
290 | unsigned long flags; | ||
291 | |||
292 | spin_lock_irqsave(&port->lock, flags); | ||
293 | |||
294 | iflag = termios->c_iflag; | ||
295 | cflag = termios->c_cflag; | ||
296 | |||
297 | port->ignore_status_mask = 0; | ||
298 | if (iflag & IGNBRK) | ||
299 | port->ignore_status_mask |= IGNORE_BREAK; | ||
300 | if ((cflag & CREAD) == 0) | ||
301 | port->ignore_status_mask |= IGNORE_ALL; | ||
302 | |||
303 | /* XXX */ | ||
304 | uart_update_timeout(port, cflag, | ||
305 | (port->uartclk / (16 * quot))); | ||
306 | |||
307 | spin_unlock_irqrestore(&port->lock, flags); | ||
308 | } | ||
309 | |||
310 | static const char *sunhv_type(struct uart_port *port) | ||
311 | { | ||
312 | return "SUN4V HCONS"; | ||
313 | } | ||
314 | |||
315 | static void sunhv_release_port(struct uart_port *port) | ||
316 | { | ||
317 | } | ||
318 | |||
319 | static int sunhv_request_port(struct uart_port *port) | ||
320 | { | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void sunhv_config_port(struct uart_port *port, int flags) | ||
325 | { | ||
326 | } | ||
327 | |||
328 | static int sunhv_verify_port(struct uart_port *port, struct serial_struct *ser) | ||
329 | { | ||
330 | return -EINVAL; | ||
331 | } | ||
332 | |||
333 | static struct uart_ops sunhv_pops = { | ||
334 | .tx_empty = sunhv_tx_empty, | ||
335 | .set_mctrl = sunhv_set_mctrl, | ||
336 | .get_mctrl = sunhv_get_mctrl, | ||
337 | .stop_tx = sunhv_stop_tx, | ||
338 | .start_tx = sunhv_start_tx, | ||
339 | .send_xchar = sunhv_send_xchar, | ||
340 | .stop_rx = sunhv_stop_rx, | ||
341 | .enable_ms = sunhv_enable_ms, | ||
342 | .break_ctl = sunhv_break_ctl, | ||
343 | .startup = sunhv_startup, | ||
344 | .shutdown = sunhv_shutdown, | ||
345 | .set_termios = sunhv_set_termios, | ||
346 | .type = sunhv_type, | ||
347 | .release_port = sunhv_release_port, | ||
348 | .request_port = sunhv_request_port, | ||
349 | .config_port = sunhv_config_port, | ||
350 | .verify_port = sunhv_verify_port, | ||
351 | }; | ||
352 | |||
353 | static struct uart_driver sunhv_reg = { | ||
354 | .owner = THIS_MODULE, | ||
355 | .driver_name = "serial", | ||
356 | .devfs_name = "tts/", | ||
357 | .dev_name = "ttyS", | ||
358 | .major = TTY_MAJOR, | ||
359 | }; | ||
360 | |||
361 | static struct uart_port *sunhv_port; | ||
362 | |||
363 | static inline void sunhv_console_putchar(struct uart_port *port, char c) | ||
364 | { | ||
365 | unsigned long flags; | ||
366 | int limit = 1000000; | ||
367 | |||
368 | spin_lock_irqsave(&port->lock, flags); | ||
369 | |||
370 | while (limit-- > 0) { | ||
371 | long status = hypervisor_con_putchar(c); | ||
372 | if (status == HV_EOK) | ||
373 | break; | ||
374 | udelay(2); | ||
375 | } | ||
376 | |||
377 | spin_unlock_irqrestore(&port->lock, flags); | ||
378 | } | ||
379 | |||
380 | static void sunhv_console_write(struct console *con, const char *s, unsigned n) | ||
381 | { | ||
382 | struct uart_port *port = sunhv_port; | ||
383 | int i; | ||
384 | |||
385 | for (i = 0; i < n; i++) { | ||
386 | if (*s == '\n') | ||
387 | sunhv_console_putchar(port, '\r'); | ||
388 | sunhv_console_putchar(port, *s++); | ||
389 | } | ||
390 | } | ||
391 | |||
392 | static struct console sunhv_console = { | ||
393 | .name = "ttyHV", | ||
394 | .write = sunhv_console_write, | ||
395 | .device = uart_console_device, | ||
396 | .flags = CON_PRINTBUFFER, | ||
397 | .index = -1, | ||
398 | .data = &sunhv_reg, | ||
399 | }; | ||
400 | |||
401 | static inline struct console *SUNHV_CONSOLE(void) | ||
402 | { | ||
403 | if (con_is_present()) | ||
404 | return NULL; | ||
405 | |||
406 | sunhv_console.index = 0; | ||
407 | |||
408 | return &sunhv_console; | ||
409 | } | ||
410 | |||
411 | static int __init hv_console_compatible(char *buf, int len) | ||
412 | { | ||
413 | while (len) { | ||
414 | int this_len; | ||
415 | |||
416 | if (!strcmp(buf, "qcn")) | ||
417 | return 1; | ||
418 | |||
419 | this_len = strlen(buf) + 1; | ||
420 | |||
421 | buf += this_len; | ||
422 | len -= this_len; | ||
423 | } | ||
424 | |||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | static unsigned int __init get_interrupt(void) | ||
429 | { | ||
430 | const char *cons_str = "console"; | ||
431 | const char *compat_str = "compatible"; | ||
432 | int node = prom_getchild(sun4v_vdev_root); | ||
433 | char buf[64]; | ||
434 | int err, len; | ||
435 | |||
436 | node = prom_searchsiblings(node, cons_str); | ||
437 | if (!node) | ||
438 | return 0; | ||
439 | |||
440 | len = prom_getproplen(node, compat_str); | ||
441 | if (len == 0 || len == -1) | ||
442 | return 0; | ||
443 | |||
444 | err = prom_getproperty(node, compat_str, buf, 64); | ||
445 | if (err == -1) | ||
446 | return 0; | ||
447 | |||
448 | if (!hv_console_compatible(buf, len)) | ||
449 | return 0; | ||
450 | |||
451 | /* Ok, the this is the OBP node for the sun4v hypervisor | ||
452 | * console device. Decode the interrupt. | ||
453 | */ | ||
454 | return sun4v_vdev_device_interrupt(node); | ||
455 | } | ||
456 | |||
457 | static int __init sunhv_init(void) | ||
458 | { | ||
459 | struct uart_port *port; | ||
460 | int ret; | ||
461 | |||
462 | if (tlb_type != hypervisor) | ||
463 | return -ENODEV; | ||
464 | |||
465 | port = kmalloc(sizeof(struct uart_port), GFP_KERNEL); | ||
466 | if (unlikely(!port)) | ||
467 | return -ENOMEM; | ||
468 | |||
469 | memset(port, 0, sizeof(struct uart_port)); | ||
470 | |||
471 | port->line = 0; | ||
472 | port->ops = &sunhv_pops; | ||
473 | port->type = PORT_SUNHV; | ||
474 | port->uartclk = ( 29491200 / 16 ); /* arbitrary */ | ||
475 | |||
476 | /* Set this just to make uart_configure_port() happy. */ | ||
477 | port->membase = (unsigned char __iomem *) __pa(port); | ||
478 | |||
479 | port->irq = get_interrupt(); | ||
480 | if (!port->irq) { | ||
481 | kfree(port); | ||
482 | return -ENODEV; | ||
483 | } | ||
484 | |||
485 | sunhv_reg.minor = sunserial_current_minor; | ||
486 | sunhv_reg.nr = 1; | ||
487 | |||
488 | ret = uart_register_driver(&sunhv_reg); | ||
489 | if (ret < 0) { | ||
490 | printk(KERN_ERR "SUNHV: uart_register_driver() failed %d\n", | ||
491 | ret); | ||
492 | kfree(port); | ||
493 | |||
494 | return ret; | ||
495 | } | ||
496 | |||
497 | sunhv_reg.tty_driver->name_base = sunhv_reg.minor - 64; | ||
498 | sunserial_current_minor += 1; | ||
499 | |||
500 | sunhv_reg.cons = SUNHV_CONSOLE(); | ||
501 | |||
502 | sunhv_port = port; | ||
503 | |||
504 | ret = uart_add_one_port(&sunhv_reg, port); | ||
505 | if (ret < 0) { | ||
506 | printk(KERN_ERR "SUNHV: uart_add_one_port() failed %d\n", ret); | ||
507 | sunserial_current_minor -= 1; | ||
508 | uart_unregister_driver(&sunhv_reg); | ||
509 | kfree(port); | ||
510 | sunhv_port = NULL; | ||
511 | return -ENODEV; | ||
512 | } | ||
513 | |||
514 | if (request_irq(port->irq, sunhv_interrupt, | ||
515 | SA_SHIRQ, "serial(sunhv)", port)) { | ||
516 | printk(KERN_ERR "sunhv: Cannot register IRQ\n"); | ||
517 | uart_remove_one_port(&sunhv_reg, port); | ||
518 | sunserial_current_minor -= 1; | ||
519 | uart_unregister_driver(&sunhv_reg); | ||
520 | kfree(port); | ||
521 | sunhv_port = NULL; | ||
522 | return -ENODEV; | ||
523 | } | ||
524 | |||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | static void __exit sunhv_exit(void) | ||
529 | { | ||
530 | struct uart_port *port = sunhv_port; | ||
531 | |||
532 | BUG_ON(!port); | ||
533 | |||
534 | free_irq(port->irq, port); | ||
535 | |||
536 | uart_remove_one_port(&sunhv_reg, port); | ||
537 | sunserial_current_minor -= 1; | ||
538 | |||
539 | uart_unregister_driver(&sunhv_reg); | ||
540 | |||
541 | kfree(sunhv_port); | ||
542 | sunhv_port = NULL; | ||
543 | } | ||
544 | |||
545 | module_init(sunhv_init); | ||
546 | module_exit(sunhv_exit); | ||
547 | |||
548 | MODULE_AUTHOR("David S. Miller"); | ||
549 | MODULE_DESCRIPTION("SUN4V Hypervisor console driver") | ||
550 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index 85664228a0b6..a2fb0c2fb121 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c | |||
@@ -955,14 +955,13 @@ static struct console sunsab_console = { | |||
955 | .index = -1, | 955 | .index = -1, |
956 | .data = &sunsab_reg, | 956 | .data = &sunsab_reg, |
957 | }; | 957 | }; |
958 | #define SUNSAB_CONSOLE (&sunsab_console) | ||
959 | 958 | ||
960 | static void __init sunsab_console_init(void) | 959 | static inline struct console *SUNSAB_CONSOLE(void) |
961 | { | 960 | { |
962 | int i; | 961 | int i; |
963 | 962 | ||
964 | if (con_is_present()) | 963 | if (con_is_present()) |
965 | return; | 964 | return NULL; |
966 | 965 | ||
967 | for (i = 0; i < num_channels; i++) { | 966 | for (i = 0; i < num_channels; i++) { |
968 | int this_minor = sunsab_reg.minor + i; | 967 | int this_minor = sunsab_reg.minor + i; |
@@ -971,13 +970,14 @@ static void __init sunsab_console_init(void) | |||
971 | break; | 970 | break; |
972 | } | 971 | } |
973 | if (i == num_channels) | 972 | if (i == num_channels) |
974 | return; | 973 | return NULL; |
975 | 974 | ||
976 | sunsab_console.index = i; | 975 | sunsab_console.index = i; |
977 | register_console(&sunsab_console); | 976 | |
977 | return &sunsab_console; | ||
978 | } | 978 | } |
979 | #else | 979 | #else |
980 | #define SUNSAB_CONSOLE (NULL) | 980 | #define SUNSAB_CONSOLE() (NULL) |
981 | #define sunsab_console_init() do { } while (0) | 981 | #define sunsab_console_init() do { } while (0) |
982 | #endif | 982 | #endif |
983 | 983 | ||
@@ -1124,7 +1124,6 @@ static int __init sunsab_init(void) | |||
1124 | 1124 | ||
1125 | sunsab_reg.minor = sunserial_current_minor; | 1125 | sunsab_reg.minor = sunserial_current_minor; |
1126 | sunsab_reg.nr = num_channels; | 1126 | sunsab_reg.nr = num_channels; |
1127 | sunsab_reg.cons = SUNSAB_CONSOLE; | ||
1128 | 1127 | ||
1129 | ret = uart_register_driver(&sunsab_reg); | 1128 | ret = uart_register_driver(&sunsab_reg); |
1130 | if (ret < 0) { | 1129 | if (ret < 0) { |
@@ -1143,10 +1142,12 @@ static int __init sunsab_init(void) | |||
1143 | return ret; | 1142 | return ret; |
1144 | } | 1143 | } |
1145 | 1144 | ||
1145 | sunsab_reg.tty_driver->name_base = sunsab_reg.minor - 64; | ||
1146 | |||
1147 | sunsab_reg.cons = SUNSAB_CONSOLE(); | ||
1148 | |||
1146 | sunserial_current_minor += num_channels; | 1149 | sunserial_current_minor += num_channels; |
1147 | 1150 | ||
1148 | sunsab_console_init(); | ||
1149 | |||
1150 | for (i = 0; i < num_channels; i++) { | 1151 | for (i = 0; i < num_channels; i++) { |
1151 | struct uart_sunsab_port *up = &sunsab_ports[i]; | 1152 | struct uart_sunsab_port *up = &sunsab_ports[i]; |
1152 | 1153 | ||
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 4e453fa966ae..46c44b83f57c 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c | |||
@@ -1280,6 +1280,7 @@ static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up, int channel) | |||
1280 | struct serio *serio; | 1280 | struct serio *serio; |
1281 | #endif | 1281 | #endif |
1282 | 1282 | ||
1283 | spin_lock_init(&up->port.lock); | ||
1283 | up->port.line = channel; | 1284 | up->port.line = channel; |
1284 | up->port.type = PORT_UNKNOWN; | 1285 | up->port.type = PORT_UNKNOWN; |
1285 | up->port.uartclk = (SU_BASE_BAUD * 16); | 1286 | up->port.uartclk = (SU_BASE_BAUD * 16); |
@@ -1464,18 +1465,17 @@ static struct console sunsu_cons = { | |||
1464 | .index = -1, | 1465 | .index = -1, |
1465 | .data = &sunsu_reg, | 1466 | .data = &sunsu_reg, |
1466 | }; | 1467 | }; |
1467 | #define SUNSU_CONSOLE (&sunsu_cons) | ||
1468 | 1468 | ||
1469 | /* | 1469 | /* |
1470 | * Register console. | 1470 | * Register console. |
1471 | */ | 1471 | */ |
1472 | 1472 | ||
1473 | static int __init sunsu_serial_console_init(void) | 1473 | static inline struct console *SUNSU_CONSOLE(void) |
1474 | { | 1474 | { |
1475 | int i; | 1475 | int i; |
1476 | 1476 | ||
1477 | if (con_is_present()) | 1477 | if (con_is_present()) |
1478 | return 0; | 1478 | return NULL; |
1479 | 1479 | ||
1480 | for (i = 0; i < UART_NR; i++) { | 1480 | for (i = 0; i < UART_NR; i++) { |
1481 | int this_minor = sunsu_reg.minor + i; | 1481 | int this_minor = sunsu_reg.minor + i; |
@@ -1484,16 +1484,16 @@ static int __init sunsu_serial_console_init(void) | |||
1484 | break; | 1484 | break; |
1485 | } | 1485 | } |
1486 | if (i == UART_NR) | 1486 | if (i == UART_NR) |
1487 | return 0; | 1487 | return NULL; |
1488 | if (sunsu_ports[i].port_node == 0) | 1488 | if (sunsu_ports[i].port_node == 0) |
1489 | return 0; | 1489 | return NULL; |
1490 | 1490 | ||
1491 | sunsu_cons.index = i; | 1491 | sunsu_cons.index = i; |
1492 | register_console(&sunsu_cons); | 1492 | |
1493 | return 0; | 1493 | return &sunsu_cons; |
1494 | } | 1494 | } |
1495 | #else | 1495 | #else |
1496 | #define SUNSU_CONSOLE (NULL) | 1496 | #define SUNSU_CONSOLE() (NULL) |
1497 | #define sunsu_serial_console_init() do { } while (0) | 1497 | #define sunsu_serial_console_init() do { } while (0) |
1498 | #endif | 1498 | #endif |
1499 | 1499 | ||
@@ -1510,6 +1510,7 @@ static int __init sunsu_serial_init(void) | |||
1510 | up->su_type == SU_PORT_KBD) | 1510 | up->su_type == SU_PORT_KBD) |
1511 | continue; | 1511 | continue; |
1512 | 1512 | ||
1513 | spin_lock_init(&up->port.lock); | ||
1513 | up->port.flags |= UPF_BOOT_AUTOCONF; | 1514 | up->port.flags |= UPF_BOOT_AUTOCONF; |
1514 | up->port.type = PORT_UNKNOWN; | 1515 | up->port.type = PORT_UNKNOWN; |
1515 | up->port.uartclk = (SU_BASE_BAUD * 16); | 1516 | up->port.uartclk = (SU_BASE_BAUD * 16); |
@@ -1523,16 +1524,19 @@ static int __init sunsu_serial_init(void) | |||
1523 | } | 1524 | } |
1524 | 1525 | ||
1525 | sunsu_reg.minor = sunserial_current_minor; | 1526 | sunsu_reg.minor = sunserial_current_minor; |
1526 | sunserial_current_minor += instance; | ||
1527 | 1527 | ||
1528 | sunsu_reg.nr = instance; | 1528 | sunsu_reg.nr = instance; |
1529 | sunsu_reg.cons = SUNSU_CONSOLE; | ||
1530 | 1529 | ||
1531 | ret = uart_register_driver(&sunsu_reg); | 1530 | ret = uart_register_driver(&sunsu_reg); |
1532 | if (ret < 0) | 1531 | if (ret < 0) |
1533 | return ret; | 1532 | return ret; |
1534 | 1533 | ||
1535 | sunsu_serial_console_init(); | 1534 | sunsu_reg.tty_driver->name_base = sunsu_reg.minor - 64; |
1535 | |||
1536 | sunserial_current_minor += instance; | ||
1537 | |||
1538 | sunsu_reg.cons = SUNSU_CONSOLE(); | ||
1539 | |||
1536 | for (i = 0; i < UART_NR; i++) { | 1540 | for (i = 0; i < UART_NR; i++) { |
1537 | struct uart_sunsu_port *up = &sunsu_ports[i]; | 1541 | struct uart_sunsu_port *up = &sunsu_ports[i]; |
1538 | 1542 | ||
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index 5cc4d4c2935c..10b35c6f287d 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c | |||
@@ -1390,7 +1390,6 @@ static struct console sunzilog_console = { | |||
1390 | .index = -1, | 1390 | .index = -1, |
1391 | .data = &sunzilog_reg, | 1391 | .data = &sunzilog_reg, |
1392 | }; | 1392 | }; |
1393 | #define SUNZILOG_CONSOLE (&sunzilog_console) | ||
1394 | 1393 | ||
1395 | static int __init sunzilog_console_init(void) | 1394 | static int __init sunzilog_console_init(void) |
1396 | { | 1395 | { |
@@ -1413,8 +1412,31 @@ static int __init sunzilog_console_init(void) | |||
1413 | register_console(&sunzilog_console); | 1412 | register_console(&sunzilog_console); |
1414 | return 0; | 1413 | return 0; |
1415 | } | 1414 | } |
1415 | |||
1416 | static inline struct console *SUNZILOG_CONSOLE(void) | ||
1417 | { | ||
1418 | int i; | ||
1419 | |||
1420 | if (con_is_present()) | ||
1421 | return NULL; | ||
1422 | |||
1423 | for (i = 0; i < NUM_CHANNELS; i++) { | ||
1424 | int this_minor = sunzilog_reg.minor + i; | ||
1425 | |||
1426 | if ((this_minor - 64) == (serial_console - 1)) | ||
1427 | break; | ||
1428 | } | ||
1429 | if (i == NUM_CHANNELS) | ||
1430 | return NULL; | ||
1431 | |||
1432 | sunzilog_console.index = i; | ||
1433 | sunzilog_port_table[i].flags |= SUNZILOG_FLAG_IS_CONS; | ||
1434 | |||
1435 | return &sunzilog_console; | ||
1436 | } | ||
1437 | |||
1416 | #else | 1438 | #else |
1417 | #define SUNZILOG_CONSOLE (NULL) | 1439 | #define SUNZILOG_CONSOLE() (NULL) |
1418 | #define sunzilog_console_init() do { } while (0) | 1440 | #define sunzilog_console_init() do { } while (0) |
1419 | #endif | 1441 | #endif |
1420 | 1442 | ||
@@ -1666,14 +1688,15 @@ static int __init sunzilog_ports_init(void) | |||
1666 | } | 1688 | } |
1667 | 1689 | ||
1668 | sunzilog_reg.nr = uart_count; | 1690 | sunzilog_reg.nr = uart_count; |
1669 | sunzilog_reg.cons = SUNZILOG_CONSOLE; | ||
1670 | |||
1671 | sunzilog_reg.minor = sunserial_current_minor; | 1691 | sunzilog_reg.minor = sunserial_current_minor; |
1672 | sunserial_current_minor += uart_count; | ||
1673 | 1692 | ||
1674 | ret = uart_register_driver(&sunzilog_reg); | 1693 | ret = uart_register_driver(&sunzilog_reg); |
1675 | if (ret == 0) { | 1694 | if (ret == 0) { |
1676 | sunzilog_console_init(); | 1695 | sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64; |
1696 | sunzilog_reg.cons = SUNZILOG_CONSOLE(); | ||
1697 | |||
1698 | sunserial_current_minor += uart_count; | ||
1699 | |||
1677 | for (i = 0; i < NUM_CHANNELS; i++) { | 1700 | for (i = 0; i < NUM_CHANNELS; i++) { |
1678 | struct uart_sunzilog_port *up = &sunzilog_port_table[i]; | 1701 | struct uart_sunzilog_port *up = &sunzilog_port_table[i]; |
1679 | 1702 | ||
diff --git a/fs/jfs/Makefile b/fs/jfs/Makefile index 6f1e0e95587a..3adb6395e42d 100644 --- a/fs/jfs/Makefile +++ b/fs/jfs/Makefile | |||
@@ -8,7 +8,8 @@ jfs-y := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \ | |||
8 | jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \ | 8 | jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \ |
9 | jfs_unicode.o jfs_dtree.o jfs_inode.o \ | 9 | jfs_unicode.o jfs_dtree.o jfs_inode.o \ |
10 | jfs_extent.o symlink.o jfs_metapage.o \ | 10 | jfs_extent.o symlink.o jfs_metapage.o \ |
11 | jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o resize.o xattr.o | 11 | jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o \ |
12 | resize.o xattr.o ioctl.o | ||
12 | 13 | ||
13 | jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o | 14 | jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o |
14 | 15 | ||
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 461e4934ca7c..e2281300979c 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c | |||
@@ -183,6 +183,9 @@ cleanup: | |||
183 | posix_acl_release(acl); | 183 | posix_acl_release(acl); |
184 | } else | 184 | } else |
185 | inode->i_mode &= ~current->fs->umask; | 185 | inode->i_mode &= ~current->fs->umask; |
186 | |||
187 | JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) | | ||
188 | inode->i_mode; | ||
186 | 189 | ||
187 | return rc; | 190 | return rc; |
188 | } | 191 | } |
@@ -207,12 +210,12 @@ static int jfs_acl_chmod(struct inode *inode) | |||
207 | rc = posix_acl_chmod_masq(clone, inode->i_mode); | 210 | rc = posix_acl_chmod_masq(clone, inode->i_mode); |
208 | if (!rc) { | 211 | if (!rc) { |
209 | tid_t tid = txBegin(inode->i_sb, 0); | 212 | tid_t tid = txBegin(inode->i_sb, 0); |
210 | down(&JFS_IP(inode)->commit_sem); | 213 | mutex_lock(&JFS_IP(inode)->commit_mutex); |
211 | rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone); | 214 | rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone); |
212 | if (!rc) | 215 | if (!rc) |
213 | rc = txCommit(tid, 1, &inode, 0); | 216 | rc = txCommit(tid, 1, &inode, 0); |
214 | txEnd(tid); | 217 | txEnd(tid); |
215 | up(&JFS_IP(inode)->commit_sem); | 218 | mutex_unlock(&JFS_IP(inode)->commit_mutex); |
216 | } | 219 | } |
217 | 220 | ||
218 | posix_acl_release(clone); | 221 | posix_acl_release(clone); |
diff --git a/fs/jfs/file.c b/fs/jfs/file.c index c2c19c9ed9a4..e1ac6e497e2b 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c | |||
@@ -113,4 +113,5 @@ struct file_operations jfs_file_operations = { | |||
113 | .sendfile = generic_file_sendfile, | 113 | .sendfile = generic_file_sendfile, |
114 | .fsync = jfs_fsync, | 114 | .fsync = jfs_fsync, |
115 | .release = jfs_release, | 115 | .release = jfs_release, |
116 | .ioctl = jfs_ioctl, | ||
116 | }; | 117 | }; |
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 9f942ca8e4e3..51a5fed90cca 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
@@ -55,6 +55,7 @@ void jfs_read_inode(struct inode *inode) | |||
55 | inode->i_op = &jfs_file_inode_operations; | 55 | inode->i_op = &jfs_file_inode_operations; |
56 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | 56 | init_special_inode(inode, inode->i_mode, inode->i_rdev); |
57 | } | 57 | } |
58 | jfs_set_inode_flags(inode); | ||
58 | } | 59 | } |
59 | 60 | ||
60 | /* | 61 | /* |
@@ -89,16 +90,16 @@ int jfs_commit_inode(struct inode *inode, int wait) | |||
89 | } | 90 | } |
90 | 91 | ||
91 | tid = txBegin(inode->i_sb, COMMIT_INODE); | 92 | tid = txBegin(inode->i_sb, COMMIT_INODE); |
92 | down(&JFS_IP(inode)->commit_sem); | 93 | mutex_lock(&JFS_IP(inode)->commit_mutex); |
93 | 94 | ||
94 | /* | 95 | /* |
95 | * Retest inode state after taking commit_sem | 96 | * Retest inode state after taking commit_mutex |
96 | */ | 97 | */ |
97 | if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode)) | 98 | if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode)) |
98 | rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0); | 99 | rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0); |
99 | 100 | ||
100 | txEnd(tid); | 101 | txEnd(tid); |
101 | up(&JFS_IP(inode)->commit_sem); | 102 | mutex_unlock(&JFS_IP(inode)->commit_mutex); |
102 | return rc; | 103 | return rc; |
103 | } | 104 | } |
104 | 105 | ||
@@ -335,18 +336,18 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length) | |||
335 | tid = txBegin(ip->i_sb, 0); | 336 | tid = txBegin(ip->i_sb, 0); |
336 | 337 | ||
337 | /* | 338 | /* |
338 | * The commit_sem cannot be taken before txBegin. | 339 | * The commit_mutex cannot be taken before txBegin. |
339 | * txBegin may block and there is a chance the inode | 340 | * txBegin may block and there is a chance the inode |
340 | * could be marked dirty and need to be committed | 341 | * could be marked dirty and need to be committed |
341 | * before txBegin unblocks | 342 | * before txBegin unblocks |
342 | */ | 343 | */ |
343 | down(&JFS_IP(ip)->commit_sem); | 344 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
344 | 345 | ||
345 | newsize = xtTruncate(tid, ip, length, | 346 | newsize = xtTruncate(tid, ip, length, |
346 | COMMIT_TRUNCATE | COMMIT_PWMAP); | 347 | COMMIT_TRUNCATE | COMMIT_PWMAP); |
347 | if (newsize < 0) { | 348 | if (newsize < 0) { |
348 | txEnd(tid); | 349 | txEnd(tid); |
349 | up(&JFS_IP(ip)->commit_sem); | 350 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
350 | break; | 351 | break; |
351 | } | 352 | } |
352 | 353 | ||
@@ -355,7 +356,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length) | |||
355 | 356 | ||
356 | txCommit(tid, 1, &ip, 0); | 357 | txCommit(tid, 1, &ip, 0); |
357 | txEnd(tid); | 358 | txEnd(tid); |
358 | up(&JFS_IP(ip)->commit_sem); | 359 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
359 | } while (newsize > length); /* Truncate isn't always atomic */ | 360 | } while (newsize > length); /* Truncate isn't always atomic */ |
360 | } | 361 | } |
361 | 362 | ||
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c new file mode 100644 index 000000000000..67b3774820eb --- /dev/null +++ b/fs/jfs/ioctl.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * linux/fs/jfs/ioctl.c | ||
3 | * | ||
4 | * Copyright (C) 2006 Herbert Poetzl | ||
5 | * adapted from Remy Card's ext2/ioctl.c | ||
6 | */ | ||
7 | |||
8 | #include <linux/fs.h> | ||
9 | #include <linux/ext2_fs.h> | ||
10 | #include <linux/ctype.h> | ||
11 | #include <linux/capability.h> | ||
12 | #include <linux/time.h> | ||
13 | #include <asm/current.h> | ||
14 | #include <asm/uaccess.h> | ||
15 | |||
16 | #include "jfs_incore.h" | ||
17 | #include "jfs_dinode.h" | ||
18 | #include "jfs_inode.h" | ||
19 | |||
20 | |||
21 | static struct { | ||
22 | long jfs_flag; | ||
23 | long ext2_flag; | ||
24 | } jfs_map[] = { | ||
25 | {JFS_NOATIME_FL, EXT2_NOATIME_FL}, | ||
26 | {JFS_DIRSYNC_FL, EXT2_DIRSYNC_FL}, | ||
27 | {JFS_SYNC_FL, EXT2_SYNC_FL}, | ||
28 | {JFS_SECRM_FL, EXT2_SECRM_FL}, | ||
29 | {JFS_UNRM_FL, EXT2_UNRM_FL}, | ||
30 | {JFS_APPEND_FL, EXT2_APPEND_FL}, | ||
31 | {JFS_IMMUTABLE_FL, EXT2_IMMUTABLE_FL}, | ||
32 | {0, 0}, | ||
33 | }; | ||
34 | |||
35 | static long jfs_map_ext2(unsigned long flags, int from) | ||
36 | { | ||
37 | int index=0; | ||
38 | long mapped=0; | ||
39 | |||
40 | while (jfs_map[index].jfs_flag) { | ||
41 | if (from) { | ||
42 | if (jfs_map[index].ext2_flag & flags) | ||
43 | mapped |= jfs_map[index].jfs_flag; | ||
44 | } else { | ||
45 | if (jfs_map[index].jfs_flag & flags) | ||
46 | mapped |= jfs_map[index].ext2_flag; | ||
47 | } | ||
48 | index++; | ||
49 | } | ||
50 | return mapped; | ||
51 | } | ||
52 | |||
53 | |||
54 | int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, | ||
55 | unsigned long arg) | ||
56 | { | ||
57 | struct jfs_inode_info *jfs_inode = JFS_IP(inode); | ||
58 | unsigned int flags; | ||
59 | |||
60 | switch (cmd) { | ||
61 | case JFS_IOC_GETFLAGS: | ||
62 | flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE; | ||
63 | flags = jfs_map_ext2(flags, 0); | ||
64 | return put_user(flags, (int __user *) arg); | ||
65 | case JFS_IOC_SETFLAGS: { | ||
66 | unsigned int oldflags; | ||
67 | |||
68 | if (IS_RDONLY(inode)) | ||
69 | return -EROFS; | ||
70 | |||
71 | if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) | ||
72 | return -EACCES; | ||
73 | |||
74 | if (get_user(flags, (int __user *) arg)) | ||
75 | return -EFAULT; | ||
76 | |||
77 | flags = jfs_map_ext2(flags, 1); | ||
78 | if (!S_ISDIR(inode->i_mode)) | ||
79 | flags &= ~JFS_DIRSYNC_FL; | ||
80 | |||
81 | oldflags = jfs_inode->mode2; | ||
82 | |||
83 | /* | ||
84 | * The IMMUTABLE and APPEND_ONLY flags can only be changed by | ||
85 | * the relevant capability. | ||
86 | */ | ||
87 | if ((oldflags & JFS_IMMUTABLE_FL) || | ||
88 | ((flags ^ oldflags) & | ||
89 | (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) { | ||
90 | if (!capable(CAP_LINUX_IMMUTABLE)) | ||
91 | return -EPERM; | ||
92 | } | ||
93 | |||
94 | flags = flags & JFS_FL_USER_MODIFIABLE; | ||
95 | flags |= oldflags & ~JFS_FL_USER_MODIFIABLE; | ||
96 | jfs_inode->mode2 = flags; | ||
97 | |||
98 | jfs_set_inode_flags(inode); | ||
99 | inode->i_ctime = CURRENT_TIME_SEC; | ||
100 | mark_inode_dirty(inode); | ||
101 | return 0; | ||
102 | } | ||
103 | default: | ||
104 | return -ENOTTY; | ||
105 | } | ||
106 | } | ||
107 | |||
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h index 580a3258449b..9f2572aea561 100644 --- a/fs/jfs/jfs_dinode.h +++ b/fs/jfs/jfs_dinode.h | |||
@@ -139,13 +139,36 @@ struct dinode { | |||
139 | 139 | ||
140 | /* more extended mode bits: attributes for OS/2 */ | 140 | /* more extended mode bits: attributes for OS/2 */ |
141 | #define IREADONLY 0x02000000 /* no write access to file */ | 141 | #define IREADONLY 0x02000000 /* no write access to file */ |
142 | #define IARCHIVE 0x40000000 /* file archive bit */ | ||
143 | #define ISYSTEM 0x08000000 /* system file */ | ||
144 | #define IHIDDEN 0x04000000 /* hidden file */ | 142 | #define IHIDDEN 0x04000000 /* hidden file */ |
145 | #define IRASH 0x4E000000 /* mask for changeable attributes */ | 143 | #define ISYSTEM 0x08000000 /* system file */ |
146 | #define INEWNAME 0x80000000 /* non-8.3 filename format */ | 144 | |
147 | #define IDIRECTORY 0x20000000 /* directory (shadow of real bit) */ | 145 | #define IDIRECTORY 0x20000000 /* directory (shadow of real bit) */ |
146 | #define IARCHIVE 0x40000000 /* file archive bit */ | ||
147 | #define INEWNAME 0x80000000 /* non-8.3 filename format */ | ||
148 | |||
149 | #define IRASH 0x4E000000 /* mask for changeable attributes */ | ||
148 | #define ATTRSHIFT 25 /* bits to shift to move attribute | 150 | #define ATTRSHIFT 25 /* bits to shift to move attribute |
149 | specification to mode position */ | 151 | specification to mode position */ |
150 | 152 | ||
153 | /* extended attributes for Linux */ | ||
154 | |||
155 | #define JFS_NOATIME_FL 0x00080000 /* do not update atime */ | ||
156 | |||
157 | #define JFS_DIRSYNC_FL 0x00100000 /* dirsync behaviour */ | ||
158 | #define JFS_SYNC_FL 0x00200000 /* Synchronous updates */ | ||
159 | #define JFS_SECRM_FL 0x00400000 /* Secure deletion */ | ||
160 | #define JFS_UNRM_FL 0x00800000 /* allow for undelete */ | ||
161 | |||
162 | #define JFS_APPEND_FL 0x01000000 /* writes to file may only append */ | ||
163 | #define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */ | ||
164 | |||
165 | #define JFS_FL_USER_VISIBLE 0x03F80000 | ||
166 | #define JFS_FL_USER_MODIFIABLE 0x03F80000 | ||
167 | #define JFS_FL_INHERIT 0x03C80000 | ||
168 | |||
169 | /* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */ | ||
170 | #define JFS_IOC_GETFLAGS _IOR('f', 1, long) | ||
171 | #define JFS_IOC_SETFLAGS _IOW('f', 2, long) | ||
172 | |||
173 | |||
151 | #endif /*_H_JFS_DINODE */ | 174 | #endif /*_H_JFS_DINODE */ |
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 79b5404db100..c161c98954e0 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c | |||
@@ -64,9 +64,9 @@ | |||
64 | * to the persistent bitmaps in dmaps) is guarded by (busy) buffers. | 64 | * to the persistent bitmaps in dmaps) is guarded by (busy) buffers. |
65 | */ | 65 | */ |
66 | 66 | ||
67 | #define BMAP_LOCK_INIT(bmp) init_MUTEX(&bmp->db_bmaplock) | 67 | #define BMAP_LOCK_INIT(bmp) mutex_init(&bmp->db_bmaplock) |
68 | #define BMAP_LOCK(bmp) down(&bmp->db_bmaplock) | 68 | #define BMAP_LOCK(bmp) mutex_lock(&bmp->db_bmaplock) |
69 | #define BMAP_UNLOCK(bmp) up(&bmp->db_bmaplock) | 69 | #define BMAP_UNLOCK(bmp) mutex_unlock(&bmp->db_bmaplock) |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * forward references | 72 | * forward references |
@@ -125,7 +125,7 @@ static int dbGetL2AGSize(s64 nblocks); | |||
125 | * into the table, with the table elements yielding the maximum | 125 | * into the table, with the table elements yielding the maximum |
126 | * binary buddy of free bits within the character. | 126 | * binary buddy of free bits within the character. |
127 | */ | 127 | */ |
128 | static s8 budtab[256] = { | 128 | static const s8 budtab[256] = { |
129 | 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 129 | 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
130 | 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, | 130 | 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
131 | 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, | 131 | 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h index 32e25884e7e8..8b14cc8e0228 100644 --- a/fs/jfs/jfs_dmap.h +++ b/fs/jfs/jfs_dmap.h | |||
@@ -243,7 +243,7 @@ struct dbmap { | |||
243 | struct bmap { | 243 | struct bmap { |
244 | struct dbmap db_bmap; /* on-disk aggregate map descriptor */ | 244 | struct dbmap db_bmap; /* on-disk aggregate map descriptor */ |
245 | struct inode *db_ipbmap; /* ptr to aggregate map incore inode */ | 245 | struct inode *db_ipbmap; /* ptr to aggregate map incore inode */ |
246 | struct semaphore db_bmaplock; /* aggregate map lock */ | 246 | struct mutex db_bmaplock; /* aggregate map lock */ |
247 | atomic_t db_active[MAXAG]; /* count of active, open files in AG */ | 247 | atomic_t db_active[MAXAG]; /* count of active, open files in AG */ |
248 | u32 *db_DBmap; | 248 | u32 *db_DBmap; |
249 | }; | 249 | }; |
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 404f33eae507..6c3f08319846 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c | |||
@@ -1005,6 +1005,9 @@ static int dtSplitUp(tid_t tid, | |||
1005 | 1005 | ||
1006 | DT_PUTPAGE(smp); | 1006 | DT_PUTPAGE(smp); |
1007 | 1007 | ||
1008 | if (!DO_INDEX(ip)) | ||
1009 | ip->i_size = xlen << sbi->l2bsize; | ||
1010 | |||
1008 | goto freeKeyName; | 1011 | goto freeKeyName; |
1009 | } | 1012 | } |
1010 | 1013 | ||
@@ -1055,7 +1058,9 @@ static int dtSplitUp(tid_t tid, | |||
1055 | xaddr = addressPXD(pxd) + xlen; | 1058 | xaddr = addressPXD(pxd) + xlen; |
1056 | dbFree(ip, xaddr, (s64) n); | 1059 | dbFree(ip, xaddr, (s64) n); |
1057 | } | 1060 | } |
1058 | } | 1061 | } else if (!DO_INDEX(ip)) |
1062 | ip->i_size = lengthPXD(pxd) << sbi->l2bsize; | ||
1063 | |||
1059 | 1064 | ||
1060 | extendOut: | 1065 | extendOut: |
1061 | DT_PUTPAGE(smp); | 1066 | DT_PUTPAGE(smp); |
@@ -1098,6 +1103,9 @@ static int dtSplitUp(tid_t tid, | |||
1098 | goto splitOut; | 1103 | goto splitOut; |
1099 | } | 1104 | } |
1100 | 1105 | ||
1106 | if (!DO_INDEX(ip)) | ||
1107 | ip->i_size += PSIZE; | ||
1108 | |||
1101 | /* | 1109 | /* |
1102 | * propagate up the router entry for the leaf page just split | 1110 | * propagate up the router entry for the leaf page just split |
1103 | * | 1111 | * |
@@ -2424,6 +2432,9 @@ static int dtDeleteUp(tid_t tid, struct inode *ip, | |||
2424 | break; | 2432 | break; |
2425 | } | 2433 | } |
2426 | 2434 | ||
2435 | if (!DO_INDEX(ip)) | ||
2436 | ip->i_size -= PSIZE; | ||
2437 | |||
2427 | return 0; | 2438 | return 0; |
2428 | } | 2439 | } |
2429 | 2440 | ||
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c index 4879603daa1c..5549378358bf 100644 --- a/fs/jfs/jfs_extent.c +++ b/fs/jfs/jfs_extent.c | |||
@@ -94,7 +94,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr) | |||
94 | txBeginAnon(ip->i_sb); | 94 | txBeginAnon(ip->i_sb); |
95 | 95 | ||
96 | /* Avoid race with jfs_commit_inode() */ | 96 | /* Avoid race with jfs_commit_inode() */ |
97 | down(&JFS_IP(ip)->commit_sem); | 97 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
98 | 98 | ||
99 | /* validate extent length */ | 99 | /* validate extent length */ |
100 | if (xlen > MAXXLEN) | 100 | if (xlen > MAXXLEN) |
@@ -136,14 +136,14 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr) | |||
136 | */ | 136 | */ |
137 | nxlen = xlen; | 137 | nxlen = xlen; |
138 | if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) { | 138 | if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) { |
139 | up(&JFS_IP(ip)->commit_sem); | 139 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
140 | return (rc); | 140 | return (rc); |
141 | } | 141 | } |
142 | 142 | ||
143 | /* Allocate blocks to quota. */ | 143 | /* Allocate blocks to quota. */ |
144 | if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { | 144 | if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { |
145 | dbFree(ip, nxaddr, (s64) nxlen); | 145 | dbFree(ip, nxaddr, (s64) nxlen); |
146 | up(&JFS_IP(ip)->commit_sem); | 146 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
147 | return -EDQUOT; | 147 | return -EDQUOT; |
148 | } | 148 | } |
149 | 149 | ||
@@ -165,7 +165,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr) | |||
165 | if (rc) { | 165 | if (rc) { |
166 | dbFree(ip, nxaddr, nxlen); | 166 | dbFree(ip, nxaddr, nxlen); |
167 | DQUOT_FREE_BLOCK(ip, nxlen); | 167 | DQUOT_FREE_BLOCK(ip, nxlen); |
168 | up(&JFS_IP(ip)->commit_sem); | 168 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
169 | return (rc); | 169 | return (rc); |
170 | } | 170 | } |
171 | 171 | ||
@@ -177,7 +177,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr) | |||
177 | 177 | ||
178 | mark_inode_dirty(ip); | 178 | mark_inode_dirty(ip); |
179 | 179 | ||
180 | up(&JFS_IP(ip)->commit_sem); | 180 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
181 | /* | 181 | /* |
182 | * COMMIT_SyncList flags an anonymous tlock on page that is on | 182 | * COMMIT_SyncList flags an anonymous tlock on page that is on |
183 | * sync list. | 183 | * sync list. |
@@ -222,7 +222,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr) | |||
222 | /* This blocks if we are low on resources */ | 222 | /* This blocks if we are low on resources */ |
223 | txBeginAnon(ip->i_sb); | 223 | txBeginAnon(ip->i_sb); |
224 | 224 | ||
225 | down(&JFS_IP(ip)->commit_sem); | 225 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
226 | /* validate extent length */ | 226 | /* validate extent length */ |
227 | if (nxlen > MAXXLEN) | 227 | if (nxlen > MAXXLEN) |
228 | nxlen = MAXXLEN; | 228 | nxlen = MAXXLEN; |
@@ -258,7 +258,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr) | |||
258 | /* Allocat blocks to quota. */ | 258 | /* Allocat blocks to quota. */ |
259 | if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { | 259 | if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { |
260 | dbFree(ip, nxaddr, (s64) nxlen); | 260 | dbFree(ip, nxaddr, (s64) nxlen); |
261 | up(&JFS_IP(ip)->commit_sem); | 261 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
262 | return -EDQUOT; | 262 | return -EDQUOT; |
263 | } | 263 | } |
264 | 264 | ||
@@ -338,7 +338,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr) | |||
338 | 338 | ||
339 | mark_inode_dirty(ip); | 339 | mark_inode_dirty(ip); |
340 | exit: | 340 | exit: |
341 | up(&JFS_IP(ip)->commit_sem); | 341 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
342 | return (rc); | 342 | return (rc); |
343 | } | 343 | } |
344 | #endif /* _NOTYET */ | 344 | #endif /* _NOTYET */ |
@@ -439,12 +439,12 @@ int extRecord(struct inode *ip, xad_t * xp) | |||
439 | 439 | ||
440 | txBeginAnon(ip->i_sb); | 440 | txBeginAnon(ip->i_sb); |
441 | 441 | ||
442 | down(&JFS_IP(ip)->commit_sem); | 442 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
443 | 443 | ||
444 | /* update the extent */ | 444 | /* update the extent */ |
445 | rc = xtUpdate(0, ip, xp); | 445 | rc = xtUpdate(0, ip, xp); |
446 | 446 | ||
447 | up(&JFS_IP(ip)->commit_sem); | 447 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
448 | return rc; | 448 | return rc; |
449 | } | 449 | } |
450 | 450 | ||
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index 4efa0d0eec39..ccbe60aff83d 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c | |||
@@ -66,14 +66,14 @@ static HLIST_HEAD(aggregate_hash); | |||
66 | * imap locks | 66 | * imap locks |
67 | */ | 67 | */ |
68 | /* iag free list lock */ | 68 | /* iag free list lock */ |
69 | #define IAGFREE_LOCK_INIT(imap) init_MUTEX(&imap->im_freelock) | 69 | #define IAGFREE_LOCK_INIT(imap) mutex_init(&imap->im_freelock) |
70 | #define IAGFREE_LOCK(imap) down(&imap->im_freelock) | 70 | #define IAGFREE_LOCK(imap) mutex_lock(&imap->im_freelock) |
71 | #define IAGFREE_UNLOCK(imap) up(&imap->im_freelock) | 71 | #define IAGFREE_UNLOCK(imap) mutex_unlock(&imap->im_freelock) |
72 | 72 | ||
73 | /* per ag iag list locks */ | 73 | /* per ag iag list locks */ |
74 | #define AG_LOCK_INIT(imap,index) init_MUTEX(&(imap->im_aglock[index])) | 74 | #define AG_LOCK_INIT(imap,index) mutex_init(&(imap->im_aglock[index])) |
75 | #define AG_LOCK(imap,agno) down(&imap->im_aglock[agno]) | 75 | #define AG_LOCK(imap,agno) mutex_lock(&imap->im_aglock[agno]) |
76 | #define AG_UNLOCK(imap,agno) up(&imap->im_aglock[agno]) | 76 | #define AG_UNLOCK(imap,agno) mutex_unlock(&imap->im_aglock[agno]) |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * forward references | 79 | * forward references |
@@ -1261,7 +1261,7 @@ int diFree(struct inode *ip) | |||
1261 | * to be freed by the transaction; | 1261 | * to be freed by the transaction; |
1262 | */ | 1262 | */ |
1263 | tid = txBegin(ipimap->i_sb, COMMIT_FORCE); | 1263 | tid = txBegin(ipimap->i_sb, COMMIT_FORCE); |
1264 | down(&JFS_IP(ipimap)->commit_sem); | 1264 | mutex_lock(&JFS_IP(ipimap)->commit_mutex); |
1265 | 1265 | ||
1266 | /* acquire tlock of the iag page of the freed ixad | 1266 | /* acquire tlock of the iag page of the freed ixad |
1267 | * to force the page NOHOMEOK (even though no data is | 1267 | * to force the page NOHOMEOK (even though no data is |
@@ -1294,7 +1294,7 @@ int diFree(struct inode *ip) | |||
1294 | rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); | 1294 | rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); |
1295 | 1295 | ||
1296 | txEnd(tid); | 1296 | txEnd(tid); |
1297 | up(&JFS_IP(ipimap)->commit_sem); | 1297 | mutex_unlock(&JFS_IP(ipimap)->commit_mutex); |
1298 | 1298 | ||
1299 | /* unlock the AG inode map information */ | 1299 | /* unlock the AG inode map information */ |
1300 | AG_UNLOCK(imap, agno); | 1300 | AG_UNLOCK(imap, agno); |
@@ -2554,13 +2554,13 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp) | |||
2554 | * addressing structure pointing to the new iag page; | 2554 | * addressing structure pointing to the new iag page; |
2555 | */ | 2555 | */ |
2556 | tid = txBegin(sb, COMMIT_FORCE); | 2556 | tid = txBegin(sb, COMMIT_FORCE); |
2557 | down(&JFS_IP(ipimap)->commit_sem); | 2557 | mutex_lock(&JFS_IP(ipimap)->commit_mutex); |
2558 | 2558 | ||
2559 | /* update the inode map addressing structure to point to it */ | 2559 | /* update the inode map addressing structure to point to it */ |
2560 | if ((rc = | 2560 | if ((rc = |
2561 | xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) { | 2561 | xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) { |
2562 | txEnd(tid); | 2562 | txEnd(tid); |
2563 | up(&JFS_IP(ipimap)->commit_sem); | 2563 | mutex_unlock(&JFS_IP(ipimap)->commit_mutex); |
2564 | /* Free the blocks allocated for the iag since it was | 2564 | /* Free the blocks allocated for the iag since it was |
2565 | * not successfully added to the inode map | 2565 | * not successfully added to the inode map |
2566 | */ | 2566 | */ |
@@ -2626,7 +2626,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp) | |||
2626 | rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); | 2626 | rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); |
2627 | 2627 | ||
2628 | txEnd(tid); | 2628 | txEnd(tid); |
2629 | up(&JFS_IP(ipimap)->commit_sem); | 2629 | mutex_unlock(&JFS_IP(ipimap)->commit_mutex); |
2630 | 2630 | ||
2631 | duplicateIXtree(sb, blkno, xlen, &xaddr); | 2631 | duplicateIXtree(sb, blkno, xlen, &xaddr); |
2632 | 2632 | ||
@@ -3074,14 +3074,40 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno, | |||
3074 | static int copy_from_dinode(struct dinode * dip, struct inode *ip) | 3074 | static int copy_from_dinode(struct dinode * dip, struct inode *ip) |
3075 | { | 3075 | { |
3076 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); | 3076 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); |
3077 | struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); | ||
3077 | 3078 | ||
3078 | jfs_ip->fileset = le32_to_cpu(dip->di_fileset); | 3079 | jfs_ip->fileset = le32_to_cpu(dip->di_fileset); |
3079 | jfs_ip->mode2 = le32_to_cpu(dip->di_mode); | 3080 | jfs_ip->mode2 = le32_to_cpu(dip->di_mode); |
3080 | 3081 | ||
3081 | ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff; | 3082 | ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff; |
3083 | if (sbi->umask != -1) { | ||
3084 | ip->i_mode = (ip->i_mode & ~0777) | (0777 & ~sbi->umask); | ||
3085 | /* For directories, add x permission if r is allowed by umask */ | ||
3086 | if (S_ISDIR(ip->i_mode)) { | ||
3087 | if (ip->i_mode & 0400) | ||
3088 | ip->i_mode |= 0100; | ||
3089 | if (ip->i_mode & 0040) | ||
3090 | ip->i_mode |= 0010; | ||
3091 | if (ip->i_mode & 0004) | ||
3092 | ip->i_mode |= 0001; | ||
3093 | } | ||
3094 | } | ||
3082 | ip->i_nlink = le32_to_cpu(dip->di_nlink); | 3095 | ip->i_nlink = le32_to_cpu(dip->di_nlink); |
3083 | ip->i_uid = le32_to_cpu(dip->di_uid); | 3096 | |
3084 | ip->i_gid = le32_to_cpu(dip->di_gid); | 3097 | jfs_ip->saved_uid = le32_to_cpu(dip->di_uid); |
3098 | if (sbi->uid == -1) | ||
3099 | ip->i_uid = jfs_ip->saved_uid; | ||
3100 | else { | ||
3101 | ip->i_uid = sbi->uid; | ||
3102 | } | ||
3103 | |||
3104 | jfs_ip->saved_gid = le32_to_cpu(dip->di_gid); | ||
3105 | if (sbi->gid == -1) | ||
3106 | ip->i_gid = jfs_ip->saved_gid; | ||
3107 | else { | ||
3108 | ip->i_gid = sbi->gid; | ||
3109 | } | ||
3110 | |||
3085 | ip->i_size = le64_to_cpu(dip->di_size); | 3111 | ip->i_size = le64_to_cpu(dip->di_size); |
3086 | ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec); | 3112 | ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec); |
3087 | ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec); | 3113 | ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec); |
@@ -3132,21 +3158,33 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip) | |||
3132 | static void copy_to_dinode(struct dinode * dip, struct inode *ip) | 3158 | static void copy_to_dinode(struct dinode * dip, struct inode *ip) |
3133 | { | 3159 | { |
3134 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); | 3160 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); |
3161 | struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); | ||
3135 | 3162 | ||
3136 | dip->di_fileset = cpu_to_le32(jfs_ip->fileset); | 3163 | dip->di_fileset = cpu_to_le32(jfs_ip->fileset); |
3137 | dip->di_inostamp = cpu_to_le32(JFS_SBI(ip->i_sb)->inostamp); | 3164 | dip->di_inostamp = cpu_to_le32(sbi->inostamp); |
3138 | dip->di_number = cpu_to_le32(ip->i_ino); | 3165 | dip->di_number = cpu_to_le32(ip->i_ino); |
3139 | dip->di_gen = cpu_to_le32(ip->i_generation); | 3166 | dip->di_gen = cpu_to_le32(ip->i_generation); |
3140 | dip->di_size = cpu_to_le64(ip->i_size); | 3167 | dip->di_size = cpu_to_le64(ip->i_size); |
3141 | dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); | 3168 | dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); |
3142 | dip->di_nlink = cpu_to_le32(ip->i_nlink); | 3169 | dip->di_nlink = cpu_to_le32(ip->i_nlink); |
3143 | dip->di_uid = cpu_to_le32(ip->i_uid); | 3170 | if (sbi->uid == -1) |
3144 | dip->di_gid = cpu_to_le32(ip->i_gid); | 3171 | dip->di_uid = cpu_to_le32(ip->i_uid); |
3172 | else | ||
3173 | dip->di_uid = cpu_to_le32(jfs_ip->saved_uid); | ||
3174 | if (sbi->gid == -1) | ||
3175 | dip->di_gid = cpu_to_le32(ip->i_gid); | ||
3176 | else | ||
3177 | dip->di_gid = cpu_to_le32(jfs_ip->saved_gid); | ||
3145 | /* | 3178 | /* |
3146 | * mode2 is only needed for storing the higher order bits. | 3179 | * mode2 is only needed for storing the higher order bits. |
3147 | * Trust i_mode for the lower order ones | 3180 | * Trust i_mode for the lower order ones |
3148 | */ | 3181 | */ |
3149 | dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) | ip->i_mode); | 3182 | if (sbi->umask == -1) |
3183 | dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) | | ||
3184 | ip->i_mode); | ||
3185 | else /* Leave the original permissions alone */ | ||
3186 | dip->di_mode = cpu_to_le32(jfs_ip->mode2); | ||
3187 | |||
3150 | dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec); | 3188 | dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec); |
3151 | dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec); | 3189 | dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec); |
3152 | dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec); | 3190 | dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec); |
diff --git a/fs/jfs/jfs_imap.h b/fs/jfs/jfs_imap.h index 6b59adec036a..6e24465f0f98 100644 --- a/fs/jfs/jfs_imap.h +++ b/fs/jfs/jfs_imap.h | |||
@@ -140,8 +140,8 @@ struct dinomap { | |||
140 | struct inomap { | 140 | struct inomap { |
141 | struct dinomap im_imap; /* 4096: inode allocation control */ | 141 | struct dinomap im_imap; /* 4096: inode allocation control */ |
142 | struct inode *im_ipimap; /* 4: ptr to inode for imap */ | 142 | struct inode *im_ipimap; /* 4: ptr to inode for imap */ |
143 | struct semaphore im_freelock; /* 4: iag free list lock */ | 143 | struct mutex im_freelock; /* 4: iag free list lock */ |
144 | struct semaphore im_aglock[MAXAG]; /* 512: per AG locks */ | 144 | struct mutex im_aglock[MAXAG]; /* 512: per AG locks */ |
145 | u32 *im_DBGdimap; | 145 | u32 *im_DBGdimap; |
146 | atomic_t im_numinos; /* num of backed inodes */ | 146 | atomic_t im_numinos; /* num of backed inodes */ |
147 | atomic_t im_numfree; /* num of free backed inodes */ | 147 | atomic_t im_numfree; /* num of free backed inodes */ |
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h index dc21a5bd54d4..54d73716ca8c 100644 --- a/fs/jfs/jfs_incore.h +++ b/fs/jfs/jfs_incore.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #ifndef _H_JFS_INCORE | 19 | #ifndef _H_JFS_INCORE |
20 | #define _H_JFS_INCORE | 20 | #define _H_JFS_INCORE |
21 | 21 | ||
22 | #include <linux/mutex.h> | ||
22 | #include <linux/rwsem.h> | 23 | #include <linux/rwsem.h> |
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/bitops.h> | 25 | #include <linux/bitops.h> |
@@ -37,6 +38,8 @@ | |||
37 | struct jfs_inode_info { | 38 | struct jfs_inode_info { |
38 | int fileset; /* fileset number (always 16)*/ | 39 | int fileset; /* fileset number (always 16)*/ |
39 | uint mode2; /* jfs-specific mode */ | 40 | uint mode2; /* jfs-specific mode */ |
41 | uint saved_uid; /* saved for uid mount option */ | ||
42 | uint saved_gid; /* saved for gid mount option */ | ||
40 | pxd_t ixpxd; /* inode extent descriptor */ | 43 | pxd_t ixpxd; /* inode extent descriptor */ |
41 | dxd_t acl; /* dxd describing acl */ | 44 | dxd_t acl; /* dxd describing acl */ |
42 | dxd_t ea; /* dxd describing ea */ | 45 | dxd_t ea; /* dxd describing ea */ |
@@ -62,12 +65,12 @@ struct jfs_inode_info { | |||
62 | */ | 65 | */ |
63 | struct rw_semaphore rdwrlock; | 66 | struct rw_semaphore rdwrlock; |
64 | /* | 67 | /* |
65 | * commit_sem serializes transaction processing on an inode. | 68 | * commit_mutex serializes transaction processing on an inode. |
66 | * It must be taken after beginning a transaction (txBegin), since | 69 | * It must be taken after beginning a transaction (txBegin), since |
67 | * dirty inodes may be committed while a new transaction on the | 70 | * dirty inodes may be committed while a new transaction on the |
68 | * inode is blocked in txBegin or TxBeginAnon | 71 | * inode is blocked in txBegin or TxBeginAnon |
69 | */ | 72 | */ |
70 | struct semaphore commit_sem; | 73 | struct mutex commit_mutex; |
71 | /* xattr_sem allows us to access the xattrs without taking i_mutex */ | 74 | /* xattr_sem allows us to access the xattrs without taking i_mutex */ |
72 | struct rw_semaphore xattr_sem; | 75 | struct rw_semaphore xattr_sem; |
73 | lid_t xtlid; /* lid of xtree lock on directory */ | 76 | lid_t xtlid; /* lid of xtree lock on directory */ |
@@ -169,6 +172,9 @@ struct jfs_sb_info { | |||
169 | uint state; /* mount/recovery state */ | 172 | uint state; /* mount/recovery state */ |
170 | unsigned long flag; /* mount time flags */ | 173 | unsigned long flag; /* mount time flags */ |
171 | uint p_state; /* state prior to going no integrity */ | 174 | uint p_state; /* state prior to going no integrity */ |
175 | uint uid; /* uid to override on-disk uid */ | ||
176 | uint gid; /* gid to override on-disk gid */ | ||
177 | uint umask; /* umask to override on-disk umask */ | ||
172 | }; | 178 | }; |
173 | 179 | ||
174 | /* jfs_sb_info commit_state */ | 180 | /* jfs_sb_info commit_state */ |
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index 2af5efbfd06f..495df402916d 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c | |||
@@ -25,6 +25,26 @@ | |||
25 | #include "jfs_dinode.h" | 25 | #include "jfs_dinode.h" |
26 | #include "jfs_debug.h" | 26 | #include "jfs_debug.h" |
27 | 27 | ||
28 | |||
29 | void jfs_set_inode_flags(struct inode *inode) | ||
30 | { | ||
31 | unsigned int flags = JFS_IP(inode)->mode2; | ||
32 | |||
33 | inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | | ||
34 | S_NOATIME | S_DIRSYNC | S_SYNC); | ||
35 | |||
36 | if (flags & JFS_IMMUTABLE_FL) | ||
37 | inode->i_flags |= S_IMMUTABLE; | ||
38 | if (flags & JFS_APPEND_FL) | ||
39 | inode->i_flags |= S_APPEND; | ||
40 | if (flags & JFS_NOATIME_FL) | ||
41 | inode->i_flags |= S_NOATIME; | ||
42 | if (flags & JFS_DIRSYNC_FL) | ||
43 | inode->i_flags |= S_DIRSYNC; | ||
44 | if (flags & JFS_SYNC_FL) | ||
45 | inode->i_flags |= S_SYNC; | ||
46 | } | ||
47 | |||
28 | /* | 48 | /* |
29 | * NAME: ialloc() | 49 | * NAME: ialloc() |
30 | * | 50 | * |
@@ -63,6 +83,13 @@ struct inode *ialloc(struct inode *parent, umode_t mode) | |||
63 | inode->i_gid = current->fsgid; | 83 | inode->i_gid = current->fsgid; |
64 | 84 | ||
65 | /* | 85 | /* |
86 | * New inodes need to save sane values on disk when | ||
87 | * uid & gid mount options are used | ||
88 | */ | ||
89 | jfs_inode->saved_uid = inode->i_uid; | ||
90 | jfs_inode->saved_gid = inode->i_gid; | ||
91 | |||
92 | /* | ||
66 | * Allocate inode to quota. | 93 | * Allocate inode to quota. |
67 | */ | 94 | */ |
68 | if (DQUOT_ALLOC_INODE(inode)) { | 95 | if (DQUOT_ALLOC_INODE(inode)) { |
@@ -74,10 +101,20 @@ struct inode *ialloc(struct inode *parent, umode_t mode) | |||
74 | } | 101 | } |
75 | 102 | ||
76 | inode->i_mode = mode; | 103 | inode->i_mode = mode; |
77 | if (S_ISDIR(mode)) | 104 | /* inherit flags from parent */ |
78 | jfs_inode->mode2 = IDIRECTORY | mode; | 105 | jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT; |
79 | else | 106 | |
80 | jfs_inode->mode2 = INLINEEA | ISPARSE | mode; | 107 | if (S_ISDIR(mode)) { |
108 | jfs_inode->mode2 |= IDIRECTORY; | ||
109 | jfs_inode->mode2 &= ~JFS_DIRSYNC_FL; | ||
110 | } | ||
111 | else { | ||
112 | jfs_inode->mode2 |= INLINEEA | ISPARSE; | ||
113 | if (S_ISLNK(mode)) | ||
114 | jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL); | ||
115 | } | ||
116 | jfs_inode->mode2 |= mode; | ||
117 | |||
81 | inode->i_blksize = sb->s_blocksize; | 118 | inode->i_blksize = sb->s_blocksize; |
82 | inode->i_blocks = 0; | 119 | inode->i_blocks = 0; |
83 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 120 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
@@ -98,6 +135,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode) | |||
98 | jfs_inode->atlhead = 0; | 135 | jfs_inode->atlhead = 0; |
99 | jfs_inode->atltail = 0; | 136 | jfs_inode->atltail = 0; |
100 | jfs_inode->xtlid = 0; | 137 | jfs_inode->xtlid = 0; |
138 | jfs_set_inode_flags(inode); | ||
101 | 139 | ||
102 | jfs_info("ialloc returns inode = 0x%p\n", inode); | 140 | jfs_info("ialloc returns inode = 0x%p\n", inode); |
103 | 141 | ||
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index b54bac576cb3..095d471b9f9a 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h | |||
@@ -20,6 +20,8 @@ | |||
20 | 20 | ||
21 | extern struct inode *ialloc(struct inode *, umode_t); | 21 | extern struct inode *ialloc(struct inode *, umode_t); |
22 | extern int jfs_fsync(struct file *, struct dentry *, int); | 22 | extern int jfs_fsync(struct file *, struct dentry *, int); |
23 | extern int jfs_ioctl(struct inode *, struct file *, | ||
24 | unsigned int, unsigned long); | ||
23 | extern void jfs_read_inode(struct inode *); | 25 | extern void jfs_read_inode(struct inode *); |
24 | extern int jfs_commit_inode(struct inode *, int); | 26 | extern int jfs_commit_inode(struct inode *, int); |
25 | extern int jfs_write_inode(struct inode*, int); | 27 | extern int jfs_write_inode(struct inode*, int); |
@@ -29,6 +31,7 @@ extern void jfs_truncate(struct inode *); | |||
29 | extern void jfs_truncate_nolock(struct inode *, loff_t); | 31 | extern void jfs_truncate_nolock(struct inode *, loff_t); |
30 | extern void jfs_free_zero_link(struct inode *); | 32 | extern void jfs_free_zero_link(struct inode *); |
31 | extern struct dentry *jfs_get_parent(struct dentry *dentry); | 33 | extern struct dentry *jfs_get_parent(struct dentry *dentry); |
34 | extern void jfs_set_inode_flags(struct inode *); | ||
32 | 35 | ||
33 | extern struct address_space_operations jfs_aops; | 36 | extern struct address_space_operations jfs_aops; |
34 | extern struct inode_operations jfs_dir_inode_operations; | 37 | extern struct inode_operations jfs_dir_inode_operations; |
diff --git a/fs/jfs/jfs_lock.h b/fs/jfs/jfs_lock.h index 10ad1d086685..70ac9f7d1e00 100644 --- a/fs/jfs/jfs_lock.h +++ b/fs/jfs/jfs_lock.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #define _H_JFS_LOCK | 20 | #define _H_JFS_LOCK |
21 | 21 | ||
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | #include <linux/mutex.h> | ||
23 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
24 | 25 | ||
25 | /* | 26 | /* |
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index d27bac6acaa3..0b348b13b551 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <linux/interrupt.h> | 64 | #include <linux/interrupt.h> |
65 | #include <linux/smp_lock.h> | 65 | #include <linux/smp_lock.h> |
66 | #include <linux/completion.h> | 66 | #include <linux/completion.h> |
67 | #include <linux/kthread.h> | ||
67 | #include <linux/buffer_head.h> /* for sync_blockdev() */ | 68 | #include <linux/buffer_head.h> /* for sync_blockdev() */ |
68 | #include <linux/bio.h> | 69 | #include <linux/bio.h> |
69 | #include <linux/suspend.h> | 70 | #include <linux/suspend.h> |
@@ -81,15 +82,14 @@ | |||
81 | */ | 82 | */ |
82 | static struct lbuf *log_redrive_list; | 83 | static struct lbuf *log_redrive_list; |
83 | static DEFINE_SPINLOCK(log_redrive_lock); | 84 | static DEFINE_SPINLOCK(log_redrive_lock); |
84 | DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait); | ||
85 | 85 | ||
86 | 86 | ||
87 | /* | 87 | /* |
88 | * log read/write serialization (per log) | 88 | * log read/write serialization (per log) |
89 | */ | 89 | */ |
90 | #define LOG_LOCK_INIT(log) init_MUTEX(&(log)->loglock) | 90 | #define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock) |
91 | #define LOG_LOCK(log) down(&((log)->loglock)) | 91 | #define LOG_LOCK(log) mutex_lock(&((log)->loglock)) |
92 | #define LOG_UNLOCK(log) up(&((log)->loglock)) | 92 | #define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock)) |
93 | 93 | ||
94 | 94 | ||
95 | /* | 95 | /* |
@@ -1105,11 +1105,10 @@ int lmLogOpen(struct super_block *sb) | |||
1105 | } | 1105 | } |
1106 | } | 1106 | } |
1107 | 1107 | ||
1108 | if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL))) { | 1108 | if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { |
1109 | up(&jfs_log_sem); | 1109 | up(&jfs_log_sem); |
1110 | return -ENOMEM; | 1110 | return -ENOMEM; |
1111 | } | 1111 | } |
1112 | memset(log, 0, sizeof(struct jfs_log)); | ||
1113 | INIT_LIST_HEAD(&log->sb_list); | 1112 | INIT_LIST_HEAD(&log->sb_list); |
1114 | init_waitqueue_head(&log->syncwait); | 1113 | init_waitqueue_head(&log->syncwait); |
1115 | 1114 | ||
@@ -1181,9 +1180,8 @@ static int open_inline_log(struct super_block *sb) | |||
1181 | struct jfs_log *log; | 1180 | struct jfs_log *log; |
1182 | int rc; | 1181 | int rc; |
1183 | 1182 | ||
1184 | if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL))) | 1183 | if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) |
1185 | return -ENOMEM; | 1184 | return -ENOMEM; |
1186 | memset(log, 0, sizeof(struct jfs_log)); | ||
1187 | INIT_LIST_HEAD(&log->sb_list); | 1185 | INIT_LIST_HEAD(&log->sb_list); |
1188 | init_waitqueue_head(&log->syncwait); | 1186 | init_waitqueue_head(&log->syncwait); |
1189 | 1187 | ||
@@ -1216,12 +1214,11 @@ static int open_dummy_log(struct super_block *sb) | |||
1216 | 1214 | ||
1217 | down(&jfs_log_sem); | 1215 | down(&jfs_log_sem); |
1218 | if (!dummy_log) { | 1216 | if (!dummy_log) { |
1219 | dummy_log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL); | 1217 | dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); |
1220 | if (!dummy_log) { | 1218 | if (!dummy_log) { |
1221 | up(&jfs_log_sem); | 1219 | up(&jfs_log_sem); |
1222 | return -ENOMEM; | 1220 | return -ENOMEM; |
1223 | } | 1221 | } |
1224 | memset(dummy_log, 0, sizeof(struct jfs_log)); | ||
1225 | INIT_LIST_HEAD(&dummy_log->sb_list); | 1222 | INIT_LIST_HEAD(&dummy_log->sb_list); |
1226 | init_waitqueue_head(&dummy_log->syncwait); | 1223 | init_waitqueue_head(&dummy_log->syncwait); |
1227 | dummy_log->no_integrity = 1; | 1224 | dummy_log->no_integrity = 1; |
@@ -1980,7 +1977,7 @@ static inline void lbmRedrive(struct lbuf *bp) | |||
1980 | log_redrive_list = bp; | 1977 | log_redrive_list = bp; |
1981 | spin_unlock_irqrestore(&log_redrive_lock, flags); | 1978 | spin_unlock_irqrestore(&log_redrive_lock, flags); |
1982 | 1979 | ||
1983 | wake_up(&jfs_IO_thread_wait); | 1980 | wake_up_process(jfsIOthread); |
1984 | } | 1981 | } |
1985 | 1982 | ||
1986 | 1983 | ||
@@ -2347,13 +2344,7 @@ int jfsIOWait(void *arg) | |||
2347 | { | 2344 | { |
2348 | struct lbuf *bp; | 2345 | struct lbuf *bp; |
2349 | 2346 | ||
2350 | daemonize("jfsIO"); | ||
2351 | |||
2352 | complete(&jfsIOwait); | ||
2353 | |||
2354 | do { | 2347 | do { |
2355 | DECLARE_WAITQUEUE(wq, current); | ||
2356 | |||
2357 | spin_lock_irq(&log_redrive_lock); | 2348 | spin_lock_irq(&log_redrive_lock); |
2358 | while ((bp = log_redrive_list) != 0) { | 2349 | while ((bp = log_redrive_list) != 0) { |
2359 | log_redrive_list = bp->l_redrive_next; | 2350 | log_redrive_list = bp->l_redrive_next; |
@@ -2362,21 +2353,19 @@ int jfsIOWait(void *arg) | |||
2362 | lbmStartIO(bp); | 2353 | lbmStartIO(bp); |
2363 | spin_lock_irq(&log_redrive_lock); | 2354 | spin_lock_irq(&log_redrive_lock); |
2364 | } | 2355 | } |
2356 | spin_unlock_irq(&log_redrive_lock); | ||
2357 | |||
2365 | if (freezing(current)) { | 2358 | if (freezing(current)) { |
2366 | spin_unlock_irq(&log_redrive_lock); | ||
2367 | refrigerator(); | 2359 | refrigerator(); |
2368 | } else { | 2360 | } else { |
2369 | add_wait_queue(&jfs_IO_thread_wait, &wq); | ||
2370 | set_current_state(TASK_INTERRUPTIBLE); | 2361 | set_current_state(TASK_INTERRUPTIBLE); |
2371 | spin_unlock_irq(&log_redrive_lock); | ||
2372 | schedule(); | 2362 | schedule(); |
2373 | current->state = TASK_RUNNING; | 2363 | current->state = TASK_RUNNING; |
2374 | remove_wait_queue(&jfs_IO_thread_wait, &wq); | ||
2375 | } | 2364 | } |
2376 | } while (!jfs_stop_threads); | 2365 | } while (!kthread_should_stop()); |
2377 | 2366 | ||
2378 | jfs_info("jfsIOWait being killed!"); | 2367 | jfs_info("jfsIOWait being killed!"); |
2379 | complete_and_exit(&jfsIOwait, 0); | 2368 | return 0; |
2380 | } | 2369 | } |
2381 | 2370 | ||
2382 | /* | 2371 | /* |
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h index e4978b5b65ee..8c6909b80014 100644 --- a/fs/jfs/jfs_logmgr.h +++ b/fs/jfs/jfs_logmgr.h | |||
@@ -389,7 +389,7 @@ struct jfs_log { | |||
389 | int eor; /* 4: eor of last record in eol page */ | 389 | int eor; /* 4: eor of last record in eol page */ |
390 | struct lbuf *bp; /* 4: current log page buffer */ | 390 | struct lbuf *bp; /* 4: current log page buffer */ |
391 | 391 | ||
392 | struct semaphore loglock; /* 4: log write serialization lock */ | 392 | struct mutex loglock; /* 4: log write serialization lock */ |
393 | 393 | ||
394 | /* syncpt */ | 394 | /* syncpt */ |
395 | int nextsync; /* 4: bytes to write before next syncpt */ | 395 | int nextsync; /* 4: bytes to write before next syncpt */ |
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 8a53981f9f27..5fbaeaadccd3 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -104,10 +104,9 @@ static inline int insert_metapage(struct page *page, struct metapage *mp) | |||
104 | if (PagePrivate(page)) | 104 | if (PagePrivate(page)) |
105 | a = mp_anchor(page); | 105 | a = mp_anchor(page); |
106 | else { | 106 | else { |
107 | a = kmalloc(sizeof(struct meta_anchor), GFP_NOFS); | 107 | a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS); |
108 | if (!a) | 108 | if (!a) |
109 | return -ENOMEM; | 109 | return -ENOMEM; |
110 | memset(a, 0, sizeof(struct meta_anchor)); | ||
111 | set_page_private(page, (unsigned long)a); | 110 | set_page_private(page, (unsigned long)a); |
112 | SetPagePrivate(page); | 111 | SetPagePrivate(page); |
113 | kmap(page); | 112 | kmap(page); |
diff --git a/fs/jfs/jfs_superblock.h b/fs/jfs/jfs_superblock.h index fcf781bf31cb..682cf1a68a18 100644 --- a/fs/jfs/jfs_superblock.h +++ b/fs/jfs/jfs_superblock.h | |||
@@ -113,12 +113,9 @@ extern int jfs_mount(struct super_block *); | |||
113 | extern int jfs_mount_rw(struct super_block *, int); | 113 | extern int jfs_mount_rw(struct super_block *, int); |
114 | extern int jfs_umount(struct super_block *); | 114 | extern int jfs_umount(struct super_block *); |
115 | extern int jfs_umount_rw(struct super_block *); | 115 | extern int jfs_umount_rw(struct super_block *); |
116 | |||
117 | extern int jfs_stop_threads; | ||
118 | extern struct completion jfsIOwait; | ||
119 | extern wait_queue_head_t jfs_IO_thread_wait; | ||
120 | extern wait_queue_head_t jfs_commit_thread_wait; | ||
121 | extern wait_queue_head_t jfs_sync_thread_wait; | ||
122 | extern int jfs_extendfs(struct super_block *, s64, int); | 116 | extern int jfs_extendfs(struct super_block *, s64, int); |
123 | 117 | ||
118 | extern struct task_struct *jfsIOthread; | ||
119 | extern struct task_struct *jfsSyncThread; | ||
120 | |||
124 | #endif /*_H_JFS_SUPERBLOCK */ | 121 | #endif /*_H_JFS_SUPERBLOCK */ |
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index 2ddb6b892bcf..ac3d66948e8c 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/suspend.h> | 49 | #include <linux/suspend.h> |
50 | #include <linux/module.h> | 50 | #include <linux/module.h> |
51 | #include <linux/moduleparam.h> | 51 | #include <linux/moduleparam.h> |
52 | #include <linux/kthread.h> | ||
52 | #include "jfs_incore.h" | 53 | #include "jfs_incore.h" |
53 | #include "jfs_inode.h" | 54 | #include "jfs_inode.h" |
54 | #include "jfs_filsys.h" | 55 | #include "jfs_filsys.h" |
@@ -121,8 +122,7 @@ static DEFINE_SPINLOCK(jfsTxnLock); | |||
121 | #define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags) | 122 | #define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags) |
122 | #define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags) | 123 | #define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags) |
123 | 124 | ||
124 | DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait); | 125 | static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait); |
125 | DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait); | ||
126 | static int jfs_commit_thread_waking; | 126 | static int jfs_commit_thread_waking; |
127 | 127 | ||
128 | /* | 128 | /* |
@@ -207,7 +207,7 @@ static lid_t txLockAlloc(void) | |||
207 | if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) { | 207 | if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) { |
208 | jfs_info("txLockAlloc tlocks low"); | 208 | jfs_info("txLockAlloc tlocks low"); |
209 | jfs_tlocks_low = 1; | 209 | jfs_tlocks_low = 1; |
210 | wake_up(&jfs_sync_thread_wait); | 210 | wake_up_process(jfsSyncThread); |
211 | } | 211 | } |
212 | 212 | ||
213 | return lid; | 213 | return lid; |
@@ -2743,10 +2743,6 @@ int jfs_lazycommit(void *arg) | |||
2743 | unsigned long flags; | 2743 | unsigned long flags; |
2744 | struct jfs_sb_info *sbi; | 2744 | struct jfs_sb_info *sbi; |
2745 | 2745 | ||
2746 | daemonize("jfsCommit"); | ||
2747 | |||
2748 | complete(&jfsIOwait); | ||
2749 | |||
2750 | do { | 2746 | do { |
2751 | LAZY_LOCK(flags); | 2747 | LAZY_LOCK(flags); |
2752 | jfs_commit_thread_waking = 0; /* OK to wake another thread */ | 2748 | jfs_commit_thread_waking = 0; /* OK to wake another thread */ |
@@ -2806,13 +2802,13 @@ int jfs_lazycommit(void *arg) | |||
2806 | current->state = TASK_RUNNING; | 2802 | current->state = TASK_RUNNING; |
2807 | remove_wait_queue(&jfs_commit_thread_wait, &wq); | 2803 | remove_wait_queue(&jfs_commit_thread_wait, &wq); |
2808 | } | 2804 | } |
2809 | } while (!jfs_stop_threads); | 2805 | } while (!kthread_should_stop()); |
2810 | 2806 | ||
2811 | if (!list_empty(&TxAnchor.unlock_queue)) | 2807 | if (!list_empty(&TxAnchor.unlock_queue)) |
2812 | jfs_err("jfs_lazycommit being killed w/pending transactions!"); | 2808 | jfs_err("jfs_lazycommit being killed w/pending transactions!"); |
2813 | else | 2809 | else |
2814 | jfs_info("jfs_lazycommit being killed\n"); | 2810 | jfs_info("jfs_lazycommit being killed\n"); |
2815 | complete_and_exit(&jfsIOwait, 0); | 2811 | return 0; |
2816 | } | 2812 | } |
2817 | 2813 | ||
2818 | void txLazyUnlock(struct tblock * tblk) | 2814 | void txLazyUnlock(struct tblock * tblk) |
@@ -2876,10 +2872,10 @@ restart: | |||
2876 | */ | 2872 | */ |
2877 | TXN_UNLOCK(); | 2873 | TXN_UNLOCK(); |
2878 | tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE); | 2874 | tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE); |
2879 | down(&jfs_ip->commit_sem); | 2875 | mutex_lock(&jfs_ip->commit_mutex); |
2880 | txCommit(tid, 1, &ip, 0); | 2876 | txCommit(tid, 1, &ip, 0); |
2881 | txEnd(tid); | 2877 | txEnd(tid); |
2882 | up(&jfs_ip->commit_sem); | 2878 | mutex_unlock(&jfs_ip->commit_mutex); |
2883 | /* | 2879 | /* |
2884 | * Just to be safe. I don't know how | 2880 | * Just to be safe. I don't know how |
2885 | * long we can run without blocking | 2881 | * long we can run without blocking |
@@ -2932,10 +2928,6 @@ int jfs_sync(void *arg) | |||
2932 | int rc; | 2928 | int rc; |
2933 | tid_t tid; | 2929 | tid_t tid; |
2934 | 2930 | ||
2935 | daemonize("jfsSync"); | ||
2936 | |||
2937 | complete(&jfsIOwait); | ||
2938 | |||
2939 | do { | 2931 | do { |
2940 | /* | 2932 | /* |
2941 | * write each inode on the anonymous inode list | 2933 | * write each inode on the anonymous inode list |
@@ -2952,7 +2944,7 @@ int jfs_sync(void *arg) | |||
2952 | * Inode is being freed | 2944 | * Inode is being freed |
2953 | */ | 2945 | */ |
2954 | list_del_init(&jfs_ip->anon_inode_list); | 2946 | list_del_init(&jfs_ip->anon_inode_list); |
2955 | } else if (! down_trylock(&jfs_ip->commit_sem)) { | 2947 | } else if (! !mutex_trylock(&jfs_ip->commit_mutex)) { |
2956 | /* | 2948 | /* |
2957 | * inode will be removed from anonymous list | 2949 | * inode will be removed from anonymous list |
2958 | * when it is committed | 2950 | * when it is committed |
@@ -2961,7 +2953,7 @@ int jfs_sync(void *arg) | |||
2961 | tid = txBegin(ip->i_sb, COMMIT_INODE); | 2953 | tid = txBegin(ip->i_sb, COMMIT_INODE); |
2962 | rc = txCommit(tid, 1, &ip, 0); | 2954 | rc = txCommit(tid, 1, &ip, 0); |
2963 | txEnd(tid); | 2955 | txEnd(tid); |
2964 | up(&jfs_ip->commit_sem); | 2956 | mutex_unlock(&jfs_ip->commit_mutex); |
2965 | 2957 | ||
2966 | iput(ip); | 2958 | iput(ip); |
2967 | /* | 2959 | /* |
@@ -2971,7 +2963,7 @@ int jfs_sync(void *arg) | |||
2971 | cond_resched(); | 2963 | cond_resched(); |
2972 | TXN_LOCK(); | 2964 | TXN_LOCK(); |
2973 | } else { | 2965 | } else { |
2974 | /* We can't get the commit semaphore. It may | 2966 | /* We can't get the commit mutex. It may |
2975 | * be held by a thread waiting for tlock's | 2967 | * be held by a thread waiting for tlock's |
2976 | * so let's not block here. Save it to | 2968 | * so let's not block here. Save it to |
2977 | * put back on the anon_list. | 2969 | * put back on the anon_list. |
@@ -2996,19 +2988,15 @@ int jfs_sync(void *arg) | |||
2996 | TXN_UNLOCK(); | 2988 | TXN_UNLOCK(); |
2997 | refrigerator(); | 2989 | refrigerator(); |
2998 | } else { | 2990 | } else { |
2999 | DECLARE_WAITQUEUE(wq, current); | ||
3000 | |||
3001 | add_wait_queue(&jfs_sync_thread_wait, &wq); | ||
3002 | set_current_state(TASK_INTERRUPTIBLE); | 2991 | set_current_state(TASK_INTERRUPTIBLE); |
3003 | TXN_UNLOCK(); | 2992 | TXN_UNLOCK(); |
3004 | schedule(); | 2993 | schedule(); |
3005 | current->state = TASK_RUNNING; | 2994 | current->state = TASK_RUNNING; |
3006 | remove_wait_queue(&jfs_sync_thread_wait, &wq); | ||
3007 | } | 2995 | } |
3008 | } while (!jfs_stop_threads); | 2996 | } while (!kthread_should_stop()); |
3009 | 2997 | ||
3010 | jfs_info("jfs_sync being killed"); | 2998 | jfs_info("jfs_sync being killed"); |
3011 | complete_and_exit(&jfsIOwait, 0); | 2999 | return 0; |
3012 | } | 3000 | } |
3013 | 3001 | ||
3014 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG) | 3002 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG) |
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 4abbe8604302..309cee575f7d 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
@@ -104,8 +104,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, | |||
104 | 104 | ||
105 | tid = txBegin(dip->i_sb, 0); | 105 | tid = txBegin(dip->i_sb, 0); |
106 | 106 | ||
107 | down(&JFS_IP(dip)->commit_sem); | 107 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
108 | down(&JFS_IP(ip)->commit_sem); | 108 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
109 | 109 | ||
110 | rc = jfs_init_acl(tid, ip, dip); | 110 | rc = jfs_init_acl(tid, ip, dip); |
111 | if (rc) | 111 | if (rc) |
@@ -165,8 +165,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, | |||
165 | 165 | ||
166 | out3: | 166 | out3: |
167 | txEnd(tid); | 167 | txEnd(tid); |
168 | up(&JFS_IP(dip)->commit_sem); | 168 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
169 | up(&JFS_IP(ip)->commit_sem); | 169 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
170 | if (rc) { | 170 | if (rc) { |
171 | free_ea_wmap(ip); | 171 | free_ea_wmap(ip); |
172 | ip->i_nlink = 0; | 172 | ip->i_nlink = 0; |
@@ -238,8 +238,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) | |||
238 | 238 | ||
239 | tid = txBegin(dip->i_sb, 0); | 239 | tid = txBegin(dip->i_sb, 0); |
240 | 240 | ||
241 | down(&JFS_IP(dip)->commit_sem); | 241 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
242 | down(&JFS_IP(ip)->commit_sem); | 242 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
243 | 243 | ||
244 | rc = jfs_init_acl(tid, ip, dip); | 244 | rc = jfs_init_acl(tid, ip, dip); |
245 | if (rc) | 245 | if (rc) |
@@ -300,8 +300,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) | |||
300 | 300 | ||
301 | out3: | 301 | out3: |
302 | txEnd(tid); | 302 | txEnd(tid); |
303 | up(&JFS_IP(dip)->commit_sem); | 303 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
304 | up(&JFS_IP(ip)->commit_sem); | 304 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
305 | if (rc) { | 305 | if (rc) { |
306 | free_ea_wmap(ip); | 306 | free_ea_wmap(ip); |
307 | ip->i_nlink = 0; | 307 | ip->i_nlink = 0; |
@@ -365,8 +365,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) | |||
365 | 365 | ||
366 | tid = txBegin(dip->i_sb, 0); | 366 | tid = txBegin(dip->i_sb, 0); |
367 | 367 | ||
368 | down(&JFS_IP(dip)->commit_sem); | 368 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
369 | down(&JFS_IP(ip)->commit_sem); | 369 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
370 | 370 | ||
371 | iplist[0] = dip; | 371 | iplist[0] = dip; |
372 | iplist[1] = ip; | 372 | iplist[1] = ip; |
@@ -384,8 +384,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) | |||
384 | if (rc == -EIO) | 384 | if (rc == -EIO) |
385 | txAbort(tid, 1); | 385 | txAbort(tid, 1); |
386 | txEnd(tid); | 386 | txEnd(tid); |
387 | up(&JFS_IP(dip)->commit_sem); | 387 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
388 | up(&JFS_IP(ip)->commit_sem); | 388 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
389 | 389 | ||
390 | goto out2; | 390 | goto out2; |
391 | } | 391 | } |
@@ -422,8 +422,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) | |||
422 | 422 | ||
423 | txEnd(tid); | 423 | txEnd(tid); |
424 | 424 | ||
425 | up(&JFS_IP(dip)->commit_sem); | 425 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
426 | up(&JFS_IP(ip)->commit_sem); | 426 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
427 | 427 | ||
428 | /* | 428 | /* |
429 | * Truncating the directory index table is not guaranteed. It | 429 | * Truncating the directory index table is not guaranteed. It |
@@ -488,8 +488,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
488 | 488 | ||
489 | tid = txBegin(dip->i_sb, 0); | 489 | tid = txBegin(dip->i_sb, 0); |
490 | 490 | ||
491 | down(&JFS_IP(dip)->commit_sem); | 491 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
492 | down(&JFS_IP(ip)->commit_sem); | 492 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
493 | 493 | ||
494 | iplist[0] = dip; | 494 | iplist[0] = dip; |
495 | iplist[1] = ip; | 495 | iplist[1] = ip; |
@@ -503,8 +503,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
503 | if (rc == -EIO) | 503 | if (rc == -EIO) |
504 | txAbort(tid, 1); /* Marks FS Dirty */ | 504 | txAbort(tid, 1); /* Marks FS Dirty */ |
505 | txEnd(tid); | 505 | txEnd(tid); |
506 | up(&JFS_IP(dip)->commit_sem); | 506 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
507 | up(&JFS_IP(ip)->commit_sem); | 507 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
508 | IWRITE_UNLOCK(ip); | 508 | IWRITE_UNLOCK(ip); |
509 | goto out1; | 509 | goto out1; |
510 | } | 510 | } |
@@ -527,8 +527,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
527 | if ((new_size = commitZeroLink(tid, ip)) < 0) { | 527 | if ((new_size = commitZeroLink(tid, ip)) < 0) { |
528 | txAbort(tid, 1); /* Marks FS Dirty */ | 528 | txAbort(tid, 1); /* Marks FS Dirty */ |
529 | txEnd(tid); | 529 | txEnd(tid); |
530 | up(&JFS_IP(dip)->commit_sem); | 530 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
531 | up(&JFS_IP(ip)->commit_sem); | 531 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
532 | IWRITE_UNLOCK(ip); | 532 | IWRITE_UNLOCK(ip); |
533 | rc = new_size; | 533 | rc = new_size; |
534 | goto out1; | 534 | goto out1; |
@@ -556,13 +556,13 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
556 | 556 | ||
557 | txEnd(tid); | 557 | txEnd(tid); |
558 | 558 | ||
559 | up(&JFS_IP(dip)->commit_sem); | 559 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
560 | up(&JFS_IP(ip)->commit_sem); | 560 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
561 | 561 | ||
562 | 562 | ||
563 | while (new_size && (rc == 0)) { | 563 | while (new_size && (rc == 0)) { |
564 | tid = txBegin(dip->i_sb, 0); | 564 | tid = txBegin(dip->i_sb, 0); |
565 | down(&JFS_IP(ip)->commit_sem); | 565 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
566 | new_size = xtTruncate_pmap(tid, ip, new_size); | 566 | new_size = xtTruncate_pmap(tid, ip, new_size); |
567 | if (new_size < 0) { | 567 | if (new_size < 0) { |
568 | txAbort(tid, 1); /* Marks FS Dirty */ | 568 | txAbort(tid, 1); /* Marks FS Dirty */ |
@@ -570,7 +570,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
570 | } else | 570 | } else |
571 | rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC); | 571 | rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC); |
572 | txEnd(tid); | 572 | txEnd(tid); |
573 | up(&JFS_IP(ip)->commit_sem); | 573 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
574 | } | 574 | } |
575 | 575 | ||
576 | if (ip->i_nlink == 0) | 576 | if (ip->i_nlink == 0) |
@@ -805,8 +805,8 @@ static int jfs_link(struct dentry *old_dentry, | |||
805 | 805 | ||
806 | tid = txBegin(ip->i_sb, 0); | 806 | tid = txBegin(ip->i_sb, 0); |
807 | 807 | ||
808 | down(&JFS_IP(dir)->commit_sem); | 808 | mutex_lock(&JFS_IP(dir)->commit_mutex); |
809 | down(&JFS_IP(ip)->commit_sem); | 809 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
810 | 810 | ||
811 | /* | 811 | /* |
812 | * scan parent directory for entry/freespace | 812 | * scan parent directory for entry/freespace |
@@ -847,8 +847,8 @@ static int jfs_link(struct dentry *old_dentry, | |||
847 | out: | 847 | out: |
848 | txEnd(tid); | 848 | txEnd(tid); |
849 | 849 | ||
850 | up(&JFS_IP(dir)->commit_sem); | 850 | mutex_unlock(&JFS_IP(dir)->commit_mutex); |
851 | up(&JFS_IP(ip)->commit_sem); | 851 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
852 | 852 | ||
853 | jfs_info("jfs_link: rc:%d", rc); | 853 | jfs_info("jfs_link: rc:%d", rc); |
854 | return rc; | 854 | return rc; |
@@ -916,8 +916,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
916 | 916 | ||
917 | tid = txBegin(dip->i_sb, 0); | 917 | tid = txBegin(dip->i_sb, 0); |
918 | 918 | ||
919 | down(&JFS_IP(dip)->commit_sem); | 919 | mutex_lock(&JFS_IP(dip)->commit_mutex); |
920 | down(&JFS_IP(ip)->commit_sem); | 920 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
921 | 921 | ||
922 | rc = jfs_init_security(tid, ip, dip); | 922 | rc = jfs_init_security(tid, ip, dip); |
923 | if (rc) | 923 | if (rc) |
@@ -1037,8 +1037,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
1037 | 1037 | ||
1038 | out3: | 1038 | out3: |
1039 | txEnd(tid); | 1039 | txEnd(tid); |
1040 | up(&JFS_IP(dip)->commit_sem); | 1040 | mutex_unlock(&JFS_IP(dip)->commit_mutex); |
1041 | up(&JFS_IP(ip)->commit_sem); | 1041 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
1042 | if (rc) { | 1042 | if (rc) { |
1043 | free_ea_wmap(ip); | 1043 | free_ea_wmap(ip); |
1044 | ip->i_nlink = 0; | 1044 | ip->i_nlink = 0; |
@@ -1141,13 +1141,13 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1141 | */ | 1141 | */ |
1142 | tid = txBegin(new_dir->i_sb, 0); | 1142 | tid = txBegin(new_dir->i_sb, 0); |
1143 | 1143 | ||
1144 | down(&JFS_IP(new_dir)->commit_sem); | 1144 | mutex_lock(&JFS_IP(new_dir)->commit_mutex); |
1145 | down(&JFS_IP(old_ip)->commit_sem); | 1145 | mutex_lock(&JFS_IP(old_ip)->commit_mutex); |
1146 | if (old_dir != new_dir) | 1146 | if (old_dir != new_dir) |
1147 | down(&JFS_IP(old_dir)->commit_sem); | 1147 | mutex_lock(&JFS_IP(old_dir)->commit_mutex); |
1148 | 1148 | ||
1149 | if (new_ip) { | 1149 | if (new_ip) { |
1150 | down(&JFS_IP(new_ip)->commit_sem); | 1150 | mutex_lock(&JFS_IP(new_ip)->commit_mutex); |
1151 | /* | 1151 | /* |
1152 | * Change existing directory entry to new inode number | 1152 | * Change existing directory entry to new inode number |
1153 | */ | 1153 | */ |
@@ -1160,10 +1160,10 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1160 | if (S_ISDIR(new_ip->i_mode)) { | 1160 | if (S_ISDIR(new_ip->i_mode)) { |
1161 | new_ip->i_nlink--; | 1161 | new_ip->i_nlink--; |
1162 | if (new_ip->i_nlink) { | 1162 | if (new_ip->i_nlink) { |
1163 | up(&JFS_IP(new_dir)->commit_sem); | 1163 | mutex_unlock(&JFS_IP(new_dir)->commit_mutex); |
1164 | up(&JFS_IP(old_ip)->commit_sem); | 1164 | mutex_unlock(&JFS_IP(old_ip)->commit_mutex); |
1165 | if (old_dir != new_dir) | 1165 | if (old_dir != new_dir) |
1166 | up(&JFS_IP(old_dir)->commit_sem); | 1166 | mutex_unlock(&JFS_IP(old_dir)->commit_mutex); |
1167 | if (!S_ISDIR(old_ip->i_mode) && new_ip) | 1167 | if (!S_ISDIR(old_ip->i_mode) && new_ip) |
1168 | IWRITE_UNLOCK(new_ip); | 1168 | IWRITE_UNLOCK(new_ip); |
1169 | jfs_error(new_ip->i_sb, | 1169 | jfs_error(new_ip->i_sb, |
@@ -1282,16 +1282,16 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1282 | out4: | 1282 | out4: |
1283 | txEnd(tid); | 1283 | txEnd(tid); |
1284 | 1284 | ||
1285 | up(&JFS_IP(new_dir)->commit_sem); | 1285 | mutex_unlock(&JFS_IP(new_dir)->commit_mutex); |
1286 | up(&JFS_IP(old_ip)->commit_sem); | 1286 | mutex_unlock(&JFS_IP(old_ip)->commit_mutex); |
1287 | if (old_dir != new_dir) | 1287 | if (old_dir != new_dir) |
1288 | up(&JFS_IP(old_dir)->commit_sem); | 1288 | mutex_unlock(&JFS_IP(old_dir)->commit_mutex); |
1289 | if (new_ip) | 1289 | if (new_ip) |
1290 | up(&JFS_IP(new_ip)->commit_sem); | 1290 | mutex_unlock(&JFS_IP(new_ip)->commit_mutex); |
1291 | 1291 | ||
1292 | while (new_size && (rc == 0)) { | 1292 | while (new_size && (rc == 0)) { |
1293 | tid = txBegin(new_ip->i_sb, 0); | 1293 | tid = txBegin(new_ip->i_sb, 0); |
1294 | down(&JFS_IP(new_ip)->commit_sem); | 1294 | mutex_lock(&JFS_IP(new_ip)->commit_mutex); |
1295 | new_size = xtTruncate_pmap(tid, new_ip, new_size); | 1295 | new_size = xtTruncate_pmap(tid, new_ip, new_size); |
1296 | if (new_size < 0) { | 1296 | if (new_size < 0) { |
1297 | txAbort(tid, 1); | 1297 | txAbort(tid, 1); |
@@ -1299,7 +1299,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1299 | } else | 1299 | } else |
1300 | rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC); | 1300 | rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC); |
1301 | txEnd(tid); | 1301 | txEnd(tid); |
1302 | up(&JFS_IP(new_ip)->commit_sem); | 1302 | mutex_unlock(&JFS_IP(new_ip)->commit_mutex); |
1303 | } | 1303 | } |
1304 | if (new_ip && (new_ip->i_nlink == 0)) | 1304 | if (new_ip && (new_ip->i_nlink == 0)) |
1305 | set_cflag(COMMIT_Nolink, new_ip); | 1305 | set_cflag(COMMIT_Nolink, new_ip); |
@@ -1361,8 +1361,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, | |||
1361 | 1361 | ||
1362 | tid = txBegin(dir->i_sb, 0); | 1362 | tid = txBegin(dir->i_sb, 0); |
1363 | 1363 | ||
1364 | down(&JFS_IP(dir)->commit_sem); | 1364 | mutex_lock(&JFS_IP(dir)->commit_mutex); |
1365 | down(&JFS_IP(ip)->commit_sem); | 1365 | mutex_lock(&JFS_IP(ip)->commit_mutex); |
1366 | 1366 | ||
1367 | rc = jfs_init_acl(tid, ip, dir); | 1367 | rc = jfs_init_acl(tid, ip, dir); |
1368 | if (rc) | 1368 | if (rc) |
@@ -1407,8 +1407,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, | |||
1407 | 1407 | ||
1408 | out3: | 1408 | out3: |
1409 | txEnd(tid); | 1409 | txEnd(tid); |
1410 | up(&JFS_IP(ip)->commit_sem); | 1410 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
1411 | up(&JFS_IP(dir)->commit_sem); | 1411 | mutex_unlock(&JFS_IP(dir)->commit_mutex); |
1412 | if (rc) { | 1412 | if (rc) { |
1413 | free_ea_wmap(ip); | 1413 | free_ea_wmap(ip); |
1414 | ip->i_nlink = 0; | 1414 | ip->i_nlink = 0; |
@@ -1523,6 +1523,7 @@ struct file_operations jfs_dir_operations = { | |||
1523 | .read = generic_read_dir, | 1523 | .read = generic_read_dir, |
1524 | .readdir = jfs_readdir, | 1524 | .readdir = jfs_readdir, |
1525 | .fsync = jfs_fsync, | 1525 | .fsync = jfs_fsync, |
1526 | .ioctl = jfs_ioctl, | ||
1526 | }; | 1527 | }; |
1527 | 1528 | ||
1528 | static int jfs_ci_hash(struct dentry *dir, struct qstr *this) | 1529 | static int jfs_ci_hash(struct dentry *dir, struct qstr *this) |
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 8d31f1336431..18f69e6aa719 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/vfs.h> | 25 | #include <linux/vfs.h> |
26 | #include <linux/mount.h> | 26 | #include <linux/mount.h> |
27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
28 | #include <linux/kthread.h> | ||
28 | #include <linux/posix_acl.h> | 29 | #include <linux/posix_acl.h> |
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
@@ -54,11 +55,9 @@ static int commit_threads = 0; | |||
54 | module_param(commit_threads, int, 0); | 55 | module_param(commit_threads, int, 0); |
55 | MODULE_PARM_DESC(commit_threads, "Number of commit threads"); | 56 | MODULE_PARM_DESC(commit_threads, "Number of commit threads"); |
56 | 57 | ||
57 | int jfs_stop_threads; | 58 | static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS]; |
58 | static pid_t jfsIOthread; | 59 | struct task_struct *jfsIOthread; |
59 | static pid_t jfsCommitThread[MAX_COMMIT_THREADS]; | 60 | struct task_struct *jfsSyncThread; |
60 | static pid_t jfsSyncThread; | ||
61 | DECLARE_COMPLETION(jfsIOwait); | ||
62 | 61 | ||
63 | #ifdef CONFIG_JFS_DEBUG | 62 | #ifdef CONFIG_JFS_DEBUG |
64 | int jfsloglevel = JFS_LOGLEVEL_WARN; | 63 | int jfsloglevel = JFS_LOGLEVEL_WARN; |
@@ -195,7 +194,7 @@ static void jfs_put_super(struct super_block *sb) | |||
195 | enum { | 194 | enum { |
196 | Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, | 195 | Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, |
197 | Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, | 196 | Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, |
198 | Opt_usrquota, Opt_grpquota | 197 | Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask |
199 | }; | 198 | }; |
200 | 199 | ||
201 | static match_table_t tokens = { | 200 | static match_table_t tokens = { |
@@ -209,6 +208,9 @@ static match_table_t tokens = { | |||
209 | {Opt_ignore, "quota"}, | 208 | {Opt_ignore, "quota"}, |
210 | {Opt_usrquota, "usrquota"}, | 209 | {Opt_usrquota, "usrquota"}, |
211 | {Opt_grpquota, "grpquota"}, | 210 | {Opt_grpquota, "grpquota"}, |
211 | {Opt_uid, "uid=%u"}, | ||
212 | {Opt_gid, "gid=%u"}, | ||
213 | {Opt_umask, "umask=%u"}, | ||
212 | {Opt_err, NULL} | 214 | {Opt_err, NULL} |
213 | }; | 215 | }; |
214 | 216 | ||
@@ -313,7 +315,29 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, | |||
313 | "JFS: quota operations not supported\n"); | 315 | "JFS: quota operations not supported\n"); |
314 | break; | 316 | break; |
315 | #endif | 317 | #endif |
316 | 318 | case Opt_uid: | |
319 | { | ||
320 | char *uid = args[0].from; | ||
321 | sbi->uid = simple_strtoul(uid, &uid, 0); | ||
322 | break; | ||
323 | } | ||
324 | case Opt_gid: | ||
325 | { | ||
326 | char *gid = args[0].from; | ||
327 | sbi->gid = simple_strtoul(gid, &gid, 0); | ||
328 | break; | ||
329 | } | ||
330 | case Opt_umask: | ||
331 | { | ||
332 | char *umask = args[0].from; | ||
333 | sbi->umask = simple_strtoul(umask, &umask, 8); | ||
334 | if (sbi->umask & ~0777) { | ||
335 | printk(KERN_ERR | ||
336 | "JFS: Invalid value of umask\n"); | ||
337 | goto cleanup; | ||
338 | } | ||
339 | break; | ||
340 | } | ||
317 | default: | 341 | default: |
318 | printk("jfs: Unrecognized mount option \"%s\" " | 342 | printk("jfs: Unrecognized mount option \"%s\" " |
319 | " or missing value\n", p); | 343 | " or missing value\n", p); |
@@ -396,12 +420,12 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
396 | if (!new_valid_dev(sb->s_bdev->bd_dev)) | 420 | if (!new_valid_dev(sb->s_bdev->bd_dev)) |
397 | return -EOVERFLOW; | 421 | return -EOVERFLOW; |
398 | 422 | ||
399 | sbi = kmalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); | 423 | sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); |
400 | if (!sbi) | 424 | if (!sbi) |
401 | return -ENOSPC; | 425 | return -ENOSPC; |
402 | memset(sbi, 0, sizeof (struct jfs_sb_info)); | ||
403 | sb->s_fs_info = sbi; | 426 | sb->s_fs_info = sbi; |
404 | sbi->sb = sb; | 427 | sbi->sb = sb; |
428 | sbi->uid = sbi->gid = sbi->umask = -1; | ||
405 | 429 | ||
406 | /* initialize the mount flag and determine the default error handler */ | 430 | /* initialize the mount flag and determine the default error handler */ |
407 | flag = JFS_ERR_REMOUNT_RO; | 431 | flag = JFS_ERR_REMOUNT_RO; |
@@ -564,10 +588,14 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
564 | { | 588 | { |
565 | struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb); | 589 | struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb); |
566 | 590 | ||
591 | if (sbi->uid != -1) | ||
592 | seq_printf(seq, ",uid=%d", sbi->uid); | ||
593 | if (sbi->gid != -1) | ||
594 | seq_printf(seq, ",gid=%d", sbi->gid); | ||
595 | if (sbi->umask != -1) | ||
596 | seq_printf(seq, ",umask=%03o", sbi->umask); | ||
567 | if (sbi->flag & JFS_NOINTEGRITY) | 597 | if (sbi->flag & JFS_NOINTEGRITY) |
568 | seq_puts(seq, ",nointegrity"); | 598 | seq_puts(seq, ",nointegrity"); |
569 | else | ||
570 | seq_puts(seq, ",integrity"); | ||
571 | 599 | ||
572 | #if defined(CONFIG_QUOTA) | 600 | #if defined(CONFIG_QUOTA) |
573 | if (sbi->flag & JFS_USRQUOTA) | 601 | if (sbi->flag & JFS_USRQUOTA) |
@@ -617,7 +645,7 @@ static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) | |||
617 | memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); | 645 | memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); |
618 | INIT_LIST_HEAD(&jfs_ip->anon_inode_list); | 646 | INIT_LIST_HEAD(&jfs_ip->anon_inode_list); |
619 | init_rwsem(&jfs_ip->rdwrlock); | 647 | init_rwsem(&jfs_ip->rdwrlock); |
620 | init_MUTEX(&jfs_ip->commit_sem); | 648 | mutex_init(&jfs_ip->commit_mutex); |
621 | init_rwsem(&jfs_ip->xattr_sem); | 649 | init_rwsem(&jfs_ip->xattr_sem); |
622 | spin_lock_init(&jfs_ip->ag_lock); | 650 | spin_lock_init(&jfs_ip->ag_lock); |
623 | jfs_ip->active_ag = -1; | 651 | jfs_ip->active_ag = -1; |
@@ -661,12 +689,12 @@ static int __init init_jfs_fs(void) | |||
661 | /* | 689 | /* |
662 | * I/O completion thread (endio) | 690 | * I/O completion thread (endio) |
663 | */ | 691 | */ |
664 | jfsIOthread = kernel_thread(jfsIOWait, NULL, CLONE_KERNEL); | 692 | jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO"); |
665 | if (jfsIOthread < 0) { | 693 | if (IS_ERR(jfsIOthread)) { |
666 | jfs_err("init_jfs_fs: fork failed w/rc = %d", jfsIOthread); | 694 | rc = PTR_ERR(jfsIOthread); |
695 | jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); | ||
667 | goto end_txmngr; | 696 | goto end_txmngr; |
668 | } | 697 | } |
669 | wait_for_completion(&jfsIOwait); /* Wait until thread starts */ | ||
670 | 698 | ||
671 | if (commit_threads < 1) | 699 | if (commit_threads < 1) |
672 | commit_threads = num_online_cpus(); | 700 | commit_threads = num_online_cpus(); |
@@ -674,24 +702,21 @@ static int __init init_jfs_fs(void) | |||
674 | commit_threads = MAX_COMMIT_THREADS; | 702 | commit_threads = MAX_COMMIT_THREADS; |
675 | 703 | ||
676 | for (i = 0; i < commit_threads; i++) { | 704 | for (i = 0; i < commit_threads; i++) { |
677 | jfsCommitThread[i] = kernel_thread(jfs_lazycommit, NULL, | 705 | jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit"); |
678 | CLONE_KERNEL); | 706 | if (IS_ERR(jfsCommitThread[i])) { |
679 | if (jfsCommitThread[i] < 0) { | 707 | rc = PTR_ERR(jfsCommitThread[i]); |
680 | jfs_err("init_jfs_fs: fork failed w/rc = %d", | 708 | jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); |
681 | jfsCommitThread[i]); | ||
682 | commit_threads = i; | 709 | commit_threads = i; |
683 | goto kill_committask; | 710 | goto kill_committask; |
684 | } | 711 | } |
685 | /* Wait until thread starts */ | ||
686 | wait_for_completion(&jfsIOwait); | ||
687 | } | 712 | } |
688 | 713 | ||
689 | jfsSyncThread = kernel_thread(jfs_sync, NULL, CLONE_KERNEL); | 714 | jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync"); |
690 | if (jfsSyncThread < 0) { | 715 | if (IS_ERR(jfsSyncThread)) { |
691 | jfs_err("init_jfs_fs: fork failed w/rc = %d", jfsSyncThread); | 716 | rc = PTR_ERR(jfsSyncThread); |
717 | jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); | ||
692 | goto kill_committask; | 718 | goto kill_committask; |
693 | } | 719 | } |
694 | wait_for_completion(&jfsIOwait); /* Wait until thread starts */ | ||
695 | 720 | ||
696 | #ifdef PROC_FS_JFS | 721 | #ifdef PROC_FS_JFS |
697 | jfs_proc_init(); | 722 | jfs_proc_init(); |
@@ -700,13 +725,9 @@ static int __init init_jfs_fs(void) | |||
700 | return register_filesystem(&jfs_fs_type); | 725 | return register_filesystem(&jfs_fs_type); |
701 | 726 | ||
702 | kill_committask: | 727 | kill_committask: |
703 | jfs_stop_threads = 1; | ||
704 | wake_up_all(&jfs_commit_thread_wait); | ||
705 | for (i = 0; i < commit_threads; i++) | 728 | for (i = 0; i < commit_threads; i++) |
706 | wait_for_completion(&jfsIOwait); | 729 | kthread_stop(jfsCommitThread[i]); |
707 | 730 | kthread_stop(jfsIOthread); | |
708 | wake_up(&jfs_IO_thread_wait); | ||
709 | wait_for_completion(&jfsIOwait); /* Wait for thread exit */ | ||
710 | end_txmngr: | 731 | end_txmngr: |
711 | txExit(); | 732 | txExit(); |
712 | free_metapage: | 733 | free_metapage: |
@@ -722,16 +743,13 @@ static void __exit exit_jfs_fs(void) | |||
722 | 743 | ||
723 | jfs_info("exit_jfs_fs called"); | 744 | jfs_info("exit_jfs_fs called"); |
724 | 745 | ||
725 | jfs_stop_threads = 1; | ||
726 | txExit(); | 746 | txExit(); |
727 | metapage_exit(); | 747 | metapage_exit(); |
728 | wake_up(&jfs_IO_thread_wait); | 748 | |
729 | wait_for_completion(&jfsIOwait); /* Wait until IO thread exits */ | 749 | kthread_stop(jfsIOthread); |
730 | wake_up_all(&jfs_commit_thread_wait); | ||
731 | for (i = 0; i < commit_threads; i++) | 750 | for (i = 0; i < commit_threads; i++) |
732 | wait_for_completion(&jfsIOwait); | 751 | kthread_stop(jfsCommitThread[i]); |
733 | wake_up(&jfs_sync_thread_wait); | 752 | kthread_stop(jfsSyncThread); |
734 | wait_for_completion(&jfsIOwait); /* Wait until Sync thread exits */ | ||
735 | #ifdef PROC_FS_JFS | 753 | #ifdef PROC_FS_JFS |
736 | jfs_proc_clean(); | 754 | jfs_proc_clean(); |
737 | #endif | 755 | #endif |
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index f23048f9471f..9bc5b7c055ce 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c | |||
@@ -934,13 +934,13 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value, | |||
934 | } | 934 | } |
935 | 935 | ||
936 | tid = txBegin(inode->i_sb, 0); | 936 | tid = txBegin(inode->i_sb, 0); |
937 | down(&ji->commit_sem); | 937 | mutex_lock(&ji->commit_mutex); |
938 | rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len, | 938 | rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len, |
939 | flags); | 939 | flags); |
940 | if (!rc) | 940 | if (!rc) |
941 | rc = txCommit(tid, 1, &inode, 0); | 941 | rc = txCommit(tid, 1, &inode, 0); |
942 | txEnd(tid); | 942 | txEnd(tid); |
943 | up(&ji->commit_sem); | 943 | mutex_unlock(&ji->commit_mutex); |
944 | 944 | ||
945 | return rc; | 945 | return rc; |
946 | } | 946 | } |
@@ -1093,12 +1093,12 @@ int jfs_removexattr(struct dentry *dentry, const char *name) | |||
1093 | return rc; | 1093 | return rc; |
1094 | 1094 | ||
1095 | tid = txBegin(inode->i_sb, 0); | 1095 | tid = txBegin(inode->i_sb, 0); |
1096 | down(&ji->commit_sem); | 1096 | mutex_lock(&ji->commit_mutex); |
1097 | rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); | 1097 | rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); |
1098 | if (!rc) | 1098 | if (!rc) |
1099 | rc = txCommit(tid, 1, &inode, 0); | 1099 | rc = txCommit(tid, 1, &inode, 0); |
1100 | txEnd(tid); | 1100 | txEnd(tid); |
1101 | up(&ji->commit_sem); | 1101 | mutex_unlock(&ji->commit_mutex); |
1102 | 1102 | ||
1103 | return rc; | 1103 | return rc; |
1104 | } | 1104 | } |
diff --git a/include/asm-sparc/idprom.h b/include/asm-sparc/idprom.h index d856e640acd3..59083ed85232 100644 --- a/include/asm-sparc/idprom.h +++ b/include/asm-sparc/idprom.h | |||
@@ -7,27 +7,19 @@ | |||
7 | #ifndef _SPARC_IDPROM_H | 7 | #ifndef _SPARC_IDPROM_H |
8 | #define _SPARC_IDPROM_H | 8 | #define _SPARC_IDPROM_H |
9 | 9 | ||
10 | /* Offset into the EEPROM where the id PROM is located on the 4c */ | 10 | #include <linux/types.h> |
11 | #define IDPROM_OFFSET 0x7d8 | ||
12 | 11 | ||
13 | /* On sun4m; physical. */ | 12 | struct idprom { |
14 | /* MicroSPARC(-II) does not decode 31rd bit, but it works. */ | 13 | u8 id_format; /* Format identifier (always 0x01) */ |
15 | #define IDPROM_OFFSET_M 0xfd8 | 14 | u8 id_machtype; /* Machine type */ |
16 | 15 | u8 id_ethaddr[6]; /* Hardware ethernet address */ | |
17 | struct idprom | 16 | s32 id_date; /* Date of manufacture */ |
18 | { | 17 | u32 id_sernum:24; /* Unique serial number */ |
19 | unsigned char id_format; /* Format identifier (always 0x01) */ | 18 | u8 id_cksum; /* Checksum - xor of the data bytes */ |
20 | unsigned char id_machtype; /* Machine type */ | 19 | u8 reserved[16]; |
21 | unsigned char id_ethaddr[6]; /* Hardware ethernet address */ | ||
22 | long id_date; /* Date of manufacture */ | ||
23 | unsigned int id_sernum:24; /* Unique serial number */ | ||
24 | unsigned char id_cksum; /* Checksum - xor of the data bytes */ | ||
25 | unsigned char reserved[16]; | ||
26 | }; | 20 | }; |
27 | 21 | ||
28 | extern struct idprom *idprom; | 22 | extern struct idprom *idprom; |
29 | extern void idprom_init(void); | 23 | extern void idprom_init(void); |
30 | 24 | ||
31 | #define IDPROM_SIZE (sizeof(struct idprom)) | ||
32 | |||
33 | #endif /* !(_SPARC_IDPROM_H) */ | 25 | #endif /* !(_SPARC_IDPROM_H) */ |
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h index d0d76b30eb4c..f283f8aaf6a9 100644 --- a/include/asm-sparc/oplib.h +++ b/include/asm-sparc/oplib.h | |||
@@ -165,6 +165,7 @@ enum prom_input_device { | |||
165 | PROMDEV_ITTYA, /* input from ttya */ | 165 | PROMDEV_ITTYA, /* input from ttya */ |
166 | PROMDEV_ITTYB, /* input from ttyb */ | 166 | PROMDEV_ITTYB, /* input from ttyb */ |
167 | PROMDEV_IRSC, /* input from rsc */ | 167 | PROMDEV_IRSC, /* input from rsc */ |
168 | PROMDEV_IVCONS, /* input from virtual-console */ | ||
168 | PROMDEV_I_UNK, | 169 | PROMDEV_I_UNK, |
169 | }; | 170 | }; |
170 | 171 | ||
@@ -177,6 +178,7 @@ enum prom_output_device { | |||
177 | PROMDEV_OTTYA, /* to ttya */ | 178 | PROMDEV_OTTYA, /* to ttya */ |
178 | PROMDEV_OTTYB, /* to ttyb */ | 179 | PROMDEV_OTTYB, /* to ttyb */ |
179 | PROMDEV_ORSC, /* to rsc */ | 180 | PROMDEV_ORSC, /* to rsc */ |
181 | PROMDEV_OVCONS, /* to virtual-console */ | ||
180 | PROMDEV_O_UNK, | 182 | PROMDEV_O_UNK, |
181 | }; | 183 | }; |
182 | 184 | ||
diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h index f8f1ec1f06e6..3cf132e1aa25 100644 --- a/include/asm-sparc/uaccess.h +++ b/include/asm-sparc/uaccess.h | |||
@@ -120,17 +120,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ | |||
120 | default: __pu_ret = __put_user_bad(); break; \ | 120 | default: __pu_ret = __put_user_bad(); break; \ |
121 | } } else { __pu_ret = -EFAULT; } __pu_ret; }) | 121 | } } else { __pu_ret = -EFAULT; } __pu_ret; }) |
122 | 122 | ||
123 | #define __put_user_check_ret(x,addr,size,retval) ({ \ | ||
124 | register int __foo __asm__ ("l1"); \ | ||
125 | if (__access_ok(addr,size)) { \ | ||
126 | switch (size) { \ | ||
127 | case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \ | ||
128 | case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \ | ||
129 | case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \ | ||
130 | case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \ | ||
131 | default: if (__put_user_bad()) return retval; break; \ | ||
132 | } } else return retval; }) | ||
133 | |||
134 | #define __put_user_nocheck(x,addr,size) ({ \ | 123 | #define __put_user_nocheck(x,addr,size) ({ \ |
135 | register int __pu_ret; \ | 124 | register int __pu_ret; \ |
136 | switch (size) { \ | 125 | switch (size) { \ |
@@ -141,16 +130,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ | |||
141 | default: __pu_ret = __put_user_bad(); break; \ | 130 | default: __pu_ret = __put_user_bad(); break; \ |
142 | } __pu_ret; }) | 131 | } __pu_ret; }) |
143 | 132 | ||
144 | #define __put_user_nocheck_ret(x,addr,size,retval) ({ \ | ||
145 | register int __foo __asm__ ("l1"); \ | ||
146 | switch (size) { \ | ||
147 | case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \ | ||
148 | case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \ | ||
149 | case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \ | ||
150 | case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \ | ||
151 | default: if (__put_user_bad()) return retval; break; \ | ||
152 | } }) | ||
153 | |||
154 | #define __put_user_asm(x,size,addr,ret) \ | 133 | #define __put_user_asm(x,size,addr,ret) \ |
155 | __asm__ __volatile__( \ | 134 | __asm__ __volatile__( \ |
156 | "/* Put user asm, inline. */\n" \ | 135 | "/* Put user asm, inline. */\n" \ |
@@ -170,32 +149,6 @@ __asm__ __volatile__( \ | |||
170 | : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ | 149 | : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ |
171 | "i" (-EFAULT)) | 150 | "i" (-EFAULT)) |
172 | 151 | ||
173 | #define __put_user_asm_ret(x,size,addr,ret,foo) \ | ||
174 | if (__builtin_constant_p(ret) && ret == -EFAULT) \ | ||
175 | __asm__ __volatile__( \ | ||
176 | "/* Put user asm ret, inline. */\n" \ | ||
177 | "1:\t" "st"#size " %1, %2\n\n\t" \ | ||
178 | ".section __ex_table,#alloc\n\t" \ | ||
179 | ".align 4\n\t" \ | ||
180 | ".word 1b, __ret_efault\n\n\t" \ | ||
181 | ".previous\n\n\t" \ | ||
182 | : "=r" (foo) : "r" (x), "m" (*__m(addr))); \ | ||
183 | else \ | ||
184 | __asm__ __volatile( \ | ||
185 | "/* Put user asm ret, inline. */\n" \ | ||
186 | "1:\t" "st"#size " %1, %2\n\n\t" \ | ||
187 | ".section .fixup,#alloc,#execinstr\n\t" \ | ||
188 | ".align 4\n" \ | ||
189 | "3:\n\t" \ | ||
190 | "ret\n\t" \ | ||
191 | " restore %%g0, %3, %%o0\n\t" \ | ||
192 | ".previous\n\n\t" \ | ||
193 | ".section __ex_table,#alloc\n\t" \ | ||
194 | ".align 4\n\t" \ | ||
195 | ".word 1b, 3b\n\n\t" \ | ||
196 | ".previous\n\n\t" \ | ||
197 | : "=r" (foo) : "r" (x), "m" (*__m(addr)), "i" (ret)) | ||
198 | |||
199 | extern int __put_user_bad(void); | 152 | extern int __put_user_bad(void); |
200 | 153 | ||
201 | #define __get_user_check(x,addr,size,type) ({ \ | 154 | #define __get_user_check(x,addr,size,type) ({ \ |
diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h index 02af289e3f46..35cb5c9e0c92 100644 --- a/include/asm-sparc64/a.out.h +++ b/include/asm-sparc64/a.out.h | |||
@@ -95,7 +95,11 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */ | |||
95 | 95 | ||
96 | #ifdef __KERNEL__ | 96 | #ifdef __KERNEL__ |
97 | 97 | ||
98 | #define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L) | 98 | #define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE) |
99 | #define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL)) | ||
100 | |||
101 | #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ | ||
102 | STACK_TOP32 : STACK_TOP64) | ||
99 | 103 | ||
100 | #endif | 104 | #endif |
101 | 105 | ||
diff --git a/include/asm-sparc64/asi.h b/include/asm-sparc64/asi.h index 534855660f2a..662a21107ae6 100644 --- a/include/asm-sparc64/asi.h +++ b/include/asm-sparc64/asi.h | |||
@@ -25,14 +25,27 @@ | |||
25 | 25 | ||
26 | /* SpitFire and later extended ASIs. The "(III)" marker designates | 26 | /* SpitFire and later extended ASIs. The "(III)" marker designates |
27 | * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates | 27 | * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates |
28 | * Chip Multi Threading specific ASIs. | 28 | * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific |
29 | * ASIs, "(4V)" designates SUN4V specific ASIs. | ||
29 | */ | 30 | */ |
30 | #define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ | 31 | #define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ |
31 | #define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ | 32 | #define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ |
33 | #define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */ | ||
34 | #define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */ | ||
32 | #define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ | 35 | #define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ |
33 | #define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ | 36 | #define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ |
37 | #define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/ | ||
38 | #define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */ | ||
39 | #define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */ | ||
40 | #define ASI_MMU 0x21 /* (4V) MMU Context Registers */ | ||
41 | #define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load, | ||
42 | * secondary, user | ||
43 | */ | ||
34 | #define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */ | 44 | #define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */ |
45 | #define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */ | ||
46 | #define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */ | ||
35 | #define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */ | 47 | #define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */ |
48 | #define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */ | ||
36 | #define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */ | 49 | #define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */ |
37 | #define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */ | 50 | #define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */ |
38 | #define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */ | 51 | #define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */ |
@@ -137,6 +150,9 @@ | |||
137 | #define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/ | 150 | #define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/ |
138 | #define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */ | 151 | #define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */ |
139 | #define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */ | 152 | #define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */ |
153 | #define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load, | ||
154 | * primary, implicit | ||
155 | */ | ||
140 | #define ASI_BLK_P 0xf0 /* Primary, blk ld/st */ | 156 | #define ASI_BLK_P 0xf0 /* Primary, blk ld/st */ |
141 | #define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */ | 157 | #define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */ |
142 | #define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */ | 158 | #define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */ |
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 74de79dca915..c66a81bbc84d 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h | |||
@@ -1,41 +1,224 @@ | |||
1 | /* cpudata.h: Per-cpu parameters. | 1 | /* cpudata.h: Per-cpu parameters. |
2 | * | 2 | * |
3 | * Copyright (C) 2003, 2005 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef _SPARC64_CPUDATA_H | 6 | #ifndef _SPARC64_CPUDATA_H |
7 | #define _SPARC64_CPUDATA_H | 7 | #define _SPARC64_CPUDATA_H |
8 | 8 | ||
9 | #include <asm/hypervisor.h> | ||
10 | #include <asm/asi.h> | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | |||
9 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
15 | #include <linux/threads.h> | ||
10 | 16 | ||
11 | typedef struct { | 17 | typedef struct { |
12 | /* Dcache line 1 */ | 18 | /* Dcache line 1 */ |
13 | unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ | 19 | unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ |
14 | unsigned int multiplier; | 20 | unsigned int multiplier; |
15 | unsigned int counter; | 21 | unsigned int counter; |
16 | unsigned int idle_volume; | 22 | unsigned int __pad1; |
17 | unsigned long clock_tick; /* %tick's per second */ | 23 | unsigned long clock_tick; /* %tick's per second */ |
18 | unsigned long udelay_val; | 24 | unsigned long udelay_val; |
19 | 25 | ||
20 | /* Dcache line 2 */ | 26 | /* Dcache line 2, rarely used */ |
21 | unsigned int pgcache_size; | ||
22 | unsigned int __pad1; | ||
23 | unsigned long *pte_cache[2]; | ||
24 | unsigned long *pgd_cache; | ||
25 | |||
26 | /* Dcache line 3, rarely used */ | ||
27 | unsigned int dcache_size; | 27 | unsigned int dcache_size; |
28 | unsigned int dcache_line_size; | 28 | unsigned int dcache_line_size; |
29 | unsigned int icache_size; | 29 | unsigned int icache_size; |
30 | unsigned int icache_line_size; | 30 | unsigned int icache_line_size; |
31 | unsigned int ecache_size; | 31 | unsigned int ecache_size; |
32 | unsigned int ecache_line_size; | 32 | unsigned int ecache_line_size; |
33 | unsigned int __pad2; | ||
34 | unsigned int __pad3; | 33 | unsigned int __pad3; |
34 | unsigned int __pad4; | ||
35 | } cpuinfo_sparc; | 35 | } cpuinfo_sparc; |
36 | 36 | ||
37 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); | 37 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); |
38 | #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) | 38 | #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) |
39 | #define local_cpu_data() __get_cpu_var(__cpu_data) | 39 | #define local_cpu_data() __get_cpu_var(__cpu_data) |
40 | 40 | ||
41 | /* Trap handling code needs to get at a few critical values upon | ||
42 | * trap entry and to process TSB misses. These cannot be in the | ||
43 | * per_cpu() area as we really need to lock them into the TLB and | ||
44 | * thus make them part of the main kernel image. As a result we | ||
45 | * try to make this as small as possible. | ||
46 | * | ||
47 | * This is padded out and aligned to 64-bytes to avoid false sharing | ||
48 | * on SMP. | ||
49 | */ | ||
50 | |||
51 | /* If you modify the size of this structure, please update | ||
52 | * TRAP_BLOCK_SZ_SHIFT below. | ||
53 | */ | ||
54 | struct thread_info; | ||
55 | struct trap_per_cpu { | ||
56 | /* D-cache line 1: Basic thread information, cpu and device mondo queues */ | ||
57 | struct thread_info *thread; | ||
58 | unsigned long pgd_paddr; | ||
59 | unsigned long cpu_mondo_pa; | ||
60 | unsigned long dev_mondo_pa; | ||
61 | |||
62 | /* D-cache line 2: Error Mondo Queue and kernel buffer pointers */ | ||
63 | unsigned long resum_mondo_pa; | ||
64 | unsigned long resum_kernel_buf_pa; | ||
65 | unsigned long nonresum_mondo_pa; | ||
66 | unsigned long nonresum_kernel_buf_pa; | ||
67 | |||
68 | /* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */ | ||
69 | struct hv_fault_status fault_info; | ||
70 | |||
71 | /* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */ | ||
72 | unsigned long cpu_mondo_block_pa; | ||
73 | unsigned long cpu_list_pa; | ||
74 | unsigned long __pad1[2]; | ||
75 | |||
76 | /* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size. */ | ||
77 | unsigned long __pad2[4]; | ||
78 | } __attribute__((aligned(64))); | ||
79 | extern struct trap_per_cpu trap_block[NR_CPUS]; | ||
80 | extern void init_cur_cpu_trap(struct thread_info *); | ||
81 | extern void setup_tba(void); | ||
82 | |||
83 | struct cpuid_patch_entry { | ||
84 | unsigned int addr; | ||
85 | unsigned int cheetah_safari[4]; | ||
86 | unsigned int cheetah_jbus[4]; | ||
87 | unsigned int starfire[4]; | ||
88 | unsigned int sun4v[4]; | ||
89 | }; | ||
90 | extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; | ||
91 | |||
92 | struct sun4v_1insn_patch_entry { | ||
93 | unsigned int addr; | ||
94 | unsigned int insn; | ||
95 | }; | ||
96 | extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, | ||
97 | __sun4v_1insn_patch_end; | ||
98 | |||
99 | struct sun4v_2insn_patch_entry { | ||
100 | unsigned int addr; | ||
101 | unsigned int insns[2]; | ||
102 | }; | ||
103 | extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, | ||
104 | __sun4v_2insn_patch_end; | ||
105 | |||
106 | #endif /* !(__ASSEMBLY__) */ | ||
107 | |||
108 | #define TRAP_PER_CPU_THREAD 0x00 | ||
109 | #define TRAP_PER_CPU_PGD_PADDR 0x08 | ||
110 | #define TRAP_PER_CPU_CPU_MONDO_PA 0x10 | ||
111 | #define TRAP_PER_CPU_DEV_MONDO_PA 0x18 | ||
112 | #define TRAP_PER_CPU_RESUM_MONDO_PA 0x20 | ||
113 | #define TRAP_PER_CPU_RESUM_KBUF_PA 0x28 | ||
114 | #define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30 | ||
115 | #define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38 | ||
116 | #define TRAP_PER_CPU_FAULT_INFO 0x40 | ||
117 | #define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0 | ||
118 | #define TRAP_PER_CPU_CPU_LIST_PA 0xc8 | ||
119 | |||
120 | #define TRAP_BLOCK_SZ_SHIFT 8 | ||
121 | |||
122 | #include <asm/scratchpad.h> | ||
123 | |||
124 | #define __GET_CPUID(REG) \ | ||
125 | /* Spitfire implementation (default). */ \ | ||
126 | 661: ldxa [%g0] ASI_UPA_CONFIG, REG; \ | ||
127 | srlx REG, 17, REG; \ | ||
128 | and REG, 0x1f, REG; \ | ||
129 | nop; \ | ||
130 | .section .cpuid_patch, "ax"; \ | ||
131 | /* Instruction location. */ \ | ||
132 | .word 661b; \ | ||
133 | /* Cheetah Safari implementation. */ \ | ||
134 | ldxa [%g0] ASI_SAFARI_CONFIG, REG; \ | ||
135 | srlx REG, 17, REG; \ | ||
136 | and REG, 0x3ff, REG; \ | ||
137 | nop; \ | ||
138 | /* Cheetah JBUS implementation. */ \ | ||
139 | ldxa [%g0] ASI_JBUS_CONFIG, REG; \ | ||
140 | srlx REG, 17, REG; \ | ||
141 | and REG, 0x1f, REG; \ | ||
142 | nop; \ | ||
143 | /* Starfire implementation. */ \ | ||
144 | sethi %hi(0x1fff40000d0 >> 9), REG; \ | ||
145 | sllx REG, 9, REG; \ | ||
146 | or REG, 0xd0, REG; \ | ||
147 | lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ | ||
148 | /* sun4v implementation. */ \ | ||
149 | mov SCRATCHPAD_CPUID, REG; \ | ||
150 | ldxa [REG] ASI_SCRATCHPAD, REG; \ | ||
151 | nop; \ | ||
152 | nop; \ | ||
153 | .previous; | ||
154 | |||
155 | #ifdef CONFIG_SMP | ||
156 | |||
157 | #define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
158 | __GET_CPUID(TMP) \ | ||
159 | sethi %hi(trap_block), DEST; \ | ||
160 | sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ | ||
161 | or DEST, %lo(trap_block), DEST; \ | ||
162 | add DEST, TMP, DEST; \ | ||
163 | |||
164 | /* Clobbers TMP, current address space PGD phys address into DEST. */ | ||
165 | #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ | ||
166 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
167 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; | ||
168 | |||
169 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ | ||
170 | #define TRAP_LOAD_IRQ_WORK(DEST, TMP) \ | ||
171 | __GET_CPUID(TMP) \ | ||
172 | sethi %hi(__irq_work), DEST; \ | ||
173 | sllx TMP, 6, TMP; \ | ||
174 | or DEST, %lo(__irq_work), DEST; \ | ||
175 | add DEST, TMP, DEST; | ||
176 | |||
177 | /* Clobbers TMP, loads DEST with current thread info pointer. */ | ||
178 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ | ||
179 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
180 | ldx [DEST + TRAP_PER_CPU_THREAD], DEST; | ||
181 | |||
182 | /* Given the current thread info pointer in THR, load the per-cpu | ||
183 | * area base of the current processor into DEST. REG1, REG2, and REG3 are | ||
184 | * clobbered. | ||
185 | * | ||
186 | * You absolutely cannot use DEST as a temporary in this code. The | ||
187 | * reason is that traps can happen during execution, and return from | ||
188 | * trap will load the fully resolved DEST per-cpu base. This can corrupt | ||
189 | * the calculations done by the macro mid-stream. | ||
190 | */ | ||
191 | #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ | ||
192 | ldub [THR + TI_CPU], REG1; \ | ||
193 | sethi %hi(__per_cpu_shift), REG3; \ | ||
194 | sethi %hi(__per_cpu_base), REG2; \ | ||
195 | ldx [REG3 + %lo(__per_cpu_shift)], REG3; \ | ||
196 | ldx [REG2 + %lo(__per_cpu_base)], REG2; \ | ||
197 | sllx REG1, REG3, REG3; \ | ||
198 | add REG3, REG2, DEST; | ||
199 | |||
200 | #else | ||
201 | |||
202 | #define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
203 | sethi %hi(trap_block), DEST; \ | ||
204 | or DEST, %lo(trap_block), DEST; \ | ||
205 | |||
206 | /* Uniprocessor versions, we know the cpuid is zero. */ | ||
207 | #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ | ||
208 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
209 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; | ||
210 | |||
211 | #define TRAP_LOAD_IRQ_WORK(DEST, TMP) \ | ||
212 | sethi %hi(__irq_work), DEST; \ | ||
213 | or DEST, %lo(__irq_work), DEST; | ||
214 | |||
215 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ | ||
216 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
217 | ldx [DEST + TRAP_PER_CPU_THREAD], DEST; | ||
218 | |||
219 | /* No per-cpu areas on uniprocessor, so no need to load DEST. */ | ||
220 | #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) | ||
221 | |||
222 | #endif /* !(CONFIG_SMP) */ | ||
223 | |||
41 | #endif /* _SPARC64_CPUDATA_H */ | 224 | #endif /* _SPARC64_CPUDATA_H */ |
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h index 69539a8ab833..303d85e2f82e 100644 --- a/include/asm-sparc64/elf.h +++ b/include/asm-sparc64/elf.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #ifdef __KERNEL__ | 10 | #ifdef __KERNEL__ |
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/uaccess.h> | 12 | #include <asm/uaccess.h> |
13 | #include <asm/spitfire.h> | ||
13 | #endif | 14 | #endif |
14 | 15 | ||
15 | /* | 16 | /* |
@@ -68,6 +69,7 @@ | |||
68 | #define HWCAP_SPARC_MULDIV 8 | 69 | #define HWCAP_SPARC_MULDIV 8 |
69 | #define HWCAP_SPARC_V9 16 | 70 | #define HWCAP_SPARC_V9 16 |
70 | #define HWCAP_SPARC_ULTRA3 32 | 71 | #define HWCAP_SPARC_ULTRA3 32 |
72 | #define HWCAP_SPARC_BLKINIT 64 | ||
71 | 73 | ||
72 | /* | 74 | /* |
73 | * These are used to set parameters in the core dumps. | 75 | * These are used to set parameters in the core dumps. |
@@ -145,11 +147,21 @@ typedef struct { | |||
145 | instruction set this cpu supports. */ | 147 | instruction set this cpu supports. */ |
146 | 148 | ||
147 | /* On Ultra, we support all of the v8 capabilities. */ | 149 | /* On Ultra, we support all of the v8 capabilities. */ |
148 | #define ELF_HWCAP ((HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \ | 150 | static inline unsigned int sparc64_elf_hwcap(void) |
149 | HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | \ | 151 | { |
150 | HWCAP_SPARC_V9) | \ | 152 | unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | |
151 | ((tlb_type == cheetah || tlb_type == cheetah_plus) ? \ | 153 | HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | |
152 | HWCAP_SPARC_ULTRA3 : 0)) | 154 | HWCAP_SPARC_V9); |
155 | |||
156 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
157 | cap |= HWCAP_SPARC_ULTRA3; | ||
158 | else if (tlb_type == hypervisor) | ||
159 | cap |= HWCAP_SPARC_BLKINIT; | ||
160 | |||
161 | return cap; | ||
162 | } | ||
163 | |||
164 | #define ELF_HWCAP sparc64_elf_hwcap(); | ||
153 | 165 | ||
154 | /* This yields a string that ld.so will use to load implementation | 166 | /* This yields a string that ld.so will use to load implementation |
155 | specific libraries for optimization. This is more specific in | 167 | specific libraries for optimization. This is more specific in |
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h index 0abd3a674e8f..67960a751f4d 100644 --- a/include/asm-sparc64/head.h +++ b/include/asm-sparc64/head.h | |||
@@ -4,12 +4,21 @@ | |||
4 | 4 | ||
5 | #include <asm/pstate.h> | 5 | #include <asm/pstate.h> |
6 | 6 | ||
7 | /* wrpr %g0, val, %gl */ | ||
8 | #define SET_GL(val) \ | ||
9 | .word 0xa1902000 | val | ||
10 | |||
11 | /* rdpr %gl, %gN */ | ||
12 | #define GET_GL_GLOBAL(N) \ | ||
13 | .word 0x81540000 | (N << 25) | ||
14 | |||
7 | #define KERNBASE 0x400000 | 15 | #define KERNBASE 0x400000 |
8 | 16 | ||
9 | #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) | 17 | #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) |
10 | 18 | ||
11 | #define __CHEETAH_ID 0x003e0014 | 19 | #define __CHEETAH_ID 0x003e0014 |
12 | #define __JALAPENO_ID 0x003e0016 | 20 | #define __JALAPENO_ID 0x003e0016 |
21 | #define __SERRANO_ID 0x003e0022 | ||
13 | 22 | ||
14 | #define CHEETAH_MANUF 0x003e | 23 | #define CHEETAH_MANUF 0x003e |
15 | #define CHEETAH_IMPL 0x0014 /* Ultra-III */ | 24 | #define CHEETAH_IMPL 0x0014 /* Ultra-III */ |
@@ -19,6 +28,12 @@ | |||
19 | #define PANTHER_IMPL 0x0019 /* Ultra-IV+ */ | 28 | #define PANTHER_IMPL 0x0019 /* Ultra-IV+ */ |
20 | #define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */ | 29 | #define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */ |
21 | 30 | ||
31 | #define BRANCH_IF_SUN4V(tmp1,label) \ | ||
32 | sethi %hi(is_sun4v), %tmp1; \ | ||
33 | lduw [%tmp1 + %lo(is_sun4v)], %tmp1; \ | ||
34 | brnz,pn %tmp1, label; \ | ||
35 | nop | ||
36 | |||
22 | #define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \ | 37 | #define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \ |
23 | rdpr %ver, %tmp1; \ | 38 | rdpr %ver, %tmp1; \ |
24 | sethi %hi(__CHEETAH_ID), %tmp2; \ | 39 | sethi %hi(__CHEETAH_ID), %tmp2; \ |
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h new file mode 100644 index 000000000000..612bf319753f --- /dev/null +++ b/include/asm-sparc64/hypervisor.h | |||
@@ -0,0 +1,2128 @@ | |||
1 | #ifndef _SPARC64_HYPERVISOR_H | ||
2 | #define _SPARC64_HYPERVISOR_H | ||
3 | |||
4 | /* Sun4v hypervisor interfaces and defines. | ||
5 | * | ||
6 | * Hypervisor calls are made via traps to software traps number 0x80 | ||
7 | * and above. Registers %o0 to %o5 serve as argument, status, and | ||
8 | * return value registers. | ||
9 | * | ||
10 | * There are two kinds of these traps. First there are the normal | ||
11 | * "fast traps" which use software trap 0x80 and encode the function | ||
12 | * to invoke by number in register %o5. Argument and return value | ||
13 | * handling is as follows: | ||
14 | * | ||
15 | * ----------------------------------------------- | ||
16 | * | %o5 | function number | undefined | | ||
17 | * | %o0 | argument 0 | return status | | ||
18 | * | %o1 | argument 1 | return value 1 | | ||
19 | * | %o2 | argument 2 | return value 2 | | ||
20 | * | %o3 | argument 3 | return value 3 | | ||
21 | * | %o4 | argument 4 | return value 4 | | ||
22 | * ----------------------------------------------- | ||
23 | * | ||
24 | * The second type are "hyper-fast traps" which encode the function | ||
25 | * number in the software trap number itself. So these use trap | ||
26 | * numbers > 0x80. The register usage for hyper-fast traps is as | ||
27 | * follows: | ||
28 | * | ||
29 | * ----------------------------------------------- | ||
30 | * | %o0 | argument 0 | return status | | ||
31 | * | %o1 | argument 1 | return value 1 | | ||
32 | * | %o2 | argument 2 | return value 2 | | ||
33 | * | %o3 | argument 3 | return value 3 | | ||
34 | * | %o4 | argument 4 | return value 4 | | ||
35 | * ----------------------------------------------- | ||
36 | * | ||
37 | * Registers providing explicit arguments to the hypervisor calls | ||
38 | * are volatile across the call. Upon return their values are | ||
39 | * undefined unless explicitly specified as containing a particular | ||
40 | * return value by the specific call. The return status is always | ||
41 | * returned in register %o0, zero indicates a successful execution of | ||
42 | * the hypervisor call and other values indicate an error status as | ||
43 | * defined below. So, for example, if a hyper-fast trap takes | ||
44 | * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across | ||
45 | * the call and %o3, %o4, and %o5 would be preserved. | ||
46 | * | ||
47 | * If the hypervisor trap is invalid, or the fast trap function number | ||
48 | * is invalid, HV_EBADTRAP will be returned in %o0. Also, all 64-bits | ||
49 | * of the argument and return values are significant. | ||
50 | */ | ||
51 | |||
52 | /* Trap numbers. */ | ||
53 | #define HV_FAST_TRAP 0x80 | ||
54 | #define HV_MMU_MAP_ADDR_TRAP 0x83 | ||
55 | #define HV_MMU_UNMAP_ADDR_TRAP 0x84 | ||
56 | #define HV_TTRACE_ADDENTRY_TRAP 0x85 | ||
57 | #define HV_CORE_TRAP 0xff | ||
58 | |||
59 | /* Error codes. */ | ||
60 | #define HV_EOK 0 /* Successful return */ | ||
61 | #define HV_ENOCPU 1 /* Invalid CPU id */ | ||
62 | #define HV_ENORADDR 2 /* Invalid real address */ | ||
63 | #define HV_ENOINTR 3 /* Invalid interrupt id */ | ||
64 | #define HV_EBADPGSZ 4 /* Invalid pagesize encoding */ | ||
65 | #define HV_EBADTSB 5 /* Invalid TSB description */ | ||
66 | #define HV_EINVAL 6 /* Invalid argument */ | ||
67 | #define HV_EBADTRAP 7 /* Invalid function number */ | ||
68 | #define HV_EBADALIGN 8 /* Invalid address alignment */ | ||
69 | #define HV_EWOULDBLOCK 9 /* Cannot complete w/o blocking */ | ||
70 | #define HV_ENOACCESS 10 /* No access to resource */ | ||
71 | #define HV_EIO 11 /* I/O error */ | ||
72 | #define HV_ECPUERROR 12 /* CPU in error state */ | ||
73 | #define HV_ENOTSUPPORTED 13 /* Function not supported */ | ||
74 | #define HV_ENOMAP 14 /* No mapping found */ | ||
75 | #define HV_ETOOMANY 15 /* Too many items specified */ | ||
76 | |||
77 | /* mach_exit() | ||
78 | * TRAP: HV_FAST_TRAP | ||
79 | * FUNCTION: HV_FAST_MACH_EXIT | ||
80 | * ARG0: exit code | ||
81 | * ERRORS: This service does not return. | ||
82 | * | ||
83 | * Stop all CPUs in the virtual domain and place them into the stopped | ||
84 | * state. The 64-bit exit code may be passed to a service entity as | ||
85 | * the domain's exit status. On systems without a service entity, the | ||
86 | * domain will undergo a reset, and the boot firmware will be | ||
87 | * reloaded. | ||
88 | * | ||
89 | * This function will never return to the guest that invokes it. | ||
90 | * | ||
91 | * Note: By convention an exit code of zero denotes a successful exit by | ||
92 | * the guest code. A non-zero exit code denotes a guest specific | ||
93 | * error indication. | ||
94 | * | ||
95 | */ | ||
96 | #define HV_FAST_MACH_EXIT 0x00 | ||
97 | |||
98 | /* Domain services. */ | ||
99 | |||
100 | /* mach_desc() | ||
101 | * TRAP: HV_FAST_TRAP | ||
102 | * FUNCTION: HV_FAST_MACH_DESC | ||
103 | * ARG0: buffer | ||
104 | * ARG1: length | ||
105 | * RET0: status | ||
106 | * RET1: length | ||
107 | * ERRORS: HV_EBADALIGN Buffer is badly aligned | ||
108 | * HV_ENORADDR Buffer is to an illegal real address. | ||
109 | * HV_EINVAL Buffer length is too small for complete | ||
110 | * machine description. | ||
111 | * | ||
112 | * Copy the most current machine description into the buffer indicated | ||
113 | * by the real address in ARG0. The buffer provided must be 16 byte | ||
114 | * aligned. Upon success or HV_EINVAL, this service returns the | ||
115 | * actual size of the machine description in the RET1 return value. | ||
116 | * | ||
117 | * Note: A method of determining the appropriate buffer size for the | ||
118 | * machine description is to first call this service with a buffer | ||
119 | * length of 0 bytes. | ||
120 | */ | ||
121 | #define HV_FAST_MACH_DESC 0x01 | ||
122 | |||
123 | /* mach_exit() | ||
124 | * TRAP: HV_FAST_TRAP | ||
125 | * FUNCTION: HV_FAST_MACH_SIR | ||
126 | * ERRORS: This service does not return. | ||
127 | * | ||
128 | * Perform a software initiated reset of the virtual machine domain. | ||
129 | * All CPUs are captured as soon as possible, all hardware devices are | ||
130 | * returned to the entry default state, and the domain is restarted at | ||
131 | * the SIR (trap type 0x04) real trap table (RTBA) entry point on one | ||
132 | * of the CPUs. The single CPU restarted is selected as determined by | ||
133 | * platform specific policy. Memory is preserved across this | ||
134 | * operation. | ||
135 | */ | ||
136 | #define HV_FAST_MACH_SIR 0x02 | ||
137 | |||
138 | /* mach_set_soft_state() | ||
139 | * TRAP: HV_FAST_TRAP | ||
140 | * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE | ||
141 | * ARG0: software state | ||
142 | * ARG1: software state description pointer | ||
143 | * RET0: status | ||
144 | * ERRORS: EINVAL software state not valid or software state | ||
145 | * description is not NULL terminated | ||
146 | * ENORADDR software state description pointer is not a | ||
147 | * valid real address | ||
148 | * EBADALIGNED software state description is not correctly | ||
149 | * aligned | ||
150 | * | ||
151 | * This allows the guest to report it's soft state to the hypervisor. There | ||
152 | * are two primary components to this state. The first part states whether | ||
153 | * the guest software is running or not. The second containts optional | ||
154 | * details specific to the software. | ||
155 | * | ||
156 | * The software state argument is defined below in HV_SOFT_STATE_*, and | ||
157 | * indicates whether the guest is operating normally or in a transitional | ||
158 | * state. | ||
159 | * | ||
160 | * The software state description argument is a real address of a data buffer | ||
161 | * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL | ||
162 | * terminated 7-bit ASCII string of up to 31 characters not including the | ||
163 | * NULL termination. | ||
164 | */ | ||
165 | #define HV_FAST_MACH_SET_SOFT_STATE 0x03 | ||
166 | #define HV_SOFT_STATE_NORMAL 0x01 | ||
167 | #define HV_SOFT_STATE_TRANSITION 0x02 | ||
168 | |||
169 | /* mach_get_soft_state() | ||
170 | * TRAP: HV_FAST_TRAP | ||
171 | * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE | ||
172 | * ARG0: software state description pointer | ||
173 | * RET0: status | ||
174 | * RET1: software state | ||
175 | * ERRORS: ENORADDR software state description pointer is not a | ||
176 | * valid real address | ||
177 | * EBADALIGNED software state description is not correctly | ||
178 | * aligned | ||
179 | * | ||
180 | * Retrieve the current value of the guest's software state. The rules | ||
181 | * for the software state pointer are the same as for mach_set_soft_state() | ||
182 | * above. | ||
183 | */ | ||
184 | #define HV_FAST_MACH_GET_SOFT_STATE 0x04 | ||
185 | |||
186 | /* CPU services. | ||
187 | * | ||
188 | * CPUs represent devices that can execute software threads. A single | ||
189 | * chip that contains multiple cores or strands is represented as | ||
190 | * multiple CPUs with unique CPU identifiers. CPUs are exported to | ||
191 | * OBP via the machine description (and to the OS via the OBP device | ||
192 | * tree). CPUs are always in one of three states: stopped, running, | ||
193 | * or error. | ||
194 | * | ||
195 | * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a | ||
196 | * CPU within a logical domain. Operations that are to be performed | ||
197 | * on multiple CPUs specify them via a CPU list. A CPU list is an | ||
198 | * array in real memory, of which each 16-bit word is a CPU ID. CPU | ||
199 | * lists are passed through the API as two arguments. The first is | ||
200 | * the number of entries (16-bit words) in the CPU list, and the | ||
201 | * second is the (real address) pointer to the CPU ID list. | ||
202 | */ | ||
203 | |||
204 | /* cpu_start() | ||
205 | * TRAP: HV_FAST_TRAP | ||
206 | * FUNCTION: HV_FAST_CPU_START | ||
207 | * ARG0: CPU ID | ||
208 | * ARG1: PC | ||
209 | * ARG1: RTBA | ||
210 | * ARG1: target ARG0 | ||
211 | * RET0: status | ||
212 | * ERRORS: ENOCPU Invalid CPU ID | ||
213 | * EINVAL Target CPU ID is not in the stopped state | ||
214 | * ENORADDR Invalid PC or RTBA real address | ||
215 | * EBADALIGN Unaligned PC or unaligned RTBA | ||
216 | * EWOULDBLOCK Starting resources are not available | ||
217 | * | ||
218 | * Start CPU with given CPU ID with PC in %pc and with a real trap | ||
219 | * base address value of RTBA. The indicated CPU must be in the | ||
220 | * stopped state. The supplied RTBA must be aligned on a 256 byte | ||
221 | * boundary. On successful completion, the specified CPU will be in | ||
222 | * the running state and will be supplied with "target ARG0" in %o0 | ||
223 | * and RTBA in %tba. | ||
224 | */ | ||
225 | #define HV_FAST_CPU_START 0x10 | ||
226 | |||
227 | /* cpu_stop() | ||
228 | * TRAP: HV_FAST_TRAP | ||
229 | * FUNCTION: HV_FAST_CPU_STOP | ||
230 | * ARG0: CPU ID | ||
231 | * RET0: status | ||
232 | * ERRORS: ENOCPU Invalid CPU ID | ||
233 | * EINVAL Target CPU ID is the current cpu | ||
234 | * EINVAL Target CPU ID is not in the running state | ||
235 | * EWOULDBLOCK Stopping resources are not available | ||
236 | * ENOTSUPPORTED Not supported on this platform | ||
237 | * | ||
238 | * The specified CPU is stopped. The indicated CPU must be in the | ||
239 | * running state. On completion, it will be in the stopped state. It | ||
240 | * is not legal to stop the current CPU. | ||
241 | * | ||
242 | * Note: As this service cannot be used to stop the current cpu, this service | ||
243 | * may not be used to stop the last running CPU in a domain. To stop | ||
244 | * and exit a running domain, a guest must use the mach_exit() service. | ||
245 | */ | ||
246 | #define HV_FAST_CPU_STOP 0x11 | ||
247 | |||
248 | /* cpu_yield() | ||
249 | * TRAP: HV_FAST_TRAP | ||
250 | * FUNCTION: HV_FAST_CPU_YIELD | ||
251 | * RET0: status | ||
252 | * ERRORS: No possible error. | ||
253 | * | ||
254 | * Suspend execution on the current CPU. Execution will resume when | ||
255 | * an interrupt (device, %stick_compare, or cross-call) is targeted to | ||
256 | * the CPU. On some CPUs, this API may be used by the hypervisor to | ||
257 | * save power by disabling hardware strands. | ||
258 | */ | ||
259 | #define HV_FAST_CPU_YIELD 0x12 | ||
260 | |||
261 | #ifndef __ASSEMBLY__ | ||
262 | extern unsigned long sun4v_cpu_yield(void); | ||
263 | #endif | ||
264 | |||
265 | /* cpu_qconf() | ||
266 | * TRAP: HV_FAST_TRAP | ||
267 | * FUNCTION: HV_FAST_CPU_QCONF | ||
268 | * ARG0: queue | ||
269 | * ARG1: base real address | ||
270 | * ARG2: number of entries | ||
271 | * RET0: status | ||
272 | * ERRORS: ENORADDR Invalid base real address | ||
273 | * EINVAL Invalid queue or number of entries is less | ||
274 | * than 2 or too large. | ||
275 | * EBADALIGN Base real address is not correctly aligned | ||
276 | * for size. | ||
277 | * | ||
278 | * Configure the given queue to be placed at the given base real | ||
279 | * address, with the given number of entries. The number of entries | ||
280 | * must be a power of 2. The base real address must be aligned | ||
281 | * exactly to match the queue size. Each queue entry is 64 bytes | ||
282 | * long, so for example a 32 entry queue must be aligned on a 2048 | ||
283 | * byte real address boundary. | ||
284 | * | ||
285 | * The specified queue is unconfigured if the number of entries is given | ||
286 | * as zero. | ||
287 | * | ||
288 | * For the current version of this API service, the argument queue is defined | ||
289 | * as follows: | ||
290 | * | ||
291 | * queue description | ||
292 | * ----- ------------------------- | ||
293 | * 0x3c cpu mondo queue | ||
294 | * 0x3d device mondo queue | ||
295 | * 0x3e resumable error queue | ||
296 | * 0x3f non-resumable error queue | ||
297 | * | ||
298 | * Note: The maximum number of entries for each queue for a specific cpu may | ||
299 | * be determined from the machine description. | ||
300 | */ | ||
301 | #define HV_FAST_CPU_QCONF 0x14 | ||
302 | #define HV_CPU_QUEUE_CPU_MONDO 0x3c | ||
303 | #define HV_CPU_QUEUE_DEVICE_MONDO 0x3d | ||
304 | #define HV_CPU_QUEUE_RES_ERROR 0x3e | ||
305 | #define HV_CPU_QUEUE_NONRES_ERROR 0x3f | ||
306 | |||
307 | #ifndef __ASSEMBLY__ | ||
308 | extern unsigned long sun4v_cpu_qconf(unsigned long type, | ||
309 | unsigned long queue_paddr, | ||
310 | unsigned long num_queue_entries); | ||
311 | #endif | ||
312 | |||
313 | /* cpu_qinfo() | ||
314 | * TRAP: HV_FAST_TRAP | ||
315 | * FUNCTION: HV_FAST_CPU_QINFO | ||
316 | * ARG0: queue | ||
317 | * RET0: status | ||
318 | * RET1: base real address | ||
319 | * RET2: number of entries | ||
320 | * ERRORS: EINVAL Invalid queue | ||
321 | * | ||
322 | * Return the configuration info for the given queue. The base real | ||
323 | * address and number of entries of the defined queue are returned. | ||
324 | * The queue argument values are the same as for cpu_qconf() above. | ||
325 | * | ||
326 | * If the specified queue is a valid queue number, but no queue has | ||
327 | * been defined, the number of entries will be set to zero and the | ||
328 | * base real address returned is undefined. | ||
329 | */ | ||
330 | #define HV_FAST_CPU_QINFO 0x15 | ||
331 | |||
332 | /* cpu_mondo_send() | ||
333 | * TRAP: HV_FAST_TRAP | ||
334 | * FUNCTION: HV_FAST_CPU_MONDO_SEND | ||
335 | * ARG0-1: CPU list | ||
336 | * ARG2: data real address | ||
337 | * RET0: status | ||
338 | * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list | ||
339 | * is not 2-byte aligned. | ||
340 | * ENORADDR Invalid data mondo address, or invalid cpu list | ||
341 | * address. | ||
342 | * ENOCPU Invalid cpu in CPU list | ||
343 | * EWOULDBLOCK Some or all of the listed CPUs did not receive | ||
344 | * the mondo | ||
345 | * ECPUERROR One or more of the listed CPUs are in error | ||
346 | * state, use HV_FAST_CPU_STATE to see which ones | ||
347 | * EINVAL CPU list includes caller's CPU ID | ||
348 | * | ||
349 | * Send a mondo interrupt to the CPUs in the given CPU list with the | ||
350 | * 64-bytes at the given data real address. The data must be 64-byte | ||
351 | * aligned. The mondo data will be delivered to the cpu_mondo queues | ||
352 | * of the recipient CPUs. | ||
353 | * | ||
354 | * In all cases, error or not, the CPUs in the CPU list to which the | ||
355 | * mondo has been successfully delivered will be indicated by having | ||
356 | * their entry in CPU list updated with the value 0xffff. | ||
357 | */ | ||
358 | #define HV_FAST_CPU_MONDO_SEND 0x42 | ||
359 | |||
360 | #ifndef __ASSEMBLY__ | ||
361 | extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa); | ||
362 | #endif | ||
363 | |||
364 | /* cpu_myid() | ||
365 | * TRAP: HV_FAST_TRAP | ||
366 | * FUNCTION: HV_FAST_CPU_MYID | ||
367 | * RET0: status | ||
368 | * RET1: CPU ID | ||
369 | * ERRORS: No errors defined. | ||
370 | * | ||
371 | * Return the hypervisor ID handle for the current CPU. Used by a | ||
372 | * virtual CPU to discover its own identity. | ||
373 | */ | ||
374 | #define HV_FAST_CPU_MYID 0x16 | ||
375 | |||
376 | /* cpu_state() | ||
377 | * TRAP: HV_FAST_TRAP | ||
378 | * FUNCTION: HV_FAST_CPU_STATE | ||
379 | * ARG0: CPU ID | ||
380 | * RET0: status | ||
381 | * RET1: state | ||
382 | * ERRORS: ENOCPU Invalid CPU ID | ||
383 | * | ||
384 | * Retrieve the current state of the CPU with the given CPU ID. | ||
385 | */ | ||
386 | #define HV_FAST_CPU_STATE 0x17 | ||
387 | #define HV_CPU_STATE_STOPPED 0x01 | ||
388 | #define HV_CPU_STATE_RUNNING 0x02 | ||
389 | #define HV_CPU_STATE_ERROR 0x03 | ||
390 | |||
391 | #ifndef __ASSEMBLY__ | ||
392 | extern long sun4v_cpu_state(unsigned long cpuid); | ||
393 | #endif | ||
394 | |||
395 | /* cpu_set_rtba() | ||
396 | * TRAP: HV_FAST_TRAP | ||
397 | * FUNCTION: HV_FAST_CPU_SET_RTBA | ||
398 | * ARG0: RTBA | ||
399 | * RET0: status | ||
400 | * RET1: previous RTBA | ||
401 | * ERRORS: ENORADDR Invalid RTBA real address | ||
402 | * EBADALIGN RTBA is incorrectly aligned for a trap table | ||
403 | * | ||
404 | * Set the real trap base address of the local cpu to the given RTBA. | ||
405 | * The supplied RTBA must be aligned on a 256 byte boundary. Upon | ||
406 | * success the previous value of the RTBA is returned in RET1. | ||
407 | * | ||
408 | * Note: This service does not affect %tba | ||
409 | */ | ||
410 | #define HV_FAST_CPU_SET_RTBA 0x18 | ||
411 | |||
412 | /* cpu_get_rtba() | ||
413 | * TRAP: HV_FAST_TRAP | ||
414 | * FUNCTION: HV_FAST_CPU_GET_RTBA | ||
415 | * RET0: status | ||
416 | * RET1: previous RTBA | ||
417 | * ERRORS: No possible error. | ||
418 | * | ||
419 | * Returns the current value of RTBA in RET1. | ||
420 | */ | ||
421 | #define HV_FAST_CPU_GET_RTBA 0x19 | ||
422 | |||
423 | /* MMU services. | ||
424 | * | ||
425 | * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls. | ||
426 | */ | ||
427 | #ifndef __ASSEMBLY__ | ||
/* Describes one TSB to the hypervisor for the mmu_tsb_ctx{,non}0()
 * calls.  Field offsets correspond to the HV_TSB_DESCR_*_OFFSET
 * constants defined below.
 */
struct hv_tsb_descr {
	unsigned short		pgsz_idx;	/* Page size index; must match the
						 * smallest page size set in
						 * pgsz_mask (HV_PGSZ_IDX_*)
						 */
	unsigned short		assoc;		/* TSB associativity */
	unsigned int		num_ttes;	/* TSB size, in TTEs */
	unsigned int		ctx_idx;	/* Context index */
	unsigned int		pgsz_mask;	/* Page size bitmask (HV_PGSZ_MASK_*) */
	unsigned long		tsb_base;	/* TSB base real address */
	unsigned long		resv;		/* Reserved, presumably must be zero
						 * -- TODO confirm against spec */
};
437 | #endif | ||
438 | #define HV_TSB_DESCR_PGSZ_IDX_OFFSET 0x00 | ||
439 | #define HV_TSB_DESCR_ASSOC_OFFSET 0x02 | ||
440 | #define HV_TSB_DESCR_NUM_TTES_OFFSET 0x04 | ||
441 | #define HV_TSB_DESCR_CTX_IDX_OFFSET 0x08 | ||
442 | #define HV_TSB_DESCR_PGSZ_MASK_OFFSET 0x0c | ||
443 | #define HV_TSB_DESCR_TSB_BASE_OFFSET 0x10 | ||
444 | #define HV_TSB_DESCR_RESV_OFFSET 0x18 | ||
445 | |||
446 | /* Page size bitmask. */ | ||
447 | #define HV_PGSZ_MASK_8K (1 << 0) | ||
448 | #define HV_PGSZ_MASK_64K (1 << 1) | ||
449 | #define HV_PGSZ_MASK_512K (1 << 2) | ||
450 | #define HV_PGSZ_MASK_4MB (1 << 3) | ||
451 | #define HV_PGSZ_MASK_32MB (1 << 4) | ||
452 | #define HV_PGSZ_MASK_256MB (1 << 5) | ||
453 | #define HV_PGSZ_MASK_2GB (1 << 6) | ||
454 | #define HV_PGSZ_MASK_16GB (1 << 7) | ||
455 | |||
456 | /* Page size index. The value given in the TSB descriptor must correspond | ||
457 | * to the smallest page size specified in the pgsz_mask page size bitmask. | ||
458 | */ | ||
459 | #define HV_PGSZ_IDX_8K 0 | ||
460 | #define HV_PGSZ_IDX_64K 1 | ||
461 | #define HV_PGSZ_IDX_512K 2 | ||
462 | #define HV_PGSZ_IDX_4MB 3 | ||
463 | #define HV_PGSZ_IDX_32MB 4 | ||
464 | #define HV_PGSZ_IDX_256MB 5 | ||
465 | #define HV_PGSZ_IDX_2GB 6 | ||
466 | #define HV_PGSZ_IDX_16GB 7 | ||
467 | |||
468 | /* MMU fault status area. | ||
469 | * | ||
470 | * MMU related faults have their status and fault address information | ||
471 | * placed into a memory region made available by privileged code. Each | ||
472 | * virtual processor must make a mmu_fault_area_conf() call to tell the | ||
473 | * hypervisor where that processor's fault status should be stored. | ||
474 | * | ||
475 | * The fault status block is a multiple of 64-bytes and must be aligned | ||
476 | * on a 64-byte boundary. | ||
477 | */ | ||
478 | #ifndef __ASSEMBLY__ | ||
/* Per-CPU MMU fault status area, registered with the hypervisor via
 * mmu_fault_area_conf().  Must be 64-byte aligned; field offsets
 * correspond to the HV_FAULT_*_OFFSET constants defined below.
 * The i_* fields describe instruction-MMU faults, the d_* fields
 * data-MMU faults, each group padded out to 64 bytes.
 */
struct hv_fault_status {
	unsigned long		i_fault_type;	/* Instruction fault type (HV_FAULT_TYPE_*) */
	unsigned long		i_fault_addr;	/* Instruction fault address */
	unsigned long		i_fault_ctx;	/* Instruction fault context */
	unsigned long		i_reserved[5];	/* Pads instruction group to 64 bytes */
	unsigned long		d_fault_type;	/* Data fault type (HV_FAULT_TYPE_*) */
	unsigned long		d_fault_addr;	/* Data fault address */
	unsigned long		d_fault_ctx;	/* Data fault context */
	unsigned long		d_reserved[5];	/* Pads data group to 64 bytes */
};
489 | #endif | ||
490 | #define HV_FAULT_I_TYPE_OFFSET 0x00 | ||
491 | #define HV_FAULT_I_ADDR_OFFSET 0x08 | ||
492 | #define HV_FAULT_I_CTX_OFFSET 0x10 | ||
493 | #define HV_FAULT_D_TYPE_OFFSET 0x40 | ||
494 | #define HV_FAULT_D_ADDR_OFFSET 0x48 | ||
495 | #define HV_FAULT_D_CTX_OFFSET 0x50 | ||
496 | |||
497 | #define HV_FAULT_TYPE_FAST_MISS 1 | ||
498 | #define HV_FAULT_TYPE_FAST_PROT 2 | ||
499 | #define HV_FAULT_TYPE_MMU_MISS 3 | ||
500 | #define HV_FAULT_TYPE_INV_RA 4 | ||
501 | #define HV_FAULT_TYPE_PRIV_VIOL 5 | ||
502 | #define HV_FAULT_TYPE_PROT_VIOL 6 | ||
503 | #define HV_FAULT_TYPE_NFO 7 | ||
504 | #define HV_FAULT_TYPE_NFO_SEFF 8 | ||
505 | #define HV_FAULT_TYPE_INV_VA 9 | ||
506 | #define HV_FAULT_TYPE_INV_ASI 10 | ||
507 | #define HV_FAULT_TYPE_NC_ATOMIC 11 | ||
508 | #define HV_FAULT_TYPE_PRIV_ACT 12 | ||
509 | #define HV_FAULT_TYPE_RESV1 13 | ||
510 | #define HV_FAULT_TYPE_UNALIGNED 14 | ||
511 | #define HV_FAULT_TYPE_INV_PGSZ 15 | ||
512 | /* Values 16 --> -2 are reserved. */ | ||
513 | #define HV_FAULT_TYPE_MULTIPLE -1 | ||
514 | |||
515 | /* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(), | ||
516 | * and mmu_{map,unmap}_perm_addr(). | ||
517 | */ | ||
518 | #define HV_MMU_DMMU 0x01 | ||
519 | #define HV_MMU_IMMU 0x02 | ||
520 | #define HV_MMU_ALL (HV_MMU_DMMU | HV_MMU_IMMU) | ||
521 | |||
522 | /* mmu_map_addr() | ||
523 | * TRAP: HV_MMU_MAP_ADDR_TRAP | ||
524 | * ARG0: virtual address | ||
525 | * ARG1: mmu context | ||
526 | * ARG2: TTE | ||
527 | * ARG3: flags (HV_MMU_{IMMU,DMMU}) | ||
528 | * ERRORS: EINVAL Invalid virtual address, mmu context, or flags | ||
529 | * EBADPGSZ Invalid page size value | ||
530 | * ENORADDR Invalid real address in TTE | ||
531 | * | ||
532 | * Create a non-permanent mapping using the given TTE, virtual | ||
533 | * address, and mmu context. The flags argument determines which | ||
534 | * (data, or instruction, or both) TLB the mapping gets loaded into. | ||
535 | * | ||
536 | * The behavior is undefined if the valid bit is clear in the TTE. | ||
537 | * | ||
538 | * Note: This API call is for privileged code to specify temporary translation | ||
539 | * mappings without the need to create and manage a TSB. | ||
540 | */ | ||
541 | |||
542 | /* mmu_unmap_addr() | ||
543 | * TRAP: HV_MMU_UNMAP_ADDR_TRAP | ||
544 | * ARG0: virtual address | ||
545 | * ARG1: mmu context | ||
546 | * ARG2: flags (HV_MMU_{IMMU,DMMU}) | ||
547 | * ERRORS: EINVAL Invalid virtual address, mmu context, or flags | ||
548 | * | ||
549 | * Demaps the given virtual address in the given mmu context on this | ||
550 | * CPU. This function is intended to be used to demap pages mapped | ||
551 | * with mmu_map_addr. This service is equivalent to invoking | ||
552 | * mmu_demap_page() with only the current CPU in the CPU list. The | ||
553 | * flags argument determines which (data, or instruction, or both) TLB | ||
554 | * the mapping gets unmapped from. | ||
555 | * | ||
556 | * Attempting to perform an unmap operation for a previously defined | ||
557 | * permanent mapping will have undefined results. | ||
558 | */ | ||
559 | |||
560 | /* mmu_tsb_ctx0() | ||
561 | * TRAP: HV_FAST_TRAP | ||
562 | * FUNCTION: HV_FAST_MMU_TSB_CTX0 | ||
563 | * ARG0: number of TSB descriptions | ||
564 | * ARG1: TSB descriptions pointer | ||
565 | * RET0: status | ||
566 | * ERRORS: ENORADDR Invalid TSB descriptions pointer or | ||
567 | * TSB base within a descriptor | ||
568 | * EBADALIGN TSB descriptions pointer is not aligned | ||
569 | * to an 8-byte boundary, or TSB base | ||
570 | * within a descriptor is not aligned for | ||
571 | * the given TSB size | ||
572 | * EBADPGSZ Invalid page size in a TSB descriptor | ||
573 | * EBADTSB Invalid associativity or size in a TSB | ||
574 | * descriptor | ||
575 | * EINVAL Invalid number of TSB descriptions, or | ||
576 | * invalid context index in a TSB | ||
577 | * descriptor, or index page size not | ||
578 | * equal to smallest page size in page | ||
579 | * size bitmask field. | ||
580 | * | ||
581 | * Configures the TSBs for the current CPU for virtual addresses with | ||
582 | * context zero. The TSB descriptions pointer is a pointer to an | ||
583 | * array of the given number of TSB descriptions. | ||
584 | * | ||
585 | * Note: The maximum number of TSBs available to a virtual CPU is given by the | ||
586 | * mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the | ||
587 | * machine description. | ||
588 | */ | ||
589 | #define HV_FAST_MMU_TSB_CTX0 0x20 | ||
590 | |||
591 | /* mmu_tsb_ctxnon0() | ||
592 | * TRAP: HV_FAST_TRAP | ||
593 | * FUNCTION: HV_FAST_MMU_TSB_CTXNON0 | ||
594 | * ARG0: number of TSB descriptions | ||
595 | * ARG1: TSB descriptions pointer | ||
596 | * RET0: status | ||
597 | * ERRORS: Same as for mmu_tsb_ctx0() above. | ||
598 | * | ||
599 | * Configures the TSBs for the current CPU for virtual addresses with | ||
600 | * non-zero contexts. The TSB descriptions pointer is a pointer to an | ||
601 | * array of the given number of TSB descriptions. | ||
602 | * | ||
603 | * Note: A maximum of 16 TSBs may be specified in the TSB description list. | ||
604 | */ | ||
605 | #define HV_FAST_MMU_TSB_CTXNON0 0x21 | ||
606 | |||
607 | /* mmu_demap_page() | ||
608 | * TRAP: HV_FAST_TRAP | ||
609 | * FUNCTION: HV_FAST_MMU_DEMAP_PAGE | ||
610 | * ARG0: reserved, must be zero | ||
611 | * ARG1: reserved, must be zero | ||
612 | * ARG2: virtual address | ||
613 | * ARG3: mmu context | ||
614 | * ARG4: flags (HV_MMU_{IMMU,DMMU}) | ||
615 | * RET0: status | ||
616 | * ERRORS: EINVAL Invalid virtual address, context, or | ||
617 | * flags value | ||
618 | * ENOTSUPPORTED ARG0 or ARG1 is non-zero | ||
619 | * | ||
620 | * Demaps any page mapping of the given virtual address in the given | ||
621 | * mmu context for the current virtual CPU. Any virtually tagged | ||
622 | * caches are guaranteed to be kept consistent. The flags argument | ||
623 | * determines which TLB (instruction, or data, or both) participate in | ||
624 | * the operation. | ||
625 | * | ||
626 | * ARG0 and ARG1 are both reserved and must be set to zero. | ||
627 | */ | ||
628 | #define HV_FAST_MMU_DEMAP_PAGE 0x22 | ||
629 | |||
630 | /* mmu_demap_ctx() | ||
631 | * TRAP: HV_FAST_TRAP | ||
632 | * FUNCTION: HV_FAST_MMU_DEMAP_CTX | ||
633 | * ARG0: reserved, must be zero | ||
634 | * ARG1: reserved, must be zero | ||
635 | * ARG2: mmu context | ||
636 | * ARG3: flags (HV_MMU_{IMMU,DMMU}) | ||
637 | * RET0: status | ||
638 | * ERRORS: EINVAL Invalid context or flags value | ||
639 | * ENOTSUPPORTED ARG0 or ARG1 is non-zero | ||
640 | * | ||
641 | * Demaps all non-permanent virtual page mappings previously specified | ||
642 | * for the given context for the current virtual CPU. Any virtual | ||
643 | * tagged caches are guaranteed to be kept consistent. The flags | ||
644 | * argument determines which TLB (instruction, or data, or both) | ||
645 | * participate in the operation. | ||
646 | * | ||
647 | * ARG0 and ARG1 are both reserved and must be set to zero. | ||
648 | */ | ||
649 | #define HV_FAST_MMU_DEMAP_CTX 0x23 | ||
650 | |||
651 | /* mmu_demap_all() | ||
652 | * TRAP: HV_FAST_TRAP | ||
653 | * FUNCTION: HV_FAST_MMU_DEMAP_ALL | ||
654 | * ARG0: reserved, must be zero | ||
655 | * ARG1: reserved, must be zero | ||
656 | * ARG2: flags (HV_MMU_{IMMU,DMMU}) | ||
657 | * RET0: status | ||
658 | * ERRORS: EINVAL Invalid flags value | ||
659 | * ENOTSUPPORTED ARG0 or ARG1 is non-zero | ||
660 | * | ||
661 | * Demaps all non-permanent virtual page mappings previously specified | ||
662 | * for the current virtual CPU. Any virtual tagged caches are | ||
663 | * guaranteed to be kept consistent. The flags argument determines | ||
664 | * which TLB (instruction, or data, or both) participate in the | ||
665 | * operation. | ||
666 | * | ||
667 | * ARG0 and ARG1 are both reserved and must be set to zero. | ||
668 | */ | ||
669 | #define HV_FAST_MMU_DEMAP_ALL 0x24 | ||
670 | |||
671 | /* mmu_map_perm_addr() | ||
672 | * TRAP: HV_FAST_TRAP | ||
673 | * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR | ||
674 | * ARG0: virtual address | ||
675 | * ARG1: reserved, must be zero | ||
676 | * ARG2: TTE | ||
677 | * ARG3: flags (HV_MMU_{IMMU,DMMU}) | ||
678 | * RET0: status | ||
679 | * ERRORS: EINVAL Invalid virtual address or flags value | ||
680 | * EBADPGSZ Invalid page size value | ||
681 | * ENORADDR Invalid real address in TTE | ||
682 | * ETOOMANY Too many mappings (max of 8 reached) | ||
683 | * | ||
684 | * Create a permanent mapping using the given TTE and virtual address | ||
685 | * for context 0 on the calling virtual CPU. A maximum of 8 such | ||
686 | * permanent mappings may be specified by privileged code. Mappings | ||
687 | * may be removed with mmu_unmap_perm_addr(). | ||
688 | * | ||
689 | * The behavior is undefined if a TTE with the valid bit clear is given. | ||
690 | * | ||
691 | * Note: This call is used to specify address space mappings for which | ||
692 | * privileged code does not expect to receive misses. For example, | ||
693 | * this mechanism can be used to map kernel nucleus code and data. | ||
694 | */ | ||
695 | #define HV_FAST_MMU_MAP_PERM_ADDR 0x25 | ||
696 | |||
697 | /* mmu_fault_area_conf() | ||
698 | * TRAP: HV_FAST_TRAP | ||
699 | * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF | ||
700 | * ARG0: real address | ||
701 | * RET0: status | ||
702 | * RET1: previous mmu fault area real address | ||
703 | * ERRORS: ENORADDR Invalid real address | ||
704 | * EBADALIGN Invalid alignment for fault area | ||
705 | * | ||
706 | * Configure the MMU fault status area for the calling CPU. A 64-byte | ||
707 | * aligned real address specifies where MMU fault status information | ||
708 | * is placed. The return value is the previously specified area, or 0 | ||
709 | * for the first invocation. Specifying a fault area at real address | ||
710 | * 0 is not allowed. | ||
711 | */ | ||
712 | #define HV_FAST_MMU_FAULT_AREA_CONF 0x26 | ||
713 | |||
714 | /* mmu_enable() | ||
715 | * TRAP: HV_FAST_TRAP | ||
716 | * FUNCTION: HV_FAST_MMU_ENABLE | ||
717 | * ARG0: enable flag | ||
718 | * ARG1: return target address | ||
719 | * RET0: status | ||
720 | * ERRORS: ENORADDR Invalid real address when disabling | ||
721 | * translation. | ||
722 | * EBADALIGN The return target address is not | ||
723 | * aligned to an instruction. | ||
724 | * EINVAL The enable flag request the current | ||
725 | * operating mode (e.g. disable if already | ||
726 | * disabled) | ||
727 | * | ||
728 | * Enable or disable virtual address translation for the calling CPU | ||
729 | * within the virtual machine domain. If the enable flag is zero, | ||
730 | * translation is disabled, any non-zero value will enable | ||
731 | * translation. | ||
732 | * | ||
733 | * When this function returns, the newly selected translation mode | ||
734 | * will be active. If the mmu is being enabled, then the return | ||
735 | * target address is a virtual address else it is a real address. | ||
736 | * | ||
737 | * Upon successful completion, control will be returned to the given | ||
738 | * return target address (ie. the cpu will jump to that address). On | ||
739 | * failure, the previous mmu mode remains and the trap simply returns | ||
740 | * as normal with the appropriate error code in RET0. | ||
741 | */ | ||
742 | #define HV_FAST_MMU_ENABLE 0x27 | ||
743 | |||
744 | /* mmu_unmap_perm_addr() | ||
745 | * TRAP: HV_FAST_TRAP | ||
746 | * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR | ||
747 | * ARG0: virtual address | ||
748 | * ARG1: reserved, must be zero | ||
749 | * ARG2: flags (HV_MMU_{IMMU,DMMU}) | ||
750 | * RET0: status | ||
751 | * ERRORS: EINVAL Invalid virtual address or flags value | ||
752 | * ENOMAP Specified mapping was not found | ||
753 | * | ||
754 | * Demaps any permanent page mapping (established via | ||
755 | * mmu_map_perm_addr()) at the given virtual address for context 0 on | ||
756 | * the current virtual CPU. Any virtual tagged caches are guaranteed | ||
757 | * to be kept consistent. | ||
758 | */ | ||
759 | #define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28 | ||
760 | |||
761 | /* mmu_tsb_ctx0_info() | ||
762 | * TRAP: HV_FAST_TRAP | ||
763 | * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO | ||
764 | * ARG0: max TSBs | ||
765 | * ARG1: buffer pointer | ||
766 | * RET0: status | ||
767 | * RET1: number of TSBs | ||
768 | * ERRORS: EINVAL Supplied buffer is too small | ||
769 | * EBADALIGN The buffer pointer is badly aligned | ||
770 | * ENORADDR Invalid real address for buffer pointer | ||
771 | * | ||
772 | * Return the TSB configuration as previously defined by mmu_tsb_ctx0() | ||
773 | * into the provided buffer. The size of the buffer is given in ARG1 | ||
774 | * in terms of the number of TSB description entries. | ||
775 | * | ||
776 | * Upon return, RET1 always contains the number of TSB descriptions | ||
777 | * previously configured. If zero TSBs were configured, EOK is | ||
778 | * returned with RET1 containing 0. | ||
779 | */ | ||
780 | #define HV_FAST_MMU_TSB_CTX0_INFO 0x29 | ||
781 | |||
782 | /* mmu_tsb_ctxnon0_info() | ||
783 | * TRAP: HV_FAST_TRAP | ||
784 | * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO | ||
785 | * ARG0: max TSBs | ||
786 | * ARG1: buffer pointer | ||
787 | * RET0: status | ||
788 | * RET1: number of TSBs | ||
789 | * ERRORS: EINVAL Supplied buffer is too small | ||
790 | * EBADALIGN The buffer pointer is badly aligned | ||
791 | * ENORADDR Invalid real address for buffer pointer | ||
792 | * | ||
793 | * Return the TSB configuration as previously defined by | ||
794 | * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer | ||
795 | * is given in ARG1 in terms of the number of TSB description entries. | ||
796 | * | ||
797 | * Upon return, RET1 always contains the number of TSB descriptions | ||
798 | * previously configured. If zero TSBs were configured, EOK is | ||
799 | * returned with RET1 containing 0. | ||
800 | */ | ||
801 | #define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a | ||
802 | |||
803 | /* mmu_fault_area_info() | ||
804 | * TRAP: HV_FAST_TRAP | ||
805 | * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO | ||
806 | * RET0: status | ||
807 | * RET1: fault area real address | ||
808 | * ERRORS: No errors defined. | ||
809 | * | ||
810 | * Return the currently defined MMU fault status area for the current | ||
811 | * CPU. The real address of the fault status area is returned in | ||
812 | * RET1, or 0 is returned in RET1 if no fault status area is defined. | ||
813 | * | ||
814 | * Note: mmu_fault_area_conf() may be called with the return value (RET1) | ||
815 | * from this service if there is a need to save and restore the fault | ||
816 | * area for a cpu. | ||
817 | */ | ||
818 | #define HV_FAST_MMU_FAULT_AREA_INFO 0x2b | ||
819 | |||
820 | /* Cache and Memory services. */ | ||
821 | |||
822 | /* mem_scrub() | ||
823 | * TRAP: HV_FAST_TRAP | ||
824 | * FUNCTION: HV_FAST_MEM_SCRUB | ||
825 | * ARG0: real address | ||
826 | * ARG1: length | ||
827 | * RET0: status | ||
828 | * RET1: length scrubbed | ||
829 | * ERRORS: ENORADDR Invalid real address | ||
830 | * EBADALIGN Start address or length are not correctly | ||
831 | * aligned | ||
832 | * EINVAL Length is zero | ||
833 | * | ||
834 | * Zero the memory contents in the range real address to real address | ||
835 | * plus length minus 1. Also, valid ECC will be generated for that | ||
836 | * memory address range. Scrubbing is started at the given real | ||
837 | * address, but may not scrub the entire given length. The actual | ||
838 | * length scrubbed will be returned in RET1. | ||
839 | * | ||
840 | * The real address and length must be aligned on an 8K boundary, or | ||
841 | * contain the start address and length from a sun4v error report. | ||
842 | * | ||
843 | * Note: There are two uses for this function. The first use is to block clear | ||
844 | * and initialize memory and the second is to scrub an uncorrectable | ||
845 | * error reported via a resumable or non-resumable trap. The second | ||
846 | * use requires the arguments to be equal to the real address and length | ||
847 | * provided in a sun4v memory error report. | ||
848 | */ | ||
849 | #define HV_FAST_MEM_SCRUB 0x31 | ||
850 | |||
851 | /* mem_sync() | ||
852 | * TRAP: HV_FAST_TRAP | ||
853 | * FUNCTION: HV_FAST_MEM_SYNC | ||
854 | * ARG0: real address | ||
855 | * ARG1: length | ||
856 | * RET0: status | ||
857 | * RET1: length synced | ||
858 | * ERRORS: ENORADDR Invalid real address | ||
859 | * EBADALIGN Start address or length are not correctly | ||
860 | * aligned | ||
861 | * EINVAL Length is zero | ||
862 | * | ||
863 | * Force the next access within the real address to real address plus | ||
864 | * length minus 1 to be fetched from main system memory. Less than | ||
865 | * the given length may be synced, the actual amount synced is | ||
866 | * returned in RET1. The real address and length must be aligned on | ||
867 | * an 8K boundary. | ||
868 | */ | ||
869 | #define HV_FAST_MEM_SYNC 0x32 | ||
870 | |||
871 | /* Time of day services. | ||
872 | * | ||
873 | * The hypervisor maintains the time of day on a per-domain basis. | ||
874 | * Changing the time of day in one domain does not affect the time of | ||
875 | * day on any other domain. | ||
876 | * | ||
877 | * Time is described by a single unsigned 64-bit word which is the | ||
878 | * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1, | ||
879 | * 1970). | ||
880 | */ | ||
881 | |||
882 | /* tod_get() | ||
883 | * TRAP: HV_FAST_TRAP | ||
884 | * FUNCTION: HV_FAST_TOD_GET | ||
885 | * RET0: status | ||
886 | * RET1: TOD | ||
887 | * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable | ||
888 | * ENOTSUPPORTED If TOD not supported on this platform | ||
889 | * | ||
890 | * Return the current time of day. May block if TOD access is | ||
891 | * temporarily not possible. | ||
892 | */ | ||
893 | #define HV_FAST_TOD_GET 0x50 | ||
894 | |||
895 | /* tod_set() | ||
896 | * TRAP: HV_FAST_TRAP | ||
897 | * FUNCTION: HV_FAST_TOD_SET | ||
898 | * ARG0: TOD | ||
899 | * RET0: status | ||
900 | * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable | ||
901 | * ENOTSUPPORTED If TOD not supported on this platform | ||
902 | * | ||
903 | * The current time of day is set to the value specified in ARG0. May | ||
904 | * block if TOD access is temporarily not possible. | ||
905 | */ | ||
906 | #define HV_FAST_TOD_SET 0x51 | ||
907 | |||
908 | /* Console services */ | ||
909 | |||
910 | /* con_getchar() | ||
911 | * TRAP: HV_FAST_TRAP | ||
912 | * FUNCTION: HV_FAST_CONS_GETCHAR | ||
913 | * RET0: status | ||
914 | * RET1: character | ||
915 | * ERRORS: EWOULDBLOCK No character available. | ||
916 | * | ||
917 | * Returns a character from the console device. If no character is | ||
918 | * available then an EWOULDBLOCK error is returned. If a character is | ||
919 | * available, then the returned status is EOK and the character value | ||
920 | * is in RET1. | ||
921 | * | ||
922 | * A virtual BREAK is represented by the 64-bit value -1. | ||
923 | * | ||
924 | * A virtual HUP signal is represented by the 64-bit value -2. | ||
925 | */ | ||
926 | #define HV_FAST_CONS_GETCHAR 0x60 | ||
927 | |||
928 | /* con_putchar() | ||
929 | * TRAP: HV_FAST_TRAP | ||
930 | * FUNCTION: HV_FAST_CONS_PUTCHAR | ||
931 | * ARG0: character | ||
932 | * RET0: status | ||
933 | * ERRORS: EINVAL Illegal character | ||
934 | * EWOULDBLOCK Output buffer currently full, would block | ||
935 | * | ||
936 | * Send a character to the console device. Only character values | ||
937 | * between 0 and 255 may be used. Values outside this range are | ||
938 | * invalid except for the 64-bit value -1 which is used to send a | ||
939 | * virtual BREAK. | ||
940 | */ | ||
941 | #define HV_FAST_CONS_PUTCHAR 0x61 | ||
942 | |||
943 | /* Trap trace services. | ||
944 | * | ||
945 | * The hypervisor provides a trap tracing capability for privileged | ||
946 | * code running on each virtual CPU. Privileged code provides a | ||
947 | * round-robin trap trace queue within which the hypervisor writes | ||
948 | * 64-byte entries detailing hyperprivileged traps taken on behalf of | ||
949 | * privileged code. This is provided as a debugging capability for | ||
950 | * privileged code. | ||
951 | * | ||
952 | * The trap trace control structure is 64-bytes long and placed at the | ||
953 | * start (offset 0) of the trap trace buffer, and is described as | ||
954 | * follows: | ||
955 | */ | ||
956 | #ifndef __ASSEMBLY__ | ||
/* Trap trace control structure, occupying the first 64 bytes (offset 0)
 * of the trap trace buffer.  It is owned and modified by the hypervisor;
 * a guest must not modify it (doing so yields undefined behavior).
 */
struct hv_trap_trace_control {
	unsigned long		head_offset;	/* Offset of most recently completed entry */
	unsigned long		tail_offset;	/* Offset of next entry to be written */
	unsigned long		__reserved[0x30 / sizeof(unsigned long)];	/* Pad to 64 bytes */
};
962 | #endif | ||
963 | #define HV_TRAP_TRACE_CTRL_HEAD_OFFSET 0x00 | ||
964 | #define HV_TRAP_TRACE_CTRL_TAIL_OFFSET 0x08 | ||
965 | |||
966 | /* The head offset is the offset of the most recently completed entry | ||
967 | * in the trap-trace buffer. The tail offset is the offset of the | ||
968 | * next entry to be written. The control structure is owned and | ||
969 | * modified by the hypervisor. A guest may not modify the control | ||
970 | * structure contents. Attempts to do so will result in undefined | ||
971 | * behavior for the guest. | ||
972 | * | ||
973 | * Each trap trace buffer entry is laid out as follows: | ||
974 | */ | ||
975 | #ifndef __ASSEMBLY__ | ||
/* One 64-byte trap trace buffer entry.  Entries are written by the
 * hypervisor (HV_TRAP_TYPE_HV) or added by the guest via
 * ttrace_addentry() (HV_TRAP_TYPE_GUEST).  Field offsets correspond
 * to the HV_TRAP_TRACE_ENTRY_* constants defined below.
 */
struct hv_trap_trace_entry {
	unsigned char	type;		/* Hypervisor or guest entry? (HV_TRAP_TYPE_*) */
	unsigned char	hpstate;	/* Hyper-privileged state */
	unsigned char	tl;		/* Trap level */
	unsigned char	gl;		/* Global register level */
	unsigned short	tt;		/* Trap type */
	unsigned short	tag;		/* Extended trap identifier */
	unsigned long	tstate;		/* Trap state */
	unsigned long	tick;		/* Tick */
	unsigned long	tpc;		/* Trap PC */
	unsigned long	f1;		/* Entry specific */
	unsigned long	f2;		/* Entry specific */
	unsigned long	f3;		/* Entry specific */
	unsigned long	f4;		/* Entry specific */
};
991 | #endif | ||
992 | #define HV_TRAP_TRACE_ENTRY_TYPE 0x00 | ||
993 | #define HV_TRAP_TRACE_ENTRY_HPSTATE 0x01 | ||
994 | #define HV_TRAP_TRACE_ENTRY_TL 0x02 | ||
995 | #define HV_TRAP_TRACE_ENTRY_GL 0x03 | ||
996 | #define HV_TRAP_TRACE_ENTRY_TT 0x04 | ||
997 | #define HV_TRAP_TRACE_ENTRY_TAG 0x06 | ||
998 | #define HV_TRAP_TRACE_ENTRY_TSTATE 0x08 | ||
999 | #define HV_TRAP_TRACE_ENTRY_TICK 0x10 | ||
1000 | #define HV_TRAP_TRACE_ENTRY_TPC 0x18 | ||
1001 | #define HV_TRAP_TRACE_ENTRY_F1 0x20 | ||
1002 | #define HV_TRAP_TRACE_ENTRY_F2 0x28 | ||
1003 | #define HV_TRAP_TRACE_ENTRY_F3 0x30 | ||
1004 | #define HV_TRAP_TRACE_ENTRY_F4 0x38 | ||
1005 | |||
1006 | /* The type field is encoded as follows. */ | ||
1007 | #define HV_TRAP_TYPE_UNDEF 0x00 /* Entry content undefined */ | ||
1008 | #define HV_TRAP_TYPE_HV 0x01 /* Hypervisor trap entry */ | ||
1009 | #define HV_TRAP_TYPE_GUEST 0xff /* Added via ttrace_addentry() */ | ||
1010 | |||
1011 | /* ttrace_buf_conf() | ||
1012 | * TRAP: HV_FAST_TRAP | ||
1013 | * FUNCTION: HV_FAST_TTRACE_BUF_CONF | ||
1014 | * ARG0: real address | ||
1015 | * ARG1: number of entries | ||
1016 | * RET0: status | ||
1017 | * RET1: number of entries | ||
1018 | * ERRORS: ENORADDR Invalid real address | ||
1019 | * EINVAL Size is too small | ||
1020 | * EBADALIGN Real address not aligned on 64-byte boundary | ||
1021 | * | ||
1022 | * Requests hypervisor trap tracing and declares a virtual CPU's trap | ||
1023 | * trace buffer to the hypervisor. The real address supplies the real | ||
1024 | * base address of the trap trace queue and must be 64-byte aligned. | ||
1025 | * Specifying a value of 0 for the number of entries disables trap | ||
1026 | * tracing for the calling virtual CPU. The buffer allocated must be | ||
1027 | * sized for a power of two number of 64-byte trap trace entries plus | ||
1028 | * an initial 64-byte control structure. | ||
1029 | * | ||
1030 | * This may be invoked any number of times so that a virtual CPU may | ||
1031 | * relocate a trap trace buffer or create "snapshots" of information. | ||
1032 | * | ||
1033 | * If the real address is illegal or badly aligned, then trap tracing | ||
1034 | * is disabled and an error is returned. | ||
1035 | * | ||
1036 | * Upon failure with EINVAL, this service call returns in RET1 the | ||
1037 | * minimum number of buffer entries required. Upon other failures | ||
1038 | * RET1 is undefined. | ||
1039 | */ | ||
1040 | #define HV_FAST_TTRACE_BUF_CONF 0x90 | ||
1041 | |||
1042 | /* ttrace_buf_info() | ||
1043 | * TRAP: HV_FAST_TRAP | ||
1044 | * FUNCTION: HV_FAST_TTRACE_BUF_INFO | ||
1045 | * RET0: status | ||
1046 | * RET1: real address | ||
1047 | * RET2: size | ||
1048 | * ERRORS: None defined. | ||
1049 | * | ||
1050 | * Returns the size and location of the previously declared trap-trace | ||
1051 | * buffer. In the event that no buffer was previously defined, or the | ||
1052 | * buffer is disabled, this call will return a size of zero bytes. | ||
1053 | */ | ||
1054 | #define HV_FAST_TTRACE_BUF_INFO 0x91 | ||
1055 | |||
1056 | /* ttrace_enable() | ||
1057 | * TRAP: HV_FAST_TRAP | ||
1058 | * FUNCTION: HV_FAST_TTRACE_ENABLE | ||
1059 | * ARG0: enable | ||
1060 | * RET0: status | ||
1061 | * RET1: previous enable state | ||
1062 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1063 | * | ||
1064 | * Enable or disable trap tracing, and return the previous enabled | ||
1065 | * state in RET1. Future systems may define various flags for the | ||
1066 | * enable argument (ARG0), for the moment a guest should pass | ||
1067 | * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all | ||
1068 | * tracing - which will ensure future compatibility. | ||
1069 | */ | ||
1070 | #define HV_FAST_TTRACE_ENABLE 0x92 | ||
1071 | |||
1072 | /* ttrace_freeze() | ||
1073 | * TRAP: HV_FAST_TRAP | ||
1074 | * FUNCTION: HV_FAST_TTRACE_FREEZE | ||
1075 | * ARG0: freeze | ||
1076 | * RET0: status | ||
1077 | * RET1: previous freeze state | ||
1078 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1079 | * | ||
1080 | * Freeze or unfreeze trap tracing, returning the previous freeze | ||
1081 | * state in RET1. A guest should pass a non-zero value to freeze and | ||
1082 | * a zero value to unfreeze all tracing. The returned previous state | ||
1083 | * is 0 for not frozen and 1 for frozen. | ||
1084 | */ | ||
1085 | #define HV_FAST_TTRACE_FREEZE 0x93 | ||
1086 | |||
1087 | /* ttrace_addentry() | ||
1088 | * TRAP: HV_TTRACE_ADDENTRY_TRAP | ||
1089 | * ARG0: tag (16-bits) | ||
1090 | * ARG1: data word 0 | ||
1091 | * ARG2: data word 1 | ||
1092 | * ARG3: data word 2 | ||
1093 | * ARG4: data word 3 | ||
1094 | * RET0: status | ||
1095 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1096 | * | ||
1097 | * Add an entry to the trap trace buffer. Upon return only ARG0/RET0 | ||
1098 | * is modified - none of the other registers holding arguments are | ||
1099 | * volatile across this hypervisor service. | ||
1100 | */ | ||
1101 | |||
1102 | /* Core dump services. | ||
1103 | * | ||
1104 | * Since the hypervisor virtualizes and thus obscures a lot of the | ||
1105 | * physical machine layout and state, traditional OS crash dumps can | ||
1106 | * be difficult to diagnose especially when the problem is a | ||
1107 | * configuration error of some sort. | ||
1108 | * | ||
1109 | * The dump services provide an opaque buffer into which the | ||
1110 | * hypervisor can place its internal state in order to assist in | ||
1111 | * debugging such situations. The contents are opaque and extremely | ||
1112 | * platform and hypervisor implementation specific. The guest, during | ||
1113 | * a core dump, requests that the hypervisor update any information in | ||
1114 | * the dump buffer in preparation to being dumped as part of the | ||
1115 | * domain's memory image. | ||
1116 | */ | ||
1117 | |||
1118 | /* dump_buf_update() | ||
1119 | * TRAP: HV_FAST_TRAP | ||
1120 | * FUNCTION: HV_FAST_DUMP_BUF_UPDATE | ||
1121 | * ARG0: real address | ||
1122 | * ARG1: size | ||
1123 | * RET0: status | ||
1124 | * RET1: required size of dump buffer | ||
1125 | * ERRORS: ENORADDR Invalid real address | ||
1126 | * EBADALIGN Real address is not aligned on a 64-byte | ||
1127 | * boundary | ||
1128 | * EINVAL Size is non-zero but less than minimum size | ||
1129 | * required | ||
1130 | * ENOTSUPPORTED Operation not supported on current logical | ||
1131 | * domain | ||
1132 | * | ||
1133 | * Declare a domain dump buffer to the hypervisor. The real address | ||
1134 | * provided for the domain dump buffer must be 64-byte aligned. The | ||
1135 | * size specifies the size of the dump buffer and may be larger than | ||
1136 | * the minimum size specified in the machine description. The | ||
1137 | * hypervisor will fill the dump buffer with opaque data. | ||
1138 | * | ||
1139 | * Note: A guest may elect to include dump buffer contents as part of a crash | ||
1140 | * dump to assist with debugging. This function may be called any number | ||
1141 | * of times so that a guest may relocate a dump buffer, or create | ||
1142 | * "snapshots" of any dump-buffer information. Each call to | ||
1143 | * dump_buf_update() atomically declares the new dump buffer to the | ||
1144 | * hypervisor. | ||
1145 | * | ||
1146 | * A specified size of 0 unconfigures the dump buffer. If the real | ||
1147 | * address is illegal or badly aligned, then any currently active dump | ||
1148 | * buffer is disabled and an error is returned. | ||
1149 | * | ||
1150 | * In the event that the call fails with EINVAL, RET1 contains the | ||
1151 | * minimum size required by the hypervisor for a valid dump buffer. | ||
1152 | */ | ||
1153 | #define HV_FAST_DUMP_BUF_UPDATE 0x94 | ||
1154 | |||
1155 | /* dump_buf_info() | ||
1156 | * TRAP: HV_FAST_TRAP | ||
1157 | * FUNCTION: HV_FAST_DUMP_BUF_INFO | ||
1158 | * RET0: status | ||
1159 | * RET1: real address of current dump buffer | ||
1160 | * RET2: size of current dump buffer | ||
1161 | * ERRORS: No errors defined. | ||
1162 | * | ||
1163 | * Return the currently configured dump buffer description. A | ||
1164 | * returned size of 0 bytes indicates an undefined dump buffer. In | ||
1165 | * this case the return address in RET1 is undefined. | ||
1166 | */ | ||
1167 | #define HV_FAST_DUMP_BUF_INFO 0x95 | ||
1168 | |||
1169 | /* Device interrupt services. | ||
1170 | * | ||
1171 | * Device interrupts are allocated to system bus bridges by the hypervisor, | ||
1172 | * and described to OBP in the machine description. OBP then describes | ||
1173 | * these interrupts to the OS via properties in the device tree. | ||
1174 | * | ||
1175 | * Terminology: | ||
1176 | * | ||
1177 | * cpuid Unique opaque value which represents a target cpu. | ||
1178 | * | ||
1179 | * devhandle Device handle. It uniquely identifies a device, and | ||
1180 | * consists of the lower 28-bits of the hi-cell of the | ||
1181 | * first entry of the device's "reg" property in the | ||
1182 | * OBP device tree. | ||
1183 | * | ||
1184 | * devino Device interrupt number. Specifies the relative | ||
1185 | * interrupt number within the device. The unique | ||
1186 | * combination of devhandle and devino are used to | ||
1187 | * identify a specific device interrupt. | ||
1188 | * | ||
1189 | * Note: The devino value is the same as the values in the | ||
1190 | * "interrupts" property or "interrupt-map" property | ||
1191 | * in the OBP device tree for that device. | ||
1192 | * | ||
1193 | * sysino System interrupt number. A 64-bit unsigned integer | ||
1194 | * representing a unique interrupt within a virtual | ||
1195 | * machine. | ||
1196 | * | ||
1197 | * intr_state A flag representing the interrupt state for a given | ||
1198 | * sysino. The state values are defined below. | ||
1199 | * | ||
1200 | * intr_enabled A flag representing the 'enabled' state for a given | ||
1201 | * sysino. The enable values are defined below. | ||
1202 | */ | ||
1203 | |||
1204 | #define HV_INTR_STATE_IDLE 0 /* Nothing pending */ | ||
1205 | #define HV_INTR_STATE_RECEIVED 1 /* Interrupt received by hardware */ | ||
1206 | #define HV_INTR_STATE_DELIVERED 2 /* Interrupt delivered to queue */ | ||
1207 | |||
1208 | #define HV_INTR_DISABLED 0 /* sysino not enabled */ | ||
1209 | #define HV_INTR_ENABLED 1 /* sysino enabled */ | ||
1210 | |||
1211 | /* intr_devino_to_sysino() | ||
1212 | * TRAP: HV_FAST_TRAP | ||
1213 | * FUNCTION: HV_FAST_INTR_DEVINO2SYSINO | ||
1214 | * ARG0: devhandle | ||
1215 | * ARG1: devino | ||
1216 | * RET0: status | ||
1217 | * RET1: sysino | ||
1218 | * ERRORS: EINVAL Invalid devhandle/devino | ||
1219 | * | ||
1220 | * Converts a device specific interrupt number of the given | ||
1221 | * devhandle/devino into a system specific ino (sysino). | ||
1222 | */ | ||
1223 | #define HV_FAST_INTR_DEVINO2SYSINO 0xa0 | ||
1224 | |||
1225 | #ifndef __ASSEMBLY__ | ||
1226 | extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, | ||
1227 | unsigned long devino); | ||
1228 | #endif | ||
1229 | |||
1230 | /* intr_getenabled() | ||
1231 | * TRAP: HV_FAST_TRAP | ||
1232 | * FUNCTION: HV_FAST_INTR_GETENABLED | ||
1233 | * ARG0: sysino | ||
1234 | * RET0: status | ||
1235 | * RET1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1236 | * ERRORS: EINVAL Invalid sysino | ||
1237 | * | ||
1238 | * Returns interrupt enabled state in RET1 for the interrupt defined | ||
1239 | * by the given sysino. | ||
1240 | */ | ||
1241 | #define HV_FAST_INTR_GETENABLED 0xa1 | ||
1242 | |||
1243 | #ifndef __ASSEMBLY__ | ||
1244 | extern unsigned long sun4v_intr_getenabled(unsigned long sysino); | ||
1245 | #endif | ||
1246 | |||
1247 | /* intr_setenabled() | ||
1248 | * TRAP: HV_FAST_TRAP | ||
1249 | * FUNCTION: HV_FAST_INTR_SETENABLED | ||
1250 | * ARG0: sysino | ||
1251 | * ARG1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1252 | * RET0: status | ||
1253 | * ERRORS: EINVAL Invalid sysino or intr_enabled value | ||
1254 | * | ||
1255 | * Set the 'enabled' state of the interrupt sysino. | ||
1256 | */ | ||
1257 | #define HV_FAST_INTR_SETENABLED 0xa2 | ||
1258 | |||
1259 | #ifndef __ASSEMBLY__ | ||
1260 | extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled); | ||
1261 | #endif | ||
1262 | |||
1263 | /* intr_getstate() | ||
1264 | * TRAP: HV_FAST_TRAP | ||
1265 | * FUNCTION: HV_FAST_INTR_GETSTATE | ||
1266 | * ARG0: sysino | ||
1267 | * RET0: status | ||
1268 | * RET1: intr_state (HV_INTR_STATE_*) | ||
1269 | * ERRORS: EINVAL Invalid sysino | ||
1270 | * | ||
1271 | * Returns current state of the interrupt defined by the given sysino. | ||
1272 | */ | ||
1273 | #define HV_FAST_INTR_GETSTATE 0xa3 | ||
1274 | |||
1275 | #ifndef __ASSEMBLY__ | ||
1276 | extern unsigned long sun4v_intr_getstate(unsigned long sysino); | ||
1277 | #endif | ||
1278 | |||
1279 | /* intr_setstate() | ||
1280 | * TRAP: HV_FAST_TRAP | ||
1281 | * FUNCTION: HV_FAST_INTR_SETSTATE | ||
1282 | * ARG0: sysino | ||
1283 | * ARG1: intr_state (HV_INTR_STATE_*) | ||
1284 | * RET0: status | ||
1285 | * ERRORS: EINVAL Invalid sysino or intr_state value | ||
1286 | * | ||
1287 | * Sets the current state of the interrupt described by the given sysino | ||
1288 | * value. | ||
1289 | * | ||
1290 | * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending | ||
1291 | * interrupt for sysino. | ||
1292 | */ | ||
1293 | #define HV_FAST_INTR_SETSTATE 0xa4 | ||
1294 | |||
1295 | #ifndef __ASSEMBLY__ | ||
1296 | extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state); | ||
1297 | #endif | ||
1298 | |||
1299 | /* intr_gettarget() | ||
1300 | * TRAP: HV_FAST_TRAP | ||
1301 | * FUNCTION: HV_FAST_INTR_GETTARGET | ||
1302 | * ARG0: sysino | ||
1303 | * RET0: status | ||
1304 | * RET1: cpuid | ||
1305 | * ERRORS: EINVAL Invalid sysino | ||
1306 | * | ||
1307 | * Returns CPU that is the current target of the interrupt defined by | ||
1308 | * the given sysino. The CPU value returned is undefined if the target | ||
1309 | * has not been set via intr_settarget(). | ||
1310 | */ | ||
1311 | #define HV_FAST_INTR_GETTARGET 0xa5 | ||
1312 | |||
1313 | #ifndef __ASSEMBLY__ | ||
1314 | extern unsigned long sun4v_intr_gettarget(unsigned long sysino); | ||
1315 | #endif | ||
1316 | |||
1317 | /* intr_settarget() | ||
1318 | * TRAP: HV_FAST_TRAP | ||
1319 | * FUNCTION: HV_FAST_INTR_SETTARGET | ||
1320 | * ARG0: sysino | ||
1321 | * ARG1: cpuid | ||
1322 | * RET0: status | ||
1323 | * ERRORS: EINVAL Invalid sysino | ||
1324 | * ENOCPU Invalid cpuid | ||
1325 | * | ||
1326 | * Set the target CPU for the interrupt defined by the given sysino. | ||
1327 | */ | ||
1328 | #define HV_FAST_INTR_SETTARGET 0xa6 | ||
1329 | |||
1330 | #ifndef __ASSEMBLY__ | ||
1331 | extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); | ||
1332 | #endif | ||
1333 | |||
1334 | /* PCI IO services. | ||
1335 | * | ||
1336 | * See the terminology descriptions in the device interrupt services | ||
1337 | * section above as those apply here too. Here are terminology | ||
1338 | * definitions specific to these PCI IO services: | ||
1339 | * | ||
1340 | * tsbnum TSB number. Identifies which io-tsb is used. | ||
1341 | * For this version of the specification, tsbnum | ||
1342 | * must be zero. | ||
1343 | * | ||
1344 | * tsbindex TSB index. Identifies which entry in the TSB | ||
1345 | * is used. The first entry is zero. | ||
1346 | * | ||
1347 | * tsbid A 64-bit aligned data structure which contains | ||
1348 | * a tsbnum and a tsbindex. Bits 63:32 contain the | ||
1349 | * tsbnum and bits 31:00 contain the tsbindex. | ||
1350 | * | ||
1351 | * Use the HV_PCI_TSBID() macro to construct such | ||
1352 | * values. | ||
1353 | * | ||
1354 | * io_attributes IO attributes for IOMMU mappings. One or more | ||
1355 | * of the attribute bits are stored in a 64-bit | ||
1356 | * value. The values are defined below. | ||
1357 | * | ||
1358 | * r_addr 64-bit real address | ||
1359 | * | ||
1360 | * pci_device PCI device address. A PCI device address identifies | ||
1361 | * a specific device on a specific PCI bus segment. | ||
1362 | * A PCI device address is a 32-bit unsigned integer | ||
1363 | * with the following format: | ||
1364 | * | ||
1365 | * 00000000.bbbbbbbb.dddddfff.00000000 | ||
1366 | * | ||
1367 | * Use the HV_PCI_DEVICE_BUILD() macro to construct | ||
1368 | * such values. | ||
1369 | * | ||
1370 | * pci_config_offset | ||
1371 | * PCI configuration space offset. For conventional | ||
1372 | * PCI a value between 0 and 255. For extended | ||
1373 | * configuration space, a value between 0 and 4095. | ||
1374 | * | ||
1375 | * Note: For PCI configuration space accesses, the offset | ||
1376 | * must be aligned to the access size. | ||
1377 | * | ||
1378 | * error_flag A return value which specifies if the action succeeded | ||
1379 | * or failed. 0 means no error, non-0 means some error | ||
1380 | * occurred while performing the service. | ||
1381 | * | ||
1382 | * io_sync_direction | ||
1383 | * Direction definition for pci_dma_sync(), defined | ||
1384 | * below in HV_PCI_SYNC_*. | ||
1385 | * | ||
1386 | * io_page_list A list of io_page_addresses, an io_page_address is | ||
1387 | * a real address. | ||
1388 | * | ||
1389 | * io_page_list_p A pointer to an io_page_list. | ||
1390 | * | ||
1391 | * "size based byte swap" - Some functions do size based byte swapping | ||
1392 | * which allows sw to access pointers and | ||
1393 | * counters in native form when the processor | ||
1394 | * operates in a different endianness than the | ||
1395 | * IO bus. Size-based byte swapping converts a | ||
1396 | * multi-byte field between big-endian and | ||
1397 | * little-endian format. | ||
1398 | */ | ||
1399 | |||
1400 | #define HV_PCI_MAP_ATTR_READ 0x01 | ||
1401 | #define HV_PCI_MAP_ATTR_WRITE 0x02 | ||
1402 | |||
1403 | #define HV_PCI_DEVICE_BUILD(b,d,f) \ | ||
1404 | ((((b) & 0xff) << 16) | \ | ||
1405 | (((d) & 0x1f) << 11) | \ | ||
1406 | (((f) & 0x07) << 8)) | ||
1407 | |||
1408 | #define HV_PCI_TSBID(__tsb_num, __tsb_index) \ | ||
1409 | ((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index))) | ||
1410 | |||
1411 | #define HV_PCI_SYNC_FOR_DEVICE 0x01 | ||
1412 | #define HV_PCI_SYNC_FOR_CPU 0x02 | ||
1413 | |||
1414 | /* pci_iommu_map() | ||
1415 | * TRAP: HV_FAST_TRAP | ||
1416 | * FUNCTION: HV_FAST_PCI_IOMMU_MAP | ||
1417 | * ARG0: devhandle | ||
1418 | * ARG1: tsbid | ||
1419 | * ARG2: #ttes | ||
1420 | * ARG3: io_attributes | ||
1421 | * ARG4: io_page_list_p | ||
1422 | * RET0: status | ||
1423 | * RET1: #ttes mapped | ||
1424 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex/io_attributes | ||
1425 | * EBADALIGN Improperly aligned real address | ||
1426 | * ENORADDR Invalid real address | ||
1427 | * | ||
1428 | * Create IOMMU mappings in the sun4v device defined by the given | ||
1429 | * devhandle. The mappings are created in the TSB defined by the | ||
1430 | * tsbnum component of the given tsbid. The first mapping is created | ||
1431 | * in the TSB index defined by the tsbindex component of the given tsbid. | ||
1432 | * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex, | ||
1433 | * the second at tsbnum, tsbindex + 1, etc. | ||
1434 | * | ||
1435 | * All mappings are created with the attributes defined by the io_attributes | ||
1436 | * argument. The page mapping addresses are described in the io_page_list | ||
1437 | * defined by the given io_page_list_p, which is a pointer to the io_page_list. | ||
1438 | * The first entry in the io_page_list is the address for the first iotte, the | ||
1439 | * 2nd for the 2nd iotte, and so on. | ||
1440 | * | ||
1441 | * Each io_page_address in the io_page_list must be appropriately aligned. | ||
1442 | * #ttes must be greater than zero. For this version of the spec, the tsbnum | ||
1443 | * component of the given tsbid must be zero. | ||
1444 | * | ||
1445 | * Returns the actual number of mappings created, which may be less than | ||
1446 | * or equal to the argument #ttes. If the function returns a value which | ||
1447 | * is less than the #ttes, the caller may continue to call the function with | ||
1448 | * an updated tsbid, #ttes, io_page_list_p arguments until all pages are | ||
1449 | * mapped. | ||
1450 | * | ||
1451 | * Note: This function does not imply an iotte cache flush. The guest must | ||
1452 | * demap an entry before re-mapping it. | ||
1453 | */ | ||
1454 | #define HV_FAST_PCI_IOMMU_MAP 0xb0 | ||
1455 | |||
1456 | /* pci_iommu_demap() | ||
1457 | * TRAP: HV_FAST_TRAP | ||
1458 | * FUNCTION: HV_FAST_PCI_IOMMU_DEMAP | ||
1459 | * ARG0: devhandle | ||
1460 | * ARG1: tsbid | ||
1461 | * ARG2: #ttes | ||
1462 | * RET0: status | ||
1463 | * RET1: #ttes demapped | ||
1464 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex | ||
1465 | * | ||
1466 | * Demap and flush IOMMU mappings in the device defined by the given | ||
1467 | * devhandle. Demaps up to #ttes entries in the TSB defined by the tsbnum | ||
1468 | * component of the given tsbid, starting at the TSB index defined by the | ||
1469 | * tsbindex component of the given tsbid. | ||
1470 | * | ||
1471 | * For this version of the spec, the tsbnum of the given tsbid must be zero. | ||
1472 | * #ttes must be greater than zero. | ||
1473 | * | ||
1474 | * Returns the actual number of ttes demapped, which may be less than or equal | ||
1475 | * to the argument #ttes. If #ttes demapped is less than #ttes, the caller | ||
1476 | * may continue to call this function with updated tsbid and #ttes arguments | ||
1477 | * until all pages are demapped. | ||
1478 | * | ||
1479 | * Note: Entries do not have to be mapped to be demapped. A demap of an | ||
1480 | * unmapped page will flush the entry from the tte cache. | ||
1481 | */ | ||
1482 | #define HV_FAST_PCI_IOMMU_DEMAP 0xb1 | ||
1483 | |||
1484 | /* pci_iommu_getmap() | ||
1485 | * TRAP: HV_FAST_TRAP | ||
1486 | * FUNCTION: HV_FAST_PCI_IOMMU_GETMAP | ||
1487 | * ARG0: devhandle | ||
1488 | * ARG1: tsbid | ||
1489 | * RET0: status | ||
1490 | * RET1: io_attributes | ||
1491 | * RET2: real address | ||
1492 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex | ||
1493 | * ENOMAP Mapping is not valid, no translation exists | ||
1494 | * | ||
1495 | * Read and return the mapping in the device described by the given devhandle | ||
1496 | * and tsbid. If successful, the io_attributes shall be returned in RET1 | ||
1497 | * and the page address of the mapping shall be returned in RET2. | ||
1498 | * | ||
1499 | * For this version of the spec, the tsbnum component of the given tsbid | ||
1500 | * must be zero. | ||
1501 | */ | ||
1502 | #define HV_FAST_PCI_IOMMU_GETMAP 0xb2 | ||
1503 | |||
1504 | /* pci_iommu_getbypass() | ||
1505 | * TRAP: HV_FAST_TRAP | ||
1506 | * FUNCTION: HV_FAST_PCI_IOMMU_GETBYPASS | ||
1507 | * ARG0: devhandle | ||
1508 | * ARG1: real address | ||
1509 | * ARG2: io_attributes | ||
1510 | * RET0: status | ||
1511 | * RET1: io_addr | ||
1512 | * ERRORS: EINVAL Invalid devhandle/io_attributes | ||
1513 | * ENORADDR Invalid real address | ||
1514 | * ENOTSUPPORTED Function not supported in this implementation. | ||
1515 | * | ||
1516 | * Create a "special" mapping in the device described by the given devhandle, | ||
1517 | * for the given real address and attributes. Return the IO address in RET1 | ||
1518 | * if successful. | ||
1519 | */ | ||
1520 | #define HV_FAST_PCI_IOMMU_GETBYPASS 0xb3 | ||
1521 | |||
1522 | /* pci_config_get() | ||
1523 | * TRAP: HV_FAST_TRAP | ||
1524 | * FUNCTION: HV_FAST_PCI_CONFIG_GET | ||
1525 | * ARG0: devhandle | ||
1526 | * ARG1: pci_device | ||
1527 | * ARG2: pci_config_offset | ||
1528 | * ARG3: size | ||
1529 | * RET0: status | ||
1530 | * RET1: error_flag | ||
1531 | * RET2: data | ||
1532 | * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size | ||
1533 | * EBADALIGN pci_config_offset not size aligned | ||
1534 | * ENOACCESS Access to this offset is not permitted | ||
1535 | * | ||
1536 | * Read PCI configuration space for the adapter described by the given | ||
1537 | * devhandle. Read size (1, 2, or 4) bytes of data from the given | ||
1538 | * pci_device, at pci_config_offset from the beginning of the device's | ||
1539 | * configuration space. If there was no error, RET1 is set to zero and | ||
1540 | * RET2 is set to the data read. Insignificant bits in RET2 are not | ||
1541 | * guaranteed to have any specific value and therefore must be ignored. | ||
1542 | * | ||
1543 | * The data returned in RET2 is size based byte swapped. | ||
1544 | * | ||
1545 | * If an error occurs during the read, set RET1 to a non-zero value. The | ||
1546 | * given pci_config_offset must be 'size' aligned. | ||
1547 | */ | ||
1548 | #define HV_FAST_PCI_CONFIG_GET 0xb4 | ||
1549 | |||
1550 | /* pci_config_put() | ||
1551 | * TRAP: HV_FAST_TRAP | ||
1552 | * FUNCTION: HV_FAST_PCI_CONFIG_PUT | ||
1553 | * ARG0: devhandle | ||
1554 | * ARG1: pci_device | ||
1555 | * ARG2: pci_config_offset | ||
1556 | * ARG3: size | ||
1557 | * ARG4: data | ||
1558 | * RET0: status | ||
1559 | * RET1: error_flag | ||
1560 | * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size | ||
1561 | * EBADALIGN pci_config_offset not size aligned | ||
1562 | * ENOACCESS Access to this offset is not permitted | ||
1563 | * | ||
1564 | * Write PCI configuration space for the adapter described by the given | ||
1565 | * devhandle. Write size (1, 2, or 4) bytes of data in a single operation, | ||
1566 | * at pci_config_offset from the beginning of the device's configuration | ||
1567 | * space. The data argument contains the data to be written to configuration | ||
1568 | * space. Prior to writing, the data is size based byte swapped. | ||
1569 | * | ||
1570 | * If an error occurs during the write access, do not generate an error | ||
1571 | * report, do set RET1 to a non-zero value. Otherwise RET1 is zero. | ||
1572 | * The given pci_config_offset must be 'size' aligned. | ||
1573 | * | ||
1574 | * This function is permitted to read from offset zero in the configuration | ||
1575 | * space described by the given pci_device if necessary to ensure that the | ||
1576 | * write access to config space completes. | ||
1577 | */ | ||
1578 | #define HV_FAST_PCI_CONFIG_PUT 0xb5 | ||
1579 | |||
1580 | /* pci_peek() | ||
1581 | * TRAP: HV_FAST_TRAP | ||
1582 | * FUNCTION: HV_FAST_PCI_PEEK | ||
1583 | * ARG0: devhandle | ||
1584 | * ARG1: real address | ||
1585 | * ARG2: size | ||
1586 | * RET0: status | ||
1587 | * RET1: error_flag | ||
1588 | * RET2: data | ||
1589 | * ERRORS: EINVAL Invalid devhandle or size | ||
1590 | * EBADALIGN Improperly aligned real address | ||
1591 | * ENORADDR Bad real address | ||
1592 | * ENOACCESS Guest access prohibited | ||
1593 | * | ||
1594 | * Attempt to read the IO address given by the given devhandle, real address, | ||
1595 | * and size. Size must be 1, 2, 4, or 8. The read is performed as a single | ||
1596 | * access operation using the given size. If an error occurs when reading | ||
1597 | * from the given location, do not generate an error report, but return a | ||
1598 | * non-zero value in RET1. If the read was successful, return zero in RET1 | ||
1599 | * and return the actual data read in RET2. The data returned is size based | ||
1600 | * byte swapped. | ||
1601 | * | ||
1602 | * Non-significant bits in RET2 are not guaranteed to have any specific value | ||
1603 | * and therefore must be ignored. If RET1 is returned as non-zero, the data | ||
1604 | * value is not guaranteed to have any specific value and should be ignored. | ||
1605 | * | ||
1606 | * The caller must have permission to read from the given devhandle, real | ||
1607 | * address, which must be an IO address. The argument real address must be a | ||
1608 | * size aligned address. | ||
1609 | * | ||
1610 | * The hypervisor implementation of this function must block access to any | ||
1611 | * IO address that the guest does not have explicit permission to access. | ||
1612 | */ | ||
1613 | #define HV_FAST_PCI_PEEK 0xb6 | ||
1614 | |||
1615 | /* pci_poke() | ||
1616 | * TRAP: HV_FAST_TRAP | ||
1617 | * FUNCTION: HV_FAST_PCI_POKE | ||
1618 | * ARG0: devhandle | ||
1619 | * ARG1: real address | ||
1620 | * ARG2: size | ||
1621 | * ARG3: data | ||
1622 | * ARG4: pci_device | ||
1623 | * RET0: status | ||
1624 | * RET1: error_flag | ||
1625 | * ERRORS: EINVAL Invalid devhandle, size, or pci_device | ||
1626 | * EBADALIGN Improperly aligned real address | ||
1627 | * ENORADDR Bad real address | ||
1628 | * ENOACCESS Guest access prohibited | ||
1629 | * ENOTSUPPORTED Function is not supported by implementation | ||
1630 | * | ||
1631 | * Attempt to write data to the IO address given by the given devhandle, | ||
1632 | * real address, and size. Size must be 1, 2, 4, or 8. The write is | ||
1633 | * performed as a single access operation using the given size. Prior to | ||
1634 | * writing the data is size based swapped. | ||
1635 | * | ||
1636 | * If an error occurs when writing to the given location, do not generate an | ||
1637 | * error report, but return a non-zero value in RET1. If the write was | ||
1638 | * successful, return zero in RET1. | ||
1639 | * | ||
1640 | * pci_device describes the configuration address of the device being | ||
1641 | * written to. The implementation may safely read from offset 0 with | ||
1642 | * the configuration space of the device described by devhandle and | ||
1643 | * pci_device in order to guarantee that the write portion of the operation | ||
1644 | * completes | ||
1645 | * | ||
1646 | * Any error that occurs due to the read shall be reported using the normal | ||
1647 | * error reporting mechanisms .. the read error is not suppressed. | ||
1648 | * | ||
1649 | * The caller must have permission to write to the given devhandle, real | ||
1650 | * address, which must be an IO address. The argument real address must be a | ||
1651 | * size aligned address. The caller must have permission to read from | ||
1652 | * the given devhandle, pci_device configuration space offset 0. | ||
1653 | * | ||
1654 | * The hypervisor implementation of this function must block access to any | ||
1655 | * IO address that the guest does not have explicit permission to access. | ||
1656 | */ | ||
1657 | #define HV_FAST_PCI_POKE 0xb7 | ||
1658 | |||
1659 | /* pci_dma_sync() | ||
1660 | * TRAP: HV_FAST_TRAP | ||
1661 | * FUNCTION: HV_FAST_PCI_DMA_SYNC | ||
1662 | * ARG0: devhandle | ||
1663 | * ARG1: real address | ||
1664 | * ARG2: size | ||
1665 | * ARG3: io_sync_direction | ||
1666 | * RET0: status | ||
1667 | * RET1: #synced | ||
1668 | * ERRORS: EINVAL Invalid devhandle or io_sync_direction | ||
1669 | * ENORADDR Bad real address | ||
1670 | * | ||
1671 | * Synchronize a memory region described by the given real address and size, | ||
1672 | * for the device defined by the given devhandle using the direction(s) | ||
1673 | * defined by the given io_sync_direction. The argument size is the size of | ||
1674 | * the memory region in bytes. | ||
1675 | * | ||
1676 | * Return the actual number of bytes synchronized in the return value #synced, | ||
1677 | * which may be less than or equal to the argument size. If the return | ||
1678 | * value #synced is less than size, the caller must continue to call this | ||
1679 | * function with updated real address and size arguments until the entire | ||
1680 | * memory region is synchronized. | ||
1681 | */ | ||
1682 | #define HV_FAST_PCI_DMA_SYNC 0xb8 | ||
1683 | |||
1684 | /* PCI MSI services. */ | ||
1685 | |||
1686 | #define HV_MSITYPE_MSI32 0x00 | ||
1687 | #define HV_MSITYPE_MSI64 0x01 | ||
1688 | |||
1689 | #define HV_MSIQSTATE_IDLE 0x00 | ||
1690 | #define HV_MSIQSTATE_ERROR 0x01 | ||
1691 | |||
1692 | #define HV_MSIQ_INVALID 0x00 | ||
1693 | #define HV_MSIQ_VALID 0x01 | ||
1694 | |||
1695 | #define HV_MSISTATE_IDLE 0x00 | ||
1696 | #define HV_MSISTATE_DELIVERED 0x01 | ||
1697 | |||
1698 | #define HV_MSIVALID_INVALID 0x00 | ||
1699 | #define HV_MSIVALID_VALID 0x01 | ||
1700 | |||
1701 | #define HV_PCIE_MSGTYPE_PME_MSG 0x18 | ||
1702 | #define HV_PCIE_MSGTYPE_PME_ACK_MSG 0x1b | ||
1703 | #define HV_PCIE_MSGTYPE_CORR_MSG 0x30 | ||
1704 | #define HV_PCIE_MSGTYPE_NONFATAL_MSG 0x31 | ||
1705 | #define HV_PCIE_MSGTYPE_FATAL_MSG 0x33 | ||
1706 | |||
1707 | #define HV_MSG_INVALID 0x00 | ||
1708 | #define HV_MSG_VALID 0x01 | ||
1709 | |||
1710 | /* pci_msiq_conf() | ||
1711 | * TRAP: HV_FAST_TRAP | ||
1712 | * FUNCTION: HV_FAST_PCI_MSIQ_CONF | ||
1713 | * ARG0: devhandle | ||
1714 | * ARG1: msiqid | ||
1715 | * ARG2: real address | ||
1716 | * ARG3: number of entries | ||
1717 | * RET0: status | ||
1718 | * ERRORS: EINVAL Invalid devhandle, msiqid or nentries | ||
1719 | * EBADALIGN Improperly aligned real address | ||
1720 | * ENORADDR Bad real address | ||
1721 | * | ||
1722 | * Configure the MSI queue given by the devhandle and msiqid arguments, | ||
1723 | * and to be placed at the given real address and be of the given | ||
1724 | * number of entries. The real address must be aligned exactly to match | ||
1725 | * the queue size. Each queue entry is 64-bytes long, so f.e. a 32 entry | ||
1726 | * queue must be aligned on a 2048 byte real address boundary. The MSI-EQ | ||
1727 | * Head and Tail are initialized so that the MSI-EQ is 'empty'. | ||
1728 | * | ||
1729 | * Implementation Note: Certain implementations have fixed sized queues. In | ||
1730 | * that case, number of entries must contain the correct | ||
1731 | * value. | ||
1732 | */ | ||
1733 | #define HV_FAST_PCI_MSIQ_CONF 0xc0 | ||
1734 | |||
1735 | /* pci_msiq_info() | ||
1736 | * TRAP: HV_FAST_TRAP | ||
1737 | * FUNCTION: HV_FAST_PCI_MSIQ_INFO | ||
1738 | * ARG0: devhandle | ||
1739 | * ARG1: msiqid | ||
1740 | * RET0: status | ||
1741 | * RET1: real address | ||
1742 | * RET2: number of entries | ||
1743 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1744 | * | ||
1745 | * Return the configuration information for the MSI queue described | ||
1746 | * by the given devhandle and msiqid. The base address of the queue | ||
1747 | * is returned in ARG1 and the number of entries is returned in ARG2. | ||
1748 | * If the queue is unconfigured, the real address is undefined and the | ||
1749 | * number of entries will be returned as zero. | ||
1750 | */ | ||
1751 | #define HV_FAST_PCI_MSIQ_INFO 0xc1 | ||
1752 | |||
1753 | /* pci_msiq_getvalid() | ||
1754 | * TRAP: HV_FAST_TRAP | ||
1755 | * FUNCTION: HV_FAST_PCI_MSIQ_GETVALID | ||
1756 | * ARG0: devhandle | ||
1757 | * ARG1: msiqid | ||
1758 | * RET0: status | ||
1759 | * RET1: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID) | ||
1760 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1761 | * | ||
1762 | * Get the valid state of the MSI-EQ described by the given devhandle and | ||
1763 | * msiqid. | ||
1764 | */ | ||
1765 | #define HV_FAST_PCI_MSIQ_GETVALID 0xc2 | ||
1766 | |||
1767 | /* pci_msiq_setvalid() | ||
1768 | * TRAP: HV_FAST_TRAP | ||
1769 | * FUNCTION: HV_FAST_PCI_MSIQ_SETVALID | ||
1770 | * ARG0: devhandle | ||
1771 | * ARG1: msiqid | ||
1772 | * ARG2: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID) | ||
1773 | * RET0: status | ||
1774 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqvalid | ||
1775 | * value or MSI EQ is uninitialized | ||
1776 | * | ||
1777 | * Set the valid state of the MSI-EQ described by the given devhandle and | ||
1778 | * msiqid to the given msiqvalid. | ||
1779 | */ | ||
1780 | #define HV_FAST_PCI_MSIQ_SETVALID 0xc3 | ||
1781 | |||
1782 | /* pci_msiq_getstate() | ||
1783 | * TRAP: HV_FAST_TRAP | ||
1784 | * FUNCTION: HV_FAST_PCI_MSIQ_GETSTATE | ||
1785 | * ARG0: devhandle | ||
1786 | * ARG1: msiqid | ||
1787 | * RET0: status | ||
1788 | * RET1: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR) | ||
1789 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1790 | * | ||
1791 | * Get the state of the MSI-EQ described by the given devhandle and | ||
1792 | * msiqid. | ||
1793 | */ | ||
1794 | #define HV_FAST_PCI_MSIQ_GETSTATE 0xc4 | ||
1795 | |||
1796 | /* pci_msiq_setstate() | ||
1797 | * TRAP: HV_FAST_TRAP | ||
1798 | * FUNCTION: HV_FAST_PCI_MSIQ_SETSTATE | ||
1799 | * ARG0: devhandle | ||
1800 | * ARG1: msiqid | ||
1801 | * ARG2: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR) | ||
1802 | * RET0: status | ||
1803 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqstate | ||
1804 | * value or MSI EQ is uninitialized | ||
1805 | * | ||
1806 | * Set the state of the MSI-EQ described by the given devhandle and | ||
1807 | * msiqid to the given msiqstate. | ||
1808 | */ | ||
1809 | #define HV_FAST_PCI_MSIQ_SETSTATE 0xc5 | ||
1810 | |||
1811 | /* pci_msiq_gethead() | ||
1812 | * TRAP: HV_FAST_TRAP | ||
1813 | * FUNCTION: HV_FAST_PCI_MSIQ_GETHEAD | ||
1814 | * ARG0: devhandle | ||
1815 | * ARG1: msiqid | ||
1816 | * RET0: status | ||
1817 | * RET1: msiqhead | ||
1818 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1819 | * | ||
1820 | * Get the current MSI EQ queue head for the MSI-EQ described by the | ||
1821 | * given devhandle and msiqid. | ||
1822 | */ | ||
1823 | #define HV_FAST_PCI_MSIQ_GETHEAD 0xc6 | ||
1824 | |||
1825 | /* pci_msiq_sethead() | ||
1826 | * TRAP: HV_FAST_TRAP | ||
1827 | * FUNCTION: HV_FAST_PCI_MSIQ_SETHEAD | ||
1828 | * ARG0: devhandle | ||
1829 | * ARG1: msiqid | ||
1830 | * ARG2: msiqhead | ||
1831 | * RET0: status | ||
1832 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqhead, | ||
1833 | * or MSI EQ is uninitialized | ||
1834 | * | ||
1835 | * Set the current MSI EQ queue head for the MSI-EQ described by the | ||
1836 | * given devhandle and msiqid. | ||
1837 | */ | ||
1838 | #define HV_FAST_PCI_MSIQ_SETHEAD 0xc7 | ||
1839 | |||
1840 | /* pci_msiq_gettail() | ||
1841 | * TRAP: HV_FAST_TRAP | ||
1842 | * FUNCTION: HV_FAST_PCI_MSIQ_GETTAIL | ||
1843 | * ARG0: devhandle | ||
1844 | * ARG1: msiqid | ||
1845 | * RET0: status | ||
1846 | * RET1: msiqtail | ||
1847 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1848 | * | ||
1849 | * Get the current MSI EQ queue tail for the MSI-EQ described by the | ||
1850 | * given devhandle and msiqid. | ||
1851 | */ | ||
1852 | #define HV_FAST_PCI_MSIQ_GETTAIL 0xc8 | ||
1853 | |||
1854 | /* pci_msi_getvalid() | ||
1855 | * TRAP: HV_FAST_TRAP | ||
1856 | * FUNCTION: HV_FAST_PCI_MSI_GETVALID | ||
1857 | * ARG0: devhandle | ||
1858 | * ARG1: msinum | ||
1859 | * RET0: status | ||
1860 | * RET1: msivalidstate | ||
1861 | * ERRORS: EINVAL Invalid devhandle or msinum | ||
1862 | * | ||
1863 | * Get the current valid/enabled state for the MSI defined by the | ||
1864 | * given devhandle and msinum. | ||
1865 | */ | ||
1866 | #define HV_FAST_PCI_MSI_GETVALID 0xc9 | ||
1867 | |||
1868 | /* pci_msi_setvalid() | ||
1869 | * TRAP: HV_FAST_TRAP | ||
1870 | * FUNCTION: HV_FAST_PCI_MSI_SETVALID | ||
1871 | * ARG0: devhandle | ||
1872 | * ARG1: msinum | ||
1873 | * ARG2: msivalidstate | ||
1874 | * RET0: status | ||
1875 | * ERRORS: EINVAL Invalid devhandle or msinum or msivalidstate | ||
1876 | * | ||
1877 | * Set the current valid/enabled state for the MSI defined by the | ||
1878 | * given devhandle and msinum. | ||
1879 | */ | ||
1880 | #define HV_FAST_PCI_MSI_SETVALID 0xca | ||
1881 | |||
1882 | /* pci_msi_getmsiq() | ||
1883 | * TRAP: HV_FAST_TRAP | ||
1884 | * FUNCTION: HV_FAST_PCI_MSI_GETMSIQ | ||
1885 | * ARG0: devhandle | ||
1886 | * ARG1: msinum | ||
1887 | * RET0: status | ||
1888 | * RET1: msiqid | ||
1889 | * ERRORS: EINVAL Invalid devhandle or msinum or MSI is unbound | ||
1890 | * | ||
1891 | * Get the MSI EQ that the MSI defined by the given devhandle and | ||
1892 | * msinum is bound to. | ||
1893 | */ | ||
1894 | #define HV_FAST_PCI_MSI_GETMSIQ 0xcb | ||
1895 | |||
1896 | /* pci_msi_setmsiq() | ||
1897 | * TRAP: HV_FAST_TRAP | ||
1898 | * FUNCTION: HV_FAST_PCI_MSI_SETMSIQ | ||
1899 | * ARG0: devhandle | ||
1900 | * ARG1: msinum | ||
1901 | * ARG2: msitype | ||
1902 | * ARG3: msiqid | ||
1903 | * RET0: status | ||
1904 | * ERRORS: EINVAL Invalid devhandle or msinum or msiqid | ||
1905 | * | ||
1906 | * Set the MSI EQ that the MSI defined by the given devhandle and | ||
1907 | * msinum is bound to. | ||
1908 | */ | ||
1909 | #define HV_FAST_PCI_MSI_SETMSIQ 0xcc | ||
1910 | |||
1911 | /* pci_msi_getstate() | ||
1912 | * TRAP: HV_FAST_TRAP | ||
1913 | * FUNCTION: HV_FAST_PCI_MSI_GETSTATE | ||
1914 | * ARG0: devhandle | ||
1915 | * ARG1: msinum | ||
1916 | * RET0: status | ||
1917 | * RET1: msistate | ||
1918 | * ERRORS: EINVAL Invalid devhandle or msinum | ||
1919 | * | ||
1920 | * Get the state of the MSI defined by the given devhandle and msinum. | ||
1921 | * If not initialized, return HV_MSISTATE_IDLE. | ||
1922 | */ | ||
1923 | #define HV_FAST_PCI_MSI_GETSTATE 0xcd | ||
1924 | |||
1925 | /* pci_msi_setstate() | ||
1926 | * TRAP: HV_FAST_TRAP | ||
1927 | * FUNCTION: HV_FAST_PCI_MSI_SETSTATE | ||
1928 | * ARG0: devhandle | ||
1929 | * ARG1: msinum | ||
1930 | * ARG2: msistate | ||
1931 | * RET0: status | ||
1932 | * ERRORS: EINVAL Invalid devhandle or msinum or msistate | ||
1933 | * | ||
1934 | * Set the state of the MSI defined by the given devhandle and msinum. | ||
1935 | */ | ||
1936 | #define HV_FAST_PCI_MSI_SETSTATE 0xce | ||
1937 | |||
1938 | /* pci_msg_getmsiq() | ||
1939 | * TRAP: HV_FAST_TRAP | ||
1940 | * FUNCTION: HV_FAST_PCI_MSG_GETMSIQ | ||
1941 | * ARG0: devhandle | ||
1942 | * ARG1: msgtype | ||
1943 | * RET0: status | ||
1944 | * RET1: msiqid | ||
1945 | * ERRORS: EINVAL Invalid devhandle or msgtype | ||
1946 | * | ||
1947 | * Get the MSI EQ of the MSG defined by the given devhandle and msgtype. | ||
1948 | */ | ||
1949 | #define HV_FAST_PCI_MSG_GETMSIQ 0xd0 | ||
1950 | |||
1951 | /* pci_msg_setmsiq() | ||
1952 | * TRAP: HV_FAST_TRAP | ||
1953 | * FUNCTION: HV_FAST_PCI_MSG_SETMSIQ | ||
1954 | * ARG0: devhandle | ||
1955 | * ARG1: msgtype | ||
1956 | * ARG2: msiqid | ||
1957 | * RET0: status | ||
1958 | * ERRORS: EINVAL Invalid devhandle, msgtype, or msiqid | ||
1959 | * | ||
1960 | * Set the MSI EQ of the MSG defined by the given devhandle and msgtype. | ||
1961 | */ | ||
1962 | #define HV_FAST_PCI_MSG_SETMSIQ 0xd1 | ||
1963 | |||
1964 | /* pci_msg_getvalid() | ||
1965 | * TRAP: HV_FAST_TRAP | ||
1966 | * FUNCTION: HV_FAST_PCI_MSG_GETVALID | ||
1967 | * ARG0: devhandle | ||
1968 | * ARG1: msgtype | ||
1969 | * RET0: status | ||
1970 | * RET1: msgvalidstate | ||
1971 | * ERRORS: EINVAL Invalid devhandle or msgtype | ||
1972 | * | ||
1973 | * Get the valid/enabled state of the MSG defined by the given | ||
1974 | * devhandle and msgtype. | ||
1975 | */ | ||
1976 | #define HV_FAST_PCI_MSG_GETVALID 0xd2 | ||
1977 | |||
1978 | /* pci_msg_setvalid() | ||
1979 | * TRAP: HV_FAST_TRAP | ||
1980 | * FUNCTION: HV_FAST_PCI_MSG_SETVALID | ||
1981 | * ARG0: devhandle | ||
1982 | * ARG1: msgtype | ||
1983 | * ARG2: msgvalidstate | ||
1984 | * RET0: status | ||
1985 | * ERRORS: EINVAL Invalid devhandle or msgtype or msgvalidstate | ||
1986 | * | ||
1987 | * Set the valid/enabled state of the MSG defined by the given | ||
1988 | * devhandle and msgtype. | ||
1989 | */ | ||
1990 | #define HV_FAST_PCI_MSG_SETVALID 0xd3 | ||
1991 | |||
1992 | /* Performance counter services. */ | ||
1993 | |||
1994 | #define HV_PERF_JBUS_PERF_CTRL_REG 0x00 | ||
1995 | #define HV_PERF_JBUS_PERF_CNT_REG 0x01 | ||
1996 | #define HV_PERF_DRAM_PERF_CTRL_REG_0 0x02 | ||
1997 | #define HV_PERF_DRAM_PERF_CNT_REG_0 0x03 | ||
1998 | #define HV_PERF_DRAM_PERF_CTRL_REG_1 0x04 | ||
1999 | #define HV_PERF_DRAM_PERF_CNT_REG_1 0x05 | ||
2000 | #define HV_PERF_DRAM_PERF_CTRL_REG_2 0x06 | ||
2001 | #define HV_PERF_DRAM_PERF_CNT_REG_2 0x07 | ||
2002 | #define HV_PERF_DRAM_PERF_CTRL_REG_3 0x08 | ||
2003 | #define HV_PERF_DRAM_PERF_CNT_REG_3 0x09 | ||
2004 | |||
2005 | /* get_perfreg() | ||
2006 | * TRAP: HV_FAST_TRAP | ||
2007 | * FUNCTION: HV_FAST_GET_PERFREG | ||
2008 | * ARG0: performance reg number | ||
2009 | * RET0: status | ||
2010 | * RET1: performance reg value | ||
2011 | * ERRORS: EINVAL Invalid performance register number | ||
2012 | * ENOACCESS No access allowed to performance counters | ||
2013 | * | ||
2014 | * Read the value of the given DRAM/JBUS performance counter/control register. | ||
2015 | */ | ||
2016 | #define HV_FAST_GET_PERFREG 0x100 | ||
2017 | |||
2018 | /* set_perfreg() | ||
2019 | * TRAP: HV_FAST_TRAP | ||
2020 | * FUNCTION: HV_FAST_SET_PERFREG | ||
2021 | * ARG0: performance reg number | ||
2022 | * ARG1: performance reg value | ||
2023 | * RET0: status | ||
2024 | * ERRORS: EINVAL Invalid performance register number | ||
2025 | * ENOACCESS No access allowed to performance counters | ||
2026 | * | ||
2027 | * Write the given performance reg value to the given DRAM/JBUS | ||
2028 | * performance counter/control register. | ||
2029 | */ | ||
2030 | #define HV_FAST_SET_PERFREG 0x101 | ||
2031 | |||
2032 | /* MMU statistics services. | ||
2033 | * | ||
2034 | * The hypervisor maintains MMU statistics and privileged code provides | ||
2035 | * a buffer where these statistics can be collected. It is continually | ||
2036 | * updated once configured. The layout is as follows: | ||
2037 | */ | ||
2038 | #ifndef __ASSEMBLY__ | ||
2039 | struct hv_mmu_statistics { | ||
2040 | unsigned long immu_tsb_hits_ctx0_8k_tte; | ||
2041 | unsigned long immu_tsb_ticks_ctx0_8k_tte; | ||
2042 | unsigned long immu_tsb_hits_ctx0_64k_tte; | ||
2043 | unsigned long immu_tsb_ticks_ctx0_64k_tte; | ||
2044 | unsigned long __reserved1[2]; | ||
2045 | unsigned long immu_tsb_hits_ctx0_4mb_tte; | ||
2046 | unsigned long immu_tsb_ticks_ctx0_4mb_tte; | ||
2047 | unsigned long __reserved2[2]; | ||
2048 | unsigned long immu_tsb_hits_ctx0_256mb_tte; | ||
2049 | unsigned long immu_tsb_ticks_ctx0_256mb_tte; | ||
2050 | unsigned long __reserved3[4]; | ||
2051 | unsigned long immu_tsb_hits_ctxnon0_8k_tte; | ||
2052 | unsigned long immu_tsb_ticks_ctxnon0_8k_tte; | ||
2053 | unsigned long immu_tsb_hits_ctxnon0_64k_tte; | ||
2054 | unsigned long immu_tsb_ticks_ctxnon0_64k_tte; | ||
2055 | unsigned long __reserved4[2]; | ||
2056 | unsigned long immu_tsb_hits_ctxnon0_4mb_tte; | ||
2057 | unsigned long immu_tsb_ticks_ctxnon0_4mb_tte; | ||
2058 | unsigned long __reserved5[2]; | ||
2059 | unsigned long immu_tsb_hits_ctxnon0_256mb_tte; | ||
2060 | unsigned long immu_tsb_ticks_ctxnon0_256mb_tte; | ||
2061 | unsigned long __reserved6[4]; | ||
2062 | unsigned long dmmu_tsb_hits_ctx0_8k_tte; | ||
2063 | unsigned long dmmu_tsb_ticks_ctx0_8k_tte; | ||
2064 | unsigned long dmmu_tsb_hits_ctx0_64k_tte; | ||
2065 | unsigned long dmmu_tsb_ticks_ctx0_64k_tte; | ||
2066 | unsigned long __reserved7[2]; | ||
2067 | unsigned long dmmu_tsb_hits_ctx0_4mb_tte; | ||
2068 | unsigned long dmmu_tsb_ticks_ctx0_4mb_tte; | ||
2069 | unsigned long __reserved8[2]; | ||
2070 | unsigned long dmmu_tsb_hits_ctx0_256mb_tte; | ||
2071 | unsigned long dmmu_tsb_ticks_ctx0_256mb_tte; | ||
2072 | unsigned long __reserved9[4]; | ||
2073 | unsigned long dmmu_tsb_hits_ctxnon0_8k_tte; | ||
2074 | unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte; | ||
2075 | unsigned long dmmu_tsb_hits_ctxnon0_64k_tte; | ||
2076 | unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte; | ||
2077 | unsigned long __reserved10[2]; | ||
2078 | unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte; | ||
2079 | unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte; | ||
2080 | unsigned long __reserved11[2]; | ||
2081 | unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte; | ||
2082 | unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte; | ||
2083 | unsigned long __reserved12[4]; | ||
2084 | }; | ||
2085 | #endif | ||
2086 | |||
2087 | /* mmustat_conf() | ||
2088 | * TRAP: HV_FAST_TRAP | ||
2089 | * FUNCTION: HV_FAST_MMUSTAT_CONF | ||
2090 | * ARG0: real address | ||
2091 | * RET0: status | ||
2092 | * RET1: real address | ||
2093 | * ERRORS: ENORADDR Invalid real address | ||
2094 | * EBADALIGN Real address not aligned on 64-byte boundary | ||
2095 | * EBADTRAP API not supported on this processor | ||
2096 | * | ||
2097 | * Enable MMU statistic gathering using the buffer at the given real | ||
2098 | * address on the current virtual CPU. The new buffer real address | ||
2099 | * is given in ARG1, and the previously specified buffer real address | ||
2100 | * is returned in RET1, or is returned as zero for the first invocation. | ||
2101 | * | ||
2102 | * If the passed in real address argument is zero, this will disable | ||
2103 | * MMU statistic collection on the current virtual CPU. If an error is | ||
2104 | * returned then no statistics are collected. | ||
2105 | * | ||
2106 | * The buffer contents should be initialized to all zeros before being | ||
2107 | * given to the hypervisor or else the statistics will be meaningless. | ||
2108 | */ | ||
2109 | #define HV_FAST_MMUSTAT_CONF 0x102 | ||
2110 | |||
2111 | /* mmustat_info() | ||
2112 | * TRAP: HV_FAST_TRAP | ||
2113 | * FUNCTION: HV_FAST_MMUSTAT_INFO | ||
2114 | * RET0: status | ||
2115 | * RET1: real address | ||
2116 | * ERRORS: EBADTRAP API not supported on this processor | ||
2117 | * | ||
2118 | * Return the current state and real address of the currently configured | ||
2119 | * MMU statistics buffer on the current virtual CPU. | ||
2120 | */ | ||
2121 | #define HV_FAST_MMUSTAT_INFO 0x103 | ||
2122 | |||
2123 | /* Function numbers for HV_CORE_TRAP. */ | ||
2124 | #define HV_CORE_VER 0x00 | ||
2125 | #define HV_CORE_PUTCHAR 0x01 | ||
2126 | #define HV_CORE_EXIT 0x02 | ||
2127 | |||
2128 | #endif /* !(_SPARC64_HYPERVISOR_H) */ | ||
diff --git a/include/asm-sparc64/idprom.h b/include/asm-sparc64/idprom.h index 701483c5465d..77fbf987385f 100644 --- a/include/asm-sparc64/idprom.h +++ b/include/asm-sparc64/idprom.h | |||
@@ -9,15 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | 11 | ||
12 | /* Offset into the EEPROM where the id PROM is located on the 4c */ | 12 | struct idprom { |
13 | #define IDPROM_OFFSET 0x7d8 | ||
14 | |||
15 | /* On sun4m; physical. */ | ||
16 | /* MicroSPARC(-II) does not decode 31rd bit, but it works. */ | ||
17 | #define IDPROM_OFFSET_M 0xfd8 | ||
18 | |||
19 | struct idprom | ||
20 | { | ||
21 | u8 id_format; /* Format identifier (always 0x01) */ | 13 | u8 id_format; /* Format identifier (always 0x01) */ |
22 | u8 id_machtype; /* Machine type */ | 14 | u8 id_machtype; /* Machine type */ |
23 | u8 id_ethaddr[6]; /* Hardware ethernet address */ | 15 | u8 id_ethaddr[6]; /* Hardware ethernet address */ |
@@ -30,6 +22,4 @@ struct idprom | |||
30 | extern struct idprom *idprom; | 22 | extern struct idprom *idprom; |
31 | extern void idprom_init(void); | 23 | extern void idprom_init(void); |
32 | 24 | ||
33 | #define IDPROM_SIZE (sizeof(struct idprom)) | ||
34 | |||
35 | #endif /* !(_SPARC_IDPROM_H) */ | 25 | #endif /* !(_SPARC_IDPROM_H) */ |
diff --git a/include/asm-sparc64/intr_queue.h b/include/asm-sparc64/intr_queue.h new file mode 100644 index 000000000000..206077dedc2a --- /dev/null +++ b/include/asm-sparc64/intr_queue.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef _SPARC64_INTR_QUEUE_H | ||
2 | #define _SPARC64_INTR_QUEUE_H | ||
3 | |||
4 | /* Sun4v interrupt queue registers, accessed via ASI_QUEUE. */ | ||
5 | |||
6 | #define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */ | ||
7 | #define INTRQ_CPU_MONDO_TAIL 0x3c8 /* CPU mondo tail */ | ||
8 | #define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */ | ||
9 | #define INTRQ_DEVICE_MONDO_TAIL 0x3d8 /* Device mondo tail */ | ||
10 | #define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */ | ||
11 | #define INTRQ_RESUM_MONDO_TAIL 0x3e8 /* Resumable error mondo tail */ | ||
12 | #define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */ | ||
13 | #define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */ | ||
14 | |||
15 | #endif /* !(_SPARC64_INTR_QUEUE_H) */ | ||
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h index 8b70edcb80dc..de33d6e1afb5 100644 --- a/include/asm-sparc64/irq.h +++ b/include/asm-sparc64/irq.h | |||
@@ -72,8 +72,11 @@ struct ino_bucket { | |||
72 | #define IMAP_VALID 0x80000000 /* IRQ Enabled */ | 72 | #define IMAP_VALID 0x80000000 /* IRQ Enabled */ |
73 | #define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */ | 73 | #define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */ |
74 | #define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */ | 74 | #define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */ |
75 | #define IMAP_TID_SHIFT 26 | ||
75 | #define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */ | 76 | #define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */ |
77 | #define IMAP_AID_SHIFT 26 | ||
76 | #define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */ | 78 | #define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */ |
79 | #define IMAP_NID_SHIFT 21 | ||
77 | #define IMAP_IGN 0x000007c0 /* IRQ Group Number */ | 80 | #define IMAP_IGN 0x000007c0 /* IRQ Group Number */ |
78 | #define IMAP_INO 0x0000003f /* IRQ Number */ | 81 | #define IMAP_INO 0x0000003f /* IRQ Number */ |
79 | #define IMAP_INR 0x000007ff /* Full interrupt number*/ | 82 | #define IMAP_INR 0x000007ff /* Full interrupt number*/ |
@@ -111,6 +114,7 @@ extern void disable_irq(unsigned int); | |||
111 | #define disable_irq_nosync disable_irq | 114 | #define disable_irq_nosync disable_irq |
112 | extern void enable_irq(unsigned int); | 115 | extern void enable_irq(unsigned int); |
113 | extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap); | 116 | extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap); |
117 | extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags); | ||
114 | extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); | 118 | extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); |
115 | 119 | ||
116 | static __inline__ void set_softint(unsigned long bits) | 120 | static __inline__ void set_softint(unsigned long bits) |
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 8627eed6e83d..230ba678d3b0 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h | |||
@@ -4,20 +4,9 @@ | |||
4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
5 | #include <asm/page.h> | 5 | #include <asm/page.h> |
6 | #include <asm/const.h> | 6 | #include <asm/const.h> |
7 | #include <asm/hypervisor.h> | ||
7 | 8 | ||
8 | /* | 9 | #define CTX_NR_BITS 13 |
9 | * For the 8k pagesize kernel, use only 10 hw context bits to optimize some | ||
10 | * shifts in the fast tlbmiss handlers, instead of all 13 bits (specifically | ||
11 | * for vpte offset calculation). For other pagesizes, this optimization in | ||
12 | * the tlbhandlers can not be done; but still, all 13 bits can not be used | ||
13 | * because the tlb handlers use "andcc" instruction which sign extends 13 | ||
14 | * bit arguments. | ||
15 | */ | ||
16 | #if PAGE_SHIFT == 13 | ||
17 | #define CTX_NR_BITS 10 | ||
18 | #else | ||
19 | #define CTX_NR_BITS 12 | ||
20 | #endif | ||
21 | 10 | ||
22 | #define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL)) | 11 | #define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL)) |
23 | 12 | ||
@@ -90,8 +79,27 @@ | |||
90 | 79 | ||
91 | #ifndef __ASSEMBLY__ | 80 | #ifndef __ASSEMBLY__ |
92 | 81 | ||
82 | #define TSB_ENTRY_ALIGNMENT 16 | ||
83 | |||
84 | struct tsb { | ||
85 | unsigned long tag; | ||
86 | unsigned long pte; | ||
87 | } __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); | ||
88 | |||
89 | extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte); | ||
90 | extern void tsb_flush(unsigned long ent, unsigned long tag); | ||
91 | extern void tsb_init(struct tsb *tsb, unsigned long size); | ||
92 | |||
93 | typedef struct { | 93 | typedef struct { |
94 | unsigned long sparc64_ctx_val; | 94 | spinlock_t lock; |
95 | unsigned long sparc64_ctx_val; | ||
96 | struct tsb *tsb; | ||
97 | unsigned long tsb_rss_limit; | ||
98 | unsigned long tsb_nentries; | ||
99 | unsigned long tsb_reg_val; | ||
100 | unsigned long tsb_map_vaddr; | ||
101 | unsigned long tsb_map_pte; | ||
102 | struct hv_tsb_descr tsb_descr; | ||
95 | } mm_context_t; | 103 | } mm_context_t; |
96 | 104 | ||
97 | #endif /* !__ASSEMBLY__ */ | 105 | #endif /* !__ASSEMBLY__ */ |
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 57ee7b306189..e7974321d052 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h | |||
@@ -19,96 +19,98 @@ extern unsigned long tlb_context_cache; | |||
19 | extern unsigned long mmu_context_bmap[]; | 19 | extern unsigned long mmu_context_bmap[]; |
20 | 20 | ||
21 | extern void get_new_mmu_context(struct mm_struct *mm); | 21 | extern void get_new_mmu_context(struct mm_struct *mm); |
22 | #ifdef CONFIG_SMP | ||
23 | extern void smp_new_mmu_context_version(void); | ||
24 | #else | ||
25 | #define smp_new_mmu_context_version() do { } while (0) | ||
26 | #endif | ||
27 | |||
28 | extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); | ||
29 | extern void destroy_context(struct mm_struct *mm); | ||
30 | |||
31 | extern void __tsb_context_switch(unsigned long pgd_pa, | ||
32 | unsigned long tsb_reg, | ||
33 | unsigned long tsb_vaddr, | ||
34 | unsigned long tsb_pte, | ||
35 | unsigned long tsb_descr_pa); | ||
36 | |||
37 | static inline void tsb_context_switch(struct mm_struct *mm) | ||
38 | { | ||
39 | __tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val, | ||
40 | mm->context.tsb_map_vaddr, | ||
41 | mm->context.tsb_map_pte, | ||
42 | __pa(&mm->context.tsb_descr)); | ||
43 | } | ||
22 | 44 | ||
23 | /* Initialize a new mmu context. This is invoked when a new | 45 | extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss); |
24 | * address space instance (unique or shared) is instantiated. | 46 | #ifdef CONFIG_SMP |
25 | * This just needs to set mm->context to an invalid context. | 47 | extern void smp_tsb_sync(struct mm_struct *mm); |
26 | */ | 48 | #else |
27 | #define init_new_context(__tsk, __mm) \ | 49 | #define smp_tsb_sync(__mm) do { } while (0) |
28 | (((__mm)->context.sparc64_ctx_val = 0UL), 0) | 50 | #endif |
29 | |||
30 | /* Destroy a dead context. This occurs when mmput drops the | ||
31 | * mm_users count to zero, the mmaps have been released, and | ||
32 | * all the page tables have been flushed. Our job is to destroy | ||
33 | * any remaining processor-specific state, and in the sparc64 | ||
34 | * case this just means freeing up the mmu context ID held by | ||
35 | * this task if valid. | ||
36 | */ | ||
37 | #define destroy_context(__mm) \ | ||
38 | do { spin_lock(&ctx_alloc_lock); \ | ||
39 | if (CTX_VALID((__mm)->context)) { \ | ||
40 | unsigned long nr = CTX_NRBITS((__mm)->context); \ | ||
41 | mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \ | ||
42 | } \ | ||
43 | spin_unlock(&ctx_alloc_lock); \ | ||
44 | } while(0) | ||
45 | |||
46 | /* Reload the two core values used by TLB miss handler | ||
47 | * processing on sparc64. They are: | ||
48 | * 1) The physical address of mm->pgd, when full page | ||
49 | * table walks are necessary, this is where the | ||
50 | * search begins. | ||
51 | * 2) A "PGD cache". For 32-bit tasks only pgd[0] is | ||
52 | * ever used since that maps the entire low 4GB | ||
53 | * completely. To speed up TLB miss processing we | ||
54 | * make this value available to the handlers. This | ||
55 | * decreases the amount of memory traffic incurred. | ||
56 | */ | ||
57 | #define reload_tlbmiss_state(__tsk, __mm) \ | ||
58 | do { \ | ||
59 | register unsigned long paddr asm("o5"); \ | ||
60 | register unsigned long pgd_cache asm("o4"); \ | ||
61 | paddr = __pa((__mm)->pgd); \ | ||
62 | pgd_cache = 0UL; \ | ||
63 | if (task_thread_info(__tsk)->flags & _TIF_32BIT) \ | ||
64 | pgd_cache = get_pgd_cache((__mm)->pgd); \ | ||
65 | __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \ | ||
66 | "mov %3, %%g4\n\t" \ | ||
67 | "mov %0, %%g7\n\t" \ | ||
68 | "stxa %1, [%%g4] %2\n\t" \ | ||
69 | "membar #Sync\n\t" \ | ||
70 | "wrpr %%g0, 0x096, %%pstate" \ | ||
71 | : /* no outputs */ \ | ||
72 | : "r" (paddr), "r" (pgd_cache),\ | ||
73 | "i" (ASI_DMMU), "i" (TSB_REG)); \ | ||
74 | } while(0) | ||
75 | 51 | ||
76 | /* Set MMU context in the actual hardware. */ | 52 | /* Set MMU context in the actual hardware. */ |
77 | #define load_secondary_context(__mm) \ | 53 | #define load_secondary_context(__mm) \ |
78 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" \ | 54 | __asm__ __volatile__( \ |
79 | "flush %%g6" \ | 55 | "\n661: stxa %0, [%1] %2\n" \ |
80 | : /* No outputs */ \ | 56 | " .section .sun4v_1insn_patch, \"ax\"\n" \ |
81 | : "r" (CTX_HWBITS((__mm)->context)), \ | 57 | " .word 661b\n" \ |
82 | "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU)) | 58 | " stxa %0, [%1] %3\n" \ |
59 | " .previous\n" \ | ||
60 | " flush %%g6\n" \ | ||
61 | : /* No outputs */ \ | ||
62 | : "r" (CTX_HWBITS((__mm)->context)), \ | ||
63 | "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU)) | ||
83 | 64 | ||
84 | extern void __flush_tlb_mm(unsigned long, unsigned long); | 65 | extern void __flush_tlb_mm(unsigned long, unsigned long); |
85 | 66 | ||
86 | /* Switch the current MM context. */ | 67 | /* Switch the current MM context. Interrupts are disabled. */ |
87 | static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) | 68 | static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) |
88 | { | 69 | { |
89 | unsigned long ctx_valid; | 70 | unsigned long ctx_valid, flags; |
90 | int cpu; | 71 | int cpu; |
91 | 72 | ||
92 | /* Note: page_table_lock is used here to serialize switch_mm | 73 | spin_lock_irqsave(&mm->context.lock, flags); |
93 | * and activate_mm, and their calls to get_new_mmu_context. | ||
94 | * This use of page_table_lock is unrelated to its other uses. | ||
95 | */ | ||
96 | spin_lock(&mm->page_table_lock); | ||
97 | ctx_valid = CTX_VALID(mm->context); | 74 | ctx_valid = CTX_VALID(mm->context); |
98 | if (!ctx_valid) | 75 | if (!ctx_valid) |
99 | get_new_mmu_context(mm); | 76 | get_new_mmu_context(mm); |
100 | spin_unlock(&mm->page_table_lock); | ||
101 | 77 | ||
102 | if (!ctx_valid || (old_mm != mm)) { | 78 | /* We have to be extremely careful here or else we will miss |
103 | load_secondary_context(mm); | 79 | * a TSB grow if we switch back and forth between a kernel |
104 | reload_tlbmiss_state(tsk, mm); | 80 | * thread and an address space which has it's TSB size increased |
105 | } | 81 | * on another processor. |
82 | * | ||
83 | * It is possible to play some games in order to optimize the | ||
84 | * switch, but the safest thing to do is to unconditionally | ||
85 | * perform the secondary context load and the TSB context switch. | ||
86 | * | ||
87 | * For reference the bad case is, for address space "A": | ||
88 | * | ||
89 | * CPU 0 CPU 1 | ||
90 | * run address space A | ||
91 | * set cpu0's bits in cpu_vm_mask | ||
92 | * switch to kernel thread, borrow | ||
93 | * address space A via entry_lazy_tlb | ||
94 | * run address space A | ||
95 | * set cpu1's bit in cpu_vm_mask | ||
96 | * flush_tlb_pending() | ||
97 | * reset cpu_vm_mask to just cpu1 | ||
98 | * TSB grow | ||
99 | * run address space A | ||
100 | * context was valid, so skip | ||
101 | * TSB context switch | ||
102 | * | ||
103 | * At that point cpu0 continues to use a stale TSB, the one from | ||
104 | * before the TSB grow performed on cpu1. cpu1 did not cross-call | ||
105 | * cpu0 to update it's TSB because at that point the cpu_vm_mask | ||
106 | * only had cpu1 set in it. | ||
107 | */ | ||
108 | load_secondary_context(mm); | ||
109 | tsb_context_switch(mm); | ||
106 | 110 | ||
107 | /* Even if (mm == old_mm) we _must_ check | 111 | /* Any time a processor runs a context on an address space |
108 | * the cpu_vm_mask. If we do not we could | 112 | * for the first time, we must flush that context out of the |
109 | * corrupt the TLB state because of how | 113 | * local TLB. |
110 | * smp_flush_tlb_{page,range,mm} on sparc64 | ||
111 | * and lazy tlb switches work. -DaveM | ||
112 | */ | 114 | */ |
113 | cpu = smp_processor_id(); | 115 | cpu = smp_processor_id(); |
114 | if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { | 116 | if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { |
@@ -116,6 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str | |||
116 | __flush_tlb_mm(CTX_HWBITS(mm->context), | 118 | __flush_tlb_mm(CTX_HWBITS(mm->context), |
117 | SECONDARY_CONTEXT); | 119 | SECONDARY_CONTEXT); |
118 | } | 120 | } |
121 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
119 | } | 122 | } |
120 | 123 | ||
121 | #define deactivate_mm(tsk,mm) do { } while (0) | 124 | #define deactivate_mm(tsk,mm) do { } while (0) |
@@ -123,23 +126,20 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str | |||
123 | /* Activate a new MM instance for the current task. */ | 126 | /* Activate a new MM instance for the current task. */ |
124 | static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) | 127 | static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) |
125 | { | 128 | { |
129 | unsigned long flags; | ||
126 | int cpu; | 130 | int cpu; |
127 | 131 | ||
128 | /* Note: page_table_lock is used here to serialize switch_mm | 132 | spin_lock_irqsave(&mm->context.lock, flags); |
129 | * and activate_mm, and their calls to get_new_mmu_context. | ||
130 | * This use of page_table_lock is unrelated to its other uses. | ||
131 | */ | ||
132 | spin_lock(&mm->page_table_lock); | ||
133 | if (!CTX_VALID(mm->context)) | 133 | if (!CTX_VALID(mm->context)) |
134 | get_new_mmu_context(mm); | 134 | get_new_mmu_context(mm); |
135 | cpu = smp_processor_id(); | 135 | cpu = smp_processor_id(); |
136 | if (!cpu_isset(cpu, mm->cpu_vm_mask)) | 136 | if (!cpu_isset(cpu, mm->cpu_vm_mask)) |
137 | cpu_set(cpu, mm->cpu_vm_mask); | 137 | cpu_set(cpu, mm->cpu_vm_mask); |
138 | spin_unlock(&mm->page_table_lock); | ||
139 | 138 | ||
140 | load_secondary_context(mm); | 139 | load_secondary_context(mm); |
141 | __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); | 140 | __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); |
142 | reload_tlbmiss_state(current, mm); | 141 | tsb_context_switch(mm); |
142 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
143 | } | 143 | } |
144 | 144 | ||
145 | #endif /* !(__ASSEMBLY__) */ | 145 | #endif /* !(__ASSEMBLY__) */ |
diff --git a/include/asm-sparc64/numnodes.h b/include/asm-sparc64/numnodes.h new file mode 100644 index 000000000000..017e7e74f5e7 --- /dev/null +++ b/include/asm-sparc64/numnodes.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _SPARC64_NUMNODES_H | ||
2 | #define _SPARC64_NUMNODES_H | ||
3 | |||
4 | #define NODES_SHIFT 0 | ||
5 | |||
6 | #endif /* !(_SPARC64_NUMNODES_H) */ | ||
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index 3c59b2693fb9..c754676e13ef 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h | |||
@@ -12,18 +12,8 @@ | |||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <asm/openprom.h> | 13 | #include <asm/openprom.h> |
14 | 14 | ||
15 | /* Enumeration to describe the prom major version we have detected. */ | 15 | /* OBP version string. */ |
16 | enum prom_major_version { | 16 | extern char prom_version[]; |
17 | PROM_V0, /* Original sun4c V0 prom */ | ||
18 | PROM_V2, /* sun4c and early sun4m V2 prom */ | ||
19 | PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */ | ||
20 | PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */ | ||
21 | PROM_AP1000, /* actually no prom at all */ | ||
22 | }; | ||
23 | |||
24 | extern enum prom_major_version prom_vers; | ||
25 | /* Revision, and firmware revision. */ | ||
26 | extern unsigned int prom_rev, prom_prev; | ||
27 | 17 | ||
28 | /* Root node of the prom device tree, this stays constant after | 18 | /* Root node of the prom device tree, this stays constant after |
29 | * initialization is complete. | 19 | * initialization is complete. |
@@ -39,6 +29,9 @@ extern int prom_stdin, prom_stdout; | |||
39 | extern int prom_chosen_node; | 29 | extern int prom_chosen_node; |
40 | 30 | ||
41 | /* Helper values and strings in arch/sparc64/kernel/head.S */ | 31 | /* Helper values and strings in arch/sparc64/kernel/head.S */ |
32 | extern const char prom_peer_name[]; | ||
33 | extern const char prom_compatible_name[]; | ||
34 | extern const char prom_root_compatible[]; | ||
42 | extern const char prom_finddev_name[]; | 35 | extern const char prom_finddev_name[]; |
43 | extern const char prom_chosen_path[]; | 36 | extern const char prom_chosen_path[]; |
44 | extern const char prom_getprop_name[]; | 37 | extern const char prom_getprop_name[]; |
@@ -130,15 +123,6 @@ extern void prom_setcallback(callback_func_t func_ptr); | |||
130 | */ | 123 | */ |
131 | extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); | 124 | extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); |
132 | 125 | ||
133 | /* Get the prom major version. */ | ||
134 | extern int prom_version(void); | ||
135 | |||
136 | /* Get the prom plugin revision. */ | ||
137 | extern int prom_getrev(void); | ||
138 | |||
139 | /* Get the prom firmware revision. */ | ||
140 | extern int prom_getprev(void); | ||
141 | |||
142 | /* Character operations to/from the console.... */ | 126 | /* Character operations to/from the console.... */ |
143 | 127 | ||
144 | /* Non-blocking get character from console. */ | 128 | /* Non-blocking get character from console. */ |
@@ -164,6 +148,7 @@ enum prom_input_device { | |||
164 | PROMDEV_ITTYA, /* input from ttya */ | 148 | PROMDEV_ITTYA, /* input from ttya */ |
165 | PROMDEV_ITTYB, /* input from ttyb */ | 149 | PROMDEV_ITTYB, /* input from ttyb */ |
166 | PROMDEV_IRSC, /* input from rsc */ | 150 | PROMDEV_IRSC, /* input from rsc */ |
151 | PROMDEV_IVCONS, /* input from virtual-console */ | ||
167 | PROMDEV_I_UNK, | 152 | PROMDEV_I_UNK, |
168 | }; | 153 | }; |
169 | 154 | ||
@@ -176,6 +161,7 @@ enum prom_output_device { | |||
176 | PROMDEV_OTTYA, /* to ttya */ | 161 | PROMDEV_OTTYA, /* to ttya */ |
177 | PROMDEV_OTTYB, /* to ttyb */ | 162 | PROMDEV_OTTYB, /* to ttyb */ |
178 | PROMDEV_ORSC, /* to rsc */ | 163 | PROMDEV_ORSC, /* to rsc */ |
164 | PROMDEV_OVCONS, /* to virtual-console */ | ||
179 | PROMDEV_O_UNK, | 165 | PROMDEV_O_UNK, |
180 | }; | 166 | }; |
181 | 167 | ||
@@ -183,10 +169,18 @@ extern enum prom_output_device prom_query_output_device(void); | |||
183 | 169 | ||
184 | /* Multiprocessor operations... */ | 170 | /* Multiprocessor operations... */ |
185 | #ifdef CONFIG_SMP | 171 | #ifdef CONFIG_SMP |
186 | /* Start the CPU with the given device tree node, context table, and context | 172 | /* Start the CPU with the given device tree node at the passed program |
187 | * at the passed program counter. | 173 | * counter with the given arg passed in via register %o0. |
174 | */ | ||
175 | extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg); | ||
176 | |||
177 | /* Start the CPU with the given cpu ID at the passed program | ||
178 | * counter with the given arg passed in via register %o0. | ||
188 | */ | 179 | */ |
189 | extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0); | 180 | extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg); |
181 | |||
182 | /* Stop the CPU with the given cpu ID. */ | ||
183 | extern void prom_stopcpu_cpuid(int cpuid); | ||
190 | 184 | ||
191 | /* Stop the current CPU. */ | 185 | /* Stop the current CPU. */ |
192 | extern void prom_stopself(void); | 186 | extern void prom_stopself(void); |
@@ -335,6 +329,7 @@ int cpu_find_by_mid(int mid, int *prom_node); | |||
335 | 329 | ||
336 | /* Client interface level routines. */ | 330 | /* Client interface level routines. */ |
337 | extern void prom_set_trap_table(unsigned long tba); | 331 | extern void prom_set_trap_table(unsigned long tba); |
332 | extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa); | ||
338 | 333 | ||
339 | extern long p1275_cmd(const char *, long, ...); | 334 | extern long p1275_cmd(const char *, long, ...); |
340 | 335 | ||
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index 5426bb28a993..fcb2812265f4 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h | |||
@@ -104,10 +104,12 @@ typedef unsigned long pgprot_t; | |||
104 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | 104 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
105 | #define ARCH_HAS_SETCLEAR_HUGE_PTE | 105 | #define ARCH_HAS_SETCLEAR_HUGE_PTE |
106 | #define ARCH_HAS_HUGETLB_PREFAULT_HOOK | 106 | #define ARCH_HAS_HUGETLB_PREFAULT_HOOK |
107 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
107 | #endif | 108 | #endif |
108 | 109 | ||
109 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ | 110 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ |
110 | (_AC(0x0000000070000000,UL)) : (PAGE_OFFSET)) | 111 | (_AC(0x0000000070000000,UL)) : \ |
112 | (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) | ||
111 | 113 | ||
112 | #endif /* !(__ASSEMBLY__) */ | 114 | #endif /* !(__ASSEMBLY__) */ |
113 | 115 | ||
@@ -124,17 +126,10 @@ typedef unsigned long pgprot_t; | |||
124 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) | 126 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) |
125 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) | 127 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) |
126 | 128 | ||
127 | /* PFNs are real physical page numbers. However, mem_map only begins to record | 129 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
128 | * per-page information starting at pfn_base. This is to handle systems where | ||
129 | * the first physical page in the machine is at some huge physical address, | ||
130 | * such as 4GB. This is common on a partitioned E10000, for example. | ||
131 | */ | ||
132 | extern struct page *pfn_to_page(unsigned long pfn); | ||
133 | extern unsigned long page_to_pfn(struct page *); | ||
134 | 130 | ||
135 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT) | 131 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT) |
136 | 132 | ||
137 | #define pfn_valid(pfn) (((pfn)-(pfn_base)) < max_mapnr) | ||
138 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 133 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
139 | 134 | ||
140 | #define virt_to_phys __pa | 135 | #define virt_to_phys __pa |
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index dd35a2c7798a..1396f110939a 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h | |||
@@ -139,6 +139,9 @@ struct pci_pbm_info { | |||
139 | /* Opaque 32-bit system bus Port ID. */ | 139 | /* Opaque 32-bit system bus Port ID. */ |
140 | u32 portid; | 140 | u32 portid; |
141 | 141 | ||
142 | /* Opaque 32-bit handle used for hypervisor calls. */ | ||
143 | u32 devhandle; | ||
144 | |||
142 | /* Chipset version information. */ | 145 | /* Chipset version information. */ |
143 | int chip_type; | 146 | int chip_type; |
144 | #define PBM_CHIP_TYPE_SABRE 1 | 147 | #define PBM_CHIP_TYPE_SABRE 1 |
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h index 89bd71b1c0d8..7c5a589ea437 100644 --- a/include/asm-sparc64/pci.h +++ b/include/asm-sparc64/pci.h | |||
@@ -41,10 +41,26 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
41 | 41 | ||
42 | struct pci_dev; | 42 | struct pci_dev; |
43 | 43 | ||
44 | struct pci_iommu_ops { | ||
45 | void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *); | ||
46 | void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t); | ||
47 | dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int); | ||
48 | void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int); | ||
49 | int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int); | ||
50 | void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int); | ||
51 | void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int); | ||
52 | void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int); | ||
53 | }; | ||
54 | |||
55 | extern struct pci_iommu_ops *pci_iommu_ops; | ||
56 | |||
44 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | 57 | /* Allocate and map kernel buffer using consistent mode DMA for a device. |
45 | * hwdev should be valid struct pci_dev pointer for PCI devices. | 58 | * hwdev should be valid struct pci_dev pointer for PCI devices. |
46 | */ | 59 | */ |
47 | extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); | 60 | static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) |
61 | { | ||
62 | return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle); | ||
63 | } | ||
48 | 64 | ||
49 | /* Free and unmap a consistent DMA buffer. | 65 | /* Free and unmap a consistent DMA buffer. |
50 | * cpu_addr is what was returned from pci_alloc_consistent, | 66 | * cpu_addr is what was returned from pci_alloc_consistent, |
@@ -54,7 +70,10 @@ extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t | |||
54 | * References to the memory and mappings associated with cpu_addr/dma_addr | 70 | * References to the memory and mappings associated with cpu_addr/dma_addr |
55 | * past this call are illegal. | 71 | * past this call are illegal. |
56 | */ | 72 | */ |
57 | extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); | 73 | static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) |
74 | { | ||
75 | return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle); | ||
76 | } | ||
58 | 77 | ||
59 | /* Map a single buffer of the indicated size for DMA in streaming mode. | 78 | /* Map a single buffer of the indicated size for DMA in streaming mode. |
60 | * The 32-bit bus address to use is returned. | 79 | * The 32-bit bus address to use is returned. |
@@ -62,7 +81,10 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, | |||
62 | * Once the device is given the dma address, the device owns this memory | 81 | * Once the device is given the dma address, the device owns this memory |
63 | * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. | 82 | * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. |
64 | */ | 83 | */ |
65 | extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); | 84 | static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) |
85 | { | ||
86 | return pci_iommu_ops->map_single(hwdev, ptr, size, direction); | ||
87 | } | ||
66 | 88 | ||
67 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | 89 | /* Unmap a single streaming mode DMA translation. The dma_addr and size |
68 | * must match what was provided for in a previous pci_map_single call. All | 90 | * must match what was provided for in a previous pci_map_single call. All |
@@ -71,7 +93,10 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | |||
71 | * After this call, reads by the cpu to the buffer are guaranteed to see | 93 | * After this call, reads by the cpu to the buffer are guaranteed to see |
72 | * whatever the device wrote there. | 94 | * whatever the device wrote there. |
73 | */ | 95 | */ |
74 | extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); | 96 | static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction) |
97 | { | ||
98 | pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction); | ||
99 | } | ||
75 | 100 | ||
76 | /* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */ | 101 | /* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */ |
77 | #define pci_map_page(dev, page, off, size, dir) \ | 102 | #define pci_map_page(dev, page, off, size, dir) \ |
@@ -107,15 +132,19 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t | |||
107 | * Device ownership issues as mentioned above for pci_map_single are | 132 | * Device ownership issues as mentioned above for pci_map_single are |
108 | * the same here. | 133 | * the same here. |
109 | */ | 134 | */ |
110 | extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, | 135 | static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) |
111 | int nents, int direction); | 136 | { |
137 | return pci_iommu_ops->map_sg(hwdev, sg, nents, direction); | ||
138 | } | ||
112 | 139 | ||
113 | /* Unmap a set of streaming mode DMA translations. | 140 | /* Unmap a set of streaming mode DMA translations. |
114 | * Again, cpu read rules concerning calls here are the same as for | 141 | * Again, cpu read rules concerning calls here are the same as for |
115 | * pci_unmap_single() above. | 142 | * pci_unmap_single() above. |
116 | */ | 143 | */ |
117 | extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, | 144 | static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction) |
118 | int nhwents, int direction); | 145 | { |
146 | pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction); | ||
147 | } | ||
119 | 148 | ||
120 | /* Make physical memory consistent for a single | 149 | /* Make physical memory consistent for a single |
121 | * streaming mode DMA translation after a transfer. | 150 | * streaming mode DMA translation after a transfer. |
@@ -127,8 +156,10 @@ extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, | |||
127 | * must first perform a pci_dma_sync_for_device, and then the | 156 | * must first perform a pci_dma_sync_for_device, and then the |
128 | * device again owns the buffer. | 157 | * device again owns the buffer. |
129 | */ | 158 | */ |
130 | extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, | 159 | static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) |
131 | size_t size, int direction); | 160 | { |
161 | pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction); | ||
162 | } | ||
132 | 163 | ||
133 | static inline void | 164 | static inline void |
134 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, | 165 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, |
@@ -144,7 +175,10 @@ pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, | |||
144 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | 175 | * The same as pci_dma_sync_single_* but for a scatter-gather list, |
145 | * same rules and usage. | 176 | * same rules and usage. |
146 | */ | 177 | */ |
147 | extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); | 178 | static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) |
179 | { | ||
180 | pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction); | ||
181 | } | ||
148 | 182 | ||
149 | static inline void | 183 | static inline void |
150 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, | 184 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, |
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h index a96067cca963..12e4a273bd43 100644 --- a/include/asm-sparc64/pgalloc.h +++ b/include/asm-sparc64/pgalloc.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <linux/slab.h> | ||
9 | 10 | ||
10 | #include <asm/spitfire.h> | 11 | #include <asm/spitfire.h> |
11 | #include <asm/cpudata.h> | 12 | #include <asm/cpudata.h> |
@@ -13,172 +14,59 @@ | |||
13 | #include <asm/page.h> | 14 | #include <asm/page.h> |
14 | 15 | ||
15 | /* Page table allocation/freeing. */ | 16 | /* Page table allocation/freeing. */ |
16 | #ifdef CONFIG_SMP | 17 | extern kmem_cache_t *pgtable_cache; |
17 | /* Sliiiicck */ | ||
18 | #define pgt_quicklists local_cpu_data() | ||
19 | #else | ||
20 | extern struct pgtable_cache_struct { | ||
21 | unsigned long *pgd_cache; | ||
22 | unsigned long *pte_cache[2]; | ||
23 | unsigned int pgcache_size; | ||
24 | } pgt_quicklists; | ||
25 | #endif | ||
26 | #define pgd_quicklist (pgt_quicklists.pgd_cache) | ||
27 | #define pmd_quicklist ((unsigned long *)0) | ||
28 | #define pte_quicklist (pgt_quicklists.pte_cache) | ||
29 | #define pgtable_cache_size (pgt_quicklists.pgcache_size) | ||
30 | 18 | ||
31 | static __inline__ void free_pgd_fast(pgd_t *pgd) | 19 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
32 | { | 20 | { |
33 | preempt_disable(); | 21 | return kmem_cache_alloc(pgtable_cache, GFP_KERNEL); |
34 | *(unsigned long *)pgd = (unsigned long) pgd_quicklist; | ||
35 | pgd_quicklist = (unsigned long *) pgd; | ||
36 | pgtable_cache_size++; | ||
37 | preempt_enable(); | ||
38 | } | 22 | } |
39 | 23 | ||
40 | static __inline__ pgd_t *get_pgd_fast(void) | 24 | static inline void pgd_free(pgd_t *pgd) |
41 | { | 25 | { |
42 | unsigned long *ret; | 26 | kmem_cache_free(pgtable_cache, pgd); |
43 | |||
44 | preempt_disable(); | ||
45 | if((ret = pgd_quicklist) != NULL) { | ||
46 | pgd_quicklist = (unsigned long *)(*ret); | ||
47 | ret[0] = 0; | ||
48 | pgtable_cache_size--; | ||
49 | preempt_enable(); | ||
50 | } else { | ||
51 | preempt_enable(); | ||
52 | ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT); | ||
53 | if(ret) | ||
54 | memset(ret, 0, PAGE_SIZE); | ||
55 | } | ||
56 | return (pgd_t *)ret; | ||
57 | } | 27 | } |
58 | 28 | ||
59 | static __inline__ void free_pgd_slow(pgd_t *pgd) | ||
60 | { | ||
61 | free_page((unsigned long)pgd); | ||
62 | } | ||
63 | |||
64 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
65 | #define VPTE_COLOR(address) (((address) >> (PAGE_SHIFT + 10)) & 1UL) | ||
66 | #define DCACHE_COLOR(address) (((address) >> PAGE_SHIFT) & 1UL) | ||
67 | #else | ||
68 | #define VPTE_COLOR(address) 0 | ||
69 | #define DCACHE_COLOR(address) 0 | ||
70 | #endif | ||
71 | |||
72 | #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) | 29 | #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) |
73 | 30 | ||
74 | static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address) | 31 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
75 | { | 32 | { |
76 | unsigned long *ret; | 33 | return kmem_cache_alloc(pgtable_cache, |
77 | int color = 0; | 34 | GFP_KERNEL|__GFP_REPEAT); |
78 | |||
79 | preempt_disable(); | ||
80 | if (pte_quicklist[color] == NULL) | ||
81 | color = 1; | ||
82 | |||
83 | if((ret = (unsigned long *)pte_quicklist[color]) != NULL) { | ||
84 | pte_quicklist[color] = (unsigned long *)(*ret); | ||
85 | ret[0] = 0; | ||
86 | pgtable_cache_size--; | ||
87 | } | ||
88 | preempt_enable(); | ||
89 | |||
90 | return (pmd_t *)ret; | ||
91 | } | 35 | } |
92 | 36 | ||
93 | static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | 37 | static inline void pmd_free(pmd_t *pmd) |
94 | { | 38 | { |
95 | pmd_t *pmd; | 39 | kmem_cache_free(pgtable_cache, pmd); |
96 | |||
97 | pmd = pmd_alloc_one_fast(mm, address); | ||
98 | if (!pmd) { | ||
99 | pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); | ||
100 | if (pmd) | ||
101 | memset(pmd, 0, PAGE_SIZE); | ||
102 | } | ||
103 | return pmd; | ||
104 | } | 40 | } |
105 | 41 | ||
106 | static __inline__ void free_pmd_fast(pmd_t *pmd) | 42 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
43 | unsigned long address) | ||
107 | { | 44 | { |
108 | unsigned long color = DCACHE_COLOR((unsigned long)pmd); | 45 | return kmem_cache_alloc(pgtable_cache, |
109 | 46 | GFP_KERNEL|__GFP_REPEAT); | |
110 | preempt_disable(); | ||
111 | *(unsigned long *)pmd = (unsigned long) pte_quicklist[color]; | ||
112 | pte_quicklist[color] = (unsigned long *) pmd; | ||
113 | pgtable_cache_size++; | ||
114 | preempt_enable(); | ||
115 | } | 47 | } |
116 | 48 | ||
117 | static __inline__ void free_pmd_slow(pmd_t *pmd) | 49 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
50 | unsigned long address) | ||
118 | { | 51 | { |
119 | free_page((unsigned long)pmd); | 52 | return virt_to_page(pte_alloc_one_kernel(mm, address)); |
120 | } | 53 | } |
121 | 54 | ||
122 | #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) | ||
123 | #define pmd_populate(MM,PMD,PTE_PAGE) \ | ||
124 | pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) | ||
125 | |||
126 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address); | ||
127 | |||
128 | static inline struct page * | ||
129 | pte_alloc_one(struct mm_struct *mm, unsigned long addr) | ||
130 | { | ||
131 | pte_t *pte = pte_alloc_one_kernel(mm, addr); | ||
132 | |||
133 | if (pte) | ||
134 | return virt_to_page(pte); | ||
135 | |||
136 | return NULL; | ||
137 | } | ||
138 | |||
139 | static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) | ||
140 | { | ||
141 | unsigned long color = VPTE_COLOR(address); | ||
142 | unsigned long *ret; | ||
143 | |||
144 | preempt_disable(); | ||
145 | if((ret = (unsigned long *)pte_quicklist[color]) != NULL) { | ||
146 | pte_quicklist[color] = (unsigned long *)(*ret); | ||
147 | ret[0] = 0; | ||
148 | pgtable_cache_size--; | ||
149 | } | ||
150 | preempt_enable(); | ||
151 | return (pte_t *)ret; | ||
152 | } | ||
153 | |||
154 | static __inline__ void free_pte_fast(pte_t *pte) | ||
155 | { | ||
156 | unsigned long color = DCACHE_COLOR((unsigned long)pte); | ||
157 | |||
158 | preempt_disable(); | ||
159 | *(unsigned long *)pte = (unsigned long) pte_quicklist[color]; | ||
160 | pte_quicklist[color] = (unsigned long *) pte; | ||
161 | pgtable_cache_size++; | ||
162 | preempt_enable(); | ||
163 | } | ||
164 | |||
165 | static __inline__ void free_pte_slow(pte_t *pte) | ||
166 | { | ||
167 | free_page((unsigned long)pte); | ||
168 | } | ||
169 | |||
170 | static inline void pte_free_kernel(pte_t *pte) | 55 | static inline void pte_free_kernel(pte_t *pte) |
171 | { | 56 | { |
172 | free_pte_fast(pte); | 57 | kmem_cache_free(pgtable_cache, pte); |
173 | } | 58 | } |
174 | 59 | ||
175 | static inline void pte_free(struct page *ptepage) | 60 | static inline void pte_free(struct page *ptepage) |
176 | { | 61 | { |
177 | free_pte_fast(page_address(ptepage)); | 62 | pte_free_kernel(page_address(ptepage)); |
178 | } | 63 | } |
179 | 64 | ||
180 | #define pmd_free(pmd) free_pmd_fast(pmd) | 65 | |
181 | #define pgd_free(pgd) free_pgd_fast(pgd) | 66 | #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) |
182 | #define pgd_alloc(mm) get_pgd_fast() | 67 | #define pmd_populate(MM,PMD,PTE_PAGE) \ |
68 | pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) | ||
69 | |||
70 | #define check_pgt_cache() do { } while (0) | ||
183 | 71 | ||
184 | #endif /* _SPARC64_PGALLOC_H */ | 72 | #endif /* _SPARC64_PGALLOC_H */ |
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index f0a9b44d3eb5..ed4124edf837 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h | |||
@@ -25,7 +25,8 @@ | |||
25 | #include <asm/const.h> | 25 | #include <asm/const.h> |
26 | 26 | ||
27 | /* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB). | 27 | /* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB). |
28 | * The page copy blockops can use 0x2000000 to 0x10000000. | 28 | * The page copy blockops can use 0x2000000 to 0x4000000. |
29 | * The TSB is mapped in the 0x4000000 to 0x6000000 range. | ||
29 | * The PROM resides in an area spanning 0xf0000000 to 0x100000000. | 30 | * The PROM resides in an area spanning 0xf0000000 to 0x100000000. |
30 | * The vmalloc area spans 0x100000000 to 0x200000000. | 31 | * The vmalloc area spans 0x100000000 to 0x200000000. |
31 | * Since modules need to be in the lowest 32-bits of the address space, | 32 | * Since modules need to be in the lowest 32-bits of the address space, |
@@ -34,6 +35,7 @@ | |||
34 | * 0x400000000. | 35 | * 0x400000000. |
35 | */ | 36 | */ |
36 | #define TLBTEMP_BASE _AC(0x0000000002000000,UL) | 37 | #define TLBTEMP_BASE _AC(0x0000000002000000,UL) |
38 | #define TSBMAP_BASE _AC(0x0000000004000000,UL) | ||
37 | #define MODULES_VADDR _AC(0x0000000010000000,UL) | 39 | #define MODULES_VADDR _AC(0x0000000010000000,UL) |
38 | #define MODULES_LEN _AC(0x00000000e0000000,UL) | 40 | #define MODULES_LEN _AC(0x00000000e0000000,UL) |
39 | #define MODULES_END _AC(0x00000000f0000000,UL) | 41 | #define MODULES_END _AC(0x00000000f0000000,UL) |
@@ -88,162 +90,538 @@ | |||
88 | 90 | ||
89 | #endif /* !(__ASSEMBLY__) */ | 91 | #endif /* !(__ASSEMBLY__) */ |
90 | 92 | ||
91 | /* Spitfire/Cheetah TTE bits. */ | 93 | /* PTE bits which are the same in SUN4U and SUN4V format. */ |
92 | #define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ | 94 | #define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ |
93 | #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date*/ | 95 | #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/ |
94 | #define _PAGE_SZ4MB _AC(0x6000000000000000,UL) /* 4MB Page */ | 96 | |
95 | #define _PAGE_SZ512K _AC(0x4000000000000000,UL) /* 512K Page */ | 97 | /* SUN4U pte bits... */ |
96 | #define _PAGE_SZ64K _AC(0x2000000000000000,UL) /* 64K Page */ | 98 | #define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */ |
97 | #define _PAGE_SZ8K _AC(0x0000000000000000,UL) /* 8K Page */ | 99 | #define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */ |
98 | #define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */ | 100 | #define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */ |
99 | #define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */ | 101 | #define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */ |
100 | #define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ | 102 | #define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */ |
101 | #define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */ | 103 | #define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */ |
102 | #define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ | 104 | #define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ |
103 | #define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ | 105 | #define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */ |
104 | #define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ | 106 | #define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ |
105 | #define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */ | 107 | #define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ |
106 | #define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/ | 108 | #define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ |
107 | #define _PAGE_PADDR _AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */ | 109 | #define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */ |
108 | #define _PAGE_SOFT _AC(0x0000000000001F80,UL) /* Software bits */ | 110 | #define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */ |
109 | #define _PAGE_L _AC(0x0000000000000040,UL) /* Locked TTE */ | 111 | #define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */ |
110 | #define _PAGE_CP _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ | 112 | #define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */ |
111 | #define _PAGE_CV _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ | 113 | #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */ |
112 | #define _PAGE_E _AC(0x0000000000000008,UL) /* side-Effect */ | 114 | #define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */ |
113 | #define _PAGE_P _AC(0x0000000000000004,UL) /* Privileged Page */ | 115 | #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ |
114 | #define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */ | 116 | #define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */ |
115 | #define _PAGE_G _AC(0x0000000000000001,UL) /* Global */ | 117 | #define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */ |
116 | 118 | #define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */ | |
117 | /* Here are the SpitFire software bits we use in the TTE's. | 119 | #define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */ |
118 | * | 120 | #define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ |
119 | * WARNING: If you are going to try and start using some | 121 | #define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ |
120 | * of the soft2 bits, you will need to make | 122 | #define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */ |
121 | * modifications to the swap entry implementation. | 123 | #define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */ |
122 | * For example, one thing that could happen is that | 124 | #define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */ |
123 | * swp_entry_to_pte() would BUG_ON() if you tried | 125 | |
124 | * to use one of the soft2 bits for _PAGE_FILE. | 126 | /* SUN4V pte bits... */ |
125 | * | 127 | #define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */ |
126 | * Like other architectures, I have aliased _PAGE_FILE with | 128 | #define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */ |
127 | * _PAGE_MODIFIED. This works because _PAGE_FILE is never | 129 | #define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */ |
128 | * interpreted that way unless _PAGE_PRESENT is clear. | 130 | #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */ |
129 | */ | 131 | #define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */ |
130 | #define _PAGE_EXEC _AC(0x0000000000001000,UL) /* Executable SW bit */ | 132 | #define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */ |
131 | #define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */ | 133 | #define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */ |
132 | #define _PAGE_FILE _AC(0x0000000000000800,UL) /* Pagecache page */ | 134 | #define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */ |
133 | #define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ | 135 | #define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */ |
134 | #define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */ | 136 | #define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */ |
135 | #define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */ | 137 | #define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */ |
136 | #define _PAGE_PRESENT _AC(0x0000000000000080,UL) /* Present */ | 138 | #define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */ |
139 | #define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */ | ||
140 | #define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */ | ||
141 | #define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */ | ||
142 | #define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */ | ||
143 | #define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */ | ||
144 | #define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */ | ||
145 | #define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */ | ||
146 | #define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */ | ||
147 | #define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */ | ||
148 | #define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */ | ||
149 | #define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */ | ||
150 | #define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */ | ||
151 | #define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */ | ||
152 | #define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */ | ||
137 | 153 | ||
138 | #if PAGE_SHIFT == 13 | 154 | #if PAGE_SHIFT == 13 |
139 | #define _PAGE_SZBITS _PAGE_SZ8K | 155 | #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U |
156 | #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V | ||
140 | #elif PAGE_SHIFT == 16 | 157 | #elif PAGE_SHIFT == 16 |
141 | #define _PAGE_SZBITS _PAGE_SZ64K | 158 | #define _PAGE_SZBITS_4U _PAGE_SZ64K_4U |
159 | #define _PAGE_SZBITS_4V _PAGE_SZ64K_4V | ||
142 | #elif PAGE_SHIFT == 19 | 160 | #elif PAGE_SHIFT == 19 |
143 | #define _PAGE_SZBITS _PAGE_SZ512K | 161 | #define _PAGE_SZBITS_4U _PAGE_SZ512K_4U |
162 | #define _PAGE_SZBITS_4V _PAGE_SZ512K_4V | ||
144 | #elif PAGE_SHIFT == 22 | 163 | #elif PAGE_SHIFT == 22 |
145 | #define _PAGE_SZBITS _PAGE_SZ4MB | 164 | #define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U |
165 | #define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V | ||
146 | #else | 166 | #else |
147 | #error Wrong PAGE_SHIFT specified | 167 | #error Wrong PAGE_SHIFT specified |
148 | #endif | 168 | #endif |
149 | 169 | ||
150 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) | 170 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) |
151 | #define _PAGE_SZHUGE _PAGE_SZ4MB | 171 | #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U |
172 | #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V | ||
152 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | 173 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) |
153 | #define _PAGE_SZHUGE _PAGE_SZ512K | 174 | #define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U |
175 | #define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V | ||
154 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | 176 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) |
155 | #define _PAGE_SZHUGE _PAGE_SZ64K | 177 | #define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U |
178 | #define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V | ||
156 | #endif | 179 | #endif |
157 | 180 | ||
158 | #define _PAGE_CACHE (_PAGE_CP | _PAGE_CV) | 181 | /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ |
182 | #define __P000 __pgprot(0) | ||
183 | #define __P001 __pgprot(0) | ||
184 | #define __P010 __pgprot(0) | ||
185 | #define __P011 __pgprot(0) | ||
186 | #define __P100 __pgprot(0) | ||
187 | #define __P101 __pgprot(0) | ||
188 | #define __P110 __pgprot(0) | ||
189 | #define __P111 __pgprot(0) | ||
190 | |||
191 | #define __S000 __pgprot(0) | ||
192 | #define __S001 __pgprot(0) | ||
193 | #define __S010 __pgprot(0) | ||
194 | #define __S011 __pgprot(0) | ||
195 | #define __S100 __pgprot(0) | ||
196 | #define __S101 __pgprot(0) | ||
197 | #define __S110 __pgprot(0) | ||
198 | #define __S111 __pgprot(0) | ||
159 | 199 | ||
160 | #define __DIRTY_BITS (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W) | 200 | #ifndef __ASSEMBLY__ |
161 | #define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R) | ||
162 | #define __PRIV_BITS _PAGE_P | ||
163 | 201 | ||
164 | #define PAGE_NONE __pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE) | 202 | extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long); |
165 | 203 | ||
166 | /* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */ | 204 | extern unsigned long pte_sz_bits(unsigned long size); |
167 | #define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ | ||
168 | __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC) | ||
169 | 205 | ||
170 | #define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ | 206 | extern pgprot_t PAGE_KERNEL; |
171 | __ACCESS_BITS | _PAGE_EXEC) | 207 | extern pgprot_t PAGE_KERNEL_LOCKED; |
208 | extern pgprot_t PAGE_COPY; | ||
209 | extern pgprot_t PAGE_SHARED; | ||
172 | 210 | ||
173 | #define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ | 211 | /* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */ |
174 | __ACCESS_BITS | _PAGE_EXEC) | 212 | extern unsigned long _PAGE_IE; |
213 | extern unsigned long _PAGE_E; | ||
214 | extern unsigned long _PAGE_CACHE; | ||
175 | 215 | ||
176 | #define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ | 216 | extern unsigned long pg_iobits; |
177 | __PRIV_BITS | \ | 217 | extern unsigned long _PAGE_ALL_SZ_BITS; |
178 | __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC) | 218 | extern unsigned long _PAGE_SZBITS; |
179 | 219 | ||
180 | #define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ | 220 | extern struct page *mem_map_zero; |
181 | _PAGE_CACHE | \ | 221 | #define ZERO_PAGE(vaddr) (mem_map_zero) |
182 | __ACCESS_BITS | _PAGE_WRITE) | ||
183 | 222 | ||
184 | #define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ | 223 | /* PFNs are real physical page numbers. However, mem_map only begins to record |
185 | _PAGE_CACHE | __ACCESS_BITS) | 224 | * per-page information starting at pfn_base. This is to handle systems where |
225 | * the first physical page in the machine is at some huge physical address, | ||
226 | * such as 4GB. This is common on a partitioned E10000, for example. | ||
227 | */ | ||
228 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) | ||
229 | { | ||
230 | unsigned long paddr = pfn << PAGE_SHIFT; | ||
231 | unsigned long sz_bits; | ||
232 | |||
233 | sz_bits = 0UL; | ||
234 | if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) { | ||
235 | __asm__ __volatile__( | ||
236 | "\n661: sethi %uhi(%1), %0\n" | ||
237 | " sllx %0, 32, %0\n" | ||
238 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
239 | " .word 661b\n" | ||
240 | " mov %2, %0\n" | ||
241 | " nop\n" | ||
242 | " .previous\n" | ||
243 | : "=r" (sz_bits) | ||
244 | : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V)); | ||
245 | } | ||
246 | return __pte(paddr | sz_bits | pgprot_val(prot)); | ||
247 | } | ||
248 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
186 | 249 | ||
187 | #define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ | 250 | /* This one can be done with two shifts. */ |
188 | _PAGE_CACHE | __ACCESS_BITS) | 251 | static inline unsigned long pte_pfn(pte_t pte) |
252 | { | ||
253 | unsigned long ret; | ||
254 | |||
255 | __asm__ __volatile__( | ||
256 | "\n661: sllx %1, %2, %0\n" | ||
257 | " srlx %0, %3, %0\n" | ||
258 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
259 | " .word 661b\n" | ||
260 | " sllx %1, %4, %0\n" | ||
261 | " srlx %0, %5, %0\n" | ||
262 | " .previous\n" | ||
263 | : "=r" (ret) | ||
264 | : "r" (pte_val(pte)), | ||
265 | "i" (21), "i" (21 + PAGE_SHIFT), | ||
266 | "i" (8), "i" (8 + PAGE_SHIFT)); | ||
267 | |||
268 | return ret; | ||
269 | } | ||
270 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
189 | 271 | ||
190 | #define _PFN_MASK _PAGE_PADDR | 272 | static inline pte_t pte_modify(pte_t pte, pgprot_t prot) |
273 | { | ||
274 | unsigned long mask, tmp; | ||
275 | |||
276 | /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347) | ||
277 | * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8) | ||
278 | * | ||
279 | * Even if we use negation tricks the result is still a 6 | ||
280 | * instruction sequence, so don't try to play fancy and just | ||
281 | * do the most straightforward implementation. | ||
282 | * | ||
283 | * Note: We encode this into 3 sun4v 2-insn patch sequences. | ||
284 | */ | ||
191 | 285 | ||
192 | #define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \ | 286 | __asm__ __volatile__( |
193 | __ACCESS_BITS | _PAGE_E) | 287 | "\n661: sethi %%uhi(%2), %1\n" |
288 | " sethi %%hi(%2), %0\n" | ||
289 | "\n662: or %1, %%ulo(%2), %1\n" | ||
290 | " or %0, %%lo(%2), %0\n" | ||
291 | "\n663: sllx %1, 32, %1\n" | ||
292 | " or %0, %1, %0\n" | ||
293 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
294 | " .word 661b\n" | ||
295 | " sethi %%uhi(%3), %1\n" | ||
296 | " sethi %%hi(%3), %0\n" | ||
297 | " .word 662b\n" | ||
298 | " or %1, %%ulo(%3), %1\n" | ||
299 | " or %0, %%lo(%3), %0\n" | ||
300 | " .word 663b\n" | ||
301 | " sllx %1, 32, %1\n" | ||
302 | " or %0, %1, %0\n" | ||
303 | " .previous\n" | ||
304 | : "=r" (mask), "=r" (tmp) | ||
305 | : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | | ||
306 | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | | ||
307 | _PAGE_SZBITS_4U), | ||
308 | "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | | ||
309 | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | | ||
310 | _PAGE_SZBITS_4V)); | ||
311 | |||
312 | return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); | ||
313 | } | ||
194 | 314 | ||
195 | #define __P000 PAGE_NONE | 315 | static inline pte_t pgoff_to_pte(unsigned long off) |
196 | #define __P001 PAGE_READONLY_NOEXEC | 316 | { |
197 | #define __P010 PAGE_COPY_NOEXEC | 317 | off <<= PAGE_SHIFT; |
198 | #define __P011 PAGE_COPY_NOEXEC | 318 | |
199 | #define __P100 PAGE_READONLY | 319 | __asm__ __volatile__( |
200 | #define __P101 PAGE_READONLY | 320 | "\n661: or %0, %2, %0\n" |
201 | #define __P110 PAGE_COPY | 321 | " .section .sun4v_1insn_patch, \"ax\"\n" |
202 | #define __P111 PAGE_COPY | 322 | " .word 661b\n" |
323 | " or %0, %3, %0\n" | ||
324 | " .previous\n" | ||
325 | : "=r" (off) | ||
326 | : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); | ||
327 | |||
328 | return __pte(off); | ||
329 | } | ||
203 | 330 | ||
204 | #define __S000 PAGE_NONE | 331 | static inline pgprot_t pgprot_noncached(pgprot_t prot) |
205 | #define __S001 PAGE_READONLY_NOEXEC | 332 | { |
206 | #define __S010 PAGE_SHARED_NOEXEC | 333 | unsigned long val = pgprot_val(prot); |
207 | #define __S011 PAGE_SHARED_NOEXEC | 334 | |
208 | #define __S100 PAGE_READONLY | 335 | __asm__ __volatile__( |
209 | #define __S101 PAGE_READONLY | 336 | "\n661: andn %0, %2, %0\n" |
210 | #define __S110 PAGE_SHARED | 337 | " or %0, %3, %0\n" |
211 | #define __S111 PAGE_SHARED | 338 | " .section .sun4v_2insn_patch, \"ax\"\n" |
339 | " .word 661b\n" | ||
340 | " andn %0, %4, %0\n" | ||
341 | " or %0, %3, %0\n" | ||
342 | " .previous\n" | ||
343 | : "=r" (val) | ||
344 | : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), | ||
345 | "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V)); | ||
346 | |||
347 | return __pgprot(val); | ||
348 | } | ||
349 | /* Various pieces of code check for platform support by ifdef testing | ||
350 | * on "pgprot_noncached". That's broken and should be fixed, but for | ||
351 | * now... | ||
352 | */ | ||
353 | #define pgprot_noncached pgprot_noncached | ||
212 | 354 | ||
213 | #ifndef __ASSEMBLY__ | 355 | #ifdef CONFIG_HUGETLB_PAGE |
356 | static inline pte_t pte_mkhuge(pte_t pte) | ||
357 | { | ||
358 | unsigned long mask; | ||
359 | |||
360 | __asm__ __volatile__( | ||
361 | "\n661: sethi %%uhi(%1), %0\n" | ||
362 | " sllx %0, 32, %0\n" | ||
363 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
364 | " .word 661b\n" | ||
365 | " mov %2, %0\n" | ||
366 | " nop\n" | ||
367 | " .previous\n" | ||
368 | : "=r" (mask) | ||
369 | : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V)); | ||
370 | |||
371 | return __pte(pte_val(pte) | mask); | ||
372 | } | ||
373 | #endif | ||
214 | 374 | ||
215 | extern unsigned long phys_base; | 375 | static inline pte_t pte_mkdirty(pte_t pte) |
216 | extern unsigned long pfn_base; | 376 | { |
377 | unsigned long val = pte_val(pte), tmp; | ||
378 | |||
379 | __asm__ __volatile__( | ||
380 | "\n661: or %0, %3, %0\n" | ||
381 | " nop\n" | ||
382 | "\n662: nop\n" | ||
383 | " nop\n" | ||
384 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
385 | " .word 661b\n" | ||
386 | " sethi %%uhi(%4), %1\n" | ||
387 | " sllx %1, 32, %1\n" | ||
388 | " .word 662b\n" | ||
389 | " or %1, %%lo(%4), %1\n" | ||
390 | " or %0, %1, %0\n" | ||
391 | " .previous\n" | ||
392 | : "=r" (val), "=r" (tmp) | ||
393 | : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), | ||
394 | "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); | ||
395 | |||
396 | return __pte(val); | ||
397 | } | ||
217 | 398 | ||
218 | extern struct page *mem_map_zero; | 399 | static inline pte_t pte_mkclean(pte_t pte) |
219 | #define ZERO_PAGE(vaddr) (mem_map_zero) | 400 | { |
401 | unsigned long val = pte_val(pte), tmp; | ||
402 | |||
403 | __asm__ __volatile__( | ||
404 | "\n661: andn %0, %3, %0\n" | ||
405 | " nop\n" | ||
406 | "\n662: nop\n" | ||
407 | " nop\n" | ||
408 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
409 | " .word 661b\n" | ||
410 | " sethi %%uhi(%4), %1\n" | ||
411 | " sllx %1, 32, %1\n" | ||
412 | " .word 662b\n" | ||
413 | " or %1, %%lo(%4), %1\n" | ||
414 | " andn %0, %1, %0\n" | ||
415 | " .previous\n" | ||
416 | : "=r" (val), "=r" (tmp) | ||
417 | : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), | ||
418 | "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); | ||
419 | |||
420 | return __pte(val); | ||
421 | } | ||
220 | 422 | ||
221 | /* PFNs are real physical page numbers. However, mem_map only begins to record | 423 | static inline pte_t pte_mkwrite(pte_t pte) |
222 | * per-page information starting at pfn_base. This is to handle systems where | 424 | { |
223 | * the first physical page in the machine is at some huge physical address, | 425 | unsigned long val = pte_val(pte), mask; |
224 | * such as 4GB. This is common on a partitioned E10000, for example. | 426 | |
225 | */ | 427 | __asm__ __volatile__( |
428 | "\n661: mov %1, %0\n" | ||
429 | " nop\n" | ||
430 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
431 | " .word 661b\n" | ||
432 | " sethi %%uhi(%2), %0\n" | ||
433 | " sllx %0, 32, %0\n" | ||
434 | " .previous\n" | ||
435 | : "=r" (mask) | ||
436 | : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); | ||
437 | |||
438 | return __pte(val | mask); | ||
439 | } | ||
226 | 440 | ||
227 | #define pfn_pte(pfn, prot) \ | 441 | static inline pte_t pte_wrprotect(pte_t pte) |
228 | __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS) | 442 | { |
229 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 443 | unsigned long val = pte_val(pte), tmp; |
444 | |||
445 | __asm__ __volatile__( | ||
446 | "\n661: andn %0, %3, %0\n" | ||
447 | " nop\n" | ||
448 | "\n662: nop\n" | ||
449 | " nop\n" | ||
450 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
451 | " .word 661b\n" | ||
452 | " sethi %%uhi(%4), %1\n" | ||
453 | " sllx %1, 32, %1\n" | ||
454 | " .word 662b\n" | ||
455 | " or %1, %%lo(%4), %1\n" | ||
456 | " andn %0, %1, %0\n" | ||
457 | " .previous\n" | ||
458 | : "=r" (val), "=r" (tmp) | ||
459 | : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U), | ||
460 | "i" (_PAGE_WRITE_4V | _PAGE_W_4V)); | ||
461 | |||
462 | return __pte(val); | ||
463 | } | ||
464 | |||
465 | static inline pte_t pte_mkold(pte_t pte) | ||
466 | { | ||
467 | unsigned long mask; | ||
468 | |||
469 | __asm__ __volatile__( | ||
470 | "\n661: mov %1, %0\n" | ||
471 | " nop\n" | ||
472 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
473 | " .word 661b\n" | ||
474 | " sethi %%uhi(%2), %0\n" | ||
475 | " sllx %0, 32, %0\n" | ||
476 | " .previous\n" | ||
477 | : "=r" (mask) | ||
478 | : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); | ||
479 | |||
480 | mask |= _PAGE_R; | ||
481 | |||
482 | return __pte(pte_val(pte) & ~mask); | ||
483 | } | ||
484 | |||
485 | static inline pte_t pte_mkyoung(pte_t pte) | ||
486 | { | ||
487 | unsigned long mask; | ||
488 | |||
489 | __asm__ __volatile__( | ||
490 | "\n661: mov %1, %0\n" | ||
491 | " nop\n" | ||
492 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
493 | " .word 661b\n" | ||
494 | " sethi %%uhi(%2), %0\n" | ||
495 | " sllx %0, 32, %0\n" | ||
496 | " .previous\n" | ||
497 | : "=r" (mask) | ||
498 | : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); | ||
499 | |||
500 | mask |= _PAGE_R; | ||
501 | |||
502 | return __pte(pte_val(pte) | mask); | ||
503 | } | ||
230 | 504 | ||
231 | #define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT) | 505 | static inline unsigned long pte_young(pte_t pte) |
232 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 506 | { |
507 | unsigned long mask; | ||
508 | |||
509 | __asm__ __volatile__( | ||
510 | "\n661: mov %1, %0\n" | ||
511 | " nop\n" | ||
512 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
513 | " .word 661b\n" | ||
514 | " sethi %%uhi(%2), %0\n" | ||
515 | " sllx %0, 32, %0\n" | ||
516 | " .previous\n" | ||
517 | : "=r" (mask) | ||
518 | : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); | ||
519 | |||
520 | return (pte_val(pte) & mask); | ||
521 | } | ||
522 | |||
523 | static inline unsigned long pte_dirty(pte_t pte) | ||
524 | { | ||
525 | unsigned long mask; | ||
526 | |||
527 | __asm__ __volatile__( | ||
528 | "\n661: mov %1, %0\n" | ||
529 | " nop\n" | ||
530 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
531 | " .word 661b\n" | ||
532 | " sethi %%uhi(%2), %0\n" | ||
533 | " sllx %0, 32, %0\n" | ||
534 | " .previous\n" | ||
535 | : "=r" (mask) | ||
536 | : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V)); | ||
537 | |||
538 | return (pte_val(pte) & mask); | ||
539 | } | ||
233 | 540 | ||
234 | static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) | 541 | static inline unsigned long pte_write(pte_t pte) |
235 | { | 542 | { |
236 | pte_t __pte; | 543 | unsigned long mask; |
237 | const unsigned long preserve_mask = (_PFN_MASK | | 544 | |
238 | _PAGE_MODIFIED | _PAGE_ACCESSED | | 545 | __asm__ __volatile__( |
239 | _PAGE_CACHE | _PAGE_E | | 546 | "\n661: mov %1, %0\n" |
240 | _PAGE_PRESENT | _PAGE_SZBITS); | 547 | " nop\n" |
548 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
549 | " .word 661b\n" | ||
550 | " sethi %%uhi(%2), %0\n" | ||
551 | " sllx %0, 32, %0\n" | ||
552 | " .previous\n" | ||
553 | : "=r" (mask) | ||
554 | : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); | ||
555 | |||
556 | return (pte_val(pte) & mask); | ||
557 | } | ||
241 | 558 | ||
242 | pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) | | 559 | static inline unsigned long pte_exec(pte_t pte) |
243 | (pgprot_val(new_prot) & ~preserve_mask); | 560 | { |
561 | unsigned long mask; | ||
562 | |||
563 | __asm__ __volatile__( | ||
564 | "\n661: sethi %%hi(%1), %0\n" | ||
565 | " .section .sun4v_1insn_patch, \"ax\"\n" | ||
566 | " .word 661b\n" | ||
567 | " mov %2, %0\n" | ||
568 | " .previous\n" | ||
569 | : "=r" (mask) | ||
570 | : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V)); | ||
571 | |||
572 | return (pte_val(pte) & mask); | ||
573 | } | ||
244 | 574 | ||
245 | return __pte; | 575 | static inline unsigned long pte_read(pte_t pte) |
576 | { | ||
577 | unsigned long mask; | ||
578 | |||
579 | __asm__ __volatile__( | ||
580 | "\n661: mov %1, %0\n" | ||
581 | " nop\n" | ||
582 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
583 | " .word 661b\n" | ||
584 | " sethi %%uhi(%2), %0\n" | ||
585 | " sllx %0, 32, %0\n" | ||
586 | " .previous\n" | ||
587 | : "=r" (mask) | ||
588 | : "i" (_PAGE_READ_4U), "i" (_PAGE_READ_4V)); | ||
589 | |||
590 | return (pte_val(pte) & mask); | ||
246 | } | 591 | } |
592 | |||
593 | static inline unsigned long pte_file(pte_t pte) | ||
594 | { | ||
595 | unsigned long val = pte_val(pte); | ||
596 | |||
597 | __asm__ __volatile__( | ||
598 | "\n661: and %0, %2, %0\n" | ||
599 | " .section .sun4v_1insn_patch, \"ax\"\n" | ||
600 | " .word 661b\n" | ||
601 | " and %0, %3, %0\n" | ||
602 | " .previous\n" | ||
603 | : "=r" (val) | ||
604 | : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); | ||
605 | |||
606 | return val; | ||
607 | } | ||
608 | |||
609 | static inline unsigned long pte_present(pte_t pte) | ||
610 | { | ||
611 | unsigned long val = pte_val(pte); | ||
612 | |||
613 | __asm__ __volatile__( | ||
614 | "\n661: and %0, %2, %0\n" | ||
615 | " .section .sun4v_1insn_patch, \"ax\"\n" | ||
616 | " .word 661b\n" | ||
617 | " and %0, %3, %0\n" | ||
618 | " .previous\n" | ||
619 | : "=r" (val) | ||
620 | : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V)); | ||
621 | |||
622 | return val; | ||
623 | } | ||
624 | |||
247 | #define pmd_set(pmdp, ptep) \ | 625 | #define pmd_set(pmdp, ptep) \ |
248 | (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) | 626 | (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) |
249 | #define pud_set(pudp, pmdp) \ | 627 | #define pud_set(pudp, pmdp) \ |
@@ -253,8 +631,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) | |||
253 | #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) | 631 | #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) |
254 | #define pud_page(pud) \ | 632 | #define pud_page(pud) \ |
255 | ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL))) | 633 | ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL))) |
256 | #define pte_none(pte) (!pte_val(pte)) | ||
257 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | ||
258 | #define pmd_none(pmd) (!pmd_val(pmd)) | 634 | #define pmd_none(pmd) (!pmd_val(pmd)) |
259 | #define pmd_bad(pmd) (0) | 635 | #define pmd_bad(pmd) (0) |
260 | #define pmd_present(pmd) (pmd_val(pmd) != 0U) | 636 | #define pmd_present(pmd) (pmd_val(pmd) != 0U) |
@@ -264,30 +640,8 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) | |||
264 | #define pud_present(pud) (pud_val(pud) != 0U) | 640 | #define pud_present(pud) (pud_val(pud) != 0U) |
265 | #define pud_clear(pudp) (pud_val(*(pudp)) = 0U) | 641 | #define pud_clear(pudp) (pud_val(*(pudp)) = 0U) |
266 | 642 | ||
267 | /* The following only work if pte_present() is true. | 643 | /* Same in both SUN4V and SUN4U. */ |
268 | * Undefined behaviour if not.. | 644 | #define pte_none(pte) (!pte_val(pte)) |
269 | */ | ||
270 | #define pte_read(pte) (pte_val(pte) & _PAGE_READ) | ||
271 | #define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC) | ||
272 | #define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) | ||
273 | #define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED) | ||
274 | #define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) | ||
275 | #define pte_wrprotect(pte) (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W))) | ||
276 | #define pte_rdprotect(pte) \ | ||
277 | (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ)) | ||
278 | #define pte_mkclean(pte) \ | ||
279 | (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W))) | ||
280 | #define pte_mkold(pte) \ | ||
281 | (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED)) | ||
282 | |||
283 | /* Permanent address of a page. */ | ||
284 | #define __page_address(page) page_address(page) | ||
285 | |||
286 | /* Be very careful when you change these three, they are delicate. */ | ||
287 | #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R)) | ||
288 | #define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE)) | ||
289 | #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W)) | ||
290 | #define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_SZHUGE)) | ||
291 | 645 | ||
292 | /* to find an entry in a page-table-directory. */ | 646 | /* to find an entry in a page-table-directory. */ |
293 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) | 647 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
@@ -296,11 +650,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) | |||
296 | /* to find an entry in a kernel page-table-directory */ | 650 | /* to find an entry in a kernel page-table-directory */ |
297 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 651 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
298 | 652 | ||
299 | /* extract the pgd cache used for optimizing the tlb miss | ||
300 | * slow path when executing 32-bit compat processes | ||
301 | */ | ||
302 | #define get_pgd_cache(pgd) ((unsigned long) pgd_val(*pgd) << 11) | ||
303 | |||
304 | /* Find an entry in the second-level page table.. */ | 653 | /* Find an entry in the second-level page table.. */ |
305 | #define pmd_offset(pudp, address) \ | 654 | #define pmd_offset(pudp, address) \ |
306 | ((pmd_t *) pud_page(*(pudp)) + \ | 655 | ((pmd_t *) pud_page(*(pudp)) + \ |
@@ -327,6 +676,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p | |||
327 | 676 | ||
328 | /* It is more efficient to let flush_tlb_kernel_range() | 677 | /* It is more efficient to let flush_tlb_kernel_range() |
329 | * handle init_mm tlb flushes. | 678 | * handle init_mm tlb flushes. |
679 | * | ||
680 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U | ||
681 | * and SUN4V pte layout, so this inline test is fine. | ||
330 | */ | 682 | */ |
331 | if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) | 683 | if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) |
332 | tlb_batch_add(mm, addr, ptep, orig); | 684 | tlb_batch_add(mm, addr, ptep, orig); |
@@ -361,42 +713,23 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | |||
361 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 713 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
362 | 714 | ||
363 | /* File offset in PTE support. */ | 715 | /* File offset in PTE support. */ |
364 | #define pte_file(pte) (pte_val(pte) & _PAGE_FILE) | 716 | extern unsigned long pte_file(pte_t); |
365 | #define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) | 717 | #define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) |
366 | #define pgoff_to_pte(off) (__pte(((off) << PAGE_SHIFT) | _PAGE_FILE)) | 718 | extern pte_t pgoff_to_pte(unsigned long); |
367 | #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) | 719 | #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) |
368 | 720 | ||
369 | extern unsigned long prom_virt_to_phys(unsigned long, int *); | 721 | extern unsigned long prom_virt_to_phys(unsigned long, int *); |
370 | 722 | ||
371 | static __inline__ unsigned long | 723 | extern unsigned long sun4u_get_pte(unsigned long); |
372 | sun4u_get_pte (unsigned long addr) | ||
373 | { | ||
374 | pgd_t *pgdp; | ||
375 | pud_t *pudp; | ||
376 | pmd_t *pmdp; | ||
377 | pte_t *ptep; | ||
378 | |||
379 | if (addr >= PAGE_OFFSET) | ||
380 | return addr & _PAGE_PADDR; | ||
381 | if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) | ||
382 | return prom_virt_to_phys(addr, NULL); | ||
383 | pgdp = pgd_offset_k(addr); | ||
384 | pudp = pud_offset(pgdp, addr); | ||
385 | pmdp = pmd_offset(pudp, addr); | ||
386 | ptep = pte_offset_kernel(pmdp, addr); | ||
387 | return pte_val(*ptep) & _PAGE_PADDR; | ||
388 | } | ||
389 | 724 | ||
390 | static __inline__ unsigned long | 725 | static inline unsigned long __get_phys(unsigned long addr) |
391 | __get_phys (unsigned long addr) | ||
392 | { | 726 | { |
393 | return sun4u_get_pte (addr); | 727 | return sun4u_get_pte(addr); |
394 | } | 728 | } |
395 | 729 | ||
396 | static __inline__ int | 730 | static inline int __get_iospace(unsigned long addr) |
397 | __get_iospace (unsigned long addr) | ||
398 | { | 731 | { |
399 | return ((sun4u_get_pte (addr) & 0xf0000000) >> 28); | 732 | return ((sun4u_get_pte(addr) & 0xf0000000) >> 28); |
400 | } | 733 | } |
401 | 734 | ||
402 | extern unsigned long *sparc64_valid_addr_bitmap; | 735 | extern unsigned long *sparc64_valid_addr_bitmap; |
@@ -409,11 +742,6 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, | |||
409 | unsigned long pfn, | 742 | unsigned long pfn, |
410 | unsigned long size, pgprot_t prot); | 743 | unsigned long size, pgprot_t prot); |
411 | 744 | ||
412 | /* Clear virtual and physical cachability, set side-effect bit. */ | ||
413 | #define pgprot_noncached(prot) \ | ||
414 | (__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \ | ||
415 | _PAGE_E)) | ||
416 | |||
417 | /* | 745 | /* |
418 | * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in | 746 | * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in |
419 | * its high 4 bits. These macros/functions put it there or get it from there. | 747 | * its high 4 bits. These macros/functions put it there or get it from there. |
@@ -424,8 +752,11 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, | |||
424 | 752 | ||
425 | #include <asm-generic/pgtable.h> | 753 | #include <asm-generic/pgtable.h> |
426 | 754 | ||
427 | /* We provide our own get_unmapped_area to cope with VA holes for userland */ | 755 | /* We provide our own get_unmapped_area to cope with VA holes and |
756 | * SHM area cache aliasing for userland. | ||
757 | */ | ||
428 | #define HAVE_ARCH_UNMAPPED_AREA | 758 | #define HAVE_ARCH_UNMAPPED_AREA |
759 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | ||
429 | 760 | ||
430 | /* We provide a special get_unmapped_area for framebuffer mmaps to try and use | 761 | /* We provide a special get_unmapped_area for framebuffer mmaps to try and use |
431 | * the largest alignment possible such that larget PTEs can be used. | 762 | * the largest alignment possible such that larget PTEs can be used. |
@@ -435,12 +766,9 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, | |||
435 | unsigned long); | 766 | unsigned long); |
436 | #define HAVE_ARCH_FB_UNMAPPED_AREA | 767 | #define HAVE_ARCH_FB_UNMAPPED_AREA |
437 | 768 | ||
438 | /* | 769 | extern void pgtable_cache_init(void); |
439 | * No page table caches to initialise | 770 | extern void sun4v_register_fault_status(void); |
440 | */ | 771 | extern void sun4v_ktsb_register(void); |
441 | #define pgtable_cache_init() do { } while (0) | ||
442 | |||
443 | extern void check_pgt_cache(void); | ||
444 | 772 | ||
445 | #endif /* !(__ASSEMBLY__) */ | 773 | #endif /* !(__ASSEMBLY__) */ |
446 | 774 | ||
diff --git a/include/asm-sparc64/pil.h b/include/asm-sparc64/pil.h index 8f87750c3517..79f827eb3f5d 100644 --- a/include/asm-sparc64/pil.h +++ b/include/asm-sparc64/pil.h | |||
@@ -16,11 +16,13 @@ | |||
16 | #define PIL_SMP_CALL_FUNC 1 | 16 | #define PIL_SMP_CALL_FUNC 1 |
17 | #define PIL_SMP_RECEIVE_SIGNAL 2 | 17 | #define PIL_SMP_RECEIVE_SIGNAL 2 |
18 | #define PIL_SMP_CAPTURE 3 | 18 | #define PIL_SMP_CAPTURE 3 |
19 | #define PIL_SMP_CTX_NEW_VERSION 4 | ||
19 | 20 | ||
20 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
21 | #define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \ | 22 | #define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \ |
22 | (PIL) == PIL_SMP_RECEIVE_SIGNAL || \ | 23 | (PIL) == PIL_SMP_RECEIVE_SIGNAL || \ |
23 | (PIL) == PIL_SMP_CAPTURE) | 24 | (PIL) == PIL_SMP_CAPTURE || \ |
25 | (PIL) == PIL_SMP_CTX_NEW_VERSION) | ||
24 | #endif | 26 | #endif |
25 | 27 | ||
26 | #endif /* !(_SPARC64_PIL_H) */ | 28 | #endif /* !(_SPARC64_PIL_H) */ |
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index cd8d9b4c8658..c6896b88283e 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h | |||
@@ -28,6 +28,8 @@ | |||
28 | * User lives in his very own context, and cannot reference us. Note | 28 | * User lives in his very own context, and cannot reference us. Note |
29 | * that TASK_SIZE is a misnomer, it really gives maximum user virtual | 29 | * that TASK_SIZE is a misnomer, it really gives maximum user virtual |
30 | * address that the kernel will allocate out. | 30 | * address that the kernel will allocate out. |
31 | * | ||
32 | * XXX No longer using virtual page tables, kill this upper limit... | ||
31 | */ | 33 | */ |
32 | #define VA_BITS 44 | 34 | #define VA_BITS 44 |
33 | #ifndef __ASSEMBLY__ | 35 | #ifndef __ASSEMBLY__ |
@@ -37,18 +39,6 @@ | |||
37 | #endif | 39 | #endif |
38 | #define TASK_SIZE ((unsigned long)-VPTE_SIZE) | 40 | #define TASK_SIZE ((unsigned long)-VPTE_SIZE) |
39 | 41 | ||
40 | /* | ||
41 | * The vpte base must be able to hold the entire vpte, half | ||
42 | * of which lives above, and half below, the base. And it | ||
43 | * is placed as close to the highest address range as possible. | ||
44 | */ | ||
45 | #define VPTE_BASE_SPITFIRE (-(VPTE_SIZE/2)) | ||
46 | #if 1 | ||
47 | #define VPTE_BASE_CHEETAH VPTE_BASE_SPITFIRE | ||
48 | #else | ||
49 | #define VPTE_BASE_CHEETAH 0xffe0000000000000 | ||
50 | #endif | ||
51 | |||
52 | #ifndef __ASSEMBLY__ | 42 | #ifndef __ASSEMBLY__ |
53 | 43 | ||
54 | typedef struct { | 44 | typedef struct { |
@@ -101,7 +91,8 @@ extern unsigned long thread_saved_pc(struct task_struct *); | |||
101 | /* Do necessary setup to start up a newly executed thread. */ | 91 | /* Do necessary setup to start up a newly executed thread. */ |
102 | #define start_thread(regs, pc, sp) \ | 92 | #define start_thread(regs, pc, sp) \ |
103 | do { \ | 93 | do { \ |
104 | regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (ASI_PNF << 24); \ | 94 | unsigned long __asi = ASI_PNF; \ |
95 | regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \ | ||
105 | regs->tpc = ((pc & (~3)) - 4); \ | 96 | regs->tpc = ((pc & (~3)) - 4); \ |
106 | regs->tnpc = regs->tpc + 4; \ | 97 | regs->tnpc = regs->tpc + 4; \ |
107 | regs->y = 0; \ | 98 | regs->y = 0; \ |
@@ -138,10 +129,10 @@ do { \ | |||
138 | 129 | ||
139 | #define start_thread32(regs, pc, sp) \ | 130 | #define start_thread32(regs, pc, sp) \ |
140 | do { \ | 131 | do { \ |
132 | unsigned long __asi = ASI_PNF; \ | ||
141 | pc &= 0x00000000ffffffffUL; \ | 133 | pc &= 0x00000000ffffffffUL; \ |
142 | sp &= 0x00000000ffffffffUL; \ | 134 | sp &= 0x00000000ffffffffUL; \ |
143 | \ | 135 | regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \ |
144 | regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM); \ | ||
145 | regs->tpc = ((pc & (~3)) - 4); \ | 136 | regs->tpc = ((pc & (~3)) - 4); \ |
146 | regs->tnpc = regs->tpc + 4; \ | 137 | regs->tnpc = regs->tpc + 4; \ |
147 | regs->y = 0; \ | 138 | regs->y = 0; \ |
@@ -226,6 +217,8 @@ static inline void prefetchw(const void *x) | |||
226 | 217 | ||
227 | #define spin_lock_prefetch(x) prefetchw(x) | 218 | #define spin_lock_prefetch(x) prefetchw(x) |
228 | 219 | ||
220 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | ||
221 | |||
229 | #endif /* !(__ASSEMBLY__) */ | 222 | #endif /* !(__ASSEMBLY__) */ |
230 | 223 | ||
231 | #endif /* !(__ASM_SPARC64_PROCESSOR_H) */ | 224 | #endif /* !(__ASM_SPARC64_PROCESSOR_H) */ |
diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h index 29fb74aa805d..49a7924a89ab 100644 --- a/include/asm-sparc64/pstate.h +++ b/include/asm-sparc64/pstate.h | |||
@@ -28,11 +28,12 @@ | |||
28 | 28 | ||
29 | /* The V9 TSTATE Register (with SpitFire and Linux extensions). | 29 | /* The V9 TSTATE Register (with SpitFire and Linux extensions). |
30 | * | 30 | * |
31 | * --------------------------------------------------------------- | 31 | * --------------------------------------------------------------------- |
32 | * | Resv | CCR | ASI | %pil | PSTATE | Resv | CWP | | 32 | * | Resv | GL | CCR | ASI | %pil | PSTATE | Resv | CWP | |
33 | * --------------------------------------------------------------- | 33 | * --------------------------------------------------------------------- |
34 | * 63 40 39 32 31 24 23 20 19 8 7 5 4 0 | 34 | * 63 43 42 40 39 32 31 24 23 20 19 8 7 5 4 0 |
35 | */ | 35 | */ |
36 | #define TSTATE_GL _AC(0x0000070000000000,UL) /* Global reg level */ | ||
36 | #define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */ | 37 | #define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */ |
37 | #define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */ | 38 | #define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */ |
38 | #define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */ | 39 | #define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */ |
diff --git a/include/asm-sparc64/scratchpad.h b/include/asm-sparc64/scratchpad.h new file mode 100644 index 000000000000..5e8b01fb3343 --- /dev/null +++ b/include/asm-sparc64/scratchpad.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _SPARC64_SCRATCHPAD_H | ||
2 | #define _SPARC64_SCRATCHPAD_H | ||
3 | |||
4 | /* Sun4v scratchpad registers, accessed via ASI_SCRATCHPAD. */ | ||
5 | |||
6 | #define SCRATCHPAD_MMU_MISS 0x00 /* Shared with OBP - set by OBP */ | ||
7 | #define SCRATCHPAD_CPUID 0x08 /* Shared with OBP - set by hypervisor */ | ||
8 | #define SCRATCHPAD_UTSBREG1 0x10 | ||
9 | #define SCRATCHPAD_UTSBREG2 0x18 | ||
10 | /* 0x20 and 0x28, hypervisor only... */ | ||
11 | #define SCRATCHPAD_UNUSED1 0x30 | ||
12 | #define SCRATCHPAD_UNUSED2 0x38 /* Reserved for OBP */ | ||
13 | |||
14 | #endif /* !(_SPARC64_SCRATCHPAD_H) */ | ||
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index 473edb2603ec..89d86ecaab24 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h | |||
@@ -33,37 +33,13 @@ | |||
33 | extern cpumask_t phys_cpu_present_map; | 33 | extern cpumask_t phys_cpu_present_map; |
34 | #define cpu_possible_map phys_cpu_present_map | 34 | #define cpu_possible_map phys_cpu_present_map |
35 | 35 | ||
36 | extern cpumask_t cpu_sibling_map[NR_CPUS]; | ||
37 | |||
36 | /* | 38 | /* |
37 | * General functions that each host system must provide. | 39 | * General functions that each host system must provide. |
38 | */ | 40 | */ |
39 | 41 | ||
40 | static __inline__ int hard_smp_processor_id(void) | 42 | extern int hard_smp_processor_id(void); |
41 | { | ||
42 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
43 | unsigned long cfg, ver; | ||
44 | __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); | ||
45 | if ((ver >> 32) == 0x003e0016) { | ||
46 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | ||
47 | : "=r" (cfg) | ||
48 | : "i" (ASI_JBUS_CONFIG)); | ||
49 | return ((cfg >> 17) & 0x1f); | ||
50 | } else { | ||
51 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | ||
52 | : "=r" (cfg) | ||
53 | : "i" (ASI_SAFARI_CONFIG)); | ||
54 | return ((cfg >> 17) & 0x3ff); | ||
55 | } | ||
56 | } else if (this_is_starfire != 0) { | ||
57 | return starfire_hard_smp_processor_id(); | ||
58 | } else { | ||
59 | unsigned long upaconfig; | ||
60 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | ||
61 | : "=r" (upaconfig) | ||
62 | : "i" (ASI_UPA_CONFIG)); | ||
63 | return ((upaconfig >> 17) & 0x1f); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 43 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
68 | 44 | ||
69 | extern void smp_setup_cpu_possible_map(void); | 45 | extern void smp_setup_cpu_possible_map(void); |
diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h new file mode 100644 index 000000000000..ed5c9d8541e2 --- /dev/null +++ b/include/asm-sparc64/sparsemem.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _SPARC64_SPARSEMEM_H | ||
2 | #define _SPARC64_SPARSEMEM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #define SECTION_SIZE_BITS 26 | ||
7 | #define MAX_PHYSADDR_BITS 42 | ||
8 | #define MAX_PHYSMEM_BITS 42 | ||
9 | |||
10 | #endif /* !(__KERNEL__) */ | ||
11 | |||
12 | #endif /* !(_SPARC64_SPARSEMEM_H) */ | ||
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h index 962638c9d122..23ad8a7987ad 100644 --- a/include/asm-sparc64/spitfire.h +++ b/include/asm-sparc64/spitfire.h | |||
@@ -44,6 +44,7 @@ enum ultra_tlb_layout { | |||
44 | spitfire = 0, | 44 | spitfire = 0, |
45 | cheetah = 1, | 45 | cheetah = 1, |
46 | cheetah_plus = 2, | 46 | cheetah_plus = 2, |
47 | hypervisor = 3, | ||
47 | }; | 48 | }; |
48 | 49 | ||
49 | extern enum ultra_tlb_layout tlb_type; | 50 | extern enum ultra_tlb_layout tlb_type; |
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index af254e581834..a18ec87a52c1 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h | |||
@@ -209,9 +209,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ | |||
209 | /* so that ASI is only written if it changes, think again. */ \ | 209 | /* so that ASI is only written if it changes, think again. */ \ |
210 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ | 210 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ |
211 | : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ | 211 | : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ |
212 | trap_block[current_thread_info()->cpu].thread = \ | ||
213 | task_thread_info(next); \ | ||
212 | __asm__ __volatile__( \ | 214 | __asm__ __volatile__( \ |
213 | "mov %%g4, %%g7\n\t" \ | 215 | "mov %%g4, %%g7\n\t" \ |
214 | "wrpr %%g0, 0x95, %%pstate\n\t" \ | ||
215 | "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ | 216 | "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ |
216 | "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ | 217 | "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ |
217 | "rdpr %%wstate, %%o5\n\t" \ | 218 | "rdpr %%wstate, %%o5\n\t" \ |
@@ -225,14 +226,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ | |||
225 | "ldx [%%g6 + %3], %%o6\n\t" \ | 226 | "ldx [%%g6 + %3], %%o6\n\t" \ |
226 | "ldub [%%g6 + %2], %%o5\n\t" \ | 227 | "ldub [%%g6 + %2], %%o5\n\t" \ |
227 | "ldub [%%g6 + %4], %%o7\n\t" \ | 228 | "ldub [%%g6 + %4], %%o7\n\t" \ |
228 | "mov %%g6, %%l2\n\t" \ | ||
229 | "wrpr %%o5, 0x0, %%wstate\n\t" \ | 229 | "wrpr %%o5, 0x0, %%wstate\n\t" \ |
230 | "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ | 230 | "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ |
231 | "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ | 231 | "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ |
232 | "wrpr %%g0, 0x94, %%pstate\n\t" \ | ||
233 | "mov %%l2, %%g6\n\t" \ | ||
234 | "ldx [%%g6 + %6], %%g4\n\t" \ | 232 | "ldx [%%g6 + %6], %%g4\n\t" \ |
235 | "wrpr %%g0, 0x96, %%pstate\n\t" \ | ||
236 | "brz,pt %%o7, 1f\n\t" \ | 233 | "brz,pt %%o7, 1f\n\t" \ |
237 | " mov %%g7, %0\n\t" \ | 234 | " mov %%g7, %0\n\t" \ |
238 | "b,a ret_from_syscall\n\t" \ | 235 | "b,a ret_from_syscall\n\t" \ |
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index ac9d068aab4f..2ebf7f27bf91 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h | |||
@@ -64,8 +64,6 @@ struct thread_info { | |||
64 | __u64 kernel_cntd0, kernel_cntd1; | 64 | __u64 kernel_cntd0, kernel_cntd1; |
65 | __u64 pcr_reg; | 65 | __u64 pcr_reg; |
66 | 66 | ||
67 | __u64 cee_stuff; | ||
68 | |||
69 | struct restart_block restart_block; | 67 | struct restart_block restart_block; |
70 | 68 | ||
71 | struct pt_regs *kern_una_regs; | 69 | struct pt_regs *kern_una_regs; |
@@ -104,10 +102,9 @@ struct thread_info { | |||
104 | #define TI_KERN_CNTD0 0x00000480 | 102 | #define TI_KERN_CNTD0 0x00000480 |
105 | #define TI_KERN_CNTD1 0x00000488 | 103 | #define TI_KERN_CNTD1 0x00000488 |
106 | #define TI_PCR 0x00000490 | 104 | #define TI_PCR 0x00000490 |
107 | #define TI_CEE_STUFF 0x00000498 | 105 | #define TI_RESTART_BLOCK 0x00000498 |
108 | #define TI_RESTART_BLOCK 0x000004a0 | 106 | #define TI_KUNA_REGS 0x000004c0 |
109 | #define TI_KUNA_REGS 0x000004c8 | 107 | #define TI_KUNA_INSN 0x000004c8 |
110 | #define TI_KUNA_INSN 0x000004d0 | ||
111 | #define TI_FPREGS 0x00000500 | 108 | #define TI_FPREGS 0x00000500 |
112 | 109 | ||
113 | /* We embed this in the uppermost byte of thread_info->flags */ | 110 | /* We embed this in the uppermost byte of thread_info->flags */ |
diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h index 9e8d4175bcb2..2a5e4ebaad80 100644 --- a/include/asm-sparc64/timex.h +++ b/include/asm-sparc64/timex.h | |||
@@ -14,4 +14,10 @@ | |||
14 | typedef unsigned long cycles_t; | 14 | typedef unsigned long cycles_t; |
15 | #define get_cycles() tick_ops->get_tick() | 15 | #define get_cycles() tick_ops->get_tick() |
16 | 16 | ||
17 | #define ARCH_HAS_READ_CURRENT_TIMER 1 | ||
18 | #define read_current_timer(timer_val_p) \ | ||
19 | ({ *timer_val_p = tick_ops->get_tick(); \ | ||
20 | 0; \ | ||
21 | }) | ||
22 | |||
17 | #endif | 23 | #endif |
diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h index 3ef9909ac3ac..9ad5d9c51d42 100644 --- a/include/asm-sparc64/tlbflush.h +++ b/include/asm-sparc64/tlbflush.h | |||
@@ -5,6 +5,11 @@ | |||
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <asm/mmu_context.h> | 6 | #include <asm/mmu_context.h> |
7 | 7 | ||
8 | /* TSB flush operations. */ | ||
9 | struct mmu_gather; | ||
10 | extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); | ||
11 | extern void flush_tsb_user(struct mmu_gather *mp); | ||
12 | |||
8 | /* TLB flush operations. */ | 13 | /* TLB flush operations. */ |
9 | 14 | ||
10 | extern void flush_tlb_pending(void); | 15 | extern void flush_tlb_pending(void); |
@@ -14,28 +19,36 @@ extern void flush_tlb_pending(void); | |||
14 | #define flush_tlb_page(vma,addr) flush_tlb_pending() | 19 | #define flush_tlb_page(vma,addr) flush_tlb_pending() |
15 | #define flush_tlb_mm(mm) flush_tlb_pending() | 20 | #define flush_tlb_mm(mm) flush_tlb_pending() |
16 | 21 | ||
22 | /* Local cpu only. */ | ||
17 | extern void __flush_tlb_all(void); | 23 | extern void __flush_tlb_all(void); |
24 | |||
18 | extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r); | 25 | extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r); |
19 | 26 | ||
20 | extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); | 27 | extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); |
21 | 28 | ||
22 | #ifndef CONFIG_SMP | 29 | #ifndef CONFIG_SMP |
23 | 30 | ||
24 | #define flush_tlb_all() __flush_tlb_all() | ||
25 | #define flush_tlb_kernel_range(start,end) \ | 31 | #define flush_tlb_kernel_range(start,end) \ |
26 | __flush_tlb_kernel_range(start,end) | 32 | do { flush_tsb_kernel_range(start,end); \ |
33 | __flush_tlb_kernel_range(start,end); \ | ||
34 | } while (0) | ||
27 | 35 | ||
28 | #else /* CONFIG_SMP */ | 36 | #else /* CONFIG_SMP */ |
29 | 37 | ||
30 | extern void smp_flush_tlb_all(void); | ||
31 | extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); | 38 | extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); |
32 | 39 | ||
33 | #define flush_tlb_all() smp_flush_tlb_all() | ||
34 | #define flush_tlb_kernel_range(start, end) \ | 40 | #define flush_tlb_kernel_range(start, end) \ |
35 | smp_flush_tlb_kernel_range(start, end) | 41 | do { flush_tsb_kernel_range(start,end); \ |
42 | smp_flush_tlb_kernel_range(start, end); \ | ||
43 | } while (0) | ||
36 | 44 | ||
37 | #endif /* ! CONFIG_SMP */ | 45 | #endif /* ! CONFIG_SMP */ |
38 | 46 | ||
39 | extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long); | 47 | static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) |
48 | { | ||
49 | /* We don't use virtual page tables for TLB miss processing | ||
50 | * any more. Nowadays we use the TSB. | ||
51 | */ | ||
52 | } | ||
40 | 53 | ||
41 | #endif /* _SPARC64_TLBFLUSH_H */ | 54 | #endif /* _SPARC64_TLBFLUSH_H */ |
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h new file mode 100644 index 000000000000..e82612cd9f33 --- /dev/null +++ b/include/asm-sparc64/tsb.h | |||
@@ -0,0 +1,281 @@ | |||
1 | #ifndef _SPARC64_TSB_H | ||
2 | #define _SPARC64_TSB_H | ||
3 | |||
4 | /* The sparc64 TSB is similar to the powerpc hashtables. It's a | ||
5 | * power-of-2 sized table of TAG/PTE pairs. The cpu precomputes | ||
6 | * pointers into this table for 8K and 64K page sizes, and also a | ||
7 | * comparison TAG based upon the virtual address and context which | ||
8 | * faults. | ||
9 | * | ||
10 | * TLB miss trap handler software does the actual lookup via something | ||
11 | * of the form: | ||
12 | * | ||
13 | * ldxa [%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1 | ||
14 | * ldxa [%g0] ASI_{D,I}MMU, %g6 | ||
15 | * sllx %g6, 22, %g6 | ||
16 | * srlx %g6, 22, %g6 | ||
17 | * ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 | ||
18 | * cmp %g4, %g6 | ||
19 | * bne,pn %xcc, tsb_miss_{d,i}tlb | ||
20 | * mov FAULT_CODE_{D,I}TLB, %g3 | ||
21 | * stxa %g5, [%g0] ASI_{D,I}TLB_DATA_IN | ||
22 | * retry | ||
23 | * | ||
24 | * | ||
25 | * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte | ||
26 | * PTE. The TAG is of the same layout as the TLB TAG TARGET mmu | ||
27 | * register which is: | ||
28 | * | ||
29 | * ------------------------------------------------- | ||
30 | * | - | CONTEXT | - | VADDR bits 63:22 | | ||
31 | * ------------------------------------------------- | ||
32 | * 63 61 60 48 47 42 41 0 | ||
33 | * | ||
34 | * But actually, since we use per-mm TSB's, we zero out the CONTEXT | ||
35 | * field. | ||
36 | * | ||
37 | * Like the powerpc hashtables we need to use locking in order to | ||
38 | * synchronize while we update the entries. PTE updates need locking | ||
39 | * as well. | ||
40 | * | ||
41 | * We need to carefully choose a lock bits for the TSB entry. We | ||
42 | * choose to use bit 47 in the tag. Also, since we never map anything | ||
43 | * at page zero in context zero, we use zero as an invalid tag entry. | ||
44 | * When the lock bit is set, this forces a tag comparison failure. | ||
45 | */ | ||
46 | |||
47 | #define TSB_TAG_LOCK_BIT 47 | ||
48 | #define TSB_TAG_LOCK_HIGH (1 << (TSB_TAG_LOCK_BIT - 32)) | ||
49 | |||
50 | #define TSB_TAG_INVALID_BIT 46 | ||
51 | #define TSB_TAG_INVALID_HIGH (1 << (TSB_TAG_INVALID_BIT - 32)) | ||
52 | |||
53 | #define TSB_MEMBAR membar #StoreStore | ||
54 | |||
55 | /* Some cpus support physical address quad loads. We want to use | ||
56 | * those if possible so we don't need to hard-lock the TSB mapping | ||
57 | * into the TLB. We encode some instruction patching in order to | ||
58 | * support this. | ||
59 | * | ||
60 | * The kernel TSB is locked into the TLB by virtue of being in the | ||
61 | * kernel image, so we don't play these games for swapper_tsb access. | ||
62 | */ | ||
63 | #ifndef __ASSEMBLY__ | ||
64 | struct tsb_ldquad_phys_patch_entry { | ||
65 | unsigned int addr; | ||
66 | unsigned int sun4u_insn; | ||
67 | unsigned int sun4v_insn; | ||
68 | }; | ||
69 | extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch, | ||
70 | __tsb_ldquad_phys_patch_end; | ||
71 | |||
72 | struct tsb_phys_patch_entry { | ||
73 | unsigned int addr; | ||
74 | unsigned int insn; | ||
75 | }; | ||
76 | extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; | ||
77 | #endif | ||
78 | #define TSB_LOAD_QUAD(TSB, REG) \ | ||
79 | 661: ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; \ | ||
80 | .section .tsb_ldquad_phys_patch, "ax"; \ | ||
81 | .word 661b; \ | ||
82 | ldda [TSB] ASI_QUAD_LDD_PHYS, REG; \ | ||
83 | ldda [TSB] ASI_QUAD_LDD_PHYS_4V, REG; \ | ||
84 | .previous | ||
85 | |||
86 | #define TSB_LOAD_TAG_HIGH(TSB, REG) \ | ||
87 | 661: lduwa [TSB] ASI_N, REG; \ | ||
88 | .section .tsb_phys_patch, "ax"; \ | ||
89 | .word 661b; \ | ||
90 | lduwa [TSB] ASI_PHYS_USE_EC, REG; \ | ||
91 | .previous | ||
92 | |||
93 | #define TSB_LOAD_TAG(TSB, REG) \ | ||
94 | 661: ldxa [TSB] ASI_N, REG; \ | ||
95 | .section .tsb_phys_patch, "ax"; \ | ||
96 | .word 661b; \ | ||
97 | ldxa [TSB] ASI_PHYS_USE_EC, REG; \ | ||
98 | .previous | ||
99 | |||
100 | #define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \ | ||
101 | 661: casa [TSB] ASI_N, REG1, REG2; \ | ||
102 | .section .tsb_phys_patch, "ax"; \ | ||
103 | .word 661b; \ | ||
104 | casa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \ | ||
105 | .previous | ||
106 | |||
107 | #define TSB_CAS_TAG(TSB, REG1, REG2) \ | ||
108 | 661: casxa [TSB] ASI_N, REG1, REG2; \ | ||
109 | .section .tsb_phys_patch, "ax"; \ | ||
110 | .word 661b; \ | ||
111 | casxa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \ | ||
112 | .previous | ||
113 | |||
114 | #define TSB_STORE(ADDR, VAL) \ | ||
115 | 661: stxa VAL, [ADDR] ASI_N; \ | ||
116 | .section .tsb_phys_patch, "ax"; \ | ||
117 | .word 661b; \ | ||
118 | stxa VAL, [ADDR] ASI_PHYS_USE_EC; \ | ||
119 | .previous | ||
120 | |||
121 | #define TSB_LOCK_TAG(TSB, REG1, REG2) \ | ||
122 | 99: TSB_LOAD_TAG_HIGH(TSB, REG1); \ | ||
123 | sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\ | ||
124 | andcc REG1, REG2, %g0; \ | ||
125 | bne,pn %icc, 99b; \ | ||
126 | nop; \ | ||
127 | TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \ | ||
128 | cmp REG1, REG2; \ | ||
129 | bne,pn %icc, 99b; \ | ||
130 | nop; \ | ||
131 | TSB_MEMBAR | ||
132 | |||
133 | #define TSB_WRITE(TSB, TTE, TAG) \ | ||
134 | add TSB, 0x8, TSB; \ | ||
135 | TSB_STORE(TSB, TTE); \ | ||
136 | sub TSB, 0x8, TSB; \ | ||
137 | TSB_MEMBAR; \ | ||
138 | TSB_STORE(TSB, TAG); | ||
139 | |||
140 | #define KTSB_LOAD_QUAD(TSB, REG) \ | ||
141 | ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; | ||
142 | |||
143 | #define KTSB_STORE(ADDR, VAL) \ | ||
144 | stxa VAL, [ADDR] ASI_N; | ||
145 | |||
146 | #define KTSB_LOCK_TAG(TSB, REG1, REG2) \ | ||
147 | 99: lduwa [TSB] ASI_N, REG1; \ | ||
148 | sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\ | ||
149 | andcc REG1, REG2, %g0; \ | ||
150 | bne,pn %icc, 99b; \ | ||
151 | nop; \ | ||
152 | casa [TSB] ASI_N, REG1, REG2;\ | ||
153 | cmp REG1, REG2; \ | ||
154 | bne,pn %icc, 99b; \ | ||
155 | nop; \ | ||
156 | TSB_MEMBAR | ||
157 | |||
158 | #define KTSB_WRITE(TSB, TTE, TAG) \ | ||
159 | add TSB, 0x8, TSB; \ | ||
160 | stxa TTE, [TSB] ASI_N; \ | ||
161 | sub TSB, 0x8, TSB; \ | ||
162 | TSB_MEMBAR; \ | ||
163 | stxa TAG, [TSB] ASI_N; | ||
164 | |||
165 | /* Do a kernel page table walk. Leaves physical PTE pointer in | ||
166 | * REG1. Jumps to FAIL_LABEL on early page table walk termination. | ||
167 | * VADDR will not be clobbered, but REG2 will. | ||
168 | */ | ||
169 | #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ | ||
170 | sethi %hi(swapper_pg_dir), REG1; \ | ||
171 | or REG1, %lo(swapper_pg_dir), REG1; \ | ||
172 | sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \ | ||
173 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
174 | andn REG2, 0x3, REG2; \ | ||
175 | lduw [REG1 + REG2], REG1; \ | ||
176 | brz,pn REG1, FAIL_LABEL; \ | ||
177 | sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ | ||
178 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
179 | sllx REG1, 11, REG1; \ | ||
180 | andn REG2, 0x3, REG2; \ | ||
181 | lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ | ||
182 | brz,pn REG1, FAIL_LABEL; \ | ||
183 | sllx VADDR, 64 - PMD_SHIFT, REG2; \ | ||
184 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
185 | sllx REG1, 11, REG1; \ | ||
186 | andn REG2, 0x7, REG2; \ | ||
187 | add REG1, REG2, REG1; | ||
188 | |||
189 | /* Do a user page table walk in MMU globals. Leaves physical PTE | ||
190 | * pointer in REG1. Jumps to FAIL_LABEL on early page table walk | ||
191 | * termination. Physical base of page tables is in PHYS_PGD which | ||
192 | * will not be modified. | ||
193 | * | ||
194 | * VADDR will not be clobbered, but REG1 and REG2 will. | ||
195 | */ | ||
196 | #define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \ | ||
197 | sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \ | ||
198 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
199 | andn REG2, 0x3, REG2; \ | ||
200 | lduwa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \ | ||
201 | brz,pn REG1, FAIL_LABEL; \ | ||
202 | sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ | ||
203 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
204 | sllx REG1, 11, REG1; \ | ||
205 | andn REG2, 0x3, REG2; \ | ||
206 | lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ | ||
207 | brz,pn REG1, FAIL_LABEL; \ | ||
208 | sllx VADDR, 64 - PMD_SHIFT, REG2; \ | ||
209 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
210 | sllx REG1, 11, REG1; \ | ||
211 | andn REG2, 0x7, REG2; \ | ||
212 | add REG1, REG2, REG1; | ||
213 | |||
214 | /* Lookup a OBP mapping on VADDR in the prom_trans[] table at TL>0. | ||
215 | * If no entry is found, FAIL_LABEL will be branched to. On success | ||
216 | * the resulting PTE value will be left in REG1. VADDR is preserved | ||
217 | * by this routine. | ||
218 | */ | ||
219 | #define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \ | ||
220 | sethi %hi(prom_trans), REG1; \ | ||
221 | or REG1, %lo(prom_trans), REG1; \ | ||
222 | 97: ldx [REG1 + 0x00], REG2; \ | ||
223 | brz,pn REG2, FAIL_LABEL; \ | ||
224 | nop; \ | ||
225 | ldx [REG1 + 0x08], REG3; \ | ||
226 | add REG2, REG3, REG3; \ | ||
227 | cmp REG2, VADDR; \ | ||
228 | bgu,pt %xcc, 98f; \ | ||
229 | cmp VADDR, REG3; \ | ||
230 | bgeu,pt %xcc, 98f; \ | ||
231 | ldx [REG1 + 0x10], REG3; \ | ||
232 | sub VADDR, REG2, REG2; \ | ||
233 | ba,pt %xcc, 99f; \ | ||
234 | add REG3, REG2, REG1; \ | ||
235 | 98: ba,pt %xcc, 97b; \ | ||
236 | add REG1, (3 * 8), REG1; \ | ||
237 | 99: | ||
238 | |||
239 | /* We use a 32K TSB for the whole kernel, this allows to | ||
240 | * handle about 16MB of modules and vmalloc mappings without | ||
241 | * incurring many hash conflicts. | ||
242 | */ | ||
243 | #define KERNEL_TSB_SIZE_BYTES (32 * 1024) | ||
244 | #define KERNEL_TSB_NENTRIES \ | ||
245 | (KERNEL_TSB_SIZE_BYTES / 16) | ||
246 | #define KERNEL_TSB4M_NENTRIES 4096 | ||
247 | |||
248 | /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL | ||
249 | * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries | ||
250 | * and the found TTE will be left in REG1. REG3 and REG4 must | ||
251 | * be an even/odd pair of registers. | ||
252 | * | ||
253 | * VADDR and TAG will be preserved and not clobbered by this macro. | ||
254 | */ | ||
255 | #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ | ||
256 | sethi %hi(swapper_tsb), REG1; \ | ||
257 | or REG1, %lo(swapper_tsb), REG1; \ | ||
258 | srlx VADDR, PAGE_SHIFT, REG2; \ | ||
259 | and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \ | ||
260 | sllx REG2, 4, REG2; \ | ||
261 | add REG1, REG2, REG2; \ | ||
262 | KTSB_LOAD_QUAD(REG2, REG3); \ | ||
263 | cmp REG3, TAG; \ | ||
264 | be,a,pt %xcc, OK_LABEL; \ | ||
265 | mov REG4, REG1; | ||
266 | |||
267 | /* This version uses a trick, the TAG is already (VADDR >> 22) so | ||
268 | * we can make use of that for the index computation. | ||
269 | */ | ||
270 | #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ | ||
271 | sethi %hi(swapper_4m_tsb), REG1; \ | ||
272 | or REG1, %lo(swapper_4m_tsb), REG1; \ | ||
273 | and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \ | ||
274 | sllx REG2, 4, REG2; \ | ||
275 | add REG1, REG2, REG2; \ | ||
276 | KTSB_LOAD_QUAD(REG2, REG3); \ | ||
277 | cmp REG3, TAG; \ | ||
278 | be,a,pt %xcc, OK_LABEL; \ | ||
279 | mov REG4, REG1; | ||
280 | |||
281 | #endif /* !(_SPARC64_TSB_H) */ | ||
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index 2784f80094c3..2d5e3c464df5 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h | |||
@@ -93,7 +93,7 @@ | |||
93 | 93 | ||
94 | #define SYSCALL_TRAP(routine, systbl) \ | 94 | #define SYSCALL_TRAP(routine, systbl) \ |
95 | sethi %hi(109f), %g7; \ | 95 | sethi %hi(109f), %g7; \ |
96 | ba,pt %xcc, scetrap; \ | 96 | ba,pt %xcc, etrap; \ |
97 | 109: or %g7, %lo(109b), %g7; \ | 97 | 109: or %g7, %lo(109b), %g7; \ |
98 | sethi %hi(systbl), %l7; \ | 98 | sethi %hi(systbl), %l7; \ |
99 | ba,pt %xcc, routine; \ | 99 | ba,pt %xcc, routine; \ |
@@ -109,14 +109,14 @@ | |||
109 | nop;nop;nop; | 109 | nop;nop;nop; |
110 | 110 | ||
111 | #define TRAP_UTRAP(handler,lvl) \ | 111 | #define TRAP_UTRAP(handler,lvl) \ |
112 | ldx [%g6 + TI_UTRAPS], %g1; \ | 112 | mov handler, %g3; \ |
113 | sethi %hi(109f), %g7; \ | 113 | ba,pt %xcc, utrap_trap; \ |
114 | brz,pn %g1, utrap; \ | 114 | mov lvl, %g4; \ |
115 | or %g7, %lo(109f), %g7; \ | 115 | nop; \ |
116 | ba,pt %xcc, utrap; \ | 116 | nop; \ |
117 | 109: ldx [%g1 + handler*8], %g1; \ | 117 | nop; \ |
118 | ba,pt %xcc, utrap_ill; \ | 118 | nop; \ |
119 | mov lvl, %o1; | 119 | nop; |
120 | 120 | ||
121 | #ifdef CONFIG_SUNOS_EMUL | 121 | #ifdef CONFIG_SUNOS_EMUL |
122 | #define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table) | 122 | #define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table) |
@@ -136,8 +136,6 @@ | |||
136 | #else | 136 | #else |
137 | #define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall) | 137 | #define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall) |
138 | #endif | 138 | #endif |
139 | /* FIXME: Write these actually */ | ||
140 | #define NETBSD_SYSCALL_TRAP TRAP(netbsd_syscall) | ||
141 | #define BREAKPOINT_TRAP TRAP(breakpoint_trap) | 139 | #define BREAKPOINT_TRAP TRAP(breakpoint_trap) |
142 | 140 | ||
143 | #define TRAP_IRQ(routine, level) \ | 141 | #define TRAP_IRQ(routine, level) \ |
@@ -182,6 +180,26 @@ | |||
182 | #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl) | 180 | #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl) |
183 | #endif | 181 | #endif |
184 | 182 | ||
183 | #define SUN4V_ITSB_MISS \ | ||
184 | ldxa [%g0] ASI_SCRATCHPAD, %g2; \ | ||
185 | ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4; \ | ||
186 | ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \ | ||
187 | srlx %g4, 22, %g6; \ | ||
188 | ba,pt %xcc, sun4v_itsb_miss; \ | ||
189 | nop; \ | ||
190 | nop; \ | ||
191 | nop; | ||
192 | |||
193 | #define SUN4V_DTSB_MISS \ | ||
194 | ldxa [%g0] ASI_SCRATCHPAD, %g2; \ | ||
195 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \ | ||
196 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \ | ||
197 | srlx %g4, 22, %g6; \ | ||
198 | ba,pt %xcc, sun4v_dtsb_miss; \ | ||
199 | nop; \ | ||
200 | nop; \ | ||
201 | nop; | ||
202 | |||
185 | /* Before touching these macros, you owe it to yourself to go and | 203 | /* Before touching these macros, you owe it to yourself to go and |
186 | * see how arch/sparc64/kernel/winfixup.S works... -DaveM | 204 | * see how arch/sparc64/kernel/winfixup.S works... -DaveM |
187 | * | 205 | * |
@@ -221,6 +239,31 @@ | |||
221 | saved; retry; nop; nop; nop; nop; nop; nop; \ | 239 | saved; retry; nop; nop; nop; nop; nop; nop; \ |
222 | nop; nop; nop; nop; nop; nop; nop; nop; | 240 | nop; nop; nop; nop; nop; nop; nop; nop; |
223 | 241 | ||
242 | #define SPILL_0_NORMAL_ETRAP \ | ||
243 | etrap_kernel_spill: \ | ||
244 | stx %l0, [%sp + STACK_BIAS + 0x00]; \ | ||
245 | stx %l1, [%sp + STACK_BIAS + 0x08]; \ | ||
246 | stx %l2, [%sp + STACK_BIAS + 0x10]; \ | ||
247 | stx %l3, [%sp + STACK_BIAS + 0x18]; \ | ||
248 | stx %l4, [%sp + STACK_BIAS + 0x20]; \ | ||
249 | stx %l5, [%sp + STACK_BIAS + 0x28]; \ | ||
250 | stx %l6, [%sp + STACK_BIAS + 0x30]; \ | ||
251 | stx %l7, [%sp + STACK_BIAS + 0x38]; \ | ||
252 | stx %i0, [%sp + STACK_BIAS + 0x40]; \ | ||
253 | stx %i1, [%sp + STACK_BIAS + 0x48]; \ | ||
254 | stx %i2, [%sp + STACK_BIAS + 0x50]; \ | ||
255 | stx %i3, [%sp + STACK_BIAS + 0x58]; \ | ||
256 | stx %i4, [%sp + STACK_BIAS + 0x60]; \ | ||
257 | stx %i5, [%sp + STACK_BIAS + 0x68]; \ | ||
258 | stx %i6, [%sp + STACK_BIAS + 0x70]; \ | ||
259 | stx %i7, [%sp + STACK_BIAS + 0x78]; \ | ||
260 | saved; \ | ||
261 | sub %g1, 2, %g1; \ | ||
262 | ba,pt %xcc, etrap_save; \ | ||
263 | wrpr %g1, %cwp; \ | ||
264 | nop; nop; nop; nop; nop; nop; nop; nop; \ | ||
265 | nop; nop; nop; nop; | ||
266 | |||
224 | /* Normal 64bit spill */ | 267 | /* Normal 64bit spill */ |
225 | #define SPILL_1_GENERIC(ASI) \ | 268 | #define SPILL_1_GENERIC(ASI) \ |
226 | add %sp, STACK_BIAS + 0x00, %g1; \ | 269 | add %sp, STACK_BIAS + 0x00, %g1; \ |
@@ -254,6 +297,67 @@ | |||
254 | b,a,pt %xcc, spill_fixup_mna; \ | 297 | b,a,pt %xcc, spill_fixup_mna; \ |
255 | b,a,pt %xcc, spill_fixup; | 298 | b,a,pt %xcc, spill_fixup; |
256 | 299 | ||
300 | #define SPILL_1_GENERIC_ETRAP \ | ||
301 | etrap_user_spill_64bit: \ | ||
302 | stxa %l0, [%sp + STACK_BIAS + 0x00] %asi; \ | ||
303 | stxa %l1, [%sp + STACK_BIAS + 0x08] %asi; \ | ||
304 | stxa %l2, [%sp + STACK_BIAS + 0x10] %asi; \ | ||
305 | stxa %l3, [%sp + STACK_BIAS + 0x18] %asi; \ | ||
306 | stxa %l4, [%sp + STACK_BIAS + 0x20] %asi; \ | ||
307 | stxa %l5, [%sp + STACK_BIAS + 0x28] %asi; \ | ||
308 | stxa %l6, [%sp + STACK_BIAS + 0x30] %asi; \ | ||
309 | stxa %l7, [%sp + STACK_BIAS + 0x38] %asi; \ | ||
310 | stxa %i0, [%sp + STACK_BIAS + 0x40] %asi; \ | ||
311 | stxa %i1, [%sp + STACK_BIAS + 0x48] %asi; \ | ||
312 | stxa %i2, [%sp + STACK_BIAS + 0x50] %asi; \ | ||
313 | stxa %i3, [%sp + STACK_BIAS + 0x58] %asi; \ | ||
314 | stxa %i4, [%sp + STACK_BIAS + 0x60] %asi; \ | ||
315 | stxa %i5, [%sp + STACK_BIAS + 0x68] %asi; \ | ||
316 | stxa %i6, [%sp + STACK_BIAS + 0x70] %asi; \ | ||
317 | stxa %i7, [%sp + STACK_BIAS + 0x78] %asi; \ | ||
318 | saved; \ | ||
319 | sub %g1, 2, %g1; \ | ||
320 | ba,pt %xcc, etrap_save; \ | ||
321 | wrpr %g1, %cwp; \ | ||
322 | nop; nop; nop; nop; nop; \ | ||
323 | nop; nop; nop; nop; \ | ||
324 | ba,a,pt %xcc, etrap_spill_fixup_64bit; \ | ||
325 | ba,a,pt %xcc, etrap_spill_fixup_64bit; \ | ||
326 | ba,a,pt %xcc, etrap_spill_fixup_64bit; | ||
327 | |||
328 | #define SPILL_1_GENERIC_ETRAP_FIXUP \ | ||
329 | etrap_spill_fixup_64bit: \ | ||
330 | ldub [%g6 + TI_WSAVED], %g1; \ | ||
331 | sll %g1, 3, %g3; \ | ||
332 | add %g6, %g3, %g3; \ | ||
333 | stx %sp, [%g3 + TI_RWIN_SPTRS]; \ | ||
334 | sll %g1, 7, %g3; \ | ||
335 | add %g6, %g3, %g3; \ | ||
336 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00]; \ | ||
337 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08]; \ | ||
338 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10]; \ | ||
339 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18]; \ | ||
340 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20]; \ | ||
341 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28]; \ | ||
342 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30]; \ | ||
343 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38]; \ | ||
344 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40]; \ | ||
345 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48]; \ | ||
346 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50]; \ | ||
347 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58]; \ | ||
348 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60]; \ | ||
349 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68]; \ | ||
350 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70]; \ | ||
351 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78]; \ | ||
352 | add %g1, 1, %g1; \ | ||
353 | stb %g1, [%g6 + TI_WSAVED]; \ | ||
354 | saved; \ | ||
355 | rdpr %cwp, %g1; \ | ||
356 | sub %g1, 2, %g1; \ | ||
357 | ba,pt %xcc, etrap_save; \ | ||
358 | wrpr %g1, %cwp; \ | ||
359 | nop; nop; nop | ||
360 | |||
257 | /* Normal 32bit spill */ | 361 | /* Normal 32bit spill */ |
258 | #define SPILL_2_GENERIC(ASI) \ | 362 | #define SPILL_2_GENERIC(ASI) \ |
259 | srl %sp, 0, %sp; \ | 363 | srl %sp, 0, %sp; \ |
@@ -287,6 +391,68 @@ | |||
287 | b,a,pt %xcc, spill_fixup_mna; \ | 391 | b,a,pt %xcc, spill_fixup_mna; \ |
288 | b,a,pt %xcc, spill_fixup; | 392 | b,a,pt %xcc, spill_fixup; |
289 | 393 | ||
394 | #define SPILL_2_GENERIC_ETRAP \ | ||
395 | etrap_user_spill_32bit: \ | ||
396 | srl %sp, 0, %sp; \ | ||
397 | stwa %l0, [%sp + 0x00] %asi; \ | ||
398 | stwa %l1, [%sp + 0x04] %asi; \ | ||
399 | stwa %l2, [%sp + 0x08] %asi; \ | ||
400 | stwa %l3, [%sp + 0x0c] %asi; \ | ||
401 | stwa %l4, [%sp + 0x10] %asi; \ | ||
402 | stwa %l5, [%sp + 0x14] %asi; \ | ||
403 | stwa %l6, [%sp + 0x18] %asi; \ | ||
404 | stwa %l7, [%sp + 0x1c] %asi; \ | ||
405 | stwa %i0, [%sp + 0x20] %asi; \ | ||
406 | stwa %i1, [%sp + 0x24] %asi; \ | ||
407 | stwa %i2, [%sp + 0x28] %asi; \ | ||
408 | stwa %i3, [%sp + 0x2c] %asi; \ | ||
409 | stwa %i4, [%sp + 0x30] %asi; \ | ||
410 | stwa %i5, [%sp + 0x34] %asi; \ | ||
411 | stwa %i6, [%sp + 0x38] %asi; \ | ||
412 | stwa %i7, [%sp + 0x3c] %asi; \ | ||
413 | saved; \ | ||
414 | sub %g1, 2, %g1; \ | ||
415 | ba,pt %xcc, etrap_save; \ | ||
416 | wrpr %g1, %cwp; \ | ||
417 | nop; nop; nop; nop; \ | ||
418 | nop; nop; nop; nop; \ | ||
419 | ba,a,pt %xcc, etrap_spill_fixup_32bit; \ | ||
420 | ba,a,pt %xcc, etrap_spill_fixup_32bit; \ | ||
421 | ba,a,pt %xcc, etrap_spill_fixup_32bit; | ||
422 | |||
423 | #define SPILL_2_GENERIC_ETRAP_FIXUP \ | ||
424 | etrap_spill_fixup_32bit: \ | ||
425 | ldub [%g6 + TI_WSAVED], %g1; \ | ||
426 | sll %g1, 3, %g3; \ | ||
427 | add %g6, %g3, %g3; \ | ||
428 | stx %sp, [%g3 + TI_RWIN_SPTRS]; \ | ||
429 | sll %g1, 7, %g3; \ | ||
430 | add %g6, %g3, %g3; \ | ||
431 | stw %l0, [%g3 + TI_REG_WINDOW + 0x00]; \ | ||
432 | stw %l1, [%g3 + TI_REG_WINDOW + 0x04]; \ | ||
433 | stw %l2, [%g3 + TI_REG_WINDOW + 0x08]; \ | ||
434 | stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]; \ | ||
435 | stw %l4, [%g3 + TI_REG_WINDOW + 0x10]; \ | ||
436 | stw %l5, [%g3 + TI_REG_WINDOW + 0x14]; \ | ||
437 | stw %l6, [%g3 + TI_REG_WINDOW + 0x18]; \ | ||
438 | stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]; \ | ||
439 | stw %i0, [%g3 + TI_REG_WINDOW + 0x20]; \ | ||
440 | stw %i1, [%g3 + TI_REG_WINDOW + 0x24]; \ | ||
441 | stw %i2, [%g3 + TI_REG_WINDOW + 0x28]; \ | ||
442 | stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]; \ | ||
443 | stw %i4, [%g3 + TI_REG_WINDOW + 0x30]; \ | ||
444 | stw %i5, [%g3 + TI_REG_WINDOW + 0x34]; \ | ||
445 | stw %i6, [%g3 + TI_REG_WINDOW + 0x38]; \ | ||
446 | stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]; \ | ||
447 | add %g1, 1, %g1; \ | ||
448 | stb %g1, [%g6 + TI_WSAVED]; \ | ||
449 | saved; \ | ||
450 | rdpr %cwp, %g1; \ | ||
451 | sub %g1, 2, %g1; \ | ||
452 | ba,pt %xcc, etrap_save; \ | ||
453 | wrpr %g1, %cwp; \ | ||
454 | nop; nop; nop | ||
455 | |||
290 | #define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP) | 456 | #define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP) |
291 | #define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP) | 457 | #define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP) |
292 | #define SPILL_3_NORMAL SPILL_0_NORMAL | 458 | #define SPILL_3_NORMAL SPILL_0_NORMAL |
@@ -325,6 +491,35 @@ | |||
325 | restored; retry; nop; nop; nop; nop; nop; nop; \ | 491 | restored; retry; nop; nop; nop; nop; nop; nop; \ |
326 | nop; nop; nop; nop; nop; nop; nop; nop; | 492 | nop; nop; nop; nop; nop; nop; nop; nop; |
327 | 493 | ||
494 | #define FILL_0_NORMAL_RTRAP \ | ||
495 | kern_rtt_fill: \ | ||
496 | rdpr %cwp, %g1; \ | ||
497 | sub %g1, 1, %g1; \ | ||
498 | wrpr %g1, %cwp; \ | ||
499 | ldx [%sp + STACK_BIAS + 0x00], %l0; \ | ||
500 | ldx [%sp + STACK_BIAS + 0x08], %l1; \ | ||
501 | ldx [%sp + STACK_BIAS + 0x10], %l2; \ | ||
502 | ldx [%sp + STACK_BIAS + 0x18], %l3; \ | ||
503 | ldx [%sp + STACK_BIAS + 0x20], %l4; \ | ||
504 | ldx [%sp + STACK_BIAS + 0x28], %l5; \ | ||
505 | ldx [%sp + STACK_BIAS + 0x30], %l6; \ | ||
506 | ldx [%sp + STACK_BIAS + 0x38], %l7; \ | ||
507 | ldx [%sp + STACK_BIAS + 0x40], %i0; \ | ||
508 | ldx [%sp + STACK_BIAS + 0x48], %i1; \ | ||
509 | ldx [%sp + STACK_BIAS + 0x50], %i2; \ | ||
510 | ldx [%sp + STACK_BIAS + 0x58], %i3; \ | ||
511 | ldx [%sp + STACK_BIAS + 0x60], %i4; \ | ||
512 | ldx [%sp + STACK_BIAS + 0x68], %i5; \ | ||
513 | ldx [%sp + STACK_BIAS + 0x70], %i6; \ | ||
514 | ldx [%sp + STACK_BIAS + 0x78], %i7; \ | ||
515 | restored; \ | ||
516 | add %g1, 1, %g1; \ | ||
517 | ba,pt %xcc, kern_rtt_restore; \ | ||
518 | wrpr %g1, %cwp; \ | ||
519 | nop; nop; nop; nop; nop; \ | ||
520 | nop; nop; nop; nop; | ||
521 | |||
522 | |||
328 | /* Normal 64bit fill */ | 523 | /* Normal 64bit fill */ |
329 | #define FILL_1_GENERIC(ASI) \ | 524 | #define FILL_1_GENERIC(ASI) \ |
330 | add %sp, STACK_BIAS + 0x00, %g1; \ | 525 | add %sp, STACK_BIAS + 0x00, %g1; \ |
@@ -356,6 +551,33 @@ | |||
356 | b,a,pt %xcc, fill_fixup_mna; \ | 551 | b,a,pt %xcc, fill_fixup_mna; \ |
357 | b,a,pt %xcc, fill_fixup; | 552 | b,a,pt %xcc, fill_fixup; |
358 | 553 | ||
554 | #define FILL_1_GENERIC_RTRAP \ | ||
555 | user_rtt_fill_64bit: \ | ||
556 | ldxa [%sp + STACK_BIAS + 0x00] %asi, %l0; \ | ||
557 | ldxa [%sp + STACK_BIAS + 0x08] %asi, %l1; \ | ||
558 | ldxa [%sp + STACK_BIAS + 0x10] %asi, %l2; \ | ||
559 | ldxa [%sp + STACK_BIAS + 0x18] %asi, %l3; \ | ||
560 | ldxa [%sp + STACK_BIAS + 0x20] %asi, %l4; \ | ||
561 | ldxa [%sp + STACK_BIAS + 0x28] %asi, %l5; \ | ||
562 | ldxa [%sp + STACK_BIAS + 0x30] %asi, %l6; \ | ||
563 | ldxa [%sp + STACK_BIAS + 0x38] %asi, %l7; \ | ||
564 | ldxa [%sp + STACK_BIAS + 0x40] %asi, %i0; \ | ||
565 | ldxa [%sp + STACK_BIAS + 0x48] %asi, %i1; \ | ||
566 | ldxa [%sp + STACK_BIAS + 0x50] %asi, %i2; \ | ||
567 | ldxa [%sp + STACK_BIAS + 0x58] %asi, %i3; \ | ||
568 | ldxa [%sp + STACK_BIAS + 0x60] %asi, %i4; \ | ||
569 | ldxa [%sp + STACK_BIAS + 0x68] %asi, %i5; \ | ||
570 | ldxa [%sp + STACK_BIAS + 0x70] %asi, %i6; \ | ||
571 | ldxa [%sp + STACK_BIAS + 0x78] %asi, %i7; \ | ||
572 | ba,pt %xcc, user_rtt_pre_restore; \ | ||
573 | restored; \ | ||
574 | nop; nop; nop; nop; nop; nop; \ | ||
575 | nop; nop; nop; nop; nop; \ | ||
576 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | ||
577 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | ||
578 | ba,a,pt %xcc, user_rtt_fill_fixup; | ||
579 | |||
580 | |||
359 | /* Normal 32bit fill */ | 581 | /* Normal 32bit fill */ |
360 | #define FILL_2_GENERIC(ASI) \ | 582 | #define FILL_2_GENERIC(ASI) \ |
361 | srl %sp, 0, %sp; \ | 583 | srl %sp, 0, %sp; \ |
@@ -387,6 +609,34 @@ | |||
387 | b,a,pt %xcc, fill_fixup_mna; \ | 609 | b,a,pt %xcc, fill_fixup_mna; \ |
388 | b,a,pt %xcc, fill_fixup; | 610 | b,a,pt %xcc, fill_fixup; |
389 | 611 | ||
612 | #define FILL_2_GENERIC_RTRAP \ | ||
613 | user_rtt_fill_32bit: \ | ||
614 | srl %sp, 0, %sp; \ | ||
615 | lduwa [%sp + 0x00] %asi, %l0; \ | ||
616 | lduwa [%sp + 0x04] %asi, %l1; \ | ||
617 | lduwa [%sp + 0x08] %asi, %l2; \ | ||
618 | lduwa [%sp + 0x0c] %asi, %l3; \ | ||
619 | lduwa [%sp + 0x10] %asi, %l4; \ | ||
620 | lduwa [%sp + 0x14] %asi, %l5; \ | ||
621 | lduwa [%sp + 0x18] %asi, %l6; \ | ||
622 | lduwa [%sp + 0x1c] %asi, %l7; \ | ||
623 | lduwa [%sp + 0x20] %asi, %i0; \ | ||
624 | lduwa [%sp + 0x24] %asi, %i1; \ | ||
625 | lduwa [%sp + 0x28] %asi, %i2; \ | ||
626 | lduwa [%sp + 0x2c] %asi, %i3; \ | ||
627 | lduwa [%sp + 0x30] %asi, %i4; \ | ||
628 | lduwa [%sp + 0x34] %asi, %i5; \ | ||
629 | lduwa [%sp + 0x38] %asi, %i6; \ | ||
630 | lduwa [%sp + 0x3c] %asi, %i7; \ | ||
631 | ba,pt %xcc, user_rtt_pre_restore; \ | ||
632 | restored; \ | ||
633 | nop; nop; nop; nop; nop; \ | ||
634 | nop; nop; nop; nop; nop; \ | ||
635 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | ||
636 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | ||
637 | ba,a,pt %xcc, user_rtt_fill_fixup; | ||
638 | |||
639 | |||
390 | #define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP) | 640 | #define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP) |
391 | #define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP) | 641 | #define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP) |
392 | #define FILL_3_NORMAL FILL_0_NORMAL | 642 | #define FILL_3_NORMAL FILL_0_NORMAL |
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h index c91d1e38eac6..afe236ba555b 100644 --- a/include/asm-sparc64/uaccess.h +++ b/include/asm-sparc64/uaccess.h | |||
@@ -114,16 +114,6 @@ case 8: __put_user_asm(data,x,addr,__pu_ret); break; \ | |||
114 | default: __pu_ret = __put_user_bad(); break; \ | 114 | default: __pu_ret = __put_user_bad(); break; \ |
115 | } __pu_ret; }) | 115 | } __pu_ret; }) |
116 | 116 | ||
117 | #define __put_user_nocheck_ret(data,addr,size,retval) ({ \ | ||
118 | register int __foo __asm__ ("l1"); \ | ||
119 | switch (size) { \ | ||
120 | case 1: __put_user_asm_ret(data,b,addr,retval,__foo); break; \ | ||
121 | case 2: __put_user_asm_ret(data,h,addr,retval,__foo); break; \ | ||
122 | case 4: __put_user_asm_ret(data,w,addr,retval,__foo); break; \ | ||
123 | case 8: __put_user_asm_ret(data,x,addr,retval,__foo); break; \ | ||
124 | default: if (__put_user_bad()) return retval; break; \ | ||
125 | } }) | ||
126 | |||
127 | #define __put_user_asm(x,size,addr,ret) \ | 117 | #define __put_user_asm(x,size,addr,ret) \ |
128 | __asm__ __volatile__( \ | 118 | __asm__ __volatile__( \ |
129 | "/* Put user asm, inline. */\n" \ | 119 | "/* Put user asm, inline. */\n" \ |
@@ -143,33 +133,6 @@ __asm__ __volatile__( \ | |||
143 | : "=r" (ret) : "r" (x), "r" (__m(addr)), \ | 133 | : "=r" (ret) : "r" (x), "r" (__m(addr)), \ |
144 | "i" (-EFAULT)) | 134 | "i" (-EFAULT)) |
145 | 135 | ||
146 | #define __put_user_asm_ret(x,size,addr,ret,foo) \ | ||
147 | if (__builtin_constant_p(ret) && ret == -EFAULT) \ | ||
148 | __asm__ __volatile__( \ | ||
149 | "/* Put user asm ret, inline. */\n" \ | ||
150 | "1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \ | ||
151 | ".section __ex_table,\"a\"\n\t" \ | ||
152 | ".align 4\n\t" \ | ||
153 | ".word 1b, __ret_efault\n\n\t" \ | ||
154 | ".previous\n\n\t" \ | ||
155 | : "=r" (foo) : "r" (x), "r" (__m(addr))); \ | ||
156 | else \ | ||
157 | __asm__ __volatile__( \ | ||
158 | "/* Put user asm ret, inline. */\n" \ | ||
159 | "1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \ | ||
160 | ".section .fixup,#alloc,#execinstr\n\t" \ | ||
161 | ".align 4\n" \ | ||
162 | "3:\n\t" \ | ||
163 | "ret\n\t" \ | ||
164 | " restore %%g0, %3, %%o0\n\n\t" \ | ||
165 | ".previous\n\t" \ | ||
166 | ".section __ex_table,\"a\"\n\t" \ | ||
167 | ".align 4\n\t" \ | ||
168 | ".word 1b, 3b\n\n\t" \ | ||
169 | ".previous\n\n\t" \ | ||
170 | : "=r" (foo) : "r" (x), "r" (__m(addr)), \ | ||
171 | "i" (ret)) | ||
172 | |||
173 | extern int __put_user_bad(void); | 136 | extern int __put_user_bad(void); |
174 | 137 | ||
175 | #define __get_user_nocheck(data,addr,size,type) ({ \ | 138 | #define __get_user_nocheck(data,addr,size,type) ({ \ |
@@ -289,14 +252,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size) | |||
289 | } | 252 | } |
290 | #define __copy_in_user copy_in_user | 253 | #define __copy_in_user copy_in_user |
291 | 254 | ||
292 | extern unsigned long __must_check __bzero_noasi(void __user *, unsigned long); | 255 | extern unsigned long __must_check __clear_user(void __user *, unsigned long); |
293 | |||
294 | static inline unsigned long __must_check | ||
295 | __clear_user(void __user *addr, unsigned long size) | ||
296 | { | ||
297 | |||
298 | return __bzero_noasi(addr, size); | ||
299 | } | ||
300 | 256 | ||
301 | #define clear_user __clear_user | 257 | #define clear_user __clear_user |
302 | 258 | ||
diff --git a/include/asm-sparc64/vdev.h b/include/asm-sparc64/vdev.h new file mode 100644 index 000000000000..996e6be7b976 --- /dev/null +++ b/include/asm-sparc64/vdev.h | |||
@@ -0,0 +1,16 @@ | |||
1 | /* vdev.h: SUN4V virtual device interfaces and defines. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #ifndef _SPARC64_VDEV_H | ||
7 | #define _SPARC64_VDEV_H | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | |||
11 | extern u32 sun4v_vdev_devhandle; | ||
12 | extern int sun4v_vdev_root; | ||
13 | |||
14 | extern unsigned int sun4v_vdev_device_interrupt(unsigned int); | ||
15 | |||
16 | #endif /* !(_SPARC64_VDEV_H) */ | ||
diff --git a/include/asm-sparc64/xor.h b/include/asm-sparc64/xor.h index 8b3a7e4b6062..8ce3f1813e28 100644 --- a/include/asm-sparc64/xor.h +++ b/include/asm-sparc64/xor.h | |||
@@ -2,9 +2,11 @@ | |||
2 | * include/asm-sparc64/xor.h | 2 | * include/asm-sparc64/xor.h |
3 | * | 3 | * |
4 | * High speed xor_block operation for RAID4/5 utilizing the | 4 | * High speed xor_block operation for RAID4/5 utilizing the |
5 | * UltraSparc Visual Instruction Set. | 5 | * UltraSparc Visual Instruction Set and Niagara block-init |
6 | * twin-load instructions. | ||
6 | * | 7 | * |
7 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) | 8 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) |
9 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
8 | * | 10 | * |
9 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -16,8 +18,7 @@ | |||
16 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 18 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
17 | */ | 19 | */ |
18 | 20 | ||
19 | #include <asm/pstate.h> | 21 | #include <asm/spitfire.h> |
20 | #include <asm/asi.h> | ||
21 | 22 | ||
22 | extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); | 23 | extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); |
23 | extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, | 24 | extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, |
@@ -37,4 +38,29 @@ static struct xor_block_template xor_block_VIS = { | |||
37 | .do_5 = xor_vis_5, | 38 | .do_5 = xor_vis_5, |
38 | }; | 39 | }; |
39 | 40 | ||
40 | #define XOR_TRY_TEMPLATES xor_speed(&xor_block_VIS) | 41 | extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); |
42 | extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, | ||
43 | unsigned long *); | ||
44 | extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, | ||
45 | unsigned long *, unsigned long *); | ||
46 | extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, | ||
47 | unsigned long *, unsigned long *, unsigned long *); | ||
48 | |||
49 | static struct xor_block_template xor_block_niagara = { | ||
50 | .name = "Niagara", | ||
51 | .do_2 = xor_niagara_2, | ||
52 | .do_3 = xor_niagara_3, | ||
53 | .do_4 = xor_niagara_4, | ||
54 | .do_5 = xor_niagara_5, | ||
55 | }; | ||
56 | |||
57 | #undef XOR_TRY_TEMPLATES | ||
58 | #define XOR_TRY_TEMPLATES \ | ||
59 | do { \ | ||
60 | xor_speed(&xor_block_VIS); \ | ||
61 | xor_speed(&xor_block_niagara); \ | ||
62 | } while (0) | ||
63 | |||
64 | /* For VIS for everything except Niagara. */ | ||
65 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
66 | (tlb_type == hypervisor ? &xor_block_niagara : &xor_block_VIS) | ||
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index 7198f129e135..231ba090ae34 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h | |||
@@ -206,7 +206,6 @@ struct ArcProto { | |||
206 | 206 | ||
207 | extern struct ArcProto *arc_proto_map[256], *arc_proto_default, | 207 | extern struct ArcProto *arc_proto_map[256], *arc_proto_default, |
208 | *arc_bcast_proto, *arc_raw_proto; | 208 | *arc_bcast_proto, *arc_raw_proto; |
209 | extern struct ArcProto arc_proto_null; | ||
210 | 209 | ||
211 | 210 | ||
212 | /* | 211 | /* |
@@ -334,17 +333,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); | |||
334 | #define arcnet_dump_skb(dev,skb,desc) ; | 333 | #define arcnet_dump_skb(dev,skb,desc) ; |
335 | #endif | 334 | #endif |
336 | 335 | ||
337 | #if (ARCNET_DEBUG_MAX & D_RX) || (ARCNET_DEBUG_MAX & D_TX) | ||
338 | void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, | ||
339 | int take_arcnet_lock); | ||
340 | #else | ||
341 | #define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) ; | ||
342 | #endif | ||
343 | |||
344 | void arcnet_unregister_proto(struct ArcProto *proto); | 336 | void arcnet_unregister_proto(struct ArcProto *proto); |
345 | irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); | 337 | irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); |
346 | struct net_device *alloc_arcdev(char *name); | 338 | struct net_device *alloc_arcdev(char *name); |
347 | void arcnet_rx(struct net_device *dev, int bufnum); | ||
348 | 339 | ||
349 | #endif /* __KERNEL__ */ | 340 | #endif /* __KERNEL__ */ |
350 | #endif /* _LINUX_ARCDEVICE_H */ | 341 | #endif /* _LINUX_ARCDEVICE_H */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 860e7a485a5f..56bb6a4e15f3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -58,7 +58,7 @@ struct cfq_io_context { | |||
58 | * circular list of cfq_io_contexts belonging to a process io context | 58 | * circular list of cfq_io_contexts belonging to a process io context |
59 | */ | 59 | */ |
60 | struct list_head list; | 60 | struct list_head list; |
61 | struct cfq_queue *cfqq; | 61 | struct cfq_queue *cfqq[2]; |
62 | void *key; | 62 | void *key; |
63 | 63 | ||
64 | struct io_context *ioc; | 64 | struct io_context *ioc; |
@@ -69,6 +69,8 @@ struct cfq_io_context { | |||
69 | unsigned long ttime_samples; | 69 | unsigned long ttime_samples; |
70 | unsigned long ttime_mean; | 70 | unsigned long ttime_mean; |
71 | 71 | ||
72 | struct list_head queue_list; | ||
73 | |||
72 | void (*dtor)(struct cfq_io_context *); | 74 | void (*dtor)(struct cfq_io_context *); |
73 | void (*exit)(struct cfq_io_context *); | 75 | void (*exit)(struct cfq_io_context *); |
74 | }; | 76 | }; |
@@ -404,8 +406,6 @@ struct request_queue | |||
404 | 406 | ||
405 | struct blk_queue_tag *queue_tags; | 407 | struct blk_queue_tag *queue_tags; |
406 | 408 | ||
407 | atomic_t refcnt; | ||
408 | |||
409 | unsigned int nr_sorted; | 409 | unsigned int nr_sorted; |
410 | unsigned int in_flight; | 410 | unsigned int in_flight; |
411 | 411 | ||
@@ -424,6 +424,8 @@ struct request_queue | |||
424 | struct request pre_flush_rq, bar_rq, post_flush_rq; | 424 | struct request pre_flush_rq, bar_rq, post_flush_rq; |
425 | struct request *orig_bar_rq; | 425 | struct request *orig_bar_rq; |
426 | unsigned int bi_size; | 426 | unsigned int bi_size; |
427 | |||
428 | struct mutex sysfs_lock; | ||
427 | }; | 429 | }; |
428 | 430 | ||
429 | #define RQ_INACTIVE (-1) | 431 | #define RQ_INACTIVE (-1) |
@@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void); | |||
725 | int blk_get_queue(request_queue_t *); | 727 | int blk_get_queue(request_queue_t *); |
726 | request_queue_t *blk_alloc_queue(gfp_t); | 728 | request_queue_t *blk_alloc_queue(gfp_t); |
727 | request_queue_t *blk_alloc_queue_node(gfp_t, int); | 729 | request_queue_t *blk_alloc_queue_node(gfp_t, int); |
728 | #define blk_put_queue(q) blk_cleanup_queue((q)) | 730 | extern void blk_put_queue(request_queue_t *); |
729 | 731 | ||
730 | /* | 732 | /* |
731 | * tag stuff | 733 | * tag stuff |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 18cf1f3e1184..ad133fcfb239 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -48,10 +48,17 @@ struct elevator_ops | |||
48 | 48 | ||
49 | elevator_init_fn *elevator_init_fn; | 49 | elevator_init_fn *elevator_init_fn; |
50 | elevator_exit_fn *elevator_exit_fn; | 50 | elevator_exit_fn *elevator_exit_fn; |
51 | void (*trim)(struct io_context *); | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | #define ELV_NAME_MAX (16) | 54 | #define ELV_NAME_MAX (16) |
54 | 55 | ||
56 | struct elv_fs_entry { | ||
57 | struct attribute attr; | ||
58 | ssize_t (*show)(elevator_t *, char *); | ||
59 | ssize_t (*store)(elevator_t *, const char *, size_t); | ||
60 | }; | ||
61 | |||
55 | /* | 62 | /* |
56 | * identifies an elevator type, such as AS or deadline | 63 | * identifies an elevator type, such as AS or deadline |
57 | */ | 64 | */ |
@@ -60,7 +67,7 @@ struct elevator_type | |||
60 | struct list_head list; | 67 | struct list_head list; |
61 | struct elevator_ops ops; | 68 | struct elevator_ops ops; |
62 | struct elevator_type *elevator_type; | 69 | struct elevator_type *elevator_type; |
63 | struct kobj_type *elevator_ktype; | 70 | struct elv_fs_entry *elevator_attrs; |
64 | char elevator_name[ELV_NAME_MAX]; | 71 | char elevator_name[ELV_NAME_MAX]; |
65 | struct module *elevator_owner; | 72 | struct module *elevator_owner; |
66 | }; | 73 | }; |
@@ -74,6 +81,7 @@ struct elevator_queue | |||
74 | void *elevator_data; | 81 | void *elevator_data; |
75 | struct kobject kobj; | 82 | struct kobject kobj; |
76 | struct elevator_type *elevator_type; | 83 | struct elevator_type *elevator_type; |
84 | struct mutex sysfs_lock; | ||
77 | }; | 85 | }; |
78 | 86 | ||
79 | /* | 87 | /* |
diff --git a/include/linux/if.h b/include/linux/if.h index ce627d9092ef..12c6f6d157c3 100644 --- a/include/linux/if.h +++ b/include/linux/if.h | |||
@@ -52,6 +52,9 @@ | |||
52 | /* Private (from user) interface flags (netdevice->priv_flags). */ | 52 | /* Private (from user) interface flags (netdevice->priv_flags). */ |
53 | #define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ | 53 | #define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ |
54 | #define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ | 54 | #define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ |
55 | #define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */ | ||
56 | #define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */ | ||
57 | #define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. */ | ||
55 | 58 | ||
56 | #define IF_GET_IFACE 0x0001 /* for querying only */ | 59 | #define IF_GET_IFACE 0x0001 /* for querying only */ |
57 | #define IF_GET_PROTO 0x0002 | 60 | #define IF_GET_PROTO 0x0002 |
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 7a92c1ce1457..ab08f35cbc35 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -61,6 +61,7 @@ | |||
61 | #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ | 61 | #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ |
62 | #define ETH_P_IPX 0x8137 /* IPX over DIX */ | 62 | #define ETH_P_IPX 0x8137 /* IPX over DIX */ |
63 | #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ | 63 | #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ |
64 | #define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */ | ||
64 | #define ETH_P_WCCP 0x883E /* Web-cache coordination protocol | 65 | #define ETH_P_WCCP 0x883E /* Web-cache coordination protocol |
65 | * defined in draft-wilson-wrec-wccp-v2-00.txt */ | 66 | * defined in draft-wilson-wrec-wccp-v2-00.txt */ |
66 | #define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ | 67 | #define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ |
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 0b08cd692201..955d3069d727 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h | |||
@@ -1214,6 +1214,7 @@ struct mv64xxx_i2c_pdata { | |||
1214 | #define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 | 1214 | #define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 |
1215 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) | 1215 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) |
1216 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) | 1216 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) |
1217 | #define MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED (1<<9) | ||
1217 | #define MV643XX_ETH_FORCE_LINK_FAIL 0 | 1218 | #define MV643XX_ETH_FORCE_LINK_FAIL 0 |
1218 | #define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) | 1219 | #define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) |
1219 | #define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 | 1220 | #define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 |
@@ -1243,6 +1244,8 @@ struct mv64xxx_i2c_pdata { | |||
1243 | #define MV643XX_ETH_SET_MII_SPEED_TO_10 0 | 1244 | #define MV643XX_ETH_SET_MII_SPEED_TO_10 0 |
1244 | #define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) | 1245 | #define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) |
1245 | 1246 | ||
1247 | #define MV643XX_ETH_MAX_RX_PACKET_MASK (0x7<<17) | ||
1248 | |||
1246 | #define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ | 1249 | #define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ |
1247 | MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ | 1250 | MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ |
1248 | MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ | 1251 | MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ |
@@ -1285,23 +1288,15 @@ struct mv64xxx_i2c_pdata { | |||
1285 | #define MV643XX_ETH_NAME "mv643xx_eth" | 1288 | #define MV643XX_ETH_NAME "mv643xx_eth" |
1286 | 1289 | ||
1287 | struct mv643xx_eth_platform_data { | 1290 | struct mv643xx_eth_platform_data { |
1288 | /* | ||
1289 | * Non-values for mac_addr, phy_addr, port_config, etc. | ||
1290 | * override the default value. Setting the corresponding | ||
1291 | * force_* field, causes the default value to be overridden | ||
1292 | * even when zero. | ||
1293 | */ | ||
1294 | unsigned int force_phy_addr:1; | ||
1295 | unsigned int force_port_config:1; | ||
1296 | unsigned int force_port_config_extend:1; | ||
1297 | unsigned int force_port_sdma_config:1; | ||
1298 | unsigned int force_port_serial_control:1; | ||
1299 | int phy_addr; | ||
1300 | char *mac_addr; /* pointer to mac address */ | 1291 | char *mac_addr; /* pointer to mac address */ |
1301 | u32 port_config; | 1292 | u16 force_phy_addr; /* force override if phy_addr == 0 */ |
1302 | u32 port_config_extend; | 1293 | u16 phy_addr; |
1303 | u32 port_sdma_config; | 1294 | |
1304 | u32 port_serial_control; | 1295 | /* If speed is 0, then speed and duplex are autonegotiated. */ |
1296 | int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */ | ||
1297 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ | ||
1298 | |||
1299 | /* non-zero values of the following fields override defaults */ | ||
1305 | u32 tx_queue_size; | 1300 | u32 tx_queue_size; |
1306 | u32 rx_queue_size; | 1301 | u32 rx_queue_size; |
1307 | u32 tx_sram_addr; | 1302 | u32 tx_sram_addr; |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4041122dabfc..57abcea1cb5d 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -127,6 +127,9 @@ | |||
127 | /* Hilscher netx */ | 127 | /* Hilscher netx */ |
128 | #define PORT_NETX 71 | 128 | #define PORT_NETX 71 |
129 | 129 | ||
130 | /* SUN4V Hypervisor Console */ | ||
131 | #define PORT_SUNHV 72 | ||
132 | |||
130 | #ifdef __KERNEL__ | 133 | #ifdef __KERNEL__ |
131 | 134 | ||
132 | #include <linux/config.h> | 135 | #include <linux/config.h> |
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index 9a92aef8b0b2..4725ff861c57 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h | |||
@@ -220,6 +220,7 @@ struct ieee80211_snap_hdr { | |||
220 | /* Authentication algorithms */ | 220 | /* Authentication algorithms */ |
221 | #define WLAN_AUTH_OPEN 0 | 221 | #define WLAN_AUTH_OPEN 0 |
222 | #define WLAN_AUTH_SHARED_KEY 1 | 222 | #define WLAN_AUTH_SHARED_KEY 1 |
223 | #define WLAN_AUTH_LEAP 2 | ||
223 | 224 | ||
224 | #define WLAN_AUTH_CHALLENGE_LEN 128 | 225 | #define WLAN_AUTH_CHALLENGE_LEN 128 |
225 | 226 | ||
@@ -299,6 +300,23 @@ enum ieee80211_reasoncode { | |||
299 | WLAN_REASON_CIPHER_SUITE_REJECTED = 24, | 300 | WLAN_REASON_CIPHER_SUITE_REJECTED = 24, |
300 | }; | 301 | }; |
301 | 302 | ||
303 | /* Action categories - 802.11h */ | ||
304 | enum ieee80211_actioncategories { | ||
305 | WLAN_ACTION_SPECTRUM_MGMT = 0, | ||
306 | /* Reserved 1-127 */ | ||
307 | /* Error 128-255 */ | ||
308 | }; | ||
309 | |||
310 | /* Action details - 802.11h */ | ||
311 | enum ieee80211_actiondetails { | ||
312 | WLAN_ACTION_CATEGORY_MEASURE_REQUEST = 0, | ||
313 | WLAN_ACTION_CATEGORY_MEASURE_REPORT = 1, | ||
314 | WLAN_ACTION_CATEGORY_TPC_REQUEST = 2, | ||
315 | WLAN_ACTION_CATEGORY_TPC_REPORT = 3, | ||
316 | WLAN_ACTION_CATEGORY_CHANNEL_SWITCH = 4, | ||
317 | /* 5 - 255 Reserved */ | ||
318 | }; | ||
319 | |||
302 | #define IEEE80211_STATMASK_SIGNAL (1<<0) | 320 | #define IEEE80211_STATMASK_SIGNAL (1<<0) |
303 | #define IEEE80211_STATMASK_RSSI (1<<1) | 321 | #define IEEE80211_STATMASK_RSSI (1<<1) |
304 | #define IEEE80211_STATMASK_NOISE (1<<2) | 322 | #define IEEE80211_STATMASK_NOISE (1<<2) |
@@ -377,6 +395,8 @@ struct ieee80211_rx_stats { | |||
377 | u8 mask; | 395 | u8 mask; |
378 | u8 freq; | 396 | u8 freq; |
379 | u16 len; | 397 | u16 len; |
398 | u64 tsf; | ||
399 | u32 beacon_time; | ||
380 | }; | 400 | }; |
381 | 401 | ||
382 | /* IEEE 802.11 requires that STA supports concurrent reception of at least | 402 | /* IEEE 802.11 requires that STA supports concurrent reception of at least |
@@ -608,6 +628,28 @@ struct ieee80211_auth { | |||
608 | struct ieee80211_info_element info_element[0]; | 628 | struct ieee80211_info_element info_element[0]; |
609 | } __attribute__ ((packed)); | 629 | } __attribute__ ((packed)); |
610 | 630 | ||
631 | struct ieee80211_channel_switch { | ||
632 | u8 id; | ||
633 | u8 len; | ||
634 | u8 mode; | ||
635 | u8 channel; | ||
636 | u8 count; | ||
637 | } __attribute__ ((packed)); | ||
638 | |||
639 | struct ieee80211_action { | ||
640 | struct ieee80211_hdr_3addr header; | ||
641 | u8 category; | ||
642 | u8 action; | ||
643 | union { | ||
644 | struct ieee80211_action_exchange { | ||
645 | u8 token; | ||
646 | struct ieee80211_info_element info_element[0]; | ||
647 | } exchange; | ||
648 | struct ieee80211_channel_switch channel_switch; | ||
649 | |||
650 | } format; | ||
651 | } __attribute__ ((packed)); | ||
652 | |||
611 | struct ieee80211_disassoc { | 653 | struct ieee80211_disassoc { |
612 | struct ieee80211_hdr_3addr header; | 654 | struct ieee80211_hdr_3addr header; |
613 | __le16 reason; | 655 | __le16 reason; |
@@ -692,7 +734,15 @@ struct ieee80211_txb { | |||
692 | /* QoS structure */ | 734 | /* QoS structure */ |
693 | #define NETWORK_HAS_QOS_PARAMETERS (1<<3) | 735 | #define NETWORK_HAS_QOS_PARAMETERS (1<<3) |
694 | #define NETWORK_HAS_QOS_INFORMATION (1<<4) | 736 | #define NETWORK_HAS_QOS_INFORMATION (1<<4) |
695 | #define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION) | 737 | #define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \ |
738 | NETWORK_HAS_QOS_INFORMATION) | ||
739 | |||
740 | /* 802.11h */ | ||
741 | #define NETWORK_HAS_POWER_CONSTRAINT (1<<5) | ||
742 | #define NETWORK_HAS_CSA (1<<6) | ||
743 | #define NETWORK_HAS_QUIET (1<<7) | ||
744 | #define NETWORK_HAS_IBSS_DFS (1<<8) | ||
745 | #define NETWORK_HAS_TPC_REPORT (1<<9) | ||
696 | 746 | ||
697 | #define QOS_QUEUE_NUM 4 | 747 | #define QOS_QUEUE_NUM 4 |
698 | #define QOS_OUI_LEN 3 | 748 | #define QOS_OUI_LEN 3 |
@@ -748,6 +798,91 @@ struct ieee80211_tim_parameters { | |||
748 | 798 | ||
749 | /*******************************************************/ | 799 | /*******************************************************/ |
750 | 800 | ||
801 | enum { /* ieee80211_basic_report.map */ | ||
802 | IEEE80211_BASIC_MAP_BSS = (1 << 0), | ||
803 | IEEE80211_BASIC_MAP_OFDM = (1 << 1), | ||
804 | IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), | ||
805 | IEEE80211_BASIC_MAP_RADAR = (1 << 3), | ||
806 | IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), | ||
807 | /* Bits 5-7 are reserved */ | ||
808 | |||
809 | }; | ||
810 | struct ieee80211_basic_report { | ||
811 | u8 channel; | ||
812 | __le64 start_time; | ||
813 | __le16 duration; | ||
814 | u8 map; | ||
815 | } __attribute__ ((packed)); | ||
816 | |||
817 | enum { /* ieee80211_measurement_request.mode */ | ||
818 | /* Bit 0 is reserved */ | ||
819 | IEEE80211_MEASUREMENT_ENABLE = (1 << 1), | ||
820 | IEEE80211_MEASUREMENT_REQUEST = (1 << 2), | ||
821 | IEEE80211_MEASUREMENT_REPORT = (1 << 3), | ||
822 | /* Bits 4-7 are reserved */ | ||
823 | }; | ||
824 | |||
825 | enum { | ||
826 | IEEE80211_REPORT_BASIC = 0, /* required */ | ||
827 | IEEE80211_REPORT_CCA = 1, /* optional */ | ||
828 | IEEE80211_REPORT_RPI = 2, /* optional */ | ||
829 | /* 3-255 reserved */ | ||
830 | }; | ||
831 | |||
832 | struct ieee80211_measurement_params { | ||
833 | u8 channel; | ||
834 | __le64 start_time; | ||
835 | __le16 duration; | ||
836 | } __attribute__ ((packed)); | ||
837 | |||
838 | struct ieee80211_measurement_request { | ||
839 | struct ieee80211_info_element ie; | ||
840 | u8 token; | ||
841 | u8 mode; | ||
842 | u8 type; | ||
843 | struct ieee80211_measurement_params params[0]; | ||
844 | } __attribute__ ((packed)); | ||
845 | |||
846 | struct ieee80211_measurement_report { | ||
847 | struct ieee80211_info_element ie; | ||
848 | u8 token; | ||
849 | u8 mode; | ||
850 | u8 type; | ||
851 | union { | ||
852 | struct ieee80211_basic_report basic[0]; | ||
853 | } u; | ||
854 | } __attribute__ ((packed)); | ||
855 | |||
856 | struct ieee80211_tpc_report { | ||
857 | u8 transmit_power; | ||
858 | u8 link_margin; | ||
859 | } __attribute__ ((packed)); | ||
860 | |||
861 | struct ieee80211_channel_map { | ||
862 | u8 channel; | ||
863 | u8 map; | ||
864 | } __attribute__ ((packed)); | ||
865 | |||
866 | struct ieee80211_ibss_dfs { | ||
867 | struct ieee80211_info_element ie; | ||
868 | u8 owner[ETH_ALEN]; | ||
869 | u8 recovery_interval; | ||
870 | struct ieee80211_channel_map channel_map[0]; | ||
871 | }; | ||
872 | |||
873 | struct ieee80211_csa { | ||
874 | u8 mode; | ||
875 | u8 channel; | ||
876 | u8 count; | ||
877 | } __attribute__ ((packed)); | ||
878 | |||
879 | struct ieee80211_quiet { | ||
880 | u8 count; | ||
881 | u8 period; | ||
882 | u8 duration; | ||
883 | u8 offset; | ||
884 | } __attribute__ ((packed)); | ||
885 | |||
751 | struct ieee80211_network { | 886 | struct ieee80211_network { |
752 | /* These entries are used to identify a unique network */ | 887 | /* These entries are used to identify a unique network */ |
753 | u8 bssid[ETH_ALEN]; | 888 | u8 bssid[ETH_ALEN]; |
@@ -767,7 +902,7 @@ struct ieee80211_network { | |||
767 | u8 rates_ex_len; | 902 | u8 rates_ex_len; |
768 | unsigned long last_scanned; | 903 | unsigned long last_scanned; |
769 | u8 mode; | 904 | u8 mode; |
770 | u8 flags; | 905 | u32 flags; |
771 | u32 last_associate; | 906 | u32 last_associate; |
772 | u32 time_stamp[2]; | 907 | u32 time_stamp[2]; |
773 | u16 beacon_interval; | 908 | u16 beacon_interval; |
@@ -779,6 +914,25 @@ struct ieee80211_network { | |||
779 | u8 rsn_ie[MAX_WPA_IE_LEN]; | 914 | u8 rsn_ie[MAX_WPA_IE_LEN]; |
780 | size_t rsn_ie_len; | 915 | size_t rsn_ie_len; |
781 | struct ieee80211_tim_parameters tim; | 916 | struct ieee80211_tim_parameters tim; |
917 | |||
918 | /* 802.11h info */ | ||
919 | |||
920 | /* Power Constraint - mandatory if spctrm mgmt required */ | ||
921 | u8 power_constraint; | ||
922 | |||
923 | /* TPC Report - mandatory if spctrm mgmt required */ | ||
924 | struct ieee80211_tpc_report tpc_report; | ||
925 | |||
926 | /* IBSS DFS - mandatory if spctrm mgmt required and IBSS | ||
927 | * NOTE: This is variable length and so must be allocated dynamically */ | ||
928 | struct ieee80211_ibss_dfs *ibss_dfs; | ||
929 | |||
930 | /* Channel Switch Announcement - optional if spctrm mgmt required */ | ||
931 | struct ieee80211_csa csa; | ||
932 | |||
933 | /* Quiet - optional if spctrm mgmt required */ | ||
934 | struct ieee80211_quiet quiet; | ||
935 | |||
782 | struct list_head list; | 936 | struct list_head list; |
783 | }; | 937 | }; |
784 | 938 | ||
@@ -924,7 +1078,10 @@ struct ieee80211_device { | |||
924 | int (*handle_auth) (struct net_device * dev, | 1078 | int (*handle_auth) (struct net_device * dev, |
925 | struct ieee80211_auth * auth); | 1079 | struct ieee80211_auth * auth); |
926 | int (*handle_deauth) (struct net_device * dev, | 1080 | int (*handle_deauth) (struct net_device * dev, |
927 | struct ieee80211_auth * auth); | 1081 | struct ieee80211_deauth * auth); |
1082 | int (*handle_action) (struct net_device * dev, | ||
1083 | struct ieee80211_action * action, | ||
1084 | struct ieee80211_rx_stats * stats); | ||
928 | int (*handle_disassoc) (struct net_device * dev, | 1085 | int (*handle_disassoc) (struct net_device * dev, |
929 | struct ieee80211_disassoc * assoc); | 1086 | struct ieee80211_disassoc * assoc); |
930 | int (*handle_beacon) (struct net_device * dev, | 1087 | int (*handle_beacon) (struct net_device * dev, |
@@ -1093,6 +1250,7 @@ extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
1093 | extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, | 1250 | extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, |
1094 | struct ieee80211_hdr_4addr *header, | 1251 | struct ieee80211_hdr_4addr *header, |
1095 | struct ieee80211_rx_stats *stats); | 1252 | struct ieee80211_rx_stats *stats); |
1253 | extern void ieee80211_network_reset(struct ieee80211_network *network); | ||
1096 | 1254 | ||
1097 | /* ieee80211_geo.c */ | 1255 | /* ieee80211_geo.c */ |
1098 | extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device | 1256 | extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device |
@@ -1105,6 +1263,11 @@ extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee, | |||
1105 | extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, | 1263 | extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, |
1106 | u8 channel); | 1264 | u8 channel); |
1107 | extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); | 1265 | extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); |
1266 | extern u8 ieee80211_get_channel_flags(struct ieee80211_device *ieee, | ||
1267 | u8 channel); | ||
1268 | extern const struct ieee80211_channel *ieee80211_get_channel(struct | ||
1269 | ieee80211_device | ||
1270 | *ieee, u8 channel); | ||
1108 | 1271 | ||
1109 | /* ieee80211_wx.c */ | 1272 | /* ieee80211_wx.c */ |
1110 | extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, | 1273 | extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, |
@@ -1122,6 +1285,14 @@ extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, | |||
1122 | extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, | 1285 | extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, |
1123 | struct iw_request_info *info, | 1286 | struct iw_request_info *info, |
1124 | union iwreq_data *wrqu, char *extra); | 1287 | union iwreq_data *wrqu, char *extra); |
1288 | extern int ieee80211_wx_set_auth(struct net_device *dev, | ||
1289 | struct iw_request_info *info, | ||
1290 | union iwreq_data *wrqu, | ||
1291 | char *extra); | ||
1292 | extern int ieee80211_wx_get_auth(struct net_device *dev, | ||
1293 | struct iw_request_info *info, | ||
1294 | union iwreq_data *wrqu, | ||
1295 | char *extra); | ||
1125 | 1296 | ||
1126 | static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) | 1297 | static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) |
1127 | { | 1298 | { |
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h index cd82c3e998e4..eb476414fd72 100644 --- a/include/net/ieee80211_crypt.h +++ b/include/net/ieee80211_crypt.h | |||
@@ -47,7 +47,8 @@ struct ieee80211_crypto_ops { | |||
47 | /* deinitialize crypto context and free allocated private data */ | 47 | /* deinitialize crypto context and free allocated private data */ |
48 | void (*deinit) (void *priv); | 48 | void (*deinit) (void *priv); |
49 | 49 | ||
50 | int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv); | 50 | int (*build_iv) (struct sk_buff * skb, int hdr_len, |
51 | u8 *key, int keylen, void *priv); | ||
51 | 52 | ||
52 | /* encrypt/decrypt return < 0 on error or >= 0 on success. The return | 53 | /* encrypt/decrypt return < 0 on error or >= 0 on success. The return |
53 | * value from decrypt_mpdu is passed as the keyidx value for | 54 | * value from decrypt_mpdu is passed as the keyidx value for |
diff --git a/kernel/exit.c b/kernel/exit.c index 531aadca5530..d1e8d500a7e1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -807,8 +807,6 @@ fastcall NORET_TYPE void do_exit(long code) | |||
807 | panic("Attempted to kill the idle task!"); | 807 | panic("Attempted to kill the idle task!"); |
808 | if (unlikely(tsk->pid == 1)) | 808 | if (unlikely(tsk->pid == 1)) |
809 | panic("Attempted to kill init!"); | 809 | panic("Attempted to kill init!"); |
810 | if (tsk->io_context) | ||
811 | exit_io_context(); | ||
812 | 810 | ||
813 | if (unlikely(current->ptrace & PT_TRACE_EXIT)) { | 811 | if (unlikely(current->ptrace & PT_TRACE_EXIT)) { |
814 | current->ptrace_message = code; | 812 | current->ptrace_message = code; |
@@ -822,6 +820,8 @@ fastcall NORET_TYPE void do_exit(long code) | |||
822 | if (unlikely(tsk->flags & PF_EXITING)) { | 820 | if (unlikely(tsk->flags & PF_EXITING)) { |
823 | printk(KERN_ALERT | 821 | printk(KERN_ALERT |
824 | "Fixing recursive fault but reboot is needed!\n"); | 822 | "Fixing recursive fault but reboot is needed!\n"); |
823 | if (tsk->io_context) | ||
824 | exit_io_context(); | ||
825 | set_current_state(TASK_UNINTERRUPTIBLE); | 825 | set_current_state(TASK_UNINTERRUPTIBLE); |
826 | schedule(); | 826 | schedule(); |
827 | } | 827 | } |
@@ -881,6 +881,9 @@ fastcall NORET_TYPE void do_exit(long code) | |||
881 | */ | 881 | */ |
882 | mutex_debug_check_no_locks_held(tsk); | 882 | mutex_debug_check_no_locks_held(tsk); |
883 | 883 | ||
884 | if (tsk->io_context) | ||
885 | exit_io_context(); | ||
886 | |||
884 | /* PF_DEAD causes final put_task_struct after we schedule. */ | 887 | /* PF_DEAD causes final put_task_struct after we schedule. */ |
885 | preempt_disable(); | 888 | preempt_disable(); |
886 | BUG_ON(tsk->flags & PF_DEAD); | 889 | BUG_ON(tsk->flags & PF_DEAD); |
diff --git a/net/Kconfig b/net/Kconfig index 5126f58d9c44..4193cdcd3ae7 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -224,6 +224,9 @@ source "net/irda/Kconfig" | |||
224 | source "net/bluetooth/Kconfig" | 224 | source "net/bluetooth/Kconfig" |
225 | source "net/ieee80211/Kconfig" | 225 | source "net/ieee80211/Kconfig" |
226 | 226 | ||
227 | config WIRELESS_EXT | ||
228 | bool | ||
229 | |||
227 | endif # if NET | 230 | endif # if NET |
228 | endmenu # Networking | 231 | endmenu # Networking |
229 | 232 | ||
diff --git a/net/core/Makefile b/net/core/Makefile index 630da0f0579e..79fe12cced27 100644 --- a/net/core/Makefile +++ b/net/core/Makefile | |||
@@ -14,5 +14,5 @@ obj-$(CONFIG_XFRM) += flow.o | |||
14 | obj-$(CONFIG_SYSFS) += net-sysfs.o | 14 | obj-$(CONFIG_SYSFS) += net-sysfs.o |
15 | obj-$(CONFIG_NET_DIVERT) += dv.o | 15 | obj-$(CONFIG_NET_DIVERT) += dv.o |
16 | obj-$(CONFIG_NET_PKTGEN) += pktgen.o | 16 | obj-$(CONFIG_NET_PKTGEN) += pktgen.o |
17 | obj-$(CONFIG_NET_RADIO) += wireless.o | 17 | obj-$(CONFIG_WIRELESS_EXT) += wireless.o |
18 | obj-$(CONFIG_NETPOLL) += netpoll.o | 18 | obj-$(CONFIG_NETPOLL) += netpoll.o |
diff --git a/net/core/dev.c b/net/core/dev.c index 2afb0de95329..ef56c035d44e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -110,10 +110,8 @@ | |||
110 | #include <linux/netpoll.h> | 110 | #include <linux/netpoll.h> |
111 | #include <linux/rcupdate.h> | 111 | #include <linux/rcupdate.h> |
112 | #include <linux/delay.h> | 112 | #include <linux/delay.h> |
113 | #ifdef CONFIG_NET_RADIO | 113 | #include <linux/wireless.h> |
114 | #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */ | ||
115 | #include <net/iw_handler.h> | 114 | #include <net/iw_handler.h> |
116 | #endif /* CONFIG_NET_RADIO */ | ||
117 | #include <asm/current.h> | 115 | #include <asm/current.h> |
118 | 116 | ||
119 | /* | 117 | /* |
@@ -1448,8 +1446,29 @@ static inline struct net_device *skb_bond(struct sk_buff *skb) | |||
1448 | { | 1446 | { |
1449 | struct net_device *dev = skb->dev; | 1447 | struct net_device *dev = skb->dev; |
1450 | 1448 | ||
1451 | if (dev->master) | 1449 | if (dev->master) { |
1450 | /* | ||
1451 | * On bonding slaves other than the currently active | ||
1452 | * slave, suppress duplicates except for 802.3ad | ||
1453 | * ETH_P_SLOW and alb non-mcast/bcast. | ||
1454 | */ | ||
1455 | if (dev->priv_flags & IFF_SLAVE_INACTIVE) { | ||
1456 | if (dev->master->priv_flags & IFF_MASTER_ALB) { | ||
1457 | if (skb->pkt_type != PACKET_BROADCAST && | ||
1458 | skb->pkt_type != PACKET_MULTICAST) | ||
1459 | goto keep; | ||
1460 | } | ||
1461 | |||
1462 | if (dev->master->priv_flags & IFF_MASTER_8023AD && | ||
1463 | skb->protocol == __constant_htons(ETH_P_SLOW)) | ||
1464 | goto keep; | ||
1465 | |||
1466 | kfree_skb(skb); | ||
1467 | return NULL; | ||
1468 | } | ||
1469 | keep: | ||
1452 | skb->dev = dev->master; | 1470 | skb->dev = dev->master; |
1471 | } | ||
1453 | 1472 | ||
1454 | return dev; | 1473 | return dev; |
1455 | } | 1474 | } |
@@ -1593,6 +1612,9 @@ int netif_receive_skb(struct sk_buff *skb) | |||
1593 | 1612 | ||
1594 | orig_dev = skb_bond(skb); | 1613 | orig_dev = skb_bond(skb); |
1595 | 1614 | ||
1615 | if (!orig_dev) | ||
1616 | return NET_RX_DROP; | ||
1617 | |||
1596 | __get_cpu_var(netdev_rx_stat).total++; | 1618 | __get_cpu_var(netdev_rx_stat).total++; |
1597 | 1619 | ||
1598 | skb->h.raw = skb->nh.raw = skb->data; | 1620 | skb->h.raw = skb->nh.raw = skb->data; |
@@ -2028,7 +2050,7 @@ static struct file_operations softnet_seq_fops = { | |||
2028 | .release = seq_release, | 2050 | .release = seq_release, |
2029 | }; | 2051 | }; |
2030 | 2052 | ||
2031 | #ifdef WIRELESS_EXT | 2053 | #ifdef CONFIG_WIRELESS_EXT |
2032 | extern int wireless_proc_init(void); | 2054 | extern int wireless_proc_init(void); |
2033 | #else | 2055 | #else |
2034 | #define wireless_proc_init() 0 | 2056 | #define wireless_proc_init() 0 |
@@ -2582,7 +2604,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg) | |||
2582 | ret = -EFAULT; | 2604 | ret = -EFAULT; |
2583 | return ret; | 2605 | return ret; |
2584 | } | 2606 | } |
2585 | #ifdef WIRELESS_EXT | 2607 | #ifdef CONFIG_WIRELESS_EXT |
2586 | /* Take care of Wireless Extensions */ | 2608 | /* Take care of Wireless Extensions */ |
2587 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { | 2609 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { |
2588 | /* If command is `set a parameter', or | 2610 | /* If command is `set a parameter', or |
@@ -2603,7 +2625,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg) | |||
2603 | ret = -EFAULT; | 2625 | ret = -EFAULT; |
2604 | return ret; | 2626 | return ret; |
2605 | } | 2627 | } |
2606 | #endif /* WIRELESS_EXT */ | 2628 | #endif /* CONFIG_WIRELESS_EXT */ |
2607 | return -EINVAL; | 2629 | return -EINVAL; |
2608 | } | 2630 | } |
2609 | } | 2631 | } |
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index ecc9bb196abc..cb71d794a7d1 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <net/ieee80211.h> | 19 | #include <net/ieee80211.h> |
20 | 20 | ||
21 | |||
22 | MODULE_AUTHOR("Jouni Malinen"); | 21 | MODULE_AUTHOR("Jouni Malinen"); |
23 | MODULE_DESCRIPTION("HostAP crypto"); | 22 | MODULE_DESCRIPTION("HostAP crypto"); |
24 | MODULE_LICENSE("GPL"); | 23 | MODULE_LICENSE("GPL"); |
@@ -33,11 +32,11 @@ static DEFINE_SPINLOCK(ieee80211_crypto_lock); | |||
33 | 32 | ||
34 | void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) | 33 | void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) |
35 | { | 34 | { |
36 | struct ieee80211_crypt_data *entry, *next; | 35 | struct ieee80211_crypt_data *entry, *next; |
37 | unsigned long flags; | 36 | unsigned long flags; |
38 | 37 | ||
39 | spin_lock_irqsave(&ieee->lock, flags); | 38 | spin_lock_irqsave(&ieee->lock, flags); |
40 | list_for_each_entry_safe(entry, next, &ieee->crypt_deinit_list, list) { | 39 | list_for_each_entry_safe(entry, next, &ieee->crypt_deinit_list, list) { |
41 | if (atomic_read(&entry->refcnt) != 0 && !force) | 40 | if (atomic_read(&entry->refcnt) != 0 && !force) |
42 | continue; | 41 | continue; |
43 | 42 | ||
@@ -141,9 +140,9 @@ int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops) | |||
141 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); | 140 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); |
142 | return -EINVAL; | 141 | return -EINVAL; |
143 | 142 | ||
144 | found: | 143 | found: |
145 | printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm " | 144 | printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm " |
146 | "'%s'\n", ops->name); | 145 | "'%s'\n", ops->name); |
147 | list_del(&alg->list); | 146 | list_del(&alg->list); |
148 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); | 147 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); |
149 | kfree(alg); | 148 | kfree(alg); |
@@ -163,7 +162,7 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name) | |||
163 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); | 162 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); |
164 | return NULL; | 163 | return NULL; |
165 | 164 | ||
166 | found: | 165 | found: |
167 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); | 166 | spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); |
168 | return alg->ops; | 167 | return alg->ops; |
169 | } | 168 | } |
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 3840d1911f2b..78b2d13e80e3 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c | |||
@@ -190,7 +190,8 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm, | |||
190 | ieee80211_ccmp_aes_encrypt(tfm, b0, s0); | 190 | ieee80211_ccmp_aes_encrypt(tfm, b0, s0); |
191 | } | 191 | } |
192 | 192 | ||
193 | static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv) | 193 | static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, |
194 | u8 *aeskey, int keylen, void *priv) | ||
194 | { | 195 | { |
195 | struct ieee80211_ccmp_data *key = priv; | 196 | struct ieee80211_ccmp_data *key = priv; |
196 | int i; | 197 | int i; |
@@ -199,6 +200,9 @@ static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv) | |||
199 | if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) | 200 | if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) |
200 | return -1; | 201 | return -1; |
201 | 202 | ||
203 | if (aeskey != NULL && keylen >= CCMP_TK_LEN) | ||
204 | memcpy(aeskey, key->key, CCMP_TK_LEN); | ||
205 | |||
202 | pos = skb_push(skb, CCMP_HDR_LEN); | 206 | pos = skb_push(skb, CCMP_HDR_LEN); |
203 | memmove(pos, pos + CCMP_HDR_LEN, hdr_len); | 207 | memmove(pos, pos + CCMP_HDR_LEN, hdr_len); |
204 | pos += hdr_len; | 208 | pos += hdr_len; |
@@ -238,7 +242,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
238 | return -1; | 242 | return -1; |
239 | 243 | ||
240 | data_len = skb->len - hdr_len; | 244 | data_len = skb->len - hdr_len; |
241 | len = ieee80211_ccmp_hdr(skb, hdr_len, priv); | 245 | len = ieee80211_ccmp_hdr(skb, hdr_len, NULL, 0, priv); |
242 | if (len < 0) | 246 | if (len < 0) |
243 | return -1; | 247 | return -1; |
244 | 248 | ||
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index e0988320efbf..93def94c1b32 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c | |||
@@ -80,10 +80,9 @@ static void *ieee80211_tkip_init(int key_idx) | |||
80 | { | 80 | { |
81 | struct ieee80211_tkip_data *priv; | 81 | struct ieee80211_tkip_data *priv; |
82 | 82 | ||
83 | priv = kmalloc(sizeof(*priv), GFP_ATOMIC); | 83 | priv = kzalloc(sizeof(*priv), GFP_ATOMIC); |
84 | if (priv == NULL) | 84 | if (priv == NULL) |
85 | goto fail; | 85 | goto fail; |
86 | memset(priv, 0, sizeof(*priv)); | ||
87 | 86 | ||
88 | priv->key_idx = key_idx; | 87 | priv->key_idx = key_idx; |
89 | 88 | ||
@@ -271,34 +270,33 @@ static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK, | |||
271 | #endif | 270 | #endif |
272 | } | 271 | } |
273 | 272 | ||
274 | static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv) | 273 | static int ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, |
274 | u8 * rc4key, int keylen, void *priv) | ||
275 | { | 275 | { |
276 | struct ieee80211_tkip_data *tkey = priv; | 276 | struct ieee80211_tkip_data *tkey = priv; |
277 | int len; | 277 | int len; |
278 | u8 *rc4key, *pos, *icv; | 278 | u8 *pos; |
279 | struct ieee80211_hdr_4addr *hdr; | 279 | struct ieee80211_hdr_4addr *hdr; |
280 | u32 crc; | ||
281 | 280 | ||
282 | hdr = (struct ieee80211_hdr_4addr *)skb->data; | 281 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
283 | 282 | ||
284 | if (skb_headroom(skb) < 8 || skb->len < hdr_len) | 283 | if (skb_headroom(skb) < 8 || skb->len < hdr_len) |
285 | return NULL; | 284 | return -1; |
285 | |||
286 | if (rc4key == NULL || keylen < 16) | ||
287 | return -1; | ||
286 | 288 | ||
287 | if (!tkey->tx_phase1_done) { | 289 | if (!tkey->tx_phase1_done) { |
288 | tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, | 290 | tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, |
289 | tkey->tx_iv32); | 291 | tkey->tx_iv32); |
290 | tkey->tx_phase1_done = 1; | 292 | tkey->tx_phase1_done = 1; |
291 | } | 293 | } |
292 | rc4key = kmalloc(16, GFP_ATOMIC); | ||
293 | if (!rc4key) | ||
294 | return NULL; | ||
295 | tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); | 294 | tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); |
296 | 295 | ||
297 | len = skb->len - hdr_len; | 296 | len = skb->len - hdr_len; |
298 | pos = skb_push(skb, 8); | 297 | pos = skb_push(skb, 8); |
299 | memmove(pos, pos + 8, hdr_len); | 298 | memmove(pos, pos + 8, hdr_len); |
300 | pos += hdr_len; | 299 | pos += hdr_len; |
301 | icv = skb_put(skb, 4); | ||
302 | 300 | ||
303 | *pos++ = *rc4key; | 301 | *pos++ = *rc4key; |
304 | *pos++ = *(rc4key + 1); | 302 | *pos++ = *(rc4key + 1); |
@@ -309,28 +307,28 @@ static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv) | |||
309 | *pos++ = (tkey->tx_iv32 >> 16) & 0xff; | 307 | *pos++ = (tkey->tx_iv32 >> 16) & 0xff; |
310 | *pos++ = (tkey->tx_iv32 >> 24) & 0xff; | 308 | *pos++ = (tkey->tx_iv32 >> 24) & 0xff; |
311 | 309 | ||
312 | crc = ~crc32_le(~0, pos, len); | 310 | tkey->tx_iv16++; |
313 | icv[0] = crc; | 311 | if (tkey->tx_iv16 == 0) { |
314 | icv[1] = crc >> 8; | 312 | tkey->tx_phase1_done = 0; |
315 | icv[2] = crc >> 16; | 313 | tkey->tx_iv32++; |
316 | icv[3] = crc >> 24; | 314 | } |
317 | 315 | ||
318 | return rc4key; | 316 | return 8; |
319 | } | 317 | } |
320 | 318 | ||
321 | static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | 319 | static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) |
322 | { | 320 | { |
323 | struct ieee80211_tkip_data *tkey = priv; | 321 | struct ieee80211_tkip_data *tkey = priv; |
324 | int len; | 322 | int len; |
325 | const u8 *rc4key; | 323 | u8 rc4key[16], *pos, *icv; |
326 | u8 *pos; | 324 | u32 crc; |
327 | struct scatterlist sg; | 325 | struct scatterlist sg; |
328 | 326 | ||
329 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { | 327 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { |
330 | if (net_ratelimit()) { | 328 | if (net_ratelimit()) { |
331 | struct ieee80211_hdr_4addr *hdr = | 329 | struct ieee80211_hdr_4addr *hdr = |
332 | (struct ieee80211_hdr_4addr *)skb->data; | 330 | (struct ieee80211_hdr_4addr *)skb->data; |
333 | printk(KERN_DEBUG "TKIP countermeasures: dropped " | 331 | printk(KERN_DEBUG ": TKIP countermeasures: dropped " |
334 | "TX packet to " MAC_FMT "\n", | 332 | "TX packet to " MAC_FMT "\n", |
335 | MAC_ARG(hdr->addr1)); | 333 | MAC_ARG(hdr->addr1)); |
336 | } | 334 | } |
@@ -343,22 +341,23 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
343 | len = skb->len - hdr_len; | 341 | len = skb->len - hdr_len; |
344 | pos = skb->data + hdr_len; | 342 | pos = skb->data + hdr_len; |
345 | 343 | ||
346 | rc4key = ieee80211_tkip_hdr(skb, hdr_len, priv); | 344 | if ((ieee80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0) |
347 | if (!rc4key) | ||
348 | return -1; | 345 | return -1; |
349 | 346 | ||
347 | icv = skb_put(skb, 4); | ||
348 | |||
349 | crc = ~crc32_le(~0, pos, len); | ||
350 | icv[0] = crc; | ||
351 | icv[1] = crc >> 8; | ||
352 | icv[2] = crc >> 16; | ||
353 | icv[3] = crc >> 24; | ||
354 | |||
350 | crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); | 355 | crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); |
351 | sg.page = virt_to_page(pos); | 356 | sg.page = virt_to_page(pos); |
352 | sg.offset = offset_in_page(pos); | 357 | sg.offset = offset_in_page(pos); |
353 | sg.length = len + 4; | 358 | sg.length = len + 4; |
354 | crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4); | 359 | crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4); |
355 | 360 | ||
356 | tkey->tx_iv16++; | ||
357 | if (tkey->tx_iv16 == 0) { | ||
358 | tkey->tx_phase1_done = 0; | ||
359 | tkey->tx_iv32++; | ||
360 | } | ||
361 | |||
362 | return 0; | 361 | return 0; |
363 | } | 362 | } |
364 | 363 | ||
@@ -379,7 +378,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
379 | 378 | ||
380 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { | 379 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { |
381 | if (net_ratelimit()) { | 380 | if (net_ratelimit()) { |
382 | printk(KERN_DEBUG "TKIP countermeasures: dropped " | 381 | printk(KERN_DEBUG ": TKIP countermeasures: dropped " |
383 | "received packet from " MAC_FMT "\n", | 382 | "received packet from " MAC_FMT "\n", |
384 | MAC_ARG(hdr->addr2)); | 383 | MAC_ARG(hdr->addr2)); |
385 | } | 384 | } |
@@ -695,6 +694,7 @@ static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { | |||
695 | .name = "TKIP", | 694 | .name = "TKIP", |
696 | .init = ieee80211_tkip_init, | 695 | .init = ieee80211_tkip_init, |
697 | .deinit = ieee80211_tkip_deinit, | 696 | .deinit = ieee80211_tkip_deinit, |
697 | .build_iv = ieee80211_tkip_hdr, | ||
698 | .encrypt_mpdu = ieee80211_tkip_encrypt, | 698 | .encrypt_mpdu = ieee80211_tkip_encrypt, |
699 | .decrypt_mpdu = ieee80211_tkip_decrypt, | 699 | .decrypt_mpdu = ieee80211_tkip_decrypt, |
700 | .encrypt_msdu = ieee80211_michael_mic_add, | 700 | .encrypt_msdu = ieee80211_michael_mic_add, |
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index f8dca31be5dd..649e581fa565 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c | |||
@@ -76,7 +76,8 @@ static void prism2_wep_deinit(void *priv) | |||
76 | } | 76 | } |
77 | 77 | ||
78 | /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ | 78 | /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ |
79 | static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len, void *priv) | 79 | static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len, |
80 | u8 *key, int keylen, void *priv) | ||
80 | { | 81 | { |
81 | struct prism2_wep_data *wep = priv; | 82 | struct prism2_wep_data *wep = priv; |
82 | u32 klen, len; | 83 | u32 klen, len; |
@@ -131,7 +132,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
131 | return -1; | 132 | return -1; |
132 | 133 | ||
133 | /* add the IV to the frame */ | 134 | /* add the IV to the frame */ |
134 | if (prism2_wep_build_iv(skb, hdr_len, priv)) | 135 | if (prism2_wep_build_iv(skb, hdr_len, NULL, 0, priv)) |
135 | return -1; | 136 | return -1; |
136 | 137 | ||
137 | /* Copy the IV into the first 3 bytes of the key */ | 138 | /* Copy the IV into the first 3 bytes of the key */ |
diff --git a/net/ieee80211/ieee80211_geo.c b/net/ieee80211/ieee80211_geo.c index 610cc5cbc252..192243ab35ed 100644 --- a/net/ieee80211/ieee80211_geo.c +++ b/net/ieee80211/ieee80211_geo.c | |||
@@ -50,7 +50,8 @@ int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel) | |||
50 | 50 | ||
51 | /* Driver needs to initialize the geography map before using | 51 | /* Driver needs to initialize the geography map before using |
52 | * these helper functions */ | 52 | * these helper functions */ |
53 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | 53 | if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) |
54 | return 0; | ||
54 | 55 | ||
55 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | 56 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) |
56 | for (i = 0; i < ieee->geo.bg_channels; i++) | 57 | for (i = 0; i < ieee->geo.bg_channels; i++) |
@@ -58,13 +59,15 @@ int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel) | |||
58 | * this is a B only channel, we don't see it | 59 | * this is a B only channel, we don't see it |
59 | * as valid. */ | 60 | * as valid. */ |
60 | if ((ieee->geo.bg[i].channel == channel) && | 61 | if ((ieee->geo.bg[i].channel == channel) && |
62 | !(ieee->geo.bg[i].flags & IEEE80211_CH_INVALID) && | ||
61 | (!(ieee->mode & IEEE_G) || | 63 | (!(ieee->mode & IEEE_G) || |
62 | !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) | 64 | !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) |
63 | return IEEE80211_24GHZ_BAND; | 65 | return IEEE80211_24GHZ_BAND; |
64 | 66 | ||
65 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | 67 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) |
66 | for (i = 0; i < ieee->geo.a_channels; i++) | 68 | for (i = 0; i < ieee->geo.a_channels; i++) |
67 | if (ieee->geo.a[i].channel == channel) | 69 | if ((ieee->geo.a[i].channel == channel) && |
70 | !(ieee->geo.a[i].flags & IEEE80211_CH_INVALID)) | ||
68 | return IEEE80211_52GHZ_BAND; | 71 | return IEEE80211_52GHZ_BAND; |
69 | 72 | ||
70 | return 0; | 73 | return 0; |
@@ -76,7 +79,8 @@ int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel) | |||
76 | 79 | ||
77 | /* Driver needs to initialize the geography map before using | 80 | /* Driver needs to initialize the geography map before using |
78 | * these helper functions */ | 81 | * these helper functions */ |
79 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | 82 | if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) |
83 | return -1; | ||
80 | 84 | ||
81 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | 85 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) |
82 | for (i = 0; i < ieee->geo.bg_channels; i++) | 86 | for (i = 0; i < ieee->geo.bg_channels; i++) |
@@ -97,7 +101,8 @@ u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq) | |||
97 | 101 | ||
98 | /* Driver needs to initialize the geography map before using | 102 | /* Driver needs to initialize the geography map before using |
99 | * these helper functions */ | 103 | * these helper functions */ |
100 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | 104 | if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) |
105 | return 0; | ||
101 | 106 | ||
102 | freq /= 100000; | 107 | freq /= 100000; |
103 | 108 | ||
@@ -133,6 +138,41 @@ const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee) | |||
133 | return &ieee->geo; | 138 | return &ieee->geo; |
134 | } | 139 | } |
135 | 140 | ||
141 | u8 ieee80211_get_channel_flags(struct ieee80211_device * ieee, u8 channel) | ||
142 | { | ||
143 | int index = ieee80211_channel_to_index(ieee, channel); | ||
144 | |||
145 | if (index == -1) | ||
146 | return IEEE80211_CH_INVALID; | ||
147 | |||
148 | if (channel <= IEEE80211_24GHZ_CHANNELS) | ||
149 | return ieee->geo.bg[index].flags; | ||
150 | |||
151 | return ieee->geo.a[index].flags; | ||
152 | } | ||
153 | |||
154 | static const struct ieee80211_channel bad_channel = { | ||
155 | .channel = 0, | ||
156 | .flags = IEEE80211_CH_INVALID, | ||
157 | .max_power = 0, | ||
158 | }; | ||
159 | |||
160 | const struct ieee80211_channel *ieee80211_get_channel(struct ieee80211_device | ||
161 | *ieee, u8 channel) | ||
162 | { | ||
163 | int index = ieee80211_channel_to_index(ieee, channel); | ||
164 | |||
165 | if (index == -1) | ||
166 | return &bad_channel; | ||
167 | |||
168 | if (channel <= IEEE80211_24GHZ_CHANNELS) | ||
169 | return &ieee->geo.bg[index]; | ||
170 | |||
171 | return &ieee->geo.a[index]; | ||
172 | } | ||
173 | |||
174 | EXPORT_SYMBOL(ieee80211_get_channel); | ||
175 | EXPORT_SYMBOL(ieee80211_get_channel_flags); | ||
136 | EXPORT_SYMBOL(ieee80211_is_valid_channel); | 176 | EXPORT_SYMBOL(ieee80211_is_valid_channel); |
137 | EXPORT_SYMBOL(ieee80211_freq_to_channel); | 177 | EXPORT_SYMBOL(ieee80211_freq_to_channel); |
138 | EXPORT_SYMBOL(ieee80211_channel_to_index); | 178 | EXPORT_SYMBOL(ieee80211_channel_to_index); |
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 90d18b72da3d..2cb84d84f671 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c | |||
@@ -82,10 +82,28 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee) | |||
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | 84 | ||
85 | void ieee80211_network_reset(struct ieee80211_network *network) | ||
86 | { | ||
87 | if (!network) | ||
88 | return; | ||
89 | |||
90 | if (network->ibss_dfs) { | ||
91 | kfree(network->ibss_dfs); | ||
92 | network->ibss_dfs = NULL; | ||
93 | } | ||
94 | } | ||
95 | |||
85 | static inline void ieee80211_networks_free(struct ieee80211_device *ieee) | 96 | static inline void ieee80211_networks_free(struct ieee80211_device *ieee) |
86 | { | 97 | { |
98 | int i; | ||
99 | |||
87 | if (!ieee->networks) | 100 | if (!ieee->networks) |
88 | return; | 101 | return; |
102 | |||
103 | for (i = 0; i < MAX_NETWORK_COUNT; i++) | ||
104 | if (ieee->networks[i].ibss_dfs) | ||
105 | kfree(ieee->networks[i].ibss_dfs); | ||
106 | |||
89 | kfree(ieee->networks); | 107 | kfree(ieee->networks); |
90 | ieee->networks = NULL; | 108 | ieee->networks = NULL; |
91 | } | 109 | } |
@@ -195,7 +213,7 @@ void free_ieee80211(struct net_device *dev) | |||
195 | 213 | ||
196 | static int debug = 0; | 214 | static int debug = 0; |
197 | u32 ieee80211_debug_level = 0; | 215 | u32 ieee80211_debug_level = 0; |
198 | struct proc_dir_entry *ieee80211_proc = NULL; | 216 | static struct proc_dir_entry *ieee80211_proc = NULL; |
199 | 217 | ||
200 | static int show_debug_level(char *page, char **start, off_t offset, | 218 | static int show_debug_level(char *page, char **start, off_t offset, |
201 | int count, int *eof, void *data) | 219 | int count, int *eof, void *data) |
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 7ac6a7165d9c..a7f2a642a512 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -369,8 +369,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
369 | 369 | ||
370 | /* Put this code here so that we avoid duplicating it in all | 370 | /* Put this code here so that we avoid duplicating it in all |
371 | * Rx paths. - Jean II */ | 371 | * Rx paths. - Jean II */ |
372 | #ifdef CONFIG_WIRELESS_EXT | ||
372 | #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ | 373 | #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ |
373 | #ifdef CONFIG_NET_RADIO | ||
374 | /* If spy monitoring on */ | 374 | /* If spy monitoring on */ |
375 | if (ieee->spy_data.spy_number > 0) { | 375 | if (ieee->spy_data.spy_number > 0) { |
376 | struct iw_quality wstats; | 376 | struct iw_quality wstats; |
@@ -397,8 +397,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
397 | /* Update spy records */ | 397 | /* Update spy records */ |
398 | wireless_spy_update(ieee->dev, hdr->addr2, &wstats); | 398 | wireless_spy_update(ieee->dev, hdr->addr2, &wstats); |
399 | } | 399 | } |
400 | #endif /* CONFIG_NET_RADIO */ | ||
401 | #endif /* IW_WIRELESS_SPY */ | 400 | #endif /* IW_WIRELESS_SPY */ |
401 | #endif /* CONFIG_WIRELESS_EXT */ | ||
402 | 402 | ||
403 | #ifdef NOT_YET | 403 | #ifdef NOT_YET |
404 | hostap_update_rx_stats(local->ap, hdr, rx_stats); | 404 | hostap_update_rx_stats(local->ap, hdr, rx_stats); |
@@ -574,7 +574,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
574 | /* skb: hdr + (possibly fragmented) plaintext payload */ | 574 | /* skb: hdr + (possibly fragmented) plaintext payload */ |
575 | // PR: FIXME: hostap has additional conditions in the "if" below: | 575 | // PR: FIXME: hostap has additional conditions in the "if" below: |
576 | // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && | 576 | // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && |
577 | if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) { | 577 | if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) { |
578 | int flen; | 578 | int flen; |
579 | struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); | 579 | struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); |
580 | IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); | 580 | IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); |
@@ -754,7 +754,14 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
754 | memset(skb->cb, 0, sizeof(skb->cb)); | 754 | memset(skb->cb, 0, sizeof(skb->cb)); |
755 | skb->dev = dev; | 755 | skb->dev = dev; |
756 | skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ | 756 | skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ |
757 | netif_rx(skb); | 757 | if (netif_rx(skb) == NET_RX_DROP) { |
758 | /* netif_rx always succeeds, but it might drop | ||
759 | * the packet. If it drops the packet, we log that | ||
760 | * in our stats. */ | ||
761 | IEEE80211_DEBUG_DROP | ||
762 | ("RX: netif_rx dropped the packet\n"); | ||
763 | stats->rx_dropped++; | ||
764 | } | ||
758 | } | 765 | } |
759 | 766 | ||
760 | rx_exit: | 767 | rx_exit: |
@@ -930,6 +937,45 @@ static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element | |||
930 | return rc; | 937 | return rc; |
931 | } | 938 | } |
932 | 939 | ||
940 | #ifdef CONFIG_IEEE80211_DEBUG | ||
941 | #define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x | ||
942 | |||
943 | static const char *get_info_element_string(u16 id) | ||
944 | { | ||
945 | switch (id) { | ||
946 | MFIE_STRING(SSID); | ||
947 | MFIE_STRING(RATES); | ||
948 | MFIE_STRING(FH_SET); | ||
949 | MFIE_STRING(DS_SET); | ||
950 | MFIE_STRING(CF_SET); | ||
951 | MFIE_STRING(TIM); | ||
952 | MFIE_STRING(IBSS_SET); | ||
953 | MFIE_STRING(COUNTRY); | ||
954 | MFIE_STRING(HOP_PARAMS); | ||
955 | MFIE_STRING(HOP_TABLE); | ||
956 | MFIE_STRING(REQUEST); | ||
957 | MFIE_STRING(CHALLENGE); | ||
958 | MFIE_STRING(POWER_CONSTRAINT); | ||
959 | MFIE_STRING(POWER_CAPABILITY); | ||
960 | MFIE_STRING(TPC_REQUEST); | ||
961 | MFIE_STRING(TPC_REPORT); | ||
962 | MFIE_STRING(SUPP_CHANNELS); | ||
963 | MFIE_STRING(CSA); | ||
964 | MFIE_STRING(MEASURE_REQUEST); | ||
965 | MFIE_STRING(MEASURE_REPORT); | ||
966 | MFIE_STRING(QUIET); | ||
967 | MFIE_STRING(IBSS_DFS); | ||
968 | MFIE_STRING(ERP_INFO); | ||
969 | MFIE_STRING(RSN); | ||
970 | MFIE_STRING(RATES_EX); | ||
971 | MFIE_STRING(GENERIC); | ||
972 | MFIE_STRING(QOS_PARAMETER); | ||
973 | default: | ||
974 | return "UNKNOWN"; | ||
975 | } | ||
976 | } | ||
977 | #endif | ||
978 | |||
933 | static int ieee80211_parse_info_param(struct ieee80211_info_element | 979 | static int ieee80211_parse_info_param(struct ieee80211_info_element |
934 | *info_element, u16 length, | 980 | *info_element, u16 length, |
935 | struct ieee80211_network *network) | 981 | struct ieee80211_network *network) |
@@ -1040,7 +1086,9 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1040 | break; | 1086 | break; |
1041 | 1087 | ||
1042 | case MFIE_TYPE_TIM: | 1088 | case MFIE_TYPE_TIM: |
1043 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: ignored\n"); | 1089 | network->tim.tim_count = info_element->data[0]; |
1090 | network->tim.tim_period = info_element->data[1]; | ||
1091 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n"); | ||
1044 | break; | 1092 | break; |
1045 | 1093 | ||
1046 | case MFIE_TYPE_ERP_INFO: | 1094 | case MFIE_TYPE_ERP_INFO: |
@@ -1091,10 +1139,49 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1091 | printk(KERN_ERR | 1139 | printk(KERN_ERR |
1092 | "QoS Error need to parse QOS_PARAMETER IE\n"); | 1140 | "QoS Error need to parse QOS_PARAMETER IE\n"); |
1093 | break; | 1141 | break; |
1142 | /* 802.11h */ | ||
1143 | case MFIE_TYPE_POWER_CONSTRAINT: | ||
1144 | network->power_constraint = info_element->data[0]; | ||
1145 | network->flags |= NETWORK_HAS_POWER_CONSTRAINT; | ||
1146 | break; | ||
1147 | |||
1148 | case MFIE_TYPE_CSA: | ||
1149 | network->power_constraint = info_element->data[0]; | ||
1150 | network->flags |= NETWORK_HAS_CSA; | ||
1151 | break; | ||
1152 | |||
1153 | case MFIE_TYPE_QUIET: | ||
1154 | network->quiet.count = info_element->data[0]; | ||
1155 | network->quiet.period = info_element->data[1]; | ||
1156 | network->quiet.duration = info_element->data[2]; | ||
1157 | network->quiet.offset = info_element->data[3]; | ||
1158 | network->flags |= NETWORK_HAS_QUIET; | ||
1159 | break; | ||
1160 | |||
1161 | case MFIE_TYPE_IBSS_DFS: | ||
1162 | if (network->ibss_dfs) | ||
1163 | break; | ||
1164 | network->ibss_dfs = | ||
1165 | kmalloc(info_element->len, GFP_ATOMIC); | ||
1166 | if (!network->ibss_dfs) | ||
1167 | return 1; | ||
1168 | memcpy(network->ibss_dfs, info_element->data, | ||
1169 | info_element->len); | ||
1170 | network->flags |= NETWORK_HAS_IBSS_DFS; | ||
1171 | break; | ||
1172 | |||
1173 | case MFIE_TYPE_TPC_REPORT: | ||
1174 | network->tpc_report.transmit_power = | ||
1175 | info_element->data[0]; | ||
1176 | network->tpc_report.link_margin = info_element->data[1]; | ||
1177 | network->flags |= NETWORK_HAS_TPC_REPORT; | ||
1178 | break; | ||
1094 | 1179 | ||
1095 | default: | 1180 | default: |
1096 | IEEE80211_DEBUG_MGMT("unsupported IE %d\n", | 1181 | IEEE80211_DEBUG_MGMT |
1097 | info_element->id); | 1182 | ("Unsupported info element: %s (%d)\n", |
1183 | get_info_element_string(info_element->id), | ||
1184 | info_element->id); | ||
1098 | break; | 1185 | break; |
1099 | } | 1186 | } |
1100 | 1187 | ||
@@ -1110,7 +1197,9 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1110 | static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response | 1197 | static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response |
1111 | *frame, struct ieee80211_rx_stats *stats) | 1198 | *frame, struct ieee80211_rx_stats *stats) |
1112 | { | 1199 | { |
1113 | struct ieee80211_network network_resp; | 1200 | struct ieee80211_network network_resp = { |
1201 | .ibss_dfs = NULL, | ||
1202 | }; | ||
1114 | struct ieee80211_network *network = &network_resp; | 1203 | struct ieee80211_network *network = &network_resp; |
1115 | struct net_device *dev = ieee->dev; | 1204 | struct net_device *dev = ieee->dev; |
1116 | 1205 | ||
@@ -1253,7 +1342,22 @@ static void update_network(struct ieee80211_network *dst, | |||
1253 | int qos_active; | 1342 | int qos_active; |
1254 | u8 old_param; | 1343 | u8 old_param; |
1255 | 1344 | ||
1256 | memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); | 1345 | ieee80211_network_reset(dst); |
1346 | dst->ibss_dfs = src->ibss_dfs; | ||
1347 | |||
1348 | /* We only update the statistics if they were created by receiving | ||
1349 | * the network information on the actual channel the network is on. | ||
1350 | * | ||
1351 | * This keeps beacons received on neighbor channels from bringing | ||
1352 | * down the signal level of an AP. */ | ||
1353 | if (dst->channel == src->stats.received_channel) | ||
1354 | memcpy(&dst->stats, &src->stats, | ||
1355 | sizeof(struct ieee80211_rx_stats)); | ||
1356 | else | ||
1357 | IEEE80211_DEBUG_SCAN("Network " MAC_FMT " info received " | ||
1358 | "off channel (%d vs. %d)\n", MAC_ARG(src->bssid), | ||
1359 | dst->channel, src->stats.received_channel); | ||
1360 | |||
1257 | dst->capability = src->capability; | 1361 | dst->capability = src->capability; |
1258 | memcpy(dst->rates, src->rates, src->rates_len); | 1362 | memcpy(dst->rates, src->rates, src->rates_len); |
1259 | dst->rates_len = src->rates_len; | 1363 | dst->rates_len = src->rates_len; |
@@ -1269,6 +1373,7 @@ static void update_network(struct ieee80211_network *dst, | |||
1269 | dst->listen_interval = src->listen_interval; | 1373 | dst->listen_interval = src->listen_interval; |
1270 | dst->atim_window = src->atim_window; | 1374 | dst->atim_window = src->atim_window; |
1271 | dst->erp_value = src->erp_value; | 1375 | dst->erp_value = src->erp_value; |
1376 | dst->tim = src->tim; | ||
1272 | 1377 | ||
1273 | memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); | 1378 | memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); |
1274 | dst->wpa_ie_len = src->wpa_ie_len; | 1379 | dst->wpa_ie_len = src->wpa_ie_len; |
@@ -1313,7 +1418,9 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1313 | *stats) | 1418 | *stats) |
1314 | { | 1419 | { |
1315 | struct net_device *dev = ieee->dev; | 1420 | struct net_device *dev = ieee->dev; |
1316 | struct ieee80211_network network; | 1421 | struct ieee80211_network network = { |
1422 | .ibss_dfs = NULL, | ||
1423 | }; | ||
1317 | struct ieee80211_network *target; | 1424 | struct ieee80211_network *target; |
1318 | struct ieee80211_network *oldest = NULL; | 1425 | struct ieee80211_network *oldest = NULL; |
1319 | #ifdef CONFIG_IEEE80211_DEBUG | 1426 | #ifdef CONFIG_IEEE80211_DEBUG |
@@ -1386,6 +1493,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1386 | escape_essid(target->ssid, | 1493 | escape_essid(target->ssid, |
1387 | target->ssid_len), | 1494 | target->ssid_len), |
1388 | MAC_ARG(target->bssid)); | 1495 | MAC_ARG(target->bssid)); |
1496 | ieee80211_network_reset(target); | ||
1389 | } else { | 1497 | } else { |
1390 | /* Otherwise just pull from the free list */ | 1498 | /* Otherwise just pull from the free list */ |
1391 | target = list_entry(ieee->network_free_list.next, | 1499 | target = list_entry(ieee->network_free_list.next, |
@@ -1402,6 +1510,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1402 | "BEACON" : "PROBE RESPONSE"); | 1510 | "BEACON" : "PROBE RESPONSE"); |
1403 | #endif | 1511 | #endif |
1404 | memcpy(target, &network, sizeof(*target)); | 1512 | memcpy(target, &network, sizeof(*target)); |
1513 | network.ibss_dfs = NULL; | ||
1405 | list_add_tail(&target->list, &ieee->network_list); | 1514 | list_add_tail(&target->list, &ieee->network_list); |
1406 | } else { | 1515 | } else { |
1407 | IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n", | 1516 | IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n", |
@@ -1411,6 +1520,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1411 | is_beacon(beacon->header.frame_ctl) ? | 1520 | is_beacon(beacon->header.frame_ctl) ? |
1412 | "BEACON" : "PROBE RESPONSE"); | 1521 | "BEACON" : "PROBE RESPONSE"); |
1413 | update_network(target, &network); | 1522 | update_network(target, &network); |
1523 | network.ibss_dfs = NULL; | ||
1414 | } | 1524 | } |
1415 | 1525 | ||
1416 | spin_unlock_irqrestore(&ieee->lock, flags); | 1526 | spin_unlock_irqrestore(&ieee->lock, flags); |
@@ -1495,10 +1605,43 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, | |||
1495 | header); | 1605 | header); |
1496 | break; | 1606 | break; |
1497 | 1607 | ||
1608 | case IEEE80211_STYPE_ACTION: | ||
1609 | IEEE80211_DEBUG_MGMT("ACTION\n"); | ||
1610 | if (ieee->handle_action) | ||
1611 | ieee->handle_action(ieee->dev, | ||
1612 | (struct ieee80211_action *) | ||
1613 | header, stats); | ||
1614 | break; | ||
1615 | |||
1616 | case IEEE80211_STYPE_REASSOC_REQ: | ||
1617 | IEEE80211_DEBUG_MGMT("received reassoc (%d)\n", | ||
1618 | WLAN_FC_GET_STYPE(le16_to_cpu | ||
1619 | (header->frame_ctl))); | ||
1620 | |||
1621 | IEEE80211_WARNING("%s: IEEE80211_REASSOC_REQ received\n", | ||
1622 | ieee->dev->name); | ||
1623 | if (ieee->handle_reassoc_request != NULL) | ||
1624 | ieee->handle_reassoc_request(ieee->dev, | ||
1625 | (struct ieee80211_reassoc_request *) | ||
1626 | header); | ||
1627 | break; | ||
1628 | |||
1629 | case IEEE80211_STYPE_ASSOC_REQ: | ||
1630 | IEEE80211_DEBUG_MGMT("received assoc (%d)\n", | ||
1631 | WLAN_FC_GET_STYPE(le16_to_cpu | ||
1632 | (header->frame_ctl))); | ||
1633 | |||
1634 | IEEE80211_WARNING("%s: IEEE80211_ASSOC_REQ received\n", | ||
1635 | ieee->dev->name); | ||
1636 | if (ieee->handle_assoc_request != NULL) | ||
1637 | ieee->handle_assoc_request(ieee->dev); | ||
1638 | break; | ||
1639 | |||
1498 | case IEEE80211_STYPE_DEAUTH: | 1640 | case IEEE80211_STYPE_DEAUTH: |
1499 | printk("DEAUTH from AP\n"); | 1641 | IEEE80211_DEBUG_MGMT("DEAUTH\n"); |
1500 | if (ieee->handle_deauth != NULL) | 1642 | if (ieee->handle_deauth != NULL) |
1501 | ieee->handle_deauth(ieee->dev, (struct ieee80211_auth *) | 1643 | ieee->handle_deauth(ieee->dev, |
1644 | (struct ieee80211_deauth *) | ||
1502 | header); | 1645 | header); |
1503 | break; | 1646 | break; |
1504 | default: | 1647 | default: |
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 8fdd943ebe8e..8b4332f53394 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c | |||
@@ -56,7 +56,18 @@ Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs | | |||
56 | `--------------------------------------------------| |------' | 56 | `--------------------------------------------------| |------' |
57 | Total: 28 non-data bytes `----.----' | 57 | Total: 28 non-data bytes `----.----' |
58 | | | 58 | | |
59 | .- 'Frame data' expands to <---------------------------' | 59 | .- 'Frame data' expands, if WEP enabled, to <----------' |
60 | | | ||
61 | V | ||
62 | ,-----------------------. | ||
63 | Bytes | 4 | 0-2296 | 4 | | ||
64 | |-----|-----------|-----| | ||
65 | Desc. | IV | Encrypted | ICV | | ||
66 | | | Packet | | | ||
67 | `-----| |-----' | ||
68 | `-----.-----' | ||
69 | | | ||
70 | .- 'Encrypted Packet' expands to | ||
60 | | | 71 | | |
61 | V | 72 | V |
62 | ,---------------------------------------------------. | 73 | ,---------------------------------------------------. |
@@ -65,18 +76,7 @@ Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 | | |||
65 | Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | | 76 | Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | |
66 | | DSAP | SSAP | | | | Packet | | 77 | | DSAP | SSAP | | | | Packet | |
67 | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | | 78 | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | |
68 | `-----------------------------------------| | | 79 | `---------------------------------------------------- |
69 | Total: 8 non-data bytes `----.----' | ||
70 | | | ||
71 | .- 'IP Packet' expands, if WEP enabled, to <--' | ||
72 | | | ||
73 | V | ||
74 | ,-----------------------. | ||
75 | Bytes | 4 | 0-2296 | 4 | | ||
76 | |-----|-----------|-----| | ||
77 | Desc. | IV | Encrypted | ICV | | ||
78 | | | IP Packet | | | ||
79 | `-----------------------' | ||
80 | Total: 8 non-data bytes | 80 | Total: 8 non-data bytes |
81 | 81 | ||
82 | 802.3 Ethernet Data Frame | 82 | 802.3 Ethernet Data Frame |
@@ -470,7 +470,9 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
470 | atomic_inc(&crypt->refcnt); | 470 | atomic_inc(&crypt->refcnt); |
471 | if (crypt->ops->build_iv) | 471 | if (crypt->ops->build_iv) |
472 | crypt->ops->build_iv(skb_frag, hdr_len, | 472 | crypt->ops->build_iv(skb_frag, hdr_len, |
473 | crypt->priv); | 473 | ieee->sec.keys[ieee->sec.active_key], |
474 | ieee->sec.key_sizes[ieee->sec.active_key], | ||
475 | crypt->priv); | ||
474 | atomic_dec(&crypt->refcnt); | 476 | atomic_dec(&crypt->refcnt); |
475 | } | 477 | } |
476 | 478 | ||
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index f87c6b89f845..af7f9bbfd18a 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c | |||
@@ -149,9 +149,7 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
149 | iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID | | 149 | iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID | |
150 | IW_QUAL_LEVEL_INVALID; | 150 | IW_QUAL_LEVEL_INVALID; |
151 | iwe.u.qual.qual = 0; | 151 | iwe.u.qual.qual = 0; |
152 | iwe.u.qual.level = 0; | ||
153 | } else { | 152 | } else { |
154 | iwe.u.qual.level = network->stats.rssi; | ||
155 | if (ieee->perfect_rssi == ieee->worst_rssi) | 153 | if (ieee->perfect_rssi == ieee->worst_rssi) |
156 | iwe.u.qual.qual = 100; | 154 | iwe.u.qual.qual = 100; |
157 | else | 155 | else |
@@ -179,6 +177,13 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
179 | iwe.u.qual.noise = network->stats.noise; | 177 | iwe.u.qual.noise = network->stats.noise; |
180 | } | 178 | } |
181 | 179 | ||
180 | if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL)) { | ||
181 | iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID; | ||
182 | iwe.u.qual.level = 0; | ||
183 | } else { | ||
184 | iwe.u.qual.level = network->stats.signal; | ||
185 | } | ||
186 | |||
182 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); | 187 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); |
183 | 188 | ||
184 | iwe.cmd = IWEVCUSTOM; | 189 | iwe.cmd = IWEVCUSTOM; |
@@ -188,33 +193,21 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
188 | if (iwe.u.data.length) | 193 | if (iwe.u.data.length) |
189 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 194 | start = iwe_stream_add_point(start, stop, &iwe, custom); |
190 | 195 | ||
196 | memset(&iwe, 0, sizeof(iwe)); | ||
191 | if (network->wpa_ie_len) { | 197 | if (network->wpa_ie_len) { |
192 | char buf[MAX_WPA_IE_LEN * 2 + 30]; | 198 | char buf[MAX_WPA_IE_LEN]; |
193 | 199 | memcpy(buf, network->wpa_ie, network->wpa_ie_len); | |
194 | u8 *p = buf; | 200 | iwe.cmd = IWEVGENIE; |
195 | p += sprintf(p, "wpa_ie="); | 201 | iwe.u.data.length = network->wpa_ie_len; |
196 | for (i = 0; i < network->wpa_ie_len; i++) { | ||
197 | p += sprintf(p, "%02x", network->wpa_ie[i]); | ||
198 | } | ||
199 | |||
200 | memset(&iwe, 0, sizeof(iwe)); | ||
201 | iwe.cmd = IWEVCUSTOM; | ||
202 | iwe.u.data.length = strlen(buf); | ||
203 | start = iwe_stream_add_point(start, stop, &iwe, buf); | 202 | start = iwe_stream_add_point(start, stop, &iwe, buf); |
204 | } | 203 | } |
205 | 204 | ||
205 | memset(&iwe, 0, sizeof(iwe)); | ||
206 | if (network->rsn_ie_len) { | 206 | if (network->rsn_ie_len) { |
207 | char buf[MAX_WPA_IE_LEN * 2 + 30]; | 207 | char buf[MAX_WPA_IE_LEN]; |
208 | 208 | memcpy(buf, network->rsn_ie, network->rsn_ie_len); | |
209 | u8 *p = buf; | 209 | iwe.cmd = IWEVGENIE; |
210 | p += sprintf(p, "rsn_ie="); | 210 | iwe.u.data.length = network->rsn_ie_len; |
211 | for (i = 0; i < network->rsn_ie_len; i++) { | ||
212 | p += sprintf(p, "%02x", network->rsn_ie[i]); | ||
213 | } | ||
214 | |||
215 | memset(&iwe, 0, sizeof(iwe)); | ||
216 | iwe.cmd = IWEVCUSTOM; | ||
217 | iwe.u.data.length = strlen(buf); | ||
218 | start = iwe_stream_add_point(start, stop, &iwe, buf); | 211 | start = iwe_stream_add_point(start, stop, &iwe, buf); |
219 | } | 212 | } |
220 | 213 | ||
@@ -229,6 +222,28 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
229 | if (iwe.u.data.length) | 222 | if (iwe.u.data.length) |
230 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 223 | start = iwe_stream_add_point(start, stop, &iwe, custom); |
231 | 224 | ||
225 | /* Add spectrum management information */ | ||
226 | iwe.cmd = -1; | ||
227 | p = custom; | ||
228 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: "); | ||
229 | |||
230 | if (ieee80211_get_channel_flags(ieee, network->channel) & | ||
231 | IEEE80211_CH_INVALID) { | ||
232 | iwe.cmd = IWEVCUSTOM; | ||
233 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID "); | ||
234 | } | ||
235 | |||
236 | if (ieee80211_get_channel_flags(ieee, network->channel) & | ||
237 | IEEE80211_CH_RADAR_DETECT) { | ||
238 | iwe.cmd = IWEVCUSTOM; | ||
239 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS "); | ||
240 | } | ||
241 | |||
242 | if (iwe.cmd == IWEVCUSTOM) { | ||
243 | iwe.u.data.length = p - custom; | ||
244 | start = iwe_stream_add_point(start, stop, &iwe, custom); | ||
245 | } | ||
246 | |||
232 | return start; | 247 | return start; |
233 | } | 248 | } |
234 | 249 | ||
@@ -734,9 +749,98 @@ int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, | |||
734 | return 0; | 749 | return 0; |
735 | } | 750 | } |
736 | 751 | ||
752 | int ieee80211_wx_set_auth(struct net_device *dev, | ||
753 | struct iw_request_info *info, | ||
754 | union iwreq_data *wrqu, | ||
755 | char *extra) | ||
756 | { | ||
757 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
758 | unsigned long flags; | ||
759 | int err = 0; | ||
760 | |||
761 | spin_lock_irqsave(&ieee->lock, flags); | ||
762 | |||
763 | switch (wrqu->param.flags & IW_AUTH_INDEX) { | ||
764 | case IW_AUTH_WPA_VERSION: | ||
765 | case IW_AUTH_CIPHER_PAIRWISE: | ||
766 | case IW_AUTH_CIPHER_GROUP: | ||
767 | case IW_AUTH_KEY_MGMT: | ||
768 | /* | ||
769 | * Host AP driver does not use these parameters and allows | ||
770 | * wpa_supplicant to control them internally. | ||
771 | */ | ||
772 | break; | ||
773 | case IW_AUTH_TKIP_COUNTERMEASURES: | ||
774 | break; /* FIXME */ | ||
775 | case IW_AUTH_DROP_UNENCRYPTED: | ||
776 | ieee->drop_unencrypted = !!wrqu->param.value; | ||
777 | break; | ||
778 | case IW_AUTH_80211_AUTH_ALG: | ||
779 | break; /* FIXME */ | ||
780 | case IW_AUTH_WPA_ENABLED: | ||
781 | ieee->privacy_invoked = ieee->wpa_enabled = !!wrqu->param.value; | ||
782 | break; | ||
783 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
784 | ieee->ieee802_1x = !!wrqu->param.value; | ||
785 | break; | ||
786 | case IW_AUTH_PRIVACY_INVOKED: | ||
787 | ieee->privacy_invoked = !!wrqu->param.value; | ||
788 | break; | ||
789 | default: | ||
790 | err = -EOPNOTSUPP; | ||
791 | break; | ||
792 | } | ||
793 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
794 | return err; | ||
795 | } | ||
796 | |||
797 | int ieee80211_wx_get_auth(struct net_device *dev, | ||
798 | struct iw_request_info *info, | ||
799 | union iwreq_data *wrqu, | ||
800 | char *extra) | ||
801 | { | ||
802 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
803 | unsigned long flags; | ||
804 | int err = 0; | ||
805 | |||
806 | spin_lock_irqsave(&ieee->lock, flags); | ||
807 | |||
808 | switch (wrqu->param.flags & IW_AUTH_INDEX) { | ||
809 | case IW_AUTH_WPA_VERSION: | ||
810 | case IW_AUTH_CIPHER_PAIRWISE: | ||
811 | case IW_AUTH_CIPHER_GROUP: | ||
812 | case IW_AUTH_KEY_MGMT: | ||
813 | case IW_AUTH_TKIP_COUNTERMEASURES: /* FIXME */ | ||
814 | case IW_AUTH_80211_AUTH_ALG: /* FIXME */ | ||
815 | /* | ||
816 | * Host AP driver does not use these parameters and allows | ||
817 | * wpa_supplicant to control them internally. | ||
818 | */ | ||
819 | err = -EOPNOTSUPP; | ||
820 | break; | ||
821 | case IW_AUTH_DROP_UNENCRYPTED: | ||
822 | wrqu->param.value = ieee->drop_unencrypted; | ||
823 | break; | ||
824 | case IW_AUTH_WPA_ENABLED: | ||
825 | wrqu->param.value = ieee->wpa_enabled; | ||
826 | break; | ||
827 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
828 | wrqu->param.value = ieee->ieee802_1x; | ||
829 | break; | ||
830 | default: | ||
831 | err = -EOPNOTSUPP; | ||
832 | break; | ||
833 | } | ||
834 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
835 | return err; | ||
836 | } | ||
837 | |||
737 | EXPORT_SYMBOL(ieee80211_wx_set_encodeext); | 838 | EXPORT_SYMBOL(ieee80211_wx_set_encodeext); |
738 | EXPORT_SYMBOL(ieee80211_wx_get_encodeext); | 839 | EXPORT_SYMBOL(ieee80211_wx_get_encodeext); |
739 | 840 | ||
740 | EXPORT_SYMBOL(ieee80211_wx_get_scan); | 841 | EXPORT_SYMBOL(ieee80211_wx_get_scan); |
741 | EXPORT_SYMBOL(ieee80211_wx_set_encode); | 842 | EXPORT_SYMBOL(ieee80211_wx_set_encode); |
742 | EXPORT_SYMBOL(ieee80211_wx_get_encode); | 843 | EXPORT_SYMBOL(ieee80211_wx_get_encode); |
844 | |||
845 | EXPORT_SYMBOL_GPL(ieee80211_wx_set_auth); | ||
846 | EXPORT_SYMBOL_GPL(ieee80211_wx_get_auth); | ||
diff --git a/net/socket.c b/net/socket.c index a00851f981db..7e1bdef8b09e 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -84,10 +84,7 @@ | |||
84 | #include <linux/compat.h> | 84 | #include <linux/compat.h> |
85 | #include <linux/kmod.h> | 85 | #include <linux/kmod.h> |
86 | #include <linux/audit.h> | 86 | #include <linux/audit.h> |
87 | 87 | #include <linux/wireless.h> | |
88 | #ifdef CONFIG_NET_RADIO | ||
89 | #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */ | ||
90 | #endif /* CONFIG_NET_RADIO */ | ||
91 | 88 | ||
92 | #include <asm/uaccess.h> | 89 | #include <asm/uaccess.h> |
93 | #include <asm/unistd.h> | 90 | #include <asm/unistd.h> |
@@ -840,11 +837,11 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
840 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { | 837 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { |
841 | err = dev_ioctl(cmd, argp); | 838 | err = dev_ioctl(cmd, argp); |
842 | } else | 839 | } else |
843 | #ifdef WIRELESS_EXT | 840 | #ifdef CONFIG_WIRELESS_EXT |
844 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { | 841 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { |
845 | err = dev_ioctl(cmd, argp); | 842 | err = dev_ioctl(cmd, argp); |
846 | } else | 843 | } else |
847 | #endif /* WIRELESS_EXT */ | 844 | #endif /* CONFIG_WIRELESS_EXT */ |
848 | switch (cmd) { | 845 | switch (cmd) { |
849 | case FIOSETOWN: | 846 | case FIOSETOWN: |
850 | case SIOCSPGRP: | 847 | case SIOCSPGRP: |