214 files changed, 4973 insertions, 1717 deletions
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt index 73e794f0ff09..debf6813934a 100644 --- a/Documentation/cachetlb.txt +++ b/Documentation/cachetlb.txt | |||
@@ -373,14 +373,15 @@ maps this page at its virtual address. | |||
373 | likely that you will need to flush the instruction cache | 373 | likely that you will need to flush the instruction cache |
374 | for copy_to_user_page(). | 374 | for copy_to_user_page(). |
375 | 375 | ||
376 | void flush_anon_page(struct page *page, unsigned long vmaddr) | 376 | void flush_anon_page(struct vm_area_struct *vma, struct page *page, |
377 | unsigned long vmaddr) | ||
377 | When the kernel needs to access the contents of an anonymous | 378 | When the kernel needs to access the contents of an anonymous |
378 | page, it calls this function (currently only | 379 | page, it calls this function (currently only |
379 | get_user_pages()). Note: flush_dcache_page() deliberately | 380 | get_user_pages()). Note: flush_dcache_page() deliberately |
380 | doesn't work for an anonymous page. The default | 381 | doesn't work for an anonymous page. The default |
381 | implementation is a nop (and should remain so for all coherent | 382 | implementation is a nop (and should remain so for all coherent |
382 | architectures). For incoherent architectures, it should flush | 383 | architectures). For incoherent architectures, it should flush |
383 | the cache of the page at vmaddr in the current user process. | 384 | the cache of the page at vmaddr. |
384 | 385 | ||
385 | void flush_kernel_dcache_page(struct page *page) | 386 | void flush_kernel_dcache_page(struct page *page) |
386 | When the kernel needs to modify a user page it has obtained | 387 | When the kernel needs to modify a user page it has obtained |
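As an aside on the documented contract above: the generic fallback is a nop, and only incoherent architectures override it (the ARM __flush_anon_page() added later in this patch is one such override). A minimal sketch of that default, assuming the usual ARCH_HAS_FLUSH_ANON_PAGE override convention, might look like the following; it is an illustration of the documented behaviour, not the kernel's actual header text.

    /* Illustrative sketch only: the documented default is a nop on
     * coherent architectures; incoherent ones override it and flush
     * the cache for the page mapped at vmaddr. */
    #ifndef ARCH_HAS_FLUSH_ANON_PAGE
    static inline void flush_anon_page(struct vm_area_struct *vma,
                                       struct page *page, unsigned long vmaddr)
    {
            /* nothing to do: caches are coherent */
    }
    #endif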
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 30f3c8c9c12a..fc532395d116 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -226,6 +226,23 @@ Who: Jean Delvare <khali@linux-fr.org> | |||
226 | 226 | ||
227 | --------------------------- | 227 | --------------------------- |
228 | 228 | ||
229 | What: i2c_adapter.dev | ||
230 | i2c_adapter.list | ||
231 | When: July 2007 | ||
232 | Why: Superfluous, given i2c_adapter.class_dev: | ||
233 | * The "dev" was a stand-in for the physical device node that legacy | ||
234 | drivers would not have; but now it's almost always present. Any | ||
235 | remaining legacy drivers must upgrade (they now trigger warnings). | ||
236 | * The "list" duplicates class device children. | ||
237 | The delay in removing this is so upgraded lm_sensors and libsensors | ||
238 | can get deployed. (Removal causes minor changes in the sysfs layout, | ||
239 | notably the location of the adapter type name and parenting the i2c | ||
240 | client hardware directly from their controller.) | ||
241 | Who: Jean Delvare <khali@linux-fr.org>, | ||
242 | David Brownell <dbrownell@users.sourceforge.net> | ||
243 | |||
244 | --------------------------- | ||
245 | |||
229 | What: IPv4 only connection tracking/NAT/helpers | 246 | What: IPv4 only connection tracking/NAT/helpers |
230 | When: 2.6.22 | 247 | When: 2.6.22 |
231 | Why: The new layer 3 independent connection tracking replaces the old | 248 | Why: The new layer 3 independent connection tracking replaces the old |
@@ -256,3 +273,48 @@ Why: Speedstep-centrino driver with ACPI hooks and acpi-cpufreq driver are | |||
256 | Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | 273 | Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> |
257 | 274 | ||
258 | --------------------------- | 275 | --------------------------- |
276 | |||
277 | What: ACPI hotkey driver (CONFIG_ACPI_HOTKEY) | ||
278 | When: 2.6.21 | ||
279 | Why: hotkey.c was an attempt to consolidate multiple drivers that use | ||
280 | ACPI to implement hotkeys. However, hotkeys are not documented | ||
281 | in the ACPI specification, so the drivers used undocumented | ||
282 | vendor-specific hooks and turned out to be more different than | ||
283 | the same. | ||
284 | |||
285 | Further, the keys and the features supplied by each platform | ||
286 | are different, so there will always be a need for | ||
287 | platform-specific drivers. | ||
288 | |||
289 | So the new plan is to delete hotkey.c and instead, work on the | ||
290 | platform specific drivers to try to make them look the same | ||
291 | to the user when they supply the same features. | ||
292 | |||
293 | hotkey.c has always depended on CONFIG_EXPERIMENTAL | ||
294 | |||
295 | Who: Len Brown <len.brown@intel.com> | ||
296 | |||
297 | --------------------------- | ||
298 | |||
299 | What: /sys/firmware/acpi/namespace | ||
300 | When: 2.6.21 | ||
301 | Why: The ACPI namespace is effectively the symbol list for | ||
302 | the BIOS. The device names are completely arbitrary | ||
303 | and have no place being exposed to user-space. | ||
304 | |||
305 | For those interested in the BIOS ACPI namespace, | ||
306 | the BIOS can be extracted and disassembled with acpidump | ||
307 | and iasl as documented in the pmtools package here: | ||
308 | http://ftp.kernel.org/pub/linux/kernel/people/lenb/acpi/utils | ||
309 | |||
310 | Who: Len Brown <len.brown@intel.com> | ||
311 | |||
312 | --------------------------- | ||
313 | |||
314 | What: /proc/acpi/button | ||
315 | When: August 2007 | ||
316 | Why: /proc/acpi/button has been replaced by events to the input layer | ||
317 | since 2.6.20. | ||
318 | Who: Len Brown <len.brown@intel.com> | ||
319 | |||
320 | --------------------------- | ||
diff --git a/Documentation/powerpc/mpc52xx-device-tree-bindings.txt b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt index d077d764f82b..7fb3b8a44eb6 100644 --- a/Documentation/powerpc/mpc52xx-device-tree-bindings.txt +++ b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt | |||
@@ -157,8 +157,8 @@ rtc@<addr> rtc *-rtc Real time clock | |||
157 | mscan@<addr> mscan *-mscan CAN bus controller | 157 | mscan@<addr> mscan *-mscan CAN bus controller |
158 | pci@<addr> pci *-pci PCI bridge | 158 | pci@<addr> pci *-pci PCI bridge |
159 | serial@<addr> serial *-psc-uart PSC in serial mode | 159 | serial@<addr> serial *-psc-uart PSC in serial mode |
160 | i2s@<addr> i2s *-psc-i2s PSC in i2s mode | 160 | i2s@<addr> sound *-psc-i2s PSC in i2s mode |
161 | ac97@<addr> ac97 *-psc-ac97 PSC in ac97 mode | 161 | ac97@<addr> sound *-psc-ac97 PSC in ac97 mode |
162 | spi@<addr> spi *-psc-spi PSC in spi mode | 162 | spi@<addr> spi *-psc-spi PSC in spi mode |
163 | irda@<addr> irda *-psc-irda PSC in IrDA mode | 163 | irda@<addr> irda *-psc-irda PSC in IrDA mode |
164 | spi@<addr> spi *-spi MPC52xx spi device | 164 | spi@<addr> spi *-spi MPC52xx spi device |
diff --git a/Documentation/usb/acm.txt b/Documentation/usb/acm.txt index 737d6104c3f3..17f5c2e1a570 100644 --- a/Documentation/usb/acm.txt +++ b/Documentation/usb/acm.txt | |||
@@ -46,6 +46,10 @@ Abstract Control Model (USB CDC ACM) specification. | |||
46 | 46 | ||
47 | 3Com USR ISDN Pro TA | 47 | 3Com USR ISDN Pro TA |
48 | 48 | ||
49 | Some cell phones also connect via USB. I know the following phones work: | ||
50 | |||
51 | SonyEricsson K800i | ||
52 | |||
49 | Unfortunately many modems and most ISDN TAs use proprietary interfaces and | 53 | Unfortunately many modems and most ISDN TAs use proprietary interfaces and |
50 | thus won't work with this driver. Check for ACM compliance before buying. | 54 | thus won't work with this driver. Check for ACM compliance before buying. |
51 | 55 | ||
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt index dbdcaf68e3ea..5c86ed6f0448 100644 --- a/Documentation/x86_64/boot-options.txt +++ b/Documentation/x86_64/boot-options.txt | |||
@@ -52,6 +52,10 @@ APICs | |||
52 | apicmaintimer. Useful when your PIT timer is totally | 52 | apicmaintimer. Useful when your PIT timer is totally |
53 | broken. | 53 | broken. |
54 | 54 | ||
55 | disable_8254_timer / enable_8254_timer | ||
56 | Enable interrupt 0 timer routing over the 8254 in addition to over | ||
57 | the IO-APIC. The kernel tries to set a sensible default. | ||
58 | |||
55 | Early Console | 59 | Early Console |
56 | 60 | ||
57 | syntax: earlyprintk=vga | 61 | syntax: earlyprintk=vga |
diff --git a/MAINTAINERS b/MAINTAINERS index 7f6c051cac65..8e1d7da07ce3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -207,16 +207,45 @@ S: Supported | |||
207 | ACPI | 207 | ACPI |
208 | P: Len Brown | 208 | P: Len Brown |
209 | M: len.brown@intel.com | 209 | M: len.brown@intel.com |
210 | M: lenb@kernel.org | ||
210 | L: linux-acpi@vger.kernel.org | 211 | L: linux-acpi@vger.kernel.org |
211 | W: http://acpi.sourceforge.net/ | 212 | W: http://acpi.sourceforge.net/ |
212 | T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git | 213 | T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git |
213 | S: Maintained | 214 | S: Supported |
215 | |||
216 | ACPI BATTERY DRIVERS | ||
217 | P: Vladimir P. Lebedev | ||
218 | M: vladimir.p.lebedev@intel.com | ||
219 | L: linux-acpi@vger.kernel.org | ||
220 | W: http://acpi.sourceforge.net/ | ||
221 | S: Supported | ||
222 | |||
223 | ACPI EC DRIVER | ||
224 | P: Alexey Starikovskiy | ||
225 | M: alexey.y.starikovskiy@linux.intel.com | ||
226 | L: linux-acpi@vger.kernel.org | ||
227 | W: http://acpi.sourceforge.net/ | ||
228 | S: Supported | ||
229 | |||
230 | ACPI FAN DRIVER | ||
231 | P: Konstantin A. Karasyov | ||
232 | M: konstantin.a.karasyov@intel.com | ||
233 | L: linux-acpi@vger.kernel.org | ||
234 | W: http://acpi.sourceforge.net/ | ||
235 | S: Supported | ||
214 | 236 | ||
215 | ACPI PCI HOTPLUG DRIVER | 237 | ACPI PCI HOTPLUG DRIVER |
216 | P: Kristen Carlson Accardi | 238 | P: Kristen Carlson Accardi |
217 | M: kristen.c.accardi@intel.com | 239 | M: kristen.c.accardi@intel.com |
218 | L: pcihpd-discuss@lists.sourceforge.net | 240 | L: pcihpd-discuss@lists.sourceforge.net |
219 | S: Maintained | 241 | S: Supported |
242 | |||
243 | ACPI THERMAL DRIVER | ||
244 | P: Konstantin A. Karasyov | ||
245 | M: konstantin.a.karasyov@intel.com | ||
246 | L: linux-acpi@vger.kernel.org | ||
247 | W: http://acpi.sourceforge.net/ | ||
248 | S: Supported | ||
220 | 249 | ||
221 | AD1816 SOUND DRIVER | 250 | AD1816 SOUND DRIVER |
222 | P: Thorsten Knabe | 251 | P: Thorsten Knabe |
@@ -412,20 +441,32 @@ S: Maintained | |||
412 | ARM/INTEL IOP32X ARM ARCHITECTURE | 441 | ARM/INTEL IOP32X ARM ARCHITECTURE |
413 | P: Lennert Buytenhek | 442 | P: Lennert Buytenhek |
414 | M: kernel@wantstofly.org | 443 | M: kernel@wantstofly.org |
444 | P: Dan Williams | ||
445 | M: dan.j.williams@intel.com | ||
415 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 446 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
416 | S: Maintained | 447 | S: Supported |
448 | |||
449 | ARM/INTEL IOP33X ARM ARCHITECTURE | ||
450 | P: Dan Williams | ||
451 | M: dan.j.williams@intel.com | ||
452 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | ||
453 | S: Supported | ||
417 | 454 | ||
418 | ARM/INTEL IOP13XX ARM ARCHITECTURE | 455 | ARM/INTEL IOP13XX ARM ARCHITECTURE |
419 | P: Lennert Buytenhek | 456 | P: Lennert Buytenhek |
420 | M: kernel@wantstofly.org | 457 | M: kernel@wantstofly.org |
458 | P: Dan Williams | ||
459 | M: dan.j.williams@intel.com | ||
421 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 460 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
422 | S: Maintained | 461 | S: Supported |
423 | 462 | ||
424 | ARM/INTEL IQ81342EX MACHINE SUPPORT | 463 | ARM/INTEL IQ81342EX MACHINE SUPPORT |
425 | P: Lennert Buytenhek | 464 | P: Lennert Buytenhek |
426 | M: kernel@wantstofly.org | 465 | M: kernel@wantstofly.org |
466 | P: Dan Williams | ||
467 | M: dan.j.williams@intel.com | ||
427 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 468 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
428 | S: Maintained | 469 | S: Supported |
429 | 470 | ||
430 | ARM/INTEL IXP2000 ARM ARCHITECTURE | 471 | ARM/INTEL IXP2000 ARM ARCHITECTURE |
431 | P: Lennert Buytenhek | 472 | P: Lennert Buytenhek |
@@ -448,8 +489,10 @@ S: Maintained | |||
448 | ARM/INTEL XSC3 (MANZANO) ARM CORE | 489 | ARM/INTEL XSC3 (MANZANO) ARM CORE |
449 | P: Lennert Buytenhek | 490 | P: Lennert Buytenhek |
450 | M: kernel@wantstofly.org | 491 | M: kernel@wantstofly.org |
492 | P: Dan Williams | ||
493 | M: dan.j.williams@intel.com | ||
451 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 494 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
452 | S: Maintained | 495 | S: Supported |
453 | 496 | ||
454 | ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT | 497 | ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT |
455 | P: Lennert Buytenhek | 498 | P: Lennert Buytenhek |
@@ -532,13 +575,13 @@ L: netdev@vger.kernel.org | |||
532 | S: Maintained | 575 | S: Maintained |
533 | 576 | ||
534 | ASUS ACPI EXTRAS DRIVER | 577 | ASUS ACPI EXTRAS DRIVER |
578 | P: Corentin Chary | ||
579 | M: corentincj@iksaif.net | ||
535 | P: Karol Kozimor | 580 | P: Karol Kozimor |
536 | M: sziwan@users.sourceforge.net | 581 | M: sziwan@users.sourceforge.net |
537 | P: Julien Lerouge | ||
538 | M: julien.lerouge@free.fr | ||
539 | L: acpi4asus-user@lists.sourceforge.net | 582 | L: acpi4asus-user@lists.sourceforge.net |
540 | W: http://sourceforge.net/projects/acpi4asus | 583 | W: http://sourceforge.net/projects/acpi4asus |
541 | W: http://julien.lerouge.free.fr | 584 | W: http://xf.iksaif.net/acpi4asus |
542 | S: Maintained | 585 | S: Maintained |
543 | 586 | ||
544 | ATA OVER ETHERNET DRIVER | 587 | ATA OVER ETHERNET DRIVER |
@@ -1071,7 +1114,7 @@ DOCKING STATION DRIVER | |||
1071 | P: Kristen Carlson Accardi | 1114 | P: Kristen Carlson Accardi |
1072 | M: kristen.c.accardi@intel.com | 1115 | M: kristen.c.accardi@intel.com |
1073 | L: linux-acpi@vger.kernel.org | 1116 | L: linux-acpi@vger.kernel.org |
1074 | S: Maintained | 1117 | S: Supported |
1075 | 1118 | ||
1076 | DOUBLETALK DRIVER | 1119 | DOUBLETALK DRIVER |
1077 | P: James R. Van Zandt | 1120 | P: James R. Van Zandt |
@@ -2521,7 +2564,7 @@ PCIE HOTPLUG DRIVER | |||
2521 | P: Kristen Carlson Accardi | 2564 | P: Kristen Carlson Accardi |
2522 | M: kristen.c.accardi@intel.com | 2565 | M: kristen.c.accardi@intel.com |
2523 | L: pcihpd-discuss@lists.sourceforge.net | 2566 | L: pcihpd-discuss@lists.sourceforge.net |
2524 | S: Maintained | 2567 | S: Supported |
2525 | 2568 | ||
2526 | PCMCIA SUBSYSTEM | 2569 | PCMCIA SUBSYSTEM |
2527 | P: Linux PCMCIA Team | 2570 | P: Linux PCMCIA Team |
@@ -2579,6 +2622,12 @@ P: Adam Belay | |||
2579 | M: ambx1@neo.rr.com | 2622 | M: ambx1@neo.rr.com |
2580 | S: Maintained | 2623 | S: Maintained |
2581 | 2624 | ||
2625 | PNXxxxx I2C DRIVER | ||
2626 | P: Vitaly Wool | ||
2627 | M: vitalywool@gmail.com | ||
2628 | L: i2c@lm-sensors.org | ||
2629 | S: Maintained | ||
2630 | |||
2582 | PPP PROTOCOL DRIVERS AND COMPRESSORS | 2631 | PPP PROTOCOL DRIVERS AND COMPRESSORS |
2583 | P: Paul Mackerras | 2632 | P: Paul Mackerras |
2584 | M: paulus@samba.org | 2633 | M: paulus@samba.org |
@@ -3015,7 +3064,7 @@ SHPC HOTPLUG DRIVER | |||
3015 | P: Kristen Carlson Accardi | 3064 | P: Kristen Carlson Accardi |
3016 | M: kristen.c.accardi@intel.com | 3065 | M: kristen.c.accardi@intel.com |
3017 | L: pcihpd-discuss@lists.sourceforge.net | 3066 | L: pcihpd-discuss@lists.sourceforge.net |
3018 | S: Maintained | 3067 | S: Supported |
3019 | 3068 | ||
3020 | SECURE DIGITAL HOST CONTROLLER INTERFACE DRIVER | 3069 | SECURE DIGITAL HOST CONTROLLER INTERFACE DRIVER |
3021 | P: Pierre Ossman | 3070 | P: Pierre Ossman |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 20 | 3 | SUBLEVEL = 20 |
4 | EXTRAVERSION =-rc3 | 4 | EXTRAVERSION =-rc4 |
5 | NAME = Homicidal Dwarf Hamster | 5 | NAME = Homicidal Dwarf Hamster |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 2db42b18f53f..8517c3c3eb33 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -436,7 +436,7 @@ __und_usr: | |||
436 | usr_entry | 436 | usr_entry |
437 | 437 | ||
438 | tst r3, #PSR_T_BIT @ Thumb mode? | 438 | tst r3, #PSR_T_BIT @ Thumb mode? |
439 | bne fpundefinstr @ ignore FP | 439 | bne __und_usr_unknown @ ignore FP |
440 | sub r4, r2, #4 | 440 | sub r4, r2, #4 |
441 | 441 | ||
442 | @ | 442 | @ |
@@ -448,7 +448,7 @@ __und_usr: | |||
448 | @ | 448 | @ |
449 | 1: ldrt r0, [r4] | 449 | 1: ldrt r0, [r4] |
450 | adr r9, ret_from_exception | 450 | adr r9, ret_from_exception |
451 | adr lr, fpundefinstr | 451 | adr lr, __und_usr_unknown |
452 | @ | 452 | @ |
453 | @ fallthrough to call_fpe | 453 | @ fallthrough to call_fpe |
454 | @ | 454 | @ |
@@ -476,7 +476,9 @@ __und_usr: | |||
476 | * Emulators may wish to make use of the following registers: | 476 | * Emulators may wish to make use of the following registers: |
477 | * r0 = instruction opcode. | 477 | * r0 = instruction opcode. |
478 | * r2 = PC+4 | 478 | * r2 = PC+4 |
479 | * r9 = normal "successful" return address | ||
479 | * r10 = this thread's thread_info structure. | 480 | * r10 = this thread's thread_info structure. |
481 | * lr = unrecognised instruction return address | ||
480 | */ | 482 | */ |
481 | call_fpe: | 483 | call_fpe: |
482 | tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 | 484 | tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 |
@@ -545,10 +547,12 @@ do_fpe: | |||
545 | 547 | ||
546 | .data | 548 | .data |
547 | ENTRY(fp_enter) | 549 | ENTRY(fp_enter) |
548 | .word fpundefinstr | 550 | .word no_fp |
549 | .text | 551 | .text |
550 | 552 | ||
551 | fpundefinstr: | 553 | no_fp: mov pc, lr |
554 | |||
555 | __und_usr_unknown: | ||
552 | mov r0, sp | 556 | mov r0, sp |
553 | adr lr, ret_from_exception | 557 | adr lr, ret_from_exception |
554 | b do_undefinstr | 558 | b do_undefinstr |
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 6ff5e3ff6cb5..3c8cdcfe8d4a 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <linux/timer.h> | 29 | #include <linux/timer.h> |
30 | #include <linux/irq.h> | 30 | #include <linux/irq.h> |
31 | 31 | ||
32 | #include <linux/mc146818rtc.h> | ||
33 | |||
32 | #include <asm/leds.h> | 34 | #include <asm/leds.h> |
33 | #include <asm/thread_info.h> | 35 | #include <asm/thread_info.h> |
34 | #include <asm/mach/time.h> | 36 | #include <asm/mach/time.h> |
@@ -85,6 +87,17 @@ unsigned long long __attribute__((weak)) sched_clock(void) | |||
85 | return (unsigned long long)jiffies * (1000000000 / HZ); | 87 | return (unsigned long long)jiffies * (1000000000 / HZ); |
86 | } | 88 | } |
87 | 89 | ||
90 | /* | ||
91 | * An implementation of printk_clock() independent from | ||
92 | * sched_clock(). This avoids non-bootable kernels when | ||
93 | * printk_clock is enabled. | ||
94 | */ | ||
95 | unsigned long long printk_clock(void) | ||
96 | { | ||
97 | return (unsigned long long)(jiffies - INITIAL_JIFFIES) * | ||
98 | (1000000000 / HZ); | ||
99 | } | ||
100 | |||
88 | static unsigned long next_rtc_update; | 101 | static unsigned long next_rtc_update; |
89 | 102 | ||
90 | /* | 103 | /* |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 042a12982e98..908915675edc 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
28 | #include <asm/unistd.h> | 28 | #include <asm/unistd.h> |
29 | #include <asm/traps.h> | 29 | #include <asm/traps.h> |
30 | #include <asm/io.h> | ||
30 | 31 | ||
31 | #include "ptrace.h" | 32 | #include "ptrace.h" |
32 | #include "signal.h" | 33 | #include "signal.h" |
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 628348c9f6c5..9df507d36e0b 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -202,3 +202,42 @@ void flush_dcache_page(struct page *page) | |||
202 | } | 202 | } |
203 | } | 203 | } |
204 | EXPORT_SYMBOL(flush_dcache_page); | 204 | EXPORT_SYMBOL(flush_dcache_page); |
205 | |||
206 | /* | ||
207 | * Flush an anonymous page so that users of get_user_pages() | ||
208 | * can safely access the data. The expected sequence is: | ||
209 | * | ||
210 | * get_user_pages() | ||
211 | * -> flush_anon_page | ||
212 | * memcpy() to/from page | ||
213 | * if written to page, flush_dcache_page() | ||
214 | */ | ||
215 | void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) | ||
216 | { | ||
217 | unsigned long pfn; | ||
218 | |||
219 | /* VIPT non-aliasing caches need do nothing */ | ||
220 | if (cache_is_vipt_nonaliasing()) | ||
221 | return; | ||
222 | |||
223 | /* | ||
224 | * Write back and invalidate userspace mapping. | ||
225 | */ | ||
226 | pfn = page_to_pfn(page); | ||
227 | if (cache_is_vivt()) { | ||
228 | flush_cache_page(vma, vmaddr, pfn); | ||
229 | } else { | ||
230 | /* | ||
231 | * For aliasing VIPT, we can flush an alias of the | ||
232 | * userspace address only. | ||
233 | */ | ||
234 | flush_pfn_alias(pfn, vmaddr); | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Invalidate kernel mapping. No data should be contained | ||
239 | * in this mapping of the page. FIXME: this is overkill | ||
240 | * since we actually ask for a write-back and invalidate. | ||
241 | */ | ||
242 | __cpuc_flush_dcache_page(page_address(page)); | ||
243 | } | ||
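The comment at the top of __flush_anon_page() spells out the expected call sequence; a hedged caller-side sketch of that sequence, loosely modelled on access_process_vm(), with illustrative names (tsk, mm, addr, buf, offset, bytes, write) and with locking and error handling omitted, could read:

    struct page *page;
    struct vm_area_struct *vma;
    void *maddr;

    /* get_user_pages() itself calls flush_anon_page() for anonymous pages */
    if (get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma) <= 0)
            return -EFAULT;

    maddr = kmap(page);
    if (write) {
            memcpy(maddr + offset, buf, bytes);
            flush_dcache_page(page);        /* page was written: flush it */
    } else {
            memcpy(buf, maddr + offset, bytes);
    }
    kunmap(page);
    page_cache_release(page);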
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 0d67a0a1151e..0dfee812811a 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig | |||
@@ -777,6 +777,47 @@ config CRASH_DUMP | |||
777 | PHYSICAL_START. | 777 | PHYSICAL_START. |
778 | For more details see Documentation/kdump/kdump.txt | 778 | For more details see Documentation/kdump/kdump.txt |
779 | 779 | ||
780 | config PHYSICAL_START | ||
781 | hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) | ||
782 | default "0x100000" | ||
783 | help | ||
784 | This gives the physical address where the kernel is loaded. | ||
785 | |||
786 | If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then | ||
787 | bzImage will decompress itself to the above physical address and | ||
788 | run from there. Otherwise, bzImage will run from the address where | ||
789 | it has been loaded by the boot loader and will ignore the above physical | ||
790 | address. | ||
791 | |||
792 | In normal kdump cases one does not have to set/change this option | ||
793 | as now bzImage can be compiled as a completely relocatable image | ||
794 | (CONFIG_RELOCATABLE=y) and be used to load and run from a different | ||
795 | address. This option is mainly useful for the folks who don't want | ||
796 | to use a bzImage for capturing the crash dump and want to use a | ||
797 | vmlinux instead. vmlinux is not relocatable hence a kernel needs | ||
798 | to be specifically compiled to run from a specific memory area | ||
799 | (normally a reserved region) and this option comes in handy. | ||
800 | |||
801 | So if you are using bzImage for capturing the crash dump, leave | ||
802 | the value here unchanged at 0x100000 and set CONFIG_RELOCATABLE=y. | ||
803 | Otherwise if you plan to use vmlinux for capturing the crash dump | ||
804 | change this value to start of the reserved region (Typically 16MB | ||
805 | 0x1000000). In other words, it can be set based on the "X" value as | ||
806 | specified in the "crashkernel=YM@XM" command line boot parameter | ||
807 | passed to the panic-ed kernel. Typically this parameter is set as | ||
808 | crashkernel=64M@16M. Please take a look at | ||
809 | Documentation/kdump/kdump.txt for more details about crash dumps. | ||
810 | |||
811 | Usage of bzImage for capturing the crash dump is recommended as | ||
812 | one does not have to build two kernels. Same kernel can be used | ||
813 | as production kernel and capture kernel. Above option should have | ||
814 | gone away after relocatable bzImage support is introduced. But it | ||
815 | is present because there are users out there who continue to use | ||
816 | vmlinux for dump capture. This option should go away down the | ||
817 | line. | ||
818 | |||
819 | Don't change this unless you know what you are doing. | ||
820 | |||
780 | config RELOCATABLE | 821 | config RELOCATABLE |
781 | bool "Build a relocatable kernel(EXPERIMENTAL)" | 822 | bool "Build a relocatable kernel(EXPERIMENTAL)" |
782 | depends on EXPERIMENTAL | 823 | depends on EXPERIMENTAL |
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index 094300b3a81f..cbcb2c27f48b 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c | |||
@@ -333,7 +333,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end) | |||
333 | /* | 333 | /* |
334 | * Parse Interrupt Source Override for the ACPI SCI | 334 | * Parse Interrupt Source Override for the ACPI SCI |
335 | */ | 335 | */ |
336 | static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) | 336 | static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) |
337 | { | 337 | { |
338 | if (trigger == 0) /* compatible SCI trigger is level */ | 338 | if (trigger == 0) /* compatible SCI trigger is level */ |
339 | trigger = 3; | 339 | trigger = 3; |
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c index 12e937c1ce4b..2d39f55d29a8 100644 --- a/arch/i386/kernel/acpi/cstate.c +++ b/arch/i386/kernel/acpi/cstate.c | |||
@@ -47,13 +47,13 @@ EXPORT_SYMBOL(acpi_processor_power_init_bm_check); | |||
47 | 47 | ||
48 | /* The code below handles cstate entry with monitor-mwait pair on Intel*/ | 48 | /* The code below handles cstate entry with monitor-mwait pair on Intel*/ |
49 | 49 | ||
50 | struct cstate_entry_s { | 50 | struct cstate_entry { |
51 | struct { | 51 | struct { |
52 | unsigned int eax; | 52 | unsigned int eax; |
53 | unsigned int ecx; | 53 | unsigned int ecx; |
54 | } states[ACPI_PROCESSOR_MAX_POWER]; | 54 | } states[ACPI_PROCESSOR_MAX_POWER]; |
55 | }; | 55 | }; |
56 | static struct cstate_entry_s *cpu_cstate_entry; /* per CPU ptr */ | 56 | static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ |
57 | 57 | ||
58 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | 58 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; |
59 | 59 | ||
@@ -71,7 +71,7 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | |||
71 | int acpi_processor_ffh_cstate_probe(unsigned int cpu, | 71 | int acpi_processor_ffh_cstate_probe(unsigned int cpu, |
72 | struct acpi_processor_cx *cx, struct acpi_power_register *reg) | 72 | struct acpi_processor_cx *cx, struct acpi_power_register *reg) |
73 | { | 73 | { |
74 | struct cstate_entry_s *percpu_entry; | 74 | struct cstate_entry *percpu_entry; |
75 | struct cpuinfo_x86 *c = cpu_data + cpu; | 75 | struct cpuinfo_x86 *c = cpu_data + cpu; |
76 | 76 | ||
77 | cpumask_t saved_mask; | 77 | cpumask_t saved_mask; |
@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); | |||
136 | void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) | 136 | void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) |
137 | { | 137 | { |
138 | unsigned int cpu = smp_processor_id(); | 138 | unsigned int cpu = smp_processor_id(); |
139 | struct cstate_entry_s *percpu_entry; | 139 | struct cstate_entry *percpu_entry; |
140 | 140 | ||
141 | percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); | 141 | percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); |
142 | mwait_idle_with_hints(percpu_entry->states[cx->index].eax, | 142 | mwait_idle_with_hints(percpu_entry->states[cx->index].eax, |
@@ -150,7 +150,7 @@ static int __init ffh_cstate_init(void) | |||
150 | if (c->x86_vendor != X86_VENDOR_INTEL) | 150 | if (c->x86_vendor != X86_VENDOR_INTEL) |
151 | return -1; | 151 | return -1; |
152 | 152 | ||
153 | cpu_cstate_entry = alloc_percpu(struct cstate_entry_s); | 153 | cpu_cstate_entry = alloc_percpu(struct cstate_entry); |
154 | return 0; | 154 | return 0; |
155 | } | 155 | } |
156 | 156 | ||
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 1b34c56f8123..8689d62abd4a 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c | |||
@@ -54,7 +54,7 @@ static struct cpu_dev __cpuinitdata default_cpu = { | |||
54 | .c_init = default_init, | 54 | .c_init = default_init, |
55 | .c_vendor = "Unknown", | 55 | .c_vendor = "Unknown", |
56 | }; | 56 | }; |
57 | static struct cpu_dev * this_cpu = &default_cpu; | 57 | static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu; |
58 | 58 | ||
59 | static int __init cachesize_setup(char *str) | 59 | static int __init cachesize_setup(char *str) |
60 | { | 60 | { |
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index aef39be81361..300d9b38d02e 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
@@ -227,7 +227,7 @@ static struct { | |||
227 | atomic_t count_start; | 227 | atomic_t count_start; |
228 | atomic_t count_stop; | 228 | atomic_t count_stop; |
229 | unsigned long long values[NR_CPUS]; | 229 | unsigned long long values[NR_CPUS]; |
230 | } tsc __initdata = { | 230 | } tsc __cpuinitdata = { |
231 | .start_flag = ATOMIC_INIT(0), | 231 | .start_flag = ATOMIC_INIT(0), |
232 | .count_start = ATOMIC_INIT(0), | 232 | .count_start = ATOMIC_INIT(0), |
233 | .count_stop = ATOMIC_INIT(0), | 233 | .count_stop = ATOMIC_INIT(0), |
@@ -332,7 +332,7 @@ static void __init synchronize_tsc_bp(void) | |||
332 | printk("passed.\n"); | 332 | printk("passed.\n"); |
333 | } | 333 | } |
334 | 334 | ||
335 | static void __init synchronize_tsc_ap(void) | 335 | static void __cpuinit synchronize_tsc_ap(void) |
336 | { | 336 | { |
337 | int i; | 337 | int i; |
338 | 338 | ||
diff --git a/arch/i386/kernel/trampoline.S b/arch/i386/kernel/trampoline.S index fcce0e61b0e7..2f1814c5cfd7 100644 --- a/arch/i386/kernel/trampoline.S +++ b/arch/i386/kernel/trampoline.S | |||
@@ -38,6 +38,11 @@ | |||
38 | 38 | ||
39 | .data | 39 | .data |
40 | 40 | ||
41 | /* We can free up trampoline after bootup if cpu hotplug is not supported. */ | ||
42 | #ifndef CONFIG_HOTPLUG_CPU | ||
43 | .section ".init.data","aw",@progbits | ||
44 | #endif | ||
45 | |||
41 | .code16 | 46 | .code16 |
42 | 47 | ||
43 | ENTRY(trampoline_data) | 48 | ENTRY(trampoline_data) |
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index f44a01357ada..2ef857c3ee53 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c | |||
@@ -46,5 +46,7 @@ EXPORT_SYMBOL(__strnlen_user_nocheck_asm); | |||
46 | EXPORT_SYMBOL(__strnlen_user_asm); | 46 | EXPORT_SYMBOL(__strnlen_user_asm); |
47 | 47 | ||
48 | EXPORT_SYMBOL(csum_partial); | 48 | EXPORT_SYMBOL(csum_partial); |
49 | EXPORT_SYMBOL(csum_partial_copy_nocheck); | ||
50 | EXPORT_SYMBOL(__csum_partial_copy_user); | ||
49 | 51 | ||
50 | EXPORT_SYMBOL(invalid_pte_table); | 52 | EXPORT_SYMBOL(invalid_pte_table); |
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 11aab6d6bfe5..8aa544f73a5e 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -94,10 +94,8 @@ static void c0_timer_ack(void) | |||
94 | { | 94 | { |
95 | unsigned int count; | 95 | unsigned int count; |
96 | 96 | ||
97 | #ifndef CONFIG_SOC_PNX8550 /* pnx8550 resets to zero */ | ||
98 | /* Ack this timer interrupt and set the next one. */ | 97 | /* Ack this timer interrupt and set the next one. */ |
99 | expirelo += cycles_per_jiffy; | 98 | expirelo += cycles_per_jiffy; |
100 | #endif | ||
101 | write_c0_compare(expirelo); | 99 | write_c0_compare(expirelo); |
102 | 100 | ||
103 | /* Check to see if we have missed any timer interrupts. */ | 101 | /* Check to see if we have missed any timer interrupts. */ |
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index 888b61ea12fe..989c900b8b14 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for MIPS-specific library files.. | 2 | # Makefile for MIPS-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y += csum_partial.o csum_partial_copy.o memcpy.o promlib.o \ | 5 | lib-y += csum_partial.o memcpy.o promlib.o \ |
6 | strlen_user.o strncpy_user.o strnlen_user.o uncached.o | 6 | strlen_user.o strncpy_user.o strnlen_user.o uncached.o |
7 | 7 | ||
8 | obj-y += iomap.o | 8 | obj-y += iomap.o |
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 9db357294be1..c0a77fe038be 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S | |||
@@ -8,7 +8,9 @@ | |||
8 | * Copyright (C) 1998, 1999 Ralf Baechle | 8 | * Copyright (C) 1998, 1999 Ralf Baechle |
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | 9 | * Copyright (C) 1999 Silicon Graphics, Inc. |
10 | */ | 10 | */ |
11 | #include <linux/errno.h> | ||
11 | #include <asm/asm.h> | 12 | #include <asm/asm.h> |
13 | #include <asm/asm-offsets.h> | ||
12 | #include <asm/regdef.h> | 14 | #include <asm/regdef.h> |
13 | 15 | ||
14 | #ifdef CONFIG_64BIT | 16 | #ifdef CONFIG_64BIT |
@@ -271,3 +273,443 @@ small_csumcpy: | |||
271 | jr ra | 273 | jr ra |
272 | .set noreorder | 274 | .set noreorder |
273 | END(csum_partial) | 275 | END(csum_partial) |
276 | |||
277 | |||
278 | /* | ||
279 | * checksum and copy routines based on memcpy.S | ||
280 | * | ||
281 | * csum_partial_copy_nocheck(src, dst, len, sum) | ||
282 | * __csum_partial_copy_user(src, dst, len, sum, errp) | ||
283 | * | ||
284 | * See "Spec" in memcpy.S for details. Unlike __copy_user, all | ||
285 | * functions in this file use the standard calling convention. | ||
286 | */ | ||
287 | |||
288 | #define src a0 | ||
289 | #define dst a1 | ||
290 | #define len a2 | ||
291 | #define psum a3 | ||
292 | #define sum v0 | ||
293 | #define odd t8 | ||
294 | #define errptr t9 | ||
295 | |||
296 | /* | ||
297 | * The exception handler for loads requires that: | ||
298 | * 1- AT contain the address of the byte just past the end of the source | ||
299 | * of the copy, | ||
300 | * 2- src_entry <= src < AT, and | ||
301 | * 3- (dst - src) == (dst_entry - src_entry), | ||
302 | * The _entry suffix denotes values when __copy_user was called. | ||
303 | * | ||
304 | * (1) is set up by __csum_partial_copy_from_user and maintained by | ||
305 | * not writing AT in __csum_partial_copy | ||
306 | * (2) is met by incrementing src by the number of bytes copied | ||
307 | * (3) is met by not doing loads between a pair of increments of dst and src | ||
308 | * | ||
309 | * The exception handlers for stores store -EFAULT to errptr and return. | ||
310 | * These handlers do not need to overwrite any data. | ||
311 | */ | ||
312 | |||
313 | #define EXC(inst_reg,addr,handler) \ | ||
314 | 9: inst_reg, addr; \ | ||
315 | .section __ex_table,"a"; \ | ||
316 | PTR 9b, handler; \ | ||
317 | .previous | ||
318 | |||
319 | #ifdef USE_DOUBLE | ||
320 | |||
321 | #define LOAD ld | ||
322 | #define LOADL ldl | ||
323 | #define LOADR ldr | ||
324 | #define STOREL sdl | ||
325 | #define STORER sdr | ||
326 | #define STORE sd | ||
327 | #define ADD daddu | ||
328 | #define SUB dsubu | ||
329 | #define SRL dsrl | ||
330 | #define SLL dsll | ||
331 | #define SLLV dsllv | ||
332 | #define SRLV dsrlv | ||
333 | #define NBYTES 8 | ||
334 | #define LOG_NBYTES 3 | ||
335 | |||
336 | #else | ||
337 | |||
338 | #define LOAD lw | ||
339 | #define LOADL lwl | ||
340 | #define LOADR lwr | ||
341 | #define STOREL swl | ||
342 | #define STORER swr | ||
343 | #define STORE sw | ||
344 | #define ADD addu | ||
345 | #define SUB subu | ||
346 | #define SRL srl | ||
347 | #define SLL sll | ||
348 | #define SLLV sllv | ||
349 | #define SRLV srlv | ||
350 | #define NBYTES 4 | ||
351 | #define LOG_NBYTES 2 | ||
352 | |||
353 | #endif /* USE_DOUBLE */ | ||
354 | |||
355 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
356 | #define LDFIRST LOADR | ||
357 | #define LDREST LOADL | ||
358 | #define STFIRST STORER | ||
359 | #define STREST STOREL | ||
360 | #define SHIFT_DISCARD SLLV | ||
361 | #define SHIFT_DISCARD_REVERT SRLV | ||
362 | #else | ||
363 | #define LDFIRST LOADL | ||
364 | #define LDREST LOADR | ||
365 | #define STFIRST STOREL | ||
366 | #define STREST STORER | ||
367 | #define SHIFT_DISCARD SRLV | ||
368 | #define SHIFT_DISCARD_REVERT SLLV | ||
369 | #endif | ||
370 | |||
371 | #define FIRST(unit) ((unit)*NBYTES) | ||
372 | #define REST(unit) (FIRST(unit)+NBYTES-1) | ||
373 | |||
374 | #define ADDRMASK (NBYTES-1) | ||
375 | |||
376 | .set noat | ||
377 | |||
378 | LEAF(__csum_partial_copy_user) | ||
379 | PTR_ADDU AT, src, len /* See (1) above. */ | ||
380 | #ifdef CONFIG_64BIT | ||
381 | move errptr, a4 | ||
382 | #else | ||
383 | lw errptr, 16(sp) | ||
384 | #endif | ||
385 | FEXPORT(csum_partial_copy_nocheck) | ||
386 | move sum, zero | ||
387 | move odd, zero | ||
388 | /* | ||
389 | * Note: dst & src may be unaligned, len may be 0 | ||
390 | * Temps | ||
391 | */ | ||
392 | /* | ||
393 | * The "issue break"s below are very approximate. | ||
394 | * Issue delays for dcache fills will perturb the schedule, as will | ||
395 | * load queue full replay traps, etc. | ||
396 | * | ||
397 | * If len < NBYTES use byte operations. | ||
398 | */ | ||
399 | sltu t2, len, NBYTES | ||
400 | and t1, dst, ADDRMASK | ||
401 | bnez t2, copy_bytes_checklen | ||
402 | and t0, src, ADDRMASK | ||
403 | andi odd, dst, 0x1 /* odd buffer? */ | ||
404 | bnez t1, dst_unaligned | ||
405 | nop | ||
406 | bnez t0, src_unaligned_dst_aligned | ||
407 | /* | ||
408 | * use delay slot for fall-through | ||
409 | * src and dst are aligned; need to compute rem | ||
410 | */ | ||
411 | both_aligned: | ||
412 | SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter | ||
413 | beqz t0, cleanup_both_aligned # len < 8*NBYTES | ||
414 | nop | ||
415 | SUB len, 8*NBYTES # subtract here for bgez loop | ||
416 | .align 4 | ||
417 | 1: | ||
418 | EXC( LOAD t0, UNIT(0)(src), l_exc) | ||
419 | EXC( LOAD t1, UNIT(1)(src), l_exc_copy) | ||
420 | EXC( LOAD t2, UNIT(2)(src), l_exc_copy) | ||
421 | EXC( LOAD t3, UNIT(3)(src), l_exc_copy) | ||
422 | EXC( LOAD t4, UNIT(4)(src), l_exc_copy) | ||
423 | EXC( LOAD t5, UNIT(5)(src), l_exc_copy) | ||
424 | EXC( LOAD t6, UNIT(6)(src), l_exc_copy) | ||
425 | EXC( LOAD t7, UNIT(7)(src), l_exc_copy) | ||
426 | SUB len, len, 8*NBYTES | ||
427 | ADD src, src, 8*NBYTES | ||
428 | EXC( STORE t0, UNIT(0)(dst), s_exc) | ||
429 | ADDC(sum, t0) | ||
430 | EXC( STORE t1, UNIT(1)(dst), s_exc) | ||
431 | ADDC(sum, t1) | ||
432 | EXC( STORE t2, UNIT(2)(dst), s_exc) | ||
433 | ADDC(sum, t2) | ||
434 | EXC( STORE t3, UNIT(3)(dst), s_exc) | ||
435 | ADDC(sum, t3) | ||
436 | EXC( STORE t4, UNIT(4)(dst), s_exc) | ||
437 | ADDC(sum, t4) | ||
438 | EXC( STORE t5, UNIT(5)(dst), s_exc) | ||
439 | ADDC(sum, t5) | ||
440 | EXC( STORE t6, UNIT(6)(dst), s_exc) | ||
441 | ADDC(sum, t6) | ||
442 | EXC( STORE t7, UNIT(7)(dst), s_exc) | ||
443 | ADDC(sum, t7) | ||
444 | bgez len, 1b | ||
445 | ADD dst, dst, 8*NBYTES | ||
446 | ADD len, 8*NBYTES # revert len (see above) | ||
447 | |||
448 | /* | ||
449 | * len == the number of bytes left to copy < 8*NBYTES | ||
450 | */ | ||
451 | cleanup_both_aligned: | ||
452 | #define rem t7 | ||
453 | beqz len, done | ||
454 | sltu t0, len, 4*NBYTES | ||
455 | bnez t0, less_than_4units | ||
456 | and rem, len, (NBYTES-1) # rem = len % NBYTES | ||
457 | /* | ||
458 | * len >= 4*NBYTES | ||
459 | */ | ||
460 | EXC( LOAD t0, UNIT(0)(src), l_exc) | ||
461 | EXC( LOAD t1, UNIT(1)(src), l_exc_copy) | ||
462 | EXC( LOAD t2, UNIT(2)(src), l_exc_copy) | ||
463 | EXC( LOAD t3, UNIT(3)(src), l_exc_copy) | ||
464 | SUB len, len, 4*NBYTES | ||
465 | ADD src, src, 4*NBYTES | ||
466 | EXC( STORE t0, UNIT(0)(dst), s_exc) | ||
467 | ADDC(sum, t0) | ||
468 | EXC( STORE t1, UNIT(1)(dst), s_exc) | ||
469 | ADDC(sum, t1) | ||
470 | EXC( STORE t2, UNIT(2)(dst), s_exc) | ||
471 | ADDC(sum, t2) | ||
472 | EXC( STORE t3, UNIT(3)(dst), s_exc) | ||
473 | ADDC(sum, t3) | ||
474 | beqz len, done | ||
475 | ADD dst, dst, 4*NBYTES | ||
476 | less_than_4units: | ||
477 | /* | ||
478 | * rem = len % NBYTES | ||
479 | */ | ||
480 | beq rem, len, copy_bytes | ||
481 | nop | ||
482 | 1: | ||
483 | EXC( LOAD t0, 0(src), l_exc) | ||
484 | ADD src, src, NBYTES | ||
485 | SUB len, len, NBYTES | ||
486 | EXC( STORE t0, 0(dst), s_exc) | ||
487 | ADDC(sum, t0) | ||
488 | bne rem, len, 1b | ||
489 | ADD dst, dst, NBYTES | ||
490 | |||
491 | /* | ||
492 | * src and dst are aligned, need to copy rem bytes (rem < NBYTES) | ||
493 | * A loop would do only a byte at a time with possible branch | ||
494 | * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE | ||
495 | * because can't assume read-access to dst. Instead, use | ||
496 | * STREST dst, which doesn't require read access to dst. | ||
497 | * | ||
498 | * This code should perform better than a simple loop on modern, | ||
499 | * wide-issue mips processors because the code has fewer branches and | ||
500 | * more instruction-level parallelism. | ||
501 | */ | ||
502 | #define bits t2 | ||
503 | beqz len, done | ||
504 | ADD t1, dst, len # t1 is just past last byte of dst | ||
505 | li bits, 8*NBYTES | ||
506 | SLL rem, len, 3 # rem = number of bits to keep | ||
507 | EXC( LOAD t0, 0(src), l_exc) | ||
508 | SUB bits, bits, rem # bits = number of bits to discard | ||
509 | SHIFT_DISCARD t0, t0, bits | ||
510 | EXC( STREST t0, -1(t1), s_exc) | ||
511 | SHIFT_DISCARD_REVERT t0, t0, bits | ||
512 | .set reorder | ||
513 | ADDC(sum, t0) | ||
514 | b done | ||
515 | .set noreorder | ||
516 | dst_unaligned: | ||
517 | /* | ||
518 | * dst is unaligned | ||
519 | * t0 = src & ADDRMASK | ||
520 | * t1 = dst & ADDRMASK; T1 > 0 | ||
521 | * len >= NBYTES | ||
522 | * | ||
523 | * Copy enough bytes to align dst | ||
524 | * Set match = (src and dst have same alignment) | ||
525 | */ | ||
526 | #define match rem | ||
527 | EXC( LDFIRST t3, FIRST(0)(src), l_exc) | ||
528 | ADD t2, zero, NBYTES | ||
529 | EXC( LDREST t3, REST(0)(src), l_exc_copy) | ||
530 | SUB t2, t2, t1 # t2 = number of bytes copied | ||
531 | xor match, t0, t1 | ||
532 | EXC( STFIRST t3, FIRST(0)(dst), s_exc) | ||
533 | SLL t4, t1, 3 # t4 = number of bits to discard | ||
534 | SHIFT_DISCARD t3, t3, t4 | ||
535 | /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ | ||
536 | ADDC(sum, t3) | ||
537 | beq len, t2, done | ||
538 | SUB len, len, t2 | ||
539 | ADD dst, dst, t2 | ||
540 | beqz match, both_aligned | ||
541 | ADD src, src, t2 | ||
542 | |||
543 | src_unaligned_dst_aligned: | ||
544 | SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter | ||
545 | beqz t0, cleanup_src_unaligned | ||
546 | and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES | ||
547 | 1: | ||
548 | /* | ||
549 | * Avoid consecutive LD*'s to the same register since some mips | ||
550 | * implementations can't issue them in the same cycle. | ||
551 | * It's OK to load FIRST(N+1) before REST(N) because the two addresses | ||
552 | * are to the same unit (unless src is aligned, but it's not). | ||
553 | */ | ||
554 | EXC( LDFIRST t0, FIRST(0)(src), l_exc) | ||
555 | EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) | ||
556 | SUB len, len, 4*NBYTES | ||
557 | EXC( LDREST t0, REST(0)(src), l_exc_copy) | ||
558 | EXC( LDREST t1, REST(1)(src), l_exc_copy) | ||
559 | EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) | ||
560 | EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) | ||
561 | EXC( LDREST t2, REST(2)(src), l_exc_copy) | ||
562 | EXC( LDREST t3, REST(3)(src), l_exc_copy) | ||
563 | ADD src, src, 4*NBYTES | ||
564 | #ifdef CONFIG_CPU_SB1 | ||
565 | nop # improves slotting | ||
566 | #endif | ||
567 | EXC( STORE t0, UNIT(0)(dst), s_exc) | ||
568 | ADDC(sum, t0) | ||
569 | EXC( STORE t1, UNIT(1)(dst), s_exc) | ||
570 | ADDC(sum, t1) | ||
571 | EXC( STORE t2, UNIT(2)(dst), s_exc) | ||
572 | ADDC(sum, t2) | ||
573 | EXC( STORE t3, UNIT(3)(dst), s_exc) | ||
574 | ADDC(sum, t3) | ||
575 | bne len, rem, 1b | ||
576 | ADD dst, dst, 4*NBYTES | ||
577 | |||
578 | cleanup_src_unaligned: | ||
579 | beqz len, done | ||
580 | and rem, len, NBYTES-1 # rem = len % NBYTES | ||
581 | beq rem, len, copy_bytes | ||
582 | nop | ||
583 | 1: | ||
584 | EXC( LDFIRST t0, FIRST(0)(src), l_exc) | ||
585 | EXC( LDREST t0, REST(0)(src), l_exc_copy) | ||
586 | ADD src, src, NBYTES | ||
587 | SUB len, len, NBYTES | ||
588 | EXC( STORE t0, 0(dst), s_exc) | ||
589 | ADDC(sum, t0) | ||
590 | bne len, rem, 1b | ||
591 | ADD dst, dst, NBYTES | ||
592 | |||
593 | copy_bytes_checklen: | ||
594 | beqz len, done | ||
595 | nop | ||
596 | copy_bytes: | ||
597 | /* 0 < len < NBYTES */ | ||
598 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
599 | #define SHIFT_START 0 | ||
600 | #define SHIFT_INC 8 | ||
601 | #else | ||
602 | #define SHIFT_START 8*(NBYTES-1) | ||
603 | #define SHIFT_INC -8 | ||
604 | #endif | ||
605 | move t2, zero # partial word | ||
606 | li t3, SHIFT_START # shift | ||
607 | /* use l_exc_copy here to return correct sum on fault */ | ||
608 | #define COPY_BYTE(N) \ | ||
609 | EXC( lbu t0, N(src), l_exc_copy); \ | ||
610 | SUB len, len, 1; \ | ||
611 | EXC( sb t0, N(dst), s_exc); \ | ||
612 | SLLV t0, t0, t3; \ | ||
613 | addu t3, SHIFT_INC; \ | ||
614 | beqz len, copy_bytes_done; \ | ||
615 | or t2, t0 | ||
616 | |||
617 | COPY_BYTE(0) | ||
618 | COPY_BYTE(1) | ||
619 | #ifdef USE_DOUBLE | ||
620 | COPY_BYTE(2) | ||
621 | COPY_BYTE(3) | ||
622 | COPY_BYTE(4) | ||
623 | COPY_BYTE(5) | ||
624 | #endif | ||
625 | EXC( lbu t0, NBYTES-2(src), l_exc_copy) | ||
626 | SUB len, len, 1 | ||
627 | EXC( sb t0, NBYTES-2(dst), s_exc) | ||
628 | SLLV t0, t0, t3 | ||
629 | or t2, t0 | ||
630 | copy_bytes_done: | ||
631 | ADDC(sum, t2) | ||
632 | done: | ||
633 | /* fold checksum */ | ||
634 | #ifdef USE_DOUBLE | ||
635 | dsll32 v1, sum, 0 | ||
636 | daddu sum, v1 | ||
637 | sltu v1, sum, v1 | ||
638 | dsra32 sum, sum, 0 | ||
639 | addu sum, v1 | ||
640 | #endif | ||
641 | sll v1, sum, 16 | ||
642 | addu sum, v1 | ||
643 | sltu v1, sum, v1 | ||
644 | srl sum, sum, 16 | ||
645 | addu sum, v1 | ||
646 | |||
647 | /* odd buffer alignment? */ | ||
648 | beqz odd, 1f | ||
649 | nop | ||
650 | sll v1, sum, 8 | ||
651 | srl sum, sum, 8 | ||
652 | or sum, v1 | ||
653 | andi sum, 0xffff | ||
654 | 1: | ||
655 | .set reorder | ||
656 | ADDC(sum, psum) | ||
657 | jr ra | ||
658 | .set noreorder | ||
659 | |||
660 | l_exc_copy: | ||
661 | /* | ||
662 | * Copy bytes from src until faulting load address (or until a | ||
663 | * lb faults) | ||
664 | * | ||
665 | * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) | ||
666 | * may be more than a byte beyond the last address. | ||
667 | * Hence, the lb below may get an exception. | ||
668 | * | ||
669 | * Assumes src < THREAD_BUADDR($28) | ||
670 | */ | ||
671 | LOAD t0, TI_TASK($28) | ||
672 | li t2, SHIFT_START | ||
673 | LOAD t0, THREAD_BUADDR(t0) | ||
674 | 1: | ||
675 | EXC( lbu t1, 0(src), l_exc) | ||
676 | ADD src, src, 1 | ||
677 | sb t1, 0(dst) # can't fault -- we're copy_from_user | ||
678 | SLLV t1, t1, t2 | ||
679 | addu t2, SHIFT_INC | ||
680 | ADDC(sum, t1) | ||
681 | bne src, t0, 1b | ||
682 | ADD dst, dst, 1 | ||
683 | l_exc: | ||
684 | LOAD t0, TI_TASK($28) | ||
685 | nop | ||
686 | LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address | ||
687 | nop | ||
688 | SUB len, AT, t0 # len number of uncopied bytes | ||
689 | /* | ||
690 | * Here's where we rely on src and dst being incremented in tandem, | ||
691 | * See (3) above. | ||
692 | * dst += (fault addr - src) to put dst at first byte to clear | ||
693 | */ | ||
694 | ADD dst, t0 # compute start address in a1 | ||
695 | SUB dst, src | ||
696 | /* | ||
697 | * Clear len bytes starting at dst. Can't call __bzero because it | ||
698 | * might modify len. An inefficient loop for these rare times... | ||
699 | */ | ||
700 | beqz len, done | ||
701 | SUB src, len, 1 | ||
702 | 1: sb zero, 0(dst) | ||
703 | ADD dst, dst, 1 | ||
704 | bnez src, 1b | ||
705 | SUB src, src, 1 | ||
706 | li v1, -EFAULT | ||
707 | b done | ||
708 | sw v1, (errptr) | ||
709 | |||
710 | s_exc: | ||
711 | li v0, -1 /* invalid checksum */ | ||
712 | li v1, -EFAULT | ||
713 | jr ra | ||
714 | sw v1, (errptr) | ||
715 | END(__csum_partial_copy_user) | ||
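With the old C routines removed (next hunk), csum_partial_copy_from_user() is presumably left as a thin inline wrapper in asm/checksum.h around the new __csum_partial_copy_user() entry point; that wrapper is not part of this excerpt, so the following is only a hedged sketch of what such a wrapper typically looks like:

    /* Hedged sketch, not taken from this patch: hand the user pointer
     * straight to the assembly routine, which reports faults via *err_ptr. */
    __wsum __csum_partial_copy_user(const void *src, void *dst,
                                    int len, __wsum sum, int *err_ptr);

    static inline __wsum
    csum_partial_copy_from_user(const void __user *src, void *dst, int len,
                                __wsum sum, int *err_ptr)
    {
            might_sleep();
            return __csum_partial_copy_user((__force void *)src, dst,
                                            len, sum, err_ptr);
    }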
diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c deleted file mode 100644 index 06771040a267..000000000000 --- a/arch/mips/lib/csum_partial_copy.c +++ /dev/null | |||
@@ -1,52 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994, 1995 Waldorf Electronics GmbH | ||
7 | * Copyright (C) 1998, 1999 Ralf Baechle | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <asm/byteorder.h> | ||
13 | #include <asm/string.h> | ||
14 | #include <asm/uaccess.h> | ||
15 | #include <net/checksum.h> | ||
16 | |||
17 | /* | ||
18 | * copy while checksumming, otherwise like csum_partial | ||
19 | */ | ||
20 | __wsum csum_partial_copy_nocheck(const void *src, | ||
21 | void *dst, int len, __wsum sum) | ||
22 | { | ||
23 | /* | ||
24 | * It's 2:30 am and I don't feel like doing it real ... | ||
25 | * This is lots slower than the real thing (tm) | ||
26 | */ | ||
27 | sum = csum_partial(src, len, sum); | ||
28 | memcpy(dst, src, len); | ||
29 | |||
30 | return sum; | ||
31 | } | ||
32 | |||
33 | EXPORT_SYMBOL(csum_partial_copy_nocheck); | ||
34 | |||
35 | /* | ||
36 | * Copy from userspace and compute checksum. If we catch an exception | ||
37 | * then zero the rest of the buffer. | ||
38 | */ | ||
39 | __wsum csum_partial_copy_from_user (const void __user *src, | ||
40 | void *dst, int len, __wsum sum, int *err_ptr) | ||
41 | { | ||
42 | int missing; | ||
43 | |||
44 | might_sleep(); | ||
45 | missing = copy_from_user(dst, src, len); | ||
46 | if (missing) { | ||
47 | memset(dst + len - missing, 0, missing); | ||
48 | *err_ptr = -EFAULT; | ||
49 | } | ||
50 | |||
51 | return csum_partial(dst, len, sum); | ||
52 | } | ||
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c index e4604c73f02e..a3c3a1d462b2 100644 --- a/arch/mips/mips-boards/generic/time.c +++ b/arch/mips/mips-boards/generic/time.c | |||
@@ -47,6 +47,9 @@ | |||
47 | #ifdef CONFIG_MIPS_MALTA | 47 | #ifdef CONFIG_MIPS_MALTA |
48 | #include <asm/mips-boards/maltaint.h> | 48 | #include <asm/mips-boards/maltaint.h> |
49 | #endif | 49 | #endif |
50 | #ifdef CONFIG_MIPS_SEAD | ||
51 | #include <asm/mips-boards/seadint.h> | ||
52 | #endif | ||
50 | 53 | ||
51 | unsigned long cpu_khz; | 54 | unsigned long cpu_khz; |
52 | 55 | ||
@@ -263,11 +266,13 @@ void __init mips_time_init(void) | |||
263 | 266 | ||
264 | void __init plat_timer_setup(struct irqaction *irq) | 267 | void __init plat_timer_setup(struct irqaction *irq) |
265 | { | 268 | { |
269 | #ifdef MSC01E_INT_BASE | ||
266 | if (cpu_has_veic) { | 270 | if (cpu_has_veic) { |
267 | set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch); | 271 | set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch); |
268 | mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; | 272 | mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; |
269 | } | 273 | } else |
270 | else { | 274 | #endif |
275 | { | ||
271 | if (cpu_has_vint) | 276 | if (cpu_has_vint) |
272 | set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch); | 277 | set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch); |
273 | mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR; | 278 | mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR; |
diff --git a/arch/mips/mips-boards/malta/malta_mtd.c b/arch/mips/mips-boards/malta/malta_mtd.c new file mode 100644 index 000000000000..8ad9bdf25dce --- /dev/null +++ b/arch/mips/mips-boards/malta/malta_mtd.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 MIPS Technologies, Inc. | ||
7 | * written by Ralf Baechle <ralf@linux-mips.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | #include <linux/mtd/partitions.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | #include <mtd/mtd-abi.h> | ||
15 | |||
16 | static struct mtd_partition malta_mtd_partitions[] = { | ||
17 | { | ||
18 | .name = "YAMON", | ||
19 | .offset = 0x0, | ||
20 | .size = 0x100000, | ||
21 | .mask_flags = MTD_WRITEABLE | ||
22 | }, { | ||
23 | .name = "User FS", | ||
24 | .offset = 0x100000, | ||
25 | .size = 0x2e0000 | ||
26 | }, { | ||
27 | .name = "Board Config", | ||
28 | .offset = 0x3e0000, | ||
29 | .size = 0x020000, | ||
30 | .mask_flags = MTD_WRITEABLE | ||
31 | } | ||
32 | }; | ||
33 | |||
34 | static struct physmap_flash_data malta_flash_data = { | ||
35 | .width = 4, | ||
36 | .nr_parts = ARRAY_SIZE(malta_mtd_partitions), | ||
37 | .parts = malta_mtd_partitions | ||
38 | }; | ||
39 | |||
40 | static struct resource malta_flash_resource = { | ||
41 | .start = 0x1e000000, | ||
42 | .end = 0x1e3fffff, | ||
43 | .flags = IORESOURCE_MEM | ||
44 | }; | ||
45 | |||
46 | static struct platform_device malta_flash = { | ||
47 | .name = "physmap-flash", | ||
48 | .id = 0, | ||
49 | .dev = { | ||
50 | .platform_data = &malta_flash_data, | ||
51 | }, | ||
52 | .num_resources = 1, | ||
53 | .resource = &malta_flash_resource, | ||
54 | }; | ||
55 | |||
56 | static int __init malta_mtd_init(void) | ||
57 | { | ||
58 | platform_device_register(&malta_flash); | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | module_init(malta_mtd_init) | ||
diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c index f445fcddfdfd..874ccb0066b8 100644 --- a/arch/mips/mips-boards/sead/sead_int.c +++ b/arch/mips/mips-boards/sead/sead_int.c | |||
@@ -21,7 +21,7 @@ | |||
21 | * Sead board. | 21 | * Sead board. |
22 | */ | 22 | */ |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/interrupt.h> |
25 | 25 | ||
26 | #include <asm/irq_cpu.h> | 26 | #include <asm/irq_cpu.h> |
27 | #include <asm/mipsregs.h> | 27 | #include <asm/mipsregs.h> |
@@ -108,7 +108,7 @@ asmlinkage void plat_irq_dispatch(void) | |||
108 | if (irq >= 0) | 108 | if (irq >= 0) |
109 | do_IRQ(MIPSCPU_INT_BASE + irq); | 109 | do_IRQ(MIPSCPU_INT_BASE + irq); |
110 | else | 110 | else |
111 | spurious_interrupt(regs); | 111 | spurious_interrupt(); |
112 | } | 112 | } |
113 | 113 | ||
114 | void __init arch_init_irq(void) | 114 | void __init arch_init_irq(void) |
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c index d41fc5885e87..dc795be62807 100644 --- a/arch/mips/mm/pg-r4k.c +++ b/arch/mips/mm/pg-r4k.c | |||
@@ -243,11 +243,10 @@ static void __init __build_store_reg(int reg) | |||
243 | 243 | ||
244 | static inline void build_store_reg(int reg) | 244 | static inline void build_store_reg(int reg) |
245 | { | 245 | { |
246 | if (cpu_has_prefetch) | 246 | int pref_off = cpu_has_prefetch ? |
247 | if (reg) | 247 | (reg ? pref_offset_copy : pref_offset_clear) : 0; |
248 | build_dst_pref(pref_offset_copy); | 248 | if (pref_off) |
249 | else | 249 | build_dst_pref(pref_off); |
250 | build_dst_pref(pref_offset_clear); | ||
251 | else if (cpu_has_cache_cdex_s) | 250 | else if (cpu_has_cache_cdex_s) |
252 | build_cdex_s(); | 251 | build_cdex_s(); |
253 | else if (cpu_has_cache_cdex_p) | 252 | else if (cpu_has_cache_cdex_p) |
diff --git a/arch/mips/pci/ops-pnx8550.c b/arch/mips/pci/ops-pnx8550.c index 454b65cc3354..f556b7a8dccd 100644 --- a/arch/mips/pci/ops-pnx8550.c +++ b/arch/mips/pci/ops-pnx8550.c | |||
@@ -202,7 +202,7 @@ write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val) | |||
202 | break; | 202 | break; |
203 | } | 203 | } |
204 | 204 | ||
205 | err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(1 << (where & 3)), &data); | 205 | err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(1 << (where & 3)), &data); |
206 | 206 | ||
207 | return err; | 207 | return err; |
208 | } | 208 | } |
diff --git a/arch/mips/philips/pnx8550/common/time.c b/arch/mips/philips/pnx8550/common/time.c index 65c440e8480b..f80acae07cee 100644 --- a/arch/mips/philips/pnx8550/common/time.c +++ b/arch/mips/philips/pnx8550/common/time.c | |||
@@ -33,7 +33,17 @@ | |||
33 | #include <int.h> | 33 | #include <int.h> |
34 | #include <cm.h> | 34 | #include <cm.h> |
35 | 35 | ||
36 | extern unsigned int mips_hpt_frequency; | 36 | static unsigned long cpj; |
37 | |||
38 | static cycle_t hpt_read(void) | ||
39 | { | ||
40 | return read_c0_count2(); | ||
41 | } | ||
42 | |||
43 | static void timer_ack(void) | ||
44 | { | ||
45 | write_c0_compare(cpj); | ||
46 | } | ||
37 | 47 | ||
38 | /* | 48 | /* |
39 | * pnx8550_time_init() - it does the following things: | 49 | * pnx8550_time_init() - it does the following things: |
@@ -68,27 +78,47 @@ void pnx8550_time_init(void) | |||
68 | * HZ timer interrupts per second. | 78 | * HZ timer interrupts per second. |
69 | */ | 79 | */ |
70 | mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p)); | 80 | mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p)); |
81 | cpj = (mips_hpt_frequency + HZ / 2) / HZ; | ||
82 | timer_ack(); | ||
83 | |||
84 | /* Setup Timer 2 */ | ||
85 | write_c0_count2(0); | ||
86 | write_c0_compare2(0xffffffff); | ||
87 | |||
88 | clocksource_mips.read = hpt_read; | ||
89 | mips_timer_ack = timer_ack; | ||
90 | } | ||
91 | |||
92 | static irqreturn_t monotonic_interrupt(int irq, void *dev_id) | ||
93 | { | ||
94 | /* Timer 2 clear interrupt */ | ||
95 | write_c0_compare2(-1); | ||
96 | return IRQ_HANDLED; | ||
71 | } | 97 | } |
72 | 98 | ||
99 | static struct irqaction monotonic_irqaction = { | ||
100 | .handler = monotonic_interrupt, | ||
101 | .flags = IRQF_DISABLED, | ||
102 | .name = "Monotonic timer", | ||
103 | }; | ||
104 | |||
73 | void __init plat_timer_setup(struct irqaction *irq) | 105 | void __init plat_timer_setup(struct irqaction *irq) |
74 | { | 106 | { |
75 | int configPR; | 107 | int configPR; |
76 | 108 | ||
77 | setup_irq(PNX8550_INT_TIMER1, irq); | 109 | setup_irq(PNX8550_INT_TIMER1, irq); |
110 | setup_irq(PNX8550_INT_TIMER2, &monotonic_irqaction); | ||
78 | 111 | ||
79 | /* Start timer1 */ | 112 | /* Timer 1 start */ |
80 | configPR = read_c0_config7(); | 113 | configPR = read_c0_config7(); |
81 | configPR &= ~0x00000008; | 114 | configPR &= ~0x00000008; |
82 | write_c0_config7(configPR); | 115 | write_c0_config7(configPR); |
83 | 116 | ||
84 | /* Timer 2 stop */ | 117 | /* Timer 2 start */ |
85 | configPR = read_c0_config7(); | 118 | configPR = read_c0_config7(); |
86 | configPR |= 0x00000010; | 119 | configPR &= ~0x00000010; |
87 | write_c0_config7(configPR); | 120 | write_c0_config7(configPR); |
88 | 121 | ||
89 | write_c0_count2(0); | ||
90 | write_c0_compare2(0xffffffff); | ||
91 | |||
92 | /* Timer 3 stop */ | 122 | /* Timer 3 stop */ |
93 | configPR = read_c0_config7(); | 123 | configPR = read_c0_config7(); |
94 | configPR |= 0x00000020; | 124 | configPR |= 0x00000020; |
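In the time.c rework above, cpj (presumably cycles per jiffy) is computed as (mips_hpt_frequency + HZ / 2) / HZ, i.e. rounded to the nearest integer rather than truncated, so the programmed interval sits as close as possible to the true frequency divided by HZ. A quick standalone illustration with a made-up timer frequency:

#include <stdio.h>

#define HZ 250  /* timer interrupts per second, matching the usual kernel setting */

int main(void)
{
        unsigned long hpt_frequency = 40500999; /* made-up timer frequency in Hz */

        unsigned long truncated = hpt_frequency / HZ;
        unsigned long rounded   = (hpt_frequency + HZ / 2) / HZ;

        /* Residual drift, in timer cycles per second, for each divisor. */
        long err_trunc = (long)(truncated * HZ) - (long)hpt_frequency;
        long err_round = (long)(rounded   * HZ) - (long)hpt_frequency;

        printf("truncated cpj=%lu (drift %ld cycles/s)\n", truncated, err_trunc);
        printf("rounded   cpj=%lu (drift %ld cycles/s)\n", rounded,   err_round);
        return 0;
}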
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8699dadcd096..0855d55c194d 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -436,7 +436,7 @@ config PPC_EFIKA | |||
436 | select RTAS_PROC | 436 | select RTAS_PROC |
437 | select PPC_MPC52xx | 437 | select PPC_MPC52xx |
438 | select PPC_NATIVE | 438 | select PPC_NATIVE |
439 | default y | 439 | default n |
440 | 440 | ||
441 | config PPC_LITE5200 | 441 | config PPC_LITE5200 |
442 | bool "Freescale Lite5200 Eval Board" | 442 | bool "Freescale Lite5200 Eval Board" |
@@ -471,7 +471,7 @@ config PPC_PREP | |||
471 | select PPC_INDIRECT_PCI | 471 | select PPC_INDIRECT_PCI |
472 | select PPC_UDBG_16550 | 472 | select PPC_UDBG_16550 |
473 | select PPC_NATIVE | 473 | select PPC_NATIVE |
474 | default y | 474 | default n |
475 | 475 | ||
476 | config PPC_MAPLE | 476 | config PPC_MAPLE |
477 | depends on PPC_MULTIPLATFORM && PPC64 | 477 | depends on PPC_MULTIPLATFORM && PPC64 |
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts index 8bc0d259796d..a8efb59f5dd7 100644 --- a/arch/powerpc/boot/dts/lite5200.dts +++ b/arch/powerpc/boot/dts/lite5200.dts | |||
@@ -238,7 +238,7 @@ | |||
238 | 238 | ||
239 | // PSC3 in CODEC mode example | 239 | // PSC3 in CODEC mode example |
240 | i2s@2400 { // PSC3 | 240 | i2s@2400 { // PSC3 |
241 | device_type = "i2s"; | 241 | device_type = "sound"; |
242 | compatible = "mpc5200-psc-i2s\0mpc52xx-psc-i2s"; | 242 | compatible = "mpc5200-psc-i2s\0mpc52xx-psc-i2s"; |
243 | reg = <2400 100>; | 243 | reg = <2400 100>; |
244 | interrupts = <2 3 0>; | 244 | interrupts = <2 3 0>; |
@@ -265,7 +265,7 @@ | |||
265 | 265 | ||
266 | // PSC6 in AC97 mode example | 266 | // PSC6 in AC97 mode example |
267 | ac97@2c00 { // PSC6 | 267 | ac97@2c00 { // PSC6 |
268 | device_type = "ac97"; | 268 | device_type = "sound"; |
269 | compatible = "mpc5200-psc-ac97\0mpc52xx-psc-ac97"; | 269 | compatible = "mpc5200-psc-ac97\0mpc52xx-psc-ac97"; |
270 | reg = <2c00 100>; | 270 | reg = <2c00 100>; |
271 | interrupts = <2 4 0>; | 271 | interrupts = <2 4 0>; |
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts index 81cb76418a78..1aabee432d86 100644 --- a/arch/powerpc/boot/dts/lite5200b.dts +++ b/arch/powerpc/boot/dts/lite5200b.dts | |||
@@ -243,7 +243,7 @@ | |||
243 | 243 | ||
244 | // PSC3 in CODEC mode example | 244 | // PSC3 in CODEC mode example |
245 | i2s@2400 { // PSC3 | 245 | i2s@2400 { // PSC3 |
246 | device_type = "i2s"; | 246 | device_type = "sound"; |
247 | compatible = "mpc5200b-psc-i2s\0mpc52xx-psc-i2s"; | 247 | compatible = "mpc5200b-psc-i2s\0mpc52xx-psc-i2s"; |
248 | reg = <2400 100>; | 248 | reg = <2400 100>; |
249 | interrupts = <2 3 0>; | 249 | interrupts = <2 3 0>; |
@@ -270,7 +270,7 @@ | |||
270 | 270 | ||
271 | // PSC6 in AC97 mode example | 271 | // PSC6 in AC97 mode example |
272 | ac97@2c00 { // PSC6 | 272 | ac97@2c00 { // PSC6 |
273 | device_type = "ac97"; | 273 | device_type = "sound"; |
274 | compatible = "mpc5200b-psc-ac97\0mpc52xx-psc-ac97"; | 274 | compatible = "mpc5200b-psc-ac97\0mpc52xx-psc-ac97"; |
275 | reg = <2c00 100>; | 275 | reg = <2c00 100>; |
276 | interrupts = <2 4 0>; | 276 | interrupts = <2 4 0>; |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 1c009651f925..340376a47001 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.18-rc6 | 3 | # Linux kernel version: 2.6.20-rc3 |
4 | # Sun Sep 10 10:45:11 2006 | 4 | # Tue Jan 2 15:32:44 2007 |
5 | # | 5 | # |
6 | CONFIG_PPC64=y | 6 | CONFIG_PPC64=y |
7 | CONFIG_64BIT=y | 7 | CONFIG_64BIT=y |
@@ -10,6 +10,8 @@ CONFIG_MMU=y | |||
10 | CONFIG_GENERIC_HARDIRQS=y | 10 | CONFIG_GENERIC_HARDIRQS=y |
11 | CONFIG_IRQ_PER_CPU=y | 11 | CONFIG_IRQ_PER_CPU=y |
12 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 12 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
13 | CONFIG_ARCH_HAS_ILOG2_U32=y | ||
14 | CONFIG_ARCH_HAS_ILOG2_U64=y | ||
13 | CONFIG_GENERIC_HWEIGHT=y | 15 | CONFIG_GENERIC_HWEIGHT=y |
14 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 16 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
15 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 17 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
@@ -22,6 +24,8 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y | |||
22 | CONFIG_PPC_OF=y | 24 | CONFIG_PPC_OF=y |
23 | CONFIG_PPC_UDBG_16550=y | 25 | CONFIG_PPC_UDBG_16550=y |
24 | CONFIG_GENERIC_TBSYNC=y | 26 | CONFIG_GENERIC_TBSYNC=y |
27 | CONFIG_AUDIT_ARCH=y | ||
28 | CONFIG_GENERIC_BUG=y | ||
25 | # CONFIG_DEFAULT_UIMAGE is not set | 29 | # CONFIG_DEFAULT_UIMAGE is not set |
26 | 30 | ||
27 | # | 31 | # |
@@ -31,6 +35,10 @@ CONFIG_GENERIC_TBSYNC=y | |||
31 | CONFIG_POWER3=y | 35 | CONFIG_POWER3=y |
32 | CONFIG_POWER4=y | 36 | CONFIG_POWER4=y |
33 | CONFIG_PPC_FPU=y | 37 | CONFIG_PPC_FPU=y |
38 | # CONFIG_PPC_DCR_NATIVE is not set | ||
39 | CONFIG_PPC_DCR_MMIO=y | ||
40 | CONFIG_PPC_DCR=y | ||
41 | CONFIG_PPC_OF_PLATFORM_PCI=y | ||
34 | CONFIG_ALTIVEC=y | 42 | CONFIG_ALTIVEC=y |
35 | CONFIG_PPC_STD_MMU=y | 43 | CONFIG_PPC_STD_MMU=y |
36 | CONFIG_VIRT_CPU_ACCOUNTING=y | 44 | CONFIG_VIRT_CPU_ACCOUNTING=y |
@@ -52,19 +60,24 @@ CONFIG_LOCALVERSION="" | |||
52 | CONFIG_LOCALVERSION_AUTO=y | 60 | CONFIG_LOCALVERSION_AUTO=y |
53 | CONFIG_SWAP=y | 61 | CONFIG_SWAP=y |
54 | CONFIG_SYSVIPC=y | 62 | CONFIG_SYSVIPC=y |
63 | # CONFIG_IPC_NS is not set | ||
55 | CONFIG_POSIX_MQUEUE=y | 64 | CONFIG_POSIX_MQUEUE=y |
56 | # CONFIG_BSD_PROCESS_ACCT is not set | 65 | # CONFIG_BSD_PROCESS_ACCT is not set |
57 | CONFIG_TASKSTATS=y | 66 | CONFIG_TASKSTATS=y |
58 | CONFIG_TASK_DELAY_ACCT=y | 67 | CONFIG_TASK_DELAY_ACCT=y |
59 | CONFIG_SYSCTL=y | 68 | # CONFIG_UTS_NS is not set |
60 | # CONFIG_AUDIT is not set | 69 | # CONFIG_AUDIT is not set |
61 | CONFIG_IKCONFIG=y | 70 | CONFIG_IKCONFIG=y |
62 | CONFIG_IKCONFIG_PROC=y | 71 | CONFIG_IKCONFIG_PROC=y |
63 | CONFIG_CPUSETS=y | 72 | CONFIG_CPUSETS=y |
73 | CONFIG_SYSFS_DEPRECATED=y | ||
64 | CONFIG_RELAY=y | 74 | CONFIG_RELAY=y |
65 | CONFIG_INITRAMFS_SOURCE="" | 75 | CONFIG_INITRAMFS_SOURCE="" |
66 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 76 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y |
77 | # CONFIG_TASK_XACCT is not set | ||
78 | CONFIG_SYSCTL=y | ||
67 | # CONFIG_EMBEDDED is not set | 79 | # CONFIG_EMBEDDED is not set |
80 | CONFIG_SYSCTL_SYSCALL=y | ||
68 | CONFIG_KALLSYMS=y | 81 | CONFIG_KALLSYMS=y |
69 | CONFIG_KALLSYMS_ALL=y | 82 | CONFIG_KALLSYMS_ALL=y |
70 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | 83 | # CONFIG_KALLSYMS_EXTRA_PASS is not set |
@@ -73,12 +86,12 @@ CONFIG_PRINTK=y | |||
73 | CONFIG_BUG=y | 86 | CONFIG_BUG=y |
74 | CONFIG_ELF_CORE=y | 87 | CONFIG_ELF_CORE=y |
75 | CONFIG_BASE_FULL=y | 88 | CONFIG_BASE_FULL=y |
76 | CONFIG_RT_MUTEXES=y | ||
77 | CONFIG_FUTEX=y | 89 | CONFIG_FUTEX=y |
78 | CONFIG_EPOLL=y | 90 | CONFIG_EPOLL=y |
79 | CONFIG_SHMEM=y | 91 | CONFIG_SHMEM=y |
80 | CONFIG_SLAB=y | 92 | CONFIG_SLAB=y |
81 | CONFIG_VM_EVENT_COUNTERS=y | 93 | CONFIG_VM_EVENT_COUNTERS=y |
94 | CONFIG_RT_MUTEXES=y | ||
82 | # CONFIG_TINY_SHMEM is not set | 95 | # CONFIG_TINY_SHMEM is not set |
83 | CONFIG_BASE_SMALL=0 | 96 | CONFIG_BASE_SMALL=0 |
84 | # CONFIG_SLOB is not set | 97 | # CONFIG_SLOB is not set |
@@ -97,6 +110,7 @@ CONFIG_STOP_MACHINE=y | |||
97 | # | 110 | # |
98 | # Block layer | 111 | # Block layer |
99 | # | 112 | # |
113 | CONFIG_BLOCK=y | ||
100 | CONFIG_BLK_DEV_IO_TRACE=y | 114 | CONFIG_BLK_DEV_IO_TRACE=y |
101 | 115 | ||
102 | # | 116 | # |
@@ -116,16 +130,20 @@ CONFIG_DEFAULT_IOSCHED="anticipatory" | |||
116 | # Platform support | 130 | # Platform support |
117 | # | 131 | # |
118 | CONFIG_PPC_MULTIPLATFORM=y | 132 | CONFIG_PPC_MULTIPLATFORM=y |
119 | # CONFIG_PPC_ISERIES is not set | ||
120 | # CONFIG_EMBEDDED6xx is not set | 133 | # CONFIG_EMBEDDED6xx is not set |
121 | # CONFIG_APUS is not set | 134 | # CONFIG_APUS is not set |
122 | CONFIG_PPC_PSERIES=y | 135 | CONFIG_PPC_PSERIES=y |
136 | CONFIG_PPC_ISERIES=y | ||
137 | # CONFIG_PPC_MPC52xx is not set | ||
123 | CONFIG_PPC_PMAC=y | 138 | CONFIG_PPC_PMAC=y |
124 | CONFIG_PPC_PMAC64=y | 139 | CONFIG_PPC_PMAC64=y |
125 | CONFIG_PPC_MAPLE=y | 140 | CONFIG_PPC_MAPLE=y |
141 | # CONFIG_PPC_PASEMI is not set | ||
126 | CONFIG_PPC_CELL=y | 142 | CONFIG_PPC_CELL=y |
127 | CONFIG_PPC_CELL_NATIVE=y | 143 | CONFIG_PPC_CELL_NATIVE=y |
128 | CONFIG_PPC_IBM_CELL_BLADE=y | 144 | CONFIG_PPC_IBM_CELL_BLADE=y |
145 | # CONFIG_PPC_PS3 is not set | ||
146 | CONFIG_PPC_NATIVE=y | ||
129 | CONFIG_UDBG_RTAS_CONSOLE=y | 147 | CONFIG_UDBG_RTAS_CONSOLE=y |
130 | CONFIG_XICS=y | 148 | CONFIG_XICS=y |
131 | CONFIG_U3_DART=y | 149 | CONFIG_U3_DART=y |
@@ -139,6 +157,8 @@ CONFIG_IBMVIO=y | |||
139 | # CONFIG_IBMEBUS is not set | 157 | # CONFIG_IBMEBUS is not set |
140 | # CONFIG_PPC_MPC106 is not set | 158 | # CONFIG_PPC_MPC106 is not set |
141 | CONFIG_PPC_970_NAP=y | 159 | CONFIG_PPC_970_NAP=y |
160 | CONFIG_PPC_INDIRECT_IO=y | ||
161 | CONFIG_GENERIC_IOMAP=y | ||
142 | CONFIG_CPU_FREQ=y | 162 | CONFIG_CPU_FREQ=y |
143 | CONFIG_CPU_FREQ_TABLE=y | 163 | CONFIG_CPU_FREQ_TABLE=y |
144 | # CONFIG_CPU_FREQ_DEBUG is not set | 164 | # CONFIG_CPU_FREQ_DEBUG is not set |
@@ -160,14 +180,16 @@ CONFIG_MPIC=y | |||
160 | # | 180 | # |
161 | CONFIG_SPU_FS=m | 181 | CONFIG_SPU_FS=m |
162 | CONFIG_SPU_BASE=y | 182 | CONFIG_SPU_BASE=y |
163 | CONFIG_SPUFS_MMAP=y | ||
164 | CONFIG_CBE_RAS=y | 183 | CONFIG_CBE_RAS=y |
184 | CONFIG_CBE_THERM=m | ||
185 | CONFIG_CBE_CPUFREQ=m | ||
165 | 186 | ||
166 | # | 187 | # |
167 | # Kernel options | 188 | # Kernel options |
168 | # | 189 | # |
169 | # CONFIG_HZ_100 is not set | 190 | # CONFIG_HZ_100 is not set |
170 | CONFIG_HZ_250=y | 191 | CONFIG_HZ_250=y |
192 | # CONFIG_HZ_300 is not set | ||
171 | # CONFIG_HZ_1000 is not set | 193 | # CONFIG_HZ_1000 is not set |
172 | CONFIG_HZ=250 | 194 | CONFIG_HZ=250 |
173 | CONFIG_PREEMPT_NONE=y | 195 | CONFIG_PREEMPT_NONE=y |
@@ -192,6 +214,7 @@ CONFIG_ARCH_SELECT_MEMORY_MODEL=y | |||
192 | CONFIG_ARCH_FLATMEM_ENABLE=y | 214 | CONFIG_ARCH_FLATMEM_ENABLE=y |
193 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | 215 | CONFIG_ARCH_SPARSEMEM_ENABLE=y |
194 | CONFIG_ARCH_SPARSEMEM_DEFAULT=y | 216 | CONFIG_ARCH_SPARSEMEM_DEFAULT=y |
217 | CONFIG_ARCH_POPULATES_NODE_MAP=y | ||
195 | CONFIG_SELECT_MEMORY_MODEL=y | 218 | CONFIG_SELECT_MEMORY_MODEL=y |
196 | # CONFIG_FLATMEM_MANUAL is not set | 219 | # CONFIG_FLATMEM_MANUAL is not set |
197 | # CONFIG_DISCONTIGMEM_MANUAL is not set | 220 | # CONFIG_DISCONTIGMEM_MANUAL is not set |
@@ -201,6 +224,7 @@ CONFIG_HAVE_MEMORY_PRESENT=y | |||
201 | # CONFIG_SPARSEMEM_STATIC is not set | 224 | # CONFIG_SPARSEMEM_STATIC is not set |
202 | CONFIG_SPARSEMEM_EXTREME=y | 225 | CONFIG_SPARSEMEM_EXTREME=y |
203 | CONFIG_MEMORY_HOTPLUG=y | 226 | CONFIG_MEMORY_HOTPLUG=y |
227 | CONFIG_MEMORY_HOTPLUG_SPARSE=y | ||
204 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 228 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
205 | CONFIG_RESOURCES_64BIT=y | 229 | CONFIG_RESOURCES_64BIT=y |
206 | CONFIG_ARCH_MEMORY_PROBE=y | 230 | CONFIG_ARCH_MEMORY_PROBE=y |
@@ -222,6 +246,7 @@ CONFIG_PPC_I8259=y | |||
222 | CONFIG_PCI=y | 246 | CONFIG_PCI=y |
223 | CONFIG_PCI_DOMAINS=y | 247 | CONFIG_PCI_DOMAINS=y |
224 | # CONFIG_PCIEPORTBUS is not set | 248 | # CONFIG_PCIEPORTBUS is not set |
249 | # CONFIG_PCI_MULTITHREAD_PROBE is not set | ||
225 | # CONFIG_PCI_DEBUG is not set | 250 | # CONFIG_PCI_DEBUG is not set |
226 | 251 | ||
227 | # | 252 | # |
@@ -254,6 +279,7 @@ CONFIG_PACKET=y | |||
254 | CONFIG_UNIX=y | 279 | CONFIG_UNIX=y |
255 | CONFIG_XFRM=y | 280 | CONFIG_XFRM=y |
256 | CONFIG_XFRM_USER=m | 281 | CONFIG_XFRM_USER=m |
282 | # CONFIG_XFRM_SUB_POLICY is not set | ||
257 | CONFIG_NET_KEY=m | 283 | CONFIG_NET_KEY=m |
258 | CONFIG_INET=y | 284 | CONFIG_INET=y |
259 | CONFIG_IP_MULTICAST=y | 285 | CONFIG_IP_MULTICAST=y |
@@ -272,10 +298,13 @@ CONFIG_INET_XFRM_TUNNEL=m | |||
272 | CONFIG_INET_TUNNEL=y | 298 | CONFIG_INET_TUNNEL=y |
273 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | 299 | CONFIG_INET_XFRM_MODE_TRANSPORT=y |
274 | CONFIG_INET_XFRM_MODE_TUNNEL=y | 300 | CONFIG_INET_XFRM_MODE_TUNNEL=y |
301 | CONFIG_INET_XFRM_MODE_BEET=y | ||
275 | CONFIG_INET_DIAG=y | 302 | CONFIG_INET_DIAG=y |
276 | CONFIG_INET_TCP_DIAG=y | 303 | CONFIG_INET_TCP_DIAG=y |
277 | # CONFIG_TCP_CONG_ADVANCED is not set | 304 | # CONFIG_TCP_CONG_ADVANCED is not set |
278 | CONFIG_TCP_CONG_BIC=y | 305 | CONFIG_TCP_CONG_CUBIC=y |
306 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
307 | # CONFIG_TCP_MD5SIG is not set | ||
279 | 308 | ||
280 | # | 309 | # |
281 | # IP: Virtual Server Configuration | 310 | # IP: Virtual Server Configuration |
@@ -294,25 +323,31 @@ CONFIG_NETFILTER=y | |||
294 | CONFIG_NETFILTER_NETLINK=y | 323 | CONFIG_NETFILTER_NETLINK=y |
295 | CONFIG_NETFILTER_NETLINK_QUEUE=m | 324 | CONFIG_NETFILTER_NETLINK_QUEUE=m |
296 | CONFIG_NETFILTER_NETLINK_LOG=m | 325 | CONFIG_NETFILTER_NETLINK_LOG=m |
326 | CONFIG_NF_CONNTRACK_ENABLED=m | ||
327 | CONFIG_NF_CONNTRACK_SUPPORT=y | ||
328 | # CONFIG_IP_NF_CONNTRACK_SUPPORT is not set | ||
329 | CONFIG_NF_CONNTRACK=m | ||
330 | CONFIG_NF_CT_ACCT=y | ||
331 | CONFIG_NF_CONNTRACK_MARK=y | ||
332 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
333 | CONFIG_NF_CT_PROTO_GRE=m | ||
334 | CONFIG_NF_CT_PROTO_SCTP=m | ||
335 | CONFIG_NF_CONNTRACK_AMANDA=m | ||
336 | CONFIG_NF_CONNTRACK_FTP=m | ||
337 | CONFIG_NF_CONNTRACK_H323=m | ||
338 | CONFIG_NF_CONNTRACK_IRC=m | ||
339 | CONFIG_NF_CONNTRACK_NETBIOS_NS=m | ||
340 | CONFIG_NF_CONNTRACK_PPTP=m | ||
341 | CONFIG_NF_CONNTRACK_SIP=m | ||
342 | CONFIG_NF_CONNTRACK_TFTP=m | ||
343 | CONFIG_NF_CT_NETLINK=m | ||
297 | # CONFIG_NETFILTER_XTABLES is not set | 344 | # CONFIG_NETFILTER_XTABLES is not set |
298 | 345 | ||
299 | # | 346 | # |
300 | # IP: Netfilter Configuration | 347 | # IP: Netfilter Configuration |
301 | # | 348 | # |
302 | CONFIG_IP_NF_CONNTRACK=m | 349 | CONFIG_NF_CONNTRACK_IPV4=m |
303 | CONFIG_IP_NF_CT_ACCT=y | 350 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y |
304 | CONFIG_IP_NF_CONNTRACK_MARK=y | ||
305 | CONFIG_IP_NF_CONNTRACK_EVENTS=y | ||
306 | CONFIG_IP_NF_CONNTRACK_NETLINK=m | ||
307 | CONFIG_IP_NF_CT_PROTO_SCTP=m | ||
308 | CONFIG_IP_NF_FTP=m | ||
309 | CONFIG_IP_NF_IRC=m | ||
310 | # CONFIG_IP_NF_NETBIOS_NS is not set | ||
311 | CONFIG_IP_NF_TFTP=m | ||
312 | CONFIG_IP_NF_AMANDA=m | ||
313 | # CONFIG_IP_NF_PPTP is not set | ||
314 | # CONFIG_IP_NF_H323 is not set | ||
315 | CONFIG_IP_NF_SIP=m | ||
316 | CONFIG_IP_NF_QUEUE=m | 351 | CONFIG_IP_NF_QUEUE=m |
317 | 352 | ||
318 | # | 353 | # |
@@ -339,7 +374,6 @@ CONFIG_LLC=y | |||
339 | # CONFIG_ATALK is not set | 374 | # CONFIG_ATALK is not set |
340 | # CONFIG_X25 is not set | 375 | # CONFIG_X25 is not set |
341 | # CONFIG_LAPB is not set | 376 | # CONFIG_LAPB is not set |
342 | # CONFIG_NET_DIVERT is not set | ||
343 | # CONFIG_ECONET is not set | 377 | # CONFIG_ECONET is not set |
344 | # CONFIG_WAN_ROUTER is not set | 378 | # CONFIG_WAN_ROUTER is not set |
345 | 379 | ||
@@ -412,6 +446,12 @@ CONFIG_BLK_DEV_INITRD=y | |||
412 | # CONFIG_ATA_OVER_ETH is not set | 446 | # CONFIG_ATA_OVER_ETH is not set |
413 | 447 | ||
414 | # | 448 | # |
449 | # Misc devices | ||
450 | # | ||
451 | # CONFIG_SGI_IOC4 is not set | ||
452 | # CONFIG_TIFM_CORE is not set | ||
453 | |||
454 | # | ||
415 | # ATA/ATAPI/MFM/RLL support | 455 | # ATA/ATAPI/MFM/RLL support |
416 | # | 456 | # |
417 | CONFIG_IDE=y | 457 | CONFIG_IDE=y |
@@ -438,7 +478,6 @@ CONFIG_IDEPCI_SHARE_IRQ=y | |||
438 | # CONFIG_BLK_DEV_OFFBOARD is not set | 478 | # CONFIG_BLK_DEV_OFFBOARD is not set |
439 | CONFIG_BLK_DEV_GENERIC=y | 479 | CONFIG_BLK_DEV_GENERIC=y |
440 | # CONFIG_BLK_DEV_OPTI621 is not set | 480 | # CONFIG_BLK_DEV_OPTI621 is not set |
441 | CONFIG_BLK_DEV_SL82C105=y | ||
442 | CONFIG_BLK_DEV_IDEDMA_PCI=y | 481 | CONFIG_BLK_DEV_IDEDMA_PCI=y |
443 | # CONFIG_BLK_DEV_IDEDMA_FORCED is not set | 482 | # CONFIG_BLK_DEV_IDEDMA_FORCED is not set |
444 | CONFIG_IDEDMA_PCI_AUTO=y | 483 | CONFIG_IDEDMA_PCI_AUTO=y |
@@ -453,6 +492,7 @@ CONFIG_BLK_DEV_AMD74XX=y | |||
453 | # CONFIG_BLK_DEV_CS5530 is not set | 492 | # CONFIG_BLK_DEV_CS5530 is not set |
454 | # CONFIG_BLK_DEV_HPT34X is not set | 493 | # CONFIG_BLK_DEV_HPT34X is not set |
455 | # CONFIG_BLK_DEV_HPT366 is not set | 494 | # CONFIG_BLK_DEV_HPT366 is not set |
495 | # CONFIG_BLK_DEV_JMICRON is not set | ||
456 | # CONFIG_BLK_DEV_SC1200 is not set | 496 | # CONFIG_BLK_DEV_SC1200 is not set |
457 | # CONFIG_BLK_DEV_PIIX is not set | 497 | # CONFIG_BLK_DEV_PIIX is not set |
458 | # CONFIG_BLK_DEV_IT821X is not set | 498 | # CONFIG_BLK_DEV_IT821X is not set |
@@ -461,6 +501,7 @@ CONFIG_BLK_DEV_AMD74XX=y | |||
461 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set | 501 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set |
462 | # CONFIG_BLK_DEV_SVWKS is not set | 502 | # CONFIG_BLK_DEV_SVWKS is not set |
463 | # CONFIG_BLK_DEV_SIIMAGE is not set | 503 | # CONFIG_BLK_DEV_SIIMAGE is not set |
504 | CONFIG_BLK_DEV_SL82C105=y | ||
464 | # CONFIG_BLK_DEV_SLC90E66 is not set | 505 | # CONFIG_BLK_DEV_SLC90E66 is not set |
465 | # CONFIG_BLK_DEV_TRM290 is not set | 506 | # CONFIG_BLK_DEV_TRM290 is not set |
466 | # CONFIG_BLK_DEV_VIA82CXXX is not set | 507 | # CONFIG_BLK_DEV_VIA82CXXX is not set |
@@ -478,6 +519,8 @@ CONFIG_IDEDMA_AUTO=y | |||
478 | # | 519 | # |
479 | # CONFIG_RAID_ATTRS is not set | 520 | # CONFIG_RAID_ATTRS is not set |
480 | CONFIG_SCSI=y | 521 | CONFIG_SCSI=y |
522 | # CONFIG_SCSI_TGT is not set | ||
523 | CONFIG_SCSI_NETLINK=y | ||
481 | CONFIG_SCSI_PROC_FS=y | 524 | CONFIG_SCSI_PROC_FS=y |
482 | 525 | ||
483 | # | 526 | # |
@@ -497,14 +540,16 @@ CONFIG_CHR_DEV_SG=y | |||
497 | CONFIG_SCSI_MULTI_LUN=y | 540 | CONFIG_SCSI_MULTI_LUN=y |
498 | CONFIG_SCSI_CONSTANTS=y | 541 | CONFIG_SCSI_CONSTANTS=y |
499 | # CONFIG_SCSI_LOGGING is not set | 542 | # CONFIG_SCSI_LOGGING is not set |
543 | # CONFIG_SCSI_SCAN_ASYNC is not set | ||
500 | 544 | ||
501 | # | 545 | # |
502 | # SCSI Transport Attributes | 546 | # SCSI Transports |
503 | # | 547 | # |
504 | CONFIG_SCSI_SPI_ATTRS=y | 548 | CONFIG_SCSI_SPI_ATTRS=y |
505 | CONFIG_SCSI_FC_ATTRS=y | 549 | CONFIG_SCSI_FC_ATTRS=y |
506 | CONFIG_SCSI_ISCSI_ATTRS=m | 550 | CONFIG_SCSI_ISCSI_ATTRS=m |
507 | # CONFIG_SCSI_SAS_ATTRS is not set | 551 | # CONFIG_SCSI_SAS_ATTRS is not set |
552 | # CONFIG_SCSI_SAS_LIBSAS is not set | ||
508 | 553 | ||
509 | # | 554 | # |
510 | # SCSI low-level drivers | 555 | # SCSI low-level drivers |
@@ -517,26 +562,12 @@ CONFIG_SCSI_ISCSI_ATTRS=m | |||
517 | # CONFIG_SCSI_AIC7XXX is not set | 562 | # CONFIG_SCSI_AIC7XXX is not set |
518 | # CONFIG_SCSI_AIC7XXX_OLD is not set | 563 | # CONFIG_SCSI_AIC7XXX_OLD is not set |
519 | # CONFIG_SCSI_AIC79XX is not set | 564 | # CONFIG_SCSI_AIC79XX is not set |
565 | # CONFIG_SCSI_AIC94XX is not set | ||
566 | # CONFIG_SCSI_ARCMSR is not set | ||
520 | # CONFIG_MEGARAID_NEWGEN is not set | 567 | # CONFIG_MEGARAID_NEWGEN is not set |
521 | # CONFIG_MEGARAID_LEGACY is not set | 568 | # CONFIG_MEGARAID_LEGACY is not set |
522 | # CONFIG_MEGARAID_SAS is not set | 569 | # CONFIG_MEGARAID_SAS is not set |
523 | CONFIG_ATA=y | ||
524 | # CONFIG_SATA_AHCI is not set | ||
525 | CONFIG_SATA_SVW=y | ||
526 | # CONFIG_SCSI_ATA_PIIX is not set | ||
527 | # CONFIG_SATA_MV is not set | ||
528 | # CONFIG_SATA_NV is not set | ||
529 | # CONFIG_SCSI_PDC_ADMA is not set | ||
530 | # CONFIG_SCSI_HPTIOP is not set | 570 | # CONFIG_SCSI_HPTIOP is not set |
531 | # CONFIG_SATA_QSTOR is not set | ||
532 | # CONFIG_SATA_PROMISE is not set | ||
533 | # CONFIG_SATA_SX4 is not set | ||
534 | # CONFIG_SATA_SIL is not set | ||
535 | # CONFIG_SATA_SIL24 is not set | ||
536 | # CONFIG_SATA_SIS is not set | ||
537 | # CONFIG_SATA_ULI is not set | ||
538 | # CONFIG_SATA_VIA is not set | ||
539 | # CONFIG_SATA_VITESSE is not set | ||
540 | # CONFIG_SCSI_BUSLOGIC is not set | 571 | # CONFIG_SCSI_BUSLOGIC is not set |
541 | # CONFIG_SCSI_DMX3191D is not set | 572 | # CONFIG_SCSI_DMX3191D is not set |
542 | # CONFIG_SCSI_EATA is not set | 573 | # CONFIG_SCSI_EATA is not set |
@@ -546,6 +577,7 @@ CONFIG_SATA_SVW=y | |||
546 | CONFIG_SCSI_IBMVSCSI=y | 577 | CONFIG_SCSI_IBMVSCSI=y |
547 | # CONFIG_SCSI_INITIO is not set | 578 | # CONFIG_SCSI_INITIO is not set |
548 | # CONFIG_SCSI_INIA100 is not set | 579 | # CONFIG_SCSI_INIA100 is not set |
580 | # CONFIG_SCSI_STEX is not set | ||
549 | CONFIG_SCSI_SYM53C8XX_2=y | 581 | CONFIG_SCSI_SYM53C8XX_2=y |
550 | CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 | 582 | CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 |
551 | CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 | 583 | CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 |
@@ -556,10 +588,66 @@ CONFIG_SCSI_IPR_TRACE=y | |||
556 | CONFIG_SCSI_IPR_DUMP=y | 588 | CONFIG_SCSI_IPR_DUMP=y |
557 | # CONFIG_SCSI_QLOGIC_1280 is not set | 589 | # CONFIG_SCSI_QLOGIC_1280 is not set |
558 | # CONFIG_SCSI_QLA_FC is not set | 590 | # CONFIG_SCSI_QLA_FC is not set |
591 | # CONFIG_SCSI_QLA_ISCSI is not set | ||
559 | CONFIG_SCSI_LPFC=m | 592 | CONFIG_SCSI_LPFC=m |
560 | # CONFIG_SCSI_DC395x is not set | 593 | # CONFIG_SCSI_DC395x is not set |
561 | # CONFIG_SCSI_DC390T is not set | 594 | # CONFIG_SCSI_DC390T is not set |
562 | CONFIG_SCSI_DEBUG=m | 595 | CONFIG_SCSI_DEBUG=m |
596 | # CONFIG_SCSI_SRP is not set | ||
597 | |||
598 | # | ||
599 | # Serial ATA (prod) and Parallel ATA (experimental) drivers | ||
600 | # | ||
601 | CONFIG_ATA=y | ||
602 | # CONFIG_SATA_AHCI is not set | ||
603 | CONFIG_SATA_SVW=y | ||
604 | # CONFIG_ATA_PIIX is not set | ||
605 | # CONFIG_SATA_MV is not set | ||
606 | # CONFIG_SATA_NV is not set | ||
607 | # CONFIG_PDC_ADMA is not set | ||
608 | # CONFIG_SATA_QSTOR is not set | ||
609 | # CONFIG_SATA_PROMISE is not set | ||
610 | # CONFIG_SATA_SX4 is not set | ||
611 | # CONFIG_SATA_SIL is not set | ||
612 | # CONFIG_SATA_SIL24 is not set | ||
613 | # CONFIG_SATA_SIS is not set | ||
614 | # CONFIG_SATA_ULI is not set | ||
615 | # CONFIG_SATA_VIA is not set | ||
616 | # CONFIG_SATA_VITESSE is not set | ||
617 | # CONFIG_PATA_ALI is not set | ||
618 | # CONFIG_PATA_AMD is not set | ||
619 | # CONFIG_PATA_ARTOP is not set | ||
620 | # CONFIG_PATA_ATIIXP is not set | ||
621 | # CONFIG_PATA_CMD64X is not set | ||
622 | # CONFIG_PATA_CS5520 is not set | ||
623 | # CONFIG_PATA_CS5530 is not set | ||
624 | # CONFIG_PATA_CYPRESS is not set | ||
625 | # CONFIG_PATA_EFAR is not set | ||
626 | # CONFIG_ATA_GENERIC is not set | ||
627 | # CONFIG_PATA_HPT366 is not set | ||
628 | # CONFIG_PATA_HPT37X is not set | ||
629 | # CONFIG_PATA_HPT3X2N is not set | ||
630 | # CONFIG_PATA_HPT3X3 is not set | ||
631 | # CONFIG_PATA_IT821X is not set | ||
632 | # CONFIG_PATA_JMICRON is not set | ||
633 | # CONFIG_PATA_TRIFLEX is not set | ||
634 | # CONFIG_PATA_MARVELL is not set | ||
635 | # CONFIG_PATA_MPIIX is not set | ||
636 | # CONFIG_PATA_OLDPIIX is not set | ||
637 | # CONFIG_PATA_NETCELL is not set | ||
638 | # CONFIG_PATA_NS87410 is not set | ||
639 | # CONFIG_PATA_OPTI is not set | ||
640 | # CONFIG_PATA_OPTIDMA is not set | ||
641 | # CONFIG_PATA_PDC_OLD is not set | ||
642 | # CONFIG_PATA_RADISYS is not set | ||
643 | # CONFIG_PATA_RZ1000 is not set | ||
644 | # CONFIG_PATA_SC1200 is not set | ||
645 | # CONFIG_PATA_SERVERWORKS is not set | ||
646 | # CONFIG_PATA_PDC2027X is not set | ||
647 | # CONFIG_PATA_SIL680 is not set | ||
648 | # CONFIG_PATA_SIS is not set | ||
649 | # CONFIG_PATA_VIA is not set | ||
650 | # CONFIG_PATA_WINBOND is not set | ||
563 | 651 | ||
564 | # | 652 | # |
565 | # Multi-device support (RAID and LVM) | 653 | # Multi-device support (RAID and LVM) |
@@ -575,6 +663,7 @@ CONFIG_MD_RAID5_RESHAPE=y | |||
575 | CONFIG_MD_MULTIPATH=m | 663 | CONFIG_MD_MULTIPATH=m |
576 | CONFIG_MD_FAULTY=m | 664 | CONFIG_MD_FAULTY=m |
577 | CONFIG_BLK_DEV_DM=y | 665 | CONFIG_BLK_DEV_DM=y |
666 | # CONFIG_DM_DEBUG is not set | ||
578 | CONFIG_DM_CRYPT=m | 667 | CONFIG_DM_CRYPT=m |
579 | CONFIG_DM_SNAPSHOT=m | 668 | CONFIG_DM_SNAPSHOT=m |
580 | CONFIG_DM_MIRROR=m | 669 | CONFIG_DM_MIRROR=m |
@@ -630,11 +719,13 @@ CONFIG_IEEE1394_RAWIO=y | |||
630 | CONFIG_ADB_PMU=y | 719 | CONFIG_ADB_PMU=y |
631 | # CONFIG_ADB_PMU_LED is not set | 720 | # CONFIG_ADB_PMU_LED is not set |
632 | CONFIG_PMAC_SMU=y | 721 | CONFIG_PMAC_SMU=y |
722 | # CONFIG_MAC_EMUMOUSEBTN is not set | ||
633 | CONFIG_THERM_PM72=y | 723 | CONFIG_THERM_PM72=y |
634 | CONFIG_WINDFARM=y | 724 | CONFIG_WINDFARM=y |
635 | CONFIG_WINDFARM_PM81=y | 725 | CONFIG_WINDFARM_PM81=y |
636 | CONFIG_WINDFARM_PM91=y | 726 | CONFIG_WINDFARM_PM91=y |
637 | CONFIG_WINDFARM_PM112=y | 727 | CONFIG_WINDFARM_PM112=y |
728 | # CONFIG_PMAC_RACKMETER is not set | ||
638 | 729 | ||
639 | # | 730 | # |
640 | # Network device support | 731 | # Network device support |
@@ -675,6 +766,7 @@ CONFIG_VORTEX=y | |||
675 | CONFIG_IBMVETH=m | 766 | CONFIG_IBMVETH=m |
676 | CONFIG_NET_PCI=y | 767 | CONFIG_NET_PCI=y |
677 | CONFIG_PCNET32=y | 768 | CONFIG_PCNET32=y |
769 | # CONFIG_PCNET32_NAPI is not set | ||
678 | # CONFIG_AMD8111_ETH is not set | 770 | # CONFIG_AMD8111_ETH is not set |
679 | # CONFIG_ADAPTEC_STARFIRE is not set | 771 | # CONFIG_ADAPTEC_STARFIRE is not set |
680 | # CONFIG_B44 is not set | 772 | # CONFIG_B44 is not set |
@@ -713,7 +805,7 @@ CONFIG_E1000=y | |||
713 | CONFIG_TIGON3=y | 805 | CONFIG_TIGON3=y |
714 | # CONFIG_BNX2 is not set | 806 | # CONFIG_BNX2 is not set |
715 | CONFIG_SPIDER_NET=m | 807 | CONFIG_SPIDER_NET=m |
716 | # CONFIG_MV643XX_ETH is not set | 808 | # CONFIG_QLA3XXX is not set |
717 | 809 | ||
718 | # | 810 | # |
719 | # Ethernet (10000 Mbit) | 811 | # Ethernet (10000 Mbit) |
@@ -723,6 +815,7 @@ CONFIG_IXGB=m | |||
723 | # CONFIG_IXGB_NAPI is not set | 815 | # CONFIG_IXGB_NAPI is not set |
724 | # CONFIG_S2IO is not set | 816 | # CONFIG_S2IO is not set |
725 | # CONFIG_MYRI10GE is not set | 817 | # CONFIG_MYRI10GE is not set |
818 | # CONFIG_NETXEN_NIC is not set | ||
726 | 819 | ||
727 | # | 820 | # |
728 | # Token Ring devices | 821 | # Token Ring devices |
@@ -741,6 +834,7 @@ CONFIG_IBMOL=y | |||
741 | # Wan interfaces | 834 | # Wan interfaces |
742 | # | 835 | # |
743 | # CONFIG_WAN is not set | 836 | # CONFIG_WAN is not set |
837 | CONFIG_ISERIES_VETH=m | ||
744 | # CONFIG_FDDI is not set | 838 | # CONFIG_FDDI is not set |
745 | # CONFIG_HIPPI is not set | 839 | # CONFIG_HIPPI is not set |
746 | CONFIG_PPP=m | 840 | CONFIG_PPP=m |
@@ -753,6 +847,7 @@ CONFIG_PPP_BSDCOMP=m | |||
753 | # CONFIG_PPP_MPPE is not set | 847 | # CONFIG_PPP_MPPE is not set |
754 | CONFIG_PPPOE=m | 848 | CONFIG_PPPOE=m |
755 | # CONFIG_SLIP is not set | 849 | # CONFIG_SLIP is not set |
850 | CONFIG_SLHC=m | ||
756 | # CONFIG_NET_FC is not set | 851 | # CONFIG_NET_FC is not set |
757 | # CONFIG_SHAPER is not set | 852 | # CONFIG_SHAPER is not set |
758 | CONFIG_NETCONSOLE=y | 853 | CONFIG_NETCONSOLE=y |
@@ -775,6 +870,7 @@ CONFIG_NET_POLL_CONTROLLER=y | |||
775 | # Input device support | 870 | # Input device support |
776 | # | 871 | # |
777 | CONFIG_INPUT=y | 872 | CONFIG_INPUT=y |
873 | # CONFIG_INPUT_FF_MEMLESS is not set | ||
778 | 874 | ||
779 | # | 875 | # |
780 | # Userland interfaces | 876 | # Userland interfaces |
@@ -797,6 +893,7 @@ CONFIG_KEYBOARD_ATKBD=y | |||
797 | # CONFIG_KEYBOARD_LKKBD is not set | 893 | # CONFIG_KEYBOARD_LKKBD is not set |
798 | # CONFIG_KEYBOARD_XTKBD is not set | 894 | # CONFIG_KEYBOARD_XTKBD is not set |
799 | # CONFIG_KEYBOARD_NEWTON is not set | 895 | # CONFIG_KEYBOARD_NEWTON is not set |
896 | # CONFIG_KEYBOARD_STOWAWAY is not set | ||
800 | CONFIG_INPUT_MOUSE=y | 897 | CONFIG_INPUT_MOUSE=y |
801 | CONFIG_MOUSE_PS2=y | 898 | CONFIG_MOUSE_PS2=y |
802 | # CONFIG_MOUSE_SERIAL is not set | 899 | # CONFIG_MOUSE_SERIAL is not set |
@@ -850,6 +947,7 @@ CONFIG_LEGACY_PTYS=y | |||
850 | CONFIG_LEGACY_PTY_COUNT=256 | 947 | CONFIG_LEGACY_PTY_COUNT=256 |
851 | CONFIG_HVC_DRIVER=y | 948 | CONFIG_HVC_DRIVER=y |
852 | CONFIG_HVC_CONSOLE=y | 949 | CONFIG_HVC_CONSOLE=y |
950 | CONFIG_HVC_ISERIES=y | ||
853 | CONFIG_HVC_RTAS=y | 951 | CONFIG_HVC_RTAS=y |
854 | CONFIG_HVCS=m | 952 | CONFIG_HVCS=m |
855 | 953 | ||
@@ -868,10 +966,6 @@ CONFIG_GEN_RTC=y | |||
868 | # CONFIG_DTLK is not set | 966 | # CONFIG_DTLK is not set |
869 | # CONFIG_R3964 is not set | 967 | # CONFIG_R3964 is not set |
870 | # CONFIG_APPLICOM is not set | 968 | # CONFIG_APPLICOM is not set |
871 | |||
872 | # | ||
873 | # Ftape, the floppy tape device driver | ||
874 | # | ||
875 | # CONFIG_AGP is not set | 969 | # CONFIG_AGP is not set |
876 | # CONFIG_DRM is not set | 970 | # CONFIG_DRM is not set |
877 | CONFIG_RAW_DRIVER=y | 971 | CONFIG_RAW_DRIVER=y |
@@ -882,7 +976,6 @@ CONFIG_MAX_RAW_DEVS=256 | |||
882 | # TPM devices | 976 | # TPM devices |
883 | # | 977 | # |
884 | # CONFIG_TCG_TPM is not set | 978 | # CONFIG_TCG_TPM is not set |
885 | # CONFIG_TELCLOCK is not set | ||
886 | 979 | ||
887 | # | 980 | # |
888 | # I2C support | 981 | # I2C support |
@@ -947,6 +1040,7 @@ CONFIG_I2C_POWERMAC=y | |||
947 | # | 1040 | # |
948 | # Dallas's 1-wire bus | 1041 | # Dallas's 1-wire bus |
949 | # | 1042 | # |
1043 | # CONFIG_W1 is not set | ||
950 | 1044 | ||
951 | # | 1045 | # |
952 | # Hardware Monitoring support | 1046 | # Hardware Monitoring support |
@@ -955,14 +1049,9 @@ CONFIG_I2C_POWERMAC=y | |||
955 | # CONFIG_HWMON_VID is not set | 1049 | # CONFIG_HWMON_VID is not set |
956 | 1050 | ||
957 | # | 1051 | # |
958 | # Misc devices | ||
959 | # | ||
960 | |||
961 | # | ||
962 | # Multimedia devices | 1052 | # Multimedia devices |
963 | # | 1053 | # |
964 | # CONFIG_VIDEO_DEV is not set | 1054 | # CONFIG_VIDEO_DEV is not set |
965 | CONFIG_VIDEO_V4L2=y | ||
966 | 1055 | ||
967 | # | 1056 | # |
968 | # Digital Video Broadcasting Devices | 1057 | # Digital Video Broadcasting Devices |
@@ -975,6 +1064,7 @@ CONFIG_VIDEO_V4L2=y | |||
975 | # | 1064 | # |
976 | CONFIG_FIRMWARE_EDID=y | 1065 | CONFIG_FIRMWARE_EDID=y |
977 | CONFIG_FB=y | 1066 | CONFIG_FB=y |
1067 | CONFIG_FB_DDC=y | ||
978 | CONFIG_FB_CFB_FILLRECT=y | 1068 | CONFIG_FB_CFB_FILLRECT=y |
979 | CONFIG_FB_CFB_COPYAREA=y | 1069 | CONFIG_FB_CFB_COPYAREA=y |
980 | CONFIG_FB_CFB_IMAGEBLIT=y | 1070 | CONFIG_FB_CFB_IMAGEBLIT=y |
@@ -1011,6 +1101,7 @@ CONFIG_FB_RADEON_I2C=y | |||
1011 | # CONFIG_FB_3DFX is not set | 1101 | # CONFIG_FB_3DFX is not set |
1012 | # CONFIG_FB_VOODOO1 is not set | 1102 | # CONFIG_FB_VOODOO1 is not set |
1013 | # CONFIG_FB_TRIDENT is not set | 1103 | # CONFIG_FB_TRIDENT is not set |
1104 | CONFIG_FB_IBM_GXT4500=y | ||
1014 | # CONFIG_FB_VIRTUAL is not set | 1105 | # CONFIG_FB_VIRTUAL is not set |
1015 | 1106 | ||
1016 | # | 1107 | # |
@@ -1159,6 +1250,11 @@ CONFIG_SND_AOA_SOUNDBUS_I2S=m | |||
1159 | # CONFIG_SOUND_PRIME is not set | 1250 | # CONFIG_SOUND_PRIME is not set |
1160 | 1251 | ||
1161 | # | 1252 | # |
1253 | # HID Devices | ||
1254 | # | ||
1255 | CONFIG_HID=y | ||
1256 | |||
1257 | # | ||
1162 | # USB support | 1258 | # USB support |
1163 | # | 1259 | # |
1164 | CONFIG_USB_ARCH_HAS_HCD=y | 1260 | CONFIG_USB_ARCH_HAS_HCD=y |
@@ -1173,6 +1269,7 @@ CONFIG_USB=y | |||
1173 | CONFIG_USB_DEVICEFS=y | 1269 | CONFIG_USB_DEVICEFS=y |
1174 | # CONFIG_USB_BANDWIDTH is not set | 1270 | # CONFIG_USB_BANDWIDTH is not set |
1175 | # CONFIG_USB_DYNAMIC_MINORS is not set | 1271 | # CONFIG_USB_DYNAMIC_MINORS is not set |
1272 | # CONFIG_USB_MULTITHREAD_PROBE is not set | ||
1176 | # CONFIG_USB_OTG is not set | 1273 | # CONFIG_USB_OTG is not set |
1177 | 1274 | ||
1178 | # | 1275 | # |
@@ -1214,13 +1311,13 @@ CONFIG_USB_STORAGE=m | |||
1214 | # CONFIG_USB_STORAGE_JUMPSHOT is not set | 1311 | # CONFIG_USB_STORAGE_JUMPSHOT is not set |
1215 | # CONFIG_USB_STORAGE_ALAUDA is not set | 1312 | # CONFIG_USB_STORAGE_ALAUDA is not set |
1216 | # CONFIG_USB_STORAGE_ONETOUCH is not set | 1313 | # CONFIG_USB_STORAGE_ONETOUCH is not set |
1314 | # CONFIG_USB_STORAGE_KARMA is not set | ||
1217 | # CONFIG_USB_LIBUSUAL is not set | 1315 | # CONFIG_USB_LIBUSUAL is not set |
1218 | 1316 | ||
1219 | # | 1317 | # |
1220 | # USB Input Devices | 1318 | # USB Input Devices |
1221 | # | 1319 | # |
1222 | CONFIG_USB_HID=y | 1320 | CONFIG_USB_HID=y |
1223 | CONFIG_USB_HIDINPUT=y | ||
1224 | # CONFIG_USB_HIDINPUT_POWERBOOK is not set | 1321 | # CONFIG_USB_HIDINPUT_POWERBOOK is not set |
1225 | # CONFIG_HID_FF is not set | 1322 | # CONFIG_HID_FF is not set |
1226 | CONFIG_USB_HIDDEV=y | 1323 | CONFIG_USB_HIDDEV=y |
@@ -1250,6 +1347,7 @@ CONFIG_USB_HIDDEV=y | |||
1250 | # CONFIG_USB_KAWETH is not set | 1347 | # CONFIG_USB_KAWETH is not set |
1251 | # CONFIG_USB_PEGASUS is not set | 1348 | # CONFIG_USB_PEGASUS is not set |
1252 | # CONFIG_USB_RTL8150 is not set | 1349 | # CONFIG_USB_RTL8150 is not set |
1350 | # CONFIG_USB_USBNET_MII is not set | ||
1253 | # CONFIG_USB_USBNET is not set | 1351 | # CONFIG_USB_USBNET is not set |
1254 | # CONFIG_USB_MON is not set | 1352 | # CONFIG_USB_MON is not set |
1255 | 1353 | ||
@@ -1267,6 +1365,7 @@ CONFIG_USB_HIDDEV=y | |||
1267 | # | 1365 | # |
1268 | # CONFIG_USB_EMI62 is not set | 1366 | # CONFIG_USB_EMI62 is not set |
1269 | # CONFIG_USB_EMI26 is not set | 1367 | # CONFIG_USB_EMI26 is not set |
1368 | # CONFIG_USB_ADUTUX is not set | ||
1270 | # CONFIG_USB_AUERSWALD is not set | 1369 | # CONFIG_USB_AUERSWALD is not set |
1271 | # CONFIG_USB_RIO500 is not set | 1370 | # CONFIG_USB_RIO500 is not set |
1272 | # CONFIG_USB_LEGOTOWER is not set | 1371 | # CONFIG_USB_LEGOTOWER is not set |
@@ -1274,12 +1373,13 @@ CONFIG_USB_HIDDEV=y | |||
1274 | # CONFIG_USB_LED is not set | 1373 | # CONFIG_USB_LED is not set |
1275 | # CONFIG_USB_CYPRESS_CY7C63 is not set | 1374 | # CONFIG_USB_CYPRESS_CY7C63 is not set |
1276 | # CONFIG_USB_CYTHERM is not set | 1375 | # CONFIG_USB_CYTHERM is not set |
1277 | # CONFIG_USB_PHIDGETKIT is not set | 1376 | # CONFIG_USB_PHIDGET is not set |
1278 | # CONFIG_USB_PHIDGETSERVO is not set | ||
1279 | # CONFIG_USB_IDMOUSE is not set | 1377 | # CONFIG_USB_IDMOUSE is not set |
1378 | # CONFIG_USB_FTDI_ELAN is not set | ||
1280 | CONFIG_USB_APPLEDISPLAY=m | 1379 | CONFIG_USB_APPLEDISPLAY=m |
1281 | # CONFIG_USB_SISUSBVGA is not set | 1380 | # CONFIG_USB_SISUSBVGA is not set |
1282 | # CONFIG_USB_LD is not set | 1381 | # CONFIG_USB_LD is not set |
1382 | # CONFIG_USB_TRANCEVIBRATOR is not set | ||
1283 | # CONFIG_USB_TEST is not set | 1383 | # CONFIG_USB_TEST is not set |
1284 | 1384 | ||
1285 | # | 1385 | # |
@@ -1318,6 +1418,7 @@ CONFIG_INFINIBAND=m | |||
1318 | CONFIG_INFINIBAND_ADDR_TRANS=y | 1418 | CONFIG_INFINIBAND_ADDR_TRANS=y |
1319 | CONFIG_INFINIBAND_MTHCA=m | 1419 | CONFIG_INFINIBAND_MTHCA=m |
1320 | CONFIG_INFINIBAND_MTHCA_DEBUG=y | 1420 | CONFIG_INFINIBAND_MTHCA_DEBUG=y |
1421 | # CONFIG_INFINIBAND_AMSO1100 is not set | ||
1321 | CONFIG_INFINIBAND_IPOIB=m | 1422 | CONFIG_INFINIBAND_IPOIB=m |
1322 | CONFIG_INFINIBAND_IPOIB_DEBUG=y | 1423 | CONFIG_INFINIBAND_IPOIB_DEBUG=y |
1323 | # CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set | 1424 | # CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set |
@@ -1347,6 +1448,10 @@ CONFIG_INFINIBAND_ISER=m | |||
1347 | # | 1448 | # |
1348 | 1449 | ||
1349 | # | 1450 | # |
1451 | # Virtualization | ||
1452 | # | ||
1453 | |||
1454 | # | ||
1350 | # File systems | 1455 | # File systems |
1351 | # | 1456 | # |
1352 | CONFIG_EXT2_FS=y | 1457 | CONFIG_EXT2_FS=y |
@@ -1359,6 +1464,7 @@ CONFIG_EXT3_FS=y | |||
1359 | CONFIG_EXT3_FS_XATTR=y | 1464 | CONFIG_EXT3_FS_XATTR=y |
1360 | CONFIG_EXT3_FS_POSIX_ACL=y | 1465 | CONFIG_EXT3_FS_POSIX_ACL=y |
1361 | CONFIG_EXT3_FS_SECURITY=y | 1466 | CONFIG_EXT3_FS_SECURITY=y |
1467 | # CONFIG_EXT4DEV_FS is not set | ||
1362 | CONFIG_JBD=y | 1468 | CONFIG_JBD=y |
1363 | # CONFIG_JBD_DEBUG is not set | 1469 | # CONFIG_JBD_DEBUG is not set |
1364 | CONFIG_FS_MBCACHE=y | 1470 | CONFIG_FS_MBCACHE=y |
@@ -1379,6 +1485,7 @@ CONFIG_XFS_FS=m | |||
1379 | CONFIG_XFS_SECURITY=y | 1485 | CONFIG_XFS_SECURITY=y |
1380 | CONFIG_XFS_POSIX_ACL=y | 1486 | CONFIG_XFS_POSIX_ACL=y |
1381 | # CONFIG_XFS_RT is not set | 1487 | # CONFIG_XFS_RT is not set |
1488 | # CONFIG_GFS2_FS is not set | ||
1382 | # CONFIG_OCFS2_FS is not set | 1489 | # CONFIG_OCFS2_FS is not set |
1383 | # CONFIG_MINIX_FS is not set | 1490 | # CONFIG_MINIX_FS is not set |
1384 | # CONFIG_ROMFS_FS is not set | 1491 | # CONFIG_ROMFS_FS is not set |
@@ -1414,8 +1521,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | |||
1414 | # | 1521 | # |
1415 | CONFIG_PROC_FS=y | 1522 | CONFIG_PROC_FS=y |
1416 | CONFIG_PROC_KCORE=y | 1523 | CONFIG_PROC_KCORE=y |
1524 | CONFIG_PROC_SYSCTL=y | ||
1417 | CONFIG_SYSFS=y | 1525 | CONFIG_SYSFS=y |
1418 | CONFIG_TMPFS=y | 1526 | CONFIG_TMPFS=y |
1527 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
1419 | CONFIG_HUGETLBFS=y | 1528 | CONFIG_HUGETLBFS=y |
1420 | CONFIG_HUGETLB_PAGE=y | 1529 | CONFIG_HUGETLB_PAGE=y |
1421 | CONFIG_RAMFS=y | 1530 | CONFIG_RAMFS=y |
@@ -1540,8 +1649,23 @@ CONFIG_NLS_KOI8_U=m | |||
1540 | CONFIG_NLS_UTF8=m | 1649 | CONFIG_NLS_UTF8=m |
1541 | 1650 | ||
1542 | # | 1651 | # |
1652 | # Distributed Lock Manager | ||
1653 | # | ||
1654 | # CONFIG_DLM is not set | ||
1655 | |||
1656 | # | ||
1657 | # iSeries device drivers | ||
1658 | # | ||
1659 | # CONFIG_VIOCONS is not set | ||
1660 | CONFIG_VIODASD=y | ||
1661 | CONFIG_VIOCD=m | ||
1662 | CONFIG_VIOTAPE=m | ||
1663 | CONFIG_VIOPATH=y | ||
1664 | |||
1665 | # | ||
1543 | # Library routines | 1666 | # Library routines |
1544 | # | 1667 | # |
1668 | CONFIG_BITREVERSE=y | ||
1545 | CONFIG_CRC_CCITT=m | 1669 | CONFIG_CRC_CCITT=m |
1546 | # CONFIG_CRC16 is not set | 1670 | # CONFIG_CRC16 is not set |
1547 | CONFIG_CRC32=y | 1671 | CONFIG_CRC32=y |
@@ -1551,6 +1675,7 @@ CONFIG_ZLIB_DEFLATE=m | |||
1551 | CONFIG_TEXTSEARCH=y | 1675 | CONFIG_TEXTSEARCH=y |
1552 | CONFIG_TEXTSEARCH_KMP=m | 1676 | CONFIG_TEXTSEARCH_KMP=m |
1553 | CONFIG_PLIST=y | 1677 | CONFIG_PLIST=y |
1678 | CONFIG_IOMAP_COPY=y | ||
1554 | 1679 | ||
1555 | # | 1680 | # |
1556 | # Instrumentation Support | 1681 | # Instrumentation Support |
@@ -1563,8 +1688,11 @@ CONFIG_OPROFILE=y | |||
1563 | # Kernel hacking | 1688 | # Kernel hacking |
1564 | # | 1689 | # |
1565 | # CONFIG_PRINTK_TIME is not set | 1690 | # CONFIG_PRINTK_TIME is not set |
1691 | CONFIG_ENABLE_MUST_CHECK=y | ||
1566 | CONFIG_MAGIC_SYSRQ=y | 1692 | CONFIG_MAGIC_SYSRQ=y |
1567 | # CONFIG_UNUSED_SYMBOLS is not set | 1693 | # CONFIG_UNUSED_SYMBOLS is not set |
1694 | CONFIG_DEBUG_FS=y | ||
1695 | # CONFIG_HEADERS_CHECK is not set | ||
1568 | CONFIG_DEBUG_KERNEL=y | 1696 | CONFIG_DEBUG_KERNEL=y |
1569 | CONFIG_LOG_BUF_SHIFT=17 | 1697 | CONFIG_LOG_BUF_SHIFT=17 |
1570 | CONFIG_DETECT_SOFTLOCKUP=y | 1698 | CONFIG_DETECT_SOFTLOCKUP=y |
@@ -1578,16 +1706,19 @@ CONFIG_DEBUG_MUTEXES=y | |||
1578 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | 1706 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set |
1579 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | 1707 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set |
1580 | # CONFIG_DEBUG_KOBJECT is not set | 1708 | # CONFIG_DEBUG_KOBJECT is not set |
1709 | CONFIG_DEBUG_BUGVERBOSE=y | ||
1581 | # CONFIG_DEBUG_INFO is not set | 1710 | # CONFIG_DEBUG_INFO is not set |
1582 | CONFIG_DEBUG_FS=y | ||
1583 | # CONFIG_DEBUG_VM is not set | 1711 | # CONFIG_DEBUG_VM is not set |
1712 | # CONFIG_DEBUG_LIST is not set | ||
1584 | CONFIG_FORCED_INLINING=y | 1713 | CONFIG_FORCED_INLINING=y |
1585 | # CONFIG_RCU_TORTURE_TEST is not set | 1714 | # CONFIG_RCU_TORTURE_TEST is not set |
1586 | CONFIG_DEBUG_STACKOVERFLOW=y | 1715 | CONFIG_DEBUG_STACKOVERFLOW=y |
1587 | CONFIG_DEBUG_STACK_USAGE=y | 1716 | CONFIG_DEBUG_STACK_USAGE=y |
1717 | # CONFIG_HCALL_STATS is not set | ||
1588 | CONFIG_DEBUGGER=y | 1718 | CONFIG_DEBUGGER=y |
1589 | CONFIG_XMON=y | 1719 | CONFIG_XMON=y |
1590 | # CONFIG_XMON_DEFAULT is not set | 1720 | # CONFIG_XMON_DEFAULT is not set |
1721 | CONFIG_XMON_DISASSEMBLY=y | ||
1591 | CONFIG_IRQSTACKS=y | 1722 | CONFIG_IRQSTACKS=y |
1592 | CONFIG_BOOTX_TEXT=y | 1723 | CONFIG_BOOTX_TEXT=y |
1593 | # CONFIG_PPC_EARLY_DEBUG is not set | 1724 | # CONFIG_PPC_EARLY_DEBUG is not set |
@@ -1602,7 +1733,12 @@ CONFIG_BOOTX_TEXT=y | |||
1602 | # Cryptographic options | 1733 | # Cryptographic options |
1603 | # | 1734 | # |
1604 | CONFIG_CRYPTO=y | 1735 | CONFIG_CRYPTO=y |
1736 | CONFIG_CRYPTO_ALGAPI=y | ||
1737 | CONFIG_CRYPTO_BLKCIPHER=y | ||
1738 | CONFIG_CRYPTO_HASH=y | ||
1739 | CONFIG_CRYPTO_MANAGER=y | ||
1605 | CONFIG_CRYPTO_HMAC=y | 1740 | CONFIG_CRYPTO_HMAC=y |
1741 | # CONFIG_CRYPTO_XCBC is not set | ||
1606 | CONFIG_CRYPTO_NULL=m | 1742 | CONFIG_CRYPTO_NULL=m |
1607 | CONFIG_CRYPTO_MD4=m | 1743 | CONFIG_CRYPTO_MD4=m |
1608 | CONFIG_CRYPTO_MD5=y | 1744 | CONFIG_CRYPTO_MD5=y |
@@ -1611,9 +1747,14 @@ CONFIG_CRYPTO_SHA256=m | |||
1611 | CONFIG_CRYPTO_SHA512=m | 1747 | CONFIG_CRYPTO_SHA512=m |
1612 | CONFIG_CRYPTO_WP512=m | 1748 | CONFIG_CRYPTO_WP512=m |
1613 | CONFIG_CRYPTO_TGR192=m | 1749 | CONFIG_CRYPTO_TGR192=m |
1750 | # CONFIG_CRYPTO_GF128MUL is not set | ||
1751 | CONFIG_CRYPTO_ECB=m | ||
1752 | CONFIG_CRYPTO_CBC=y | ||
1753 | # CONFIG_CRYPTO_LRW is not set | ||
1614 | CONFIG_CRYPTO_DES=y | 1754 | CONFIG_CRYPTO_DES=y |
1615 | CONFIG_CRYPTO_BLOWFISH=m | 1755 | CONFIG_CRYPTO_BLOWFISH=m |
1616 | CONFIG_CRYPTO_TWOFISH=m | 1756 | CONFIG_CRYPTO_TWOFISH=m |
1757 | CONFIG_CRYPTO_TWOFISH_COMMON=m | ||
1617 | CONFIG_CRYPTO_SERPENT=m | 1758 | CONFIG_CRYPTO_SERPENT=m |
1618 | CONFIG_CRYPTO_AES=m | 1759 | CONFIG_CRYPTO_AES=m |
1619 | CONFIG_CRYPTO_CAST5=m | 1760 | CONFIG_CRYPTO_CAST5=m |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index e96521530d21..030d300cd71c 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -303,5 +303,8 @@ int main(void) | |||
303 | DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); | 303 | DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); |
304 | DEFINE(CLOCK_REALTIME_RES, TICK_NSEC); | 304 | DEFINE(CLOCK_REALTIME_RES, TICK_NSEC); |
305 | 305 | ||
306 | #ifdef CONFIG_BUG | ||
307 | DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); | ||
308 | #endif | ||
306 | return 0; | 309 | return 0; |
307 | } | 310 | } |
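asm-offsets.c exists so assembly files such as entry_64.S can use C-side structure sizes and offsets: each DEFINE() plants a marker in the compiler's assembly output, and the build rewrites those markers into #defines in asm-offsets.h. The hunk above exports BUG_ENTRY_SIZE only when CONFIG_BUG is set, for the EMIT_BUG_ENTRY user added in entry_64.S below. A much-reduced sketch of the mechanism; the macro and the struct layout here are illustrative, not the kernel's exact definitions:

/* Build with:  gcc -S asm-offsets-demo.c -o - | grep -- '->'
 * The file is only ever compiled to assembly; a build script then turns
 * each '->SYMBOL value' line into '#define SYMBOL value'. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct bug_entry {
        unsigned long  bug_addr;
        long           line;
        const char    *file;
        const char    *function;
};

int main(void)
{
        DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
        return 0;
}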
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 1a3d4de197d2..2551c0884afc 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/asm-offsets.h> | 28 | #include <asm/asm-offsets.h> |
29 | #include <asm/cputable.h> | 29 | #include <asm/cputable.h> |
30 | #include <asm/firmware.h> | 30 | #include <asm/firmware.h> |
31 | #include <asm/bug.h> | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * System calls. | 34 | * System calls. |
@@ -634,19 +635,15 @@ _GLOBAL(enter_rtas) | |||
634 | li r0,0 | 635 | li r0,0 |
635 | mtcr r0 | 636 | mtcr r0 |
636 | 637 | ||
638 | #ifdef CONFIG_BUG | ||
637 | /* There is no way it is acceptable to get here with interrupts enabled, | 639 | /* There is no way it is acceptable to get here with interrupts enabled, |
638 | * check it with the asm equivalent of WARN_ON | 640 | * check it with the asm equivalent of WARN_ON |
639 | */ | 641 | */ |
640 | lbz r0,PACASOFTIRQEN(r13) | 642 | lbz r0,PACASOFTIRQEN(r13) |
641 | 1: tdnei r0,0 | 643 | 1: tdnei r0,0 |
642 | .section __bug_table,"a" | 644 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING |
643 | .llong 1b,__LINE__ + 0x1000000, 1f, 2f | 645 | #endif |
644 | .previous | 646 | |
645 | .section .rodata,"a" | ||
646 | 1: .asciz __FILE__ | ||
647 | 2: .asciz "enter_rtas" | ||
648 | .previous | ||
649 | |||
650 | /* Hard-disable interrupts */ | 647 | /* Hard-disable interrupts */ |
651 | mfmsr r6 | 648 | mfmsr r6 |
652 | rldicl r7,r6,48,1 | 649 | rldicl r7,r6,48,1 |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 89c836d54809..1bb20d841080 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -744,7 +744,8 @@ static int htlb_check_hinted_area(unsigned long addr, unsigned long len) | |||
744 | struct vm_area_struct *vma; | 744 | struct vm_area_struct *vma; |
745 | 745 | ||
746 | vma = find_vma(current->mm, addr); | 746 | vma = find_vma(current->mm, addr); |
747 | if (!vma || ((addr + len) <= vma->vm_start)) | 747 | if (TASK_SIZE - len >= addr && |
748 | (!vma || ((addr + len) <= vma->vm_start))) | ||
748 | return 0; | 749 | return 0; |
749 | 750 | ||
750 | return -ENOMEM; | 751 | return -ENOMEM; |
@@ -815,6 +816,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
815 | return -EINVAL; | 816 | return -EINVAL; |
816 | if (len & ~HPAGE_MASK) | 817 | if (len & ~HPAGE_MASK) |
817 | return -EINVAL; | 818 | return -EINVAL; |
819 | if (len > TASK_SIZE) | ||
820 | return -ENOMEM; | ||
818 | 821 | ||
819 | if (!cpu_has_feature(CPU_FTR_16M_PAGE)) | 822 | if (!cpu_has_feature(CPU_FTR_16M_PAGE)) |
820 | return -EINVAL; | 823 | return -EINVAL; |
@@ -823,9 +826,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
823 | BUG_ON((addr + len) < addr); | 826 | BUG_ON((addr + len) < addr); |
824 | 827 | ||
825 | if (test_thread_flag(TIF_32BIT)) { | 828 | if (test_thread_flag(TIF_32BIT)) { |
826 | /* Paranoia, caller should have dealt with this */ | ||
827 | BUG_ON((addr + len) > 0x100000000UL); | ||
828 | |||
829 | curareas = current->mm->context.low_htlb_areas; | 829 | curareas = current->mm->context.low_htlb_areas; |
830 | 830 | ||
831 | /* First see if we can use the hint address */ | 831 | /* First see if we can use the hint address */ |
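The hugetlbpage.c change above hardens the hint-address path: an oversized len from userspace can make addr + len wrap around, so the bound is now written as TASK_SIZE - len >= addr, and hugetlb_get_unmapped_area additionally rejects len > TASK_SIZE up front. A small standalone demo of why the subtraction form is the safe one (TASK_SIZE below is a stand-in constant):

#include <stdio.h>

#define TASK_SIZE 0x0000400000000000UL  /* stand-in user address-space limit */

static int hint_ok_unsafe(unsigned long addr, unsigned long len)
{
        /* addr + len can wrap to a small value and pass by accident. */
        return addr + len <= TASK_SIZE;
}

static int hint_ok_safe(unsigned long addr, unsigned long len)
{
        /* Reject oversized lengths first, then compare without overflow. */
        if (len > TASK_SIZE)
                return 0;
        return TASK_SIZE - len >= addr;
}

int main(void)
{
        unsigned long addr = 0x0000200000000000UL;
        unsigned long len  = ~0UL - addr + 0x1000;  /* wraps when added to addr */

        printf("unsafe check: %d\n", hint_ok_unsafe(addr, len)); /* 1: bogus pass */
        printf("safe check:   %d\n", hint_ok_safe(addr, len));   /* 0: rejected   */
        return 0;
}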
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c index eaff71e74fb0..0f21bab33f6c 100644 --- a/arch/powerpc/platforms/52xx/lite5200.c +++ b/arch/powerpc/platforms/52xx/lite5200.c | |||
@@ -153,6 +153,7 @@ define_machine(lite52xx) { | |||
153 | .name = "lite52xx", | 153 | .name = "lite52xx", |
154 | .probe = lite52xx_probe, | 154 | .probe = lite52xx_probe, |
155 | .setup_arch = lite52xx_setup_arch, | 155 | .setup_arch = lite52xx_setup_arch, |
156 | .init = mpc52xx_declare_of_platform_devices, | ||
156 | .init_IRQ = mpc52xx_init_irq, | 157 | .init_IRQ = mpc52xx_init_irq, |
157 | .get_irq = mpc52xx_get_irq, | 158 | .get_irq = mpc52xx_get_irq, |
158 | .show_cpuinfo = lite52xx_show_cpuinfo, | 159 | .show_cpuinfo = lite52xx_show_cpuinfo, |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index 8331ff457770..cc40889074bd 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c | |||
@@ -116,11 +116,12 @@ unmap_regs: | |||
116 | if (xlb) iounmap(xlb); | 116 | if (xlb) iounmap(xlb); |
117 | } | 117 | } |
118 | 118 | ||
119 | static int __init | 119 | void __init |
120 | mpc52xx_declare_of_platform_devices(void) | 120 | mpc52xx_declare_of_platform_devices(void) |
121 | { | 121 | { |
122 | /* Find every child of the SOC node and add it to of_platform */ | 122 | /* Find every child of the SOC node and add it to of_platform */ |
123 | return of_platform_bus_probe(NULL, NULL, NULL); | 123 | if (of_platform_bus_probe(NULL, NULL, NULL)) |
124 | printk(KERN_ERR __FILE__ ": " | ||
125 | "Error while probing of_platform bus\n"); | ||
124 | } | 126 | } |
125 | 127 | ||
126 | device_initcall(mpc52xx_declare_of_platform_devices); | ||
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c index e3e929e1b460..c1f4502a3c6a 100644 --- a/arch/powerpc/platforms/iseries/lpevents.c +++ b/arch/powerpc/platforms/iseries/lpevents.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | #include <asm/paca.h> | 19 | #include <asm/paca.h> |
20 | #include <asm/firmware.h> | ||
20 | #include <asm/iseries/it_lp_queue.h> | 21 | #include <asm/iseries/it_lp_queue.h> |
21 | #include <asm/iseries/hv_lp_event.h> | 22 | #include <asm/iseries/hv_lp_event.h> |
22 | #include <asm/iseries/hv_call_event.h> | 23 | #include <asm/iseries/hv_call_event.h> |
@@ -318,6 +319,9 @@ static int __init proc_lpevents_init(void) | |||
318 | { | 319 | { |
319 | struct proc_dir_entry *e; | 320 | struct proc_dir_entry *e; |
320 | 321 | ||
322 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | ||
323 | return 0; | ||
324 | |||
321 | e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL); | 325 | e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL); |
322 | if (e) | 326 | if (e) |
323 | e->proc_fops = &proc_lpevents_operations; | 327 | e->proc_fops = &proc_lpevents_operations; |
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index cff15ae24f6b..1ad0e4aaad1a 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
39 | #include <asm/paca.h> | 39 | #include <asm/paca.h> |
40 | #include <asm/abs_addr.h> | 40 | #include <asm/abs_addr.h> |
41 | #include <asm/firmware.h> | ||
41 | #include <asm/iseries/vio.h> | 42 | #include <asm/iseries/vio.h> |
42 | #include <asm/iseries/mf.h> | 43 | #include <asm/iseries/mf.h> |
43 | #include <asm/iseries/hv_lp_config.h> | 44 | #include <asm/iseries/hv_lp_config.h> |
@@ -1235,6 +1236,9 @@ static int __init mf_proc_init(void) | |||
1235 | char name[2]; | 1236 | char name[2]; |
1236 | int i; | 1237 | int i; |
1237 | 1238 | ||
1239 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | ||
1240 | return 0; | ||
1241 | |||
1238 | mf_proc_root = proc_mkdir("iSeries/mf", NULL); | 1242 | mf_proc_root = proc_mkdir("iSeries/mf", NULL); |
1239 | if (!mf_proc_root) | 1243 | if (!mf_proc_root) |
1240 | return 1; | 1244 | return 1; |
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c index c241413629ac..b54e37101e69 100644 --- a/arch/powerpc/platforms/iseries/proc.c +++ b/arch/powerpc/platforms/iseries/proc.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
25 | #include <asm/time.h> | 25 | #include <asm/time.h> |
26 | #include <asm/lppaca.h> | 26 | #include <asm/lppaca.h> |
27 | #include <asm/firmware.h> | ||
27 | #include <asm/iseries/hv_call_xm.h> | 28 | #include <asm/iseries/hv_call_xm.h> |
28 | 29 | ||
29 | #include "processor_vpd.h" | 30 | #include "processor_vpd.h" |
@@ -31,7 +32,12 @@ | |||
31 | 32 | ||
32 | static int __init iseries_proc_create(void) | 33 | static int __init iseries_proc_create(void) |
33 | { | 34 | { |
34 | struct proc_dir_entry *e = proc_mkdir("iSeries", 0); | 35 | struct proc_dir_entry *e; |
36 | |||
37 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | ||
38 | return 0; | ||
39 | |||
40 | e = proc_mkdir("iSeries", 0); | ||
35 | if (!e) | 41 | if (!e) |
36 | return 1; | 42 | return 1; |
37 | 43 | ||
@@ -106,6 +112,9 @@ static int __init iseries_proc_init(void) | |||
106 | { | 112 | { |
107 | struct proc_dir_entry *e; | 113 | struct proc_dir_entry *e; |
108 | 114 | ||
115 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | ||
116 | return 0; | ||
117 | |||
109 | e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL); | 118 | e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL); |
110 | if (e) | 119 | if (e) |
111 | e->proc_fops = &proc_titantod_operations; | 120 | e->proc_fops = &proc_titantod_operations; |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index bdf2afbb60c1..cce7e309340c 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -527,7 +527,8 @@ static void __init iSeries_fixup_klimit(void) | |||
527 | static int __init iSeries_src_init(void) | 527 | static int __init iSeries_src_init(void) |
528 | { | 528 | { |
529 | /* clear the progress line */ | 529 | /* clear the progress line */ |
530 | ppc_md.progress(" ", 0xffff); | 530 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
531 | ppc_md.progress(" ", 0xffff); | ||
531 | return 0; | 532 | return 0; |
532 | } | 533 | } |
533 | 534 | ||
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index 84e7ee2c086f..a6799ed34a66 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/system.h> | 42 | #include <asm/system.h> |
43 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
44 | #include <asm/prom.h> | 44 | #include <asm/prom.h> |
45 | #include <asm/firmware.h> | ||
45 | #include <asm/iseries/hv_types.h> | 46 | #include <asm/iseries/hv_types.h> |
46 | #include <asm/iseries/hv_lp_event.h> | 47 | #include <asm/iseries/hv_lp_event.h> |
47 | #include <asm/iseries/hv_lp_config.h> | 48 | #include <asm/iseries/hv_lp_config.h> |
@@ -183,6 +184,9 @@ static int __init vio_proc_init(void) | |||
183 | { | 184 | { |
184 | struct proc_dir_entry *e; | 185 | struct proc_dir_entry *e; |
185 | 186 | ||
187 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | ||
188 | return 0; | ||
189 | |||
186 | e = create_proc_entry("iSeries/config", 0, NULL); | 190 | e = create_proc_entry("iSeries/config", 0, NULL); |
187 | if (e) | 191 | if (e) |
188 | e->proc_fops = &proc_viopath_operations; | 192 | e->proc_fops = &proc_viopath_operations; |
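The lpevents.c, mf.c, proc.c, setup.c and viopath.c hunks above all make the same repair: on combined pSeries/iSeries kernels these initcalls run on every machine, so each one now bails out early unless the firmware reports the iSeries feature (and iSeries_src_init only touches ppc_md.progress under the same test). The shape of the guard, as a reduced mock with a stubbed feature query rather than real kernel code:

#include <stdio.h>

#define FW_FEATURE_ISERIES 0x1  /* stand-in for the kernel's feature bit */

/* Stub: in the kernel this checks a feature mask established at boot. */
static int firmware_has_feature(unsigned long feature)
{
        unsigned long active_features = 0;  /* pretend we booted on pSeries */

        return (active_features & feature) != 0;
}

static int create_iseries_proc_entries(void)
{
        printf("creating /proc/iSeries entries\n");
        return 0;
}

/* Mirrors the guarded initcall pattern in the hunks above. */
static int proc_lpevents_init(void)
{
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return 0;   /* not an iSeries partition: do nothing, succeed */

        return create_iseries_proc_entries();
}

int main(void)
{
        return proc_lpevents_init();
}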
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index f12d5c69e74d..50855d4fd5a0 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c | |||
@@ -254,7 +254,6 @@ static void __init maple_init_IRQ(void) | |||
254 | printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", | 254 | printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", |
255 | openpic_addr, has_isus); | 255 | openpic_addr, has_isus); |
256 | } | 256 | } |
257 | of_node_put(root); | ||
258 | 257 | ||
259 | BUG_ON(openpic_addr == 0); | 258 | BUG_ON(openpic_addr == 0); |
260 | 259 | ||
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 89d6e295dbf7..bea7d1bb1a3b 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c | |||
@@ -129,7 +129,6 @@ static __init void pas_init_IRQ(void) | |||
129 | } | 129 | } |
130 | openpic_addr = of_read_number(opprop, naddr); | 130 | openpic_addr = of_read_number(opprop, naddr); |
131 | printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); | 131 | printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); |
132 | of_node_put(root); | ||
133 | 132 | ||
134 | mpic = mpic_alloc(mpic_node, openpic_addr, MPIC_PRIMARY, 0, 0, | 133 | mpic = mpic_alloc(mpic_node, openpic_addr, MPIC_PRIMARY, 0, 0, |
135 | " PAS-OPIC "); | 134 | " PAS-OPIC "); |
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index c00cfed7af2c..5c7e38789897 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S | |||
@@ -26,7 +26,7 @@ | |||
26 | BEGIN_FTR_SECTION; \ | 26 | BEGIN_FTR_SECTION; \ |
27 | mfspr r0,SPRN_PURR; /* get PURR and */ \ | 27 | mfspr r0,SPRN_PURR; /* get PURR and */ \ |
28 | std r0,STK_PARM(r6)(r1); /* save for later */ \ | 28 | std r0,STK_PARM(r6)(r1); /* save for later */ \ |
29 | END_FTR_SECTION_IFCLR(CPU_FTR_PURR); | 29 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * postcall is performed immediately before function return which | 32 | * postcall is performed immediately before function return which |
@@ -43,7 +43,7 @@ BEGIN_FTR_SECTION; \ | |||
43 | mfspr r8,SPRN_PURR; /* PURR after */ \ | 43 | mfspr r8,SPRN_PURR; /* PURR after */ \ |
44 | ld r6,STK_PARM(r6)(r1); /* PURR before */ \ | 44 | ld r6,STK_PARM(r6)(r1); /* PURR before */ \ |
45 | subf r6,r6,r8; /* delta */ \ | 45 | subf r6,r6,r8; /* delta */ \ |
46 | END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | 46 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ |
47 | ld r5,STK_PARM(r5)(r1); /* timebase before */ \ | 47 | ld r5,STK_PARM(r5)(r1); /* timebase before */ \ |
48 | subf r5,r5,r7; /* time delta */ \ | 48 | subf r5,r5,r7; /* time delta */ \ |
49 | \ | 49 | \ |
@@ -66,7 +66,7 @@ BEGIN_FTR_SECTION; \ | |||
66 | ld r7,HCALL_STAT_PURR(r4); /* PURR */ \ | 66 | ld r7,HCALL_STAT_PURR(r4); /* PURR */ \ |
67 | add r7,r7,r6; \ | 67 | add r7,r7,r6; \ |
68 | std r7,HCALL_STAT_PURR(r4); \ | 68 | std r7,HCALL_STAT_PURR(r4); \ |
69 | END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | 69 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ |
70 | 1: | 70 | 1: |
71 | #else | 71 | #else |
72 | #define HCALL_INST_PRECALL | 72 | #define HCALL_INST_PRECALL |
@@ -145,6 +145,7 @@ _GLOBAL(plpar_hcall9) | |||
145 | 145 | ||
146 | HVSC /* invoke the hypervisor */ | 146 | HVSC /* invoke the hypervisor */ |
147 | 147 | ||
148 | mr r0,r12 | ||
148 | ld r12,STK_PARM(r4)(r1) | 149 | ld r12,STK_PARM(r4)(r1) |
149 | std r4, 0(r12) | 150 | std r4, 0(r12) |
150 | std r5, 8(r12) | 151 | std r5, 8(r12) |
@@ -154,7 +155,7 @@ _GLOBAL(plpar_hcall9) | |||
154 | std r9, 40(r12) | 155 | std r9, 40(r12) |
155 | std r10,48(r12) | 156 | std r10,48(r12) |
156 | std r11,56(r12) | 157 | std r11,56(r12) |
157 | std r12,64(r12) | 158 | std r0, 64(r12) |
158 | 159 | ||
159 | HCALL_INST_POSTCALL | 160 | HCALL_INST_POSTCALL |
160 | 161 | ||
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index 80181c4c49eb..3ddc04925d50 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c | |||
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); | |||
34 | */ | 34 | */ |
35 | static void *hc_start(struct seq_file *m, loff_t *pos) | 35 | static void *hc_start(struct seq_file *m, loff_t *pos) |
36 | { | 36 | { |
37 | if ((int)*pos < HCALL_STAT_ARRAY_SIZE) | 37 | if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1)) |
38 | return (void *)(unsigned long)(*pos + 1); | 38 | return (void *)(unsigned long)(*pos + 1); |
39 | 39 | ||
40 | return NULL; | 40 | return NULL; |
@@ -57,7 +57,7 @@ static int hc_show(struct seq_file *m, void *p) | |||
57 | struct hcall_stats *hs = (struct hcall_stats *)m->private; | 57 | struct hcall_stats *hs = (struct hcall_stats *)m->private; |
58 | 58 | ||
59 | if (hs[h_num].num_calls) { | 59 | if (hs[h_num].num_calls) { |
60 | if (!cpu_has_feature(CPU_FTR_PURR)) | 60 | if (cpu_has_feature(CPU_FTR_PURR)) |
61 | seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2, | 61 | seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2, |
62 | hs[h_num].num_calls, | 62 | hs[h_num].num_calls, |
63 | hs[h_num].tb_total, | 63 | hs[h_num].tb_total, |
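The hvCall_inst changes tighten the seq_file iterator bound and invert the CPU_FTR_PURR test so the extra columns are printed only when the PURR register actually exists. A hedged sketch of the iterator shape used by such a statistics file, with an illustrative array size:

#include <linux/seq_file.h>

#define EXAMPLE_STAT_SIZE 128   /* illustrative */

static void *example_start(struct seq_file *m, loff_t *pos)
{
        /* Offsets are shifted by one so that NULL can signal "done". */
        if ((int)*pos < (EXAMPLE_STAT_SIZE - 1))
                return (void *)(unsigned long)(*pos + 1);
        return NULL;
}

static void *example_next(struct seq_file *m, void *p, loff_t *pos)
{
        ++*pos;
        return example_start(m, pos);
}

static void example_stop(struct seq_file *m, void *p)
{
}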
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index b5b2b1103de8..81d172d65038 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -224,7 +224,6 @@ static void xics_unmask_irq(unsigned int virq) | |||
224 | static void xics_mask_real_irq(unsigned int irq) | 224 | static void xics_mask_real_irq(unsigned int irq) |
225 | { | 225 | { |
226 | int call_status; | 226 | int call_status; |
227 | unsigned int server; | ||
228 | 227 | ||
229 | if (irq == XICS_IPI) | 228 | if (irq == XICS_IPI) |
230 | return; | 229 | return; |
@@ -236,9 +235,9 @@ static void xics_mask_real_irq(unsigned int irq) | |||
236 | return; | 235 | return; |
237 | } | 236 | } |
238 | 237 | ||
239 | server = get_irq_server(irq); | ||
240 | /* Have to set XIVE to 0xff to be able to remove a slot */ | 238 | /* Have to set XIVE to 0xff to be able to remove a slot */ |
241 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff); | 239 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, |
240 | default_server, 0xff); | ||
242 | if (call_status != 0) { | 241 | if (call_status != 0) { |
243 | printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)" | 242 | printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)" |
244 | " returned %d\n", irq, call_status); | 243 | " returned %d\n", irq, call_status); |
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 04d4917eb303..2621a7e72d2d 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile | |||
@@ -12,7 +12,6 @@ obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o | |||
12 | obj-$(CONFIG_FSL_SOC) += fsl_soc.o | 12 | obj-$(CONFIG_FSL_SOC) += fsl_soc.o |
13 | obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o | 13 | obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o |
14 | obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ | 14 | obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ |
15 | obj-$(CONFIG_MTD) += rom.o | ||
16 | 15 | ||
17 | ifeq ($(CONFIG_PPC_MERGE),y) | 16 | ifeq ($(CONFIG_PPC_MERGE),y) |
18 | obj-$(CONFIG_PPC_I8259) += i8259.o | 17 | obj-$(CONFIG_PPC_I8259) += i8259.o |
@@ -21,5 +20,6 @@ endif | |||
21 | 20 | ||
22 | # Temporary hack until we have migrated to asm-powerpc | 21 | # Temporary hack until we have migrated to asm-powerpc |
23 | ifeq ($(ARCH),powerpc) | 22 | ifeq ($(ARCH),powerpc) |
23 | obj-$(CONFIG_MTD) += rom.o | ||
24 | obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o | 24 | obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o |
25 | endif | 25 | endif |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 4388b3309e0c..eca507050e47 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -164,11 +164,14 @@ startup_continue: | |||
164 | srl %r7,28 | 164 | srl %r7,28 |
165 | clr %r6,%r7 # compare cc with last access code | 165 | clr %r6,%r7 # compare cc with last access code |
166 | be .Lsame-.LPG1(%r13) | 166 | be .Lsame-.LPG1(%r13) |
167 | b .Lchkmem-.LPG1(%r13) | 167 | lhi %r8,0 # no program checks |
168 | b .Lsavchk-.LPG1(%r13) | ||
168 | .Lsame: | 169 | .Lsame: |
169 | ar %r5,%r1 # add 128KB to end of chunk | 170 | ar %r5,%r1 # add 128KB to end of chunk |
170 | bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop | 171 | bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop |
171 | .Lchkmem: # > 2GB or tprot got a program check | 172 | .Lchkmem: # > 2GB or tprot got a program check |
173 | lhi %r8,1 # set program check flag | ||
174 | .Lsavchk: | ||
172 | clr %r4,%r5 # chunk size > 0? | 175 | clr %r4,%r5 # chunk size > 0? |
173 | be .Lchkloop-.LPG1(%r13) | 176 | be .Lchkloop-.LPG1(%r13) |
174 | st %r4,0(%r3) # store start address of chunk | 177 | st %r4,0(%r3) # store start address of chunk |
@@ -190,8 +193,15 @@ startup_continue: | |||
190 | je .Ldonemem # if not, leave | 193 | je .Ldonemem # if not, leave |
191 | chi %r10,0 # do we have chunks left? | 194 | chi %r10,0 # do we have chunks left? |
192 | je .Ldonemem | 195 | je .Ldonemem |
196 | chi %r8,1 # program check ? | ||
197 | je .Lpgmchk | ||
198 | lr %r4,%r5 # potential new chunk | ||
199 | alr %r5,%r1 # add 128KB to end of chunk | ||
200 | j .Llpcnt | ||
201 | .Lpgmchk: | ||
193 | alr %r5,%r1 # add 128KB to end of chunk | 202 | alr %r5,%r1 # add 128KB to end of chunk |
194 | lr %r4,%r5 # potential new chunk | 203 | lr %r4,%r5 # potential new chunk |
204 | .Llpcnt: | ||
195 | clr %r5,%r9 # should we go on? | 205 | clr %r5,%r9 # should we go on? |
196 | jl .Lloop | 206 | jl .Lloop |
197 | .Ldonemem: | 207 | .Ldonemem: |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index c526279e1123..6ba3f4512dd1 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -172,12 +172,15 @@ startup_continue: | |||
172 | srl %r7,28 | 172 | srl %r7,28 |
173 | clr %r6,%r7 # compare cc with last access code | 173 | clr %r6,%r7 # compare cc with last access code |
174 | je .Lsame | 174 | je .Lsame |
175 | j .Lchkmem | 175 | lghi %r8,0 # no program checks |
176 | j .Lsavchk | ||
176 | .Lsame: | 177 | .Lsame: |
177 | algr %r5,%r1 # add 128KB to end of chunk | 178 | algr %r5,%r1 # add 128KB to end of chunk |
178 | # no need to check here, | 179 | # no need to check here, |
179 | brc 12,.Lloop # this is the same chunk | 180 | brc 12,.Lloop # this is the same chunk |
180 | .Lchkmem: # > 16EB or tprot got a program check | 181 | .Lchkmem: # > 16EB or tprot got a program check |
182 | lghi %r8,1 # set program check flag | ||
183 | .Lsavchk: | ||
181 | clgr %r4,%r5 # chunk size > 0? | 184 | clgr %r4,%r5 # chunk size > 0? |
182 | je .Lchkloop | 185 | je .Lchkloop |
183 | stg %r4,0(%r3) # store start address of chunk | 186 | stg %r4,0(%r3) # store start address of chunk |
@@ -204,8 +207,15 @@ startup_continue: | |||
204 | chi %r10, 0 # do we have chunks left? | 207 | chi %r10, 0 # do we have chunks left? |
205 | je .Ldonemem | 208 | je .Ldonemem |
206 | .Lhsaskip: | 209 | .Lhsaskip: |
210 | chi %r8,1 # program check ? | ||
211 | je .Lpgmchk | ||
212 | lgr %r4,%r5 # potential new chunk | ||
213 | algr %r5,%r1 # add 128KB to end of chunk | ||
214 | j .Llpcnt | ||
215 | .Lpgmchk: | ||
207 | algr %r5,%r1 # add 128KB to end of chunk | 216 | algr %r5,%r1 # add 128KB to end of chunk |
208 | lgr %r4,%r5 # potential new chunk | 217 | lgr %r4,%r5 # potential new chunk |
218 | .Llpcnt: | ||
209 | clgr %r5,%r9 # should we go on? | 219 | clgr %r5,%r9 # should we go on? |
210 | jl .Lloop | 220 | jl .Lloop |
211 | .Ldonemem: | 221 | .Ldonemem: |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 49ef206ec880..5d8ee3baac14 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -476,7 +476,7 @@ static void __init setup_memory_end(void) | |||
476 | int i; | 476 | int i; |
477 | 477 | ||
478 | memory_size = real_size = 0; | 478 | memory_size = real_size = 0; |
479 | max_phys = VMALLOC_END - VMALLOC_MIN_SIZE; | 479 | max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; |
480 | memory_end &= PAGE_MASK; | 480 | memory_end &= PAGE_MASK; |
481 | 481 | ||
482 | max_mem = memory_end ? min(max_phys, memory_end) : max_phys; | 482 | max_mem = memory_end ? min(max_phys, memory_end) : max_phys; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 19090f7d4f51..c0cd255fddbd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -794,7 +794,10 @@ static int __init topology_init(void) | |||
794 | int ret; | 794 | int ret; |
795 | 795 | ||
796 | for_each_possible_cpu(cpu) { | 796 | for_each_possible_cpu(cpu) { |
797 | ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu); | 797 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
798 | |||
799 | c->hotpluggable = 1; | ||
800 | ret = register_cpu(c, cpu); | ||
798 | if (ret) | 801 | if (ret) |
799 | printk(KERN_WARNING "topology_init: register_cpu %d " | 802 | printk(KERN_WARNING "topology_init: register_cpu %d " |
800 | "failed (%d)\n", cpu, ret); | 803 | "failed (%d)\n", cpu, ret); |
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 633249c3ba91..49c3e46b4065 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/hardirq.h> | ||
11 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
12 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
13 | #include <asm/futex.h> | 14 | #include <asm/futex.h> |
@@ -18,6 +19,8 @@ static inline int __handle_fault(struct mm_struct *mm, unsigned long address, | |||
18 | struct vm_area_struct *vma; | 19 | struct vm_area_struct *vma; |
19 | int ret = -EFAULT; | 20 | int ret = -EFAULT; |
20 | 21 | ||
22 | if (in_atomic()) | ||
23 | return ret; | ||
21 | down_read(&mm->mmap_sem); | 24 | down_read(&mm->mmap_sem); |
22 | vma = find_vma(mm, address); | 25 | vma = find_vma(mm, address); |
23 | if (unlikely(!vma)) | 26 | if (unlikely(!vma)) |
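The new in_atomic() test keeps the page-table uaccess path from taking mmap_sem, a sleeping lock, when the fault arrives from atomic context. A simplified sketch of the guard, with the actual fixup logic elided:

#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/mm.h>

static int example_handle_fault(struct mm_struct *mm, unsigned long address)
{
        struct vm_area_struct *vma;
        int ret = -EFAULT;

        /* mmap_sem may sleep; never acquire it from atomic context. */
        if (in_atomic())
                return ret;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (vma && vma->vm_start <= address)
                ret = 0;        /* the real code faults the page in here */
        up_read(&mm->mmap_sem);
        return ret;
}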
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index bbaca66fa293..56a0214e9928 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c | |||
@@ -258,8 +258,6 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) | |||
258 | { | 258 | { |
259 | int oldval = 0, newval, ret; | 259 | int oldval = 0, newval, ret; |
260 | 260 | ||
261 | pagefault_disable(); | ||
262 | |||
263 | switch (op) { | 261 | switch (op) { |
264 | case FUTEX_OP_SET: | 262 | case FUTEX_OP_SET: |
265 | __futex_atomic_op("lr %2,%5\n", | 263 | __futex_atomic_op("lr %2,%5\n", |
@@ -284,7 +282,6 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) | |||
284 | default: | 282 | default: |
285 | ret = -ENOSYS; | 283 | ret = -ENOSYS; |
286 | } | 284 | } |
287 | pagefault_enable(); | ||
288 | *old = oldval; | 285 | *old = oldval; |
289 | return ret; | 286 | return ret; |
290 | } | 287 | } |
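The pagefault_disable()/pagefault_enable() pair is dropped here because the generic futex code already brackets the architecture helper, so keeping both would nest the disable. Roughly, the caller-side pattern looks like the sketch below (a hedged illustration, not the exact kernel/futex.c code):

#include <linux/uaccess.h>
#include <asm/futex.h>

static int example_atomic_user_op(int encoded_op, int __user *uaddr)
{
        int ret;

        /*
         * With page faults disabled, the helper must return -EFAULT
         * instead of sleeping in the fault handler.
         */
        pagefault_disable();
        ret = futex_atomic_op_inuser(encoded_op, uaddr);
        pagefault_enable();
        return ret;
}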
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c index 829698f6d049..49802f1bee94 100644 --- a/arch/x86_64/kernel/early-quirks.c +++ b/arch/x86_64/kernel/early-quirks.c | |||
@@ -69,6 +69,11 @@ static void nvidia_bugs(void) | |||
69 | 69 | ||
70 | static void ati_bugs(void) | 70 | static void ati_bugs(void) |
71 | { | 71 | { |
72 | if (timer_over_8254 == 1) { | ||
73 | timer_over_8254 = 0; | ||
74 | printk(KERN_INFO | ||
75 | "ATI board detected. Disabling timer routing over 8254.\n"); | ||
76 | } | ||
72 | } | 77 | } |
73 | 78 | ||
74 | static void intel_bugs(void) | 79 | static void intel_bugs(void) |
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 2a1dcd5f69c2..d7bad90a5ad8 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c | |||
@@ -55,6 +55,10 @@ int sis_apic_bug; /* not actually supported, dummy for compile */ | |||
55 | 55 | ||
56 | static int no_timer_check; | 56 | static int no_timer_check; |
57 | 57 | ||
58 | static int disable_timer_pin_1 __initdata; | ||
59 | |||
60 | int timer_over_8254 __initdata = 1; | ||
61 | |||
58 | /* Where if anywhere is the i8259 connect in external int mode */ | 62 | /* Where if anywhere is the i8259 connect in external int mode */ |
59 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; | 63 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; |
60 | 64 | ||
@@ -350,6 +354,29 @@ static int __init disable_ioapic_setup(char *str) | |||
350 | } | 354 | } |
351 | early_param("noapic", disable_ioapic_setup); | 355 | early_param("noapic", disable_ioapic_setup); |
352 | 356 | ||
357 | /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ | ||
358 | static int __init disable_timer_pin_setup(char *arg) | ||
359 | { | ||
360 | disable_timer_pin_1 = 1; | ||
361 | return 1; | ||
362 | } | ||
363 | __setup("disable_timer_pin_1", disable_timer_pin_setup); | ||
364 | |||
365 | static int __init setup_disable_8254_timer(char *s) | ||
366 | { | ||
367 | timer_over_8254 = -1; | ||
368 | return 1; | ||
369 | } | ||
370 | static int __init setup_enable_8254_timer(char *s) | ||
371 | { | ||
372 | timer_over_8254 = 2; | ||
373 | return 1; | ||
374 | } | ||
375 | |||
376 | __setup("disable_8254_timer", setup_disable_8254_timer); | ||
377 | __setup("enable_8254_timer", setup_enable_8254_timer); | ||
378 | |||
379 | |||
353 | /* | 380 | /* |
354 | * Find the IRQ entry number of a certain pin. | 381 | * Find the IRQ entry number of a certain pin. |
355 | */ | 382 | */ |
@@ -1568,33 +1595,10 @@ static inline void unlock_ExtINT_logic(void) | |||
1568 | * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ | 1595 | * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ |
1569 | * is so screwy. Thanks to Brian Perkins for testing/hacking this beast | 1596 | * is so screwy. Thanks to Brian Perkins for testing/hacking this beast |
1570 | * fanatically on his truly buggy board. | 1597 | * fanatically on his truly buggy board. |
1598 | * | ||
1599 | * FIXME: really need to revamp this for modern platforms only. | ||
1571 | */ | 1600 | */ |
1572 | 1601 | static inline void check_timer(void) | |
1573 | static int try_apic_pin(int apic, int pin, char *msg) | ||
1574 | { | ||
1575 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
1576 | "..TIMER: trying IO-APIC=%d PIN=%d %s", | ||
1577 | apic, pin, msg); | ||
1578 | |||
1579 | /* | ||
1580 | * Ok, does IRQ0 through the IOAPIC work? | ||
1581 | */ | ||
1582 | if (!no_timer_check && timer_irq_works()) { | ||
1583 | nmi_watchdog_default(); | ||
1584 | if (nmi_watchdog == NMI_IO_APIC) { | ||
1585 | disable_8259A_irq(0); | ||
1586 | setup_nmi(); | ||
1587 | enable_8259A_irq(0); | ||
1588 | } | ||
1589 | return 1; | ||
1590 | } | ||
1591 | clear_IO_APIC_pin(apic, pin); | ||
1592 | apic_printk(APIC_QUIET, KERN_ERR " .. failed\n"); | ||
1593 | return 0; | ||
1594 | } | ||
1595 | |||
1596 | /* The function from hell */ | ||
1597 | static void check_timer(void) | ||
1598 | { | 1602 | { |
1599 | int apic1, pin1, apic2, pin2; | 1603 | int apic1, pin1, apic2, pin2; |
1600 | int vector; | 1604 | int vector; |
@@ -1615,43 +1619,61 @@ static void check_timer(void) | |||
1615 | */ | 1619 | */ |
1616 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); | 1620 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); |
1617 | init_8259A(1); | 1621 | init_8259A(1); |
1622 | if (timer_over_8254 > 0) | ||
1623 | enable_8259A_irq(0); | ||
1618 | 1624 | ||
1619 | pin1 = find_isa_irq_pin(0, mp_INT); | 1625 | pin1 = find_isa_irq_pin(0, mp_INT); |
1620 | apic1 = find_isa_irq_apic(0, mp_INT); | 1626 | apic1 = find_isa_irq_apic(0, mp_INT); |
1621 | pin2 = ioapic_i8259.pin; | 1627 | pin2 = ioapic_i8259.pin; |
1622 | apic2 = ioapic_i8259.apic; | 1628 | apic2 = ioapic_i8259.apic; |
1623 | 1629 | ||
1624 | /* Do this first, otherwise we get double interrupts on ATI boards */ | 1630 | apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", |
1625 | if ((pin1 != -1) && try_apic_pin(apic1, pin1,"with 8259 IRQ0 disabled")) | 1631 | vector, apic1, pin1, apic2, pin2); |
1626 | return; | ||
1627 | 1632 | ||
1628 | /* Now try again with IRQ0 8259A enabled. | 1633 | if (pin1 != -1) { |
1629 | Assumes timer is on IO-APIC 0 ?!? */ | 1634 | /* |
1630 | enable_8259A_irq(0); | 1635 | * Ok, does IRQ0 through the IOAPIC work? |
1631 | unmask_IO_APIC_irq(0); | 1636 | */ |
1632 | if (try_apic_pin(apic1, pin1, "with 8259 IRQ0 enabled")) | 1637 | unmask_IO_APIC_irq(0); |
1633 | return; | 1638 | if (!no_timer_check && timer_irq_works()) { |
1634 | disable_8259A_irq(0); | 1639 | nmi_watchdog_default(); |
1635 | 1640 | if (nmi_watchdog == NMI_IO_APIC) { | |
1636 | /* Always try pin0 and pin2 on APIC 0 to handle buggy timer overrides | 1641 | disable_8259A_irq(0); |
1637 | on Nvidia boards */ | 1642 | setup_nmi(); |
1638 | if (!(apic1 == 0 && pin1 == 0) && | 1643 | enable_8259A_irq(0); |
1639 | try_apic_pin(0, 0, "fallback with 8259 IRQ0 disabled")) | 1644 | } |
1640 | return; | 1645 | if (disable_timer_pin_1 > 0) |
1641 | if (!(apic1 == 0 && pin1 == 2) && | 1646 | clear_IO_APIC_pin(0, pin1); |
1642 | try_apic_pin(0, 2, "fallback with 8259 IRQ0 disabled")) | 1647 | return; |
1643 | return; | 1648 | } |
1649 | clear_IO_APIC_pin(apic1, pin1); | ||
1650 | apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not " | ||
1651 | "connected to IO-APIC\n"); | ||
1652 | } | ||
1644 | 1653 | ||
1645 | /* Then try pure 8259A routing on the 8259 as reported by BIOS*/ | 1654 | apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) " |
1646 | enable_8259A_irq(0); | 1655 | "through the 8259A ... "); |
1647 | if (pin2 != -1) { | 1656 | if (pin2 != -1) { |
1657 | apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...", | ||
1658 | apic2, pin2); | ||
1659 | /* | ||
1660 | * legacy devices should be connected to IO APIC #0 | ||
1661 | */ | ||
1648 | setup_ExtINT_IRQ0_pin(apic2, pin2, vector); | 1662 | setup_ExtINT_IRQ0_pin(apic2, pin2, vector); |
1649 | if (try_apic_pin(apic2,pin2,"8259A broadcast ExtINT from BIOS")) | 1663 | if (timer_irq_works()) { |
1664 | apic_printk(APIC_VERBOSE," works.\n"); | ||
1665 | nmi_watchdog_default(); | ||
1666 | if (nmi_watchdog == NMI_IO_APIC) { | ||
1667 | setup_nmi(); | ||
1668 | } | ||
1650 | return; | 1669 | return; |
1670 | } | ||
1671 | /* | ||
1672 | * Cleanup, just in case ... | ||
1673 | */ | ||
1674 | clear_IO_APIC_pin(apic2, pin2); | ||
1651 | } | 1675 | } |
1652 | 1676 | apic_printk(APIC_VERBOSE," failed.\n"); | |
1653 | /* Tried all possibilities to go through the IO-APIC. Now come the | ||
1654 | really cheesy fallbacks. */ | ||
1655 | 1677 | ||
1656 | if (nmi_watchdog == NMI_IO_APIC) { | 1678 | if (nmi_watchdog == NMI_IO_APIC) { |
1657 | printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); | 1679 | printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); |
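The reintroduced timer_over_8254 logic is driven from the command line through __setup() handlers. A minimal sketch of registering such an early boot parameter; the parameter and variable names are illustrative:

#include <linux/init.h>

static int example_flag __initdata = 1;

/* Booting with "example_flag_off" on the command line clears the flag. */
static int __init example_flag_off(char *str)
{
        example_flag = 0;
        return 1;
}
__setup("example_flag_off", example_flag_off);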
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 9c52d87d6f04..cbdf031f3c09 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -424,7 +424,7 @@ static void acpi_ec_gpe_query(void *ec_cxt) | |||
424 | 424 | ||
425 | snprintf(object_name, 8, "_Q%2.2X", value); | 425 | snprintf(object_name, 8, "_Q%2.2X", value); |
426 | 426 | ||
427 | printk(KERN_INFO PREFIX "evaluating %s\n", object_name); | 427 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s", object_name)); |
428 | 428 | ||
429 | acpi_evaluate_object(ec->handle, object_name, NULL, NULL); | 429 | acpi_evaluate_object(ec->handle, object_name, NULL, NULL); |
430 | } | 430 | } |
@@ -1016,8 +1016,8 @@ static int __init acpi_ec_set_intr_mode(char *str) | |||
1016 | acpi_ec_mode = EC_POLL; | 1016 | acpi_ec_mode = EC_POLL; |
1017 | } | 1017 | } |
1018 | acpi_ec_driver.ops.add = acpi_ec_add; | 1018 | acpi_ec_driver.ops.add = acpi_ec_add; |
1019 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "EC %s mode.\n", | 1019 | printk(KERN_NOTICE PREFIX "%s mode.\n", |
1020 | intr ? "interrupt" : "polling")); | 1020 | intr ? "interrupt" : "polling"); |
1021 | 1021 | ||
1022 | return 1; | 1022 | return 1; |
1023 | } | 1023 | } |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 25718fed39f1..5f9496d59ed6 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -476,9 +476,6 @@ static int acpi_processor_get_info(struct acpi_processor *pr) | |||
476 | if (cpu_index == -1) { | 476 | if (cpu_index == -1) { |
477 | if (ACPI_FAILURE | 477 | if (ACPI_FAILURE |
478 | (acpi_processor_hotadd_init(pr->handle, &pr->id))) { | 478 | (acpi_processor_hotadd_init(pr->handle, &pr->id))) { |
479 | printk(KERN_ERR PREFIX | ||
480 | "Getting cpuindex for acpiid 0x%x\n", | ||
481 | pr->acpi_id); | ||
482 | return -ENODEV; | 479 | return -ENODEV; |
483 | } | 480 | } |
484 | } | 481 | } |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 0e60382714bb..5207f9e4b443 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -736,10 +736,6 @@ int acpi_processor_preregister_performance( | |||
736 | } | 736 | } |
737 | 737 | ||
738 | err_ret: | 738 | err_ret: |
739 | if (retval) { | ||
740 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n")); | ||
741 | } | ||
742 | |||
743 | for_each_possible_cpu(i) { | 739 | for_each_possible_cpu(i) { |
744 | pr = processors[i]; | 740 | pr = processors[i]; |
745 | if (!pr || !pr->performance) | 741 | if (!pr || !pr->performance) |
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c index 88aeccbafaaf..d9b651ffcdc0 100644 --- a/drivers/acpi/toshiba_acpi.c +++ b/drivers/acpi/toshiba_acpi.c | |||
@@ -321,13 +321,16 @@ static int set_lcd_status(struct backlight_device *bd) | |||
321 | static unsigned long write_lcd(const char *buffer, unsigned long count) | 321 | static unsigned long write_lcd(const char *buffer, unsigned long count) |
322 | { | 322 | { |
323 | int value; | 323 | int value; |
324 | int ret = count; | 324 | int ret; |
325 | 325 | ||
326 | if (sscanf(buffer, " brightness : %i", &value) == 1 && | 326 | if (sscanf(buffer, " brightness : %i", &value) == 1 && |
327 | value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) | 327 | value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) { |
328 | ret = set_lcd(value); | 328 | ret = set_lcd(value); |
329 | else | 329 | if (ret == 0) |
330 | ret = count; | ||
331 | } else { | ||
330 | ret = -EINVAL; | 332 | ret = -EINVAL; |
333 | } | ||
331 | return ret; | 334 | return ret; |
332 | } | 335 | } |
333 | 336 | ||
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index b34e0a958d0f..da21552d2b1c 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -381,7 +381,7 @@ config PATA_OPTI | |||
381 | If unsure, say N. | 381 | If unsure, say N. |
382 | 382 | ||
383 | config PATA_OPTIDMA | 383 | config PATA_OPTIDMA |
384 | tristate "OPTI FireStar PATA support (Veyr Experimental)" | 384 | tristate "OPTI FireStar PATA support (Very Experimental)" |
385 | depends on PCI && EXPERIMENTAL | 385 | depends on PCI && EXPERIMENTAL |
386 | help | 386 | help |
387 | This option enables DMA/PIO support for the later OPTi | 387 | This option enables DMA/PIO support for the later OPTi |
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 47082df7199e..dfb306057cf4 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/libata.h> | 25 | #include <linux/libata.h> |
26 | 26 | ||
27 | #define DRV_NAME "pata_hpt37x" | 27 | #define DRV_NAME "pata_hpt37x" |
28 | #define DRV_VERSION "0.5.1" | 28 | #define DRV_VERSION "0.5.2" |
29 | 29 | ||
30 | struct hpt_clock { | 30 | struct hpt_clock { |
31 | u8 xfer_speed; | 31 | u8 xfer_speed; |
@@ -416,7 +416,7 @@ static const char *bad_ata100_5[] = { | |||
416 | 416 | ||
417 | static unsigned long hpt370_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) | 417 | static unsigned long hpt370_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) |
418 | { | 418 | { |
419 | if (adev->class != ATA_DEV_ATA) { | 419 | if (adev->class == ATA_DEV_ATA) { |
420 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) | 420 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) |
421 | mask &= ~ATA_MASK_UDMA; | 421 | mask &= ~ATA_MASK_UDMA; |
422 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 422 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
@@ -749,7 +749,7 @@ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc) | |||
749 | { | 749 | { |
750 | struct ata_port *ap = qc->ap; | 750 | struct ata_port *ap = qc->ap; |
751 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 751 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
752 | int mscreg = 0x50 + 2 * ap->port_no; | 752 | int mscreg = 0x50 + 4 * ap->port_no; |
753 | u8 bwsr_stat, msc_stat; | 753 | u8 bwsr_stat, msc_stat; |
754 | 754 | ||
755 | pci_read_config_byte(pdev, 0x6A, &bwsr_stat); | 755 | pci_read_config_byte(pdev, 0x6A, &bwsr_stat); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 7c95c762950f..62462190e07e 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -765,47 +765,34 @@ static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio | |||
765 | */ | 765 | */ |
766 | static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) | 766 | static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) |
767 | { | 767 | { |
768 | char sense[SCSI_SENSE_BUFFERSIZE]; | 768 | request_queue_t *q = bdev_get_queue(pd->bdev); |
769 | request_queue_t *q; | ||
770 | struct request *rq; | 769 | struct request *rq; |
771 | DECLARE_COMPLETION_ONSTACK(wait); | 770 | int ret = 0; |
772 | int err = 0; | ||
773 | 771 | ||
774 | q = bdev_get_queue(pd->bdev); | 772 | rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? |
773 | WRITE : READ, __GFP_WAIT); | ||
774 | |||
775 | if (cgc->buflen) { | ||
776 | if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT)) | ||
777 | goto out; | ||
778 | } | ||
779 | |||
780 | rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); | ||
781 | memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); | ||
782 | if (sizeof(rq->cmd) > CDROM_PACKET_SIZE) | ||
783 | memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE); | ||
775 | 784 | ||
776 | rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ, | ||
777 | __GFP_WAIT); | ||
778 | rq->errors = 0; | ||
779 | rq->rq_disk = pd->bdev->bd_disk; | ||
780 | rq->bio = NULL; | ||
781 | rq->buffer = NULL; | ||
782 | rq->timeout = 60*HZ; | 785 | rq->timeout = 60*HZ; |
783 | rq->data = cgc->buffer; | ||
784 | rq->data_len = cgc->buflen; | ||
785 | rq->sense = sense; | ||
786 | memset(sense, 0, sizeof(sense)); | ||
787 | rq->sense_len = 0; | ||
788 | rq->cmd_type = REQ_TYPE_BLOCK_PC; | 786 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
789 | rq->cmd_flags |= REQ_HARDBARRIER; | 787 | rq->cmd_flags |= REQ_HARDBARRIER; |
790 | if (cgc->quiet) | 788 | if (cgc->quiet) |
791 | rq->cmd_flags |= REQ_QUIET; | 789 | rq->cmd_flags |= REQ_QUIET; |
792 | memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); | ||
793 | if (sizeof(rq->cmd) > CDROM_PACKET_SIZE) | ||
794 | memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE); | ||
795 | rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); | ||
796 | |||
797 | rq->ref_count++; | ||
798 | rq->end_io_data = &wait; | ||
799 | rq->end_io = blk_end_sync_rq; | ||
800 | elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1); | ||
801 | generic_unplug_device(q); | ||
802 | wait_for_completion(&wait); | ||
803 | |||
804 | if (rq->errors) | ||
805 | err = -EIO; | ||
806 | 790 | ||
791 | blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0); | ||
792 | ret = rq->errors; | ||
793 | out: | ||
807 | blk_put_request(rq); | 794 | blk_put_request(rq); |
808 | return err; | 795 | return ret; |
809 | } | 796 | } |
810 | 797 | ||
811 | /* | 798 | /* |
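pkt_generic_packet() now builds a normal block-layer request and lets blk_execute_rq() queue it and wait, instead of open-coding elv_add_request() with an on-stack completion. A rough skeleton of that pattern against the block API of this kernel series (these helpers have since changed shape, so treat it as a sketch):

#include <linux/blkdev.h>

static int example_sync_pc(request_queue_t *q, struct gendisk *disk,
                           void *buf, unsigned int len)
{
        struct request *rq;
        int err = 0;

        rq = blk_get_request(q, READ, __GFP_WAIT);
        if (len && blk_rq_map_kern(q, rq, buf, len, __GFP_WAIT)) {
                err = -ENOMEM;
                goto out;
        }

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->timeout = 60 * HZ;

        /* Queues the request and sleeps until it completes. */
        blk_execute_rq(q, disk, rq, 0);
        if (rq->errors)
                err = -EIO;
out:
        blk_put_request(rq);
        return err;
}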
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c index aeefec97fdee..6bdf593081d8 100644 --- a/drivers/bluetooth/hci_usb.c +++ b/drivers/bluetooth/hci_usb.c | |||
@@ -117,10 +117,17 @@ static struct usb_device_id blacklist_ids[] = { | |||
117 | 117 | ||
118 | /* IBM/Lenovo ThinkPad with Broadcom chip */ | 118 | /* IBM/Lenovo ThinkPad with Broadcom chip */ |
119 | { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU }, | 119 | { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU }, |
120 | { USB_DEVICE(0x0a5c, 0x2110), .driver_info = HCI_WRONG_SCO_MTU }, | ||
120 | 121 | ||
121 | /* ANYCOM Bluetooth USB-200 and USB-250 */ | 122 | /* ANYCOM Bluetooth USB-200 and USB-250 */ |
122 | { USB_DEVICE(0x0a5c, 0x2111), .driver_info = HCI_RESET }, | 123 | { USB_DEVICE(0x0a5c, 0x2111), .driver_info = HCI_RESET }, |
123 | 124 | ||
125 | /* HP laptop with Broadcom chip */ | ||
126 | { USB_DEVICE(0x03f0, 0x171d), .driver_info = HCI_WRONG_SCO_MTU }, | ||
127 | |||
128 | /* Dell laptop with Broadcom chip */ | ||
129 | { USB_DEVICE(0x413c, 0x8126), .driver_info = HCI_WRONG_SCO_MTU }, | ||
130 | |||
124 | /* Microsoft Wireless Transceiver for Bluetooth 2.0 */ | 131 | /* Microsoft Wireless Transceiver for Bluetooth 2.0 */ |
125 | { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET }, | 132 | { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET }, |
126 | 133 | ||
diff --git a/drivers/char/ip2/i2ellis.h b/drivers/char/ip2/i2ellis.h index 5eabe47b0bc8..433305062fb8 100644 --- a/drivers/char/ip2/i2ellis.h +++ b/drivers/char/ip2/i2ellis.h | |||
@@ -606,9 +606,9 @@ static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int); | |||
606 | // code and returning. | 606 | // code and returning. |
607 | // | 607 | // |
608 | #define COMPLETE(pB,code) \ | 608 | #define COMPLETE(pB,code) \ |
609 | if(1){ \ | 609 | do { \ |
610 | pB->i2eError = code; \ | 610 | pB->i2eError = code; \ |
611 | return (code == I2EE_GOOD);\ | 611 | return (code == I2EE_GOOD);\ |
612 | } | 612 | } while (0) |
613 | 613 | ||
614 | #endif // I2ELLIS_H | 614 | #endif // I2ELLIS_H |
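The COMPLETE() change replaces an "if(1){...}" macro body with the standard do { ... } while (0) form, so the macro expands to exactly one statement and cannot steal a following else. A tiny illustration with hypothetical names:

/* Unsafe: an "if (1) { ... }" body pairs with a later "else". */
#define SET_ERR_UNSAFE(p, code)  if (1) { (p)->err = (code); }

/* Safe: a single statement, usable anywhere a statement is allowed. */
#define SET_ERR(p, code)         do { (p)->err = (code); } while (0)

With the do/while form, "if (failed) SET_ERR(board, -EIO); else retry();" parses the way it reads.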
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 3ece69231343..5c9f67f98d10 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/connector.h> | 29 | #include <linux/connector.h> |
30 | #include <asm/atomic.h> | 30 | #include <asm/atomic.h> |
31 | #include <asm/unaligned.h> | ||
31 | 32 | ||
32 | #include <linux/cn_proc.h> | 33 | #include <linux/cn_proc.h> |
33 | 34 | ||
@@ -60,7 +61,7 @@ void proc_fork_connector(struct task_struct *task) | |||
60 | ev = (struct proc_event*)msg->data; | 61 | ev = (struct proc_event*)msg->data; |
61 | get_seq(&msg->seq, &ev->cpu); | 62 | get_seq(&msg->seq, &ev->cpu); |
62 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ | 63 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
63 | ev->timestamp_ns = timespec_to_ns(&ts); | 64 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
64 | ev->what = PROC_EVENT_FORK; | 65 | ev->what = PROC_EVENT_FORK; |
65 | ev->event_data.fork.parent_pid = task->real_parent->pid; | 66 | ev->event_data.fork.parent_pid = task->real_parent->pid; |
66 | ev->event_data.fork.parent_tgid = task->real_parent->tgid; | 67 | ev->event_data.fork.parent_tgid = task->real_parent->tgid; |
@@ -88,7 +89,7 @@ void proc_exec_connector(struct task_struct *task) | |||
88 | ev = (struct proc_event*)msg->data; | 89 | ev = (struct proc_event*)msg->data; |
89 | get_seq(&msg->seq, &ev->cpu); | 90 | get_seq(&msg->seq, &ev->cpu); |
90 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ | 91 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
91 | ev->timestamp_ns = timespec_to_ns(&ts); | 92 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
92 | ev->what = PROC_EVENT_EXEC; | 93 | ev->what = PROC_EVENT_EXEC; |
93 | ev->event_data.exec.process_pid = task->pid; | 94 | ev->event_data.exec.process_pid = task->pid; |
94 | ev->event_data.exec.process_tgid = task->tgid; | 95 | ev->event_data.exec.process_tgid = task->tgid; |
@@ -124,7 +125,7 @@ void proc_id_connector(struct task_struct *task, int which_id) | |||
124 | return; | 125 | return; |
125 | get_seq(&msg->seq, &ev->cpu); | 126 | get_seq(&msg->seq, &ev->cpu); |
126 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ | 127 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
127 | ev->timestamp_ns = timespec_to_ns(&ts); | 128 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
128 | 129 | ||
129 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); | 130 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
130 | msg->ack = 0; /* not used */ | 131 | msg->ack = 0; /* not used */ |
@@ -146,7 +147,7 @@ void proc_exit_connector(struct task_struct *task) | |||
146 | ev = (struct proc_event*)msg->data; | 147 | ev = (struct proc_event*)msg->data; |
147 | get_seq(&msg->seq, &ev->cpu); | 148 | get_seq(&msg->seq, &ev->cpu); |
148 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ | 149 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
149 | ev->timestamp_ns = timespec_to_ns(&ts); | 150 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
150 | ev->what = PROC_EVENT_EXIT; | 151 | ev->what = PROC_EVENT_EXIT; |
151 | ev->event_data.exit.process_pid = task->pid; | 152 | ev->event_data.exit.process_pid = task->pid; |
152 | ev->event_data.exit.process_tgid = task->tgid; | 153 | ev->event_data.exit.process_tgid = task->tgid; |
@@ -181,7 +182,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) | |||
181 | ev = (struct proc_event*)msg->data; | 182 | ev = (struct proc_event*)msg->data; |
182 | msg->seq = rcvd_seq; | 183 | msg->seq = rcvd_seq; |
183 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ | 184 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
184 | ev->timestamp_ns = timespec_to_ns(&ts); | 185 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
185 | ev->cpu = -1; | 186 | ev->cpu = -1; |
186 | ev->what = PROC_EVENT_NONE; | 187 | ev->what = PROC_EVENT_NONE; |
187 | ev->event_data.ack.err = err; | 188 | ev->event_data.ack.err = err; |
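The connector event payload does not keep timestamp_ns naturally aligned for a 64-bit store on every architecture, so the assignments above go through put_unaligned(). A small sketch of that helper in use:

#include <linux/types.h>
#include <linux/time.h>
#include <asm/unaligned.h>

static void example_store_ns(void *field, struct timespec *ts)
{
        /*
         * The destination lives inside a packed message buffer and may
         * not be 8-byte aligned, so avoid a plain 64-bit store.
         */
        put_unaligned(timespec_to_ns(ts), (__u64 *)field);
}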
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 96d4a0bb2203..ec796ad087df 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -6,13 +6,21 @@ menu "HID Devices" | |||
6 | 6 | ||
7 | config HID | 7 | config HID |
8 | tristate "Generic HID support" | 8 | tristate "Generic HID support" |
9 | depends on INPUT | ||
9 | default y | 10 | default y |
10 | ---help--- | 11 | ---help--- |
11 | Say Y here if you want generic HID support to connect keyboards, | 12 | A human interface device (HID) is a type of computer device that |
12 | mice, joysticks, graphic tablets, or any other HID based devices | 13 | interacts directly with and takes input from humans. The term "HID" |
13 | to your computer. You also need to select particular types of | 14 | most commonly used to refer to the USB-HID specification, but other |
14 | HID devices you want to compile support for, in the particular | 15 | devices (such as, but not strictly limited to, Bluetooth) are |
15 | driver menu (USB, Bluetooth) | 16 | designed using HID specification (this involves certain keyboards, |
17 | mice, tablets, etc). This option compiles into kernel the generic | ||
18 | HID layer code (parser, usages, etc.), which can then be used by | ||
19 | transport-specific HID implementation (like USB or Bluetooth). | ||
20 | |||
21 | For docs and specs, see http://www.usb.org/developers/hidpage/ | ||
22 | |||
23 | If unsure, say Y | ||
16 | 24 | ||
17 | endmenu | 25 | endmenu |
18 | 26 | ||
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index e1989f3a2684..9367c4cfe936 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -564,13 +564,4 @@ config I2C_PNX | |||
564 | This driver can also be built as a module. If so, the module | 564 | This driver can also be built as a module. If so, the module |
565 | will be called i2c-pnx. | 565 | will be called i2c-pnx. |
566 | 566 | ||
567 | config I2C_PNX_EARLY | ||
568 | bool "Early initialization for I2C on PNXxxxx" | ||
569 | depends on I2C_PNX=y | ||
570 | help | ||
571 | Under certain circumstances one may need to make sure I2C on PNXxxxx | ||
572 | is initialized earlier than some other driver that depends on it | ||
573 | (for instance, that might be USB in case of PNX4008). With this | ||
574 | option turned on you can guarantee that. | ||
575 | |||
576 | endmenu | 567 | endmenu |
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index bbc8e3a7ff55..490173611d6b 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c | |||
@@ -529,6 +529,8 @@ mv64xxx_i2c_probe(struct platform_device *pd) | |||
529 | platform_set_drvdata(pd, drv_data); | 529 | platform_set_drvdata(pd, drv_data); |
530 | i2c_set_adapdata(&drv_data->adapter, drv_data); | 530 | i2c_set_adapdata(&drv_data->adapter, drv_data); |
531 | 531 | ||
532 | mv64xxx_i2c_hw_init(drv_data); | ||
533 | |||
532 | if (request_irq(drv_data->irq, mv64xxx_i2c_intr, 0, | 534 | if (request_irq(drv_data->irq, mv64xxx_i2c_intr, 0, |
533 | MV64XXX_I2C_CTLR_NAME, drv_data)) { | 535 | MV64XXX_I2C_CTLR_NAME, drv_data)) { |
534 | dev_err(&drv_data->adapter.dev, | 536 | dev_err(&drv_data->adapter.dev, |
@@ -542,8 +544,6 @@ mv64xxx_i2c_probe(struct platform_device *pd) | |||
542 | goto exit_free_irq; | 544 | goto exit_free_irq; |
543 | } | 545 | } |
544 | 546 | ||
545 | mv64xxx_i2c_hw_init(drv_data); | ||
546 | |||
547 | return 0; | 547 | return 0; |
548 | 548 | ||
549 | exit_free_irq: | 549 | exit_free_irq: |
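Moving mv64xxx_i2c_hw_init() ahead of request_irq() makes sure the controller is quiesced before its handler can run; otherwise a pending or shared interrupt could hit a half-initialized device. The ordering, sketched with illustrative names:

#include <linux/interrupt.h>

struct example_i2c_data {
        int irq;
};

static void example_hw_init(struct example_i2c_data *d)
{
        /* reset and program the controller registers */
}

static irqreturn_t example_intr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_setup(struct example_i2c_data *d)
{
        /* Program the hardware first ... */
        example_hw_init(d);

        /* ... then let interrupts in. */
        if (request_irq(d->irq, example_intr, 0, "example-i2c", d))
                return -EINVAL;
        return 0;
}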
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index de0bca77e926..17376feb1acc 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c | |||
@@ -305,8 +305,7 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap) | |||
305 | return 0; | 305 | return 0; |
306 | } | 306 | } |
307 | 307 | ||
308 | static irqreturn_t | 308 | static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) |
309 | i2c_pnx_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
310 | { | 309 | { |
311 | u32 stat, ctl; | 310 | u32 stat, ctl; |
312 | struct i2c_adapter *adap = dev_id; | 311 | struct i2c_adapter *adap = dev_id; |
@@ -699,10 +698,6 @@ MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>"); | |||
699 | MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); | 698 | MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); |
700 | MODULE_LICENSE("GPL"); | 699 | MODULE_LICENSE("GPL"); |
701 | 700 | ||
702 | #ifdef CONFIG_I2C_PNX_EARLY | ||
703 | /* We need to make sure I2C is initialized before USB */ | 701 | /* We need to make sure I2C is initialized before USB */ |
704 | subsys_initcall(i2c_adap_pnx_init); | 702 | subsys_initcall(i2c_adap_pnx_init); |
705 | #else | ||
706 | mudule_init(i2c_adap_pnx_init); | ||
707 | #endif | ||
708 | module_exit(i2c_adap_pnx_exit); | 703 | module_exit(i2c_adap_pnx_exit); |
diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c index 420377c86422..3fcb646e2073 100644 --- a/drivers/i2c/chips/m41t00.c +++ b/drivers/i2c/chips/m41t00.c | |||
@@ -209,6 +209,7 @@ m41t00_set(void *arg) | |||
209 | buf[m41t00_chip->hour] = (buf[m41t00_chip->hour] & ~0x3f) | (hour& 0x3f); | 209 | buf[m41t00_chip->hour] = (buf[m41t00_chip->hour] & ~0x3f) | (hour& 0x3f); |
210 | buf[m41t00_chip->day] = (buf[m41t00_chip->day] & ~0x3f) | (day & 0x3f); | 210 | buf[m41t00_chip->day] = (buf[m41t00_chip->day] & ~0x3f) | (day & 0x3f); |
211 | buf[m41t00_chip->mon] = (buf[m41t00_chip->mon] & ~0x1f) | (mon & 0x1f); | 211 | buf[m41t00_chip->mon] = (buf[m41t00_chip->mon] & ~0x1f) | (mon & 0x1f); |
212 | buf[m41t00_chip->year] = year; | ||
212 | 213 | ||
213 | if (i2c_master_send(save_client, wbuf, 9) < 0) | 214 | if (i2c_master_send(save_client, wbuf, 9) < 0) |
214 | dev_err(&save_client->dev, "m41t00_set: Write error\n"); | 215 | dev_err(&save_client->dev, "m41t00_set: Write error\n"); |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 3e31f1d265c9..b05378a3d673 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -95,16 +95,32 @@ struct device_driver i2c_adapter_driver = { | |||
95 | .bus = &i2c_bus_type, | 95 | .bus = &i2c_bus_type, |
96 | }; | 96 | }; |
97 | 97 | ||
98 | /* ------------------------------------------------------------------------- */ | ||
99 | |||
100 | /* I2C bus adapters -- one roots each I2C or SMBUS segment */ | ||
101 | |||
98 | static void i2c_adapter_class_dev_release(struct class_device *dev) | 102 | static void i2c_adapter_class_dev_release(struct class_device *dev) |
99 | { | 103 | { |
100 | struct i2c_adapter *adap = class_dev_to_i2c_adapter(dev); | 104 | struct i2c_adapter *adap = class_dev_to_i2c_adapter(dev); |
101 | complete(&adap->class_dev_released); | 105 | complete(&adap->class_dev_released); |
102 | } | 106 | } |
103 | 107 | ||
108 | static ssize_t i2c_adapter_show_name(struct class_device *cdev, char *buf) | ||
109 | { | ||
110 | struct i2c_adapter *adap = class_dev_to_i2c_adapter(cdev); | ||
111 | return sprintf(buf, "%s\n", adap->name); | ||
112 | } | ||
113 | |||
114 | static struct class_device_attribute i2c_adapter_attrs[] = { | ||
115 | __ATTR(name, S_IRUGO, i2c_adapter_show_name, NULL), | ||
116 | { }, | ||
117 | }; | ||
118 | |||
104 | struct class i2c_adapter_class = { | 119 | struct class i2c_adapter_class = { |
105 | .owner = THIS_MODULE, | 120 | .owner = THIS_MODULE, |
106 | .name = "i2c-adapter", | 121 | .name = "i2c-adapter", |
107 | .release = &i2c_adapter_class_dev_release, | 122 | .class_dev_attrs = i2c_adapter_attrs, |
123 | .release = &i2c_adapter_class_dev_release, | ||
108 | }; | 124 | }; |
109 | 125 | ||
110 | static ssize_t show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf) | 126 | static ssize_t show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -175,8 +191,12 @@ int i2c_add_adapter(struct i2c_adapter *adap) | |||
175 | * If the parent pointer is not set up, | 191 | * If the parent pointer is not set up, |
176 | * we add this adapter to the host bus. | 192 | * we add this adapter to the host bus. |
177 | */ | 193 | */ |
178 | if (adap->dev.parent == NULL) | 194 | if (adap->dev.parent == NULL) { |
179 | adap->dev.parent = &platform_bus; | 195 | adap->dev.parent = &platform_bus; |
196 | printk(KERN_WARNING "**WARNING** I2C adapter driver [%s] " | ||
197 | "forgot to specify physical device; fix it!\n", | ||
198 | adap->name); | ||
199 | } | ||
180 | sprintf(adap->dev.bus_id, "i2c-%d", adap->nr); | 200 | sprintf(adap->dev.bus_id, "i2c-%d", adap->nr); |
181 | adap->dev.driver = &i2c_adapter_driver; | 201 | adap->dev.driver = &i2c_adapter_driver; |
182 | adap->dev.release = &i2c_adapter_dev_release; | 202 | adap->dev.release = &i2c_adapter_dev_release; |
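The adapter name becomes a class-wide attribute: every class_device registered under i2c_adapter_class automatically gains a read-only "name" file. A sketch of that class_device-era pattern (these interfaces were later replaced by plain struct device attributes):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>

static ssize_t example_show_name(struct class_device *cdev, char *buf)
{
        return sprintf(buf, "%s\n", "example");
}

static struct class_device_attribute example_attrs[] = {
        __ATTR(name, S_IRUGO, example_show_name, NULL),
        { },                            /* terminator */
};

static struct class example_class = {
        .owner           = THIS_MODULE,
        .name            = "example",
        .class_dev_attrs = example_attrs,   /* created for each class_device */
};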
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c index ffdffb6379ef..524e65de4398 100644 --- a/drivers/ide/pci/atiixp.c +++ b/drivers/ide/pci/atiixp.c | |||
@@ -46,6 +46,8 @@ static atiixp_ide_timing mdma_timing[] = { | |||
46 | 46 | ||
47 | static int save_mdma_mode[4]; | 47 | static int save_mdma_mode[4]; |
48 | 48 | ||
49 | static DEFINE_SPINLOCK(atiixp_lock); | ||
50 | |||
49 | /** | 51 | /** |
50 | * atiixp_ratemask - compute rate mask for ATIIXP IDE | 52 | * atiixp_ratemask - compute rate mask for ATIIXP IDE |
51 | * @drive: IDE drive to compute for | 53 | * @drive: IDE drive to compute for |
@@ -105,7 +107,7 @@ static int atiixp_ide_dma_host_on(ide_drive_t *drive) | |||
105 | unsigned long flags; | 107 | unsigned long flags; |
106 | u16 tmp16; | 108 | u16 tmp16; |
107 | 109 | ||
108 | spin_lock_irqsave(&ide_lock, flags); | 110 | spin_lock_irqsave(&atiixp_lock, flags); |
109 | 111 | ||
110 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); | 112 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); |
111 | if (save_mdma_mode[drive->dn]) | 113 | if (save_mdma_mode[drive->dn]) |
@@ -114,7 +116,7 @@ static int atiixp_ide_dma_host_on(ide_drive_t *drive) | |||
114 | tmp16 |= (1 << drive->dn); | 116 | tmp16 |= (1 << drive->dn); |
115 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16); | 117 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16); |
116 | 118 | ||
117 | spin_unlock_irqrestore(&ide_lock, flags); | 119 | spin_unlock_irqrestore(&atiixp_lock, flags); |
118 | 120 | ||
119 | return __ide_dma_host_on(drive); | 121 | return __ide_dma_host_on(drive); |
120 | } | 122 | } |
@@ -125,13 +127,13 @@ static int atiixp_ide_dma_host_off(ide_drive_t *drive) | |||
125 | unsigned long flags; | 127 | unsigned long flags; |
126 | u16 tmp16; | 128 | u16 tmp16; |
127 | 129 | ||
128 | spin_lock_irqsave(&ide_lock, flags); | 130 | spin_lock_irqsave(&atiixp_lock, flags); |
129 | 131 | ||
130 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); | 132 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); |
131 | tmp16 &= ~(1 << drive->dn); | 133 | tmp16 &= ~(1 << drive->dn); |
132 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16); | 134 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16); |
133 | 135 | ||
134 | spin_unlock_irqrestore(&ide_lock, flags); | 136 | spin_unlock_irqrestore(&atiixp_lock, flags); |
135 | 137 | ||
136 | return __ide_dma_host_off(drive); | 138 | return __ide_dma_host_off(drive); |
137 | } | 139 | } |
@@ -152,7 +154,7 @@ static void atiixp_tuneproc(ide_drive_t *drive, u8 pio) | |||
152 | u32 pio_timing_data; | 154 | u32 pio_timing_data; |
153 | u16 pio_mode_data; | 155 | u16 pio_mode_data; |
154 | 156 | ||
155 | spin_lock_irqsave(&ide_lock, flags); | 157 | spin_lock_irqsave(&atiixp_lock, flags); |
156 | 158 | ||
157 | pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); | 159 | pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); |
158 | pio_mode_data &= ~(0x07 << (drive->dn * 4)); | 160 | pio_mode_data &= ~(0x07 << (drive->dn * 4)); |
@@ -165,7 +167,7 @@ static void atiixp_tuneproc(ide_drive_t *drive, u8 pio) | |||
165 | (pio_timing[pio].command_width << (timing_shift + 4)); | 167 | (pio_timing[pio].command_width << (timing_shift + 4)); |
166 | pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); | 168 | pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); |
167 | 169 | ||
168 | spin_unlock_irqrestore(&ide_lock, flags); | 170 | spin_unlock_irqrestore(&atiixp_lock, flags); |
169 | } | 171 | } |
170 | 172 | ||
171 | /** | 173 | /** |
@@ -189,7 +191,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed) | |||
189 | 191 | ||
190 | speed = ide_rate_filter(atiixp_ratemask(drive), xferspeed); | 192 | speed = ide_rate_filter(atiixp_ratemask(drive), xferspeed); |
191 | 193 | ||
192 | spin_lock_irqsave(&ide_lock, flags); | 194 | spin_lock_irqsave(&atiixp_lock, flags); |
193 | 195 | ||
194 | save_mdma_mode[drive->dn] = 0; | 196 | save_mdma_mode[drive->dn] = 0; |
195 | if (speed >= XFER_UDMA_0) { | 197 | if (speed >= XFER_UDMA_0) { |
@@ -208,7 +210,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed) | |||
208 | } | 210 | } |
209 | } | 211 | } |
210 | 212 | ||
211 | spin_unlock_irqrestore(&ide_lock, flags); | 213 | spin_unlock_irqrestore(&atiixp_lock, flags); |
212 | 214 | ||
213 | if (speed >= XFER_SW_DMA_0) | 215 | if (speed >= XFER_SW_DMA_0) |
214 | pio = atiixp_dma_2_pio(speed); | 216 | pio = atiixp_dma_2_pio(speed); |
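Switching from the global ide_lock to a driver-private spinlock confines these critical sections to the atiixp driver's own PCI config read-modify-write cycles. The basic pattern, with an illustrative register offset:

#include <linux/pci.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_set_bit(struct pci_dev *dev, int bit)
{
        unsigned long flags;
        u16 tmp;

        spin_lock_irqsave(&example_lock, flags);
        pci_read_config_word(dev, 0x54, &tmp);   /* 0x54 is illustrative */
        tmp |= 1 << bit;
        pci_write_config_word(dev, 0x54, tmp);
        spin_unlock_irqrestore(&example_lock, flags);
}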
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c index 61f1a9665a7f..381cc6f101ce 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/pci/via82cxxx.c | |||
@@ -123,7 +123,7 @@ struct via82cxxx_dev | |||
123 | static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing) | 123 | static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing) |
124 | { | 124 | { |
125 | struct pci_dev *dev = hwif->pci_dev; | 125 | struct pci_dev *dev = hwif->pci_dev; |
126 | struct via82cxxx_dev *vdev = ide_get_hwifdata(hwif); | 126 | struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev); |
127 | u8 t; | 127 | u8 t; |
128 | 128 | ||
129 | if (~vdev->via_config->flags & VIA_BAD_AST) { | 129 | if (~vdev->via_config->flags & VIA_BAD_AST) { |
@@ -162,7 +162,7 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing) | |||
162 | static int via_set_drive(ide_drive_t *drive, u8 speed) | 162 | static int via_set_drive(ide_drive_t *drive, u8 speed) |
163 | { | 163 | { |
164 | ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1); | 164 | ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1); |
165 | struct via82cxxx_dev *vdev = ide_get_hwifdata(drive->hwif); | 165 | struct via82cxxx_dev *vdev = pci_get_drvdata(drive->hwif->pci_dev); |
166 | struct ide_timing t, p; | 166 | struct ide_timing t, p; |
167 | unsigned int T, UT; | 167 | unsigned int T, UT; |
168 | 168 | ||
@@ -225,7 +225,7 @@ static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio) | |||
225 | static int via82cxxx_ide_dma_check (ide_drive_t *drive) | 225 | static int via82cxxx_ide_dma_check (ide_drive_t *drive) |
226 | { | 226 | { |
227 | ide_hwif_t *hwif = HWIF(drive); | 227 | ide_hwif_t *hwif = HWIF(drive); |
228 | struct via82cxxx_dev *vdev = ide_get_hwifdata(hwif); | 228 | struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev); |
229 | u16 w80 = hwif->udma_four; | 229 | u16 w80 = hwif->udma_four; |
230 | 230 | ||
231 | u16 speed = ide_find_best_mode(drive, | 231 | u16 speed = ide_find_best_mode(drive, |
@@ -262,6 +262,53 @@ static struct via_isa_bridge *via_config_find(struct pci_dev **isa) | |||
262 | return via_config; | 262 | return via_config; |
263 | } | 263 | } |
264 | 264 | ||
265 | /* | ||
266 | * Check and handle 80-wire cable presence | ||
267 | */ | ||
268 | static void __devinit via_cable_detect(struct via82cxxx_dev *vdev, u32 u) | ||
269 | { | ||
270 | int i; | ||
271 | |||
272 | switch (vdev->via_config->flags & VIA_UDMA) { | ||
273 | case VIA_UDMA_66: | ||
274 | for (i = 24; i >= 0; i -= 8) | ||
275 | if (((u >> (i & 16)) & 8) && | ||
276 | ((u >> i) & 0x20) && | ||
277 | (((u >> i) & 7) < 2)) { | ||
278 | /* | ||
279 | * 2x PCI clock and | ||
280 | * UDMA w/ < 3T/cycle | ||
281 | */ | ||
282 | vdev->via_80w |= (1 << (1 - (i >> 4))); | ||
283 | } | ||
284 | break; | ||
285 | |||
286 | case VIA_UDMA_100: | ||
287 | for (i = 24; i >= 0; i -= 8) | ||
288 | if (((u >> i) & 0x10) || | ||
289 | (((u >> i) & 0x20) && | ||
290 | (((u >> i) & 7) < 4))) { | ||
291 | /* BIOS 80-wire bit or | ||
292 | * UDMA w/ < 60ns/cycle | ||
293 | */ | ||
294 | vdev->via_80w |= (1 << (1 - (i >> 4))); | ||
295 | } | ||
296 | break; | ||
297 | |||
298 | case VIA_UDMA_133: | ||
299 | for (i = 24; i >= 0; i -= 8) | ||
300 | if (((u >> i) & 0x10) || | ||
301 | (((u >> i) & 0x20) && | ||
302 | (((u >> i) & 7) < 6))) { | ||
303 | /* BIOS 80-wire bit or | ||
304 | * UDMA w/ < 60ns/cycle | ||
305 | */ | ||
306 | vdev->via_80w |= (1 << (1 - (i >> 4))); | ||
307 | } | ||
308 | break; | ||
309 | } | ||
310 | } | ||
311 | |||
265 | /** | 312 | /** |
266 | * init_chipset_via82cxxx - initialization handler | 313 | * init_chipset_via82cxxx - initialization handler |
267 | * @dev: PCI device | 314 | * @dev: PCI device |
@@ -274,14 +321,22 @@ static struct via_isa_bridge *via_config_find(struct pci_dev **isa) | |||
274 | static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const char *name) | 321 | static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const char *name) |
275 | { | 322 | { |
276 | struct pci_dev *isa = NULL; | 323 | struct pci_dev *isa = NULL; |
324 | struct via82cxxx_dev *vdev; | ||
277 | struct via_isa_bridge *via_config; | 325 | struct via_isa_bridge *via_config; |
278 | u8 t, v; | 326 | u8 t, v; |
279 | unsigned int u; | 327 | u32 u; |
328 | |||
329 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); | ||
330 | if (!vdev) { | ||
331 | printk(KERN_ERR "VP_IDE: out of memory :(\n"); | ||
332 | return -ENOMEM; | ||
333 | } | ||
334 | pci_set_drvdata(dev, vdev); | ||
280 | 335 | ||
281 | /* | 336 | /* |
282 | * Find the ISA bridge to see how good the IDE is. | 337 | * Find the ISA bridge to see how good the IDE is. |
283 | */ | 338 | */ |
284 | via_config = via_config_find(&isa); | 339 | vdev->via_config = via_config = via_config_find(&isa); |
285 | 340 | ||
286 | /* We checked this earlier so if it fails here deeep badness | 341 | /* We checked this earlier so if it fails here deeep badness |
287 | is involved */ | 342 | is involved */ |
@@ -289,16 +344,17 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const | |||
289 | BUG_ON(!via_config->id); | 344 | BUG_ON(!via_config->id); |
290 | 345 | ||
291 | /* | 346 | /* |
292 | * Setup or disable Clk66 if appropriate | 347 | * Detect cable and configure Clk66 |
293 | */ | 348 | */ |
349 | pci_read_config_dword(dev, VIA_UDMA_TIMING, &u); | ||
350 | |||
351 | via_cable_detect(vdev, u); | ||
294 | 352 | ||
295 | if ((via_config->flags & VIA_UDMA) == VIA_UDMA_66) { | 353 | if ((via_config->flags & VIA_UDMA) == VIA_UDMA_66) { |
296 | /* Enable Clk66 */ | 354 | /* Enable Clk66 */ |
297 | pci_read_config_dword(dev, VIA_UDMA_TIMING, &u); | ||
298 | pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008); | 355 | pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008); |
299 | } else if (via_config->flags & VIA_BAD_CLK66) { | 356 | } else if (via_config->flags & VIA_BAD_CLK66) { |
300 | /* Would cause trouble on 596a and 686 */ | 357 | /* Would cause trouble on 596a and 686 */ |
301 | pci_read_config_dword(dev, VIA_UDMA_TIMING, &u); | ||
302 | pci_write_config_dword(dev, VIA_UDMA_TIMING, u & ~0x80008); | 358 | pci_write_config_dword(dev, VIA_UDMA_TIMING, u & ~0x80008); |
303 | } | 359 | } |
304 | 360 | ||
@@ -367,75 +423,11 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const | |||
367 | return 0; | 423 | return 0; |
368 | } | 424 | } |
369 | 425 | ||
370 | /* | ||
371 | * Check and handle 80-wire cable presence | ||
372 | */ | ||
373 | static void __devinit via_cable_detect(struct pci_dev *dev, struct via82cxxx_dev *vdev) | ||
374 | { | ||
375 | unsigned int u; | ||
376 | int i; | ||
377 | pci_read_config_dword(dev, VIA_UDMA_TIMING, &u); | ||
378 | |||
379 | switch (vdev->via_config->flags & VIA_UDMA) { | ||
380 | |||
381 | case VIA_UDMA_66: | ||
382 | for (i = 24; i >= 0; i -= 8) | ||
383 | if (((u >> (i & 16)) & 8) && | ||
384 | ((u >> i) & 0x20) && | ||
385 | (((u >> i) & 7) < 2)) { | ||
386 | /* | ||
387 | * 2x PCI clock and | ||
388 | * UDMA w/ < 3T/cycle | ||
389 | */ | ||
390 | vdev->via_80w |= (1 << (1 - (i >> 4))); | ||
391 | } | ||
392 | break; | ||
393 | |||
394 | case VIA_UDMA_100: | ||
395 | for (i = 24; i >= 0; i -= 8) | ||
396 | if (((u >> i) & 0x10) || | ||
397 | (((u >> i) & 0x20) && | ||
398 | (((u >> i) & 7) < 4))) { | ||
399 | /* BIOS 80-wire bit or | ||
400 | * UDMA w/ < 60ns/cycle | ||
401 | */ | ||
402 | vdev->via_80w |= (1 << (1 - (i >> 4))); | ||
403 | } | ||
404 | break; | ||
405 | |||
406 | case VIA_UDMA_133: | ||
407 | for (i = 24; i >= 0; i -= 8) | ||
408 | if (((u >> i) & 0x10) || | ||
409 | (((u >> i) & 0x20) && | ||
410 | (((u >> i) & 7) < 6))) { | ||
411 | /* BIOS 80-wire bit or | ||
412 | * UDMA w/ < 60ns/cycle | ||
413 | */ | ||
414 | vdev->via_80w |= (1 << (1 - (i >> 4))); | ||
415 | } | ||
416 | break; | ||
417 | |||
418 | } | ||
419 | } | ||
420 | |||
421 | static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif) | 426 | static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif) |
422 | { | 427 | { |
423 | struct via82cxxx_dev *vdev = kmalloc(sizeof(struct via82cxxx_dev), | 428 | struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev); |
424 | GFP_KERNEL); | ||
425 | struct pci_dev *isa = NULL; | ||
426 | int i; | 429 | int i; |
427 | 430 | ||
428 | if (vdev == NULL) { | ||
429 | printk(KERN_ERR "VP_IDE: out of memory :(\n"); | ||
430 | return; | ||
431 | } | ||
432 | |||
433 | memset(vdev, 0, sizeof(struct via82cxxx_dev)); | ||
434 | ide_set_hwifdata(hwif, vdev); | ||
435 | |||
436 | vdev->via_config = via_config_find(&isa); | ||
437 | via_cable_detect(hwif->pci_dev, vdev); | ||
438 | |||
439 | hwif->autodma = 0; | 431 | hwif->autodma = 0; |
440 | 432 | ||
441 | hwif->tuneproc = &via82cxxx_tune_drive; | 433 | hwif->tuneproc = &via82cxxx_tune_drive; |
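The net effect of the two via82cxxx hunks above is that per-device state moves from a per-hwif kmalloc in init_hwif_via82cxxx() to a single kzalloc in init_chipset_via82cxxx(), handed over through the PCI driver-data pointer. A minimal sketch of that hand-off pattern, with an invented state structure (only pci_set_drvdata()/pci_get_drvdata() and the allocation flags come from the patch):

	#include <linux/pci.h>
	#include <linux/slab.h>

	struct example_state {			/* stand-in for struct via82cxxx_dev */
		unsigned int via_80w;
	};

	static int example_init_chipset(struct pci_dev *dev)
	{
		struct example_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

		if (!st)
			return -ENOMEM;
		pci_set_drvdata(dev, st);	/* allocated once per PCI function */
		return 0;
	}

	static void example_init_hwif(struct pci_dev *dev)
	{
		/* every interface on the function shares the same state */
		struct example_state *st = pci_get_drvdata(dev);

		(void)st;
	}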
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 533193d4e5df..9e0ab048c878 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -1088,10 +1088,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
1088 | *sin = iw_event->local_addr; | 1088 | *sin = iw_event->local_addr; |
1089 | sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; | 1089 | sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; |
1090 | *sin = iw_event->remote_addr; | 1090 | *sin = iw_event->remote_addr; |
1091 | if (iw_event->status) | 1091 | switch (iw_event->status) { |
1092 | event.event = RDMA_CM_EVENT_REJECTED; | 1092 | case 0: |
1093 | else | ||
1094 | event.event = RDMA_CM_EVENT_ESTABLISHED; | 1093 | event.event = RDMA_CM_EVENT_ESTABLISHED; |
1094 | break; | ||
1095 | case -ECONNRESET: | ||
1096 | case -ECONNREFUSED: | ||
1097 | event.event = RDMA_CM_EVENT_REJECTED; | ||
1098 | break; | ||
1099 | case -ETIMEDOUT: | ||
1100 | event.event = RDMA_CM_EVENT_UNREACHABLE; | ||
1101 | break; | ||
1102 | default: | ||
1103 | event.event = RDMA_CM_EVENT_CONNECT_ERROR; | ||
1104 | break; | ||
1105 | } | ||
1095 | break; | 1106 | break; |
1096 | case IW_CM_EVENT_ESTABLISHED: | 1107 | case IW_CM_EVENT_ESTABLISHED: |
1097 | event.event = RDMA_CM_EVENT_ESTABLISHED; | 1108 | event.event = RDMA_CM_EVENT_ESTABLISHED; |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 81a5cdc5733a..e2e8d329b443 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -209,10 +209,21 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, | |||
209 | if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { | 209 | if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { |
210 | if (!ctx->backlog) { | 210 | if (!ctx->backlog) { |
211 | ret = -EDQUOT; | 211 | ret = -EDQUOT; |
212 | kfree(uevent); | ||
212 | goto out; | 213 | goto out; |
213 | } | 214 | } |
214 | ctx->backlog--; | 215 | ctx->backlog--; |
216 | } else if (!ctx->uid) { | ||
217 | /* | ||
218 | * We ignore events for new connections until userspace has set | ||
219 | * their context. This can only happen if an error occurs on a | ||
220 | * new connection before the user accepts it. This is okay, | ||
221 | * since the accept will just fail later. | ||
222 | */ | ||
223 | kfree(uevent); | ||
224 | goto out; | ||
215 | } | 225 | } |
226 | |||
216 | list_add_tail(&uevent->list, &ctx->file->event_list); | 227 | list_add_tail(&uevent->list, &ctx->file->event_list); |
217 | wake_up_interruptible(&ctx->file->poll_wait); | 228 | wake_up_interruptible(&ctx->file->poll_wait); |
218 | out: | 229 | out: |
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index e1b618c5f685..b7be950ab47c 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c | |||
@@ -50,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) | |||
50 | ib_device); | 50 | ib_device); |
51 | struct hipz_query_hca *rblock; | 51 | struct hipz_query_hca *rblock; |
52 | 52 | ||
53 | rblock = ehca_alloc_fw_ctrlblock(); | 53 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
54 | if (!rblock) { | 54 | if (!rblock) { |
55 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 55 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
56 | return -ENOMEM; | 56 | return -ENOMEM; |
@@ -110,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev, | |||
110 | ib_device); | 110 | ib_device); |
111 | struct hipz_query_port *rblock; | 111 | struct hipz_query_port *rblock; |
112 | 112 | ||
113 | rblock = ehca_alloc_fw_ctrlblock(); | 113 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
114 | if (!rblock) { | 114 | if (!rblock) { |
115 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 115 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
116 | return -ENOMEM; | 116 | return -ENOMEM; |
@@ -179,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) | |||
179 | return -EINVAL; | 179 | return -EINVAL; |
180 | } | 180 | } |
181 | 181 | ||
182 | rblock = ehca_alloc_fw_ctrlblock(); | 182 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
183 | if (!rblock) { | 183 | if (!rblock) { |
184 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 184 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
185 | return -ENOMEM; | 185 | return -ENOMEM; |
@@ -212,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port, | |||
212 | return -EINVAL; | 212 | return -EINVAL; |
213 | } | 213 | } |
214 | 214 | ||
215 | rblock = ehca_alloc_fw_ctrlblock(); | 215 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
216 | if (!rblock) { | 216 | if (!rblock) { |
217 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 217 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
218 | return -ENOMEM; | 218 | return -ENOMEM; |
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index c3ea746e9045..e7209afb4250 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c | |||
@@ -138,7 +138,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data, | |||
138 | u64 *rblock; | 138 | u64 *rblock; |
139 | unsigned long block_count; | 139 | unsigned long block_count; |
140 | 140 | ||
141 | rblock = ehca_alloc_fw_ctrlblock(); | 141 | rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC); |
142 | if (!rblock) { | 142 | if (!rblock) { |
143 | ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); | 143 | ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); |
144 | ret = -ENOMEM; | 144 | ret = -ENOMEM; |
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h index 3720e3032cce..cd7789f0d08e 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h | |||
@@ -180,10 +180,10 @@ int ehca_mmap_register(u64 physical,void **mapped, | |||
180 | int ehca_munmap(unsigned long addr, size_t len); | 180 | int ehca_munmap(unsigned long addr, size_t len); |
181 | 181 | ||
182 | #ifdef CONFIG_PPC_64K_PAGES | 182 | #ifdef CONFIG_PPC_64K_PAGES |
183 | void *ehca_alloc_fw_ctrlblock(void); | 183 | void *ehca_alloc_fw_ctrlblock(gfp_t flags); |
184 | void ehca_free_fw_ctrlblock(void *ptr); | 184 | void ehca_free_fw_ctrlblock(void *ptr); |
185 | #else | 185 | #else |
186 | #define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL)) | 186 | #define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags)) |
187 | #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr)) | 187 | #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr)) |
188 | #endif | 188 | #endif |
189 | 189 | ||
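The point of threading a gfp_t parameter through ehca_alloc_fw_ctrlblock() shows up in the call sites above: query paths that may sleep pass GFP_KERNEL, while ehca_error_data(), which can run in the interrupt/error path, passes GFP_ATOMIC. A minimal sketch of that convention (the wrapper below is invented for illustration; only the prototype comes from the patch):

	#include <linux/gfp.h>

	void *ehca_alloc_fw_ctrlblock(gfp_t flags);	/* as declared above */

	static void *alloc_ctrlblock(int may_sleep)
	{
		/* sleeping callers use GFP_KERNEL, atomic callers GFP_ATOMIC */
		return ehca_alloc_fw_ctrlblock(may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	}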
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index cc47e4c13a18..6574fbbaead5 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -106,9 +106,9 @@ static struct timer_list poll_eqs_timer; | |||
106 | #ifdef CONFIG_PPC_64K_PAGES | 106 | #ifdef CONFIG_PPC_64K_PAGES |
107 | static struct kmem_cache *ctblk_cache = NULL; | 107 | static struct kmem_cache *ctblk_cache = NULL; |
108 | 108 | ||
109 | void *ehca_alloc_fw_ctrlblock(void) | 109 | void *ehca_alloc_fw_ctrlblock(gfp_t flags) |
110 | { | 110 | { |
111 | void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL); | 111 | void *ret = kmem_cache_zalloc(ctblk_cache, flags); |
112 | if (!ret) | 112 | if (!ret) |
113 | ehca_gen_err("Out of memory for ctblk"); | 113 | ehca_gen_err("Out of memory for ctblk"); |
114 | return ret; | 114 | return ret; |
@@ -206,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca) | |||
206 | u64 h_ret; | 206 | u64 h_ret; |
207 | struct hipz_query_hca *rblock; | 207 | struct hipz_query_hca *rblock; |
208 | 208 | ||
209 | rblock = ehca_alloc_fw_ctrlblock(); | 209 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
210 | if (!rblock) { | 210 | if (!rblock) { |
211 | ehca_gen_err("Cannot allocate rblock memory."); | 211 | ehca_gen_err("Cannot allocate rblock memory."); |
212 | return -ENOMEM; | 212 | return -ENOMEM; |
@@ -258,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca) | |||
258 | int ret = 0; | 258 | int ret = 0; |
259 | struct hipz_query_hca *rblock; | 259 | struct hipz_query_hca *rblock; |
260 | 260 | ||
261 | rblock = ehca_alloc_fw_ctrlblock(); | 261 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
262 | if (!rblock) { | 262 | if (!rblock) { |
263 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 263 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
264 | return -ENOMEM; | 264 | return -ENOMEM; |
@@ -469,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \ | |||
469 | \ | 469 | \ |
470 | shca = dev->driver_data; \ | 470 | shca = dev->driver_data; \ |
471 | \ | 471 | \ |
472 | rblock = ehca_alloc_fw_ctrlblock(); \ | 472 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \ |
473 | if (!rblock) { \ | 473 | if (!rblock) { \ |
474 | dev_err(dev, "Can't allocate rblock memory."); \ | 474 | dev_err(dev, "Can't allocate rblock memory."); \ |
475 | return 0; \ | 475 | return 0; \ |
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 0a5e2214cc5f..cfb362a1029c 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, | |||
1013 | u32 i; | 1013 | u32 i; |
1014 | u64 *kpage; | 1014 | u64 *kpage; |
1015 | 1015 | ||
1016 | kpage = ehca_alloc_fw_ctrlblock(); | 1016 | kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
1017 | if (!kpage) { | 1017 | if (!kpage) { |
1018 | ehca_err(&shca->ib_device, "kpage alloc failed"); | 1018 | ehca_err(&shca->ib_device, "kpage alloc failed"); |
1019 | ret = -ENOMEM; | 1019 | ret = -ENOMEM; |
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, | |||
1124 | ehca_mrmw_map_acl(acl, &hipz_acl); | 1124 | ehca_mrmw_map_acl(acl, &hipz_acl); |
1125 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); | 1125 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); |
1126 | 1126 | ||
1127 | kpage = ehca_alloc_fw_ctrlblock(); | 1127 | kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
1128 | if (!kpage) { | 1128 | if (!kpage) { |
1129 | ehca_err(&shca->ib_device, "kpage alloc failed"); | 1129 | ehca_err(&shca->ib_device, "kpage alloc failed"); |
1130 | ret = -ENOMEM; | 1130 | ret = -ENOMEM; |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index c6c9cef203e3..34b85556d01e 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -807,7 +807,7 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
807 | unsigned long spl_flags = 0; | 807 | unsigned long spl_flags = 0; |
808 | 808 | ||
809 | /* do query_qp to obtain current attr values */ | 809 | /* do query_qp to obtain current attr values */ |
810 | mqpcb = ehca_alloc_fw_ctrlblock(); | 810 | mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
811 | if (!mqpcb) { | 811 | if (!mqpcb) { |
812 | ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " | 812 | ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " |
813 | "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); | 813 | "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); |
@@ -1273,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp, | |||
1273 | return -EINVAL; | 1273 | return -EINVAL; |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | qpcb = ehca_alloc_fw_ctrlblock(); | 1276 | qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
1277 | if (!qpcb) { | 1277 | if (!qpcb) { |
1278 | ehca_err(qp->device,"Out of memory for qpcb " | 1278 | ehca_err(qp->device,"Out of memory for qpcb " |
1279 | "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); | 1279 | "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); |
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 283d50b76c3d..1159c8a0f2c5 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c | |||
@@ -54,6 +54,10 @@ enum { | |||
54 | MTHCA_CQ_ENTRY_SIZE = 0x20 | 54 | MTHCA_CQ_ENTRY_SIZE = 0x20 |
55 | }; | 55 | }; |
56 | 56 | ||
57 | enum { | ||
58 | MTHCA_ATOMIC_BYTE_LEN = 8 | ||
59 | }; | ||
60 | |||
57 | /* | 61 | /* |
58 | * Must be packed because start is 64 bits but only aligned to 32 bits. | 62 | * Must be packed because start is 64 bits but only aligned to 32 bits. |
59 | */ | 63 | */ |
@@ -599,11 +603,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev, | |||
599 | break; | 603 | break; |
600 | case MTHCA_OPCODE_ATOMIC_CS: | 604 | case MTHCA_OPCODE_ATOMIC_CS: |
601 | entry->opcode = IB_WC_COMP_SWAP; | 605 | entry->opcode = IB_WC_COMP_SWAP; |
602 | entry->byte_len = be32_to_cpu(cqe->byte_cnt); | 606 | entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; |
603 | break; | 607 | break; |
604 | case MTHCA_OPCODE_ATOMIC_FA: | 608 | case MTHCA_OPCODE_ATOMIC_FA: |
605 | entry->opcode = IB_WC_FETCH_ADD; | 609 | entry->opcode = IB_WC_FETCH_ADD; |
606 | entry->byte_len = be32_to_cpu(cqe->byte_cnt); | 610 | entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; |
607 | break; | 611 | break; |
608 | case MTHCA_OPCODE_BIND_MW: | 612 | case MTHCA_OPCODE_BIND_MW: |
609 | entry->opcode = IB_WC_BIND_MW; | 613 | entry->opcode = IB_WC_BIND_MW; |
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 15cc2f6eb475..6b19645d946c 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c | |||
@@ -232,7 +232,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj) | |||
232 | 232 | ||
233 | list_for_each_entry(chunk, &icm->chunk_list, list) { | 233 | list_for_each_entry(chunk, &icm->chunk_list, list) { |
234 | for (i = 0; i < chunk->npages; ++i) { | 234 | for (i = 0; i < chunk->npages; ++i) { |
235 | if (chunk->mem[i].length >= offset) { | 235 | if (chunk->mem[i].length > offset) { |
236 | page = chunk->mem[i].page; | 236 | page = chunk->mem[i].page; |
237 | goto out; | 237 | goto out; |
238 | } | 238 | } |
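The one-character change above fixes an off-by-one in the containment test: assuming the surrounding loop subtracts each scatter entry's length from offset (that part is outside the hunk), an offset equal to the entry's length actually refers to the first byte of the next entry, so ">=" picked the wrong page at the boundary. A standalone illustration of the boundary condition:

	#include <assert.h>

	/* the corrected containment test from the hunk */
	static int entry_contains(unsigned int length, unsigned int offset)
	{
		return length > offset;
	}

	int main(void)
	{
		assert(entry_contains(4096, 4095));	/* last byte of this entry */
		assert(!entry_contains(4096, 4096));	/* first byte of the next one */
		return 0;
	}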
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index d844a2569b47..5f5214c0337d 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -429,13 +429,18 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m | |||
429 | { | 429 | { |
430 | struct mthca_dev *dev = to_mdev(ibqp->device); | 430 | struct mthca_dev *dev = to_mdev(ibqp->device); |
431 | struct mthca_qp *qp = to_mqp(ibqp); | 431 | struct mthca_qp *qp = to_mqp(ibqp); |
432 | int err; | 432 | int err = 0; |
433 | struct mthca_mailbox *mailbox; | 433 | struct mthca_mailbox *mailbox = NULL; |
434 | struct mthca_qp_param *qp_param; | 434 | struct mthca_qp_param *qp_param; |
435 | struct mthca_qp_context *context; | 435 | struct mthca_qp_context *context; |
436 | int mthca_state; | 436 | int mthca_state; |
437 | u8 status; | 437 | u8 status; |
438 | 438 | ||
439 | if (qp->state == IB_QPS_RESET) { | ||
440 | qp_attr->qp_state = IB_QPS_RESET; | ||
441 | goto done; | ||
442 | } | ||
443 | |||
439 | mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); | 444 | mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); |
440 | if (IS_ERR(mailbox)) | 445 | if (IS_ERR(mailbox)) |
441 | return PTR_ERR(mailbox); | 446 | return PTR_ERR(mailbox); |
@@ -454,7 +459,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m | |||
454 | mthca_state = be32_to_cpu(context->flags) >> 28; | 459 | mthca_state = be32_to_cpu(context->flags) >> 28; |
455 | 460 | ||
456 | qp_attr->qp_state = to_ib_qp_state(mthca_state); | 461 | qp_attr->qp_state = to_ib_qp_state(mthca_state); |
457 | qp_attr->cur_qp_state = qp_attr->qp_state; | ||
458 | qp_attr->path_mtu = context->mtu_msgmax >> 5; | 462 | qp_attr->path_mtu = context->mtu_msgmax >> 5; |
459 | qp_attr->path_mig_state = | 463 | qp_attr->path_mig_state = |
460 | to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); | 464 | to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); |
@@ -464,11 +468,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m | |||
464 | qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; | 468 | qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; |
465 | qp_attr->qp_access_flags = | 469 | qp_attr->qp_access_flags = |
466 | to_ib_qp_access_flags(be32_to_cpu(context->params2)); | 470 | to_ib_qp_access_flags(be32_to_cpu(context->params2)); |
467 | qp_attr->cap.max_send_wr = qp->sq.max; | ||
468 | qp_attr->cap.max_recv_wr = qp->rq.max; | ||
469 | qp_attr->cap.max_send_sge = qp->sq.max_gs; | ||
470 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | ||
471 | qp_attr->cap.max_inline_data = qp->max_inline_data; | ||
472 | 471 | ||
473 | if (qp->transport == RC || qp->transport == UC) { | 472 | if (qp->transport == RC || qp->transport == UC) { |
474 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); | 473 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); |
@@ -495,7 +494,16 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m | |||
495 | qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; | 494 | qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; |
496 | qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; | 495 | qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; |
497 | qp_attr->alt_timeout = context->alt_path.ackto >> 3; | 496 | qp_attr->alt_timeout = context->alt_path.ackto >> 3; |
498 | qp_init_attr->cap = qp_attr->cap; | 497 | |
498 | done: | ||
499 | qp_attr->cur_qp_state = qp_attr->qp_state; | ||
500 | qp_attr->cap.max_send_wr = qp->sq.max; | ||
501 | qp_attr->cap.max_recv_wr = qp->rq.max; | ||
502 | qp_attr->cap.max_send_sge = qp->sq.max_gs; | ||
503 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | ||
504 | qp_attr->cap.max_inline_data = qp->max_inline_data; | ||
505 | |||
506 | qp_init_attr->cap = qp_attr->cap; | ||
499 | 507 | ||
500 | out: | 508 | out: |
501 | mthca_free_mailbox(dev, mailbox); | 509 | mthca_free_mailbox(dev, mailbox); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 9b2041e25d59..dd221eda3ea6 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -177,7 +177,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, | |||
177 | * - if yes, the mtask is recycled at iscsi_complete_pdu | 177 | * - if yes, the mtask is recycled at iscsi_complete_pdu |
178 | * - if no, the mtask is recycled at iser_snd_completion | 178 | * - if no, the mtask is recycled at iser_snd_completion |
179 | */ | 179 | */ |
180 | if (error && error != -EAGAIN) | 180 | if (error && error != -ENOBUFS) |
181 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 181 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
182 | 182 | ||
183 | return error; | 183 | return error; |
@@ -241,7 +241,7 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn, | |||
241 | error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask); | 241 | error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask); |
242 | 242 | ||
243 | iscsi_iser_ctask_xmit_exit: | 243 | iscsi_iser_ctask_xmit_exit: |
244 | if (error && error != -EAGAIN) | 244 | if (error && error != -ENOBUFS) |
245 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 245 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
246 | return error; | 246 | return error; |
247 | } | 247 | } |
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index e73c87b9be43..0a7d1ab60e6d 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -304,18 +304,14 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) | |||
304 | static int | 304 | static int |
305 | iser_check_xmit(struct iscsi_conn *conn, void *task) | 305 | iser_check_xmit(struct iscsi_conn *conn, void *task) |
306 | { | 306 | { |
307 | int rc = 0; | ||
308 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 307 | struct iscsi_iser_conn *iser_conn = conn->dd_data; |
309 | 308 | ||
310 | write_lock_bh(conn->recv_lock); | ||
311 | if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == | 309 | if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == |
312 | ISER_QP_MAX_REQ_DTOS) { | 310 | ISER_QP_MAX_REQ_DTOS) { |
313 | iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task); | 311 | iser_dbg("%ld can't xmit task %p\n",jiffies,task); |
314 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); | 312 | return -ENOBUFS; |
315 | rc = -EAGAIN; | ||
316 | } | 313 | } |
317 | write_unlock_bh(conn->recv_lock); | 314 | return 0; |
318 | return rc; | ||
319 | } | 315 | } |
320 | 316 | ||
321 | 317 | ||
@@ -340,7 +336,7 @@ int iser_send_command(struct iscsi_conn *conn, | |||
340 | return -EPERM; | 336 | return -EPERM; |
341 | } | 337 | } |
342 | if (iser_check_xmit(conn, ctask)) | 338 | if (iser_check_xmit(conn, ctask)) |
343 | return -EAGAIN; | 339 | return -ENOBUFS; |
344 | 340 | ||
345 | edtl = ntohl(hdr->data_length); | 341 | edtl = ntohl(hdr->data_length); |
346 | 342 | ||
@@ -426,7 +422,7 @@ int iser_send_data_out(struct iscsi_conn *conn, | |||
426 | } | 422 | } |
427 | 423 | ||
428 | if (iser_check_xmit(conn, ctask)) | 424 | if (iser_check_xmit(conn, ctask)) |
429 | return -EAGAIN; | 425 | return -ENOBUFS; |
430 | 426 | ||
431 | itt = ntohl(hdr->itt); | 427 | itt = ntohl(hdr->itt); |
432 | data_seg_len = ntoh24(hdr->dlength); | 428 | data_seg_len = ntoh24(hdr->dlength); |
@@ -498,7 +494,7 @@ int iser_send_control(struct iscsi_conn *conn, | |||
498 | } | 494 | } |
499 | 495 | ||
500 | if (iser_check_xmit(conn,mtask)) | 496 | if (iser_check_xmit(conn,mtask)) |
501 | return -EAGAIN; | 497 | return -ENOBUFS; |
502 | 498 | ||
503 | /* build the tx desc regd header and add it to the tx desc dto */ | 499 | /* build the tx desc regd header and add it to the tx desc dto */ |
504 | mdesc->type = ISCSI_TX_CONTROL; | 500 | mdesc->type = ISCSI_TX_CONTROL; |
@@ -605,6 +601,7 @@ void iser_snd_completion(struct iser_desc *tx_desc) | |||
605 | struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; | 601 | struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; |
606 | struct iscsi_conn *conn = iser_conn->iscsi_conn; | 602 | struct iscsi_conn *conn = iser_conn->iscsi_conn; |
607 | struct iscsi_mgmt_task *mtask; | 603 | struct iscsi_mgmt_task *mtask; |
604 | int resume_tx = 0; | ||
608 | 605 | ||
609 | iser_dbg("Initiator, Data sent dto=0x%p\n", dto); | 606 | iser_dbg("Initiator, Data sent dto=0x%p\n", dto); |
610 | 607 | ||
@@ -613,15 +610,16 @@ void iser_snd_completion(struct iser_desc *tx_desc) | |||
613 | if (tx_desc->type == ISCSI_TX_DATAOUT) | 610 | if (tx_desc->type == ISCSI_TX_DATAOUT) |
614 | kmem_cache_free(ig.desc_cache, tx_desc); | 611 | kmem_cache_free(ig.desc_cache, tx_desc); |
615 | 612 | ||
613 | if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == | ||
614 | ISER_QP_MAX_REQ_DTOS) | ||
615 | resume_tx = 1; | ||
616 | |||
616 | atomic_dec(&ib_conn->post_send_buf_count); | 617 | atomic_dec(&ib_conn->post_send_buf_count); |
617 | 618 | ||
618 | write_lock(conn->recv_lock); | 619 | if (resume_tx) { |
619 | if (conn->suspend_tx) { | ||
620 | iser_dbg("%ld resuming tx\n",jiffies); | 620 | iser_dbg("%ld resuming tx\n",jiffies); |
621 | clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); | ||
622 | scsi_queue_work(conn->session->host, &conn->xmitwork); | 621 | scsi_queue_work(conn->session->host, &conn->xmitwork); |
623 | } | 622 | } |
624 | write_unlock(conn->recv_lock); | ||
625 | 623 | ||
626 | if (tx_desc->type == ISCSI_TX_CONTROL) { | 624 | if (tx_desc->type == ISCSI_TX_CONTROL) { |
627 | /* this arithmetic is legal by libiscsi dd_data allocation */ | 625 | /* this arithmetic is legal by libiscsi dd_data allocation */ |
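Taken together, the iSER hunks replace the suspend_tx bit with a simple counting scheme: the transmit paths return -ENOBUFS (a soft, retryable error that is no longer escalated to a connection failure) once post_send_buf_count reaches ISER_QP_MAX_REQ_DTOS, and the completion that drops the counter from that limit re-queues the transmit work. A generic sketch of the back-pressure pattern (illustration only; the driver uses an atomic counter and the iSCSI work queue, both omitted here):

	#include <errno.h>

	#define MAX_IN_FLIGHT	8	/* stands in for ISER_QP_MAX_REQ_DTOS */

	static int in_flight;

	static int try_post(void)
	{
		if (in_flight == MAX_IN_FLIGHT)
			return -ENOBUFS;	/* back off, caller retries later */
		in_flight++;
		return 0;
	}

	static int on_send_completion(void)
	{
		int resume = (in_flight == MAX_IN_FLIGHT);	/* queue was full */

		in_flight--;
		return resume;		/* nonzero: kick the transmit work again */
	}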
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index 100df6f38d92..91e0c75aca8f 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h | |||
@@ -52,6 +52,8 @@ | |||
52 | #define KVM_MAX_VCPUS 1 | 52 | #define KVM_MAX_VCPUS 1 |
53 | #define KVM_MEMORY_SLOTS 4 | 53 | #define KVM_MEMORY_SLOTS 4 |
54 | #define KVM_NUM_MMU_PAGES 256 | 54 | #define KVM_NUM_MMU_PAGES 256 |
55 | #define KVM_MIN_FREE_MMU_PAGES 5 | ||
56 | #define KVM_REFILL_PAGES 25 | ||
55 | 57 | ||
56 | #define FX_IMAGE_SIZE 512 | 58 | #define FX_IMAGE_SIZE 512 |
57 | #define FX_IMAGE_ALIGN 16 | 59 | #define FX_IMAGE_ALIGN 16 |
@@ -89,14 +91,54 @@ typedef unsigned long hva_t; | |||
89 | typedef u64 hpa_t; | 91 | typedef u64 hpa_t; |
90 | typedef unsigned long hfn_t; | 92 | typedef unsigned long hfn_t; |
91 | 93 | ||
94 | #define NR_PTE_CHAIN_ENTRIES 5 | ||
95 | |||
96 | struct kvm_pte_chain { | ||
97 | u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES]; | ||
98 | struct hlist_node link; | ||
99 | }; | ||
100 | |||
101 | /* | ||
102 | * kvm_mmu_page_role, below, is defined as: | ||
103 | * | ||
104 | * bits 0:3 - total guest paging levels (2-4, or zero for real mode) | ||
105 | * bits 4:7 - page table level for this shadow (1-4) | ||
106 | * bits 8:9 - page table quadrant for 2-level guests | ||
107 | * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode) | ||
108 | */ | ||
109 | union kvm_mmu_page_role { | ||
110 | unsigned word; | ||
111 | struct { | ||
112 | unsigned glevels : 4; | ||
113 | unsigned level : 4; | ||
114 | unsigned quadrant : 2; | ||
115 | unsigned pad_for_nice_hex_output : 6; | ||
116 | unsigned metaphysical : 1; | ||
117 | }; | ||
118 | }; | ||
119 | |||
92 | struct kvm_mmu_page { | 120 | struct kvm_mmu_page { |
93 | struct list_head link; | 121 | struct list_head link; |
122 | struct hlist_node hash_link; | ||
123 | |||
124 | /* | ||
125 | * The following two entries are used to key the shadow page in the | ||
126 | * hash table. | ||
127 | */ | ||
128 | gfn_t gfn; | ||
129 | union kvm_mmu_page_role role; | ||
130 | |||
94 | hpa_t page_hpa; | 131 | hpa_t page_hpa; |
95 | unsigned long slot_bitmap; /* One bit set per slot which has memory | 132 | unsigned long slot_bitmap; /* One bit set per slot which has memory |
96 | * in this shadow page. | 133 | * in this shadow page. |
97 | */ | 134 | */ |
98 | int global; /* Set if all ptes in this page are global */ | 135 | int global; /* Set if all ptes in this page are global */ |
99 | u64 *parent_pte; | 136 | int multimapped; /* More than one parent_pte? */ |
137 | int root_count; /* Currently serving as active root */ | ||
138 | union { | ||
139 | u64 *parent_pte; /* !multimapped */ | ||
140 | struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ | ||
141 | }; | ||
100 | }; | 142 | }; |
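With the hash_link/gfn/role additions above, a shadow page is identified by the pair (gfn, role), so any lookup has to compare both fields. A hypothetical lookup sketch (the function name, bucket choice and iteration details are invented for illustration; only the structure fields above and the mmu_page_hash table added later in this header come from the patch):

	#include <linux/list.h>
	#include "kvm.h"	/* struct kvm, struct kvm_mmu_page, gfn_t */

	static struct kvm_mmu_page *find_shadow_page(struct kvm *kvm, gfn_t gfn,
						     union kvm_mmu_page_role role)
	{
		struct hlist_head *bucket;
		struct hlist_node *node;
		struct kvm_mmu_page *page;

		bucket = &kvm->mmu_page_hash[gfn % KVM_NUM_MMU_PAGES];
		hlist_for_each_entry(page, node, bucket, hash_link)
			if (page->gfn == gfn && page->role.word == role.word)
				return page;
		return NULL;
	}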
101 | 143 | ||
102 | struct vmcs { | 144 | struct vmcs { |
@@ -117,14 +159,26 @@ struct kvm_vcpu; | |||
117 | struct kvm_mmu { | 159 | struct kvm_mmu { |
118 | void (*new_cr3)(struct kvm_vcpu *vcpu); | 160 | void (*new_cr3)(struct kvm_vcpu *vcpu); |
119 | int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); | 161 | int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); |
120 | void (*inval_page)(struct kvm_vcpu *vcpu, gva_t gva); | ||
121 | void (*free)(struct kvm_vcpu *vcpu); | 162 | void (*free)(struct kvm_vcpu *vcpu); |
122 | gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); | 163 | gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); |
123 | hpa_t root_hpa; | 164 | hpa_t root_hpa; |
124 | int root_level; | 165 | int root_level; |
125 | int shadow_root_level; | 166 | int shadow_root_level; |
167 | |||
168 | u64 *pae_root; | ||
169 | }; | ||
170 | |||
171 | #define KVM_NR_MEM_OBJS 20 | ||
172 | |||
173 | struct kvm_mmu_memory_cache { | ||
174 | int nobjs; | ||
175 | void *objects[KVM_NR_MEM_OBJS]; | ||
126 | }; | 176 | }; |
127 | 177 | ||
178 | /* | ||
179 | * We don't want allocation failures within the mmu code, so we preallocate | ||
180 | * enough memory for a single page fault in a cache. | ||
181 | */ | ||
128 | struct kvm_guest_debug { | 182 | struct kvm_guest_debug { |
129 | int enabled; | 183 | int enabled; |
130 | unsigned long bp[4]; | 184 | unsigned long bp[4]; |
@@ -173,6 +227,7 @@ struct kvm_vcpu { | |||
173 | struct mutex mutex; | 227 | struct mutex mutex; |
174 | int cpu; | 228 | int cpu; |
175 | int launched; | 229 | int launched; |
230 | int interrupt_window_open; | ||
176 | unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ | 231 | unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ |
177 | #define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long) | 232 | #define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long) |
178 | unsigned long irq_pending[NR_IRQ_WORDS]; | 233 | unsigned long irq_pending[NR_IRQ_WORDS]; |
@@ -184,6 +239,7 @@ struct kvm_vcpu { | |||
184 | unsigned long cr3; | 239 | unsigned long cr3; |
185 | unsigned long cr4; | 240 | unsigned long cr4; |
186 | unsigned long cr8; | 241 | unsigned long cr8; |
242 | u64 pdptrs[4]; /* pae */ | ||
187 | u64 shadow_efer; | 243 | u64 shadow_efer; |
188 | u64 apic_base; | 244 | u64 apic_base; |
189 | int nmsrs; | 245 | int nmsrs; |
@@ -194,6 +250,12 @@ struct kvm_vcpu { | |||
194 | struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES]; | 250 | struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES]; |
195 | struct kvm_mmu mmu; | 251 | struct kvm_mmu mmu; |
196 | 252 | ||
253 | struct kvm_mmu_memory_cache mmu_pte_chain_cache; | ||
254 | struct kvm_mmu_memory_cache mmu_rmap_desc_cache; | ||
255 | |||
256 | gfn_t last_pt_write_gfn; | ||
257 | int last_pt_write_count; | ||
258 | |||
197 | struct kvm_guest_debug guest_debug; | 259 | struct kvm_guest_debug guest_debug; |
198 | 260 | ||
199 | char fx_buf[FX_BUF_SIZE]; | 261 | char fx_buf[FX_BUF_SIZE]; |
@@ -231,10 +293,16 @@ struct kvm { | |||
231 | spinlock_t lock; /* protects everything except vcpus */ | 293 | spinlock_t lock; /* protects everything except vcpus */ |
232 | int nmemslots; | 294 | int nmemslots; |
233 | struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS]; | 295 | struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS]; |
296 | /* | ||
297 | * Hash table of struct kvm_mmu_page. | ||
298 | */ | ||
234 | struct list_head active_mmu_pages; | 299 | struct list_head active_mmu_pages; |
300 | int n_free_mmu_pages; | ||
301 | struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; | ||
235 | struct kvm_vcpu vcpus[KVM_MAX_VCPUS]; | 302 | struct kvm_vcpu vcpus[KVM_MAX_VCPUS]; |
236 | int memory_config_version; | 303 | int memory_config_version; |
237 | int busy; | 304 | int busy; |
305 | unsigned long rmap_overflow; | ||
238 | }; | 306 | }; |
239 | 307 | ||
240 | struct kvm_stat { | 308 | struct kvm_stat { |
@@ -247,6 +315,9 @@ struct kvm_stat { | |||
247 | u32 io_exits; | 315 | u32 io_exits; |
248 | u32 mmio_exits; | 316 | u32 mmio_exits; |
249 | u32 signal_exits; | 317 | u32 signal_exits; |
318 | u32 irq_window_exits; | ||
319 | u32 halt_exits; | ||
320 | u32 request_irq_exits; | ||
250 | u32 irq_exits; | 321 | u32 irq_exits; |
251 | }; | 322 | }; |
252 | 323 | ||
@@ -279,6 +350,7 @@ struct kvm_arch_ops { | |||
279 | void (*set_segment)(struct kvm_vcpu *vcpu, | 350 | void (*set_segment)(struct kvm_vcpu *vcpu, |
280 | struct kvm_segment *var, int seg); | 351 | struct kvm_segment *var, int seg); |
281 | void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); | 352 | void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); |
353 | void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu); | ||
282 | void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); | 354 | void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); |
283 | void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu, | 355 | void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu, |
284 | unsigned long cr0); | 356 | unsigned long cr0); |
@@ -323,7 +395,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu); | |||
323 | int kvm_mmu_setup(struct kvm_vcpu *vcpu); | 395 | int kvm_mmu_setup(struct kvm_vcpu *vcpu); |
324 | 396 | ||
325 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); | 397 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); |
326 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); | 398 | void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot); |
327 | 399 | ||
328 | hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa); | 400 | hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa); |
329 | #define HPA_MSB ((sizeof(hpa_t) * 8) - 1) | 401 | #define HPA_MSB ((sizeof(hpa_t) * 8) - 1) |
@@ -396,6 +468,19 @@ int kvm_write_guest(struct kvm_vcpu *vcpu, | |||
396 | 468 | ||
397 | unsigned long segment_base(u16 selector); | 469 | unsigned long segment_base(u16 selector); |
398 | 470 | ||
471 | void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes); | ||
472 | void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes); | ||
473 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); | ||
474 | void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | ||
475 | |||
476 | static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, | ||
477 | u32 error_code) | ||
478 | { | ||
479 | if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)) | ||
480 | kvm_mmu_free_some_pages(vcpu); | ||
481 | return vcpu->mmu.page_fault(vcpu, gva, error_code); | ||
482 | } | ||
483 | |||
399 | static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn) | 484 | static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn) |
400 | { | 485 | { |
401 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); | 486 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
@@ -541,19 +626,4 @@ static inline u32 get_rdx_init_val(void) | |||
541 | #define TSS_REDIRECTION_SIZE (256 / 8) | 626 | #define TSS_REDIRECTION_SIZE (256 / 8) |
542 | #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) | 627 | #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) |
543 | 628 | ||
544 | #ifdef CONFIG_X86_64 | ||
545 | |||
546 | /* | ||
547 | * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. Therefore | ||
548 | * we need to allocate shadow page tables in the first 4GB of memory, which | ||
549 | * happens to fit the DMA32 zone. | ||
550 | */ | ||
551 | #define GFP_KVM_MMU (GFP_KERNEL | __GFP_DMA32) | ||
552 | |||
553 | #else | ||
554 | |||
555 | #define GFP_KVM_MMU GFP_KERNEL | ||
556 | |||
557 | #endif | ||
558 | |||
559 | #endif | 629 | #endif |
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index ce7fe640f18d..67c1154960f0 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c | |||
@@ -58,6 +58,9 @@ static struct kvm_stats_debugfs_item { | |||
58 | { "io_exits", &kvm_stat.io_exits }, | 58 | { "io_exits", &kvm_stat.io_exits }, |
59 | { "mmio_exits", &kvm_stat.mmio_exits }, | 59 | { "mmio_exits", &kvm_stat.mmio_exits }, |
60 | { "signal_exits", &kvm_stat.signal_exits }, | 60 | { "signal_exits", &kvm_stat.signal_exits }, |
61 | { "irq_window", &kvm_stat.irq_window_exits }, | ||
62 | { "halt_exits", &kvm_stat.halt_exits }, | ||
63 | { "request_irq", &kvm_stat.request_irq_exits }, | ||
61 | { "irq_exits", &kvm_stat.irq_exits }, | 64 | { "irq_exits", &kvm_stat.irq_exits }, |
62 | { 0, 0 } | 65 | { 0, 0 } |
63 | }; | 66 | }; |
@@ -227,6 +230,7 @@ static int kvm_dev_open(struct inode *inode, struct file *filp) | |||
227 | struct kvm_vcpu *vcpu = &kvm->vcpus[i]; | 230 | struct kvm_vcpu *vcpu = &kvm->vcpus[i]; |
228 | 231 | ||
229 | mutex_init(&vcpu->mutex); | 232 | mutex_init(&vcpu->mutex); |
233 | vcpu->kvm = kvm; | ||
230 | vcpu->mmu.root_hpa = INVALID_PAGE; | 234 | vcpu->mmu.root_hpa = INVALID_PAGE; |
231 | INIT_LIST_HEAD(&vcpu->free_pages); | 235 | INIT_LIST_HEAD(&vcpu->free_pages); |
232 | } | 236 | } |
@@ -268,8 +272,8 @@ static void kvm_free_physmem(struct kvm *kvm) | |||
268 | 272 | ||
269 | static void kvm_free_vcpu(struct kvm_vcpu *vcpu) | 273 | static void kvm_free_vcpu(struct kvm_vcpu *vcpu) |
270 | { | 274 | { |
271 | kvm_arch_ops->vcpu_free(vcpu); | ||
272 | kvm_mmu_destroy(vcpu); | 275 | kvm_mmu_destroy(vcpu); |
276 | kvm_arch_ops->vcpu_free(vcpu); | ||
273 | } | 277 | } |
274 | 278 | ||
275 | static void kvm_free_vcpus(struct kvm *kvm) | 279 | static void kvm_free_vcpus(struct kvm *kvm) |
@@ -295,14 +299,17 @@ static void inject_gp(struct kvm_vcpu *vcpu) | |||
295 | kvm_arch_ops->inject_gp(vcpu, 0); | 299 | kvm_arch_ops->inject_gp(vcpu, 0); |
296 | } | 300 | } |
297 | 301 | ||
298 | static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu, | 302 | /* |
299 | unsigned long cr3) | 303 | * Load the pae pdptrs. Return true if they are all valid. |
304 | */ | ||
305 | static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) | ||
300 | { | 306 | { |
301 | gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; | 307 | gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; |
302 | unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5; | 308 | unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; |
303 | int i; | 309 | int i; |
304 | u64 pdpte; | 310 | u64 pdpte; |
305 | u64 *pdpt; | 311 | u64 *pdpt; |
312 | int ret; | ||
306 | struct kvm_memory_slot *memslot; | 313 | struct kvm_memory_slot *memslot; |
307 | 314 | ||
308 | spin_lock(&vcpu->kvm->lock); | 315 | spin_lock(&vcpu->kvm->lock); |
@@ -310,16 +317,23 @@ static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu, | |||
310 | /* FIXME: !memslot - emulate? 0xff? */ | 317 | /* FIXME: !memslot - emulate? 0xff? */ |
311 | pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0); | 318 | pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0); |
312 | 319 | ||
320 | ret = 1; | ||
313 | for (i = 0; i < 4; ++i) { | 321 | for (i = 0; i < 4; ++i) { |
314 | pdpte = pdpt[offset + i]; | 322 | pdpte = pdpt[offset + i]; |
315 | if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) | 323 | if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) { |
316 | break; | 324 | ret = 0; |
325 | goto out; | ||
326 | } | ||
317 | } | 327 | } |
318 | 328 | ||
329 | for (i = 0; i < 4; ++i) | ||
330 | vcpu->pdptrs[i] = pdpt[offset + i]; | ||
331 | |||
332 | out: | ||
319 | kunmap_atomic(pdpt, KM_USER0); | 333 | kunmap_atomic(pdpt, KM_USER0); |
320 | spin_unlock(&vcpu->kvm->lock); | 334 | spin_unlock(&vcpu->kvm->lock); |
321 | 335 | ||
322 | return i != 4; | 336 | return ret; |
323 | } | 337 | } |
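A worked example of the corrected offset computation in load_pdptrs(): in PAE mode the low bits of cr3 select a 32-byte-aligned PDPT within the page, and since pdpt is mapped as an array of u64, the 32-byte slot number has to be multiplied by four (the old code indexed by the slot number directly, reading the wrong entries whenever cr3 was not page-aligned). Illustration only:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL

	int main(void)
	{
		unsigned long cr3 = 0x1060UL;			/* example value */
		unsigned int slot = (cr3 & (PAGE_SIZE - 1)) >> 5;	/* 32-byte slot: 3 */
		unsigned int offset = slot << 2;		/* u64 index: 12 */

		printf("PDPTEs are pdpt[%u..%u]\n", offset, offset + 3);
		/* the old computation would have read pdpt[3..6] instead */
		return 0;
	}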
324 | 338 | ||
325 | void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | 339 | void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
@@ -365,8 +379,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
365 | } | 379 | } |
366 | } else | 380 | } else |
367 | #endif | 381 | #endif |
368 | if (is_pae(vcpu) && | 382 | if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) { |
369 | pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) { | ||
370 | printk(KERN_DEBUG "set_cr0: #GP, pdptrs " | 383 | printk(KERN_DEBUG "set_cr0: #GP, pdptrs " |
371 | "reserved bits\n"); | 384 | "reserved bits\n"); |
372 | inject_gp(vcpu); | 385 | inject_gp(vcpu); |
@@ -387,6 +400,7 @@ EXPORT_SYMBOL_GPL(set_cr0); | |||
387 | 400 | ||
388 | void lmsw(struct kvm_vcpu *vcpu, unsigned long msw) | 401 | void lmsw(struct kvm_vcpu *vcpu, unsigned long msw) |
389 | { | 402 | { |
403 | kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu); | ||
390 | set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f)); | 404 | set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f)); |
391 | } | 405 | } |
392 | EXPORT_SYMBOL_GPL(lmsw); | 406 | EXPORT_SYMBOL_GPL(lmsw); |
@@ -407,7 +421,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
407 | return; | 421 | return; |
408 | } | 422 | } |
409 | } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK) | 423 | } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK) |
410 | && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) { | 424 | && !load_pdptrs(vcpu, vcpu->cr3)) { |
411 | printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); | 425 | printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); |
412 | inject_gp(vcpu); | 426 | inject_gp(vcpu); |
413 | } | 427 | } |
@@ -439,7 +453,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
439 | return; | 453 | return; |
440 | } | 454 | } |
441 | if (is_paging(vcpu) && is_pae(vcpu) && | 455 | if (is_paging(vcpu) && is_pae(vcpu) && |
442 | pdptrs_have_reserved_bits_set(vcpu, cr3)) { | 456 | !load_pdptrs(vcpu, cr3)) { |
443 | printk(KERN_DEBUG "set_cr3: #GP, pdptrs " | 457 | printk(KERN_DEBUG "set_cr3: #GP, pdptrs " |
444 | "reserved bits\n"); | 458 | "reserved bits\n"); |
445 | inject_gp(vcpu); | 459 | inject_gp(vcpu); |
@@ -449,7 +463,19 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
449 | 463 | ||
450 | vcpu->cr3 = cr3; | 464 | vcpu->cr3 = cr3; |
451 | spin_lock(&vcpu->kvm->lock); | 465 | spin_lock(&vcpu->kvm->lock); |
452 | vcpu->mmu.new_cr3(vcpu); | 466 | /* |
467 | * Does the new cr3 value map to physical memory? (Note, we | ||
468 | * catch an invalid cr3 even in real-mode, because it would | ||
469 | * cause trouble later on when we turn on paging anyway.) | ||
470 | * | ||
471 | * A real CPU would silently accept an invalid cr3 and would | ||
472 | * attempt to use it - with largely undefined (and often hard | ||
473 | * to debug) behavior on the guest side. | ||
474 | */ | ||
475 | if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) | ||
476 | inject_gp(vcpu); | ||
477 | else | ||
478 | vcpu->mmu.new_cr3(vcpu); | ||
453 | spin_unlock(&vcpu->kvm->lock); | 479 | spin_unlock(&vcpu->kvm->lock); |
454 | } | 480 | } |
455 | EXPORT_SYMBOL_GPL(set_cr3); | 481 | EXPORT_SYMBOL_GPL(set_cr3); |
@@ -517,7 +543,6 @@ static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n) | |||
517 | vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE; | 543 | vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE; |
518 | 544 | ||
519 | vcpu->cpu = -1; /* First load will set up TR */ | 545 | vcpu->cpu = -1; /* First load will set up TR */ |
520 | vcpu->kvm = kvm; | ||
521 | r = kvm_arch_ops->vcpu_create(vcpu); | 546 | r = kvm_arch_ops->vcpu_create(vcpu); |
522 | if (r < 0) | 547 | if (r < 0) |
523 | goto out_free_vcpus; | 548 | goto out_free_vcpus; |
@@ -634,6 +659,7 @@ raced: | |||
634 | | __GFP_ZERO); | 659 | | __GFP_ZERO); |
635 | if (!new.phys_mem[i]) | 660 | if (!new.phys_mem[i]) |
636 | goto out_free; | 661 | goto out_free; |
662 | new.phys_mem[i]->private = 0; | ||
637 | } | 663 | } |
638 | } | 664 | } |
639 | 665 | ||
@@ -688,6 +714,13 @@ out: | |||
688 | return r; | 714 | return r; |
689 | } | 715 | } |
690 | 716 | ||
717 | static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot) | ||
718 | { | ||
719 | spin_lock(&vcpu->kvm->lock); | ||
720 | kvm_mmu_slot_remove_write_access(vcpu, slot); | ||
721 | spin_unlock(&vcpu->kvm->lock); | ||
722 | } | ||
723 | |||
691 | /* | 724 | /* |
692 | * Get (and clear) the dirty memory log for a memory slot. | 725 | * Get (and clear) the dirty memory log for a memory slot. |
693 | */ | 726 | */ |
@@ -697,6 +730,7 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm, | |||
697 | struct kvm_memory_slot *memslot; | 730 | struct kvm_memory_slot *memslot; |
698 | int r, i; | 731 | int r, i; |
699 | int n; | 732 | int n; |
733 | int cleared; | ||
700 | unsigned long any = 0; | 734 | unsigned long any = 0; |
701 | 735 | ||
702 | spin_lock(&kvm->lock); | 736 | spin_lock(&kvm->lock); |
@@ -727,15 +761,17 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm, | |||
727 | 761 | ||
728 | 762 | ||
729 | if (any) { | 763 | if (any) { |
730 | spin_lock(&kvm->lock); | 764 | cleared = 0; |
731 | kvm_mmu_slot_remove_write_access(kvm, log->slot); | ||
732 | spin_unlock(&kvm->lock); | ||
733 | memset(memslot->dirty_bitmap, 0, n); | ||
734 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 765 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
735 | struct kvm_vcpu *vcpu = vcpu_load(kvm, i); | 766 | struct kvm_vcpu *vcpu = vcpu_load(kvm, i); |
736 | 767 | ||
737 | if (!vcpu) | 768 | if (!vcpu) |
738 | continue; | 769 | continue; |
770 | if (!cleared) { | ||
771 | do_remove_write_access(vcpu, log->slot); | ||
772 | memset(memslot->dirty_bitmap, 0, n); | ||
773 | cleared = 1; | ||
774 | } | ||
739 | kvm_arch_ops->tlb_flush(vcpu); | 775 | kvm_arch_ops->tlb_flush(vcpu); |
740 | vcpu_put(vcpu); | 776 | vcpu_put(vcpu); |
741 | } | 777 | } |
@@ -863,6 +899,27 @@ static int emulator_read_emulated(unsigned long addr, | |||
863 | } | 899 | } |
864 | } | 900 | } |
865 | 901 | ||
902 | static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, | ||
903 | unsigned long val, int bytes) | ||
904 | { | ||
905 | struct kvm_memory_slot *m; | ||
906 | struct page *page; | ||
907 | void *virt; | ||
908 | |||
909 | if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT)) | ||
910 | return 0; | ||
911 | m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT); | ||
912 | if (!m) | ||
913 | return 0; | ||
914 | page = gfn_to_page(m, gpa >> PAGE_SHIFT); | ||
915 | kvm_mmu_pre_write(vcpu, gpa, bytes); | ||
916 | virt = kmap_atomic(page, KM_USER0); | ||
917 | memcpy(virt + offset_in_page(gpa), &val, bytes); | ||
918 | kunmap_atomic(virt, KM_USER0); | ||
919 | kvm_mmu_post_write(vcpu, gpa, bytes); | ||
920 | return 1; | ||
921 | } | ||
922 | |||
866 | static int emulator_write_emulated(unsigned long addr, | 923 | static int emulator_write_emulated(unsigned long addr, |
867 | unsigned long val, | 924 | unsigned long val, |
868 | unsigned int bytes, | 925 | unsigned int bytes, |
@@ -874,6 +931,9 @@ static int emulator_write_emulated(unsigned long addr, | |||
874 | if (gpa == UNMAPPED_GVA) | 931 | if (gpa == UNMAPPED_GVA) |
875 | return X86EMUL_PROPAGATE_FAULT; | 932 | return X86EMUL_PROPAGATE_FAULT; |
876 | 933 | ||
934 | if (emulator_write_phys(vcpu, gpa, val, bytes)) | ||
935 | return X86EMUL_CONTINUE; | ||
936 | |||
877 | vcpu->mmio_needed = 1; | 937 | vcpu->mmio_needed = 1; |
878 | vcpu->mmio_phys_addr = gpa; | 938 | vcpu->mmio_phys_addr = gpa; |
879 | vcpu->mmio_size = bytes; | 939 | vcpu->mmio_size = bytes; |
@@ -898,6 +958,30 @@ static int emulator_cmpxchg_emulated(unsigned long addr, | |||
898 | return emulator_write_emulated(addr, new, bytes, ctxt); | 958 | return emulator_write_emulated(addr, new, bytes, ctxt); |
899 | } | 959 | } |
900 | 960 | ||
961 | #ifdef CONFIG_X86_32 | ||
962 | |||
963 | static int emulator_cmpxchg8b_emulated(unsigned long addr, | ||
964 | unsigned long old_lo, | ||
965 | unsigned long old_hi, | ||
966 | unsigned long new_lo, | ||
967 | unsigned long new_hi, | ||
968 | struct x86_emulate_ctxt *ctxt) | ||
969 | { | ||
970 | static int reported; | ||
971 | int r; | ||
972 | |||
973 | if (!reported) { | ||
974 | reported = 1; | ||
975 | printk(KERN_WARNING "kvm: emulating exchange8b as write\n"); | ||
976 | } | ||
977 | r = emulator_write_emulated(addr, new_lo, 4, ctxt); | ||
978 | if (r != X86EMUL_CONTINUE) | ||
979 | return r; | ||
980 | return emulator_write_emulated(addr+4, new_hi, 4, ctxt); | ||
981 | } | ||
982 | |||
983 | #endif | ||
984 | |||
901 | static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) | 985 | static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) |
902 | { | 986 | { |
903 | return kvm_arch_ops->get_segment_base(vcpu, seg); | 987 | return kvm_arch_ops->get_segment_base(vcpu, seg); |
@@ -905,18 +989,15 @@ static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) | |||
905 | 989 | ||
906 | int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) | 990 | int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) |
907 | { | 991 | { |
908 | spin_lock(&vcpu->kvm->lock); | ||
909 | vcpu->mmu.inval_page(vcpu, address); | ||
910 | spin_unlock(&vcpu->kvm->lock); | ||
911 | kvm_arch_ops->invlpg(vcpu, address); | ||
912 | return X86EMUL_CONTINUE; | 992 | return X86EMUL_CONTINUE; |
913 | } | 993 | } |
914 | 994 | ||
915 | int emulate_clts(struct kvm_vcpu *vcpu) | 995 | int emulate_clts(struct kvm_vcpu *vcpu) |
916 | { | 996 | { |
917 | unsigned long cr0 = vcpu->cr0; | 997 | unsigned long cr0; |
918 | 998 | ||
919 | cr0 &= ~CR0_TS_MASK; | 999 | kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu); |
1000 | cr0 = vcpu->cr0 & ~CR0_TS_MASK; | ||
920 | kvm_arch_ops->set_cr0(vcpu, cr0); | 1001 | kvm_arch_ops->set_cr0(vcpu, cr0); |
921 | return X86EMUL_CONTINUE; | 1002 | return X86EMUL_CONTINUE; |
922 | } | 1003 | } |
@@ -975,6 +1056,9 @@ struct x86_emulate_ops emulate_ops = { | |||
975 | .read_emulated = emulator_read_emulated, | 1056 | .read_emulated = emulator_read_emulated, |
976 | .write_emulated = emulator_write_emulated, | 1057 | .write_emulated = emulator_write_emulated, |
977 | .cmpxchg_emulated = emulator_cmpxchg_emulated, | 1058 | .cmpxchg_emulated = emulator_cmpxchg_emulated, |
1059 | #ifdef CONFIG_X86_32 | ||
1060 | .cmpxchg8b_emulated = emulator_cmpxchg8b_emulated, | ||
1061 | #endif | ||
978 | }; | 1062 | }; |
979 | 1063 | ||
980 | int emulate_instruction(struct kvm_vcpu *vcpu, | 1064 | int emulate_instruction(struct kvm_vcpu *vcpu, |
@@ -1024,6 +1108,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu, | |||
1024 | } | 1108 | } |
1025 | 1109 | ||
1026 | if (r) { | 1110 | if (r) { |
1111 | if (kvm_mmu_unprotect_page_virt(vcpu, cr2)) | ||
1112 | return EMULATE_DONE; | ||
1027 | if (!vcpu->mmio_needed) { | 1113 | if (!vcpu->mmio_needed) { |
1028 | report_emulation_failure(&emulate_ctxt); | 1114 | report_emulation_failure(&emulate_ctxt); |
1029 | return EMULATE_FAIL; | 1115 | return EMULATE_FAIL; |
@@ -1069,6 +1155,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, | |||
1069 | 1155 | ||
1070 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) | 1156 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) |
1071 | { | 1157 | { |
1158 | kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu); | ||
1072 | switch (cr) { | 1159 | switch (cr) { |
1073 | case 0: | 1160 | case 0: |
1074 | return vcpu->cr0; | 1161 | return vcpu->cr0; |
@@ -1403,6 +1490,7 @@ static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs) | |||
1403 | sregs->gdt.limit = dt.limit; | 1490 | sregs->gdt.limit = dt.limit; |
1404 | sregs->gdt.base = dt.base; | 1491 | sregs->gdt.base = dt.base; |
1405 | 1492 | ||
1493 | kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu); | ||
1406 | sregs->cr0 = vcpu->cr0; | 1494 | sregs->cr0 = vcpu->cr0; |
1407 | sregs->cr2 = vcpu->cr2; | 1495 | sregs->cr2 = vcpu->cr2; |
1408 | sregs->cr3 = vcpu->cr3; | 1496 | sregs->cr3 = vcpu->cr3; |
@@ -1467,11 +1555,15 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs) | |||
1467 | #endif | 1555 | #endif |
1468 | vcpu->apic_base = sregs->apic_base; | 1556 | vcpu->apic_base = sregs->apic_base; |
1469 | 1557 | ||
1558 | kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu); | ||
1559 | |||
1470 | mmu_reset_needed |= vcpu->cr0 != sregs->cr0; | 1560 | mmu_reset_needed |= vcpu->cr0 != sregs->cr0; |
1471 | kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0); | 1561 | kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0); |
1472 | 1562 | ||
1473 | mmu_reset_needed |= vcpu->cr4 != sregs->cr4; | 1563 | mmu_reset_needed |= vcpu->cr4 != sregs->cr4; |
1474 | kvm_arch_ops->set_cr4(vcpu, sregs->cr4); | 1564 | kvm_arch_ops->set_cr4(vcpu, sregs->cr4); |
1565 | if (!is_long_mode(vcpu) && is_pae(vcpu)) | ||
1566 | load_pdptrs(vcpu, vcpu->cr3); | ||
1475 | 1567 | ||
1476 | if (mmu_reset_needed) | 1568 | if (mmu_reset_needed) |
1477 | kvm_mmu_reset_context(vcpu); | 1569 | kvm_mmu_reset_context(vcpu); |
@@ -1693,12 +1785,12 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1693 | if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run)) | 1785 | if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run)) |
1694 | goto out; | 1786 | goto out; |
1695 | r = kvm_dev_ioctl_run(kvm, &kvm_run); | 1787 | r = kvm_dev_ioctl_run(kvm, &kvm_run); |
1696 | if (r < 0) | 1788 | if (r < 0 && r != -EINTR) |
1697 | goto out; | 1789 | goto out; |
1698 | r = -EFAULT; | 1790 | if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) { |
1699 | if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) | 1791 | r = -EFAULT; |
1700 | goto out; | 1792 | goto out; |
1701 | r = 0; | 1793 | } |
1702 | break; | 1794 | break; |
1703 | } | 1795 | } |
1704 | case KVM_GET_REGS: { | 1796 | case KVM_GET_REGS: { |
@@ -1842,6 +1934,7 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1842 | num_msrs_to_save * sizeof(u32))) | 1934 | num_msrs_to_save * sizeof(u32))) |
1843 | goto out; | 1935 | goto out; |
1844 | r = 0; | 1936 | r = 0; |
1937 | break; | ||
1845 | } | 1938 | } |
1846 | default: | 1939 | default: |
1847 | ; | 1940 | ; |
@@ -1944,17 +2037,17 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module) | |||
1944 | return -EEXIST; | 2037 | return -EEXIST; |
1945 | } | 2038 | } |
1946 | 2039 | ||
1947 | kvm_arch_ops = ops; | 2040 | if (!ops->cpu_has_kvm_support()) { |
1948 | |||
1949 | if (!kvm_arch_ops->cpu_has_kvm_support()) { | ||
1950 | printk(KERN_ERR "kvm: no hardware support\n"); | 2041 | printk(KERN_ERR "kvm: no hardware support\n"); |
1951 | return -EOPNOTSUPP; | 2042 | return -EOPNOTSUPP; |
1952 | } | 2043 | } |
1953 | if (kvm_arch_ops->disabled_by_bios()) { | 2044 | if (ops->disabled_by_bios()) { |
1954 | printk(KERN_ERR "kvm: disabled by bios\n"); | 2045 | printk(KERN_ERR "kvm: disabled by bios\n"); |
1955 | return -EOPNOTSUPP; | 2046 | return -EOPNOTSUPP; |
1956 | } | 2047 | } |
1957 | 2048 | ||
2049 | kvm_arch_ops = ops; | ||
2050 | |||
1958 | r = kvm_arch_ops->hardware_setup(); | 2051 | r = kvm_arch_ops->hardware_setup(); |
1959 | if (r < 0) | 2052 | if (r < 0) |
1960 | return r; | 2053 | return r; |
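
kvm_init_arch() now probes through the ops argument and assigns the global kvm_arch_ops only after both probes succeed, so a failed probe cannot leave a half-committed global behind. A minimal sketch of the same check-then-commit pattern (all names invented):

    #include <errno.h>
    #include <stdio.h>

    struct demo_ops {
        int (*hw_supported)(void);
        int (*disabled_by_bios)(void);
    };

    static struct demo_ops *demo_global_ops;  /* committed only on success */

    static int demo_init(struct demo_ops *ops)
    {
        if (!ops->hw_supported())
            return -EOPNOTSUPP;
        if (ops->disabled_by_bios())
            return -EOPNOTSUPP;
        demo_global_ops = ops;                /* all probes passed */
        return 0;
    }

    static int yes(void) { return 1; }
    static int no(void)  { return 0; }

    int main(void)
    {
        struct demo_ops good = { .hw_supported = yes, .disabled_by_bios = no };

        printf("init=%d committed=%d\n", demo_init(&good), demo_global_ops != NULL);
        return 0;
    }
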
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c index 790423c5f23d..c6f972914f08 100644 --- a/drivers/kvm/mmu.c +++ b/drivers/kvm/mmu.c | |||
@@ -26,7 +26,31 @@ | |||
26 | #include "vmx.h" | 26 | #include "vmx.h" |
27 | #include "kvm.h" | 27 | #include "kvm.h" |
28 | 28 | ||
29 | #undef MMU_DEBUG | ||
30 | |||
31 | #undef AUDIT | ||
32 | |||
33 | #ifdef AUDIT | ||
34 | static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg); | ||
35 | #else | ||
36 | static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {} | ||
37 | #endif | ||
38 | |||
39 | #ifdef MMU_DEBUG | ||
40 | |||
41 | #define pgprintk(x...) do { if (dbg) printk(x); } while (0) | ||
42 | #define rmap_printk(x...) do { if (dbg) printk(x); } while (0) | ||
43 | |||
44 | #else | ||
45 | |||
29 | #define pgprintk(x...) do { } while (0) | 46 | #define pgprintk(x...) do { } while (0) |
47 | #define rmap_printk(x...) do { } while (0) | ||
48 | |||
49 | #endif | ||
50 | |||
51 | #if defined(MMU_DEBUG) || defined(AUDIT) | ||
52 | static int dbg = 1; | ||
53 | #endif | ||
30 | 54 | ||
31 | #define ASSERT(x) \ | 55 | #define ASSERT(x) \ |
32 | if (!(x)) { \ | 56 | if (!(x)) { \ |
@@ -34,8 +58,10 @@ | |||
34 | __FILE__, __LINE__, #x); \ | 58 | __FILE__, __LINE__, #x); \ |
35 | } | 59 | } |
36 | 60 | ||
37 | #define PT64_ENT_PER_PAGE 512 | 61 | #define PT64_PT_BITS 9 |
38 | #define PT32_ENT_PER_PAGE 1024 | 62 | #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS) |
63 | #define PT32_PT_BITS 10 | ||
64 | #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS) | ||
39 | 65 | ||
40 | #define PT_WRITABLE_SHIFT 1 | 66 | #define PT_WRITABLE_SHIFT 1 |
41 | 67 | ||
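
The new constants derive the entry counts from the index widths rather than hard-coding them: a 4 KiB page holds 2^9 = 512 eight-byte 64-bit PTEs and 2^10 = 1024 four-byte 32-bit PTEs, and the same bit counts feed the quadrant arithmetic in kvm_mmu_get_page() further down. A trivial stand-alone check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE     4096
    #define DEMO_PT64_PT_BITS  9
    #define DEMO_PT32_PT_BITS  10

    int main(void)
    {
        /* 512 and 1024 entries per 4 KiB page, for 8- and 4-byte PTEs. */
        printf("%d %d\n",
               (1 << DEMO_PT64_PT_BITS) == DEMO_PAGE_SIZE / (int)sizeof(uint64_t),
               (1 << DEMO_PT32_PT_BITS) == DEMO_PAGE_SIZE / (int)sizeof(uint32_t));
        return 0;
    }
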
@@ -125,6 +151,13 @@ | |||
125 | #define PT_DIRECTORY_LEVEL 2 | 151 | #define PT_DIRECTORY_LEVEL 2 |
126 | #define PT_PAGE_TABLE_LEVEL 1 | 152 | #define PT_PAGE_TABLE_LEVEL 1 |
127 | 153 | ||
154 | #define RMAP_EXT 4 | ||
155 | |||
156 | struct kvm_rmap_desc { | ||
157 | u64 *shadow_ptes[RMAP_EXT]; | ||
158 | struct kvm_rmap_desc *more; | ||
159 | }; | ||
160 | |||
128 | static int is_write_protection(struct kvm_vcpu *vcpu) | 161 | static int is_write_protection(struct kvm_vcpu *vcpu) |
129 | { | 162 | { |
130 | return vcpu->cr0 & CR0_WP_MASK; | 163 | return vcpu->cr0 & CR0_WP_MASK; |
@@ -150,32 +183,272 @@ static int is_io_pte(unsigned long pte) | |||
150 | return pte & PT_SHADOW_IO_MARK; | 183 | return pte & PT_SHADOW_IO_MARK; |
151 | } | 184 | } |
152 | 185 | ||
186 | static int is_rmap_pte(u64 pte) | ||
187 | { | ||
188 | return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK)) | ||
189 | == (PT_WRITABLE_MASK | PT_PRESENT_MASK); | ||
190 | } | ||
191 | |||
192 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | ||
193 | size_t objsize, int min) | ||
194 | { | ||
195 | void *obj; | ||
196 | |||
197 | if (cache->nobjs >= min) | ||
198 | return 0; | ||
199 | while (cache->nobjs < ARRAY_SIZE(cache->objects)) { | ||
200 | obj = kzalloc(objsize, GFP_NOWAIT); | ||
201 | if (!obj) | ||
202 | return -ENOMEM; | ||
203 | cache->objects[cache->nobjs++] = obj; | ||
204 | } | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) | ||
209 | { | ||
210 | while (mc->nobjs) | ||
211 | kfree(mc->objects[--mc->nobjs]); | ||
212 | } | ||
213 | |||
214 | static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) | ||
215 | { | ||
216 | int r; | ||
217 | |||
218 | r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache, | ||
219 | sizeof(struct kvm_pte_chain), 4); | ||
220 | if (r) | ||
221 | goto out; | ||
222 | r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache, | ||
223 | sizeof(struct kvm_rmap_desc), 1); | ||
224 | out: | ||
225 | return r; | ||
226 | } | ||
227 | |||
228 | static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) | ||
229 | { | ||
230 | mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache); | ||
231 | mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache); | ||
232 | } | ||
233 | |||
234 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, | ||
235 | size_t size) | ||
236 | { | ||
237 | void *p; | ||
238 | |||
239 | BUG_ON(!mc->nobjs); | ||
240 | p = mc->objects[--mc->nobjs]; | ||
241 | memset(p, 0, size); | ||
242 | return p; | ||
243 | } | ||
244 | |||
245 | static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj) | ||
246 | { | ||
247 | if (mc->nobjs < KVM_NR_MEM_OBJS) | ||
248 | mc->objects[mc->nobjs++] = obj; | ||
249 | else | ||
250 | kfree(obj); | ||
251 | } | ||
252 | |||
253 | static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu) | ||
254 | { | ||
255 | return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache, | ||
256 | sizeof(struct kvm_pte_chain)); | ||
257 | } | ||
258 | |||
259 | static void mmu_free_pte_chain(struct kvm_vcpu *vcpu, | ||
260 | struct kvm_pte_chain *pc) | ||
261 | { | ||
262 | mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc); | ||
263 | } | ||
264 | |||
265 | static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu) | ||
266 | { | ||
267 | return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache, | ||
268 | sizeof(struct kvm_rmap_desc)); | ||
269 | } | ||
270 | |||
271 | static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu, | ||
272 | struct kvm_rmap_desc *rd) | ||
273 | { | ||
274 | mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd); | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * Reverse mapping data structures: | ||
279 | * | ||
280 | * If page->private bit zero is zero, then page->private points to the | ||
281 | * shadow page table entry that points to page_address(page). | ||
282 | * | ||
283 | * If page->private bit zero is one, (then page->private & ~1) points | ||
284 | * to a struct kvm_rmap_desc containing more mappings. | ||
285 | */ | ||
286 | static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte) | ||
287 | { | ||
288 | struct page *page; | ||
289 | struct kvm_rmap_desc *desc; | ||
290 | int i; | ||
291 | |||
292 | if (!is_rmap_pte(*spte)) | ||
293 | return; | ||
294 | page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); | ||
295 | if (!page->private) { | ||
296 | rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); | ||
297 | page->private = (unsigned long)spte; | ||
298 | } else if (!(page->private & 1)) { | ||
299 | rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); | ||
300 | desc = mmu_alloc_rmap_desc(vcpu); | ||
301 | desc->shadow_ptes[0] = (u64 *)page->private; | ||
302 | desc->shadow_ptes[1] = spte; | ||
303 | page->private = (unsigned long)desc | 1; | ||
304 | } else { | ||
305 | rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); | ||
306 | desc = (struct kvm_rmap_desc *)(page->private & ~1ul); | ||
307 | while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) | ||
308 | desc = desc->more; | ||
309 | if (desc->shadow_ptes[RMAP_EXT-1]) { | ||
310 | desc->more = mmu_alloc_rmap_desc(vcpu); | ||
311 | desc = desc->more; | ||
312 | } | ||
313 | for (i = 0; desc->shadow_ptes[i]; ++i) | ||
314 | ; | ||
315 | desc->shadow_ptes[i] = spte; | ||
316 | } | ||
317 | } | ||
318 | |||
319 | static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu, | ||
320 | struct page *page, | ||
321 | struct kvm_rmap_desc *desc, | ||
322 | int i, | ||
323 | struct kvm_rmap_desc *prev_desc) | ||
324 | { | ||
325 | int j; | ||
326 | |||
327 | for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j) | ||
328 | ; | ||
329 | desc->shadow_ptes[i] = desc->shadow_ptes[j]; | ||
330 | desc->shadow_ptes[j] = 0; | ||
331 | if (j != 0) | ||
332 | return; | ||
333 | if (!prev_desc && !desc->more) | ||
334 | page->private = (unsigned long)desc->shadow_ptes[0]; | ||
335 | else | ||
336 | if (prev_desc) | ||
337 | prev_desc->more = desc->more; | ||
338 | else | ||
339 | page->private = (unsigned long)desc->more | 1; | ||
340 | mmu_free_rmap_desc(vcpu, desc); | ||
341 | } | ||
342 | |||
343 | static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte) | ||
344 | { | ||
345 | struct page *page; | ||
346 | struct kvm_rmap_desc *desc; | ||
347 | struct kvm_rmap_desc *prev_desc; | ||
348 | int i; | ||
349 | |||
350 | if (!is_rmap_pte(*spte)) | ||
351 | return; | ||
352 | page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); | ||
353 | if (!page->private) { | ||
354 | printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); | ||
355 | BUG(); | ||
356 | } else if (!(page->private & 1)) { | ||
357 | rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte); | ||
358 | if ((u64 *)page->private != spte) { | ||
359 | printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n", | ||
360 | spte, *spte); | ||
361 | BUG(); | ||
362 | } | ||
363 | page->private = 0; | ||
364 | } else { | ||
365 | rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte); | ||
366 | desc = (struct kvm_rmap_desc *)(page->private & ~1ul); | ||
367 | prev_desc = NULL; | ||
368 | while (desc) { | ||
369 | for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) | ||
370 | if (desc->shadow_ptes[i] == spte) { | ||
371 | rmap_desc_remove_entry(vcpu, page, | ||
372 | desc, i, | ||
373 | prev_desc); | ||
374 | return; | ||
375 | } | ||
376 | prev_desc = desc; | ||
377 | desc = desc->more; | ||
378 | } | ||
379 | BUG(); | ||
380 | } | ||
381 | } | ||
382 | |||
383 | static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) | ||
384 | { | ||
385 | struct kvm *kvm = vcpu->kvm; | ||
386 | struct page *page; | ||
387 | struct kvm_memory_slot *slot; | ||
388 | struct kvm_rmap_desc *desc; | ||
389 | u64 *spte; | ||
390 | |||
391 | slot = gfn_to_memslot(kvm, gfn); | ||
392 | BUG_ON(!slot); | ||
393 | page = gfn_to_page(slot, gfn); | ||
394 | |||
395 | while (page->private) { | ||
396 | if (!(page->private & 1)) | ||
397 | spte = (u64 *)page->private; | ||
398 | else { | ||
399 | desc = (struct kvm_rmap_desc *)(page->private & ~1ul); | ||
400 | spte = desc->shadow_ptes[0]; | ||
401 | } | ||
402 | BUG_ON(!spte); | ||
403 | BUG_ON((*spte & PT64_BASE_ADDR_MASK) != | ||
404 | page_to_pfn(page) << PAGE_SHIFT); | ||
405 | BUG_ON(!(*spte & PT_PRESENT_MASK)); | ||
406 | BUG_ON(!(*spte & PT_WRITABLE_MASK)); | ||
407 | rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); | ||
408 | rmap_remove(vcpu, spte); | ||
409 | kvm_arch_ops->tlb_flush(vcpu); | ||
410 | *spte &= ~(u64)PT_WRITABLE_MASK; | ||
411 | } | ||
412 | } | ||
413 | |||
414 | static int is_empty_shadow_page(hpa_t page_hpa) | ||
415 | { | ||
416 | u64 *pos; | ||
417 | u64 *end; | ||
418 | |||
419 | for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64); | ||
420 | pos != end; pos++) | ||
421 | if (*pos != 0) { | ||
422 | printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__, | ||
423 | pos, *pos); | ||
424 | return 0; | ||
425 | } | ||
426 | return 1; | ||
427 | } | ||
428 | |||
153 | static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa) | 429 | static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa) |
154 | { | 430 | { |
155 | struct kvm_mmu_page *page_head = page_header(page_hpa); | 431 | struct kvm_mmu_page *page_head = page_header(page_hpa); |
156 | 432 | ||
433 | ASSERT(is_empty_shadow_page(page_hpa)); | ||
157 | list_del(&page_head->link); | 434 | list_del(&page_head->link); |
158 | page_head->page_hpa = page_hpa; | 435 | page_head->page_hpa = page_hpa; |
159 | list_add(&page_head->link, &vcpu->free_pages); | 436 | list_add(&page_head->link, &vcpu->free_pages); |
437 | ++vcpu->kvm->n_free_mmu_pages; | ||
160 | } | 438 | } |
161 | 439 | ||
162 | static int is_empty_shadow_page(hpa_t page_hpa) | 440 | static unsigned kvm_page_table_hashfn(gfn_t gfn) |
163 | { | 441 | { |
164 | u32 *pos; | 442 | return gfn; |
165 | u32 *end; | ||
166 | for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u32); | ||
167 | pos != end; pos++) | ||
168 | if (*pos != 0) | ||
169 | return 0; | ||
170 | return 1; | ||
171 | } | 443 | } |
172 | 444 | ||
173 | static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte) | 445 | static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, |
446 | u64 *parent_pte) | ||
174 | { | 447 | { |
175 | struct kvm_mmu_page *page; | 448 | struct kvm_mmu_page *page; |
176 | 449 | ||
177 | if (list_empty(&vcpu->free_pages)) | 450 | if (list_empty(&vcpu->free_pages)) |
178 | return INVALID_PAGE; | 451 | return NULL; |
179 | 452 | ||
180 | page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link); | 453 | page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link); |
181 | list_del(&page->link); | 454 | list_del(&page->link); |
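
The reverse-mapping code above stores everything in page->private as a tagged word: with bit 0 clear the word is a single shadow-PTE pointer, with bit 0 set it points (minus the tag) at a kvm_rmap_desc holding up to RMAP_EXT pointers plus a `more` chain. The following user-space sketch reproduces that encoding in isolation; it is a simplification that uses calloc() instead of the patch's per-vcpu object caches and tracks only one frame:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RMAP_EXT 4

    struct demo_rmap_desc {
        uint64_t *sptes[RMAP_EXT];
        struct demo_rmap_desc *more;
    };

    /* One tagged word per guest page frame, mirroring page->private:
     * bit 0 clear -> single spte pointer, bit 0 set -> descriptor list. */
    static unsigned long demo_rmap;

    static void demo_rmap_add(uint64_t *spte)
    {
        struct demo_rmap_desc *desc;
        int i;

        if (!demo_rmap) {                        /* 0 -> 1 mapping */
            demo_rmap = (unsigned long)spte;
            return;
        }
        if (!(demo_rmap & 1)) {                  /* 1 -> many */
            desc = calloc(1, sizeof(*desc));
            desc->sptes[0] = (uint64_t *)demo_rmap;
            desc->sptes[1] = spte;
            demo_rmap = (unsigned long)desc | 1;
            return;
        }
        desc = (struct demo_rmap_desc *)(demo_rmap & ~1ul);  /* many -> many */
        while (desc->sptes[RMAP_EXT - 1] && desc->more)
            desc = desc->more;
        if (desc->sptes[RMAP_EXT - 1]) {
            desc->more = calloc(1, sizeof(*desc));
            desc = desc->more;
        }
        for (i = 0; desc->sptes[i]; ++i)
            ;
        desc->sptes[i] = spte;
    }

    int main(void)
    {
        uint64_t sptes[6];
        int i, n = 0;

        for (i = 0; i < 6; ++i)
            demo_rmap_add(&sptes[i]);

        if (demo_rmap & 1) {
            struct demo_rmap_desc *d = (struct demo_rmap_desc *)(demo_rmap & ~1ul);

            for (; d; d = d->more)
                for (i = 0; i < RMAP_EXT && d->sptes[i]; ++i)
                    ++n;
        }
        printf("tracked %d shadow ptes\n", n);   /* 6 */
        return 0;
    }
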
@@ -183,8 +456,239 @@ static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte) | |||
183 | ASSERT(is_empty_shadow_page(page->page_hpa)); | 456 | ASSERT(is_empty_shadow_page(page->page_hpa)); |
184 | page->slot_bitmap = 0; | 457 | page->slot_bitmap = 0; |
185 | page->global = 1; | 458 | page->global = 1; |
459 | page->multimapped = 0; | ||
186 | page->parent_pte = parent_pte; | 460 | page->parent_pte = parent_pte; |
187 | return page->page_hpa; | 461 | --vcpu->kvm->n_free_mmu_pages; |
462 | return page; | ||
463 | } | ||
464 | |||
465 | static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, | ||
466 | struct kvm_mmu_page *page, u64 *parent_pte) | ||
467 | { | ||
468 | struct kvm_pte_chain *pte_chain; | ||
469 | struct hlist_node *node; | ||
470 | int i; | ||
471 | |||
472 | if (!parent_pte) | ||
473 | return; | ||
474 | if (!page->multimapped) { | ||
475 | u64 *old = page->parent_pte; | ||
476 | |||
477 | if (!old) { | ||
478 | page->parent_pte = parent_pte; | ||
479 | return; | ||
480 | } | ||
481 | page->multimapped = 1; | ||
482 | pte_chain = mmu_alloc_pte_chain(vcpu); | ||
483 | INIT_HLIST_HEAD(&page->parent_ptes); | ||
484 | hlist_add_head(&pte_chain->link, &page->parent_ptes); | ||
485 | pte_chain->parent_ptes[0] = old; | ||
486 | } | ||
487 | hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) { | ||
488 | if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1]) | ||
489 | continue; | ||
490 | for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) | ||
491 | if (!pte_chain->parent_ptes[i]) { | ||
492 | pte_chain->parent_ptes[i] = parent_pte; | ||
493 | return; | ||
494 | } | ||
495 | } | ||
496 | pte_chain = mmu_alloc_pte_chain(vcpu); | ||
497 | BUG_ON(!pte_chain); | ||
498 | hlist_add_head(&pte_chain->link, &page->parent_ptes); | ||
499 | pte_chain->parent_ptes[0] = parent_pte; | ||
500 | } | ||
501 | |||
502 | static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu, | ||
503 | struct kvm_mmu_page *page, | ||
504 | u64 *parent_pte) | ||
505 | { | ||
506 | struct kvm_pte_chain *pte_chain; | ||
507 | struct hlist_node *node; | ||
508 | int i; | ||
509 | |||
510 | if (!page->multimapped) { | ||
511 | BUG_ON(page->parent_pte != parent_pte); | ||
512 | page->parent_pte = NULL; | ||
513 | return; | ||
514 | } | ||
515 | hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) | ||
516 | for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) { | ||
517 | if (!pte_chain->parent_ptes[i]) | ||
518 | break; | ||
519 | if (pte_chain->parent_ptes[i] != parent_pte) | ||
520 | continue; | ||
521 | while (i + 1 < NR_PTE_CHAIN_ENTRIES | ||
522 | && pte_chain->parent_ptes[i + 1]) { | ||
523 | pte_chain->parent_ptes[i] | ||
524 | = pte_chain->parent_ptes[i + 1]; | ||
525 | ++i; | ||
526 | } | ||
527 | pte_chain->parent_ptes[i] = NULL; | ||
528 | if (i == 0) { | ||
529 | hlist_del(&pte_chain->link); | ||
530 | mmu_free_pte_chain(vcpu, pte_chain); | ||
531 | if (hlist_empty(&page->parent_ptes)) { | ||
532 | page->multimapped = 0; | ||
533 | page->parent_pte = NULL; | ||
534 | } | ||
535 | } | ||
536 | return; | ||
537 | } | ||
538 | BUG(); | ||
539 | } | ||
540 | |||
541 | static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu, | ||
542 | gfn_t gfn) | ||
543 | { | ||
544 | unsigned index; | ||
545 | struct hlist_head *bucket; | ||
546 | struct kvm_mmu_page *page; | ||
547 | struct hlist_node *node; | ||
548 | |||
549 | pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn); | ||
550 | index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES; | ||
551 | bucket = &vcpu->kvm->mmu_page_hash[index]; | ||
552 | hlist_for_each_entry(page, node, bucket, hash_link) | ||
553 | if (page->gfn == gfn && !page->role.metaphysical) { | ||
554 | pgprintk("%s: found role %x\n", | ||
555 | __FUNCTION__, page->role.word); | ||
556 | return page; | ||
557 | } | ||
558 | return NULL; | ||
559 | } | ||
560 | |||
561 | static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | ||
562 | gfn_t gfn, | ||
563 | gva_t gaddr, | ||
564 | unsigned level, | ||
565 | int metaphysical, | ||
566 | u64 *parent_pte) | ||
567 | { | ||
568 | union kvm_mmu_page_role role; | ||
569 | unsigned index; | ||
570 | unsigned quadrant; | ||
571 | struct hlist_head *bucket; | ||
572 | struct kvm_mmu_page *page; | ||
573 | struct hlist_node *node; | ||
574 | |||
575 | role.word = 0; | ||
576 | role.glevels = vcpu->mmu.root_level; | ||
577 | role.level = level; | ||
578 | role.metaphysical = metaphysical; | ||
579 | if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) { | ||
580 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); | ||
581 | quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; | ||
582 | role.quadrant = quadrant; | ||
583 | } | ||
584 | pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__, | ||
585 | gfn, role.word); | ||
586 | index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES; | ||
587 | bucket = &vcpu->kvm->mmu_page_hash[index]; | ||
588 | hlist_for_each_entry(page, node, bucket, hash_link) | ||
589 | if (page->gfn == gfn && page->role.word == role.word) { | ||
590 | mmu_page_add_parent_pte(vcpu, page, parent_pte); | ||
591 | pgprintk("%s: found\n", __FUNCTION__); | ||
592 | return page; | ||
593 | } | ||
594 | page = kvm_mmu_alloc_page(vcpu, parent_pte); | ||
595 | if (!page) | ||
596 | return page; | ||
597 | pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word); | ||
598 | page->gfn = gfn; | ||
599 | page->role = role; | ||
600 | hlist_add_head(&page->hash_link, bucket); | ||
601 | if (!metaphysical) | ||
602 | rmap_write_protect(vcpu, gfn); | ||
603 | return page; | ||
604 | } | ||
605 | |||
606 | static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu, | ||
607 | struct kvm_mmu_page *page) | ||
608 | { | ||
609 | unsigned i; | ||
610 | u64 *pt; | ||
611 | u64 ent; | ||
612 | |||
613 | pt = __va(page->page_hpa); | ||
614 | |||
615 | if (page->role.level == PT_PAGE_TABLE_LEVEL) { | ||
616 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { | ||
617 | if (pt[i] & PT_PRESENT_MASK) | ||
618 | rmap_remove(vcpu, &pt[i]); | ||
619 | pt[i] = 0; | ||
620 | } | ||
621 | kvm_arch_ops->tlb_flush(vcpu); | ||
622 | return; | ||
623 | } | ||
624 | |||
625 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { | ||
626 | ent = pt[i]; | ||
627 | |||
628 | pt[i] = 0; | ||
629 | if (!(ent & PT_PRESENT_MASK)) | ||
630 | continue; | ||
631 | ent &= PT64_BASE_ADDR_MASK; | ||
632 | mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]); | ||
633 | } | ||
634 | } | ||
635 | |||
636 | static void kvm_mmu_put_page(struct kvm_vcpu *vcpu, | ||
637 | struct kvm_mmu_page *page, | ||
638 | u64 *parent_pte) | ||
639 | { | ||
640 | mmu_page_remove_parent_pte(vcpu, page, parent_pte); | ||
641 | } | ||
642 | |||
643 | static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu, | ||
644 | struct kvm_mmu_page *page) | ||
645 | { | ||
646 | u64 *parent_pte; | ||
647 | |||
648 | while (page->multimapped || page->parent_pte) { | ||
649 | if (!page->multimapped) | ||
650 | parent_pte = page->parent_pte; | ||
651 | else { | ||
652 | struct kvm_pte_chain *chain; | ||
653 | |||
654 | chain = container_of(page->parent_ptes.first, | ||
655 | struct kvm_pte_chain, link); | ||
656 | parent_pte = chain->parent_ptes[0]; | ||
657 | } | ||
658 | BUG_ON(!parent_pte); | ||
659 | kvm_mmu_put_page(vcpu, page, parent_pte); | ||
660 | *parent_pte = 0; | ||
661 | } | ||
662 | kvm_mmu_page_unlink_children(vcpu, page); | ||
663 | if (!page->root_count) { | ||
664 | hlist_del(&page->hash_link); | ||
665 | kvm_mmu_free_page(vcpu, page->page_hpa); | ||
666 | } else { | ||
667 | list_del(&page->link); | ||
668 | list_add(&page->link, &vcpu->kvm->active_mmu_pages); | ||
669 | } | ||
670 | } | ||
671 | |||
672 | static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn) | ||
673 | { | ||
674 | unsigned index; | ||
675 | struct hlist_head *bucket; | ||
676 | struct kvm_mmu_page *page; | ||
677 | struct hlist_node *node, *n; | ||
678 | int r; | ||
679 | |||
680 | pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn); | ||
681 | r = 0; | ||
682 | index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES; | ||
683 | bucket = &vcpu->kvm->mmu_page_hash[index]; | ||
684 | hlist_for_each_entry_safe(page, node, n, bucket, hash_link) | ||
685 | if (page->gfn == gfn && !page->role.metaphysical) { | ||
686 | pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn, | ||
687 | page->role.word); | ||
688 | kvm_mmu_zap_page(vcpu, page); | ||
689 | r = 1; | ||
690 | } | ||
691 | return r; | ||
188 | } | 692 | } |
189 | 693 | ||
190 | static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa) | 694 | static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa) |
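
kvm_mmu_get_page() above keys shadow pages by (gfn, role): the role word records the guest paging depth, the shadow level, the metaphysical flag and, when a 32-bit guest is shadowed by 512-entry tables, a quadrant saying which half or quarter of the wider guest table this shadow page covers. A worked sketch of just the quadrant computation, using the same shifts as the hunk (wrapper names are invented):

    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 12
    #define DEMO_PT64_BITS  9   /* 512-entry shadow tables */
    #define DEMO_PT32_BITS  10  /* 1024-entry guest tables */

    /* Which 512-entry shadow table covers this address at the given
     * level of a 32-bit (non-PAE) guest. */
    static unsigned demo_quadrant(unsigned long gaddr, int level)
    {
        unsigned q = gaddr >> (DEMO_PAGE_SHIFT + DEMO_PT64_BITS * level);

        return q & ((1 << ((DEMO_PT32_BITS - DEMO_PT64_BITS) * level)) - 1);
    }

    int main(void)
    {
        printf("%u %u\n",
               demo_quadrant(0x00300000ul, 1),   /* 1: upper half of its guest PT */
               demo_quadrant(0xc0000000ul, 2));  /* 3: top quarter of the guest PD */
        return 0;
    }

At level 1 the quadrant is bit 21 of the address (which half of a 4 MiB guest page table is being shadowed); at level 2 it is bits 31:30 (which quarter of the guest page directory).
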
@@ -225,35 +729,6 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva) | |||
225 | return gpa_to_hpa(vcpu, gpa); | 729 | return gpa_to_hpa(vcpu, gpa); |
226 | } | 730 | } |
227 | 731 | ||
228 | |||
229 | static void release_pt_page_64(struct kvm_vcpu *vcpu, hpa_t page_hpa, | ||
230 | int level) | ||
231 | { | ||
232 | ASSERT(vcpu); | ||
233 | ASSERT(VALID_PAGE(page_hpa)); | ||
234 | ASSERT(level <= PT64_ROOT_LEVEL && level > 0); | ||
235 | |||
236 | if (level == 1) | ||
237 | memset(__va(page_hpa), 0, PAGE_SIZE); | ||
238 | else { | ||
239 | u64 *pos; | ||
240 | u64 *end; | ||
241 | |||
242 | for (pos = __va(page_hpa), end = pos + PT64_ENT_PER_PAGE; | ||
243 | pos != end; pos++) { | ||
244 | u64 current_ent = *pos; | ||
245 | |||
246 | *pos = 0; | ||
247 | if (is_present_pte(current_ent)) | ||
248 | release_pt_page_64(vcpu, | ||
249 | current_ent & | ||
250 | PT64_BASE_ADDR_MASK, | ||
251 | level - 1); | ||
252 | } | ||
253 | } | ||
254 | kvm_mmu_free_page(vcpu, page_hpa); | ||
255 | } | ||
256 | |||
257 | static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) | 732 | static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) |
258 | { | 733 | { |
259 | } | 734 | } |
@@ -266,52 +741,109 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p) | |||
266 | for (; ; level--) { | 741 | for (; ; level--) { |
267 | u32 index = PT64_INDEX(v, level); | 742 | u32 index = PT64_INDEX(v, level); |
268 | u64 *table; | 743 | u64 *table; |
744 | u64 pte; | ||
269 | 745 | ||
270 | ASSERT(VALID_PAGE(table_addr)); | 746 | ASSERT(VALID_PAGE(table_addr)); |
271 | table = __va(table_addr); | 747 | table = __va(table_addr); |
272 | 748 | ||
273 | if (level == 1) { | 749 | if (level == 1) { |
750 | pte = table[index]; | ||
751 | if (is_present_pte(pte) && is_writeble_pte(pte)) | ||
752 | return 0; | ||
274 | mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT); | 753 | mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT); |
275 | page_header_update_slot(vcpu->kvm, table, v); | 754 | page_header_update_slot(vcpu->kvm, table, v); |
276 | table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK | | 755 | table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK | |
277 | PT_USER_MASK; | 756 | PT_USER_MASK; |
757 | rmap_add(vcpu, &table[index]); | ||
278 | return 0; | 758 | return 0; |
279 | } | 759 | } |
280 | 760 | ||
281 | if (table[index] == 0) { | 761 | if (table[index] == 0) { |
282 | hpa_t new_table = kvm_mmu_alloc_page(vcpu, | 762 | struct kvm_mmu_page *new_table; |
283 | &table[index]); | 763 | gfn_t pseudo_gfn; |
284 | 764 | ||
285 | if (!VALID_PAGE(new_table)) { | 765 | pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK) |
766 | >> PAGE_SHIFT; | ||
767 | new_table = kvm_mmu_get_page(vcpu, pseudo_gfn, | ||
768 | v, level - 1, | ||
769 | 1, &table[index]); | ||
770 | if (!new_table) { | ||
286 | pgprintk("nonpaging_map: ENOMEM\n"); | 771 | pgprintk("nonpaging_map: ENOMEM\n"); |
287 | return -ENOMEM; | 772 | return -ENOMEM; |
288 | } | 773 | } |
289 | 774 | ||
290 | if (level == PT32E_ROOT_LEVEL) | 775 | table[index] = new_table->page_hpa | PT_PRESENT_MASK |
291 | table[index] = new_table | PT_PRESENT_MASK; | 776 | | PT_WRITABLE_MASK | PT_USER_MASK; |
292 | else | ||
293 | table[index] = new_table | PT_PRESENT_MASK | | ||
294 | PT_WRITABLE_MASK | PT_USER_MASK; | ||
295 | } | 777 | } |
296 | table_addr = table[index] & PT64_BASE_ADDR_MASK; | 778 | table_addr = table[index] & PT64_BASE_ADDR_MASK; |
297 | } | 779 | } |
298 | } | 780 | } |
299 | 781 | ||
300 | static void nonpaging_flush(struct kvm_vcpu *vcpu) | 782 | static void mmu_free_roots(struct kvm_vcpu *vcpu) |
301 | { | 783 | { |
302 | hpa_t root = vcpu->mmu.root_hpa; | 784 | int i; |
785 | struct kvm_mmu_page *page; | ||
303 | 786 | ||
304 | ++kvm_stat.tlb_flush; | 787 | #ifdef CONFIG_X86_64 |
305 | pgprintk("nonpaging_flush\n"); | 788 | if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) { |
306 | ASSERT(VALID_PAGE(root)); | 789 | hpa_t root = vcpu->mmu.root_hpa; |
307 | release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level); | 790 | |
308 | root = kvm_mmu_alloc_page(vcpu, NULL); | 791 | ASSERT(VALID_PAGE(root)); |
309 | ASSERT(VALID_PAGE(root)); | 792 | page = page_header(root); |
310 | vcpu->mmu.root_hpa = root; | 793 | --page->root_count; |
311 | if (is_paging(vcpu)) | 794 | vcpu->mmu.root_hpa = INVALID_PAGE; |
312 | root |= (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)); | 795 | return; |
313 | kvm_arch_ops->set_cr3(vcpu, root); | 796 | } |
314 | kvm_arch_ops->tlb_flush(vcpu); | 797 | #endif |
798 | for (i = 0; i < 4; ++i) { | ||
799 | hpa_t root = vcpu->mmu.pae_root[i]; | ||
800 | |||
801 | ASSERT(VALID_PAGE(root)); | ||
802 | root &= PT64_BASE_ADDR_MASK; | ||
803 | page = page_header(root); | ||
804 | --page->root_count; | ||
805 | vcpu->mmu.pae_root[i] = INVALID_PAGE; | ||
806 | } | ||
807 | vcpu->mmu.root_hpa = INVALID_PAGE; | ||
808 | } | ||
809 | |||
810 | static void mmu_alloc_roots(struct kvm_vcpu *vcpu) | ||
811 | { | ||
812 | int i; | ||
813 | gfn_t root_gfn; | ||
814 | struct kvm_mmu_page *page; | ||
815 | |||
816 | root_gfn = vcpu->cr3 >> PAGE_SHIFT; | ||
817 | |||
818 | #ifdef CONFIG_X86_64 | ||
819 | if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) { | ||
820 | hpa_t root = vcpu->mmu.root_hpa; | ||
821 | |||
822 | ASSERT(!VALID_PAGE(root)); | ||
823 | page = kvm_mmu_get_page(vcpu, root_gfn, 0, | ||
824 | PT64_ROOT_LEVEL, 0, NULL); | ||
825 | root = page->page_hpa; | ||
826 | ++page->root_count; | ||
827 | vcpu->mmu.root_hpa = root; | ||
828 | return; | ||
829 | } | ||
830 | #endif | ||
831 | for (i = 0; i < 4; ++i) { | ||
832 | hpa_t root = vcpu->mmu.pae_root[i]; | ||
833 | |||
834 | ASSERT(!VALID_PAGE(root)); | ||
835 | if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) | ||
836 | root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT; | ||
837 | else if (vcpu->mmu.root_level == 0) | ||
838 | root_gfn = 0; | ||
839 | page = kvm_mmu_get_page(vcpu, root_gfn, i << 30, | ||
840 | PT32_ROOT_LEVEL, !is_paging(vcpu), | ||
841 | NULL); | ||
842 | root = page->page_hpa; | ||
843 | ++page->root_count; | ||
844 | vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK; | ||
845 | } | ||
846 | vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root); | ||
315 | } | 847 | } |
316 | 848 | ||
317 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) | 849 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) |
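
mmu_alloc_roots() above installs either a single 4-level root in long mode or, otherwise, four roots in the preallocated pae_root page, one per gigabyte of guest address space. Which guest frame backs each slot depends on the guest's own paging mode; the sketch below restates just that selection (constants and names are stand-ins, and PAGE_SHIFT is assumed to be 12):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PT32E_ROOT_LEVEL 3

    /* Which guest frame backs PAE root slot i: the guest PDPTE when the
     * guest itself runs PAE, frame 0 for a non-paging guest, otherwise
     * the (32-bit, non-PAE) guest's CR3 frame.  Mirrors the loop above. */
    static uint64_t demo_root_gfn(int root_level, uint64_t cr3_gfn,
                                  const uint64_t pdptrs[4], int i)
    {
        if (root_level == DEMO_PT32E_ROOT_LEVEL)
            return pdptrs[i] >> 12;
        if (root_level == 0)
            return 0;
        return cr3_gfn;
    }

    int main(void)
    {
        uint64_t pdptrs[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
        int i;

        for (i = 0; i < 4; ++i)
            printf("slot %d -> gfn %llu\n", i,
                   (unsigned long long)demo_root_gfn(DEMO_PT32E_ROOT_LEVEL,
                                                     5, pdptrs, i));
        return 0;
    }
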
@@ -322,43 +854,29 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) | |||
322 | static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, | 854 | static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, |
323 | u32 error_code) | 855 | u32 error_code) |
324 | { | 856 | { |
325 | int ret; | ||
326 | gpa_t addr = gva; | 857 | gpa_t addr = gva; |
858 | hpa_t paddr; | ||
859 | int r; | ||
860 | |||
861 | r = mmu_topup_memory_caches(vcpu); | ||
862 | if (r) | ||
863 | return r; | ||
327 | 864 | ||
328 | ASSERT(vcpu); | 865 | ASSERT(vcpu); |
329 | ASSERT(VALID_PAGE(vcpu->mmu.root_hpa)); | 866 | ASSERT(VALID_PAGE(vcpu->mmu.root_hpa)); |
330 | 867 | ||
331 | for (;;) { | ||
332 | hpa_t paddr; | ||
333 | |||
334 | paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK); | ||
335 | 868 | ||
336 | if (is_error_hpa(paddr)) | 869 | paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK); |
337 | return 1; | ||
338 | 870 | ||
339 | ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr); | 871 | if (is_error_hpa(paddr)) |
340 | if (ret) { | 872 | return 1; |
341 | nonpaging_flush(vcpu); | ||
342 | continue; | ||
343 | } | ||
344 | break; | ||
345 | } | ||
346 | return ret; | ||
347 | } | ||
348 | 873 | ||
349 | static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr) | 874 | return nonpaging_map(vcpu, addr & PAGE_MASK, paddr); |
350 | { | ||
351 | } | 875 | } |
352 | 876 | ||
353 | static void nonpaging_free(struct kvm_vcpu *vcpu) | 877 | static void nonpaging_free(struct kvm_vcpu *vcpu) |
354 | { | 878 | { |
355 | hpa_t root; | 879 | mmu_free_roots(vcpu); |
356 | |||
357 | ASSERT(vcpu); | ||
358 | root = vcpu->mmu.root_hpa; | ||
359 | if (VALID_PAGE(root)) | ||
360 | release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level); | ||
361 | vcpu->mmu.root_hpa = INVALID_PAGE; | ||
362 | } | 880 | } |
363 | 881 | ||
364 | static int nonpaging_init_context(struct kvm_vcpu *vcpu) | 882 | static int nonpaging_init_context(struct kvm_vcpu *vcpu) |
@@ -367,40 +885,31 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu) | |||
367 | 885 | ||
368 | context->new_cr3 = nonpaging_new_cr3; | 886 | context->new_cr3 = nonpaging_new_cr3; |
369 | context->page_fault = nonpaging_page_fault; | 887 | context->page_fault = nonpaging_page_fault; |
370 | context->inval_page = nonpaging_inval_page; | ||
371 | context->gva_to_gpa = nonpaging_gva_to_gpa; | 888 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
372 | context->free = nonpaging_free; | 889 | context->free = nonpaging_free; |
373 | context->root_level = PT32E_ROOT_LEVEL; | 890 | context->root_level = 0; |
374 | context->shadow_root_level = PT32E_ROOT_LEVEL; | 891 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
375 | context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL); | 892 | mmu_alloc_roots(vcpu); |
376 | ASSERT(VALID_PAGE(context->root_hpa)); | 893 | ASSERT(VALID_PAGE(context->root_hpa)); |
377 | kvm_arch_ops->set_cr3(vcpu, context->root_hpa); | 894 | kvm_arch_ops->set_cr3(vcpu, context->root_hpa); |
378 | return 0; | 895 | return 0; |
379 | } | 896 | } |
380 | 897 | ||
381 | |||
382 | static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) | 898 | static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) |
383 | { | 899 | { |
384 | struct kvm_mmu_page *page, *npage; | ||
385 | |||
386 | list_for_each_entry_safe(page, npage, &vcpu->kvm->active_mmu_pages, | ||
387 | link) { | ||
388 | if (page->global) | ||
389 | continue; | ||
390 | |||
391 | if (!page->parent_pte) | ||
392 | continue; | ||
393 | |||
394 | *page->parent_pte = 0; | ||
395 | release_pt_page_64(vcpu, page->page_hpa, 1); | ||
396 | } | ||
397 | ++kvm_stat.tlb_flush; | 900 | ++kvm_stat.tlb_flush; |
398 | kvm_arch_ops->tlb_flush(vcpu); | 901 | kvm_arch_ops->tlb_flush(vcpu); |
399 | } | 902 | } |
400 | 903 | ||
401 | static void paging_new_cr3(struct kvm_vcpu *vcpu) | 904 | static void paging_new_cr3(struct kvm_vcpu *vcpu) |
402 | { | 905 | { |
906 | pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3); | ||
907 | mmu_free_roots(vcpu); | ||
908 | if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)) | ||
909 | kvm_mmu_free_some_pages(vcpu); | ||
910 | mmu_alloc_roots(vcpu); | ||
403 | kvm_mmu_flush_tlb(vcpu); | 911 | kvm_mmu_flush_tlb(vcpu); |
912 | kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa); | ||
404 | } | 913 | } |
405 | 914 | ||
406 | static void mark_pagetable_nonglobal(void *shadow_pte) | 915 | static void mark_pagetable_nonglobal(void *shadow_pte) |
@@ -412,7 +921,8 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu, | |||
412 | u64 *shadow_pte, | 921 | u64 *shadow_pte, |
413 | gpa_t gaddr, | 922 | gpa_t gaddr, |
414 | int dirty, | 923 | int dirty, |
415 | u64 access_bits) | 924 | u64 access_bits, |
925 | gfn_t gfn) | ||
416 | { | 926 | { |
417 | hpa_t paddr; | 927 | hpa_t paddr; |
418 | 928 | ||
@@ -420,13 +930,10 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu, | |||
420 | if (!dirty) | 930 | if (!dirty) |
421 | access_bits &= ~PT_WRITABLE_MASK; | 931 | access_bits &= ~PT_WRITABLE_MASK; |
422 | 932 | ||
423 | if (access_bits & PT_WRITABLE_MASK) | 933 | paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK); |
424 | mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT); | ||
425 | 934 | ||
426 | *shadow_pte |= access_bits; | 935 | *shadow_pte |= access_bits; |
427 | 936 | ||
428 | paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK); | ||
429 | |||
430 | if (!(*shadow_pte & PT_GLOBAL_MASK)) | 937 | if (!(*shadow_pte & PT_GLOBAL_MASK)) |
431 | mark_pagetable_nonglobal(shadow_pte); | 938 | mark_pagetable_nonglobal(shadow_pte); |
432 | 939 | ||
@@ -434,10 +941,31 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu, | |||
434 | *shadow_pte |= gaddr; | 941 | *shadow_pte |= gaddr; |
435 | *shadow_pte |= PT_SHADOW_IO_MARK; | 942 | *shadow_pte |= PT_SHADOW_IO_MARK; |
436 | *shadow_pte &= ~PT_PRESENT_MASK; | 943 | *shadow_pte &= ~PT_PRESENT_MASK; |
437 | } else { | 944 | return; |
438 | *shadow_pte |= paddr; | 945 | } |
439 | page_header_update_slot(vcpu->kvm, shadow_pte, gaddr); | 946 | |
947 | *shadow_pte |= paddr; | ||
948 | |||
949 | if (access_bits & PT_WRITABLE_MASK) { | ||
950 | struct kvm_mmu_page *shadow; | ||
951 | |||
952 | shadow = kvm_mmu_lookup_page(vcpu, gfn); | ||
953 | if (shadow) { | ||
954 | pgprintk("%s: found shadow page for %lx, marking ro\n", | ||
955 | __FUNCTION__, gfn); | ||
956 | access_bits &= ~PT_WRITABLE_MASK; | ||
957 | if (is_writeble_pte(*shadow_pte)) { | ||
958 | *shadow_pte &= ~PT_WRITABLE_MASK; | ||
959 | kvm_arch_ops->tlb_flush(vcpu); | ||
960 | } | ||
961 | } | ||
440 | } | 962 | } |
963 | |||
964 | if (access_bits & PT_WRITABLE_MASK) | ||
965 | mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT); | ||
966 | |||
967 | page_header_update_slot(vcpu->kvm, shadow_pte, gaddr); | ||
968 | rmap_add(vcpu, shadow_pte); | ||
441 | } | 969 | } |
442 | 970 | ||
443 | static void inject_page_fault(struct kvm_vcpu *vcpu, | 971 | static void inject_page_fault(struct kvm_vcpu *vcpu, |
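
The rewritten tail of set_pte_common() above is what keeps shadowed guest page tables trapping: before granting write access it asks kvm_mmu_lookup_page() whether the target gfn is itself shadowed as a page table, and if so it strips PT_WRITABLE_MASK so every guest write to that table faults into the pre-write hook. A boiled-down sketch of the decision, with an invented lookup stub:

    #include <stdio.h>

    #define DEMO_PT_WRITABLE (1ull << 1)

    /* Pretend gfn 42 is currently shadowed as a guest page table. */
    static int demo_gfn_is_shadowed(unsigned long gfn)
    {
        return gfn == 42;
    }

    /* Strip write permission from the new shadow PTE when the frame it
     * maps is itself a shadowed page table, so guest writes keep faulting. */
    static unsigned long long demo_access_bits(unsigned long gfn,
                                               unsigned long long access)
    {
        if ((access & DEMO_PT_WRITABLE) && demo_gfn_is_shadowed(gfn))
            access &= ~DEMO_PT_WRITABLE;
        return access;
    }

    int main(void)
    {
        printf("%llx %llx\n",
               demo_access_bits(7, DEMO_PT_WRITABLE),    /* stays writable  */
               demo_access_bits(42, DEMO_PT_WRITABLE));  /* write-protected */
        return 0;
    }
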
@@ -474,41 +1002,6 @@ static int may_access(u64 pte, int write, int user) | |||
474 | return 1; | 1002 | return 1; |
475 | } | 1003 | } |
476 | 1004 | ||
477 | /* | ||
478 | * Remove a shadow pte. | ||
479 | */ | ||
480 | static void paging_inval_page(struct kvm_vcpu *vcpu, gva_t addr) | ||
481 | { | ||
482 | hpa_t page_addr = vcpu->mmu.root_hpa; | ||
483 | int level = vcpu->mmu.shadow_root_level; | ||
484 | |||
485 | ++kvm_stat.invlpg; | ||
486 | |||
487 | for (; ; level--) { | ||
488 | u32 index = PT64_INDEX(addr, level); | ||
489 | u64 *table = __va(page_addr); | ||
490 | |||
491 | if (level == PT_PAGE_TABLE_LEVEL ) { | ||
492 | table[index] = 0; | ||
493 | return; | ||
494 | } | ||
495 | |||
496 | if (!is_present_pte(table[index])) | ||
497 | return; | ||
498 | |||
499 | page_addr = table[index] & PT64_BASE_ADDR_MASK; | ||
500 | |||
501 | if (level == PT_DIRECTORY_LEVEL && | ||
502 | (table[index] & PT_SHADOW_PS_MARK)) { | ||
503 | table[index] = 0; | ||
504 | release_pt_page_64(vcpu, page_addr, PT_PAGE_TABLE_LEVEL); | ||
505 | |||
506 | kvm_arch_ops->tlb_flush(vcpu); | ||
507 | return; | ||
508 | } | ||
509 | } | ||
510 | } | ||
511 | |||
512 | static void paging_free(struct kvm_vcpu *vcpu) | 1005 | static void paging_free(struct kvm_vcpu *vcpu) |
513 | { | 1006 | { |
514 | nonpaging_free(vcpu); | 1007 | nonpaging_free(vcpu); |
@@ -522,37 +1015,40 @@ static void paging_free(struct kvm_vcpu *vcpu) | |||
522 | #include "paging_tmpl.h" | 1015 | #include "paging_tmpl.h" |
523 | #undef PTTYPE | 1016 | #undef PTTYPE |
524 | 1017 | ||
525 | static int paging64_init_context(struct kvm_vcpu *vcpu) | 1018 | static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level) |
526 | { | 1019 | { |
527 | struct kvm_mmu *context = &vcpu->mmu; | 1020 | struct kvm_mmu *context = &vcpu->mmu; |
528 | 1021 | ||
529 | ASSERT(is_pae(vcpu)); | 1022 | ASSERT(is_pae(vcpu)); |
530 | context->new_cr3 = paging_new_cr3; | 1023 | context->new_cr3 = paging_new_cr3; |
531 | context->page_fault = paging64_page_fault; | 1024 | context->page_fault = paging64_page_fault; |
532 | context->inval_page = paging_inval_page; | ||
533 | context->gva_to_gpa = paging64_gva_to_gpa; | 1025 | context->gva_to_gpa = paging64_gva_to_gpa; |
534 | context->free = paging_free; | 1026 | context->free = paging_free; |
535 | context->root_level = PT64_ROOT_LEVEL; | 1027 | context->root_level = level; |
536 | context->shadow_root_level = PT64_ROOT_LEVEL; | 1028 | context->shadow_root_level = level; |
537 | context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL); | 1029 | mmu_alloc_roots(vcpu); |
538 | ASSERT(VALID_PAGE(context->root_hpa)); | 1030 | ASSERT(VALID_PAGE(context->root_hpa)); |
539 | kvm_arch_ops->set_cr3(vcpu, context->root_hpa | | 1031 | kvm_arch_ops->set_cr3(vcpu, context->root_hpa | |
540 | (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK))); | 1032 | (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK))); |
541 | return 0; | 1033 | return 0; |
542 | } | 1034 | } |
543 | 1035 | ||
1036 | static int paging64_init_context(struct kvm_vcpu *vcpu) | ||
1037 | { | ||
1038 | return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL); | ||
1039 | } | ||
1040 | |||
544 | static int paging32_init_context(struct kvm_vcpu *vcpu) | 1041 | static int paging32_init_context(struct kvm_vcpu *vcpu) |
545 | { | 1042 | { |
546 | struct kvm_mmu *context = &vcpu->mmu; | 1043 | struct kvm_mmu *context = &vcpu->mmu; |
547 | 1044 | ||
548 | context->new_cr3 = paging_new_cr3; | 1045 | context->new_cr3 = paging_new_cr3; |
549 | context->page_fault = paging32_page_fault; | 1046 | context->page_fault = paging32_page_fault; |
550 | context->inval_page = paging_inval_page; | ||
551 | context->gva_to_gpa = paging32_gva_to_gpa; | 1047 | context->gva_to_gpa = paging32_gva_to_gpa; |
552 | context->free = paging_free; | 1048 | context->free = paging_free; |
553 | context->root_level = PT32_ROOT_LEVEL; | 1049 | context->root_level = PT32_ROOT_LEVEL; |
554 | context->shadow_root_level = PT32E_ROOT_LEVEL; | 1050 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
555 | context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL); | 1051 | mmu_alloc_roots(vcpu); |
556 | ASSERT(VALID_PAGE(context->root_hpa)); | 1052 | ASSERT(VALID_PAGE(context->root_hpa)); |
557 | kvm_arch_ops->set_cr3(vcpu, context->root_hpa | | 1053 | kvm_arch_ops->set_cr3(vcpu, context->root_hpa | |
558 | (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK))); | 1054 | (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK))); |
@@ -561,14 +1057,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu) | |||
561 | 1057 | ||
562 | static int paging32E_init_context(struct kvm_vcpu *vcpu) | 1058 | static int paging32E_init_context(struct kvm_vcpu *vcpu) |
563 | { | 1059 | { |
564 | int ret; | 1060 | return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL); |
565 | |||
566 | if ((ret = paging64_init_context(vcpu))) | ||
567 | return ret; | ||
568 | |||
569 | vcpu->mmu.root_level = PT32E_ROOT_LEVEL; | ||
570 | vcpu->mmu.shadow_root_level = PT32E_ROOT_LEVEL; | ||
571 | return 0; | ||
572 | } | 1061 | } |
573 | 1062 | ||
574 | static int init_kvm_mmu(struct kvm_vcpu *vcpu) | 1063 | static int init_kvm_mmu(struct kvm_vcpu *vcpu) |
@@ -597,41 +1086,161 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu) | |||
597 | 1086 | ||
598 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) | 1087 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) |
599 | { | 1088 | { |
1089 | int r; | ||
1090 | |||
600 | destroy_kvm_mmu(vcpu); | 1091 | destroy_kvm_mmu(vcpu); |
601 | return init_kvm_mmu(vcpu); | 1092 | r = init_kvm_mmu(vcpu); |
1093 | if (r < 0) | ||
1094 | goto out; | ||
1095 | r = mmu_topup_memory_caches(vcpu); | ||
1096 | out: | ||
1097 | return r; | ||
602 | } | 1098 | } |
603 | 1099 | ||
604 | static void free_mmu_pages(struct kvm_vcpu *vcpu) | 1100 | void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes) |
605 | { | 1101 | { |
606 | while (!list_empty(&vcpu->free_pages)) { | 1102 | gfn_t gfn = gpa >> PAGE_SHIFT; |
1103 | struct kvm_mmu_page *page; | ||
1104 | struct kvm_mmu_page *child; | ||
1105 | struct hlist_node *node, *n; | ||
1106 | struct hlist_head *bucket; | ||
1107 | unsigned index; | ||
1108 | u64 *spte; | ||
1109 | u64 pte; | ||
1110 | unsigned offset = offset_in_page(gpa); | ||
1111 | unsigned pte_size; | ||
1112 | unsigned page_offset; | ||
1113 | unsigned misaligned; | ||
1114 | int level; | ||
1115 | int flooded = 0; | ||
1116 | |||
1117 | pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes); | ||
1118 | if (gfn == vcpu->last_pt_write_gfn) { | ||
1119 | ++vcpu->last_pt_write_count; | ||
1120 | if (vcpu->last_pt_write_count >= 3) | ||
1121 | flooded = 1; | ||
1122 | } else { | ||
1123 | vcpu->last_pt_write_gfn = gfn; | ||
1124 | vcpu->last_pt_write_count = 1; | ||
1125 | } | ||
1126 | index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES; | ||
1127 | bucket = &vcpu->kvm->mmu_page_hash[index]; | ||
1128 | hlist_for_each_entry_safe(page, node, n, bucket, hash_link) { | ||
1129 | if (page->gfn != gfn || page->role.metaphysical) | ||
1130 | continue; | ||
1131 | pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8; | ||
1132 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); | ||
1133 | if (misaligned || flooded) { | ||
1134 | /* | ||
1135 | * Misaligned accesses are too much trouble to fix | ||
1136 | * up; also, they usually indicate a page is not used | ||
1137 | * as a page table. | ||
1138 | * | ||
1139 | * If we're seeing too many writes to a page, | ||
1140 | * it may no longer be a page table, or we may be | ||
1141 | * forking, in which case it is better to unmap the | ||
1142 | * page. | ||
1143 | */ | ||
1144 | pgprintk("misaligned: gpa %llx bytes %d role %x\n", | ||
1145 | gpa, bytes, page->role.word); | ||
1146 | kvm_mmu_zap_page(vcpu, page); | ||
1147 | continue; | ||
1148 | } | ||
1149 | page_offset = offset; | ||
1150 | level = page->role.level; | ||
1151 | if (page->role.glevels == PT32_ROOT_LEVEL) { | ||
1152 | page_offset <<= 1; /* 32->64 */ | ||
1153 | page_offset &= ~PAGE_MASK; | ||
1154 | } | ||
1155 | spte = __va(page->page_hpa); | ||
1156 | spte += page_offset / sizeof(*spte); | ||
1157 | pte = *spte; | ||
1158 | if (is_present_pte(pte)) { | ||
1159 | if (level == PT_PAGE_TABLE_LEVEL) | ||
1160 | rmap_remove(vcpu, spte); | ||
1161 | else { | ||
1162 | child = page_header(pte & PT64_BASE_ADDR_MASK); | ||
1163 | mmu_page_remove_parent_pte(vcpu, child, spte); | ||
1164 | } | ||
1165 | } | ||
1166 | *spte = 0; | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1170 | void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes) | ||
1171 | { | ||
1172 | } | ||
1173 | |||
1174 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) | ||
1175 | { | ||
1176 | gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva); | ||
1177 | |||
1178 | return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT); | ||
1179 | } | ||
1180 | |||
1181 | void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) | ||
1182 | { | ||
1183 | while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) { | ||
607 | struct kvm_mmu_page *page; | 1184 | struct kvm_mmu_page *page; |
608 | 1185 | ||
1186 | page = container_of(vcpu->kvm->active_mmu_pages.prev, | ||
1187 | struct kvm_mmu_page, link); | ||
1188 | kvm_mmu_zap_page(vcpu, page); | ||
1189 | } | ||
1190 | } | ||
1191 | EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages); | ||
1192 | |||
1193 | static void free_mmu_pages(struct kvm_vcpu *vcpu) | ||
1194 | { | ||
1195 | struct kvm_mmu_page *page; | ||
1196 | |||
1197 | while (!list_empty(&vcpu->kvm->active_mmu_pages)) { | ||
1198 | page = container_of(vcpu->kvm->active_mmu_pages.next, | ||
1199 | struct kvm_mmu_page, link); | ||
1200 | kvm_mmu_zap_page(vcpu, page); | ||
1201 | } | ||
1202 | while (!list_empty(&vcpu->free_pages)) { | ||
609 | page = list_entry(vcpu->free_pages.next, | 1203 | page = list_entry(vcpu->free_pages.next, |
610 | struct kvm_mmu_page, link); | 1204 | struct kvm_mmu_page, link); |
611 | list_del(&page->link); | 1205 | list_del(&page->link); |
612 | __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT)); | 1206 | __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT)); |
613 | page->page_hpa = INVALID_PAGE; | 1207 | page->page_hpa = INVALID_PAGE; |
614 | } | 1208 | } |
1209 | free_page((unsigned long)vcpu->mmu.pae_root); | ||
615 | } | 1210 | } |
616 | 1211 | ||
617 | static int alloc_mmu_pages(struct kvm_vcpu *vcpu) | 1212 | static int alloc_mmu_pages(struct kvm_vcpu *vcpu) |
618 | { | 1213 | { |
1214 | struct page *page; | ||
619 | int i; | 1215 | int i; |
620 | 1216 | ||
621 | ASSERT(vcpu); | 1217 | ASSERT(vcpu); |
622 | 1218 | ||
623 | for (i = 0; i < KVM_NUM_MMU_PAGES; i++) { | 1219 | for (i = 0; i < KVM_NUM_MMU_PAGES; i++) { |
624 | struct page *page; | ||
625 | struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i]; | 1220 | struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i]; |
626 | 1221 | ||
627 | INIT_LIST_HEAD(&page_header->link); | 1222 | INIT_LIST_HEAD(&page_header->link); |
628 | if ((page = alloc_page(GFP_KVM_MMU)) == NULL) | 1223 | if ((page = alloc_page(GFP_KERNEL)) == NULL) |
629 | goto error_1; | 1224 | goto error_1; |
630 | page->private = (unsigned long)page_header; | 1225 | page->private = (unsigned long)page_header; |
631 | page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; | 1226 | page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; |
632 | memset(__va(page_header->page_hpa), 0, PAGE_SIZE); | 1227 | memset(__va(page_header->page_hpa), 0, PAGE_SIZE); |
633 | list_add(&page_header->link, &vcpu->free_pages); | 1228 | list_add(&page_header->link, &vcpu->free_pages); |
1229 | ++vcpu->kvm->n_free_mmu_pages; | ||
634 | } | 1230 | } |
1231 | |||
1232 | /* | ||
1233 | * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. | ||
1234 | * Therefore we need to allocate shadow page tables in the first | ||
1235 | * 4GB of memory, which happens to fit the DMA32 zone. | ||
1236 | */ | ||
1237 | page = alloc_page(GFP_KERNEL | __GFP_DMA32); | ||
1238 | if (!page) | ||
1239 | goto error_1; | ||
1240 | vcpu->mmu.pae_root = page_address(page); | ||
1241 | for (i = 0; i < 4; ++i) | ||
1242 | vcpu->mmu.pae_root[i] = INVALID_PAGE; | ||
1243 | |||
635 | return 0; | 1244 | return 0; |
636 | 1245 | ||
637 | error_1: | 1246 | error_1: |
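
Two heuristics in kvm_mmu_pre_write() above decide when to stop tracking a shadowed guest page table and simply zap it: a write that is misaligned with respect to the guest PTE size (it would straddle two entries), and a gfn that has taken three or more consecutive writes, which suggests the page is no longer used as a page table or the guest is forking. The misalignment test packs into one expression; a stand-alone check of it (helper name invented):

    #include <stdio.h>

    /* A write is "misaligned" for page-table tracking when it touches
     * bytes belonging to more than one guest PTE of the given size. */
    static int demo_misaligned(unsigned offset, unsigned bytes, unsigned pte_size)
    {
        return ((offset ^ (offset + bytes - 1)) & ~(pte_size - 1)) != 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               demo_misaligned(0, 8, 8),   /* 0: one full 64-bit PTE       */
               demo_misaligned(4, 8, 8),   /* 1: straddles two 64-bit PTEs */
               demo_misaligned(4, 4, 4));  /* 0: one full 32-bit PTE       */
        return 0;
    }
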
@@ -663,10 +1272,12 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu) | |||
663 | 1272 | ||
664 | destroy_kvm_mmu(vcpu); | 1273 | destroy_kvm_mmu(vcpu); |
665 | free_mmu_pages(vcpu); | 1274 | free_mmu_pages(vcpu); |
1275 | mmu_free_memory_caches(vcpu); | ||
666 | } | 1276 | } |
667 | 1277 | ||
668 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) | 1278 | void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot) |
669 | { | 1279 | { |
1280 | struct kvm *kvm = vcpu->kvm; | ||
670 | struct kvm_mmu_page *page; | 1281 | struct kvm_mmu_page *page; |
671 | 1282 | ||
672 | list_for_each_entry(page, &kvm->active_mmu_pages, link) { | 1283 | list_for_each_entry(page, &kvm->active_mmu_pages, link) { |
@@ -679,8 +1290,169 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) | |||
679 | pt = __va(page->page_hpa); | 1290 | pt = __va(page->page_hpa); |
680 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) | 1291 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
681 | /* avoid RMW */ | 1292 | /* avoid RMW */ |
682 | if (pt[i] & PT_WRITABLE_MASK) | 1293 | if (pt[i] & PT_WRITABLE_MASK) { |
1294 | rmap_remove(vcpu, &pt[i]); | ||
683 | pt[i] &= ~PT_WRITABLE_MASK; | 1295 | pt[i] &= ~PT_WRITABLE_MASK; |
1296 | } | ||
1297 | } | ||
1298 | } | ||
1299 | |||
1300 | #ifdef AUDIT | ||
1301 | |||
1302 | static const char *audit_msg; | ||
1303 | |||
1304 | static gva_t canonicalize(gva_t gva) | ||
1305 | { | ||
1306 | #ifdef CONFIG_X86_64 | ||
1307 | gva = (long long)(gva << 16) >> 16; | ||
1308 | #endif | ||
1309 | return gva; | ||
1310 | } | ||
684 | 1311 | ||
1312 | static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte, | ||
1313 | gva_t va, int level) | ||
1314 | { | ||
1315 | u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK); | ||
1316 | int i; | ||
1317 | gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1)); | ||
1318 | |||
1319 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) { | ||
1320 | u64 ent = pt[i]; | ||
1321 | |||
1322 | if (!ent & PT_PRESENT_MASK) | ||
1323 | continue; | ||
1324 | |||
1325 | va = canonicalize(va); | ||
1326 | if (level > 1) | ||
1327 | audit_mappings_page(vcpu, ent, va, level - 1); | ||
1328 | else { | ||
1329 | gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va); | ||
1330 | hpa_t hpa = gpa_to_hpa(vcpu, gpa); | ||
1331 | |||
1332 | if ((ent & PT_PRESENT_MASK) | ||
1333 | && (ent & PT64_BASE_ADDR_MASK) != hpa) | ||
1334 | printk(KERN_ERR "audit error: (%s) levels %d" | ||
1335 | " gva %lx gpa %llx hpa %llx ent %llx\n", | ||
1336 | audit_msg, vcpu->mmu.root_level, | ||
1337 | va, gpa, hpa, ent); | ||
1338 | } | ||
685 | } | 1339 | } |
686 | } | 1340 | } |
1341 | |||
1342 | static void audit_mappings(struct kvm_vcpu *vcpu) | ||
1343 | { | ||
1344 | int i; | ||
1345 | |||
1346 | if (vcpu->mmu.root_level == 4) | ||
1347 | audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4); | ||
1348 | else | ||
1349 | for (i = 0; i < 4; ++i) | ||
1350 | if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK) | ||
1351 | audit_mappings_page(vcpu, | ||
1352 | vcpu->mmu.pae_root[i], | ||
1353 | i << 30, | ||
1354 | 2); | ||
1355 | } | ||
1356 | |||
1357 | static int count_rmaps(struct kvm_vcpu *vcpu) | ||
1358 | { | ||
1359 | int nmaps = 0; | ||
1360 | int i, j, k; | ||
1361 | |||
1362 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { | ||
1363 | struct kvm_memory_slot *m = &vcpu->kvm->memslots[i]; | ||
1364 | struct kvm_rmap_desc *d; | ||
1365 | |||
1366 | for (j = 0; j < m->npages; ++j) { | ||
1367 | struct page *page = m->phys_mem[j]; | ||
1368 | |||
1369 | if (!page->private) | ||
1370 | continue; | ||
1371 | if (!(page->private & 1)) { | ||
1372 | ++nmaps; | ||
1373 | continue; | ||
1374 | } | ||
1375 | d = (struct kvm_rmap_desc *)(page->private & ~1ul); | ||
1376 | while (d) { | ||
1377 | for (k = 0; k < RMAP_EXT; ++k) | ||
1378 | if (d->shadow_ptes[k]) | ||
1379 | ++nmaps; | ||
1380 | else | ||
1381 | break; | ||
1382 | d = d->more; | ||
1383 | } | ||
1384 | } | ||
1385 | } | ||
1386 | return nmaps; | ||
1387 | } | ||
1388 | |||
1389 | static int count_writable_mappings(struct kvm_vcpu *vcpu) | ||
1390 | { | ||
1391 | int nmaps = 0; | ||
1392 | struct kvm_mmu_page *page; | ||
1393 | int i; | ||
1394 | |||
1395 | list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) { | ||
1396 | u64 *pt = __va(page->page_hpa); | ||
1397 | |||
1398 | if (page->role.level != PT_PAGE_TABLE_LEVEL) | ||
1399 | continue; | ||
1400 | |||
1401 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { | ||
1402 | u64 ent = pt[i]; | ||
1403 | |||
1404 | if (!(ent & PT_PRESENT_MASK)) | ||
1405 | continue; | ||
1406 | if (!(ent & PT_WRITABLE_MASK)) | ||
1407 | continue; | ||
1408 | ++nmaps; | ||
1409 | } | ||
1410 | } | ||
1411 | return nmaps; | ||
1412 | } | ||
1413 | |||
1414 | static void audit_rmap(struct kvm_vcpu *vcpu) | ||
1415 | { | ||
1416 | int n_rmap = count_rmaps(vcpu); | ||
1417 | int n_actual = count_writable_mappings(vcpu); | ||
1418 | |||
1419 | if (n_rmap != n_actual) | ||
1420 | printk(KERN_ERR "%s: (%s) rmap %d actual %d\n", | ||
1421 | __FUNCTION__, audit_msg, n_rmap, n_actual); | ||
1422 | } | ||
1423 | |||
1424 | static void audit_write_protection(struct kvm_vcpu *vcpu) | ||
1425 | { | ||
1426 | struct kvm_mmu_page *page; | ||
1427 | |||
1428 | list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) { | ||
1429 | hfn_t hfn; | ||
1430 | struct page *pg; | ||
1431 | |||
1432 | if (page->role.metaphysical) | ||
1433 | continue; | ||
1434 | |||
1435 | hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT) | ||
1436 | >> PAGE_SHIFT; | ||
1437 | pg = pfn_to_page(hfn); | ||
1438 | if (pg->private) | ||
1439 | printk(KERN_ERR "%s: (%s) shadow page has writable" | ||
1440 | " mappings: gfn %lx role %x\n", | ||
1441 | __FUNCTION__, audit_msg, page->gfn, | ||
1442 | page->role.word); | ||
1443 | } | ||
1444 | } | ||
1445 | |||
1446 | static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) | ||
1447 | { | ||
1448 | int olddbg = dbg; | ||
1449 | |||
1450 | dbg = 0; | ||
1451 | audit_msg = msg; | ||
1452 | audit_rmap(vcpu); | ||
1453 | audit_write_protection(vcpu); | ||
1454 | audit_mappings(vcpu); | ||
1455 | dbg = olddbg; | ||
1456 | } | ||
1457 | |||
1458 | #endif | ||
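
The AUDIT-only helpers above rebuild guest-virtual-to-host mappings from the shadow tables and compare them against gva_to_gpa(); canonicalize() sign-extends bit 47 so reconstructed x86-64 addresses come out in canonical form. A two-line demonstration of that sign extension (assumes a host compiler that performs arithmetic right shifts on signed values, as the kernel does):

    #include <stdio.h>

    /* Sign-extend a 48-bit x86-64 virtual address to canonical form. */
    static unsigned long long demo_canonicalize(unsigned long long va)
    {
        return (unsigned long long)((long long)(va << 16) >> 16);
    }

    int main(void)
    {
        printf("%llx\n", demo_canonicalize(0x0000ffff80001234ull));
        /* prints ffffffff80001234: bit 47 was set, the upper bits follow it */
        return 0;
    }
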
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h index 09bb9b4ed12d..2dbf4307ed9e 100644 --- a/drivers/kvm/paging_tmpl.h +++ b/drivers/kvm/paging_tmpl.h | |||
@@ -32,6 +32,11 @@ | |||
32 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) | 32 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) |
33 | #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) | 33 | #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) |
34 | #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK | 34 | #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK |
35 | #ifdef CONFIG_X86_64 | ||
36 | #define PT_MAX_FULL_LEVELS 4 | ||
37 | #else | ||
38 | #define PT_MAX_FULL_LEVELS 2 | ||
39 | #endif | ||
35 | #elif PTTYPE == 32 | 40 | #elif PTTYPE == 32 |
36 | #define pt_element_t u32 | 41 | #define pt_element_t u32 |
37 | #define guest_walker guest_walker32 | 42 | #define guest_walker guest_walker32 |
@@ -42,6 +47,7 @@ | |||
42 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) | 47 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) |
43 | #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) | 48 | #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) |
44 | #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK | 49 | #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK |
50 | #define PT_MAX_FULL_LEVELS 2 | ||
45 | #else | 51 | #else |
46 | #error Invalid PTTYPE value | 52 | #error Invalid PTTYPE value |
47 | #endif | 53 | #endif |
@@ -52,93 +58,126 @@ | |||
52 | */ | 58 | */ |
53 | struct guest_walker { | 59 | struct guest_walker { |
54 | int level; | 60 | int level; |
61 | gfn_t table_gfn[PT_MAX_FULL_LEVELS]; | ||
55 | pt_element_t *table; | 62 | pt_element_t *table; |
63 | pt_element_t *ptep; | ||
56 | pt_element_t inherited_ar; | 64 | pt_element_t inherited_ar; |
65 | gfn_t gfn; | ||
57 | }; | 66 | }; |
58 | 67 | ||
59 | static void FNAME(init_walker)(struct guest_walker *walker, | 68 | /* |
60 | struct kvm_vcpu *vcpu) | 69 | * Fetch a guest pte for a guest virtual address |
70 | */ | ||
71 | static void FNAME(walk_addr)(struct guest_walker *walker, | ||
72 | struct kvm_vcpu *vcpu, gva_t addr) | ||
61 | { | 73 | { |
62 | hpa_t hpa; | 74 | hpa_t hpa; |
63 | struct kvm_memory_slot *slot; | 75 | struct kvm_memory_slot *slot; |
76 | pt_element_t *ptep; | ||
77 | pt_element_t root; | ||
78 | gfn_t table_gfn; | ||
64 | 79 | ||
80 | pgprintk("%s: addr %lx\n", __FUNCTION__, addr); | ||
65 | walker->level = vcpu->mmu.root_level; | 81 | walker->level = vcpu->mmu.root_level; |
66 | slot = gfn_to_memslot(vcpu->kvm, | 82 | walker->table = NULL; |
67 | (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); | 83 | root = vcpu->cr3; |
68 | hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK); | 84 | #if PTTYPE == 64 |
85 | if (!is_long_mode(vcpu)) { | ||
86 | walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3]; | ||
87 | root = *walker->ptep; | ||
88 | if (!(root & PT_PRESENT_MASK)) | ||
89 | return; | ||
90 | --walker->level; | ||
91 | } | ||
92 | #endif | ||
93 | table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; | ||
94 | walker->table_gfn[walker->level - 1] = table_gfn; | ||
95 | pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__, | ||
96 | walker->level - 1, table_gfn); | ||
97 | slot = gfn_to_memslot(vcpu->kvm, table_gfn); | ||
98 | hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK); | ||
69 | walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0); | 99 | walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0); |
70 | 100 | ||
71 | ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || | 101 | ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || |
72 | (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0); | 102 | (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0); |
73 | 103 | ||
74 | walker->table = (pt_element_t *)( (unsigned long)walker->table | | ||
75 | (unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) ); | ||
76 | walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK; | 104 | walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK; |
105 | |||
106 | for (;;) { | ||
107 | int index = PT_INDEX(addr, walker->level); | ||
108 | hpa_t paddr; | ||
109 | |||
110 | ptep = &walker->table[index]; | ||
111 | ASSERT(((unsigned long)walker->table & PAGE_MASK) == | ||
112 | ((unsigned long)ptep & PAGE_MASK)); | ||
113 | |||
114 | if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK)) | ||
115 | *ptep |= PT_ACCESSED_MASK; | ||
116 | |||
117 | if (!is_present_pte(*ptep)) | ||
118 | break; | ||
119 | |||
120 | if (walker->level == PT_PAGE_TABLE_LEVEL) { | ||
121 | walker->gfn = (*ptep & PT_BASE_ADDR_MASK) | ||
122 | >> PAGE_SHIFT; | ||
123 | break; | ||
124 | } | ||
125 | |||
126 | if (walker->level == PT_DIRECTORY_LEVEL | ||
127 | && (*ptep & PT_PAGE_SIZE_MASK) | ||
128 | && (PTTYPE == 64 || is_pse(vcpu))) { | ||
129 | walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK) | ||
130 | >> PAGE_SHIFT; | ||
131 | walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL); | ||
132 | break; | ||
133 | } | ||
134 | |||
135 | if (walker->level != 3 || is_long_mode(vcpu)) | ||
136 | walker->inherited_ar &= walker->table[index]; | ||
137 | table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT; | ||
138 | paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK); | ||
139 | kunmap_atomic(walker->table, KM_USER0); | ||
140 | walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT), | ||
141 | KM_USER0); | ||
142 | --walker->level; | ||
143 | walker->table_gfn[walker->level - 1 ] = table_gfn; | ||
144 | pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__, | ||
145 | walker->level - 1, table_gfn); | ||
146 | } | ||
147 | walker->ptep = ptep; | ||
148 | pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep); | ||
77 | } | 149 | } |
78 | 150 | ||
79 | static void FNAME(release_walker)(struct guest_walker *walker) | 151 | static void FNAME(release_walker)(struct guest_walker *walker) |
80 | { | 152 | { |
81 | kunmap_atomic(walker->table, KM_USER0); | 153 | if (walker->table) |
154 | kunmap_atomic(walker->table, KM_USER0); | ||
82 | } | 155 | } |
83 | 156 | ||
84 | static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte, | 157 | static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte, |
85 | u64 *shadow_pte, u64 access_bits) | 158 | u64 *shadow_pte, u64 access_bits, gfn_t gfn) |
86 | { | 159 | { |
87 | ASSERT(*shadow_pte == 0); | 160 | ASSERT(*shadow_pte == 0); |
88 | access_bits &= guest_pte; | 161 | access_bits &= guest_pte; |
89 | *shadow_pte = (guest_pte & PT_PTE_COPY_MASK); | 162 | *shadow_pte = (guest_pte & PT_PTE_COPY_MASK); |
90 | set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK, | 163 | set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK, |
91 | guest_pte & PT_DIRTY_MASK, access_bits); | 164 | guest_pte & PT_DIRTY_MASK, access_bits, gfn); |
92 | } | 165 | } |
93 | 166 | ||
94 | static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde, | 167 | static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde, |
95 | u64 *shadow_pte, u64 access_bits, | 168 | u64 *shadow_pte, u64 access_bits, gfn_t gfn) |
96 | int index) | ||
97 | { | 169 | { |
98 | gpa_t gaddr; | 170 | gpa_t gaddr; |
99 | 171 | ||
100 | ASSERT(*shadow_pte == 0); | 172 | ASSERT(*shadow_pte == 0); |
101 | access_bits &= guest_pde; | 173 | access_bits &= guest_pde; |
102 | gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index; | 174 | gaddr = (gpa_t)gfn << PAGE_SHIFT; |
103 | if (PTTYPE == 32 && is_cpuid_PSE36()) | 175 | if (PTTYPE == 32 && is_cpuid_PSE36()) |
104 | gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) << | 176 | gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) << |
105 | (32 - PT32_DIR_PSE36_SHIFT); | 177 | (32 - PT32_DIR_PSE36_SHIFT); |
106 | *shadow_pte = guest_pde & PT_PTE_COPY_MASK; | 178 | *shadow_pte = guest_pde & PT_PTE_COPY_MASK; |
107 | set_pte_common(vcpu, shadow_pte, gaddr, | 179 | set_pte_common(vcpu, shadow_pte, gaddr, |
108 | guest_pde & PT_DIRTY_MASK, access_bits); | 180 | guest_pde & PT_DIRTY_MASK, access_bits, gfn); |
109 | } | ||
110 | |||
111 | /* | ||
112 | * Fetch a guest pte from a specific level in the paging hierarchy. | ||
113 | */ | ||
114 | static pt_element_t *FNAME(fetch_guest)(struct kvm_vcpu *vcpu, | ||
115 | struct guest_walker *walker, | ||
116 | int level, | ||
117 | gva_t addr) | ||
118 | { | ||
119 | |||
120 | ASSERT(level > 0 && level <= walker->level); | ||
121 | |||
122 | for (;;) { | ||
123 | int index = PT_INDEX(addr, walker->level); | ||
124 | hpa_t paddr; | ||
125 | |||
126 | ASSERT(((unsigned long)walker->table & PAGE_MASK) == | ||
127 | ((unsigned long)&walker->table[index] & PAGE_MASK)); | ||
128 | if (level == walker->level || | ||
129 | !is_present_pte(walker->table[index]) || | ||
130 | (walker->level == PT_DIRECTORY_LEVEL && | ||
131 | (walker->table[index] & PT_PAGE_SIZE_MASK) && | ||
132 | (PTTYPE == 64 || is_pse(vcpu)))) | ||
133 | return &walker->table[index]; | ||
134 | if (walker->level != 3 || is_long_mode(vcpu)) | ||
135 | walker->inherited_ar &= walker->table[index]; | ||
136 | paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK); | ||
137 | kunmap_atomic(walker->table, KM_USER0); | ||
138 | walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT), | ||
139 | KM_USER0); | ||
140 | --walker->level; | ||
141 | } | ||
142 | } | 181 | } |
143 | 182 | ||
144 | /* | 183 | /* |
@@ -150,15 +189,26 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
150 | hpa_t shadow_addr; | 189 | hpa_t shadow_addr; |
151 | int level; | 190 | int level; |
152 | u64 *prev_shadow_ent = NULL; | 191 | u64 *prev_shadow_ent = NULL; |
192 | pt_element_t *guest_ent = walker->ptep; | ||
193 | |||
194 | if (!is_present_pte(*guest_ent)) | ||
195 | return NULL; | ||
153 | 196 | ||
154 | shadow_addr = vcpu->mmu.root_hpa; | 197 | shadow_addr = vcpu->mmu.root_hpa; |
155 | level = vcpu->mmu.shadow_root_level; | 198 | level = vcpu->mmu.shadow_root_level; |
199 | if (level == PT32E_ROOT_LEVEL) { | ||
200 | shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3]; | ||
201 | shadow_addr &= PT64_BASE_ADDR_MASK; | ||
202 | --level; | ||
203 | } | ||
156 | 204 | ||
157 | for (; ; level--) { | 205 | for (; ; level--) { |
158 | u32 index = SHADOW_PT_INDEX(addr, level); | 206 | u32 index = SHADOW_PT_INDEX(addr, level); |
159 | u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index; | 207 | u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index; |
160 | pt_element_t *guest_ent; | 208 | struct kvm_mmu_page *shadow_page; |
161 | u64 shadow_pte; | 209 | u64 shadow_pte; |
210 | int metaphysical; | ||
211 | gfn_t table_gfn; | ||
162 | 212 | ||
163 | if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) { | 213 | if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) { |
164 | if (level == PT_PAGE_TABLE_LEVEL) | 214 | if (level == PT_PAGE_TABLE_LEVEL) |
@@ -168,21 +218,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
168 | continue; | 218 | continue; |
169 | } | 219 | } |
170 | 220 | ||
171 | if (PTTYPE == 32 && level > PT32_ROOT_LEVEL) { | ||
172 | ASSERT(level == PT32E_ROOT_LEVEL); | ||
173 | guest_ent = FNAME(fetch_guest)(vcpu, walker, | ||
174 | PT32_ROOT_LEVEL, addr); | ||
175 | } else | ||
176 | guest_ent = FNAME(fetch_guest)(vcpu, walker, | ||
177 | level, addr); | ||
178 | |||
179 | if (!is_present_pte(*guest_ent)) | ||
180 | return NULL; | ||
181 | |||
182 | /* Don't set accessed bit on PAE PDPTRs */ | ||
183 | if (vcpu->mmu.root_level != 3 || walker->level != 3) | ||
184 | *guest_ent |= PT_ACCESSED_MASK; | ||
185 | |||
186 | if (level == PT_PAGE_TABLE_LEVEL) { | 221 | if (level == PT_PAGE_TABLE_LEVEL) { |
187 | 222 | ||
188 | if (walker->level == PT_DIRECTORY_LEVEL) { | 223 | if (walker->level == PT_DIRECTORY_LEVEL) { |
@@ -190,21 +225,30 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
190 | *prev_shadow_ent |= PT_SHADOW_PS_MARK; | 225 | *prev_shadow_ent |= PT_SHADOW_PS_MARK; |
191 | FNAME(set_pde)(vcpu, *guest_ent, shadow_ent, | 226 | FNAME(set_pde)(vcpu, *guest_ent, shadow_ent, |
192 | walker->inherited_ar, | 227 | walker->inherited_ar, |
193 | PT_INDEX(addr, PT_PAGE_TABLE_LEVEL)); | 228 | walker->gfn); |
194 | } else { | 229 | } else { |
195 | ASSERT(walker->level == PT_PAGE_TABLE_LEVEL); | 230 | ASSERT(walker->level == PT_PAGE_TABLE_LEVEL); |
196 | FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar); | 231 | FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, |
232 | walker->inherited_ar, | ||
233 | walker->gfn); | ||
197 | } | 234 | } |
198 | return shadow_ent; | 235 | return shadow_ent; |
199 | } | 236 | } |
200 | 237 | ||
201 | shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent); | 238 | if (level - 1 == PT_PAGE_TABLE_LEVEL |
202 | if (!VALID_PAGE(shadow_addr)) | 239 | && walker->level == PT_DIRECTORY_LEVEL) { |
203 | return ERR_PTR(-ENOMEM); | 240 | metaphysical = 1; |
204 | shadow_pte = shadow_addr | PT_PRESENT_MASK; | 241 | table_gfn = (*guest_ent & PT_BASE_ADDR_MASK) |
205 | if (vcpu->mmu.root_level > 3 || level != 3) | 242 | >> PAGE_SHIFT; |
206 | shadow_pte |= PT_ACCESSED_MASK | 243 | } else { |
207 | | PT_WRITABLE_MASK | PT_USER_MASK; | 244 | metaphysical = 0; |
245 | table_gfn = walker->table_gfn[level - 2]; | ||
246 | } | ||
247 | shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, | ||
248 | metaphysical, shadow_ent); | ||
249 | shadow_addr = shadow_page->page_hpa; | ||
250 | shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK | ||
251 | | PT_WRITABLE_MASK | PT_USER_MASK; | ||
208 | *shadow_ent = shadow_pte; | 252 | *shadow_ent = shadow_pte; |
209 | prev_shadow_ent = shadow_ent; | 253 | prev_shadow_ent = shadow_ent; |
210 | } | 254 | } |
@@ -221,11 +265,13 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu, | |||
221 | u64 *shadow_ent, | 265 | u64 *shadow_ent, |
222 | struct guest_walker *walker, | 266 | struct guest_walker *walker, |
223 | gva_t addr, | 267 | gva_t addr, |
224 | int user) | 268 | int user, |
269 | int *write_pt) | ||
225 | { | 270 | { |
226 | pt_element_t *guest_ent; | 271 | pt_element_t *guest_ent; |
227 | int writable_shadow; | 272 | int writable_shadow; |
228 | gfn_t gfn; | 273 | gfn_t gfn; |
274 | struct kvm_mmu_page *page; | ||
229 | 275 | ||
230 | if (is_writeble_pte(*shadow_ent)) | 276 | if (is_writeble_pte(*shadow_ent)) |
231 | return 0; | 277 | return 0; |
@@ -250,17 +296,35 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu, | |||
250 | *shadow_ent &= ~PT_USER_MASK; | 296 | *shadow_ent &= ~PT_USER_MASK; |
251 | } | 297 | } |
252 | 298 | ||
253 | guest_ent = FNAME(fetch_guest)(vcpu, walker, PT_PAGE_TABLE_LEVEL, addr); | 299 | guest_ent = walker->ptep; |
254 | 300 | ||
255 | if (!is_present_pte(*guest_ent)) { | 301 | if (!is_present_pte(*guest_ent)) { |
256 | *shadow_ent = 0; | 302 | *shadow_ent = 0; |
257 | return 0; | 303 | return 0; |
258 | } | 304 | } |
259 | 305 | ||
260 | gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; | 306 | gfn = walker->gfn; |
307 | |||
308 | if (user) { | ||
309 | /* | ||
310 | * Usermode page faults won't be for page table updates. | ||
311 | */ | ||
312 | while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) { | ||
313 | pgprintk("%s: zap %lx %x\n", | ||
314 | __FUNCTION__, gfn, page->role.word); | ||
315 | kvm_mmu_zap_page(vcpu, page); | ||
316 | } | ||
317 | } else if (kvm_mmu_lookup_page(vcpu, gfn)) { | ||
318 | pgprintk("%s: found shadow page for %lx, marking ro\n", | ||
319 | __FUNCTION__, gfn); | ||
320 | *guest_ent |= PT_DIRTY_MASK; | ||
321 | *write_pt = 1; | ||
322 | return 0; | ||
323 | } | ||
261 | mark_page_dirty(vcpu->kvm, gfn); | 324 | mark_page_dirty(vcpu->kvm, gfn); |
262 | *shadow_ent |= PT_WRITABLE_MASK; | 325 | *shadow_ent |= PT_WRITABLE_MASK; |
263 | *guest_ent |= PT_DIRTY_MASK; | 326 | *guest_ent |= PT_DIRTY_MASK; |
327 | rmap_add(vcpu, shadow_ent); | ||
264 | 328 | ||
265 | return 1; | 329 | return 1; |
266 | } | 330 | } |
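The fix_write_pf() hunk above adds a policy for write faults that hit a guest frame currently shadowed as a page table: a user-mode fault cannot be a page-table update, so the shadow pages are zapped and the write is allowed, while a kernel-mode fault is flagged via write_pt so the instruction gets emulated instead. A minimal standalone restatement of that decision, with invented names (classify_write_fault, gfn_is_shadowed) rather than the kernel's:

enum write_fault_action {
	WF_ALLOW,          /* gfn not shadowed: just make the shadow pte writable */
	WF_ZAP_AND_ALLOW,  /* user fault: cannot be a page-table update, zap shadows */
	WF_EMULATE,        /* kernel fault on a shadowed gfn: set write_pt, emulate */
};

static enum write_fault_action classify_write_fault(int user_fault,
						    int gfn_is_shadowed)
{
	if (!gfn_is_shadowed)
		return WF_ALLOW;
	return user_fault ? WF_ZAP_AND_ALLOW : WF_EMULATE;
}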
@@ -276,7 +340,8 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu, | |||
276 | * - normal guest page fault due to the guest pte marked not present, not | 340 | * - normal guest page fault due to the guest pte marked not present, not |
277 | * writable, or not executable | 341 | * writable, or not executable |
278 | * | 342 | * |
279 | * Returns: 1 if we need to emulate the instruction, 0 otherwise | 343 | * Returns: 1 if we need to emulate the instruction, 0 otherwise, or |
344 | * a negative value on error. | ||
280 | */ | 345 | */ |
281 | static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | 346 | static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, |
282 | u32 error_code) | 347 | u32 error_code) |
@@ -287,39 +352,47 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | |||
287 | struct guest_walker walker; | 352 | struct guest_walker walker; |
288 | u64 *shadow_pte; | 353 | u64 *shadow_pte; |
289 | int fixed; | 354 | int fixed; |
355 | int write_pt = 0; | ||
356 | int r; | ||
357 | |||
358 | pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code); | ||
359 | kvm_mmu_audit(vcpu, "pre page fault"); | ||
360 | |||
361 | r = mmu_topup_memory_caches(vcpu); | ||
362 | if (r) | ||
363 | return r; | ||
290 | 364 | ||
291 | /* | 365 | /* |
292 | * Look up the shadow pte for the faulting address. | 366 | * Look up the shadow pte for the faulting address. |
293 | */ | 367 | */ |
294 | for (;;) { | 368 | FNAME(walk_addr)(&walker, vcpu, addr); |
295 | FNAME(init_walker)(&walker, vcpu); | 369 | shadow_pte = FNAME(fetch)(vcpu, addr, &walker); |
296 | shadow_pte = FNAME(fetch)(vcpu, addr, &walker); | ||
297 | if (IS_ERR(shadow_pte)) { /* must be -ENOMEM */ | ||
298 | nonpaging_flush(vcpu); | ||
299 | FNAME(release_walker)(&walker); | ||
300 | continue; | ||
301 | } | ||
302 | break; | ||
303 | } | ||
304 | 370 | ||
305 | /* | 371 | /* |
306 | * The page is not mapped by the guest. Let the guest handle it. | 372 | * The page is not mapped by the guest. Let the guest handle it. |
307 | */ | 373 | */ |
308 | if (!shadow_pte) { | 374 | if (!shadow_pte) { |
375 | pgprintk("%s: not mapped\n", __FUNCTION__); | ||
309 | inject_page_fault(vcpu, addr, error_code); | 376 | inject_page_fault(vcpu, addr, error_code); |
310 | FNAME(release_walker)(&walker); | 377 | FNAME(release_walker)(&walker); |
311 | return 0; | 378 | return 0; |
312 | } | 379 | } |
313 | 380 | ||
381 | pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__, | ||
382 | shadow_pte, *shadow_pte); | ||
383 | |||
314 | /* | 384 | /* |
315 | * Update the shadow pte. | 385 | * Update the shadow pte. |
316 | */ | 386 | */ |
317 | if (write_fault) | 387 | if (write_fault) |
318 | fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr, | 388 | fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr, |
319 | user_fault); | 389 | user_fault, &write_pt); |
320 | else | 390 | else |
321 | fixed = fix_read_pf(shadow_pte); | 391 | fixed = fix_read_pf(shadow_pte); |
322 | 392 | ||
393 | pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__, | ||
394 | shadow_pte, *shadow_pte); | ||
395 | |||
323 | FNAME(release_walker)(&walker); | 396 | FNAME(release_walker)(&walker); |
324 | 397 | ||
325 | /* | 398 | /* |
@@ -331,20 +404,23 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | |||
331 | pgprintk("%s: io work, no access\n", __FUNCTION__); | 404 | pgprintk("%s: io work, no access\n", __FUNCTION__); |
332 | inject_page_fault(vcpu, addr, | 405 | inject_page_fault(vcpu, addr, |
333 | error_code | PFERR_PRESENT_MASK); | 406 | error_code | PFERR_PRESENT_MASK); |
407 | kvm_mmu_audit(vcpu, "post page fault (io)"); | ||
334 | return 0; | 408 | return 0; |
335 | } | 409 | } |
336 | 410 | ||
337 | /* | 411 | /* |
338 | * pte not present, guest page fault. | 412 | * pte not present, guest page fault. |
339 | */ | 413 | */ |
340 | if (pte_present && !fixed) { | 414 | if (pte_present && !fixed && !write_pt) { |
341 | inject_page_fault(vcpu, addr, error_code); | 415 | inject_page_fault(vcpu, addr, error_code); |
416 | kvm_mmu_audit(vcpu, "post page fault (guest)"); | ||
342 | return 0; | 417 | return 0; |
343 | } | 418 | } |
344 | 419 | ||
345 | ++kvm_stat.pf_fixed; | 420 | ++kvm_stat.pf_fixed; |
421 | kvm_mmu_audit(vcpu, "post page fault (fixed)"); | ||
346 | 422 | ||
347 | return 0; | 423 | return write_pt; |
348 | } | 424 | } |
349 | 425 | ||
350 | static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) | 426 | static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) |
@@ -353,9 +429,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) | |||
353 | pt_element_t guest_pte; | 429 | pt_element_t guest_pte; |
354 | gpa_t gpa; | 430 | gpa_t gpa; |
355 | 431 | ||
356 | FNAME(init_walker)(&walker, vcpu); | 432 | FNAME(walk_addr)(&walker, vcpu, vaddr); |
357 | guest_pte = *FNAME(fetch_guest)(vcpu, &walker, PT_PAGE_TABLE_LEVEL, | 433 | guest_pte = *walker.ptep; |
358 | vaddr); | ||
359 | FNAME(release_walker)(&walker); | 434 | FNAME(release_walker)(&walker); |
360 | 435 | ||
361 | if (!is_present_pte(guest_pte)) | 436 | if (!is_present_pte(guest_pte)) |
@@ -389,3 +464,4 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) | |||
389 | #undef PT_PTE_COPY_MASK | 464 | #undef PT_PTE_COPY_MASK |
390 | #undef PT_NON_PTE_COPY_MASK | 465 | #undef PT_NON_PTE_COPY_MASK |
391 | #undef PT_DIR_BASE_ADDR_MASK | 466 | #undef PT_DIR_BASE_ADDR_MASK |
467 | #undef PT_MAX_FULL_LEVELS | ||
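For orientation, the walk_addr() rewrite in the hunks above folds the old per-level fetch_guest() lookups into a single pass that records each level's table gfn and the final gfn. The following is a rough, self-contained model of such a 64-bit-style walk, not the kernel code; guest_mem and read_guest_pte() are stand-ins invented for the example, and accessed-bit updates and permission accumulation are omitted:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define PT_PRESENT   (1ULL << 0)
#define PT_PAGE_SIZE (1ULL << 7)
#define ADDR_MASK    0x000ffffffffff000ULL

/* Guest "physical memory" modeled as a flat buffer for this sketch. */
static uint8_t guest_mem[1 << 20];

static uint64_t read_guest_pte(uint64_t table_gpa, unsigned index)
{
	uint64_t pte;
	memcpy(&pte, guest_mem + table_gpa + index * sizeof(pte), sizeof(pte));
	return pte;
}

/* Walk 'levels' table levels (9 bits each); fill *gfn on success. */
static bool walk_gva(uint64_t root, uint64_t addr, int levels, uint64_t *gfn)
{
	uint64_t table = root & ADDR_MASK;
	int level;

	for (level = levels; level >= 1; --level) {
		unsigned index = (addr >> (12 + 9 * (level - 1))) & 0x1ff;
		uint64_t pte = read_guest_pte(table, index);

		if (!(pte & PT_PRESENT))
			return false;            /* guest page fault */
		/* The real walker also sets the accessed bit here. */
		if (level == 1 || (pte & PT_PAGE_SIZE)) {
			*gfn = ((pte & ADDR_MASK) >> 12) +
			       ((addr >> 12) & ((1ULL << (9 * (level - 1))) - 1));
			return true;             /* leaf: small or large page */
		}
		table = pte & ADDR_MASK;         /* descend one level */
	}
	return false;
}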
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index fa0428735717..ccc06b1b91b5 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c | |||
@@ -235,6 +235,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
235 | 235 | ||
236 | vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip; | 236 | vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip; |
237 | vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; | 237 | vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; |
238 | |||
239 | vcpu->interrupt_window_open = 1; | ||
238 | } | 240 | } |
239 | 241 | ||
240 | static int has_svm(void) | 242 | static int has_svm(void) |
@@ -495,7 +497,6 @@ static void init_vmcb(struct vmcb *vmcb) | |||
495 | /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */ | 497 | /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */ |
496 | (1ULL << INTERCEPT_CPUID) | | 498 | (1ULL << INTERCEPT_CPUID) | |
497 | (1ULL << INTERCEPT_HLT) | | 499 | (1ULL << INTERCEPT_HLT) | |
498 | (1ULL << INTERCEPT_INVLPG) | | ||
499 | (1ULL << INTERCEPT_INVLPGA) | | 500 | (1ULL << INTERCEPT_INVLPGA) | |
500 | (1ULL << INTERCEPT_IOIO_PROT) | | 501 | (1ULL << INTERCEPT_IOIO_PROT) | |
501 | (1ULL << INTERCEPT_MSR_PROT) | | 502 | (1ULL << INTERCEPT_MSR_PROT) | |
@@ -700,6 +701,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | |||
700 | vcpu->svm->vmcb->save.gdtr.base = dt->base ; | 701 | vcpu->svm->vmcb->save.gdtr.base = dt->base ; |
701 | } | 702 | } |
702 | 703 | ||
704 | static void svm_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu) | ||
705 | { | ||
706 | } | ||
707 | |||
703 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | 708 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
704 | { | 709 | { |
705 | #ifdef CONFIG_X86_64 | 710 | #ifdef CONFIG_X86_64 |
@@ -847,6 +852,7 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
847 | u64 fault_address; | 852 | u64 fault_address; |
848 | u32 error_code; | 853 | u32 error_code; |
849 | enum emulation_result er; | 854 | enum emulation_result er; |
855 | int r; | ||
850 | 856 | ||
851 | if (is_external_interrupt(exit_int_info)) | 857 | if (is_external_interrupt(exit_int_info)) |
852 | push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); | 858 | push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); |
@@ -855,7 +861,12 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
855 | 861 | ||
856 | fault_address = vcpu->svm->vmcb->control.exit_info_2; | 862 | fault_address = vcpu->svm->vmcb->control.exit_info_2; |
857 | error_code = vcpu->svm->vmcb->control.exit_info_1; | 863 | error_code = vcpu->svm->vmcb->control.exit_info_1; |
858 | if (!vcpu->mmu.page_fault(vcpu, fault_address, error_code)) { | 864 | r = kvm_mmu_page_fault(vcpu, fault_address, error_code); |
865 | if (r < 0) { | ||
866 | spin_unlock(&vcpu->kvm->lock); | ||
867 | return r; | ||
868 | } | ||
869 | if (!r) { | ||
859 | spin_unlock(&vcpu->kvm->lock); | 870 | spin_unlock(&vcpu->kvm->lock); |
860 | return 1; | 871 | return 1; |
861 | } | 872 | } |
@@ -1031,10 +1042,11 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1031 | { | 1042 | { |
1032 | vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1; | 1043 | vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1; |
1033 | skip_emulated_instruction(vcpu); | 1044 | skip_emulated_instruction(vcpu); |
1034 | if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)) | 1045 | if (vcpu->irq_summary) |
1035 | return 1; | 1046 | return 1; |
1036 | 1047 | ||
1037 | kvm_run->exit_reason = KVM_EXIT_HLT; | 1048 | kvm_run->exit_reason = KVM_EXIT_HLT; |
1049 | ++kvm_stat.halt_exits; | ||
1038 | return 0; | 1050 | return 0; |
1039 | } | 1051 | } |
1040 | 1052 | ||
@@ -1186,6 +1198,23 @@ static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1186 | return rdmsr_interception(vcpu, kvm_run); | 1198 | return rdmsr_interception(vcpu, kvm_run); |
1187 | } | 1199 | } |
1188 | 1200 | ||
1201 | static int interrupt_window_interception(struct kvm_vcpu *vcpu, | ||
1202 | struct kvm_run *kvm_run) | ||
1203 | { | ||
1204 | /* | ||
1205 | * If the user space waits to inject interrupts, exit as soon as | ||
1206 | * possible | ||
1207 | */ | ||
1208 | if (kvm_run->request_interrupt_window && | ||
1209 | !vcpu->irq_summary) { | ||
1210 | ++kvm_stat.irq_window_exits; | ||
1211 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | ||
1212 | return 0; | ||
1213 | } | ||
1214 | |||
1215 | return 1; | ||
1216 | } | ||
1217 | |||
1189 | static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, | 1218 | static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, |
1190 | struct kvm_run *kvm_run) = { | 1219 | struct kvm_run *kvm_run) = { |
1191 | [SVM_EXIT_READ_CR0] = emulate_on_interception, | 1220 | [SVM_EXIT_READ_CR0] = emulate_on_interception, |
@@ -1210,6 +1239,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
1210 | [SVM_EXIT_NMI] = nop_on_interception, | 1239 | [SVM_EXIT_NMI] = nop_on_interception, |
1211 | [SVM_EXIT_SMI] = nop_on_interception, | 1240 | [SVM_EXIT_SMI] = nop_on_interception, |
1212 | [SVM_EXIT_INIT] = nop_on_interception, | 1241 | [SVM_EXIT_INIT] = nop_on_interception, |
1242 | [SVM_EXIT_VINTR] = interrupt_window_interception, | ||
1213 | /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */ | 1243 | /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */ |
1214 | [SVM_EXIT_CPUID] = cpuid_interception, | 1244 | [SVM_EXIT_CPUID] = cpuid_interception, |
1215 | [SVM_EXIT_HLT] = halt_interception, | 1245 | [SVM_EXIT_HLT] = halt_interception, |
@@ -1278,15 +1308,11 @@ static void pre_svm_run(struct kvm_vcpu *vcpu) | |||
1278 | } | 1308 | } |
1279 | 1309 | ||
1280 | 1310 | ||
1281 | static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu) | 1311 | static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu) |
1282 | { | 1312 | { |
1283 | struct vmcb_control_area *control; | 1313 | struct vmcb_control_area *control; |
1284 | 1314 | ||
1285 | if (!vcpu->irq_summary) | ||
1286 | return; | ||
1287 | |||
1288 | control = &vcpu->svm->vmcb->control; | 1315 | control = &vcpu->svm->vmcb->control; |
1289 | |||
1290 | control->int_vector = pop_irq(vcpu); | 1316 | control->int_vector = pop_irq(vcpu); |
1291 | control->int_ctl &= ~V_INTR_PRIO_MASK; | 1317 | control->int_ctl &= ~V_INTR_PRIO_MASK; |
1292 | control->int_ctl |= V_IRQ_MASK | | 1318 | control->int_ctl |= V_IRQ_MASK | |
@@ -1301,6 +1327,59 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu) | |||
1301 | control->int_ctl &= ~V_IRQ_MASK; | 1327 | control->int_ctl &= ~V_IRQ_MASK; |
1302 | push_irq(vcpu, control->int_vector); | 1328 | push_irq(vcpu, control->int_vector); |
1303 | } | 1329 | } |
1330 | |||
1331 | vcpu->interrupt_window_open = | ||
1332 | !(control->int_state & SVM_INTERRUPT_SHADOW_MASK); | ||
1333 | } | ||
1334 | |||
1335 | static void do_interrupt_requests(struct kvm_vcpu *vcpu, | ||
1336 | struct kvm_run *kvm_run) | ||
1337 | { | ||
1338 | struct vmcb_control_area *control = &vcpu->svm->vmcb->control; | ||
1339 | |||
1340 | vcpu->interrupt_window_open = | ||
1341 | (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && | ||
1342 | (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)); | ||
1343 | |||
1344 | if (vcpu->interrupt_window_open && vcpu->irq_summary) | ||
1345 | /* | ||
1346 | * If interrupts enabled, and not blocked by sti or mov ss. Good. | ||
1347 | */ | ||
1348 | kvm_do_inject_irq(vcpu); | ||
1349 | |||
1350 | /* | ||
1351 | * Interrupts blocked. Wait for unblock. | ||
1352 | */ | ||
1353 | if (!vcpu->interrupt_window_open && | ||
1354 | (vcpu->irq_summary || kvm_run->request_interrupt_window)) { | ||
1355 | control->intercept |= 1ULL << INTERCEPT_VINTR; | ||
1356 | } else | ||
1357 | control->intercept &= ~(1ULL << INTERCEPT_VINTR); | ||
1358 | } | ||
1359 | |||
1360 | static void post_kvm_run_save(struct kvm_vcpu *vcpu, | ||
1361 | struct kvm_run *kvm_run) | ||
1362 | { | ||
1363 | kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open && | ||
1364 | vcpu->irq_summary == 0); | ||
1365 | kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0; | ||
1366 | kvm_run->cr8 = vcpu->cr8; | ||
1367 | kvm_run->apic_base = vcpu->apic_base; | ||
1368 | } | ||
1369 | |||
1370 | /* | ||
1371 | * Check if userspace requested an interrupt window, and that the | ||
1372 | * interrupt window is open. | ||
1373 | * | ||
1374 | * No need to exit to userspace if we already have an interrupt queued. | ||
1375 | */ | ||
1376 | static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, | ||
1377 | struct kvm_run *kvm_run) | ||
1378 | { | ||
1379 | return (!vcpu->irq_summary && | ||
1380 | kvm_run->request_interrupt_window && | ||
1381 | vcpu->interrupt_window_open && | ||
1382 | (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)); | ||
1304 | } | 1383 | } |
1305 | 1384 | ||
1306 | static void save_db_regs(unsigned long *db_regs) | 1385 | static void save_db_regs(unsigned long *db_regs) |
@@ -1324,9 +1403,10 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1324 | u16 fs_selector; | 1403 | u16 fs_selector; |
1325 | u16 gs_selector; | 1404 | u16 gs_selector; |
1326 | u16 ldt_selector; | 1405 | u16 ldt_selector; |
1406 | int r; | ||
1327 | 1407 | ||
1328 | again: | 1408 | again: |
1329 | kvm_try_inject_irq(vcpu); | 1409 | do_interrupt_requests(vcpu, kvm_run); |
1330 | 1410 | ||
1331 | clgi(); | 1411 | clgi(); |
1332 | 1412 | ||
@@ -1487,18 +1567,28 @@ again: | |||
1487 | if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) { | 1567 | if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) { |
1488 | kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; | 1568 | kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; |
1489 | kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code; | 1569 | kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code; |
1570 | post_kvm_run_save(vcpu, kvm_run); | ||
1490 | return 0; | 1571 | return 0; |
1491 | } | 1572 | } |
1492 | 1573 | ||
1493 | if (handle_exit(vcpu, kvm_run)) { | 1574 | r = handle_exit(vcpu, kvm_run); |
1575 | if (r > 0) { | ||
1494 | if (signal_pending(current)) { | 1576 | if (signal_pending(current)) { |
1495 | ++kvm_stat.signal_exits; | 1577 | ++kvm_stat.signal_exits; |
1578 | post_kvm_run_save(vcpu, kvm_run); | ||
1579 | return -EINTR; | ||
1580 | } | ||
1581 | |||
1582 | if (dm_request_for_irq_injection(vcpu, kvm_run)) { | ||
1583 | ++kvm_stat.request_irq_exits; | ||
1584 | post_kvm_run_save(vcpu, kvm_run); | ||
1496 | return -EINTR; | 1585 | return -EINTR; |
1497 | } | 1586 | } |
1498 | kvm_resched(vcpu); | 1587 | kvm_resched(vcpu); |
1499 | goto again; | 1588 | goto again; |
1500 | } | 1589 | } |
1501 | return 0; | 1590 | post_kvm_run_save(vcpu, kvm_run); |
1591 | return r; | ||
1502 | } | 1592 | } |
1503 | 1593 | ||
1504 | static void svm_flush_tlb(struct kvm_vcpu *vcpu) | 1594 | static void svm_flush_tlb(struct kvm_vcpu *vcpu) |
@@ -1565,6 +1655,7 @@ static struct kvm_arch_ops svm_arch_ops = { | |||
1565 | .get_segment = svm_get_segment, | 1655 | .get_segment = svm_get_segment, |
1566 | .set_segment = svm_set_segment, | 1656 | .set_segment = svm_set_segment, |
1567 | .get_cs_db_l_bits = svm_get_cs_db_l_bits, | 1657 | .get_cs_db_l_bits = svm_get_cs_db_l_bits, |
1658 | .decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits, | ||
1568 | .set_cr0 = svm_set_cr0, | 1659 | .set_cr0 = svm_set_cr0, |
1569 | .set_cr0_no_modeswitch = svm_set_cr0, | 1660 | .set_cr0_no_modeswitch = svm_set_cr0, |
1570 | .set_cr3 = svm_set_cr3, | 1661 | .set_cr3 = svm_set_cr3, |
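The new do_interrupt_requests() / dm_request_for_irq_injection() pair above boils down to a small decision procedure: inject a queued interrupt when the window is open, arm the VINTR intercept when it is not, and exit to userspace only when it asked for a window and nothing is already queued. A hedged restatement of that logic with hypothetical helper names:

#include <stdbool.h>

enum irq_action {
	IRQ_INJECT_NOW,      /* window open and an interrupt is queued */
	IRQ_REQUEST_WINDOW,  /* window closed: arm the VINTR/window intercept */
	IRQ_NOTHING,
};

static enum irq_action plan_irq(bool window_open, bool irq_queued,
				bool userspace_wants_window)
{
	if (window_open && irq_queued)
		return IRQ_INJECT_NOW;
	if (!window_open && (irq_queued || userspace_wants_window))
		return IRQ_REQUEST_WINDOW;
	return IRQ_NOTHING;
}

/* Mirrors dm_request_for_irq_injection(): exit to userspace only when it
 * asked for a window, the window really is open, and nothing is queued. */
static bool should_exit_to_userspace(bool window_open, bool irq_queued,
				     bool userspace_wants_window)
{
	return userspace_wants_window && window_open && !irq_queued;
}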
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index d0a2c2d5342a..d4701cb4c654 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c | |||
@@ -116,7 +116,7 @@ static void vmcs_clear(struct vmcs *vmcs) | |||
116 | static void __vcpu_clear(void *arg) | 116 | static void __vcpu_clear(void *arg) |
117 | { | 117 | { |
118 | struct kvm_vcpu *vcpu = arg; | 118 | struct kvm_vcpu *vcpu = arg; |
119 | int cpu = smp_processor_id(); | 119 | int cpu = raw_smp_processor_id(); |
120 | 120 | ||
121 | if (vcpu->cpu == cpu) | 121 | if (vcpu->cpu == cpu) |
122 | vmcs_clear(vcpu->vmcs); | 122 | vmcs_clear(vcpu->vmcs); |
@@ -152,15 +152,21 @@ static u64 vmcs_read64(unsigned long field) | |||
152 | #endif | 152 | #endif |
153 | } | 153 | } |
154 | 154 | ||
155 | static noinline void vmwrite_error(unsigned long field, unsigned long value) | ||
156 | { | ||
157 | printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", | ||
158 | field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); | ||
159 | dump_stack(); | ||
160 | } | ||
161 | |||
155 | static void vmcs_writel(unsigned long field, unsigned long value) | 162 | static void vmcs_writel(unsigned long field, unsigned long value) |
156 | { | 163 | { |
157 | u8 error; | 164 | u8 error; |
158 | 165 | ||
159 | asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0" | 166 | asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0" |
160 | : "=q"(error) : "a"(value), "d"(field) : "cc" ); | 167 | : "=q"(error) : "a"(value), "d"(field) : "cc" ); |
161 | if (error) | 168 | if (unlikely(error)) |
162 | printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", | 169 | vmwrite_error(field, value); |
163 | field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); | ||
164 | } | 170 | } |
165 | 171 | ||
166 | static void vmcs_write16(unsigned long field, u16 value) | 172 | static void vmcs_write16(unsigned long field, u16 value) |
@@ -263,6 +269,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
263 | if (interruptibility & 3) | 269 | if (interruptibility & 3) |
264 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | 270 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, |
265 | interruptibility & ~3); | 271 | interruptibility & ~3); |
272 | vcpu->interrupt_window_open = 1; | ||
266 | } | 273 | } |
267 | 274 | ||
268 | static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) | 275 | static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) |
@@ -541,7 +548,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) | |||
541 | 548 | ||
542 | static struct vmcs *alloc_vmcs(void) | 549 | static struct vmcs *alloc_vmcs(void) |
543 | { | 550 | { |
544 | return alloc_vmcs_cpu(smp_processor_id()); | 551 | return alloc_vmcs_cpu(raw_smp_processor_id()); |
545 | } | 552 | } |
546 | 553 | ||
547 | static void free_vmcs(struct vmcs *vmcs) | 554 | static void free_vmcs(struct vmcs *vmcs) |
@@ -736,6 +743,15 @@ static void exit_lmode(struct kvm_vcpu *vcpu) | |||
736 | 743 | ||
737 | #endif | 744 | #endif |
738 | 745 | ||
746 | static void vmx_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu) | ||
747 | { | ||
748 | vcpu->cr0 &= KVM_GUEST_CR0_MASK; | ||
749 | vcpu->cr0 |= vmcs_readl(GUEST_CR0) & ~KVM_GUEST_CR0_MASK; | ||
750 | |||
751 | vcpu->cr4 &= KVM_GUEST_CR4_MASK; | ||
752 | vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; | ||
753 | } | ||
754 | |||
739 | static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | 755 | static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
740 | { | 756 | { |
741 | if (vcpu->rmode.active && (cr0 & CR0_PE_MASK)) | 757 | if (vcpu->rmode.active && (cr0 & CR0_PE_MASK)) |
@@ -1011,8 +1027,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1011 | vmcs_writel(GUEST_RIP, 0xfff0); | 1027 | vmcs_writel(GUEST_RIP, 0xfff0); |
1012 | vmcs_writel(GUEST_RSP, 0); | 1028 | vmcs_writel(GUEST_RSP, 0); |
1013 | 1029 | ||
1014 | vmcs_writel(GUEST_CR3, 0); | ||
1015 | |||
1016 | //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 | 1030 | //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 |
1017 | vmcs_writel(GUEST_DR7, 0x400); | 1031 | vmcs_writel(GUEST_DR7, 0x400); |
1018 | 1032 | ||
@@ -1049,7 +1063,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1049 | | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */ | 1063 | | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */ |
1050 | | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */ | 1064 | | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */ |
1051 | | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */ | 1065 | | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */ |
1052 | | CPU_BASED_INVDPG_EXITING | ||
1053 | | CPU_BASED_MOV_DR_EXITING | 1066 | | CPU_BASED_MOV_DR_EXITING |
1054 | | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */ | 1067 | | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */ |
1055 | ); | 1068 | ); |
@@ -1094,14 +1107,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1094 | rdmsrl(MSR_IA32_SYSENTER_EIP, a); | 1107 | rdmsrl(MSR_IA32_SYSENTER_EIP, a); |
1095 | vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ | 1108 | vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ |
1096 | 1109 | ||
1097 | ret = -ENOMEM; | ||
1098 | vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
1099 | if (!vcpu->guest_msrs) | ||
1100 | goto out; | ||
1101 | vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
1102 | if (!vcpu->host_msrs) | ||
1103 | goto out_free_guest_msrs; | ||
1104 | |||
1105 | for (i = 0; i < NR_VMX_MSR; ++i) { | 1110 | for (i = 0; i < NR_VMX_MSR; ++i) { |
1106 | u32 index = vmx_msr_index[i]; | 1111 | u32 index = vmx_msr_index[i]; |
1107 | u32 data_low, data_high; | 1112 | u32 data_low, data_high; |
@@ -1155,8 +1160,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1155 | 1160 | ||
1156 | return 0; | 1161 | return 0; |
1157 | 1162 | ||
1158 | out_free_guest_msrs: | ||
1159 | kfree(vcpu->guest_msrs); | ||
1160 | out: | 1163 | out: |
1161 | return ret; | 1164 | return ret; |
1162 | } | 1165 | } |
@@ -1224,21 +1227,34 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) | |||
1224 | irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); | 1227 | irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); |
1225 | } | 1228 | } |
1226 | 1229 | ||
1227 | static void kvm_try_inject_irq(struct kvm_vcpu *vcpu) | 1230 | |
1231 | static void do_interrupt_requests(struct kvm_vcpu *vcpu, | ||
1232 | struct kvm_run *kvm_run) | ||
1228 | { | 1233 | { |
1229 | if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) | 1234 | u32 cpu_based_vm_exec_control; |
1230 | && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0) | 1235 | |
1236 | vcpu->interrupt_window_open = | ||
1237 | ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | ||
1238 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); | ||
1239 | |||
1240 | if (vcpu->interrupt_window_open && | ||
1241 | vcpu->irq_summary && | ||
1242 | !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK)) | ||
1231 | /* | 1243 | /* |
1232 | * Interrupts enabled, and not blocked by sti or mov ss. Good. | 1244 | * Interrupts are enabled and not blocked by sti or mov ss. Good. |
1233 | */ | 1245 | */ |
1234 | kvm_do_inject_irq(vcpu); | 1246 | kvm_do_inject_irq(vcpu); |
1235 | else | 1247 | |
1248 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
1249 | if (!vcpu->interrupt_window_open && | ||
1250 | (vcpu->irq_summary || kvm_run->request_interrupt_window)) | ||
1236 | /* | 1251 | /* |
1237 | * Interrupts blocked. Wait for unblock. | 1252 | * Interrupts blocked. Wait for unblock. |
1238 | */ | 1253 | */ |
1239 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, | 1254 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; |
1240 | vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | 1255 | else |
1241 | | CPU_BASED_VIRTUAL_INTR_PENDING); | 1256 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; |
1257 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
1242 | } | 1258 | } |
1243 | 1259 | ||
1244 | static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) | 1260 | static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) |
@@ -1277,6 +1293,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1277 | unsigned long cr2, rip; | 1293 | unsigned long cr2, rip; |
1278 | u32 vect_info; | 1294 | u32 vect_info; |
1279 | enum emulation_result er; | 1295 | enum emulation_result er; |
1296 | int r; | ||
1280 | 1297 | ||
1281 | vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); | 1298 | vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); |
1282 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | 1299 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
@@ -1305,7 +1322,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1305 | cr2 = vmcs_readl(EXIT_QUALIFICATION); | 1322 | cr2 = vmcs_readl(EXIT_QUALIFICATION); |
1306 | 1323 | ||
1307 | spin_lock(&vcpu->kvm->lock); | 1324 | spin_lock(&vcpu->kvm->lock); |
1308 | if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) { | 1325 | r = kvm_mmu_page_fault(vcpu, cr2, error_code); |
1326 | if (r < 0) { | ||
1327 | spin_unlock(&vcpu->kvm->lock); | ||
1328 | return r; | ||
1329 | } | ||
1330 | if (!r) { | ||
1309 | spin_unlock(&vcpu->kvm->lock); | 1331 | spin_unlock(&vcpu->kvm->lock); |
1310 | return 1; | 1332 | return 1; |
1311 | } | 1333 | } |
@@ -1425,17 +1447,6 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1425 | return 0; | 1447 | return 0; |
1426 | } | 1448 | } |
1427 | 1449 | ||
1428 | static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1429 | { | ||
1430 | u64 address = vmcs_read64(EXIT_QUALIFICATION); | ||
1431 | int instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | ||
1432 | spin_lock(&vcpu->kvm->lock); | ||
1433 | vcpu->mmu.inval_page(vcpu, address); | ||
1434 | spin_unlock(&vcpu->kvm->lock); | ||
1435 | vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) + instruction_length); | ||
1436 | return 1; | ||
1437 | } | ||
1438 | |||
1439 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 1450 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1440 | { | 1451 | { |
1441 | u64 exit_qualification; | 1452 | u64 exit_qualification; |
@@ -1575,23 +1586,40 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1575 | return 1; | 1586 | return 1; |
1576 | } | 1587 | } |
1577 | 1588 | ||
1589 | static void post_kvm_run_save(struct kvm_vcpu *vcpu, | ||
1590 | struct kvm_run *kvm_run) | ||
1591 | { | ||
1592 | kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0; | ||
1593 | kvm_run->cr8 = vcpu->cr8; | ||
1594 | kvm_run->apic_base = vcpu->apic_base; | ||
1595 | kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open && | ||
1596 | vcpu->irq_summary == 0); | ||
1597 | } | ||
1598 | |||
1578 | static int handle_interrupt_window(struct kvm_vcpu *vcpu, | 1599 | static int handle_interrupt_window(struct kvm_vcpu *vcpu, |
1579 | struct kvm_run *kvm_run) | 1600 | struct kvm_run *kvm_run) |
1580 | { | 1601 | { |
1581 | /* Turn off interrupt window reporting. */ | 1602 | /* |
1582 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, | 1603 | * If user space is waiting to inject interrupts, exit as soon as |
1583 | vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | 1604 | * possible |
1584 | & ~CPU_BASED_VIRTUAL_INTR_PENDING); | 1605 | */ |
1606 | if (kvm_run->request_interrupt_window && | ||
1607 | !vcpu->irq_summary) { | ||
1608 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | ||
1609 | ++kvm_stat.irq_window_exits; | ||
1610 | return 0; | ||
1611 | } | ||
1585 | return 1; | 1612 | return 1; |
1586 | } | 1613 | } |
1587 | 1614 | ||
1588 | static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 1615 | static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1589 | { | 1616 | { |
1590 | skip_emulated_instruction(vcpu); | 1617 | skip_emulated_instruction(vcpu); |
1591 | if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)) | 1618 | if (vcpu->irq_summary) |
1592 | return 1; | 1619 | return 1; |
1593 | 1620 | ||
1594 | kvm_run->exit_reason = KVM_EXIT_HLT; | 1621 | kvm_run->exit_reason = KVM_EXIT_HLT; |
1622 | ++kvm_stat.halt_exits; | ||
1595 | return 0; | 1623 | return 0; |
1596 | } | 1624 | } |
1597 | 1625 | ||
@@ -1605,7 +1633,6 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
1605 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, | 1633 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, |
1606 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | 1634 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, |
1607 | [EXIT_REASON_IO_INSTRUCTION] = handle_io, | 1635 | [EXIT_REASON_IO_INSTRUCTION] = handle_io, |
1608 | [EXIT_REASON_INVLPG] = handle_invlpg, | ||
1609 | [EXIT_REASON_CR_ACCESS] = handle_cr, | 1636 | [EXIT_REASON_CR_ACCESS] = handle_cr, |
1610 | [EXIT_REASON_DR_ACCESS] = handle_dr, | 1637 | [EXIT_REASON_DR_ACCESS] = handle_dr, |
1611 | [EXIT_REASON_CPUID] = handle_cpuid, | 1638 | [EXIT_REASON_CPUID] = handle_cpuid, |
@@ -1642,11 +1669,27 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
1642 | return 0; | 1669 | return 0; |
1643 | } | 1670 | } |
1644 | 1671 | ||
1672 | /* | ||
1673 | * Check if userspace requested an interrupt window, and that the | ||
1674 | * interrupt window is open. | ||
1675 | * | ||
1676 | * No need to exit to userspace if we already have an interrupt queued. | ||
1677 | */ | ||
1678 | static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, | ||
1679 | struct kvm_run *kvm_run) | ||
1680 | { | ||
1681 | return (!vcpu->irq_summary && | ||
1682 | kvm_run->request_interrupt_window && | ||
1683 | vcpu->interrupt_window_open && | ||
1684 | (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)); | ||
1685 | } | ||
1686 | |||
1645 | static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 1687 | static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1646 | { | 1688 | { |
1647 | u8 fail; | 1689 | u8 fail; |
1648 | u16 fs_sel, gs_sel, ldt_sel; | 1690 | u16 fs_sel, gs_sel, ldt_sel; |
1649 | int fs_gs_ldt_reload_needed; | 1691 | int fs_gs_ldt_reload_needed; |
1692 | int r; | ||
1650 | 1693 | ||
1651 | again: | 1694 | again: |
1652 | /* | 1695 | /* |
@@ -1673,9 +1716,7 @@ again: | |||
1673 | vmcs_writel(HOST_GS_BASE, segment_base(gs_sel)); | 1716 | vmcs_writel(HOST_GS_BASE, segment_base(gs_sel)); |
1674 | #endif | 1717 | #endif |
1675 | 1718 | ||
1676 | if (vcpu->irq_summary && | 1719 | do_interrupt_requests(vcpu, kvm_run); |
1677 | !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK)) | ||
1678 | kvm_try_inject_irq(vcpu); | ||
1679 | 1720 | ||
1680 | if (vcpu->guest_debug.enabled) | 1721 | if (vcpu->guest_debug.enabled) |
1681 | kvm_guest_debug_pre(vcpu); | 1722 | kvm_guest_debug_pre(vcpu); |
@@ -1812,6 +1853,7 @@ again: | |||
1812 | 1853 | ||
1813 | fx_save(vcpu->guest_fx_image); | 1854 | fx_save(vcpu->guest_fx_image); |
1814 | fx_restore(vcpu->host_fx_image); | 1855 | fx_restore(vcpu->host_fx_image); |
1856 | vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; | ||
1815 | 1857 | ||
1816 | #ifndef CONFIG_X86_64 | 1858 | #ifndef CONFIG_X86_64 |
1817 | asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); | 1859 | asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); |
@@ -1821,6 +1863,7 @@ again: | |||
1821 | if (fail) { | 1863 | if (fail) { |
1822 | kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; | 1864 | kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; |
1823 | kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR); | 1865 | kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR); |
1866 | r = 0; | ||
1824 | } else { | 1867 | } else { |
1825 | if (fs_gs_ldt_reload_needed) { | 1868 | if (fs_gs_ldt_reload_needed) { |
1826 | load_ldt(ldt_sel); | 1869 | load_ldt(ldt_sel); |
@@ -1840,17 +1883,28 @@ again: | |||
1840 | } | 1883 | } |
1841 | vcpu->launched = 1; | 1884 | vcpu->launched = 1; |
1842 | kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT; | 1885 | kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT; |
1843 | if (kvm_handle_exit(kvm_run, vcpu)) { | 1886 | r = kvm_handle_exit(kvm_run, vcpu); |
1887 | if (r > 0) { | ||
1844 | /* Give scheduler a chance to reschedule. */ | 1888 | /* Give scheduler a chance to reschedule. */ |
1845 | if (signal_pending(current)) { | 1889 | if (signal_pending(current)) { |
1846 | ++kvm_stat.signal_exits; | 1890 | ++kvm_stat.signal_exits; |
1891 | post_kvm_run_save(vcpu, kvm_run); | ||
1892 | return -EINTR; | ||
1893 | } | ||
1894 | |||
1895 | if (dm_request_for_irq_injection(vcpu, kvm_run)) { | ||
1896 | ++kvm_stat.request_irq_exits; | ||
1897 | post_kvm_run_save(vcpu, kvm_run); | ||
1847 | return -EINTR; | 1898 | return -EINTR; |
1848 | } | 1899 | } |
1900 | |||
1849 | kvm_resched(vcpu); | 1901 | kvm_resched(vcpu); |
1850 | goto again; | 1902 | goto again; |
1851 | } | 1903 | } |
1852 | } | 1904 | } |
1853 | return 0; | 1905 | |
1906 | post_kvm_run_save(vcpu, kvm_run); | ||
1907 | return r; | ||
1854 | } | 1908 | } |
1855 | 1909 | ||
1856 | static void vmx_flush_tlb(struct kvm_vcpu *vcpu) | 1910 | static void vmx_flush_tlb(struct kvm_vcpu *vcpu) |
@@ -1906,13 +1960,33 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) | |||
1906 | { | 1960 | { |
1907 | struct vmcs *vmcs; | 1961 | struct vmcs *vmcs; |
1908 | 1962 | ||
1963 | vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
1964 | if (!vcpu->guest_msrs) | ||
1965 | return -ENOMEM; | ||
1966 | |||
1967 | vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
1968 | if (!vcpu->host_msrs) | ||
1969 | goto out_free_guest_msrs; | ||
1970 | |||
1909 | vmcs = alloc_vmcs(); | 1971 | vmcs = alloc_vmcs(); |
1910 | if (!vmcs) | 1972 | if (!vmcs) |
1911 | return -ENOMEM; | 1973 | goto out_free_msrs; |
1974 | |||
1912 | vmcs_clear(vmcs); | 1975 | vmcs_clear(vmcs); |
1913 | vcpu->vmcs = vmcs; | 1976 | vcpu->vmcs = vmcs; |
1914 | vcpu->launched = 0; | 1977 | vcpu->launched = 0; |
1978 | |||
1915 | return 0; | 1979 | return 0; |
1980 | |||
1981 | out_free_msrs: | ||
1982 | kfree(vcpu->host_msrs); | ||
1983 | vcpu->host_msrs = NULL; | ||
1984 | |||
1985 | out_free_guest_msrs: | ||
1986 | kfree(vcpu->guest_msrs); | ||
1987 | vcpu->guest_msrs = NULL; | ||
1988 | |||
1989 | return -ENOMEM; | ||
1916 | } | 1990 | } |
1917 | 1991 | ||
1918 | static struct kvm_arch_ops vmx_arch_ops = { | 1992 | static struct kvm_arch_ops vmx_arch_ops = { |
@@ -1936,6 +2010,7 @@ static struct kvm_arch_ops vmx_arch_ops = { | |||
1936 | .get_segment = vmx_get_segment, | 2010 | .get_segment = vmx_get_segment, |
1937 | .set_segment = vmx_set_segment, | 2011 | .set_segment = vmx_set_segment, |
1938 | .get_cs_db_l_bits = vmx_get_cs_db_l_bits, | 2012 | .get_cs_db_l_bits = vmx_get_cs_db_l_bits, |
2013 | .decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits, | ||
1939 | .set_cr0 = vmx_set_cr0, | 2014 | .set_cr0 = vmx_set_cr0, |
1940 | .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch, | 2015 | .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch, |
1941 | .set_cr3 = vmx_set_cr3, | 2016 | .set_cr3 = vmx_set_cr3, |
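Among the vmx.c changes above, vmx_create_vcpu() now owns the guest/host MSR page allocations and unwinds them with the usual goto-on-error pattern. A generic sketch of that pattern, using plain malloc/free and illustrative names instead of the kernel allocators:

#include <stdlib.h>

struct three_bufs {
	void *a;
	void *b;
	void *c;
};

static int three_bufs_init(struct three_bufs *t)
{
	t->a = malloc(4096);
	if (!t->a)
		return -1;

	t->b = malloc(4096);
	if (!t->b)
		goto out_free_a;

	t->c = malloc(4096);
	if (!t->c)
		goto out_free_b;

	return 0;                 /* all three allocations succeeded */

out_free_b:                       /* free only what was already obtained, */
	free(t->b);               /* in reverse order of allocation */
	t->b = NULL;
out_free_a:
	free(t->a);
	t->a = NULL;
	return -1;
}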
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c index 1bff3e925fda..be70795b4822 100644 --- a/drivers/kvm/x86_emulate.c +++ b/drivers/kvm/x86_emulate.c | |||
@@ -1323,7 +1323,7 @@ twobyte_special_insn: | |||
1323 | ctxt)) != 0)) | 1323 | ctxt)) != 0)) |
1324 | goto done; | 1324 | goto done; |
1325 | if ((old_lo != _regs[VCPU_REGS_RAX]) | 1325 | if ((old_lo != _regs[VCPU_REGS_RAX]) |
1326 | || (old_hi != _regs[VCPU_REGS_RDI])) { | 1326 | || (old_hi != _regs[VCPU_REGS_RDX])) { |
1327 | _regs[VCPU_REGS_RAX] = old_lo; | 1327 | _regs[VCPU_REGS_RAX] = old_lo; |
1328 | _regs[VCPU_REGS_RDX] = old_hi; | 1328 | _regs[VCPU_REGS_RDX] = old_hi; |
1329 | _eflags &= ~EFLG_ZF; | 1329 | _eflags &= ~EFLG_ZF; |
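The x86_emulate.c fix above makes the cmpxchg8b emulation compare the saved high half against RDX rather than RDI, which matches the instruction's definition: the comparand is EDX:EAX and the replacement value is ECX:EBX. A small C model of those semantics (not the emulator code):

#include <stdbool.h>
#include <stdint.h>

/* Returns the resulting ZF value. */
static bool cmpxchg8b(uint64_t *mem, uint32_t *eax, uint32_t *edx,
		      uint32_t ebx, uint32_t ecx)
{
	uint64_t expected = ((uint64_t)*edx << 32) | *eax;

	if (*mem == expected) {
		*mem = ((uint64_t)ecx << 32) | ebx;   /* store ECX:EBX */
		return true;                          /* ZF = 1 */
	}
	*eax = (uint32_t)*mem;                        /* load m64 into EDX:EAX */
	*edx = (uint32_t)(*mem >> 32);
	return false;                                 /* ZF = 0 */
}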
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c index fb1edc1c9edb..50914439d861 100644 --- a/drivers/leds/leds-s3c24xx.c +++ b/drivers/leds/leds-s3c24xx.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/leds.h> | 17 | #include <linux/leds.h> |
18 | 18 | ||
19 | #include <asm/arch/hardware.h> | 19 | #include <asm/hardware.h> |
20 | #include <asm/arch/regs-gpio.h> | 20 | #include <asm/arch/regs-gpio.h> |
21 | #include <asm/arch/leds-gpio.h> | 21 | #include <asm/arch/leds-gpio.h> |
22 | 22 | ||
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index c8558d4ed506..8ca75e52f637 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/sysdev.h> | 44 | #include <linux/sysdev.h> |
45 | #include <linux/freezer.h> | 45 | #include <linux/freezer.h> |
46 | #include <linux/syscalls.h> | 46 | #include <linux/syscalls.h> |
47 | #include <linux/suspend.h> | ||
47 | #include <linux/cpu.h> | 48 | #include <linux/cpu.h> |
48 | #include <asm/prom.h> | 49 | #include <asm/prom.h> |
49 | #include <asm/machdep.h> | 50 | #include <asm/machdep.h> |
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c index e9b80e920266..ccfe6561be24 100644 --- a/drivers/mmc/mmci.c +++ b/drivers/mmc/mmci.c | |||
@@ -42,6 +42,8 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) | |||
42 | { | 42 | { |
43 | writel(0, host->base + MMCICOMMAND); | 43 | writel(0, host->base + MMCICOMMAND); |
44 | 44 | ||
45 | BUG_ON(host->data); | ||
46 | |||
45 | host->mrq = NULL; | 47 | host->mrq = NULL; |
46 | host->cmd = NULL; | 48 | host->cmd = NULL; |
47 | 49 | ||
@@ -198,6 +200,8 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, | |||
198 | } | 200 | } |
199 | 201 | ||
200 | if (!cmd->data || cmd->error != MMC_ERR_NONE) { | 202 | if (!cmd->data || cmd->error != MMC_ERR_NONE) { |
203 | if (host->data) | ||
204 | mmci_stop_data(host); | ||
201 | mmci_request_end(host, cmd->mrq); | 205 | mmci_request_end(host, cmd->mrq); |
202 | } else if (!(cmd->data->flags & MMC_DATA_READ)) { | 206 | } else if (!(cmd->data->flags & MMC_DATA_READ)) { |
203 | mmci_start_data(host, cmd->data); | 207 | mmci_start_data(host, cmd->data); |
diff --git a/drivers/net/Space.c b/drivers/net/Space.c index 602ed31a5dd9..9305eb9b1b98 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c | |||
@@ -349,22 +349,11 @@ static void __init trif_probe2(int unit) | |||
349 | #endif | 349 | #endif |
350 | 350 | ||
351 | 351 | ||
352 | /* | ||
353 | * The loopback device is global so it can be directly referenced | ||
354 | * by the network code. Also, it must be first on device list. | ||
355 | */ | ||
356 | extern int loopback_init(void); | ||
357 | |||
358 | /* Statically configured drivers -- order matters here. */ | 352 | /* Statically configured drivers -- order matters here. */ |
359 | static int __init net_olddevs_init(void) | 353 | static int __init net_olddevs_init(void) |
360 | { | 354 | { |
361 | int num; | 355 | int num; |
362 | 356 | ||
363 | if (loopback_init()) { | ||
364 | printk(KERN_ERR "Network loopback device setup failed\n"); | ||
365 | } | ||
366 | |||
367 | |||
368 | #ifdef CONFIG_SBNI | 357 | #ifdef CONFIG_SBNI |
369 | for (num = 0; num < 8; ++num) | 358 | for (num = 0; num < 8; ++num) |
370 | sbni_probe(num); | 359 | sbni_probe(num); |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index ada5e9b9988c..ca5acc4736df 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -57,8 +57,8 @@ | |||
57 | 57 | ||
58 | #define DRV_MODULE_NAME "bnx2" | 58 | #define DRV_MODULE_NAME "bnx2" |
59 | #define PFX DRV_MODULE_NAME ": " | 59 | #define PFX DRV_MODULE_NAME ": " |
60 | #define DRV_MODULE_VERSION "1.5.2" | 60 | #define DRV_MODULE_VERSION "1.5.3" |
61 | #define DRV_MODULE_RELDATE "December 13, 2006" | 61 | #define DRV_MODULE_RELDATE "January 8, 2007" |
62 | 62 | ||
63 | #define RUN_AT(x) (jiffies + (x)) | 63 | #define RUN_AT(x) (jiffies + (x)) |
64 | 64 | ||
@@ -1345,8 +1345,6 @@ bnx2_init_copper_phy(struct bnx2 *bp) | |||
1345 | { | 1345 | { |
1346 | u32 val; | 1346 | u32 val; |
1347 | 1347 | ||
1348 | bp->phy_flags |= PHY_CRC_FIX_FLAG; | ||
1349 | |||
1350 | if (bp->phy_flags & PHY_CRC_FIX_FLAG) { | 1348 | if (bp->phy_flags & PHY_CRC_FIX_FLAG) { |
1351 | bnx2_write_phy(bp, 0x18, 0x0c00); | 1349 | bnx2_write_phy(bp, 0x18, 0x0c00); |
1352 | bnx2_write_phy(bp, 0x17, 0x000a); | 1350 | bnx2_write_phy(bp, 0x17, 0x000a); |
@@ -3085,7 +3083,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, | |||
3085 | int buf_size) | 3083 | int buf_size) |
3086 | { | 3084 | { |
3087 | u32 written, offset32, len32; | 3085 | u32 written, offset32, len32; |
3088 | u8 *buf, start[4], end[4], *flash_buffer = NULL; | 3086 | u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL; |
3089 | int rc = 0; | 3087 | int rc = 0; |
3090 | int align_start, align_end; | 3088 | int align_start, align_end; |
3091 | 3089 | ||
@@ -3113,16 +3111,17 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, | |||
3113 | } | 3111 | } |
3114 | 3112 | ||
3115 | if (align_start || align_end) { | 3113 | if (align_start || align_end) { |
3116 | buf = kmalloc(len32, GFP_KERNEL); | 3114 | align_buf = kmalloc(len32, GFP_KERNEL); |
3117 | if (buf == NULL) | 3115 | if (align_buf == NULL) |
3118 | return -ENOMEM; | 3116 | return -ENOMEM; |
3119 | if (align_start) { | 3117 | if (align_start) { |
3120 | memcpy(buf, start, 4); | 3118 | memcpy(align_buf, start, 4); |
3121 | } | 3119 | } |
3122 | if (align_end) { | 3120 | if (align_end) { |
3123 | memcpy(buf + len32 - 4, end, 4); | 3121 | memcpy(align_buf + len32 - 4, end, 4); |
3124 | } | 3122 | } |
3125 | memcpy(buf + align_start, data_buf, buf_size); | 3123 | memcpy(align_buf + align_start, data_buf, buf_size); |
3124 | buf = align_buf; | ||
3126 | } | 3125 | } |
3127 | 3126 | ||
3128 | if (bp->flash_info->buffered == 0) { | 3127 | if (bp->flash_info->buffered == 0) { |
@@ -3256,11 +3255,8 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, | |||
3256 | } | 3255 | } |
3257 | 3256 | ||
3258 | nvram_write_end: | 3257 | nvram_write_end: |
3259 | if (bp->flash_info->buffered == 0) | 3258 | kfree(flash_buffer); |
3260 | kfree(flash_buffer); | 3259 | kfree(align_buf); |
3261 | |||
3262 | if (align_start || align_end) | ||
3263 | kfree(buf); | ||
3264 | return rc; | 3260 | return rc; |
3265 | } | 3261 | } |
3266 | 3262 | ||
@@ -5645,6 +5641,44 @@ poll_bnx2(struct net_device *dev) | |||
5645 | } | 5641 | } |
5646 | #endif | 5642 | #endif |
5647 | 5643 | ||
5644 | static void __devinit | ||
5645 | bnx2_get_5709_media(struct bnx2 *bp) | ||
5646 | { | ||
5647 | u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); | ||
5648 | u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; | ||
5649 | u32 strap; | ||
5650 | |||
5651 | if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) | ||
5652 | return; | ||
5653 | else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { | ||
5654 | bp->phy_flags |= PHY_SERDES_FLAG; | ||
5655 | return; | ||
5656 | } | ||
5657 | |||
5658 | if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) | ||
5659 | strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; | ||
5660 | else | ||
5661 | strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; | ||
5662 | |||
5663 | if (PCI_FUNC(bp->pdev->devfn) == 0) { | ||
5664 | switch (strap) { | ||
5665 | case 0x4: | ||
5666 | case 0x5: | ||
5667 | case 0x6: | ||
5668 | bp->phy_flags |= PHY_SERDES_FLAG; | ||
5669 | return; | ||
5670 | } | ||
5671 | } else { | ||
5672 | switch (strap) { | ||
5673 | case 0x1: | ||
5674 | case 0x2: | ||
5675 | case 0x4: | ||
5676 | bp->phy_flags |= PHY_SERDES_FLAG; | ||
5677 | return; | ||
5678 | } | ||
5679 | } | ||
5680 | } | ||
5681 | |||
5648 | static int __devinit | 5682 | static int __devinit |
5649 | bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | 5683 | bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) |
5650 | { | 5684 | { |
@@ -5865,10 +5899,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
5865 | bp->phy_addr = 1; | 5899 | bp->phy_addr = 1; |
5866 | 5900 | ||
5867 | /* Disable WOL support if we are running on a SERDES chip. */ | 5901 | /* Disable WOL support if we are running on a SERDES chip. */ |
5868 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 5902 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
5869 | if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) | 5903 | bnx2_get_5709_media(bp); |
5870 | bp->phy_flags |= PHY_SERDES_FLAG; | 5904 | else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) |
5871 | } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) | ||
5872 | bp->phy_flags |= PHY_SERDES_FLAG; | 5905 | bp->phy_flags |= PHY_SERDES_FLAG; |
5873 | 5906 | ||
5874 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 5907 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
@@ -5880,7 +5913,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
5880 | if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) | 5913 | if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) |
5881 | bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; | 5914 | bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; |
5882 | } | 5915 | } |
5883 | } | 5916 | } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || |
5917 | CHIP_NUM(bp) == CHIP_NUM_5708) | ||
5918 | bp->phy_flags |= PHY_CRC_FIX_FLAG; | ||
5884 | 5919 | ||
5885 | if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || | 5920 | if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || |
5886 | (CHIP_ID(bp) == CHIP_ID_5708_B0) || | 5921 | (CHIP_ID(bp) == CHIP_ID_5708_B0) || |
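
Editor's note: the bnx2_nvram_write() hunks above replace the old conditional buffer bookkeeping with a single align_buf pointer that is freed unconditionally at the exit label. A minimal user-space sketch of that alignment-buffer pattern follows; write_padded(), start_word/end_word and the elided aligned-write step are illustrative stand-ins, not the driver's code.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* start_word/end_word are the 4-byte words already present at the aligned
 * boundaries, read back beforehand as the driver does. */
static int write_padded(uint32_t offset, const uint8_t *data, size_t len,
			const uint8_t *start_word, const uint8_t *end_word)
{
	uint8_t *align_buf = NULL;
	const uint8_t *buf = data;
	size_t align_start = offset & 3;
	uint32_t offset32 = offset & ~3u;
	size_t len32 = (align_start + len + 3) & ~(size_t)3;
	size_t align_end = len32 - (align_start + len);

	if (align_start || align_end) {
		align_buf = malloc(len32);
		if (!align_buf)
			return -1;
		if (align_start)
			memcpy(align_buf, start_word, 4);           /* keep leading bytes */
		if (align_end)
			memcpy(align_buf + len32 - 4, end_word, 4); /* keep trailing bytes */
		memcpy(align_buf + align_start, data, len);
		buf = align_buf;                                    /* write from the padded copy */
	}

	/* ... issue the aligned write of len32 bytes at offset32 from buf ... */
	(void)offset32; (void)buf;

	free(align_buf);	/* single cleanup point; free(NULL) is a no-op, like kfree(NULL) */
	return 0;
}
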
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c index c7731b6f9de3..82fed1dd5005 100644 --- a/drivers/net/chelsio/my3126.c +++ b/drivers/net/chelsio/my3126.c | |||
@@ -170,9 +170,10 @@ static struct cphy *my3126_phy_create(adapter_t *adapter, | |||
170 | { | 170 | { |
171 | struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL); | 171 | struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL); |
172 | 172 | ||
173 | if (cphy) | 173 | if (!cphy) |
174 | cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); | 174 | return NULL; |
175 | 175 | ||
176 | cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); | ||
176 | INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); | 177 | INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); |
177 | cphy->bmsr = 0; | 178 | cphy->bmsr = 0; |
178 | 179 | ||
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4c1ff752048c..c6259c7127f6 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -995,12 +995,6 @@ e1000_probe(struct pci_dev *pdev, | |||
995 | (adapter->hw.mac_type != e1000_82547)) | 995 | (adapter->hw.mac_type != e1000_82547)) |
996 | netdev->features |= NETIF_F_TSO; | 996 | netdev->features |= NETIF_F_TSO; |
997 | 997 | ||
998 | #ifdef CONFIG_DEBUG_SLAB | ||
999 | /* 82544's work arounds do not play nicely with DEBUG SLAB */ | ||
1000 | if (adapter->hw.mac_type == e1000_82544) | ||
1001 | netdev->features &= ~NETIF_F_TSO; | ||
1002 | #endif | ||
1003 | |||
1004 | #ifdef NETIF_F_TSO6 | 998 | #ifdef NETIF_F_TSO6 |
1005 | if (adapter->hw.mac_type > e1000_82547_rev_2) | 999 | if (adapter->hw.mac_type > e1000_82547_rev_2) |
1006 | netdev->features |= NETIF_F_TSO6; | 1000 | netdev->features |= NETIF_F_TSO6; |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 2f48fe9a29a7..93f2b7a22160 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -234,6 +234,7 @@ enum { | |||
234 | #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 | 234 | #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 |
235 | #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 | 235 | #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 |
236 | #define NVREG_XMITCTL_HOST_LOADED 0x00004000 | 236 | #define NVREG_XMITCTL_HOST_LOADED 0x00004000 |
237 | #define NVREG_XMITCTL_TX_PATH_EN 0x01000000 | ||
237 | NvRegTransmitterStatus = 0x088, | 238 | NvRegTransmitterStatus = 0x088, |
238 | #define NVREG_XMITSTAT_BUSY 0x01 | 239 | #define NVREG_XMITSTAT_BUSY 0x01 |
239 | 240 | ||
@@ -249,6 +250,7 @@ enum { | |||
249 | #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE | 250 | #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE |
250 | NvRegReceiverControl = 0x094, | 251 | NvRegReceiverControl = 0x094, |
251 | #define NVREG_RCVCTL_START 0x01 | 252 | #define NVREG_RCVCTL_START 0x01 |
253 | #define NVREG_RCVCTL_RX_PATH_EN 0x01000000 | ||
252 | NvRegReceiverStatus = 0x98, | 254 | NvRegReceiverStatus = 0x98, |
253 | #define NVREG_RCVSTAT_BUSY 0x01 | 255 | #define NVREG_RCVSTAT_BUSY 0x01 |
254 | 256 | ||
@@ -1169,16 +1171,21 @@ static void nv_start_rx(struct net_device *dev) | |||
1169 | { | 1171 | { |
1170 | struct fe_priv *np = netdev_priv(dev); | 1172 | struct fe_priv *np = netdev_priv(dev); |
1171 | u8 __iomem *base = get_hwbase(dev); | 1173 | u8 __iomem *base = get_hwbase(dev); |
1174 | u32 rx_ctrl = readl(base + NvRegReceiverControl); | ||
1172 | 1175 | ||
1173 | dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); | 1176 | dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); |
1174 | /* Already running? Stop it. */ | 1177 | /* Already running? Stop it. */ |
1175 | if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { | 1178 | if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { |
1176 | writel(0, base + NvRegReceiverControl); | 1179 | rx_ctrl &= ~NVREG_RCVCTL_START; |
1180 | writel(rx_ctrl, base + NvRegReceiverControl); | ||
1177 | pci_push(base); | 1181 | pci_push(base); |
1178 | } | 1182 | } |
1179 | writel(np->linkspeed, base + NvRegLinkSpeed); | 1183 | writel(np->linkspeed, base + NvRegLinkSpeed); |
1180 | pci_push(base); | 1184 | pci_push(base); |
1181 | writel(NVREG_RCVCTL_START, base + NvRegReceiverControl); | 1185 | rx_ctrl |= NVREG_RCVCTL_START; |
1186 | if (np->mac_in_use) | ||
1187 | rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; | ||
1188 | writel(rx_ctrl, base + NvRegReceiverControl); | ||
1182 | dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", | 1189 | dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", |
1183 | dev->name, np->duplex, np->linkspeed); | 1190 | dev->name, np->duplex, np->linkspeed); |
1184 | pci_push(base); | 1191 | pci_push(base); |
@@ -1186,39 +1193,59 @@ static void nv_start_rx(struct net_device *dev) | |||
1186 | 1193 | ||
1187 | static void nv_stop_rx(struct net_device *dev) | 1194 | static void nv_stop_rx(struct net_device *dev) |
1188 | { | 1195 | { |
1196 | struct fe_priv *np = netdev_priv(dev); | ||
1189 | u8 __iomem *base = get_hwbase(dev); | 1197 | u8 __iomem *base = get_hwbase(dev); |
1198 | u32 rx_ctrl = readl(base + NvRegReceiverControl); | ||
1190 | 1199 | ||
1191 | dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); | 1200 | dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); |
1192 | writel(0, base + NvRegReceiverControl); | 1201 | if (!np->mac_in_use) |
1202 | rx_ctrl &= ~NVREG_RCVCTL_START; | ||
1203 | else | ||
1204 | rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; | ||
1205 | writel(rx_ctrl, base + NvRegReceiverControl); | ||
1193 | reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, | 1206 | reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, |
1194 | NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, | 1207 | NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, |
1195 | KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); | 1208 | KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); |
1196 | 1209 | ||
1197 | udelay(NV_RXSTOP_DELAY2); | 1210 | udelay(NV_RXSTOP_DELAY2); |
1198 | writel(0, base + NvRegLinkSpeed); | 1211 | if (!np->mac_in_use) |
1212 | writel(0, base + NvRegLinkSpeed); | ||
1199 | } | 1213 | } |
1200 | 1214 | ||
1201 | static void nv_start_tx(struct net_device *dev) | 1215 | static void nv_start_tx(struct net_device *dev) |
1202 | { | 1216 | { |
1217 | struct fe_priv *np = netdev_priv(dev); | ||
1203 | u8 __iomem *base = get_hwbase(dev); | 1218 | u8 __iomem *base = get_hwbase(dev); |
1219 | u32 tx_ctrl = readl(base + NvRegTransmitterControl); | ||
1204 | 1220 | ||
1205 | dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); | 1221 | dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); |
1206 | writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl); | 1222 | tx_ctrl |= NVREG_XMITCTL_START; |
1223 | if (np->mac_in_use) | ||
1224 | tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; | ||
1225 | writel(tx_ctrl, base + NvRegTransmitterControl); | ||
1207 | pci_push(base); | 1226 | pci_push(base); |
1208 | } | 1227 | } |
1209 | 1228 | ||
1210 | static void nv_stop_tx(struct net_device *dev) | 1229 | static void nv_stop_tx(struct net_device *dev) |
1211 | { | 1230 | { |
1231 | struct fe_priv *np = netdev_priv(dev); | ||
1212 | u8 __iomem *base = get_hwbase(dev); | 1232 | u8 __iomem *base = get_hwbase(dev); |
1233 | u32 tx_ctrl = readl(base + NvRegTransmitterControl); | ||
1213 | 1234 | ||
1214 | dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); | 1235 | dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); |
1215 | writel(0, base + NvRegTransmitterControl); | 1236 | if (!np->mac_in_use) |
1237 | tx_ctrl &= ~NVREG_XMITCTL_START; | ||
1238 | else | ||
1239 | tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; | ||
1240 | writel(tx_ctrl, base + NvRegTransmitterControl); | ||
1216 | reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, | 1241 | reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, |
1217 | NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, | 1242 | NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, |
1218 | KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); | 1243 | KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); |
1219 | 1244 | ||
1220 | udelay(NV_TXSTOP_DELAY2); | 1245 | udelay(NV_TXSTOP_DELAY2); |
1221 | writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); | 1246 | if (!np->mac_in_use) |
1247 | writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, | ||
1248 | base + NvRegTransmitPoll); | ||
1222 | } | 1249 | } |
1223 | 1250 | ||
1224 | static void nv_txrx_reset(struct net_device *dev) | 1251 | static void nv_txrx_reset(struct net_device *dev) |
@@ -4148,20 +4175,6 @@ static int nv_mgmt_acquire_sema(struct net_device *dev) | |||
4148 | return 0; | 4175 | return 0; |
4149 | } | 4176 | } |
4150 | 4177 | ||
4151 | /* Indicate to mgmt unit whether driver is loaded or not */ | ||
4152 | static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded) | ||
4153 | { | ||
4154 | u8 __iomem *base = get_hwbase(dev); | ||
4155 | u32 tx_ctrl; | ||
4156 | |||
4157 | tx_ctrl = readl(base + NvRegTransmitterControl); | ||
4158 | if (loaded) | ||
4159 | tx_ctrl |= NVREG_XMITCTL_HOST_LOADED; | ||
4160 | else | ||
4161 | tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED; | ||
4162 | writel(tx_ctrl, base + NvRegTransmitterControl); | ||
4163 | } | ||
4164 | |||
4165 | static int nv_open(struct net_device *dev) | 4178 | static int nv_open(struct net_device *dev) |
4166 | { | 4179 | { |
4167 | struct fe_priv *np = netdev_priv(dev); | 4180 | struct fe_priv *np = netdev_priv(dev); |
@@ -4659,33 +4672,24 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4659 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | 4672 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); |
4660 | 4673 | ||
4661 | if (id->driver_data & DEV_HAS_MGMT_UNIT) { | 4674 | if (id->driver_data & DEV_HAS_MGMT_UNIT) { |
4662 | writel(0x1, base + 0x204); pci_push(base); | ||
4663 | msleep(500); | ||
4664 | /* management unit running on the mac? */ | 4675 | /* management unit running on the mac? */ |
4665 | np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; | 4676 | if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { |
4666 | if (np->mac_in_use) { | 4677 | np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; |
4667 | u32 mgmt_sync; | 4678 | dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); |
4668 | /* management unit setup the phy already? */ | 4679 | for (i = 0; i < 5000; i++) { |
4669 | mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; | 4680 | msleep(1); |
4670 | if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) { | 4681 | if (nv_mgmt_acquire_sema(dev)) { |
4671 | if (!nv_mgmt_acquire_sema(dev)) { | 4682 | /* management unit setup the phy already? */ |
4672 | for (i = 0; i < 5000; i++) { | 4683 | if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == |
4673 | msleep(1); | 4684 | NVREG_XMITCTL_SYNC_PHY_INIT) { |
4674 | mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; | 4685 | /* phy is inited by mgmt unit */ |
4675 | if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) | 4686 | phyinitialized = 1; |
4676 | continue; | 4687 | dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); |
4677 | if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) | 4688 | } else { |
4678 | phyinitialized = 1; | 4689 | /* we need to init the phy */ |
4679 | break; | ||
4680 | } | 4690 | } |
4681 | } else { | 4691 | break; |
4682 | /* we need to init the phy */ | ||
4683 | } | 4692 | } |
4684 | } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) { | ||
4685 | /* phy is inited by SMU */ | ||
4686 | phyinitialized = 1; | ||
4687 | } else { | ||
4688 | /* we need to init the phy */ | ||
4689 | } | 4693 | } |
4690 | } | 4694 | } |
4691 | } | 4695 | } |
@@ -4724,10 +4728,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4724 | if (!phyinitialized) { | 4728 | if (!phyinitialized) { |
4725 | /* reset it */ | 4729 | /* reset it */ |
4726 | phy_init(dev); | 4730 | phy_init(dev); |
4727 | } | 4731 | } else { |
4728 | 4732 | /* see if it is a gigabit phy */ | |
4729 | if (id->driver_data & DEV_HAS_MGMT_UNIT) { | 4733 | u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); |
4730 | nv_mgmt_driver_loaded(dev, 1); | 4734 | if (mii_status & PHY_GIGABIT) { |
4735 | np->gigabit = PHY_GIGABIT; | ||
4736 | } | ||
4731 | } | 4737 | } |
4732 | 4738 | ||
4733 | /* set default link speed settings */ | 4739 | /* set default link speed settings */ |
@@ -4749,8 +4755,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4749 | out_error: | 4755 | out_error: |
4750 | if (phystate_orig) | 4756 | if (phystate_orig) |
4751 | writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); | 4757 | writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); |
4752 | if (np->mac_in_use) | ||
4753 | nv_mgmt_driver_loaded(dev, 0); | ||
4754 | pci_set_drvdata(pci_dev, NULL); | 4758 | pci_set_drvdata(pci_dev, NULL); |
4755 | out_freering: | 4759 | out_freering: |
4756 | free_rings(dev); | 4760 | free_rings(dev); |
@@ -4780,9 +4784,6 @@ static void __devexit nv_remove(struct pci_dev *pci_dev) | |||
4780 | writel(np->orig_mac[0], base + NvRegMacAddrA); | 4784 | writel(np->orig_mac[0], base + NvRegMacAddrA); |
4781 | writel(np->orig_mac[1], base + NvRegMacAddrB); | 4785 | writel(np->orig_mac[1], base + NvRegMacAddrB); |
4782 | 4786 | ||
4783 | if (np->mac_in_use) | ||
4784 | nv_mgmt_driver_loaded(dev, 0); | ||
4785 | |||
4786 | /* free all structures */ | 4787 | /* free all structures */ |
4787 | free_rings(dev); | 4788 | free_rings(dev); |
4788 | iounmap(get_hwbase(dev)); | 4789 | iounmap(get_hwbase(dev)); |
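
Editor's note: the forcedeth changes above stop writing absolute values into the receiver/transmitter control registers and instead read-modify-write them, touching only the START and *_PATH_EN bits and deferring to the management unit when np->mac_in_use is set. A small sketch of that discipline, with the MMIO register emulated by a plain variable; start_rx()/stop_rx() are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define RCVCTL_START       0x01u
#define RCVCTL_RX_PATH_EN  0x01000000u

static uint32_t rx_ctrl_reg = RCVCTL_RX_PATH_EN;	/* stand-in for the MMIO register */

static void start_rx(int mac_in_use)
{
	uint32_t rx_ctrl = rx_ctrl_reg;			/* readl() */

	rx_ctrl |= RCVCTL_START;
	if (mac_in_use)					/* take the rx path over from the mgmt unit */
		rx_ctrl &= ~RCVCTL_RX_PATH_EN;
	rx_ctrl_reg = rx_ctrl;				/* writel() */
}

static void stop_rx(int mac_in_use)
{
	uint32_t rx_ctrl = rx_ctrl_reg;

	if (!mac_in_use)
		rx_ctrl &= ~RCVCTL_START;		/* we own the MAC: really stop it */
	else
		rx_ctrl |= RCVCTL_RX_PATH_EN;		/* shared MAC: hand the rx path back */
	rx_ctrl_reg = rx_ctrl;
}

int main(void)
{
	start_rx(1);
	printf("after start: 0x%08x\n", (unsigned)rx_ctrl_reg);
	stop_rx(1);
	printf("after stop:  0x%08x\n", (unsigned)rx_ctrl_reg);
	return 0;
}
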
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index c26a4b8e552a..ca2b21f9d444 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
@@ -154,8 +154,8 @@ static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) | |||
154 | int ret = 0; | 154 | int ret = 0; |
155 | u32 from = G_TC_FROM(skb->tc_verd); | 155 | u32 from = G_TC_FROM(skb->tc_verd); |
156 | 156 | ||
157 | stats->tx_packets++; | 157 | stats->rx_packets++; |
158 | stats->tx_bytes+=skb->len; | 158 | stats->rx_bytes+=skb->len; |
159 | 159 | ||
160 | if (!from || !skb->input_dev) { | 160 | if (!from || !skb->input_dev) { |
161 | dropped: | 161 | dropped: |
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 50ffe90488ff..f4aba4355b19 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h | |||
@@ -171,6 +171,7 @@ struct ixgb_adapter { | |||
171 | 171 | ||
172 | /* TX */ | 172 | /* TX */ |
173 | struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; | 173 | struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; |
174 | unsigned int restart_queue; | ||
174 | unsigned long timeo_start; | 175 | unsigned long timeo_start; |
175 | uint32_t tx_cmd_type; | 176 | uint32_t tx_cmd_type; |
176 | uint64_t hw_csum_tx_good; | 177 | uint64_t hw_csum_tx_good; |
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index cd22523fb035..82c044d6e08a 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c | |||
@@ -79,6 +79,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = { | |||
79 | {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, | 79 | {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, |
80 | {"tx_deferred_ok", IXGB_STAT(stats.dc)}, | 80 | {"tx_deferred_ok", IXGB_STAT(stats.dc)}, |
81 | {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, | 81 | {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, |
82 | {"tx_restart_queue", IXGB_STAT(restart_queue) }, | ||
82 | {"rx_long_length_errors", IXGB_STAT(stats.roc)}, | 83 | {"rx_long_length_errors", IXGB_STAT(stats.roc)}, |
83 | {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, | 84 | {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, |
84 | #ifdef NETIF_F_TSO | 85 | #ifdef NETIF_F_TSO |
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index 02089b64e42c..ecbf45861c68 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c | |||
@@ -399,8 +399,9 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw) | |||
399 | /* Zero out the other 15 receive addresses. */ | 399 | /* Zero out the other 15 receive addresses. */ |
400 | DEBUGOUT("Clearing RAR[1-15]\n"); | 400 | DEBUGOUT("Clearing RAR[1-15]\n"); |
401 | for(i = 1; i < IXGB_RAR_ENTRIES; i++) { | 401 | for(i = 1; i < IXGB_RAR_ENTRIES; i++) { |
402 | IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | 402 | /* Write high reg first to disable the AV bit first */ |
403 | IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | 403 | IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); |
404 | IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | ||
404 | } | 405 | } |
405 | 406 | ||
406 | return; | 407 | return; |
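
Editor's note: the ixgb_init_rx_addrs() hunk above reorders the two writes so the dword carrying the Address Valid bit is cleared first, ensuring the hardware never sees a valid bit paired with a half-cleared address. The same ordering, expressed as a small helper; clear_rar_entry() is hypothetical, the macro is the driver's own accessor.

static void clear_rar_entry(struct ixgb_hw *hw, int i)
{
	IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);	/* high dword first: drops the AV bit */
	IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);	/* then the low address dword */
}
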
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index e628126c9c49..a083a9189230 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; | |||
36 | #else | 36 | #else |
37 | #define DRIVERNAPI "-NAPI" | 37 | #define DRIVERNAPI "-NAPI" |
38 | #endif | 38 | #endif |
39 | #define DRV_VERSION "1.0.117-k2"DRIVERNAPI | 39 | #define DRV_VERSION "1.0.126-k2"DRIVERNAPI |
40 | char ixgb_driver_version[] = DRV_VERSION; | 40 | char ixgb_driver_version[] = DRV_VERSION; |
41 | static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; | 41 | static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
42 | 42 | ||
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1287 | struct ixgb_buffer *buffer_info; | 1287 | struct ixgb_buffer *buffer_info; |
1288 | int len = skb->len; | 1288 | int len = skb->len; |
1289 | unsigned int offset = 0, size, count = 0, i; | 1289 | unsigned int offset = 0, size, count = 0, i; |
1290 | unsigned int mss = skb_shinfo(skb)->gso_size; | ||
1290 | 1291 | ||
1291 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 1292 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
1292 | unsigned int f; | 1293 | unsigned int f; |
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1298 | while(len) { | 1299 | while(len) { |
1299 | buffer_info = &tx_ring->buffer_info[i]; | 1300 | buffer_info = &tx_ring->buffer_info[i]; |
1300 | size = min(len, IXGB_MAX_DATA_PER_TXD); | 1301 | size = min(len, IXGB_MAX_DATA_PER_TXD); |
1302 | /* Workaround for premature desc write-backs | ||
1303 | * in TSO mode. Append 4-byte sentinel desc */ | ||
1304 | if (unlikely(mss && !nr_frags && size == len && size > 8)) | ||
1305 | size -= 4; | ||
1306 | |||
1301 | buffer_info->length = size; | 1307 | buffer_info->length = size; |
1302 | WARN_ON(buffer_info->dma != 0); | 1308 | WARN_ON(buffer_info->dma != 0); |
1303 | buffer_info->dma = | 1309 | buffer_info->dma = |
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1324 | while(len) { | 1330 | while(len) { |
1325 | buffer_info = &tx_ring->buffer_info[i]; | 1331 | buffer_info = &tx_ring->buffer_info[i]; |
1326 | size = min(len, IXGB_MAX_DATA_PER_TXD); | 1332 | size = min(len, IXGB_MAX_DATA_PER_TXD); |
1333 | |||
1334 | /* Workaround for premature desc write-backs | ||
1335 | * in TSO mode. Append 4-byte sentinel desc */ | ||
1336 | if (unlikely(mss && !nr_frags && size == len | ||
1337 | && size > 8)) | ||
1338 | size -= 4; | ||
1339 | |||
1327 | buffer_info->length = size; | 1340 | buffer_info->length = size; |
1328 | buffer_info->dma = | 1341 | buffer_info->dma = |
1329 | pci_map_page(adapter->pdev, | 1342 | pci_map_page(adapter->pdev, |
@@ -1398,11 +1411,43 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) | |||
1398 | IXGB_WRITE_REG(&adapter->hw, TDT, i); | 1411 | IXGB_WRITE_REG(&adapter->hw, TDT, i); |
1399 | } | 1412 | } |
1400 | 1413 | ||
1414 | static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size) | ||
1415 | { | ||
1416 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
1417 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | ||
1418 | |||
1419 | netif_stop_queue(netdev); | ||
1420 | /* Herbert's original patch had: | ||
1421 | * smp_mb__after_netif_stop_queue(); | ||
1422 | * but since that doesn't exist yet, just open code it. */ | ||
1423 | smp_mb(); | ||
1424 | |||
1425 | /* We need to check again in case another CPU has just | ||
1426 | * made room available. */ | ||
1427 | if (likely(IXGB_DESC_UNUSED(tx_ring) < size)) | ||
1428 | return -EBUSY; | ||
1429 | |||
1430 | /* A reprieve! */ | ||
1431 | netif_start_queue(netdev); | ||
1432 | ++adapter->restart_queue; | ||
1433 | return 0; | ||
1434 | } | ||
1435 | |||
1436 | static int ixgb_maybe_stop_tx(struct net_device *netdev, | ||
1437 | struct ixgb_desc_ring *tx_ring, int size) | ||
1438 | { | ||
1439 | if (likely(IXGB_DESC_UNUSED(tx_ring) >= size)) | ||
1440 | return 0; | ||
1441 | return __ixgb_maybe_stop_tx(netdev, size); | ||
1442 | } | ||
1443 | |||
1444 | |||
1401 | /* Tx Descriptors needed, worst case */ | 1445 | /* Tx Descriptors needed, worst case */ |
1402 | #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ | 1446 | #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ |
1403 | (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) | 1447 | (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) |
1404 | #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \ | 1448 | #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \ |
1405 | MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 | 1449 | MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \ |
1450 | + 1 /* one more needed for sentinel TSO workaround */ | ||
1406 | 1451 | ||
1407 | static int | 1452 | static int |
1408 | ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 1453 | ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
@@ -1430,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1430 | spin_lock_irqsave(&adapter->tx_lock, flags); | 1475 | spin_lock_irqsave(&adapter->tx_lock, flags); |
1431 | #endif | 1476 | #endif |
1432 | 1477 | ||
1433 | if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) { | 1478 | if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, |
1479 | DESC_NEEDED))) { | ||
1434 | netif_stop_queue(netdev); | 1480 | netif_stop_queue(netdev); |
1435 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | 1481 | spin_unlock_irqrestore(&adapter->tx_lock, flags); |
1436 | return NETDEV_TX_BUSY; | 1482 | return NETDEV_TX_BUSY; |
@@ -1468,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1468 | 1514 | ||
1469 | #ifdef NETIF_F_LLTX | 1515 | #ifdef NETIF_F_LLTX |
1470 | /* Make sure there is space in the ring for the next send. */ | 1516 | /* Make sure there is space in the ring for the next send. */ |
1471 | if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) | 1517 | ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); |
1472 | netif_stop_queue(netdev); | ||
1473 | 1518 | ||
1474 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | 1519 | spin_unlock_irqrestore(&adapter->tx_lock, flags); |
1475 | 1520 | ||
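
Editor's note: the ixgb_main.c hunks above introduce the stop-queue/barrier/re-check idiom that avoids a lost wakeup between the transmit path and descriptor cleanup: stop the queue first, then re-check the ring after a full memory barrier, so either the cleanup path sees the stopped queue and wakes it, or the transmit path sees the space it just freed. A schematic version of the pattern, assuming a hypothetical ring type and ring_unused_descs() helper (kernel-style sketch, not the ixgb functions themselves):

#include <linux/netdevice.h>

struct my_tx_ring;						/* stand-in for the driver's ring type */
static int ring_unused_descs(struct my_tx_ring *ring);		/* assumed helper, like IXGB_DESC_UNUSED() */

static int my_maybe_stop_tx(struct net_device *netdev,
			    struct my_tx_ring *ring, int needed)
{
	if (likely(ring_unused_descs(ring) >= needed))
		return 0;				/* fast path: enough descriptors */

	netif_stop_queue(netdev);
	smp_mb();					/* order the stop against the cleanup path's check */

	/* Re-check: a completion on another CPU may have freed space between
	 * the first test and the stop. */
	if (likely(ring_unused_descs(ring) < needed))
		return -EBUSY;				/* still full: stay stopped until cleanup wakes us */

	netif_start_queue(netdev);			/* a reprieve: room appeared meanwhile */
	return 0;
}
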
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 82c10dec1b5a..2b739fd584f1 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -229,9 +229,11 @@ struct net_device loopback_dev = { | |||
229 | }; | 229 | }; |
230 | 230 | ||
231 | /* Setup and register the loopback device. */ | 231 | /* Setup and register the loopback device. */ |
232 | int __init loopback_init(void) | 232 | static int __init loopback_init(void) |
233 | { | 233 | { |
234 | return register_netdev(&loopback_dev); | 234 | return register_netdev(&loopback_dev); |
235 | }; | 235 | }; |
236 | 236 | ||
237 | module_init(loopback_init); | ||
238 | |||
237 | EXPORT_SYMBOL(loopback_dev); | 239 | EXPORT_SYMBOL(loopback_dev); |
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 2b1238e2dbdb..d88e9b2e93cf 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -1617,6 +1617,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1617 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), | 1617 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), |
1618 | PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), | 1618 | PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), |
1619 | PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), | 1619 | PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), |
1620 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88), | ||
1620 | PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), | 1621 | PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), |
1621 | PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), | 1622 | PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), |
1622 | PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), | 1623 | PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), |
@@ -1667,6 +1668,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1667 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), | 1668 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), |
1668 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), | 1669 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), |
1669 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), | 1670 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), |
1671 | PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c), | ||
1670 | PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), | 1672 | PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), |
1671 | PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), | 1673 | PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), |
1672 | PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307), | 1674 | PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307), |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index d79d141a601d..8844c20eac2d 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -208,6 +208,15 @@ static void ql_write_common_reg(struct ql3_adapter *qdev, | |||
208 | return; | 208 | return; |
209 | } | 209 | } |
210 | 210 | ||
211 | static void ql_write_nvram_reg(struct ql3_adapter *qdev, | ||
212 | u32 __iomem *reg, u32 value) | ||
213 | { | ||
214 | writel(value, reg); | ||
215 | readl(reg); | ||
216 | udelay(1); | ||
217 | return; | ||
218 | } | ||
219 | |||
211 | static void ql_write_page0_reg(struct ql3_adapter *qdev, | 220 | static void ql_write_page0_reg(struct ql3_adapter *qdev, |
212 | u32 __iomem *reg, u32 value) | 221 | u32 __iomem *reg, u32 value) |
213 | { | 222 | { |
@@ -336,9 +345,9 @@ static void fm93c56a_select(struct ql3_adapter *qdev) | |||
336 | qdev->mem_map_registers; | 345 | qdev->mem_map_registers; |
337 | 346 | ||
338 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; | 347 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; |
339 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 348 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
340 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | 349 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data); |
341 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 350 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
342 | ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); | 351 | ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); |
343 | } | 352 | } |
344 | 353 | ||
@@ -355,14 +364,14 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) | |||
355 | qdev->mem_map_registers; | 364 | qdev->mem_map_registers; |
356 | 365 | ||
357 | /* Clock in a zero, then do the start bit */ | 366 | /* Clock in a zero, then do the start bit */ |
358 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 367 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
359 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | 368 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
360 | AUBURN_EEPROM_DO_1); | 369 | AUBURN_EEPROM_DO_1); |
361 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 370 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
362 | ISP_NVRAM_MASK | qdev-> | 371 | ISP_NVRAM_MASK | qdev-> |
363 | eeprom_cmd_data | AUBURN_EEPROM_DO_1 | | 372 | eeprom_cmd_data | AUBURN_EEPROM_DO_1 | |
364 | AUBURN_EEPROM_CLK_RISE); | 373 | AUBURN_EEPROM_CLK_RISE); |
365 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 374 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
366 | ISP_NVRAM_MASK | qdev-> | 375 | ISP_NVRAM_MASK | qdev-> |
367 | eeprom_cmd_data | AUBURN_EEPROM_DO_1 | | 376 | eeprom_cmd_data | AUBURN_EEPROM_DO_1 | |
368 | AUBURN_EEPROM_CLK_FALL); | 377 | AUBURN_EEPROM_CLK_FALL); |
@@ -378,20 +387,20 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) | |||
378 | * If the bit changed, then change the DO state to | 387 | * If the bit changed, then change the DO state to |
379 | * match | 388 | * match |
380 | */ | 389 | */ |
381 | ql_write_common_reg(qdev, | 390 | ql_write_nvram_reg(qdev, |
382 | &port_regs->CommonRegs. | 391 | &port_regs->CommonRegs. |
383 | serialPortInterfaceReg, | 392 | serialPortInterfaceReg, |
384 | ISP_NVRAM_MASK | qdev-> | 393 | ISP_NVRAM_MASK | qdev-> |
385 | eeprom_cmd_data | dataBit); | 394 | eeprom_cmd_data | dataBit); |
386 | previousBit = dataBit; | 395 | previousBit = dataBit; |
387 | } | 396 | } |
388 | ql_write_common_reg(qdev, | 397 | ql_write_nvram_reg(qdev, |
389 | &port_regs->CommonRegs. | 398 | &port_regs->CommonRegs. |
390 | serialPortInterfaceReg, | 399 | serialPortInterfaceReg, |
391 | ISP_NVRAM_MASK | qdev-> | 400 | ISP_NVRAM_MASK | qdev-> |
392 | eeprom_cmd_data | dataBit | | 401 | eeprom_cmd_data | dataBit | |
393 | AUBURN_EEPROM_CLK_RISE); | 402 | AUBURN_EEPROM_CLK_RISE); |
394 | ql_write_common_reg(qdev, | 403 | ql_write_nvram_reg(qdev, |
395 | &port_regs->CommonRegs. | 404 | &port_regs->CommonRegs. |
396 | serialPortInterfaceReg, | 405 | serialPortInterfaceReg, |
397 | ISP_NVRAM_MASK | qdev-> | 406 | ISP_NVRAM_MASK | qdev-> |
@@ -412,20 +421,20 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) | |||
412 | * If the bit changed, then change the DO state to | 421 | * If the bit changed, then change the DO state to |
413 | * match | 422 | * match |
414 | */ | 423 | */ |
415 | ql_write_common_reg(qdev, | 424 | ql_write_nvram_reg(qdev, |
416 | &port_regs->CommonRegs. | 425 | &port_regs->CommonRegs. |
417 | serialPortInterfaceReg, | 426 | serialPortInterfaceReg, |
418 | ISP_NVRAM_MASK | qdev-> | 427 | ISP_NVRAM_MASK | qdev-> |
419 | eeprom_cmd_data | dataBit); | 428 | eeprom_cmd_data | dataBit); |
420 | previousBit = dataBit; | 429 | previousBit = dataBit; |
421 | } | 430 | } |
422 | ql_write_common_reg(qdev, | 431 | ql_write_nvram_reg(qdev, |
423 | &port_regs->CommonRegs. | 432 | &port_regs->CommonRegs. |
424 | serialPortInterfaceReg, | 433 | serialPortInterfaceReg, |
425 | ISP_NVRAM_MASK | qdev-> | 434 | ISP_NVRAM_MASK | qdev-> |
426 | eeprom_cmd_data | dataBit | | 435 | eeprom_cmd_data | dataBit | |
427 | AUBURN_EEPROM_CLK_RISE); | 436 | AUBURN_EEPROM_CLK_RISE); |
428 | ql_write_common_reg(qdev, | 437 | ql_write_nvram_reg(qdev, |
429 | &port_regs->CommonRegs. | 438 | &port_regs->CommonRegs. |
430 | serialPortInterfaceReg, | 439 | serialPortInterfaceReg, |
431 | ISP_NVRAM_MASK | qdev-> | 440 | ISP_NVRAM_MASK | qdev-> |
@@ -443,7 +452,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev) | |||
443 | struct ql3xxx_port_registers __iomem *port_regs = | 452 | struct ql3xxx_port_registers __iomem *port_regs = |
444 | qdev->mem_map_registers; | 453 | qdev->mem_map_registers; |
445 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; | 454 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; |
446 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 455 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
447 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | 456 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data); |
448 | } | 457 | } |
449 | 458 | ||
@@ -461,12 +470,12 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) | |||
461 | /* Read the data bits */ | 470 | /* Read the data bits */ |
462 | /* The first bit is a dummy. Clock right over it. */ | 471 | /* The first bit is a dummy. Clock right over it. */ |
463 | for (i = 0; i < dataBits; i++) { | 472 | for (i = 0; i < dataBits; i++) { |
464 | ql_write_common_reg(qdev, | 473 | ql_write_nvram_reg(qdev, |
465 | &port_regs->CommonRegs. | 474 | &port_regs->CommonRegs. |
466 | serialPortInterfaceReg, | 475 | serialPortInterfaceReg, |
467 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | 476 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
468 | AUBURN_EEPROM_CLK_RISE); | 477 | AUBURN_EEPROM_CLK_RISE); |
469 | ql_write_common_reg(qdev, | 478 | ql_write_nvram_reg(qdev, |
470 | &port_regs->CommonRegs. | 479 | &port_regs->CommonRegs. |
471 | serialPortInterfaceReg, | 480 | serialPortInterfaceReg, |
472 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | 481 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
@@ -3370,7 +3379,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3370 | SET_MODULE_OWNER(ndev); | 3379 | SET_MODULE_OWNER(ndev); |
3371 | SET_NETDEV_DEV(ndev, &pdev->dev); | 3380 | SET_NETDEV_DEV(ndev, &pdev->dev); |
3372 | 3381 | ||
3373 | ndev->features = NETIF_F_LLTX; | ||
3374 | if (pci_using_dac) | 3382 | if (pci_using_dac) |
3375 | ndev->features |= NETIF_F_HIGHDMA; | 3383 | ndev->features |= NETIF_F_HIGHDMA; |
3376 | 3384 | ||
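
Editor's note: ql_write_nvram_reg() above adds a read-back and a 1 us delay to every serial-port register write used for bit-banging the 93C56 EEPROM. The underlying idiom, sketched generically; write_flush_delay() is an illustrative name, the accessors are the standard kernel ones.

#include <linux/io.h>
#include <linux/delay.h>

static void write_flush_delay(void __iomem *reg, u32 value)
{
	writel(value, reg);	/* post the write */
	readl(reg);		/* read back to flush the posted write to the device */
	udelay(1);		/* let the slow serial EEPROM latch the new pin state */
}
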
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 785e4a535f9e..616be8d0fa85 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -90,7 +90,8 @@ | |||
90 | 90 | ||
91 | #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ | 91 | #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ |
92 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ | 92 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ |
93 | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) | 93 | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ |
94 | SUPPORTED_Pause | SUPPORTED_Autoneg) | ||
94 | 95 | ||
95 | #define DRV_NAME "sungem" | 96 | #define DRV_NAME "sungem" |
96 | #define DRV_VERSION "0.98" | 97 | #define DRV_VERSION "0.98" |
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index 49800b25907d..d21991ee88c4 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c | |||
@@ -3,10 +3,9 @@ | |||
3 | * | 3 | * |
4 | * This file could be shared with other drivers. | 4 | * This file could be shared with other drivers. |
5 | * | 5 | * |
6 | * (c) 2002, Benjamin Herrenscmidt (benh@kernel.crashing.org) | 6 | * (c) 2002-2007, Benjamin Herrenscmidt (benh@kernel.crashing.org) |
7 | * | 7 | * |
8 | * TODO: | 8 | * TODO: |
9 | * - Implement WOL | ||
10 | * - Add support for PHYs that provide an IRQ line | 9 | * - Add support for PHYs that provide an IRQ line |
11 | * - Eventually moved the entire polling state machine in | 10 | * - Eventually moved the entire polling state machine in |
12 | * there (out of the eth driver), so that it can easily be | 11 | * there (out of the eth driver), so that it can easily be |
@@ -152,6 +151,44 @@ static int bcm5221_suspend(struct mii_phy* phy) | |||
152 | return 0; | 151 | return 0; |
153 | } | 152 | } |
154 | 153 | ||
154 | static int bcm5241_init(struct mii_phy* phy) | ||
155 | { | ||
156 | u16 data; | ||
157 | |||
158 | data = phy_read(phy, MII_BCM5221_TEST); | ||
159 | phy_write(phy, MII_BCM5221_TEST, | ||
160 | data | MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
161 | |||
162 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); | ||
163 | phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, | ||
164 | data | MII_BCM5221_SHDOW_AUX_STAT2_APD); | ||
165 | |||
166 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); | ||
167 | phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, | ||
168 | data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); | ||
169 | |||
170 | data = phy_read(phy, MII_BCM5221_TEST); | ||
171 | phy_write(phy, MII_BCM5221_TEST, | ||
172 | data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static int bcm5241_suspend(struct mii_phy* phy) | ||
178 | { | ||
179 | u16 data; | ||
180 | |||
181 | data = phy_read(phy, MII_BCM5221_TEST); | ||
182 | phy_write(phy, MII_BCM5221_TEST, | ||
183 | data | MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
184 | |||
185 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); | ||
186 | phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, | ||
187 | data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
155 | static int bcm5400_init(struct mii_phy* phy) | 192 | static int bcm5400_init(struct mii_phy* phy) |
156 | { | 193 | { |
157 | u16 data; | 194 | u16 data; |
@@ -373,6 +410,10 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) | |||
373 | adv |= ADVERTISE_100HALF; | 410 | adv |= ADVERTISE_100HALF; |
374 | if (advertise & ADVERTISED_100baseT_Full) | 411 | if (advertise & ADVERTISED_100baseT_Full) |
375 | adv |= ADVERTISE_100FULL; | 412 | adv |= ADVERTISE_100FULL; |
413 | if (advertise & ADVERTISED_Pause) | ||
414 | adv |= ADVERTISE_PAUSE_CAP; | ||
415 | if (advertise & ADVERTISED_Asym_Pause) | ||
416 | adv |= ADVERTISE_PAUSE_ASYM; | ||
376 | phy_write(phy, MII_ADVERTISE, adv); | 417 | phy_write(phy, MII_ADVERTISE, adv); |
377 | 418 | ||
378 | /* Setup 1000BT advertise */ | 419 | /* Setup 1000BT advertise */ |
@@ -436,12 +477,15 @@ static int bcm54xx_read_link(struct mii_phy *phy) | |||
436 | val = phy_read(phy, MII_BCM5400_AUXSTATUS); | 477 | val = phy_read(phy, MII_BCM5400_AUXSTATUS); |
437 | link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> | 478 | link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> |
438 | MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); | 479 | MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); |
439 | phy->duplex = phy_BCM5400_link_table[link_mode][0] ? DUPLEX_FULL : DUPLEX_HALF; | 480 | phy->duplex = phy_BCM5400_link_table[link_mode][0] ? |
481 | DUPLEX_FULL : DUPLEX_HALF; | ||
440 | phy->speed = phy_BCM5400_link_table[link_mode][2] ? | 482 | phy->speed = phy_BCM5400_link_table[link_mode][2] ? |
441 | SPEED_1000 : | 483 | SPEED_1000 : |
442 | (phy_BCM5400_link_table[link_mode][1] ? SPEED_100 : SPEED_10); | 484 | (phy_BCM5400_link_table[link_mode][1] ? |
485 | SPEED_100 : SPEED_10); | ||
443 | val = phy_read(phy, MII_LPA); | 486 | val = phy_read(phy, MII_LPA); |
444 | phy->pause = ((val & LPA_PAUSE) != 0); | 487 | phy->pause = (phy->duplex == DUPLEX_FULL) && |
488 | ((val & LPA_PAUSE) != 0); | ||
445 | } | 489 | } |
446 | /* On non-aneg, we assume what we put in BMCR is the speed, | 490 | /* On non-aneg, we assume what we put in BMCR is the speed, |
447 | * though magic-aneg shouldn't prevent this case from occurring | 491 | * though magic-aneg shouldn't prevent this case from occurring |
@@ -450,6 +494,28 @@ static int bcm54xx_read_link(struct mii_phy *phy) | |||
450 | return 0; | 494 | return 0; |
451 | } | 495 | } |
452 | 496 | ||
497 | static int marvell88e1111_init(struct mii_phy* phy) | ||
498 | { | ||
499 | u16 rev; | ||
500 | |||
501 | /* magic init sequence for rev 0 */ | ||
502 | rev = phy_read(phy, MII_PHYSID2) & 0x000f; | ||
503 | if (rev == 0) { | ||
504 | phy_write(phy, 0x1d, 0x000a); | ||
505 | phy_write(phy, 0x1e, 0x0821); | ||
506 | |||
507 | phy_write(phy, 0x1d, 0x0006); | ||
508 | phy_write(phy, 0x1e, 0x8600); | ||
509 | |||
510 | phy_write(phy, 0x1d, 0x000b); | ||
511 | phy_write(phy, 0x1e, 0x0100); | ||
512 | |||
513 | phy_write(phy, 0x1d, 0x0004); | ||
514 | phy_write(phy, 0x1e, 0x4850); | ||
515 | } | ||
516 | return 0; | ||
517 | } | ||
518 | |||
453 | static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) | 519 | static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) |
454 | { | 520 | { |
455 | u16 ctl, adv; | 521 | u16 ctl, adv; |
@@ -471,6 +537,10 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) | |||
471 | adv |= ADVERTISE_100HALF; | 537 | adv |= ADVERTISE_100HALF; |
472 | if (advertise & ADVERTISED_100baseT_Full) | 538 | if (advertise & ADVERTISED_100baseT_Full) |
473 | adv |= ADVERTISE_100FULL; | 539 | adv |= ADVERTISE_100FULL; |
540 | if (advertise & ADVERTISED_Pause) | ||
541 | adv |= ADVERTISE_PAUSE_CAP; | ||
542 | if (advertise & ADVERTISED_Asym_Pause) | ||
543 | adv |= ADVERTISE_PAUSE_ASYM; | ||
474 | phy_write(phy, MII_ADVERTISE, adv); | 544 | phy_write(phy, MII_ADVERTISE, adv); |
475 | 545 | ||
476 | /* Setup 1000BT advertise & enable crossover detect | 546 | /* Setup 1000BT advertise & enable crossover detect |
@@ -549,7 +619,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) | |||
549 | 619 | ||
550 | static int marvell_read_link(struct mii_phy *phy) | 620 | static int marvell_read_link(struct mii_phy *phy) |
551 | { | 621 | { |
552 | u16 status; | 622 | u16 status, pmask; |
553 | 623 | ||
554 | if (phy->autoneg) { | 624 | if (phy->autoneg) { |
555 | status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS); | 625 | status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS); |
@@ -565,7 +635,9 @@ static int marvell_read_link(struct mii_phy *phy) | |||
565 | phy->duplex = DUPLEX_FULL; | 635 | phy->duplex = DUPLEX_FULL; |
566 | else | 636 | else |
567 | phy->duplex = DUPLEX_HALF; | 637 | phy->duplex = DUPLEX_HALF; |
568 | phy->pause = 0; /* XXX Check against spec ! */ | 638 | pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE | |
639 | MII_M1011_PHY_SPEC_STATUS_RX_PAUSE; | ||
640 | phy->pause = (status & pmask) == pmask; | ||
569 | } | 641 | } |
570 | /* On non-aneg, we assume what we put in BMCR is the speed, | 642 | /* On non-aneg, we assume what we put in BMCR is the speed, |
571 | * though magic-aneg shouldn't prevent this case from occurring | 643 | * though magic-aneg shouldn't prevent this case from occurring |
@@ -595,6 +667,10 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) | |||
595 | adv |= ADVERTISE_100HALF; | 667 | adv |= ADVERTISE_100HALF; |
596 | if (advertise & ADVERTISED_100baseT_Full) | 668 | if (advertise & ADVERTISED_100baseT_Full) |
597 | adv |= ADVERTISE_100FULL; | 669 | adv |= ADVERTISE_100FULL; |
670 | if (advertise & ADVERTISED_Pause) | ||
671 | adv |= ADVERTISE_PAUSE_CAP; | ||
672 | if (advertise & ADVERTISED_Asym_Pause) | ||
673 | adv |= ADVERTISE_PAUSE_ASYM; | ||
598 | phy_write(phy, MII_ADVERTISE, adv); | 674 | phy_write(phy, MII_ADVERTISE, adv); |
599 | 675 | ||
600 | /* Start/Restart aneg */ | 676 | /* Start/Restart aneg */ |
@@ -666,7 +742,8 @@ static int genmii_read_link(struct mii_phy *phy) | |||
666 | phy->speed = SPEED_100; | 742 | phy->speed = SPEED_100; |
667 | else | 743 | else |
668 | phy->speed = SPEED_10; | 744 | phy->speed = SPEED_10; |
669 | phy->pause = 0; | 745 | phy->pause = (phy->duplex == DUPLEX_FULL) && |
746 | ((lpa & LPA_PAUSE) != 0); | ||
670 | } | 747 | } |
671 | /* On non-aneg, we assume what we put in BMCR is the speed, | 748 | /* On non-aneg, we assume what we put in BMCR is the speed, |
672 | * though magic-aneg shouldn't prevent this case from occurring | 749 | * though magic-aneg shouldn't prevent this case from occurring |
@@ -676,11 +753,19 @@ static int genmii_read_link(struct mii_phy *phy) | |||
676 | } | 753 | } |
677 | 754 | ||
678 | 755 | ||
679 | #define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ | 756 | #define MII_BASIC_FEATURES \ |
680 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ | 757 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ |
681 | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII) | 758 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ |
682 | #define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ | 759 | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | \ |
683 | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) | 760 | SUPPORTED_Pause) |
761 | |||
762 | /* On gigabit capable PHYs, we advertise Pause support but not asym pause | ||
763 | * support for now as I'm not sure it's supported and Darwin doesn't do | ||
764 | * it neither. --BenH. | ||
765 | */ | ||
766 | #define MII_GBIT_FEATURES \ | ||
767 | (MII_BASIC_FEATURES | \ | ||
768 | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) | ||
684 | 769 | ||
685 | /* Broadcom BCM 5201 */ | 770 | /* Broadcom BCM 5201 */ |
686 | static struct mii_phy_ops bcm5201_phy_ops = { | 771 | static struct mii_phy_ops bcm5201_phy_ops = { |
@@ -720,6 +805,24 @@ static struct mii_phy_def bcm5221_phy_def = { | |||
720 | .ops = &bcm5221_phy_ops | 805 | .ops = &bcm5221_phy_ops |
721 | }; | 806 | }; |
722 | 807 | ||
808 | /* Broadcom BCM 5241 */ | ||
809 | static struct mii_phy_ops bcm5241_phy_ops = { | ||
810 | .suspend = bcm5241_suspend, | ||
811 | .init = bcm5241_init, | ||
812 | .setup_aneg = genmii_setup_aneg, | ||
813 | .setup_forced = genmii_setup_forced, | ||
814 | .poll_link = genmii_poll_link, | ||
815 | .read_link = genmii_read_link, | ||
816 | }; | ||
817 | static struct mii_phy_def bcm5241_phy_def = { | ||
818 | .phy_id = 0x0143bc30, | ||
819 | .phy_id_mask = 0xfffffff0, | ||
820 | .name = "BCM5241", | ||
821 | .features = MII_BASIC_FEATURES, | ||
822 | .magic_aneg = 1, | ||
823 | .ops = &bcm5241_phy_ops | ||
824 | }; | ||
825 | |||
723 | /* Broadcom BCM 5400 */ | 826 | /* Broadcom BCM 5400 */ |
724 | static struct mii_phy_ops bcm5400_phy_ops = { | 827 | static struct mii_phy_ops bcm5400_phy_ops = { |
725 | .init = bcm5400_init, | 828 | .init = bcm5400_init, |
@@ -854,11 +957,8 @@ static struct mii_phy_def bcm5462V_phy_def = { | |||
854 | .ops = &bcm5462V_phy_ops | 957 | .ops = &bcm5462V_phy_ops |
855 | }; | 958 | }; |
856 | 959 | ||
857 | /* Marvell 88E1101 (Apple seem to deal with 2 different revs, | 960 | /* Marvell 88E1101 and 88E1111 */ |
858 | * I masked out the 8 last bits to get both, but some specs | 961 | static struct mii_phy_ops marvell88e1101_phy_ops = { |
859 | * would be useful here) --BenH. | ||
860 | */ | ||
861 | static struct mii_phy_ops marvell_phy_ops = { | ||
862 | .suspend = generic_suspend, | 962 | .suspend = generic_suspend, |
863 | .setup_aneg = marvell_setup_aneg, | 963 | .setup_aneg = marvell_setup_aneg, |
864 | .setup_forced = marvell_setup_forced, | 964 | .setup_forced = marvell_setup_forced, |
@@ -866,13 +966,41 @@ static struct mii_phy_ops marvell_phy_ops = { | |||
866 | .read_link = marvell_read_link | 966 | .read_link = marvell_read_link |
867 | }; | 967 | }; |
868 | 968 | ||
869 | static struct mii_phy_def marvell_phy_def = { | 969 | static struct mii_phy_ops marvell88e1111_phy_ops = { |
870 | .phy_id = 0x01410c00, | 970 | .init = marvell88e1111_init, |
871 | .phy_id_mask = 0xffffff00, | 971 | .suspend = generic_suspend, |
872 | .name = "Marvell 88E1101", | 972 | .setup_aneg = marvell_setup_aneg, |
973 | .setup_forced = marvell_setup_forced, | ||
974 | .poll_link = genmii_poll_link, | ||
975 | .read_link = marvell_read_link | ||
976 | }; | ||
977 | |||
978 | /* two revs in darwin for the 88e1101 ... I could use a datasheet | ||
979 | * to get the proper names... | ||
980 | */ | ||
981 | static struct mii_phy_def marvell88e1101v1_phy_def = { | ||
982 | .phy_id = 0x01410c20, | ||
983 | .phy_id_mask = 0xfffffff0, | ||
984 | .name = "Marvell 88E1101v1", | ||
985 | .features = MII_GBIT_FEATURES, | ||
986 | .magic_aneg = 1, | ||
987 | .ops = &marvell88e1101_phy_ops | ||
988 | }; | ||
989 | static struct mii_phy_def marvell88e1101v2_phy_def = { | ||
990 | .phy_id = 0x01410c60, | ||
991 | .phy_id_mask = 0xfffffff0, | ||
992 | .name = "Marvell 88E1101v2", | ||
993 | .features = MII_GBIT_FEATURES, | ||
994 | .magic_aneg = 1, | ||
995 | .ops = &marvell88e1101_phy_ops | ||
996 | }; | ||
997 | static struct mii_phy_def marvell88e1111_phy_def = { | ||
998 | .phy_id = 0x01410cc0, | ||
999 | .phy_id_mask = 0xfffffff0, | ||
1000 | .name = "Marvell 88E1111", | ||
873 | .features = MII_GBIT_FEATURES, | 1001 | .features = MII_GBIT_FEATURES, |
874 | .magic_aneg = 1, | 1002 | .magic_aneg = 1, |
875 | .ops = &marvell_phy_ops | 1003 | .ops = &marvell88e1111_phy_ops |
876 | }; | 1004 | }; |
877 | 1005 | ||
878 | /* Generic implementation for most 10/100 PHYs */ | 1006 | /* Generic implementation for most 10/100 PHYs */ |
@@ -895,6 +1023,7 @@ static struct mii_phy_def genmii_phy_def = { | |||
895 | static struct mii_phy_def* mii_phy_table[] = { | 1023 | static struct mii_phy_def* mii_phy_table[] = { |
896 | &bcm5201_phy_def, | 1024 | &bcm5201_phy_def, |
897 | &bcm5221_phy_def, | 1025 | &bcm5221_phy_def, |
1026 | &bcm5241_phy_def, | ||
898 | &bcm5400_phy_def, | 1027 | &bcm5400_phy_def, |
899 | &bcm5401_phy_def, | 1028 | &bcm5401_phy_def, |
900 | &bcm5411_phy_def, | 1029 | &bcm5411_phy_def, |
@@ -902,7 +1031,9 @@ static struct mii_phy_def* mii_phy_table[] = { | |||
902 | &bcm5421k2_phy_def, | 1031 | &bcm5421k2_phy_def, |
903 | &bcm5461_phy_def, | 1032 | &bcm5461_phy_def, |
904 | &bcm5462V_phy_def, | 1033 | &bcm5462V_phy_def, |
905 | &marvell_phy_def, | 1034 | &marvell88e1101v1_phy_def, |
1035 | &marvell88e1101v2_phy_def, | ||
1036 | &marvell88e1111_phy_def, | ||
906 | &genmii_phy_def, | 1037 | &genmii_phy_def, |
907 | NULL | 1038 | NULL |
908 | }; | 1039 | }; |
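
Editor's note: the sungem_phy changes above advertise pause from the ethtool mask and only report pause as resolved on full-duplex links whose partner also advertised it. A condensed sketch of those two pieces using the standard mii/ethtool constants; the helpers are illustrative, not the driver's.

#include <linux/ethtool.h>
#include <linux/mii.h>

/* Map the ethtool advertising mask to MII advertisement register bits. */
static u16 pause_adv_bits(u32 advertise)
{
	u16 adv = 0;

	if (advertise & ADVERTISED_Pause)
		adv |= ADVERTISE_PAUSE_CAP;
	if (advertise & ADVERTISED_Asym_Pause)
		adv |= ADVERTISE_PAUSE_ASYM;
	return adv;
}

/* Pause is only meaningful on a full-duplex link where the partner
 * advertised symmetric pause (link partner ability after autoneg). */
static int pause_active(int duplex_full, u16 lpa)
{
	return duplex_full && (lpa & LPA_PAUSE_CAP) != 0;
}
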
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h index 8ee1ca0471cf..1d70ba6f9f10 100644 --- a/drivers/net/sungem_phy.h +++ b/drivers/net/sungem_phy.h | |||
@@ -30,7 +30,7 @@ struct mii_phy_def | |||
30 | struct mii_phy | 30 | struct mii_phy |
31 | { | 31 | { |
32 | struct mii_phy_def* def; | 32 | struct mii_phy_def* def; |
33 | int advertising; | 33 | u32 advertising; |
34 | int mii_id; | 34 | int mii_id; |
35 | 35 | ||
36 | /* 1: autoneg enabled, 0: disabled */ | 36 | /* 1: autoneg enabled, 0: disabled */ |
@@ -85,6 +85,9 @@ extern int mii_phy_probe(struct mii_phy *phy, int mii_id); | |||
85 | #define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001 | 85 | #define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001 |
86 | #define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004 | 86 | #define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004 |
87 | 87 | ||
88 | /* MII BCM5241 Additional registers */ | ||
89 | #define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008 | ||
90 | |||
88 | /* MII BCM5400 1000-BASET Control register */ | 91 | /* MII BCM5400 1000-BASET Control register */ |
89 | #define MII_BCM5400_GB_CONTROL 0x09 | 92 | #define MII_BCM5400_GB_CONTROL 0x09 |
90 | #define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 | 93 | #define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 |
@@ -115,5 +118,7 @@ extern int mii_phy_probe(struct mii_phy *phy, int mii_id); | |||
115 | #define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 | 118 | #define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 |
116 | #define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 | 119 | #define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 |
117 | #define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 | 120 | #define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 |
121 | #define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE 0x0008 | ||
122 | #define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE 0x0004 | ||
118 | 123 | ||
119 | #endif /* __SUNGEM_PHY_H__ */ | 124 | #endif /* __SUNGEM_PHY_H__ */ |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 4056ba1ff3c7..f4bf62c2a7a5 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -68,8 +68,8 @@ | |||
68 | 68 | ||
69 | #define DRV_MODULE_NAME "tg3" | 69 | #define DRV_MODULE_NAME "tg3" |
70 | #define PFX DRV_MODULE_NAME ": " | 70 | #define PFX DRV_MODULE_NAME ": " |
71 | #define DRV_MODULE_VERSION "3.71" | 71 | #define DRV_MODULE_VERSION "3.72" |
72 | #define DRV_MODULE_RELDATE "December 15, 2006" | 72 | #define DRV_MODULE_RELDATE "January 8, 2007" |
73 | 73 | ||
74 | #define TG3_DEF_MAC_MODE 0 | 74 | #define TG3_DEF_MAC_MODE 0 |
75 | #define TG3_DEF_RX_MODE 0 | 75 | #define TG3_DEF_RX_MODE 0 |
@@ -1015,7 +1015,12 @@ out: | |||
1015 | else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { | 1015 | else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { |
1016 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 1016 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); |
1017 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); | 1017 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); |
1018 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); | 1018 | if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { |
1019 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); | ||
1020 | tg3_writephy(tp, MII_TG3_TEST1, | ||
1021 | MII_TG3_TEST1_TRIM_EN | 0x4); | ||
1022 | } else | ||
1023 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); | ||
1019 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 1024 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); |
1020 | } | 1025 | } |
1021 | /* Set Extended packet length bit (bit 14) on all chips that */ | 1026 | /* Set Extended packet length bit (bit 14) on all chips that */ |
@@ -10803,9 +10808,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
10803 | 10808 | ||
10804 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 10809 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
10805 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 10810 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
10806 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | 10811 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { |
10807 | tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; | 10812 | tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; |
10808 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) | 10813 | if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) |
10814 | tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; | ||
10815 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) | ||
10809 | tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; | 10816 | tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; |
10810 | } | 10817 | } |
10811 | 10818 | ||
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index cf78a7e5997b..80f59ac7ec58 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
@@ -1658,6 +1658,9 @@ | |||
1658 | #define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */ | 1658 | #define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */ |
1659 | #define MII_TG3_EPHY_SHADOW_EN 0x80 | 1659 | #define MII_TG3_EPHY_SHADOW_EN 0x80 |
1660 | 1660 | ||
1661 | #define MII_TG3_TEST1 0x1e | ||
1662 | #define MII_TG3_TEST1_TRIM_EN 0x0010 | ||
1663 | |||
1661 | /* There are two ways to manage the TX descriptors on the tigon3. | 1664 | /* There are two ways to manage the TX descriptors on the tigon3. |
1662 | * Either the descriptors are in host DMA'able memory, or they | 1665 | * Either the descriptors are in host DMA'able memory, or they |
1663 | * exist only in the cards on-chip SRAM. All 16 send bds are under | 1666 | * exist only in the cards on-chip SRAM. All 16 send bds are under |
@@ -2256,6 +2259,7 @@ struct tg3 { | |||
2256 | #define TG3_FLG2_1SHOT_MSI 0x10000000 | 2259 | #define TG3_FLG2_1SHOT_MSI 0x10000000 |
2257 | #define TG3_FLG2_PHY_JITTER_BUG 0x20000000 | 2260 | #define TG3_FLG2_PHY_JITTER_BUG 0x20000000 |
2258 | #define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 | 2261 | #define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 |
2262 | #define TG3_FLG2_PHY_ADJUST_TRIM 0x80000000 | ||
2259 | 2263 | ||
2260 | u32 split_mode_max_reqs; | 2264 | u32 split_mode_max_reqs; |
2261 | #define SPLIT_MODE_5704_MAX_REQ 3 | 2265 | #define SPLIT_MODE_5704_MAX_REQ 3 |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 0e94fbbf7a94..b85857a84870 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -2664,7 +2664,7 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv) | |||
2664 | break; | 2664 | break; |
2665 | } | 2665 | } |
2666 | #endif | 2666 | #endif |
2667 | if (stats.len < sizeof(u->rx_data.header)) | 2667 | if (stats.len < sizeof(struct ieee80211_hdr_3addr)) |
2668 | break; | 2668 | break; |
2669 | switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) { | 2669 | switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) { |
2670 | case IEEE80211_FTYPE_MGMT: | 2670 | case IEEE80211_FTYPE_MGMT: |
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index f1dd81a1d592..3cfb0a3575e6 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -19,7 +19,7 @@ config PCI_MSI | |||
19 | 19 | ||
20 | config PCI_MULTITHREAD_PROBE | 20 | config PCI_MULTITHREAD_PROBE |
21 | bool "PCI Multi-threaded probe (EXPERIMENTAL)" | 21 | bool "PCI Multi-threaded probe (EXPERIMENTAL)" |
22 | depends on PCI && EXPERIMENTAL | 22 | depends on PCI && EXPERIMENTAL && BROKEN |
23 | help | 23 | help |
24 | Say Y here if you want the PCI core to spawn a new thread for | 24 | Say Y here if you want the PCI core to spawn a new thread for |
25 | every PCI device that is probed. This can cause a huge | 25 | every PCI device that is probed. This can cause a huge |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 8f0322d6f3bf..0535efc4f184 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1117,10 +1117,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_h | |||
1117 | static void quirk_sis_96x_smbus(struct pci_dev *dev) | 1117 | static void quirk_sis_96x_smbus(struct pci_dev *dev) |
1118 | { | 1118 | { |
1119 | u8 val = 0; | 1119 | u8 val = 0; |
1120 | printk(KERN_INFO "Enabling SiS 96x SMBus.\n"); | ||
1121 | pci_read_config_byte(dev, 0x77, &val); | ||
1122 | pci_write_config_byte(dev, 0x77, val & ~0x10); | ||
1123 | pci_read_config_byte(dev, 0x77, &val); | 1120 | pci_read_config_byte(dev, 0x77, &val); |
1121 | if (val & 0x10) { | ||
1122 | printk(KERN_INFO "Enabling SiS 96x SMBus.\n"); | ||
1123 | pci_write_config_byte(dev, 0x77, val & ~0x10); | ||
1124 | } | ||
1124 | } | 1125 | } |
1125 | 1126 | ||
1126 | /* | 1127 | /* |
@@ -1152,11 +1153,12 @@ static void quirk_sis_503(struct pci_dev *dev) | |||
1152 | printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible); | 1153 | printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible); |
1153 | 1154 | ||
1154 | /* | 1155 | /* |
1155 | * Ok, it now shows up as a 96x.. The 96x quirks are after | 1156 | * Ok, it now shows up as a 96x.. run the 96x quirk by |
1156 | * the 503 quirk in the quirk table, so they'll automatically | 1157 | * hand in case it has already been processed. |
1157 | * run and enable things like the SMBus device | 1158 | * (depends on link order, which is apparently not guaranteed) |
1158 | */ | 1159 | */ |
1159 | dev->device = devid; | 1160 | dev->device = devid; |
1161 | quirk_sis_96x_smbus(dev); | ||
1160 | } | 1162 | } |
1161 | 1163 | ||
1162 | static void __init quirk_sis_96x_compatible(struct pci_dev *dev) | 1164 | static void __init quirk_sis_96x_compatible(struct pci_dev *dev) |
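The "link order" remark above refers to how PCI fixups are registered: each DECLARE_PCI_FIXUP_FINAL() entry lands in a linker section whose ordering is a link-order accident, so quirk_sis_503() cannot rely on running before or after the 96x SMBus quirk. A hedged registration sketch (the exact device IDs used by quirks.c may differ; these are illustrative):

	/* Both fixups match independently; their relative position in the
	 * .pci_fixup_final section depends on link order. */
	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503,
				quirk_sis_503);
	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961,
				quirk_sis_96x_smbus);

Making quirk_sis_96x_smbus() a no-op when the disable bit is already clear is what keeps the explicit call from quirk_sis_503() safe even when the fixup also runs on its own.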
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 45f2b20ef513..fab381ed853c 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
@@ -193,6 +193,18 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor, | |||
193 | struct pci_dev *dev; | 193 | struct pci_dev *dev; |
194 | 194 | ||
195 | WARN_ON(in_interrupt()); | 195 | WARN_ON(in_interrupt()); |
196 | |||
197 | /* | ||
198 | * pci_find_subsys() can be called on the ide_setup() path, super-early | ||
199 | * in boot. But the down_read() will enable local interrupts, which | ||
200 | * can cause some machines to crash. So here we detect and flag that | ||
201 | * situation and bail out early. | ||
202 | */ | ||
203 | if (unlikely(list_empty(&pci_devices))) { | ||
204 | printk(KERN_INFO "pci_find_subsys() called while pci_devices " | ||
205 | "is still empty\n"); | ||
206 | return NULL; | ||
207 | } | ||
196 | down_read(&pci_bus_sem); | 208 | down_read(&pci_bus_sem); |
197 | n = from ? from->global_list.next : pci_devices.next; | 209 | n = from ? from->global_list.next : pci_devices.next; |
198 | 210 | ||
@@ -259,6 +271,18 @@ pci_get_subsys(unsigned int vendor, unsigned int device, | |||
259 | struct pci_dev *dev; | 271 | struct pci_dev *dev; |
260 | 272 | ||
261 | WARN_ON(in_interrupt()); | 273 | WARN_ON(in_interrupt()); |
274 | |||
275 | /* | ||
276 | * pci_get_subsys() can potentially be called by drivers super-early | ||
277 | * in boot. But the down_read() will enable local interrupts, which | ||
278 | * can cause some machines to crash. So here we detect and flag that | ||
279 | * situation and bail out early. | ||
280 | */ | ||
281 | if (unlikely(list_empty(&pci_devices))) { | ||
282 | printk(KERN_NOTICE "pci_get_subsys() called while pci_devices " | ||
283 | "is still empty\n"); | ||
284 | return NULL; | ||
285 | } | ||
262 | down_read(&pci_bus_sem); | 286 | down_read(&pci_bus_sem); |
263 | n = from ? from->global_list.next : pci_devices.next; | 287 | n = from ? from->global_list.next : pci_devices.next; |
264 | 288 | ||
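From a caller's point of view the new guard means a NULL return can now also mean "PCI not enumerated yet", not only "no such device". A small caller-side sketch (vendor/device IDs are placeholders):

	/* Early-boot lookup: treat NULL as "absent or not enumerated yet". */
	static void example_early_lookup(void)
	{
		struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
						      PCI_ANY_ID, NULL);

		if (!pdev)
			return;			/* nothing to do this early */
		/* ... use the device ... */
		pci_dev_put(pdev);		/* drop the pci_get_device() reference */
	}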
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 4f654c901c64..a724ab49a797 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -33,6 +33,8 @@ | |||
33 | 33 | ||
34 | #include <asm/mach/time.h> | 34 | #include <asm/mach/time.h> |
35 | 35 | ||
36 | #include <asm/arch/at91_rtc.h> | ||
37 | |||
36 | 38 | ||
37 | #define AT91_RTC_FREQ 1 | 39 | #define AT91_RTC_FREQ 1 |
38 | #define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ | 40 | #define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ |
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index 1460f6b769f2..e7851e3739ab 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * An I2C driver for the Ricoh RS5C372 RTC | 2 | * An I2C driver for Ricoh RS5C372 and RV5C38[67] RTCs |
3 | * | 3 | * |
4 | * Copyright (C) 2005 Pavel Mironchik <pmironchik@optifacio.net> | 4 | * Copyright (C) 2005 Pavel Mironchik <pmironchik@optifacio.net> |
5 | * Copyright (C) 2006 Tower Technologies | 5 | * Copyright (C) 2006 Tower Technologies |
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/rtc.h> | 13 | #include <linux/rtc.h> |
14 | #include <linux/bcd.h> | 14 | #include <linux/bcd.h> |
15 | 15 | ||
16 | #define DRV_VERSION "0.3" | 16 | #define DRV_VERSION "0.4" |
17 | 17 | ||
18 | /* Addresses to scan */ | 18 | /* Addresses to scan */ |
19 | static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END }; | 19 | static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END }; |
@@ -21,6 +21,13 @@ static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END }; | |||
21 | /* Insmod parameters */ | 21 | /* Insmod parameters */ |
22 | I2C_CLIENT_INSMOD; | 22 | I2C_CLIENT_INSMOD; |
23 | 23 | ||
24 | |||
25 | /* | ||
26 | * Ricoh has a family of I2C based RTCs, which differ only slightly from | ||
27 | * each other. Differences center on pinout (e.g. how many interrupts, | ||
28 | * output clock, etc) and how the control registers are used. The '372 | ||
29 | * is significant only because that's the one this driver first supported. | ||
30 | */ | ||
24 | #define RS5C372_REG_SECS 0 | 31 | #define RS5C372_REG_SECS 0 |
25 | #define RS5C372_REG_MINS 1 | 32 | #define RS5C372_REG_MINS 1 |
26 | #define RS5C372_REG_HOURS 2 | 33 | #define RS5C372_REG_HOURS 2 |
@@ -29,59 +36,142 @@ I2C_CLIENT_INSMOD; | |||
29 | #define RS5C372_REG_MONTH 5 | 36 | #define RS5C372_REG_MONTH 5 |
30 | #define RS5C372_REG_YEAR 6 | 37 | #define RS5C372_REG_YEAR 6 |
31 | #define RS5C372_REG_TRIM 7 | 38 | #define RS5C372_REG_TRIM 7 |
39 | # define RS5C372_TRIM_XSL 0x80 | ||
40 | # define RS5C372_TRIM_MASK 0x7F | ||
41 | |||
42 | #define RS5C_REG_ALARM_A_MIN 8 /* or ALARM_W */ | ||
43 | #define RS5C_REG_ALARM_A_HOURS 9 | ||
44 | #define RS5C_REG_ALARM_A_WDAY 10 | ||
45 | |||
46 | #define RS5C_REG_ALARM_B_MIN 11 /* or ALARM_D */ | ||
47 | #define RS5C_REG_ALARM_B_HOURS 12 | ||
48 | #define RS5C_REG_ALARM_B_WDAY 13 /* (ALARM_B only) */ | ||
49 | |||
50 | #define RS5C_REG_CTRL1 14 | ||
51 | # define RS5C_CTRL1_AALE (1 << 7) /* or WALE */ | ||
52 | # define RS5C_CTRL1_BALE (1 << 6) /* or DALE */ | ||
53 | # define RV5C387_CTRL1_24 (1 << 5) | ||
54 | # define RS5C372A_CTRL1_SL1 (1 << 5) | ||
55 | # define RS5C_CTRL1_CT_MASK (7 << 0) | ||
56 | # define RS5C_CTRL1_CT0 (0 << 0) /* no periodic irq */ | ||
57 | # define RS5C_CTRL1_CT4 (4 << 0) /* 1 Hz level irq */ | ||
58 | #define RS5C_REG_CTRL2 15 | ||
59 | # define RS5C372_CTRL2_24 (1 << 5) | ||
60 | # define RS5C_CTRL2_XSTP (1 << 4) | ||
61 | # define RS5C_CTRL2_CTFG (1 << 2) | ||
62 | # define RS5C_CTRL2_AAFG (1 << 1) /* or WAFG */ | ||
63 | # define RS5C_CTRL2_BAFG (1 << 0) /* or DAFG */ | ||
64 | |||
65 | |||
66 | /* to read (style 1) or write registers starting at R */ | ||
67 | #define RS5C_ADDR(R) (((R) << 4) | 0) | ||
68 | |||
69 | |||
70 | enum rtc_type { | ||
71 | rtc_undef = 0, | ||
72 | rtc_rs5c372a, | ||
73 | rtc_rs5c372b, | ||
74 | rtc_rv5c386, | ||
75 | rtc_rv5c387a, | ||
76 | }; | ||
32 | 77 | ||
33 | #define RS5C372_TRIM_XSL 0x80 | 78 | /* REVISIT: this assumes that: |
34 | #define RS5C372_TRIM_MASK 0x7F | 79 | * - we're in the 21st century, so it's safe to ignore the century |
80 | * bit for rv5c38[67] (REG_MONTH bit 7); | ||
81 | * - we should use ALARM_A not ALARM_B (may be wrong on some boards) | ||
82 | */ | ||
83 | struct rs5c372 { | ||
84 | struct i2c_client *client; | ||
85 | struct rtc_device *rtc; | ||
86 | enum rtc_type type; | ||
87 | unsigned time24:1; | ||
88 | unsigned has_irq:1; | ||
89 | char buf[17]; | ||
90 | char *regs; | ||
91 | |||
92 | /* on conversion to a "new style" i2c driver, this vanishes */ | ||
93 | struct i2c_client dev; | ||
94 | }; | ||
35 | 95 | ||
36 | #define RS5C372_REG_BASE 0 | 96 | static int rs5c_get_regs(struct rs5c372 *rs5c) |
97 | { | ||
98 | struct i2c_client *client = rs5c->client; | ||
99 | struct i2c_msg msgs[] = { | ||
100 | { client->addr, I2C_M_RD, sizeof rs5c->buf, rs5c->buf }, | ||
101 | }; | ||
102 | |||
103 | /* This implements the third reading method from the datasheet, using | ||
104 | * an internal address that's reset after each transaction (by STOP) | ||
105 | * to 0x0f ... so we read extra registers, and skip the first one. | ||
106 | * | ||
107 | * The first method doesn't work with the iop3xx adapter driver, on at | ||
108 | * least 80219 chips; this works around that bug. | ||
109 | */ | ||
110 | if ((i2c_transfer(client->adapter, msgs, 1)) != 1) { | ||
111 | pr_debug("%s: can't read registers\n", rs5c->rtc->name); | ||
112 | return -EIO; | ||
113 | } | ||
37 | 114 | ||
38 | static int rs5c372_attach(struct i2c_adapter *adapter); | 115 | dev_dbg(&client->dev, |
39 | static int rs5c372_detach(struct i2c_client *client); | 116 | "%02x %02x %02x (%02x) %02x %02x %02x (%02x), " |
40 | static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind); | 117 | "%02x %02x %02x, %02x %02x %02x; %02x %02x\n", |
118 | rs5c->regs[0], rs5c->regs[1], rs5c->regs[2], rs5c->regs[3], | ||
119 | rs5c->regs[4], rs5c->regs[5], rs5c->regs[6], rs5c->regs[7], | ||
120 | rs5c->regs[8], rs5c->regs[9], rs5c->regs[10], rs5c->regs[11], | ||
121 | rs5c->regs[12], rs5c->regs[13], rs5c->regs[14], rs5c->regs[15]); | ||
41 | 122 | ||
42 | struct rs5c372 { | 123 | return 0; |
43 | u8 reg_addr; | 124 | } |
44 | u8 regs[17]; | ||
45 | struct i2c_msg msg[1]; | ||
46 | struct i2c_client client; | ||
47 | struct rtc_device *rtc; | ||
48 | }; | ||
49 | 125 | ||
50 | static struct i2c_driver rs5c372_driver = { | 126 | static unsigned rs5c_reg2hr(struct rs5c372 *rs5c, unsigned reg) |
51 | .driver = { | 127 | { |
52 | .name = "rs5c372", | 128 | unsigned hour; |
53 | }, | ||
54 | .attach_adapter = &rs5c372_attach, | ||
55 | .detach_client = &rs5c372_detach, | ||
56 | }; | ||
57 | 129 | ||
58 | static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm) | 130 | if (rs5c->time24) |
131 | return BCD2BIN(reg & 0x3f); | ||
132 | |||
133 | hour = BCD2BIN(reg & 0x1f); | ||
134 | if (hour == 12) | ||
135 | hour = 0; | ||
136 | if (reg & 0x20) | ||
137 | hour += 12; | ||
138 | return hour; | ||
139 | } | ||
140 | |||
141 | static unsigned rs5c_hr2reg(struct rs5c372 *rs5c, unsigned hour) | ||
59 | { | 142 | { |
143 | if (rs5c->time24) | ||
144 | return BIN2BCD(hour); | ||
145 | |||
146 | if (hour > 12) | ||
147 | return 0x20 | BIN2BCD(hour - 12); | ||
148 | if (hour == 12) | ||
149 | return 0x20 | BIN2BCD(12); | ||
150 | if (hour == 0) | ||
151 | return BIN2BCD(12); | ||
152 | return BIN2BCD(hour); | ||
153 | } | ||
60 | 154 | ||
61 | struct rs5c372 *rs5c372 = i2c_get_clientdata(client); | 155 | static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm) |
62 | u8 *buf = &(rs5c372->regs[1]); | 156 | { |
157 | struct rs5c372 *rs5c = i2c_get_clientdata(client); | ||
158 | int status = rs5c_get_regs(rs5c); | ||
63 | 159 | ||
64 | /* this implements the 3rd reading method, according | 160 | if (status < 0) |
65 | * to the datasheet. rs5c372 defaults to internal | 161 | return status; |
66 | * address 0xF, so 0x0 is in regs[1] | ||
67 | */ | ||
68 | 162 | ||
69 | if ((i2c_transfer(client->adapter, rs5c372->msg, 1)) != 1) { | 163 | tm->tm_sec = BCD2BIN(rs5c->regs[RS5C372_REG_SECS] & 0x7f); |
70 | dev_err(&client->dev, "%s: read error\n", __FUNCTION__); | 164 | tm->tm_min = BCD2BIN(rs5c->regs[RS5C372_REG_MINS] & 0x7f); |
71 | return -EIO; | 165 | tm->tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C372_REG_HOURS]); |
72 | } | ||
73 | 166 | ||
74 | tm->tm_sec = BCD2BIN(buf[RS5C372_REG_SECS] & 0x7f); | 167 | tm->tm_wday = BCD2BIN(rs5c->regs[RS5C372_REG_WDAY] & 0x07); |
75 | tm->tm_min = BCD2BIN(buf[RS5C372_REG_MINS] & 0x7f); | 168 | tm->tm_mday = BCD2BIN(rs5c->regs[RS5C372_REG_DAY] & 0x3f); |
76 | tm->tm_hour = BCD2BIN(buf[RS5C372_REG_HOURS] & 0x3f); | ||
77 | tm->tm_wday = BCD2BIN(buf[RS5C372_REG_WDAY] & 0x07); | ||
78 | tm->tm_mday = BCD2BIN(buf[RS5C372_REG_DAY] & 0x3f); | ||
79 | 169 | ||
80 | /* tm->tm_mon is zero-based */ | 170 | /* tm->tm_mon is zero-based */ |
81 | tm->tm_mon = BCD2BIN(buf[RS5C372_REG_MONTH] & 0x1f) - 1; | 171 | tm->tm_mon = BCD2BIN(rs5c->regs[RS5C372_REG_MONTH] & 0x1f) - 1; |
82 | 172 | ||
83 | /* year is 1900 + tm->tm_year */ | 173 | /* year is 1900 + tm->tm_year */ |
84 | tm->tm_year = BCD2BIN(buf[RS5C372_REG_YEAR]) + 100; | 174 | tm->tm_year = BCD2BIN(rs5c->regs[RS5C372_REG_YEAR]) + 100; |
85 | 175 | ||
86 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " | 176 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " |
87 | "mday=%d, mon=%d, year=%d, wday=%d\n", | 177 | "mday=%d, mon=%d, year=%d, wday=%d\n", |
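In 12-hour mode the hour register above packs a PM flag into bit 5 on top of a BCD hour in 1..12, with midnight stored as 12 AM and noon as 12 PM. A self-contained round-trip check of that encoding (plain userspace C; the local bcd helpers stand in for the kernel's BCD2BIN/BIN2BCD):

	/* Round-trip check of the 12-hour register encoding used above.
	 * Illustration only. */
	#include <assert.h>

	static unsigned bcd2bin(unsigned v) { return (v & 0x0f) + (v >> 4) * 10; }
	static unsigned bin2bcd(unsigned v) { return ((v / 10) << 4) | (v % 10); }

	static unsigned reg2hr(unsigned reg)		/* 12-hour mode only */
	{
		unsigned hour = bcd2bin(reg & 0x1f);

		if (hour == 12)
			hour = 0;
		if (reg & 0x20)				/* PM flag */
			hour += 12;
		return hour;
	}

	static unsigned hr2reg(unsigned hour)
	{
		if (hour > 12)
			return 0x20 | bin2bcd(hour - 12);
		if (hour == 12)
			return 0x20 | bin2bcd(12);	/* noon */
		if (hour == 0)
			return bin2bcd(12);		/* midnight */
		return bin2bcd(hour);
	}

	int main(void)
	{
		unsigned h;

		for (h = 0; h < 24; h++)
			assert(reg2hr(hr2reg(h)) == h);	/* 0x12 <-> 0, 0x32 <-> 12, ... */
		return 0;
	}

The driver's rs5c_reg2hr()/rs5c_hr2reg() additionally pass 24-hour values straight through when the chip is configured for 24-hour time.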
@@ -89,22 +179,25 @@ static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
89 | tm->tm_sec, tm->tm_min, tm->tm_hour, | 179 | tm->tm_sec, tm->tm_min, tm->tm_hour, |
90 | tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); | 180 | tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); |
91 | 181 | ||
92 | return 0; | 182 | /* rtc might need initialization */ |
183 | return rtc_valid_tm(tm); | ||
93 | } | 184 | } |
94 | 185 | ||
95 | static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm) | 186 | static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm) |
96 | { | 187 | { |
97 | unsigned char buf[8] = { RS5C372_REG_BASE }; | 188 | struct rs5c372 *rs5c = i2c_get_clientdata(client); |
189 | unsigned char buf[8]; | ||
98 | 190 | ||
99 | dev_dbg(&client->dev, | 191 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d " |
100 | "%s: secs=%d, mins=%d, hours=%d " | ||
101 | "mday=%d, mon=%d, year=%d, wday=%d\n", | 192 | "mday=%d, mon=%d, year=%d, wday=%d\n", |
102 | __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour, | 193 | __FUNCTION__, |
194 | tm->tm_sec, tm->tm_min, tm->tm_hour, | ||
103 | tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); | 195 | tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); |
104 | 196 | ||
197 | buf[0] = RS5C_ADDR(RS5C372_REG_SECS); | ||
105 | buf[1] = BIN2BCD(tm->tm_sec); | 198 | buf[1] = BIN2BCD(tm->tm_sec); |
106 | buf[2] = BIN2BCD(tm->tm_min); | 199 | buf[2] = BIN2BCD(tm->tm_min); |
107 | buf[3] = BIN2BCD(tm->tm_hour); | 200 | buf[3] = rs5c_hr2reg(rs5c, tm->tm_hour); |
108 | buf[4] = BIN2BCD(tm->tm_wday); | 201 | buf[4] = BIN2BCD(tm->tm_wday); |
109 | buf[5] = BIN2BCD(tm->tm_mday); | 202 | buf[5] = BIN2BCD(tm->tm_mday); |
110 | buf[6] = BIN2BCD(tm->tm_mon + 1); | 203 | buf[6] = BIN2BCD(tm->tm_mon + 1); |
@@ -118,21 +211,43 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
118 | return 0; | 211 | return 0; |
119 | } | 212 | } |
120 | 213 | ||
214 | #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) | ||
215 | #define NEED_TRIM | ||
216 | #endif | ||
217 | |||
218 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) | ||
219 | #define NEED_TRIM | ||
220 | #endif | ||
221 | |||
222 | #ifdef NEED_TRIM | ||
121 | static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim) | 223 | static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim) |
122 | { | 224 | { |
123 | struct rs5c372 *rs5c372 = i2c_get_clientdata(client); | 225 | struct rs5c372 *rs5c372 = i2c_get_clientdata(client); |
124 | u8 tmp = rs5c372->regs[RS5C372_REG_TRIM + 1]; | 226 | u8 tmp = rs5c372->regs[RS5C372_REG_TRIM]; |
125 | 227 | ||
126 | if (osc) | 228 | if (osc) |
127 | *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768; | 229 | *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768; |
128 | 230 | ||
129 | if (trim) { | 231 | if (trim) { |
130 | *trim = tmp & RS5C372_TRIM_MASK; | 232 | dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, tmp); |
131 | dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, *trim); | 233 | tmp &= RS5C372_TRIM_MASK; |
234 | if (tmp & 0x3e) { | ||
235 | int t = tmp & 0x3f; | ||
236 | |||
237 | if (tmp & 0x40) | ||
238 | t = (~t | (s8)0xc0) + 1; | ||
239 | else | ||
240 | t = t - 1; | ||
241 | |||
242 | tmp = t * 2; | ||
243 | } else | ||
244 | tmp = 0; | ||
245 | *trim = tmp; | ||
132 | } | 246 | } |
133 | 247 | ||
134 | return 0; | 248 | return 0; |
135 | } | 249 | } |
250 | #endif | ||
136 | 251 | ||
137 | static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm) | 252 | static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm) |
138 | { | 253 | { |
@@ -144,25 +259,190 @@ static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
144 | return rs5c372_set_datetime(to_i2c_client(dev), tm); | 259 | return rs5c372_set_datetime(to_i2c_client(dev), tm); |
145 | } | 260 | } |
146 | 261 | ||
262 | #if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) | ||
263 | |||
264 | static int | ||
265 | rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | ||
266 | { | ||
267 | struct i2c_client *client = to_i2c_client(dev); | ||
268 | struct rs5c372 *rs5c = i2c_get_clientdata(client); | ||
269 | unsigned char buf[2]; | ||
270 | int status; | ||
271 | |||
272 | buf[1] = rs5c->regs[RS5C_REG_CTRL1]; | ||
273 | switch (cmd) { | ||
274 | case RTC_UIE_OFF: | ||
275 | case RTC_UIE_ON: | ||
276 | 		/* some 372a modes use a different IRQ pin for 1Hz irqs */ | ||
277 | if (rs5c->type == rtc_rs5c372a | ||
278 | && (buf[1] & RS5C372A_CTRL1_SL1)) | ||
279 | return -ENOIOCTLCMD; | ||
280 | case RTC_AIE_OFF: | ||
281 | case RTC_AIE_ON: | ||
282 | /* these irq management calls only make sense for chips | ||
283 | * which are wired up to an IRQ. | ||
284 | */ | ||
285 | if (!rs5c->has_irq) | ||
286 | return -ENOIOCTLCMD; | ||
287 | break; | ||
288 | default: | ||
289 | return -ENOIOCTLCMD; | ||
290 | } | ||
291 | |||
292 | status = rs5c_get_regs(rs5c); | ||
293 | if (status < 0) | ||
294 | return status; | ||
295 | |||
296 | buf[0] = RS5C_ADDR(RS5C_REG_CTRL1); | ||
297 | switch (cmd) { | ||
298 | case RTC_AIE_OFF: /* alarm off */ | ||
299 | buf[1] &= ~RS5C_CTRL1_AALE; | ||
300 | break; | ||
301 | case RTC_AIE_ON: /* alarm on */ | ||
302 | buf[1] |= RS5C_CTRL1_AALE; | ||
303 | break; | ||
304 | case RTC_UIE_OFF: /* update off */ | ||
305 | buf[1] &= ~RS5C_CTRL1_CT_MASK; | ||
306 | break; | ||
307 | case RTC_UIE_ON: /* update on */ | ||
308 | buf[1] &= ~RS5C_CTRL1_CT_MASK; | ||
309 | buf[1] |= RS5C_CTRL1_CT4; | ||
310 | break; | ||
311 | } | ||
312 | if ((i2c_master_send(client, buf, 2)) != 2) { | ||
313 | printk(KERN_WARNING "%s: can't update alarm\n", | ||
314 | rs5c->rtc->name); | ||
315 | status = -EIO; | ||
316 | } else | ||
317 | rs5c->regs[RS5C_REG_CTRL1] = buf[1]; | ||
318 | return status; | ||
319 | } | ||
320 | |||
321 | #else | ||
322 | #define rs5c_rtc_ioctl NULL | ||
323 | #endif | ||
324 | |||
325 | |||
326 | /* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI, | ||
327 | * which only exposes a polled programming interface; and since | ||
328 | * these calls map directly to those EFI requests; we don't demand | ||
329 | * we have an IRQ for this chip when we go through this API. | ||
330 | * | ||
331 | * The older x86_pc derived RTC_ALM_{READ,SET} calls require irqs | ||
332 | * though, managed through RTC_AIE_{ON,OFF} requests. | ||
333 | */ | ||
334 | |||
335 | static int rs5c_read_alarm(struct device *dev, struct rtc_wkalrm *t) | ||
336 | { | ||
337 | struct i2c_client *client = to_i2c_client(dev); | ||
338 | struct rs5c372 *rs5c = i2c_get_clientdata(client); | ||
339 | int status; | ||
340 | |||
341 | status = rs5c_get_regs(rs5c); | ||
342 | if (status < 0) | ||
343 | return status; | ||
344 | |||
345 | /* report alarm time */ | ||
346 | t->time.tm_sec = 0; | ||
347 | t->time.tm_min = BCD2BIN(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f); | ||
348 | t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]); | ||
349 | t->time.tm_mday = -1; | ||
350 | t->time.tm_mon = -1; | ||
351 | t->time.tm_year = -1; | ||
352 | t->time.tm_wday = -1; | ||
353 | t->time.tm_yday = -1; | ||
354 | t->time.tm_isdst = -1; | ||
355 | |||
356 | /* ... and status */ | ||
357 | t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE); | ||
358 | t->pending = !!(rs5c->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_AAFG); | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t) | ||
364 | { | ||
365 | struct i2c_client *client = to_i2c_client(dev); | ||
366 | struct rs5c372 *rs5c = i2c_get_clientdata(client); | ||
367 | int status; | ||
368 | unsigned char buf[4]; | ||
369 | |||
370 | /* only handle up to 24 hours in the future, like RTC_ALM_SET */ | ||
371 | if (t->time.tm_mday != -1 | ||
372 | || t->time.tm_mon != -1 | ||
373 | || t->time.tm_year != -1) | ||
374 | return -EINVAL; | ||
375 | |||
376 | /* REVISIT: round up tm_sec */ | ||
377 | |||
378 | /* if needed, disable irq (clears pending status) */ | ||
379 | status = rs5c_get_regs(rs5c); | ||
380 | if (status < 0) | ||
381 | return status; | ||
382 | if (rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE) { | ||
383 | buf[0] = RS5C_ADDR(RS5C_REG_CTRL1); | ||
384 | buf[1] = rs5c->regs[RS5C_REG_CTRL1] & ~RS5C_CTRL1_AALE; | ||
385 | if (i2c_master_send(client, buf, 2) != 2) { | ||
386 | pr_debug("%s: can't disable alarm\n", rs5c->rtc->name); | ||
387 | return -EIO; | ||
388 | } | ||
389 | rs5c->regs[RS5C_REG_CTRL1] = buf[1]; | ||
390 | } | ||
391 | |||
392 | /* set alarm */ | ||
393 | buf[0] = RS5C_ADDR(RS5C_REG_ALARM_A_MIN); | ||
394 | buf[1] = BIN2BCD(t->time.tm_min); | ||
395 | buf[2] = rs5c_hr2reg(rs5c, t->time.tm_hour); | ||
396 | buf[3] = 0x7f; /* any/all days */ | ||
397 | if ((i2c_master_send(client, buf, 4)) != 4) { | ||
398 | pr_debug("%s: can't set alarm time\n", rs5c->rtc->name); | ||
399 | return -EIO; | ||
400 | } | ||
401 | |||
402 | /* ... and maybe enable its irq */ | ||
403 | if (t->enabled) { | ||
404 | buf[0] = RS5C_ADDR(RS5C_REG_CTRL1); | ||
405 | buf[1] = rs5c->regs[RS5C_REG_CTRL1] | RS5C_CTRL1_AALE; | ||
406 | if ((i2c_master_send(client, buf, 2)) != 2) | ||
407 | printk(KERN_WARNING "%s: can't enable alarm\n", | ||
408 | rs5c->rtc->name); | ||
409 | rs5c->regs[RS5C_REG_CTRL1] = buf[1]; | ||
410 | } | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) | ||
416 | |||
147 | static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq) | 417 | static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq) |
148 | { | 418 | { |
149 | int err, osc, trim; | 419 | int err, osc, trim; |
150 | 420 | ||
151 | err = rs5c372_get_trim(to_i2c_client(dev), &osc, &trim); | 421 | err = rs5c372_get_trim(to_i2c_client(dev), &osc, &trim); |
152 | if (err == 0) { | 422 | if (err == 0) { |
153 | seq_printf(seq, "%d.%03d KHz\n", osc / 1000, osc % 1000); | 423 | seq_printf(seq, "crystal\t\t: %d.%03d KHz\n", |
154 | seq_printf(seq, "trim\t: %d\n", trim); | 424 | osc / 1000, osc % 1000); |
425 | seq_printf(seq, "trim\t\t: %d\n", trim); | ||
155 | } | 426 | } |
156 | 427 | ||
157 | return 0; | 428 | return 0; |
158 | } | 429 | } |
159 | 430 | ||
431 | #else | ||
432 | #define rs5c372_rtc_proc NULL | ||
433 | #endif | ||
434 | |||
160 | static const struct rtc_class_ops rs5c372_rtc_ops = { | 435 | static const struct rtc_class_ops rs5c372_rtc_ops = { |
161 | .proc = rs5c372_rtc_proc, | 436 | .proc = rs5c372_rtc_proc, |
437 | .ioctl = rs5c_rtc_ioctl, | ||
162 | .read_time = rs5c372_rtc_read_time, | 438 | .read_time = rs5c372_rtc_read_time, |
163 | .set_time = rs5c372_rtc_set_time, | 439 | .set_time = rs5c372_rtc_set_time, |
440 | .read_alarm = rs5c_read_alarm, | ||
441 | .set_alarm = rs5c_set_alarm, | ||
164 | }; | 442 | }; |
165 | 443 | ||
444 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) | ||
445 | |||
166 | static ssize_t rs5c372_sysfs_show_trim(struct device *dev, | 446 | static ssize_t rs5c372_sysfs_show_trim(struct device *dev, |
167 | struct device_attribute *attr, char *buf) | 447 | struct device_attribute *attr, char *buf) |
168 | { | 448 | { |
@@ -172,7 +452,7 @@ static ssize_t rs5c372_sysfs_show_trim(struct device *dev, | |||
172 | if (err) | 452 | if (err) |
173 | return err; | 453 | return err; |
174 | 454 | ||
175 | return sprintf(buf, "0x%2x\n", trim); | 455 | return sprintf(buf, "%d\n", trim); |
176 | } | 456 | } |
177 | static DEVICE_ATTR(trim, S_IRUGO, rs5c372_sysfs_show_trim, NULL); | 457 | static DEVICE_ATTR(trim, S_IRUGO, rs5c372_sysfs_show_trim, NULL); |
178 | 458 | ||
@@ -189,16 +469,35 @@ static ssize_t rs5c372_sysfs_show_osc(struct device *dev, | |||
189 | } | 469 | } |
190 | static DEVICE_ATTR(osc, S_IRUGO, rs5c372_sysfs_show_osc, NULL); | 470 | static DEVICE_ATTR(osc, S_IRUGO, rs5c372_sysfs_show_osc, NULL); |
191 | 471 | ||
192 | static int rs5c372_attach(struct i2c_adapter *adapter) | 472 | static int rs5c_sysfs_register(struct device *dev) |
193 | { | 473 | { |
194 | return i2c_probe(adapter, &addr_data, rs5c372_probe); | 474 | int err; |
475 | |||
476 | err = device_create_file(dev, &dev_attr_trim); | ||
477 | if (err) | ||
478 | return err; | ||
479 | err = device_create_file(dev, &dev_attr_osc); | ||
480 | if (err) | ||
481 | device_remove_file(dev, &dev_attr_trim); | ||
482 | |||
483 | return err; | ||
484 | } | ||
485 | |||
486 | #else | ||
487 | static int rs5c_sysfs_register(struct device *dev) | ||
488 | { | ||
489 | return 0; | ||
195 | } | 490 | } |
491 | #endif /* SYSFS */ | ||
492 | |||
493 | static struct i2c_driver rs5c372_driver; | ||
196 | 494 | ||
197 | static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) | 495 | static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) |
198 | { | 496 | { |
199 | int err = 0; | 497 | int err = 0; |
200 | struct i2c_client *client; | 498 | struct i2c_client *client; |
201 | struct rs5c372 *rs5c372; | 499 | struct rs5c372 *rs5c372; |
500 | struct rtc_time tm; | ||
202 | 501 | ||
203 | dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__); | 502 | dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__); |
204 | 503 | ||
@@ -211,7 +510,15 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) | |||
211 | err = -ENOMEM; | 510 | err = -ENOMEM; |
212 | goto exit; | 511 | goto exit; |
213 | } | 512 | } |
214 | client = &rs5c372->client; | 513 | |
514 | /* we read registers 0x0f then 0x00-0x0f; skip the first one */ | ||
515 | rs5c372->regs=&rs5c372->buf[1]; | ||
516 | |||
517 | /* On conversion to a "new style" i2c driver, we'll be handed | ||
518 | * the i2c_client (we won't create it) | ||
519 | */ | ||
520 | client = &rs5c372->dev; | ||
521 | rs5c372->client = client; | ||
215 | 522 | ||
216 | /* I2C client */ | 523 | /* I2C client */ |
217 | client->addr = address; | 524 | client->addr = address; |
@@ -222,16 +529,99 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) | |||
222 | 529 | ||
223 | i2c_set_clientdata(client, rs5c372); | 530 | i2c_set_clientdata(client, rs5c372); |
224 | 531 | ||
225 | rs5c372->msg[0].addr = address; | ||
226 | rs5c372->msg[0].flags = I2C_M_RD; | ||
227 | rs5c372->msg[0].len = sizeof(rs5c372->regs); | ||
228 | rs5c372->msg[0].buf = rs5c372->regs; | ||
229 | |||
230 | /* Inform the i2c layer */ | 532 | /* Inform the i2c layer */ |
231 | if ((err = i2c_attach_client(client))) | 533 | if ((err = i2c_attach_client(client))) |
232 | goto exit_kfree; | 534 | goto exit_kfree; |
233 | 535 | ||
234 | dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); | 536 | err = rs5c_get_regs(rs5c372); |
537 | if (err < 0) | ||
538 | goto exit_detach; | ||
539 | |||
540 | /* For "new style" drivers, irq is in i2c_client and chip type | ||
541 | * info comes from i2c_client.dev.platform_data. Meanwhile: | ||
542 | * | ||
543 | * STICK BOARD-SPECIFIC SETUP CODE RIGHT HERE | ||
544 | */ | ||
545 | if (rs5c372->type == rtc_undef) { | ||
546 | rs5c372->type = rtc_rs5c372b; | ||
547 | dev_warn(&client->dev, "assuming rs5c372b\n"); | ||
548 | } | ||
549 | |||
550 | /* clock may be set for am/pm or 24 hr time */ | ||
551 | switch (rs5c372->type) { | ||
552 | case rtc_rs5c372a: | ||
553 | case rtc_rs5c372b: | ||
554 | /* alarm uses ALARM_A; and nINTRA on 372a, nINTR on 372b. | ||
555 | 		 * so does periodic irq, except some 372a modes. | ||
556 | */ | ||
557 | if (rs5c372->regs[RS5C_REG_CTRL2] & RS5C372_CTRL2_24) | ||
558 | rs5c372->time24 = 1; | ||
559 | break; | ||
560 | case rtc_rv5c386: | ||
561 | case rtc_rv5c387a: | ||
562 | if (rs5c372->regs[RS5C_REG_CTRL1] & RV5C387_CTRL1_24) | ||
563 | rs5c372->time24 = 1; | ||
564 | /* alarm uses ALARM_W; and nINTRB for alarm and periodic | ||
565 | * irq, on both 386 and 387 | ||
566 | */ | ||
567 | break; | ||
568 | default: | ||
569 | dev_err(&client->dev, "unknown RTC type\n"); | ||
570 | goto exit_detach; | ||
571 | } | ||
572 | |||
573 | /* if the oscillator lost power and no other software (like | ||
574 | * the bootloader) set it up, do it here. | ||
575 | */ | ||
576 | if (rs5c372->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_XSTP) { | ||
577 | unsigned char buf[3]; | ||
578 | |||
579 | rs5c372->regs[RS5C_REG_CTRL2] &= ~RS5C_CTRL2_XSTP; | ||
580 | |||
581 | buf[0] = RS5C_ADDR(RS5C_REG_CTRL1); | ||
582 | buf[1] = rs5c372->regs[RS5C_REG_CTRL1]; | ||
583 | buf[2] = rs5c372->regs[RS5C_REG_CTRL2]; | ||
584 | |||
585 | /* use 24hr mode */ | ||
586 | switch (rs5c372->type) { | ||
587 | case rtc_rs5c372a: | ||
588 | case rtc_rs5c372b: | ||
589 | buf[2] |= RS5C372_CTRL2_24; | ||
590 | rs5c372->time24 = 1; | ||
591 | break; | ||
592 | case rtc_rv5c386: | ||
593 | case rtc_rv5c387a: | ||
594 | buf[1] |= RV5C387_CTRL1_24; | ||
595 | rs5c372->time24 = 1; | ||
596 | break; | ||
597 | default: | ||
598 | /* impossible */ | ||
599 | break; | ||
600 | } | ||
601 | |||
602 | if ((i2c_master_send(client, buf, 3)) != 3) { | ||
603 | dev_err(&client->dev, "setup error\n"); | ||
604 | goto exit_detach; | ||
605 | } | ||
606 | rs5c372->regs[RS5C_REG_CTRL1] = buf[1]; | ||
607 | rs5c372->regs[RS5C_REG_CTRL2] = buf[2]; | ||
608 | } | ||
609 | |||
610 | if (rs5c372_get_datetime(client, &tm) < 0) | ||
611 | dev_warn(&client->dev, "clock needs to be set\n"); | ||
612 | |||
613 | dev_info(&client->dev, "%s found, %s, driver version " DRV_VERSION "\n", | ||
614 | ({ char *s; switch (rs5c372->type) { | ||
615 | case rtc_rs5c372a: s = "rs5c372a"; break; | ||
616 | case rtc_rs5c372b: s = "rs5c372b"; break; | ||
617 | case rtc_rv5c386: s = "rv5c386"; break; | ||
618 | case rtc_rv5c387a: s = "rv5c387a"; break; | ||
619 | default: s = "chip"; break; | ||
620 | }; s;}), | ||
621 | rs5c372->time24 ? "24hr" : "am/pm" | ||
622 | ); | ||
623 | |||
624 | /* FIXME when client->irq exists, use it to register alarm irq */ | ||
235 | 625 | ||
236 | rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name, | 626 | rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name, |
237 | &client->dev, &rs5c372_rtc_ops, THIS_MODULE); | 627 | &client->dev, &rs5c372_rtc_ops, THIS_MODULE); |
@@ -241,18 +631,12 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) | |||
241 | goto exit_detach; | 631 | goto exit_detach; |
242 | } | 632 | } |
243 | 633 | ||
244 | err = device_create_file(&client->dev, &dev_attr_trim); | 634 | err = rs5c_sysfs_register(&client->dev); |
245 | if (err) | 635 | if (err) |
246 | goto exit_devreg; | 636 | goto exit_devreg; |
247 | err = device_create_file(&client->dev, &dev_attr_osc); | ||
248 | if (err) | ||
249 | goto exit_trim; | ||
250 | 637 | ||
251 | return 0; | 638 | return 0; |
252 | 639 | ||
253 | exit_trim: | ||
254 | device_remove_file(&client->dev, &dev_attr_trim); | ||
255 | |||
256 | exit_devreg: | 640 | exit_devreg: |
257 | rtc_device_unregister(rs5c372->rtc); | 641 | rtc_device_unregister(rs5c372->rtc); |
258 | 642 | ||
@@ -266,6 +650,11 @@ exit: | |||
266 | return err; | 650 | return err; |
267 | } | 651 | } |
268 | 652 | ||
653 | static int rs5c372_attach(struct i2c_adapter *adapter) | ||
654 | { | ||
655 | return i2c_probe(adapter, &addr_data, rs5c372_probe); | ||
656 | } | ||
657 | |||
269 | static int rs5c372_detach(struct i2c_client *client) | 658 | static int rs5c372_detach(struct i2c_client *client) |
270 | { | 659 | { |
271 | int err; | 660 | int err; |
@@ -274,6 +663,8 @@ static int rs5c372_detach(struct i2c_client *client) | |||
274 | if (rs5c372->rtc) | 663 | if (rs5c372->rtc) |
275 | rtc_device_unregister(rs5c372->rtc); | 664 | rtc_device_unregister(rs5c372->rtc); |
276 | 665 | ||
666 | /* REVISIT properly destroy the sysfs files ... */ | ||
667 | |||
277 | if ((err = i2c_detach_client(client))) | 668 | if ((err = i2c_detach_client(client))) |
278 | return err; | 669 | return err; |
279 | 670 | ||
@@ -281,6 +672,14 @@ static int rs5c372_detach(struct i2c_client *client) | |||
281 | return 0; | 672 | return 0; |
282 | } | 673 | } |
283 | 674 | ||
675 | static struct i2c_driver rs5c372_driver = { | ||
676 | .driver = { | ||
677 | .name = "rtc-rs5c372", | ||
678 | }, | ||
679 | .attach_adapter = &rs5c372_attach, | ||
680 | .detach_client = &rs5c372_detach, | ||
681 | }; | ||
682 | |||
284 | static __init int rs5c372_init(void) | 683 | static __init int rs5c372_init(void) |
285 | { | 684 | { |
286 | return i2c_add_driver(&rs5c372_driver); | 685 | return i2c_add_driver(&rs5c372_driver); |
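The new read_alarm/set_alarm hooks above are reached from userspace through the RTC_WKALM_{RD,SET} ioctls; because the driver only accepts alarms within the next 24 hours, the date fields must stay at their -1 wildcards. A hedged userspace sketch (assumes this chip is registered as /dev/rtc0):

	/* Arm a 06:30 alarm via the new set_alarm hook; illustration only. */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/rtc.h>

	int main(void)
	{
		struct rtc_wkalrm alm = { .enabled = 1 };
		int fd = open("/dev/rtc0", O_RDONLY);

		if (fd < 0)
			return 1;
		alm.time.tm_hour = 6;
		alm.time.tm_min  = 30;
		alm.time.tm_sec  = 0;
		alm.time.tm_mday = -1;		/* driver expects date wildcards */
		alm.time.tm_mon  = -1;
		alm.time.tm_year = -1;
		return ioctl(fd, RTC_WKALM_SET, &alm) ? 1 : 0;
	}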
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 1678b6c757ec..a420cd099041 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c | |||
@@ -117,7 +117,7 @@ vmcp_write(struct file *file, const char __user * buff, size_t count, | |||
117 | return -ENOMEM; | 117 | return -ENOMEM; |
118 | } | 118 | } |
119 | debug_text_event(vmcp_debug, 1, cmd); | 119 | debug_text_event(vmcp_debug, 1, cmd); |
120 | session->resp_size = __cpcmd(cmd, session->response, | 120 | session->resp_size = cpcmd(cmd, session->response, |
121 | session->bufsize, | 121 | session->bufsize, |
122 | &session->resp_code); | 122 | &session->resp_code); |
123 | up(&session->mutex); | 123 | up(&session->mutex); |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index b471ac4a1bf6..ae1bf231d089 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -880,19 +880,15 @@ static void cio_reset_pgm_check_handler(void) | |||
880 | static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) | 880 | static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) |
881 | { | 881 | { |
882 | int rc; | 882 | int rc; |
883 | register struct subchannel_id reg1 asm ("1") = schid; | ||
884 | 883 | ||
885 | pgm_check_occured = 0; | 884 | pgm_check_occured = 0; |
886 | s390_reset_pgm_handler = cio_reset_pgm_check_handler; | 885 | s390_reset_pgm_handler = cio_reset_pgm_check_handler; |
886 | rc = stsch(schid, addr); | ||
887 | s390_reset_pgm_handler = NULL; | ||
887 | 888 | ||
888 | asm volatile( | 889 | /* The program check handler could have changed pgm_check_occured */ |
889 | " stsch 0(%2)\n" | 890 | barrier(); |
890 | " ipm %0\n" | ||
891 | " srl %0,28" | ||
892 | : "=d" (rc) | ||
893 | : "d" (reg1), "a" (addr), "m" (*addr) : "memory", "cc"); | ||
894 | 891 | ||
895 | s390_reset_pgm_handler = NULL; | ||
896 | if (pgm_check_occured) | 892 | if (pgm_check_occured) |
897 | return -EIO; | 893 | return -EIO; |
898 | else | 894 | else |
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 1a93fa684e9f..52625153a4f0 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig | |||
@@ -27,10 +27,7 @@ config IUCV | |||
27 | help | 27 | help |
28 | Select this option if you want to use inter-user communication | 28 | Select this option if you want to use inter-user communication |
29 | under VM or VIF. If unsure, say "Y" to enable a fast communication | 29 | under VM or VIF. If unsure, say "Y" to enable a fast communication |
30 | link between VM guests. At boot time the user ID of the guest needs | 30 | link between VM guests. |
31 | to be passed to the kernel. Note that both kernels need to be | ||
32 | compiled with this option and both need to be booted with the user ID | ||
33 | of the other VM guest. | ||
34 | 31 | ||
35 | config NETIUCV | 32 | config NETIUCV |
36 | tristate "IUCV network device support (VM only)" | 33 | tristate "IUCV network device support (VM only)" |
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index 53c358c7d368..e95c281f1e36 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h | |||
@@ -710,7 +710,7 @@ struct qeth_reply { | |||
710 | int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long); | 710 | int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long); |
711 | u32 seqno; | 711 | u32 seqno; |
712 | unsigned long offset; | 712 | unsigned long offset; |
713 | int received; | 713 | atomic_t received; |
714 | int rc; | 714 | int rc; |
715 | void *param; | 715 | void *param; |
716 | struct qeth_card *card; | 716 | struct qeth_card *card; |
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 2bde4f1fb9c2..d2efa5ff125d 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -471,7 +471,7 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
471 | channel->state == CH_STATE_UP) | 471 | channel->state == CH_STATE_UP) |
472 | qeth_issue_next_read(card); | 472 | qeth_issue_next_read(card); |
473 | 473 | ||
474 | tasklet_schedule(&channel->irq_tasklet); | 474 | qeth_irq_tasklet((unsigned long)channel); |
475 | return; | 475 | return; |
476 | out: | 476 | out: |
477 | wake_up(&card->wait_q); | 477 | wake_up(&card->wait_q); |
@@ -951,40 +951,6 @@ qeth_do_run_thread(struct qeth_card *card, unsigned long thread) | |||
951 | } | 951 | } |
952 | 952 | ||
953 | static int | 953 | static int |
954 | qeth_register_ip_addresses(void *ptr) | ||
955 | { | ||
956 | struct qeth_card *card; | ||
957 | |||
958 | card = (struct qeth_card *) ptr; | ||
959 | daemonize("qeth_reg_ip"); | ||
960 | QETH_DBF_TEXT(trace,4,"regipth1"); | ||
961 | if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD)) | ||
962 | return 0; | ||
963 | QETH_DBF_TEXT(trace,4,"regipth2"); | ||
964 | qeth_set_ip_addr_list(card); | ||
965 | qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD); | ||
966 | return 0; | ||
967 | } | ||
968 | |||
969 | /* | ||
970 | * Drive the SET_PROMISC_MODE thread | ||
971 | */ | ||
972 | static int | ||
973 | qeth_set_promisc_mode(void *ptr) | ||
974 | { | ||
975 | struct qeth_card *card = (struct qeth_card *) ptr; | ||
976 | |||
977 | daemonize("qeth_setprm"); | ||
978 | QETH_DBF_TEXT(trace,4,"setprm1"); | ||
979 | if (!qeth_do_run_thread(card, QETH_SET_PROMISC_MODE_THREAD)) | ||
980 | return 0; | ||
981 | QETH_DBF_TEXT(trace,4,"setprm2"); | ||
982 | qeth_setadp_promisc_mode(card); | ||
983 | qeth_clear_thread_running_bit(card, QETH_SET_PROMISC_MODE_THREAD); | ||
984 | return 0; | ||
985 | } | ||
986 | |||
987 | static int | ||
988 | qeth_recover(void *ptr) | 954 | qeth_recover(void *ptr) |
989 | { | 955 | { |
990 | struct qeth_card *card; | 956 | struct qeth_card *card; |
@@ -1047,11 +1013,6 @@ qeth_start_kernel_thread(struct work_struct *work) | |||
1047 | if (card->read.state != CH_STATE_UP && | 1013 | if (card->read.state != CH_STATE_UP && |
1048 | card->write.state != CH_STATE_UP) | 1014 | card->write.state != CH_STATE_UP) |
1049 | return; | 1015 | return; |
1050 | |||
1051 | if (qeth_do_start_thread(card, QETH_SET_IP_THREAD)) | ||
1052 | kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD); | ||
1053 | if (qeth_do_start_thread(card, QETH_SET_PROMISC_MODE_THREAD)) | ||
1054 | kernel_thread(qeth_set_promisc_mode, (void *)card, SIGCHLD); | ||
1055 | if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) | 1016 | if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) |
1056 | kernel_thread(qeth_recover, (void *) card, SIGCHLD); | 1017 | kernel_thread(qeth_recover, (void *) card, SIGCHLD); |
1057 | } | 1018 | } |
@@ -1074,7 +1035,7 @@ qeth_set_intial_options(struct qeth_card *card) | |||
1074 | card->options.layer2 = 1; | 1035 | card->options.layer2 = 1; |
1075 | else | 1036 | else |
1076 | card->options.layer2 = 0; | 1037 | card->options.layer2 = 0; |
1077 | card->options.performance_stats = 1; | 1038 | card->options.performance_stats = 0; |
1078 | } | 1039 | } |
1079 | 1040 | ||
1080 | /** | 1041 | /** |
@@ -1613,8 +1574,6 @@ qeth_issue_next_read(struct qeth_card *card) | |||
1613 | return -ENOMEM; | 1574 | return -ENOMEM; |
1614 | } | 1575 | } |
1615 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); | 1576 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); |
1616 | wait_event(card->wait_q, | ||
1617 | atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0); | ||
1618 | QETH_DBF_TEXT(trace, 6, "noirqpnd"); | 1577 | QETH_DBF_TEXT(trace, 6, "noirqpnd"); |
1619 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, | 1578 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, |
1620 | (addr_t) iob, 0, 0); | 1579 | (addr_t) iob, 0, 0); |
@@ -1635,6 +1594,7 @@ qeth_alloc_reply(struct qeth_card *card) | |||
1635 | reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); | 1594 | reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); |
1636 | if (reply){ | 1595 | if (reply){ |
1637 | atomic_set(&reply->refcnt, 1); | 1596 | atomic_set(&reply->refcnt, 1); |
1597 | atomic_set(&reply->received, 0); | ||
1638 | reply->card = card; | 1598 | reply->card = card; |
1639 | }; | 1599 | }; |
1640 | return reply; | 1600 | return reply; |
@@ -1655,31 +1615,6 @@ qeth_put_reply(struct qeth_reply *reply) | |||
1655 | kfree(reply); | 1615 | kfree(reply); |
1656 | } | 1616 | } |
1657 | 1617 | ||
1658 | static void | ||
1659 | qeth_cmd_timeout(unsigned long data) | ||
1660 | { | ||
1661 | struct qeth_reply *reply, *list_reply, *r; | ||
1662 | unsigned long flags; | ||
1663 | |||
1664 | reply = (struct qeth_reply *) data; | ||
1665 | spin_lock_irqsave(&reply->card->lock, flags); | ||
1666 | list_for_each_entry_safe(list_reply, r, | ||
1667 | &reply->card->cmd_waiter_list, list) { | ||
1668 | if (reply == list_reply){ | ||
1669 | qeth_get_reply(reply); | ||
1670 | list_del_init(&reply->list); | ||
1671 | spin_unlock_irqrestore(&reply->card->lock, flags); | ||
1672 | reply->rc = -ETIME; | ||
1673 | reply->received = 1; | ||
1674 | wake_up(&reply->wait_q); | ||
1675 | qeth_put_reply(reply); | ||
1676 | return; | ||
1677 | } | ||
1678 | } | ||
1679 | spin_unlock_irqrestore(&reply->card->lock, flags); | ||
1680 | } | ||
1681 | |||
1682 | |||
1683 | static struct qeth_ipa_cmd * | 1618 | static struct qeth_ipa_cmd * |
1684 | qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) | 1619 | qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) |
1685 | { | 1620 | { |
@@ -1745,7 +1680,7 @@ qeth_clear_ipacmd_list(struct qeth_card *card) | |||
1745 | list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { | 1680 | list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { |
1746 | qeth_get_reply(reply); | 1681 | qeth_get_reply(reply); |
1747 | reply->rc = -EIO; | 1682 | reply->rc = -EIO; |
1748 | reply->received = 1; | 1683 | atomic_inc(&reply->received); |
1749 | list_del_init(&reply->list); | 1684 | list_del_init(&reply->list); |
1750 | wake_up(&reply->wait_q); | 1685 | wake_up(&reply->wait_q); |
1751 | qeth_put_reply(reply); | 1686 | qeth_put_reply(reply); |
@@ -1814,7 +1749,7 @@ qeth_send_control_data_cb(struct qeth_channel *channel, | |||
1814 | &card->cmd_waiter_list); | 1749 | &card->cmd_waiter_list); |
1815 | spin_unlock_irqrestore(&card->lock, flags); | 1750 | spin_unlock_irqrestore(&card->lock, flags); |
1816 | } else { | 1751 | } else { |
1817 | reply->received = 1; | 1752 | atomic_inc(&reply->received); |
1818 | wake_up(&reply->wait_q); | 1753 | wake_up(&reply->wait_q); |
1819 | } | 1754 | } |
1820 | qeth_put_reply(reply); | 1755 | qeth_put_reply(reply); |
@@ -1858,7 +1793,7 @@ qeth_send_control_data(struct qeth_card *card, int len, | |||
1858 | int rc; | 1793 | int rc; |
1859 | unsigned long flags; | 1794 | unsigned long flags; |
1860 | struct qeth_reply *reply = NULL; | 1795 | struct qeth_reply *reply = NULL; |
1861 | struct timer_list timer; | 1796 | unsigned long timeout; |
1862 | 1797 | ||
1863 | QETH_DBF_TEXT(trace, 2, "sendctl"); | 1798 | QETH_DBF_TEXT(trace, 2, "sendctl"); |
1864 | 1799 | ||
@@ -1873,21 +1808,20 @@ qeth_send_control_data(struct qeth_card *card, int len, | |||
1873 | reply->seqno = QETH_IDX_COMMAND_SEQNO; | 1808 | reply->seqno = QETH_IDX_COMMAND_SEQNO; |
1874 | else | 1809 | else |
1875 | reply->seqno = card->seqno.ipa++; | 1810 | reply->seqno = card->seqno.ipa++; |
1876 | init_timer(&timer); | ||
1877 | timer.function = qeth_cmd_timeout; | ||
1878 | timer.data = (unsigned long) reply; | ||
1879 | init_waitqueue_head(&reply->wait_q); | 1811 | init_waitqueue_head(&reply->wait_q); |
1880 | spin_lock_irqsave(&card->lock, flags); | 1812 | spin_lock_irqsave(&card->lock, flags); |
1881 | list_add_tail(&reply->list, &card->cmd_waiter_list); | 1813 | list_add_tail(&reply->list, &card->cmd_waiter_list); |
1882 | spin_unlock_irqrestore(&card->lock, flags); | 1814 | spin_unlock_irqrestore(&card->lock, flags); |
1883 | QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); | 1815 | QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); |
1884 | wait_event(card->wait_q, | 1816 | |
1885 | atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); | 1817 | while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; |
1886 | qeth_prepare_control_data(card, len, iob); | 1818 | qeth_prepare_control_data(card, len, iob); |
1819 | |||
1887 | if (IS_IPA(iob->data)) | 1820 | if (IS_IPA(iob->data)) |
1888 | timer.expires = jiffies + QETH_IPA_TIMEOUT; | 1821 | timeout = jiffies + QETH_IPA_TIMEOUT; |
1889 | else | 1822 | else |
1890 | timer.expires = jiffies + QETH_TIMEOUT; | 1823 | timeout = jiffies + QETH_TIMEOUT; |
1824 | |||
1891 | QETH_DBF_TEXT(trace, 6, "noirqpnd"); | 1825 | QETH_DBF_TEXT(trace, 6, "noirqpnd"); |
1892 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); | 1826 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); |
1893 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, | 1827 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, |
@@ -1906,9 +1840,16 @@ qeth_send_control_data(struct qeth_card *card, int len, | |||
1906 | wake_up(&card->wait_q); | 1840 | wake_up(&card->wait_q); |
1907 | return rc; | 1841 | return rc; |
1908 | } | 1842 | } |
1909 | add_timer(&timer); | 1843 | while (!atomic_read(&reply->received)) { |
1910 | wait_event(reply->wait_q, reply->received); | 1844 | if (time_after(jiffies, timeout)) { |
1911 | del_timer_sync(&timer); | 1845 | spin_lock_irqsave(&reply->card->lock, flags); |
1846 | list_del_init(&reply->list); | ||
1847 | spin_unlock_irqrestore(&reply->card->lock, flags); | ||
1848 | reply->rc = -ETIME; | ||
1849 | atomic_inc(&reply->received); | ||
1850 | wake_up(&reply->wait_q); | ||
1851 | } | ||
1852 | }; | ||
1912 | rc = reply->rc; | 1853 | rc = reply->rc; |
1913 | qeth_put_reply(reply); | 1854 | qeth_put_reply(reply); |
1914 | return rc; | 1855 | return rc; |
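The command timeout above is now expressed as a deadline poll on jiffies instead of a kernel timer: the waiter loops until the completion flag is set or time_after() says the deadline has passed, and on timeout it unhooks the reply itself. The same pattern in isolated form (a sketch only; the function name and the 5 s budget are made up for illustration):

	/* Generic deadline poll mirroring the loop above (illustration only). */
	static int wait_for_done(atomic_t *done)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(5000);

		while (!atomic_read(done)) {
			if (time_after(jiffies, deadline))
				return -ETIME;	/* deadline passed: give up */
			cpu_relax();		/* be polite while spinning */
		}
		return 0;
	}

Note that, unlike the wait_event()/timer combination it replaces, this keeps a CPU busy for the duration of the wait.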
@@ -2466,32 +2407,17 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb, | |||
2466 | qeth_rebuild_skb_fake_ll_eth(card, skb, hdr); | 2407 | qeth_rebuild_skb_fake_ll_eth(card, skb, hdr); |
2467 | } | 2408 | } |
2468 | 2409 | ||
2469 | static inline __u16 | 2410 | static inline void |
2470 | qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | 2411 | qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, |
2471 | struct qeth_hdr *hdr) | 2412 | struct qeth_hdr *hdr) |
2472 | { | 2413 | { |
2473 | unsigned short vlan_id = 0; | ||
2474 | #ifdef CONFIG_QETH_VLAN | ||
2475 | struct vlan_hdr *vhdr; | ||
2476 | #endif | ||
2477 | |||
2478 | skb->pkt_type = PACKET_HOST; | 2414 | skb->pkt_type = PACKET_HOST; |
2479 | skb->protocol = qeth_type_trans(skb, skb->dev); | 2415 | skb->protocol = qeth_type_trans(skb, skb->dev); |
2480 | if (card->options.checksum_type == NO_CHECKSUMMING) | 2416 | if (card->options.checksum_type == NO_CHECKSUMMING) |
2481 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 2417 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2482 | else | 2418 | else |
2483 | skb->ip_summed = CHECKSUM_NONE; | 2419 | skb->ip_summed = CHECKSUM_NONE; |
2484 | #ifdef CONFIG_QETH_VLAN | ||
2485 | if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) { | ||
2486 | vhdr = (struct vlan_hdr *) skb->data; | ||
2487 | skb->protocol = | ||
2488 | __constant_htons(vhdr->h_vlan_encapsulated_proto); | ||
2489 | vlan_id = hdr->hdr.l2.vlan_id; | ||
2490 | skb_pull(skb, VLAN_HLEN); | ||
2491 | } | ||
2492 | #endif | ||
2493 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; | 2420 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; |
2494 | return vlan_id; | ||
2495 | } | 2421 | } |
2496 | 2422 | ||
2497 | static inline __u16 | 2423 | static inline __u16 |
@@ -2560,7 +2486,6 @@ qeth_process_inbound_buffer(struct qeth_card *card, | |||
2560 | int offset; | 2486 | int offset; |
2561 | int rxrc; | 2487 | int rxrc; |
2562 | __u16 vlan_tag = 0; | 2488 | __u16 vlan_tag = 0; |
2563 | __u16 *vlan_addr; | ||
2564 | 2489 | ||
2565 | /* get first element of current buffer */ | 2490 | /* get first element of current buffer */ |
2566 | element = (struct qdio_buffer_element *)&buf->buffer->element[0]; | 2491 | element = (struct qdio_buffer_element *)&buf->buffer->element[0]; |
@@ -2571,7 +2496,7 @@ qeth_process_inbound_buffer(struct qeth_card *card, | |||
2571 | &offset, &hdr))) { | 2496 | &offset, &hdr))) { |
2572 | skb->dev = card->dev; | 2497 | skb->dev = card->dev; |
2573 | if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) | 2498 | if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) |
2574 | vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); | 2499 | qeth_layer2_rebuild_skb(card, skb, hdr); |
2575 | else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) | 2500 | else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) |
2576 | vlan_tag = qeth_rebuild_skb(card, skb, hdr); | 2501 | vlan_tag = qeth_rebuild_skb(card, skb, hdr); |
2577 | else { /*in case of OSN*/ | 2502 | else { /*in case of OSN*/ |
@@ -3968,13 +3893,22 @@ static inline struct sk_buff * | |||
3968 | qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, | 3893 | qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, |
3969 | struct qeth_hdr **hdr, int ipv) | 3894 | struct qeth_hdr **hdr, int ipv) |
3970 | { | 3895 | { |
3971 | struct sk_buff *new_skb; | 3896 | struct sk_buff *new_skb, *new_skb2; |
3972 | 3897 | ||
3973 | QETH_DBF_TEXT(trace, 6, "prepskb"); | 3898 | QETH_DBF_TEXT(trace, 6, "prepskb"); |
3974 | 3899 | new_skb = skb; | |
3975 | new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr)); | 3900 | new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC); |
3976 | if (new_skb == NULL) | 3901 | if (!new_skb) |
3902 | return NULL; | ||
3903 | new_skb2 = qeth_realloc_headroom(card, new_skb, | ||
3904 | sizeof(struct qeth_hdr)); | ||
3905 | if (!new_skb2) { | ||
3906 | __qeth_free_new_skb(skb, new_skb); | ||
3977 | return NULL; | 3907 | return NULL; |
3908 | } | ||
3909 | if (new_skb != skb) | ||
3910 | __qeth_free_new_skb(new_skb2, new_skb); | ||
3911 | new_skb = new_skb2; | ||
3978 | *hdr = __qeth_prepare_skb(card, new_skb, ipv); | 3912 | *hdr = __qeth_prepare_skb(card, new_skb, ipv); |
3979 | if (*hdr == NULL) { | 3913 | if (*hdr == NULL) { |
3980 | __qeth_free_new_skb(skb, new_skb); | 3914 | __qeth_free_new_skb(skb, new_skb); |
@@ -4844,9 +4778,11 @@ qeth_arp_query(struct qeth_card *card, char __user *udata) | |||
4844 | "(0x%x/%d)\n", | 4778 | "(0x%x/%d)\n", |
4845 | QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc), | 4779 | QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc), |
4846 | tmp, tmp); | 4780 | tmp, tmp); |
4847 | copy_to_user(udata, qinfo.udata, 4); | 4781 | if (copy_to_user(udata, qinfo.udata, 4)) |
4782 | rc = -EFAULT; | ||
4848 | } else { | 4783 | } else { |
4849 | copy_to_user(udata, qinfo.udata, qinfo.udata_len); | 4784 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) |
4785 | rc = -EFAULT; | ||
4850 | } | 4786 | } |
4851 | kfree(qinfo.udata); | 4787 | kfree(qinfo.udata); |
4852 | return rc; | 4788 | return rc; |
@@ -4992,8 +4928,10 @@ qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
4992 | if (rc) | 4928 | if (rc) |
4993 | PRINT_WARN("SNMP command failed on %s: (0x%x)\n", | 4929 | PRINT_WARN("SNMP command failed on %s: (0x%x)\n", |
4994 | QETH_CARD_IFNAME(card), rc); | 4930 | QETH_CARD_IFNAME(card), rc); |
4995 | else | 4931 | else { |
4996 | copy_to_user(udata, qinfo.udata, qinfo.udata_len); | 4932 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) |
4933 | rc = -EFAULT; | ||
4934 | } | ||
4997 | 4935 | ||
4998 | kfree(ureq); | 4936 | kfree(ureq); |
4999 | kfree(qinfo.udata); | 4937 | kfree(qinfo.udata); |
@@ -5544,12 +5482,10 @@ qeth_set_multicast_list(struct net_device *dev) | |||
5544 | qeth_add_multicast_ipv6(card); | 5482 | qeth_add_multicast_ipv6(card); |
5545 | #endif | 5483 | #endif |
5546 | out: | 5484 | out: |
5547 | if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) | 5485 | qeth_set_ip_addr_list(card); |
5548 | schedule_work(&card->kernel_thread_starter); | ||
5549 | if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) | 5486 | if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) |
5550 | return; | 5487 | return; |
5551 | if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0) | 5488 | qeth_setadp_promisc_mode(card); |
5552 | schedule_work(&card->kernel_thread_starter); | ||
5553 | } | 5489 | } |
5554 | 5490 | ||
5555 | static int | 5491 | static int |
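
This hunk, like the later vipa/rxip/ip_event ones, drops the "set a thread-start bit and schedule the kernel-thread starter" indirection and calls qeth_set_ip_addr_list()/qeth_setadp_promisc_mode() directly. For context, a minimal sketch of the deferred-work idiom being removed, written against the current two-argument workqueue API (the qeth thread-starter plumbing itself is not shown and the names below are illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_card {
	struct work_struct ip_work;
};

static void my_ip_worker(struct work_struct *work)
{
	struct my_card *card = container_of(work, struct my_card, ip_work);

	/* recalculate and register the IP address list here */
	(void)card;
}

static void my_card_init(struct my_card *card)
{
	INIT_WORK(&card->ip_work, my_ip_worker);
}

/* deferred style (removed above):  schedule_work(&card->ip_work);
 * direct style (new):              call the list-update helper in place. */
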
@@ -6351,6 +6287,42 @@ static struct ethtool_ops qeth_ethtool_ops = { | |||
6351 | }; | 6287 | }; |
6352 | 6288 | ||
6353 | static int | 6289 | static int |
6290 | qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr) | ||
6291 | { | ||
6292 | struct qeth_card *card; | ||
6293 | struct ethhdr *eth; | ||
6294 | |||
6295 | card = qeth_get_card_from_dev(skb->dev); | ||
6296 | if (card->options.layer2) | ||
6297 | goto haveheader; | ||
6298 | #ifdef CONFIG_QETH_IPV6 | ||
6299 | /* because of the manipulated ARP constructor and the ARP | ||
6300 | flag for OSAE devices we have some nasty exceptions */ | ||
6301 | if (card->info.type == QETH_CARD_TYPE_OSAE) { | ||
6302 | if (!card->options.fake_ll) { | ||
6303 | if ((skb->pkt_type==PACKET_OUTGOING) && | ||
6304 | (skb->protocol==ETH_P_IPV6)) | ||
6305 | goto haveheader; | ||
6306 | else | ||
6307 | return 0; | ||
6308 | } else { | ||
6309 | if ((skb->pkt_type==PACKET_OUTGOING) && | ||
6310 | (skb->protocol==ETH_P_IP)) | ||
6311 | return 0; | ||
6312 | else | ||
6313 | goto haveheader; | ||
6314 | } | ||
6315 | } | ||
6316 | #endif | ||
6317 | if (!card->options.fake_ll) | ||
6318 | return 0; | ||
6319 | haveheader: | ||
6320 | eth = eth_hdr(skb); | ||
6321 | memcpy(haddr, eth->h_source, ETH_ALEN); | ||
6322 | return ETH_ALEN; | ||
6323 | } | ||
6324 | |||
6325 | static int | ||
6354 | qeth_netdev_init(struct net_device *dev) | 6326 | qeth_netdev_init(struct net_device *dev) |
6355 | { | 6327 | { |
6356 | struct qeth_card *card; | 6328 | struct qeth_card *card; |
@@ -6388,7 +6360,10 @@ qeth_netdev_init(struct net_device *dev) | |||
6388 | if (card->options.fake_ll && | 6360 | if (card->options.fake_ll && |
6389 | (qeth_get_netdev_flags(card) & IFF_NOARP)) | 6361 | (qeth_get_netdev_flags(card) & IFF_NOARP)) |
6390 | dev->hard_header = qeth_fake_header; | 6362 | dev->hard_header = qeth_fake_header; |
6391 | dev->hard_header_parse = NULL; | 6363 | if (dev->type == ARPHRD_IEEE802_TR) |
6364 | dev->hard_header_parse = NULL; | ||
6365 | else | ||
6366 | dev->hard_header_parse = qeth_hard_header_parse; | ||
6392 | dev->set_mac_address = qeth_layer2_set_mac_address; | 6367 | dev->set_mac_address = qeth_layer2_set_mac_address; |
6393 | dev->flags |= qeth_get_netdev_flags(card); | 6368 | dev->flags |= qeth_get_netdev_flags(card); |
6394 | if ((card->options.fake_broadcast) || | 6369 | if ((card->options.fake_broadcast) || |
@@ -8235,8 +8210,7 @@ qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
8235 | } | 8210 | } |
8236 | if (!qeth_add_ip(card, ipaddr)) | 8211 | if (!qeth_add_ip(card, ipaddr)) |
8237 | kfree(ipaddr); | 8212 | kfree(ipaddr); |
8238 | if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) | 8213 | qeth_set_ip_addr_list(card); |
8239 | schedule_work(&card->kernel_thread_starter); | ||
8240 | return rc; | 8214 | return rc; |
8241 | } | 8215 | } |
8242 | 8216 | ||
@@ -8264,8 +8238,7 @@ qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
8264 | return; | 8238 | return; |
8265 | if (!qeth_delete_ip(card, ipaddr)) | 8239 | if (!qeth_delete_ip(card, ipaddr)) |
8266 | kfree(ipaddr); | 8240 | kfree(ipaddr); |
8267 | if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) | 8241 | qeth_set_ip_addr_list(card); |
8268 | schedule_work(&card->kernel_thread_starter); | ||
8269 | } | 8242 | } |
8270 | 8243 | ||
8271 | /* | 8244 | /* |
@@ -8308,8 +8281,7 @@ qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
8308 | } | 8281 | } |
8309 | if (!qeth_add_ip(card, ipaddr)) | 8282 | if (!qeth_add_ip(card, ipaddr)) |
8310 | kfree(ipaddr); | 8283 | kfree(ipaddr); |
8311 | if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) | 8284 | qeth_set_ip_addr_list(card); |
8312 | schedule_work(&card->kernel_thread_starter); | ||
8313 | return 0; | 8285 | return 0; |
8314 | } | 8286 | } |
8315 | 8287 | ||
@@ -8337,8 +8309,7 @@ qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
8337 | return; | 8309 | return; |
8338 | if (!qeth_delete_ip(card, ipaddr)) | 8310 | if (!qeth_delete_ip(card, ipaddr)) |
8339 | kfree(ipaddr); | 8311 | kfree(ipaddr); |
8340 | if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) | 8312 | qeth_set_ip_addr_list(card); |
8341 | schedule_work(&card->kernel_thread_starter); | ||
8342 | } | 8313 | } |
8343 | 8314 | ||
8344 | /** | 8315 | /** |
@@ -8380,8 +8351,7 @@ qeth_ip_event(struct notifier_block *this, | |||
8380 | default: | 8351 | default: |
8381 | break; | 8352 | break; |
8382 | } | 8353 | } |
8383 | if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) | 8354 | qeth_set_ip_addr_list(card); |
8384 | schedule_work(&card->kernel_thread_starter); | ||
8385 | out: | 8355 | out: |
8386 | return NOTIFY_DONE; | 8356 | return NOTIFY_DONE; |
8387 | } | 8357 | } |
@@ -8433,8 +8403,7 @@ qeth_ip6_event(struct notifier_block *this, | |||
8433 | default: | 8403 | default: |
8434 | break; | 8404 | break; |
8435 | } | 8405 | } |
8436 | if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) | 8406 | qeth_set_ip_addr_list(card); |
8437 | schedule_work(&card->kernel_thread_starter); | ||
8438 | out: | 8407 | out: |
8439 | return NOTIFY_DONE; | 8408 | return NOTIFY_DONE; |
8440 | } | 8409 | } |
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index 9d11a75663e6..3c4b6c243712 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c | |||
@@ -789,7 +789,9 @@ static struct console mpc52xx_console = { | |||
789 | static int __init | 789 | static int __init |
790 | mpc52xx_console_init(void) | 790 | mpc52xx_console_init(void) |
791 | { | 791 | { |
792 | #if defined(CONFIG_PPC_MERGE) | ||
792 | mpc52xx_uart_of_enumerate(); | 793 | mpc52xx_uart_of_enumerate(); |
794 | #endif | ||
793 | register_console(&mpc52xx_console); | 795 | register_console(&mpc52xx_console); |
794 | return 0; | 796 | return 0; |
795 | } | 797 | } |
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 24ee8be359f5..6377db1b446d 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c | |||
@@ -217,6 +217,7 @@ static const struct quirk_printer_struct quirk_printers[] = { | |||
217 | { 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */ | 217 | { 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */ |
218 | { 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */ | 218 | { 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */ |
219 | { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */ | 219 | { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */ |
220 | { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */ | ||
220 | { 0, 0 } | 221 | { 0, 0 } |
221 | }; | 222 | }; |
222 | 223 | ||
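
The entry added above extends usblp's quirk table, which is keyed on USB vendor/product IDs and terminated by an all-zero row. A hedged sketch of how such a table is typically scanned (the struct and function names here are illustrative, not usblp's own):

#include <linux/types.h>

struct printer_quirk {			/* illustrative mirror of quirk_printer_struct */
	__u16 vendor, product;
	unsigned int quirks;
};

static unsigned int lookup_quirks(const struct printer_quirk *q,
				  __u16 vendor, __u16 product)
{
	for (; q->vendor; q++)		/* table ends with a { 0, 0 } entry */
		if (q->vendor == vendor && q->product == product)
			return q->quirks;
	return 0;
}
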
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c index c505b767cee1..5e628ae3aec7 100644 --- a/drivers/usb/core/endpoint.c +++ b/drivers/usb/core/endpoint.c | |||
@@ -268,6 +268,7 @@ static void ep_device_release(struct device *dev) | |||
268 | struct ep_device *ep_dev = to_ep_device(dev); | 268 | struct ep_device *ep_dev = to_ep_device(dev); |
269 | 269 | ||
270 | dev_dbg(dev, "%s called for %s\n", __FUNCTION__, dev->bus_id); | 270 | dev_dbg(dev, "%s called for %s\n", __FUNCTION__, dev->bus_id); |
271 | endpoint_free_minor(ep_dev); | ||
271 | kfree(ep_dev); | 272 | kfree(ep_dev); |
272 | } | 273 | } |
273 | 274 | ||
@@ -349,7 +350,6 @@ void usb_remove_ep_files(struct usb_host_endpoint *endpoint) | |||
349 | sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress); | 350 | sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress); |
350 | sysfs_remove_link(&ep_dev->dev.parent->kobj, name); | 351 | sysfs_remove_link(&ep_dev->dev.parent->kobj, name); |
351 | sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp); | 352 | sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp); |
352 | endpoint_free_minor(ep_dev); | ||
353 | device_unregister(&ep_dev->dev); | 353 | device_unregister(&ep_dev->dev); |
354 | endpoint->ep_dev = NULL; | 354 | endpoint->ep_dev = NULL; |
355 | destroy_endpoint_class(); | 355 | destroy_endpoint_class(); |
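
The endpoint.c change moves the minor-number release out of the remove path and into the struct device release() callback, which the driver core only invokes once the last reference is dropped. A minimal sketch of that ordering with illustrative names (not the usbcore types):

#include <linux/device.h>
#include <linux/slab.h>

struct widget {
	struct device dev;
	int minor;
};

static void widget_release(struct device *dev)
{
	struct widget *w = container_of(dev, struct widget, dev);

	/* give back w->minor here: no one holds a reference any more */
	kfree(w);
}

static void widget_destroy(struct widget *w)
{
	device_unregister(&w->dev);	/* drops a ref; widget_release() may run later */
}
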
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 15d77c307930..cdcfd42843d4 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/usb_gadget.h> | 42 | #include <linux/usb_gadget.h> |
43 | #include <linux/usb/otg.h> | 43 | #include <linux/usb/otg.h> |
44 | #include <linux/dma-mapping.h> | 44 | #include <linux/dma-mapping.h> |
45 | #include <linux/clk.h> | ||
45 | 46 | ||
46 | #include <asm/byteorder.h> | 47 | #include <asm/byteorder.h> |
47 | #include <asm/io.h> | 48 | #include <asm/io.h> |
@@ -60,6 +61,11 @@ | |||
60 | /* bulk DMA seems to be behaving for both IN and OUT */ | 61 | /* bulk DMA seems to be behaving for both IN and OUT */ |
61 | #define USE_DMA | 62 | #define USE_DMA |
62 | 63 | ||
64 | /* FIXME: OMAP2 currently has some problem in DMA mode */ | ||
65 | #ifdef CONFIG_ARCH_OMAP2 | ||
66 | #undef USE_DMA | ||
67 | #endif | ||
68 | |||
63 | /* ISO too */ | 69 | /* ISO too */ |
64 | #define USE_ISO | 70 | #define USE_ISO |
65 | 71 | ||
@@ -99,7 +105,7 @@ static unsigned fifo_mode = 0; | |||
99 | * boot parameter "omap_udc:fifo_mode=42" | 105 | * boot parameter "omap_udc:fifo_mode=42" |
100 | */ | 106 | */ |
101 | module_param (fifo_mode, uint, 0); | 107 | module_param (fifo_mode, uint, 0); |
102 | MODULE_PARM_DESC (fifo_mode, "endpoint setup (0 == default)"); | 108 | MODULE_PARM_DESC (fifo_mode, "endpoint configuration"); |
103 | 109 | ||
104 | #ifdef USE_DMA | 110 | #ifdef USE_DMA |
105 | static unsigned use_dma = 1; | 111 | static unsigned use_dma = 1; |
@@ -122,7 +128,7 @@ static const char driver_desc [] = DRIVER_DESC; | |||
122 | /*-------------------------------------------------------------------------*/ | 128 | /*-------------------------------------------------------------------------*/ |
123 | 129 | ||
124 | /* there's a notion of "current endpoint" for modifying endpoint | 130 | /* there's a notion of "current endpoint" for modifying endpoint |
125 | * state, and PIO access to its FIFO. | 131 | * state, and PIO access to its FIFO. |
126 | */ | 132 | */ |
127 | 133 | ||
128 | static void use_ep(struct omap_ep *ep, u16 select) | 134 | static void use_ep(struct omap_ep *ep, u16 select) |
@@ -391,7 +397,7 @@ done(struct omap_ep *ep, struct omap_req *req, int status) | |||
391 | #define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY) | 397 | #define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY) |
392 | #define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY) | 398 | #define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY) |
393 | 399 | ||
394 | static inline int | 400 | static inline int |
395 | write_packet(u8 *buf, struct omap_req *req, unsigned max) | 401 | write_packet(u8 *buf, struct omap_req *req, unsigned max) |
396 | { | 402 | { |
397 | unsigned len; | 403 | unsigned len; |
@@ -456,7 +462,7 @@ static int write_fifo(struct omap_ep *ep, struct omap_req *req) | |||
456 | return is_last; | 462 | return is_last; |
457 | } | 463 | } |
458 | 464 | ||
459 | static inline int | 465 | static inline int |
460 | read_packet(u8 *buf, struct omap_req *req, unsigned avail) | 466 | read_packet(u8 *buf, struct omap_req *req, unsigned avail) |
461 | { | 467 | { |
462 | unsigned len; | 468 | unsigned len; |
@@ -542,9 +548,9 @@ static inline dma_addr_t dma_csac(unsigned lch) | |||
542 | /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is | 548 | /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is |
543 | * read before the DMA controller finished disabling the channel. | 549 | * read before the DMA controller finished disabling the channel. |
544 | */ | 550 | */ |
545 | csac = omap_readw(OMAP_DMA_CSAC(lch)); | 551 | csac = OMAP_DMA_CSAC_REG(lch); |
546 | if (csac == 0) | 552 | if (csac == 0) |
547 | csac = omap_readw(OMAP_DMA_CSAC(lch)); | 553 | csac = OMAP_DMA_CSAC_REG(lch); |
548 | return csac; | 554 | return csac; |
549 | } | 555 | } |
550 | 556 | ||
@@ -555,9 +561,9 @@ static inline dma_addr_t dma_cdac(unsigned lch) | |||
555 | /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is | 561 | /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is |
556 | * read before the DMA controller finished disabling the channel. | 562 | * read before the DMA controller finished disabling the channel. |
557 | */ | 563 | */ |
558 | cdac = omap_readw(OMAP_DMA_CDAC(lch)); | 564 | cdac = OMAP_DMA_CDAC_REG(lch); |
559 | if (cdac == 0) | 565 | if (cdac == 0) |
560 | cdac = omap_readw(OMAP_DMA_CDAC(lch)); | 566 | cdac = OMAP_DMA_CDAC_REG(lch); |
561 | return cdac; | 567 | return cdac; |
562 | } | 568 | } |
563 | 569 | ||
@@ -582,7 +588,7 @@ static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start) | |||
582 | } | 588 | } |
583 | 589 | ||
584 | #define DMA_DEST_LAST(x) (cpu_is_omap15xx() \ | 590 | #define DMA_DEST_LAST(x) (cpu_is_omap15xx() \ |
585 | ? omap_readw(OMAP_DMA_CSAC(x)) /* really: CPC */ \ | 591 | ? OMAP_DMA_CSAC_REG(x) /* really: CPC */ \ |
586 | : dma_cdac(x)) | 592 | : dma_cdac(x)) |
587 | 593 | ||
588 | static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start) | 594 | static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start) |
@@ -620,17 +626,19 @@ static void next_in_dma(struct omap_ep *ep, struct omap_req *req) | |||
620 | || (cpu_is_omap15xx() && length < ep->maxpacket)) { | 626 | || (cpu_is_omap15xx() && length < ep->maxpacket)) { |
621 | txdma_ctrl = UDC_TXN_EOT | length; | 627 | txdma_ctrl = UDC_TXN_EOT | length; |
622 | omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8, | 628 | omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8, |
623 | length, 1, sync_mode); | 629 | length, 1, sync_mode, 0, 0); |
624 | } else { | 630 | } else { |
625 | length = min(length / ep->maxpacket, | 631 | length = min(length / ep->maxpacket, |
626 | (unsigned) UDC_TXN_TSC + 1); | 632 | (unsigned) UDC_TXN_TSC + 1); |
627 | txdma_ctrl = length; | 633 | txdma_ctrl = length; |
628 | omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, | 634 | omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, |
629 | ep->ep.maxpacket >> 1, length, sync_mode); | 635 | ep->ep.maxpacket >> 1, length, sync_mode, |
636 | 0, 0); | ||
630 | length *= ep->maxpacket; | 637 | length *= ep->maxpacket; |
631 | } | 638 | } |
632 | omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF, | 639 | omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF, |
633 | OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual); | 640 | OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual, |
641 | 0, 0); | ||
634 | 642 | ||
635 | omap_start_dma(ep->lch); | 643 | omap_start_dma(ep->lch); |
636 | ep->dma_counter = dma_csac(ep->lch); | 644 | ep->dma_counter = dma_csac(ep->lch); |
@@ -675,9 +683,11 @@ static void next_out_dma(struct omap_ep *ep, struct omap_req *req) | |||
675 | req->dma_bytes = packets * ep->ep.maxpacket; | 683 | req->dma_bytes = packets * ep->ep.maxpacket; |
676 | omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, | 684 | omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, |
677 | ep->ep.maxpacket >> 1, packets, | 685 | ep->ep.maxpacket >> 1, packets, |
678 | OMAP_DMA_SYNC_ELEMENT); | 686 | OMAP_DMA_SYNC_ELEMENT, |
687 | 0, 0); | ||
679 | omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF, | 688 | omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF, |
680 | OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual); | 689 | OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual, |
690 | 0, 0); | ||
681 | ep->dma_counter = DMA_DEST_LAST(ep->lch); | 691 | ep->dma_counter = DMA_DEST_LAST(ep->lch); |
682 | 692 | ||
683 | UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1); | 693 | UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1); |
@@ -820,7 +830,8 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel) | |||
820 | omap_set_dma_dest_params(ep->lch, | 830 | omap_set_dma_dest_params(ep->lch, |
821 | OMAP_DMA_PORT_TIPB, | 831 | OMAP_DMA_PORT_TIPB, |
822 | OMAP_DMA_AMODE_CONSTANT, | 832 | OMAP_DMA_AMODE_CONSTANT, |
823 | (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG)); | 833 | (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG), |
834 | 0, 0); | ||
824 | } | 835 | } |
825 | } else { | 836 | } else { |
826 | status = omap_request_dma(OMAP_DMA_USB_W2FC_RX0 - 1 + channel, | 837 | status = omap_request_dma(OMAP_DMA_USB_W2FC_RX0 - 1 + channel, |
@@ -831,7 +842,8 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel) | |||
831 | omap_set_dma_src_params(ep->lch, | 842 | omap_set_dma_src_params(ep->lch, |
832 | OMAP_DMA_PORT_TIPB, | 843 | OMAP_DMA_PORT_TIPB, |
833 | OMAP_DMA_AMODE_CONSTANT, | 844 | OMAP_DMA_AMODE_CONSTANT, |
834 | (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG)); | 845 | (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG), |
846 | 0, 0); | ||
835 | /* EMIFF */ | 847 | /* EMIFF */ |
836 | omap_set_dma_dest_burst_mode(ep->lch, | 848 | omap_set_dma_dest_burst_mode(ep->lch, |
837 | OMAP_DMA_DATA_BURST_4); | 849 | OMAP_DMA_DATA_BURST_4); |
@@ -846,7 +858,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel) | |||
846 | 858 | ||
847 | /* channel type P: hw synch (fifo) */ | 859 | /* channel type P: hw synch (fifo) */ |
848 | if (!cpu_is_omap15xx()) | 860 | if (!cpu_is_omap15xx()) |
849 | omap_writew(2, OMAP_DMA_LCH_CTRL(ep->lch)); | 861 | OMAP1_DMA_LCH_CTRL_REG(ep->lch) = 2; |
850 | } | 862 | } |
851 | 863 | ||
852 | just_restart: | 864 | just_restart: |
@@ -893,7 +905,7 @@ static void dma_channel_release(struct omap_ep *ep) | |||
893 | else | 905 | else |
894 | req = NULL; | 906 | req = NULL; |
895 | 907 | ||
896 | active = ((1 << 7) & omap_readl(OMAP_DMA_CCR(ep->lch))) != 0; | 908 | active = ((1 << 7) & OMAP_DMA_CCR_REG(ep->lch)) != 0; |
897 | 909 | ||
898 | DBG("%s release %s %cxdma%d %p\n", ep->ep.name, | 910 | DBG("%s release %s %cxdma%d %p\n", ep->ep.name, |
899 | active ? "active" : "idle", | 911 | active ? "active" : "idle", |
@@ -1117,7 +1129,7 @@ static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) | |||
1117 | */ | 1129 | */ |
1118 | dma_channel_release(ep); | 1130 | dma_channel_release(ep); |
1119 | dma_channel_claim(ep, channel); | 1131 | dma_channel_claim(ep, channel); |
1120 | } else | 1132 | } else |
1121 | done(ep, req, -ECONNRESET); | 1133 | done(ep, req, -ECONNRESET); |
1122 | spin_unlock_irqrestore(&ep->udc->lock, flags); | 1134 | spin_unlock_irqrestore(&ep->udc->lock, flags); |
1123 | return 0; | 1135 | return 0; |
@@ -1153,7 +1165,7 @@ static int omap_ep_set_halt(struct usb_ep *_ep, int value) | |||
1153 | 1165 | ||
1154 | /* IN endpoints must already be idle */ | 1166 | /* IN endpoints must already be idle */ |
1155 | if ((ep->bEndpointAddress & USB_DIR_IN) | 1167 | if ((ep->bEndpointAddress & USB_DIR_IN) |
1156 | && !list_empty(&ep->queue)) { | 1168 | && !list_empty(&ep->queue)) { |
1157 | status = -EAGAIN; | 1169 | status = -EAGAIN; |
1158 | goto done; | 1170 | goto done; |
1159 | } | 1171 | } |
@@ -1298,6 +1310,23 @@ static void pullup_disable(struct omap_udc *udc) | |||
1298 | UDC_SYSCON1_REG &= ~UDC_PULLUP_EN; | 1310 | UDC_SYSCON1_REG &= ~UDC_PULLUP_EN; |
1299 | } | 1311 | } |
1300 | 1312 | ||
1313 | static struct omap_udc *udc; | ||
1314 | |||
1315 | static void omap_udc_enable_clock(int enable) | ||
1316 | { | ||
1317 | if (udc == NULL || udc->dc_clk == NULL || udc->hhc_clk == NULL) | ||
1318 | return; | ||
1319 | |||
1320 | if (enable) { | ||
1321 | clk_enable(udc->dc_clk); | ||
1322 | clk_enable(udc->hhc_clk); | ||
1323 | udelay(100); | ||
1324 | } else { | ||
1325 | clk_disable(udc->hhc_clk); | ||
1326 | clk_disable(udc->dc_clk); | ||
1327 | } | ||
1328 | } | ||
1329 | |||
1301 | /* | 1330 | /* |
1302 | * Called by whatever detects VBUS sessions: external transceiver | 1331 | * Called by whatever detects VBUS sessions: external transceiver |
1303 | * driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock. | 1332 | * driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock. |
@@ -1318,10 +1347,22 @@ static int omap_vbus_session(struct usb_gadget *gadget, int is_active) | |||
1318 | else | 1347 | else |
1319 | FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510; | 1348 | FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510; |
1320 | } | 1349 | } |
1350 | if (udc->dc_clk != NULL && is_active) { | ||
1351 | if (!udc->clk_requested) { | ||
1352 | omap_udc_enable_clock(1); | ||
1353 | udc->clk_requested = 1; | ||
1354 | } | ||
1355 | } | ||
1321 | if (can_pullup(udc)) | 1356 | if (can_pullup(udc)) |
1322 | pullup_enable(udc); | 1357 | pullup_enable(udc); |
1323 | else | 1358 | else |
1324 | pullup_disable(udc); | 1359 | pullup_disable(udc); |
1360 | if (udc->dc_clk != NULL && !is_active) { | ||
1361 | if (udc->clk_requested) { | ||
1362 | omap_udc_enable_clock(0); | ||
1363 | udc->clk_requested = 0; | ||
1364 | } | ||
1365 | } | ||
1325 | spin_unlock_irqrestore(&udc->lock, flags); | 1366 | spin_unlock_irqrestore(&udc->lock, flags); |
1326 | return 0; | 1367 | return 0; |
1327 | } | 1368 | } |
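
The hunks above add explicit clock gating around VBUS sessions: the clocks are looked up once at probe time and then enabled or disabled from a single helper. A hedged sketch of that usage of the clk API, using the usb_dc_ck/usb_hhc_ck names from the 16xx branch of this patch (everything else here is illustrative):

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct clk *dc_clk, *hhc_clk;

static int udc_clocks_get(struct device *dev)
{
	dc_clk = clk_get(dev, "usb_dc_ck");
	hhc_clk = clk_get(dev, "usb_hhc_ck");
	if (IS_ERR(dc_clk) || IS_ERR(hhc_clk))
		return -ENODEV;
	return 0;
}

static void udc_clocks_enable(int enable)
{
	if (enable) {
		clk_enable(dc_clk);
		clk_enable(hhc_clk);
		udelay(100);		/* let the clocks settle, as the driver does */
	} else {
		clk_disable(hhc_clk);
		clk_disable(dc_clk);
	}
}
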
@@ -1441,7 +1482,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src) | |||
1441 | } | 1482 | } |
1442 | } | 1483 | } |
1443 | 1484 | ||
1444 | /* IN/OUT packets mean we're in the DATA or STATUS stage. | 1485 | /* IN/OUT packets mean we're in the DATA or STATUS stage. |
1445 | * This driver only uses protocol stalls (ep0 never halts), | 1486 | * This driver only uses protocol stalls (ep0 never halts), |
1446 | * and if we got this far the gadget driver already had a | 1487 | * and if we got this far the gadget driver already had a |
1447 | * chance to stall. Tries to be forgiving of host oddities. | 1488 | * chance to stall. Tries to be forgiving of host oddities. |
@@ -1509,7 +1550,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src) | |||
1509 | } else if (stat == 0) | 1550 | } else if (stat == 0) |
1510 | UDC_CTRL_REG = UDC_SET_FIFO_EN; | 1551 | UDC_CTRL_REG = UDC_SET_FIFO_EN; |
1511 | UDC_EP_NUM_REG = 0; | 1552 | UDC_EP_NUM_REG = 0; |
1512 | 1553 | ||
1513 | /* activate status stage */ | 1554 | /* activate status stage */ |
1514 | if (stat == 1) { | 1555 | if (stat == 1) { |
1515 | done(ep0, req, 0); | 1556 | done(ep0, req, 0); |
@@ -1866,7 +1907,7 @@ static void pio_out_timer(unsigned long _ep) | |||
1866 | 1907 | ||
1867 | spin_lock_irqsave(&ep->udc->lock, flags); | 1908 | spin_lock_irqsave(&ep->udc->lock, flags); |
1868 | if (!list_empty(&ep->queue) && ep->ackwait) { | 1909 | if (!list_empty(&ep->queue) && ep->ackwait) { |
1869 | use_ep(ep, 0); | 1910 | use_ep(ep, UDC_EP_SEL); |
1870 | stat_flg = UDC_STAT_FLG_REG; | 1911 | stat_flg = UDC_STAT_FLG_REG; |
1871 | 1912 | ||
1872 | if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN) | 1913 | if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN) |
@@ -1876,12 +1917,12 @@ static void pio_out_timer(unsigned long _ep) | |||
1876 | VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg); | 1917 | VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg); |
1877 | req = container_of(ep->queue.next, | 1918 | req = container_of(ep->queue.next, |
1878 | struct omap_req, queue); | 1919 | struct omap_req, queue); |
1879 | UDC_EP_NUM_REG = ep->bEndpointAddress | UDC_EP_SEL; | ||
1880 | (void) read_fifo(ep, req); | 1920 | (void) read_fifo(ep, req); |
1881 | UDC_EP_NUM_REG = ep->bEndpointAddress; | 1921 | UDC_EP_NUM_REG = ep->bEndpointAddress; |
1882 | UDC_CTRL_REG = UDC_SET_FIFO_EN; | 1922 | UDC_CTRL_REG = UDC_SET_FIFO_EN; |
1883 | ep->ackwait = 1 + ep->double_buf; | 1923 | ep->ackwait = 1 + ep->double_buf; |
1884 | } | 1924 | } else |
1925 | deselect_ep(); | ||
1885 | } | 1926 | } |
1886 | mod_timer(&ep->timer, PIO_OUT_TIMEOUT); | 1927 | mod_timer(&ep->timer, PIO_OUT_TIMEOUT); |
1887 | spin_unlock_irqrestore(&ep->udc->lock, flags); | 1928 | spin_unlock_irqrestore(&ep->udc->lock, flags); |
@@ -2028,7 +2069,17 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev) | |||
2028 | 2069 | ||
2029 | /*-------------------------------------------------------------------------*/ | 2070 | /*-------------------------------------------------------------------------*/ |
2030 | 2071 | ||
2031 | static struct omap_udc *udc; | 2072 | static inline int machine_needs_vbus_session(void) |
2073 | { | ||
2074 | return (machine_is_omap_innovator() | ||
2075 | || machine_is_omap_osk() | ||
2076 | || machine_is_omap_apollon() | ||
2077 | #ifndef CONFIG_MACH_OMAP_H4_OTG | ||
2078 | || machine_is_omap_h4() | ||
2079 | #endif | ||
2080 | || machine_is_sx1() | ||
2081 | ); | ||
2082 | } | ||
2032 | 2083 | ||
2033 | int usb_gadget_register_driver (struct usb_gadget_driver *driver) | 2084 | int usb_gadget_register_driver (struct usb_gadget_driver *driver) |
2034 | { | 2085 | { |
@@ -2070,6 +2121,9 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver) | |||
2070 | udc->gadget.dev.driver = &driver->driver; | 2121 | udc->gadget.dev.driver = &driver->driver; |
2071 | spin_unlock_irqrestore(&udc->lock, flags); | 2122 | spin_unlock_irqrestore(&udc->lock, flags); |
2072 | 2123 | ||
2124 | if (udc->dc_clk != NULL) | ||
2125 | omap_udc_enable_clock(1); | ||
2126 | |||
2073 | status = driver->bind (&udc->gadget); | 2127 | status = driver->bind (&udc->gadget); |
2074 | if (status) { | 2128 | if (status) { |
2075 | DBG("bind to %s --> %d\n", driver->driver.name, status); | 2129 | DBG("bind to %s --> %d\n", driver->driver.name, status); |
@@ -2103,10 +2157,12 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver) | |||
2103 | /* boards that don't have VBUS sensing can't autogate 48MHz; | 2157 | /* boards that don't have VBUS sensing can't autogate 48MHz; |
2104 | * can't enter deep sleep while a gadget driver is active. | 2158 | * can't enter deep sleep while a gadget driver is active. |
2105 | */ | 2159 | */ |
2106 | if (machine_is_omap_innovator() || machine_is_omap_osk()) | 2160 | if (machine_needs_vbus_session()) |
2107 | omap_vbus_session(&udc->gadget, 1); | 2161 | omap_vbus_session(&udc->gadget, 1); |
2108 | 2162 | ||
2109 | done: | 2163 | done: |
2164 | if (udc->dc_clk != NULL) | ||
2165 | omap_udc_enable_clock(0); | ||
2110 | return status; | 2166 | return status; |
2111 | } | 2167 | } |
2112 | EXPORT_SYMBOL(usb_gadget_register_driver); | 2168 | EXPORT_SYMBOL(usb_gadget_register_driver); |
@@ -2121,7 +2177,10 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver) | |||
2121 | if (!driver || driver != udc->driver || !driver->unbind) | 2177 | if (!driver || driver != udc->driver || !driver->unbind) |
2122 | return -EINVAL; | 2178 | return -EINVAL; |
2123 | 2179 | ||
2124 | if (machine_is_omap_innovator() || machine_is_omap_osk()) | 2180 | if (udc->dc_clk != NULL) |
2181 | omap_udc_enable_clock(1); | ||
2182 | |||
2183 | if (machine_needs_vbus_session()) | ||
2125 | omap_vbus_session(&udc->gadget, 0); | 2184 | omap_vbus_session(&udc->gadget, 0); |
2126 | 2185 | ||
2127 | if (udc->transceiver) | 2186 | if (udc->transceiver) |
@@ -2137,6 +2196,8 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver) | |||
2137 | udc->gadget.dev.driver = NULL; | 2196 | udc->gadget.dev.driver = NULL; |
2138 | udc->driver = NULL; | 2197 | udc->driver = NULL; |
2139 | 2198 | ||
2199 | if (udc->dc_clk != NULL) | ||
2200 | omap_udc_enable_clock(0); | ||
2140 | DBG("unregistered driver '%s'\n", driver->driver.name); | 2201 | DBG("unregistered driver '%s'\n", driver->driver.name); |
2141 | return status; | 2202 | return status; |
2142 | } | 2203 | } |
@@ -2219,7 +2280,7 @@ static char *trx_mode(unsigned m, int enabled) | |||
2219 | case 0: return enabled ? "*6wire" : "unused"; | 2280 | case 0: return enabled ? "*6wire" : "unused"; |
2220 | case 1: return "4wire"; | 2281 | case 1: return "4wire"; |
2221 | case 2: return "3wire"; | 2282 | case 2: return "3wire"; |
2222 | case 3: return "6wire"; | 2283 | case 3: return "6wire"; |
2223 | default: return "unknown"; | 2284 | default: return "unknown"; |
2224 | } | 2285 | } |
2225 | } | 2286 | } |
@@ -2228,11 +2289,18 @@ static int proc_otg_show(struct seq_file *s) | |||
2228 | { | 2289 | { |
2229 | u32 tmp; | 2290 | u32 tmp; |
2230 | u32 trans; | 2291 | u32 trans; |
2292 | char *ctrl_name; | ||
2231 | 2293 | ||
2232 | tmp = OTG_REV_REG; | 2294 | tmp = OTG_REV_REG; |
2233 | trans = USB_TRANSCEIVER_CTRL_REG; | 2295 | if (cpu_is_omap24xx()) { |
2234 | seq_printf(s, "\nOTG rev %d.%d, transceiver_ctrl %05x\n", | 2296 | ctrl_name = "control_devconf"; |
2235 | tmp >> 4, tmp & 0xf, trans); | 2297 | trans = CONTROL_DEVCONF_REG; |
2298 | } else { | ||
2299 | ctrl_name = "tranceiver_ctrl"; | ||
2300 | trans = USB_TRANSCEIVER_CTRL_REG; | ||
2301 | } | ||
2302 | seq_printf(s, "\nOTG rev %d.%d, %s %05x\n", | ||
2303 | tmp >> 4, tmp & 0xf, ctrl_name, trans); | ||
2236 | tmp = OTG_SYSCON_1_REG; | 2304 | tmp = OTG_SYSCON_1_REG; |
2237 | seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s," | 2305 | seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s," |
2238 | FOURBITS "\n", tmp, | 2306 | FOURBITS "\n", tmp, |
@@ -2307,7 +2375,7 @@ static int proc_udc_show(struct seq_file *s, void *_) | |||
2307 | driver_desc, | 2375 | driver_desc, |
2308 | use_dma ? " (dma)" : ""); | 2376 | use_dma ? " (dma)" : ""); |
2309 | 2377 | ||
2310 | tmp = UDC_REV_REG & 0xff; | 2378 | tmp = UDC_REV_REG & 0xff; |
2311 | seq_printf(s, | 2379 | seq_printf(s, |
2312 | "UDC rev %d.%d, fifo mode %d, gadget %s\n" | 2380 | "UDC rev %d.%d, fifo mode %d, gadget %s\n" |
2313 | "hmc %d, transceiver %s\n", | 2381 | "hmc %d, transceiver %s\n", |
@@ -2315,11 +2383,16 @@ static int proc_udc_show(struct seq_file *s, void *_) | |||
2315 | fifo_mode, | 2383 | fifo_mode, |
2316 | udc->driver ? udc->driver->driver.name : "(none)", | 2384 | udc->driver ? udc->driver->driver.name : "(none)", |
2317 | HMC, | 2385 | HMC, |
2318 | udc->transceiver ? udc->transceiver->label : "(none)"); | 2386 | udc->transceiver |
2319 | seq_printf(s, "ULPD control %04x req %04x status %04x\n", | 2387 | ? udc->transceiver->label |
2320 | __REG16(ULPD_CLOCK_CTRL), | 2388 | : ((cpu_is_omap1710() || cpu_is_omap24xx()) |
2321 | __REG16(ULPD_SOFT_REQ), | 2389 | ? "external" : "(none)")); |
2322 | __REG16(ULPD_STATUS_REQ)); | 2390 | if (cpu_class_is_omap1()) { |
2391 | seq_printf(s, "ULPD control %04x req %04x status %04x\n", | ||
2392 | __REG16(ULPD_CLOCK_CTRL), | ||
2393 | __REG16(ULPD_SOFT_REQ), | ||
2394 | __REG16(ULPD_STATUS_REQ)); | ||
2395 | } | ||
2323 | 2396 | ||
2324 | /* OTG controller registers */ | 2397 | /* OTG controller registers */ |
2325 | if (!cpu_is_omap15xx()) | 2398 | if (!cpu_is_omap15xx()) |
@@ -2504,9 +2577,10 @@ omap_ep_setup(char *name, u8 addr, u8 type, | |||
2504 | dbuf = 1; | 2577 | dbuf = 1; |
2505 | } else { | 2578 | } else { |
2506 | /* double-buffering "not supported" on 15xx, | 2579 | /* double-buffering "not supported" on 15xx, |
2507 | * and ignored for PIO-IN on 16xx | 2580 | * and ignored for PIO-IN on newer chips |
2581 | * (for more reliable behavior) | ||
2508 | */ | 2582 | */ |
2509 | if (!use_dma || cpu_is_omap15xx()) | 2583 | if (!use_dma || cpu_is_omap15xx() || cpu_is_omap24xx()) |
2510 | dbuf = 0; | 2584 | dbuf = 0; |
2511 | 2585 | ||
2512 | switch (maxp) { | 2586 | switch (maxp) { |
@@ -2549,7 +2623,7 @@ omap_ep_setup(char *name, u8 addr, u8 type, | |||
2549 | ep->bEndpointAddress = addr; | 2623 | ep->bEndpointAddress = addr; |
2550 | ep->bmAttributes = type; | 2624 | ep->bmAttributes = type; |
2551 | ep->double_buf = dbuf; | 2625 | ep->double_buf = dbuf; |
2552 | ep->udc = udc; | 2626 | ep->udc = udc; |
2553 | 2627 | ||
2554 | ep->ep.name = ep->name; | 2628 | ep->ep.name = ep->name; |
2555 | ep->ep.ops = &omap_ep_ops; | 2629 | ep->ep.ops = &omap_ep_ops; |
@@ -2709,15 +2783,37 @@ static int __init omap_udc_probe(struct platform_device *pdev) | |||
2709 | struct otg_transceiver *xceiv = NULL; | 2783 | struct otg_transceiver *xceiv = NULL; |
2710 | const char *type = NULL; | 2784 | const char *type = NULL; |
2711 | struct omap_usb_config *config = pdev->dev.platform_data; | 2785 | struct omap_usb_config *config = pdev->dev.platform_data; |
2786 | struct clk *dc_clk; | ||
2787 | struct clk *hhc_clk; | ||
2712 | 2788 | ||
2713 | /* NOTE: "knows" the order of the resources! */ | 2789 | /* NOTE: "knows" the order of the resources! */ |
2714 | if (!request_mem_region(pdev->resource[0].start, | 2790 | if (!request_mem_region(pdev->resource[0].start, |
2715 | pdev->resource[0].end - pdev->resource[0].start + 1, | 2791 | pdev->resource[0].end - pdev->resource[0].start + 1, |
2716 | driver_name)) { | 2792 | driver_name)) { |
2717 | DBG("request_mem_region failed\n"); | 2793 | DBG("request_mem_region failed\n"); |
2718 | return -EBUSY; | 2794 | return -EBUSY; |
2719 | } | 2795 | } |
2720 | 2796 | ||
2797 | if (cpu_is_omap16xx()) { | ||
2798 | dc_clk = clk_get(&pdev->dev, "usb_dc_ck"); | ||
2799 | hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck"); | ||
2800 | BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk)); | ||
2801 | /* can't use omap_udc_enable_clock yet */ | ||
2802 | clk_enable(dc_clk); | ||
2803 | clk_enable(hhc_clk); | ||
2804 | udelay(100); | ||
2805 | } | ||
2806 | |||
2807 | if (cpu_is_omap24xx()) { | ||
2808 | dc_clk = clk_get(&pdev->dev, "usb_fck"); | ||
2809 | hhc_clk = clk_get(&pdev->dev, "usb_l4_ick"); | ||
2810 | BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk)); | ||
2811 | /* can't use omap_udc_enable_clock yet */ | ||
2812 | clk_enable(dc_clk); | ||
2813 | clk_enable(hhc_clk); | ||
2814 | udelay(100); | ||
2815 | } | ||
2816 | |||
2721 | INFO("OMAP UDC rev %d.%d%s\n", | 2817 | INFO("OMAP UDC rev %d.%d%s\n", |
2722 | UDC_REV_REG >> 4, UDC_REV_REG & 0xf, | 2818 | UDC_REV_REG >> 4, UDC_REV_REG & 0xf, |
2723 | config->otg ? ", Mini-AB" : ""); | 2819 | config->otg ? ", Mini-AB" : ""); |
@@ -2727,7 +2823,7 @@ static int __init omap_udc_probe(struct platform_device *pdev) | |||
2727 | hmc = HMC_1510; | 2823 | hmc = HMC_1510; |
2728 | type = "(unknown)"; | 2824 | type = "(unknown)"; |
2729 | 2825 | ||
2730 | if (machine_is_omap_innovator()) { | 2826 | if (machine_is_omap_innovator() || machine_is_sx1()) { |
2731 | /* just set up software VBUS detect, and then | 2827 | /* just set up software VBUS detect, and then |
2732 | * later rig it so we always report VBUS. | 2828 | * later rig it so we always report VBUS. |
2733 | * FIXME without really sensing VBUS, we can't | 2829 | * FIXME without really sensing VBUS, we can't |
@@ -2756,6 +2852,15 @@ static int __init omap_udc_probe(struct platform_device *pdev) | |||
2756 | } | 2852 | } |
2757 | 2853 | ||
2758 | hmc = HMC_1610; | 2854 | hmc = HMC_1610; |
2855 | |||
2856 | if (cpu_is_omap24xx()) { | ||
2857 | /* this could be transceiverless in one of the | ||
2858 | * "we don't need to know" modes. | ||
2859 | */ | ||
2860 | type = "external"; | ||
2861 | goto known; | ||
2862 | } | ||
2863 | |||
2759 | switch (hmc) { | 2864 | switch (hmc) { |
2760 | case 0: /* POWERUP DEFAULT == 0 */ | 2865 | case 0: /* POWERUP DEFAULT == 0 */ |
2761 | case 4: | 2866 | case 4: |
@@ -2794,6 +2899,7 @@ bad_on_1710: | |||
2794 | goto cleanup0; | 2899 | goto cleanup0; |
2795 | } | 2900 | } |
2796 | } | 2901 | } |
2902 | known: | ||
2797 | INFO("hmc mode %d, %s transceiver\n", hmc, type); | 2903 | INFO("hmc mode %d, %s transceiver\n", hmc, type); |
2798 | 2904 | ||
2799 | /* a "gadget" abstracts/virtualizes the controller */ | 2905 | /* a "gadget" abstracts/virtualizes the controller */ |
@@ -2818,8 +2924,8 @@ bad_on_1710: | |||
2818 | status = request_irq(pdev->resource[1].start, omap_udc_irq, | 2924 | status = request_irq(pdev->resource[1].start, omap_udc_irq, |
2819 | IRQF_SAMPLE_RANDOM, driver_name, udc); | 2925 | IRQF_SAMPLE_RANDOM, driver_name, udc); |
2820 | if (status != 0) { | 2926 | if (status != 0) { |
2821 | ERR( "can't get irq %ld, err %d\n", | 2927 | ERR("can't get irq %d, err %d\n", |
2822 | pdev->resource[1].start, status); | 2928 | (int) pdev->resource[1].start, status); |
2823 | goto cleanup1; | 2929 | goto cleanup1; |
2824 | } | 2930 | } |
2825 | 2931 | ||
@@ -2827,24 +2933,41 @@ bad_on_1710: | |||
2827 | status = request_irq(pdev->resource[2].start, omap_udc_pio_irq, | 2933 | status = request_irq(pdev->resource[2].start, omap_udc_pio_irq, |
2828 | IRQF_SAMPLE_RANDOM, "omap_udc pio", udc); | 2934 | IRQF_SAMPLE_RANDOM, "omap_udc pio", udc); |
2829 | if (status != 0) { | 2935 | if (status != 0) { |
2830 | ERR( "can't get irq %ld, err %d\n", | 2936 | ERR("can't get irq %d, err %d\n", |
2831 | pdev->resource[2].start, status); | 2937 | (int) pdev->resource[2].start, status); |
2832 | goto cleanup2; | 2938 | goto cleanup2; |
2833 | } | 2939 | } |
2834 | #ifdef USE_ISO | 2940 | #ifdef USE_ISO |
2835 | status = request_irq(pdev->resource[3].start, omap_udc_iso_irq, | 2941 | status = request_irq(pdev->resource[3].start, omap_udc_iso_irq, |
2836 | IRQF_DISABLED, "omap_udc iso", udc); | 2942 | IRQF_DISABLED, "omap_udc iso", udc); |
2837 | if (status != 0) { | 2943 | if (status != 0) { |
2838 | ERR("can't get irq %ld, err %d\n", | 2944 | ERR("can't get irq %d, err %d\n", |
2839 | pdev->resource[3].start, status); | 2945 | (int) pdev->resource[3].start, status); |
2840 | goto cleanup3; | 2946 | goto cleanup3; |
2841 | } | 2947 | } |
2842 | #endif | 2948 | #endif |
2949 | if (cpu_is_omap16xx()) { | ||
2950 | udc->dc_clk = dc_clk; | ||
2951 | udc->hhc_clk = hhc_clk; | ||
2952 | clk_disable(hhc_clk); | ||
2953 | clk_disable(dc_clk); | ||
2954 | } | ||
2955 | |||
2956 | if (cpu_is_omap24xx()) { | ||
2957 | udc->dc_clk = dc_clk; | ||
2958 | udc->hhc_clk = hhc_clk; | ||
2959 | /* FIXME: on OMAP2, don't release the hhc & dc clocks yet */ | ||
2960 | #if 0 | ||
2961 | clk_disable(hhc_clk); | ||
2962 | clk_disable(dc_clk); | ||
2963 | #endif | ||
2964 | } | ||
2843 | 2965 | ||
2844 | create_proc_file(); | 2966 | create_proc_file(); |
2845 | device_add(&udc->gadget.dev); | 2967 | status = device_add(&udc->gadget.dev); |
2846 | return 0; | 2968 | if (!status) |
2847 | 2969 | return status; | |
2970 | /* on failure, fall through to the cleanup labels */ ||
2848 | #ifdef USE_ISO | 2971 | #ifdef USE_ISO |
2849 | cleanup3: | 2972 | cleanup3: |
2850 | free_irq(pdev->resource[2].start, udc); | 2973 | free_irq(pdev->resource[2].start, udc); |
@@ -2860,8 +2983,17 @@ cleanup1: | |||
2860 | cleanup0: | 2983 | cleanup0: |
2861 | if (xceiv) | 2984 | if (xceiv) |
2862 | put_device(xceiv->dev); | 2985 | put_device(xceiv->dev); |
2986 | |||
2987 | if (cpu_is_omap16xx() || cpu_is_omap24xx()) { | ||
2988 | clk_disable(hhc_clk); | ||
2989 | clk_disable(dc_clk); | ||
2990 | clk_put(hhc_clk); | ||
2991 | clk_put(dc_clk); | ||
2992 | } | ||
2993 | |||
2863 | release_mem_region(pdev->resource[0].start, | 2994 | release_mem_region(pdev->resource[0].start, |
2864 | pdev->resource[0].end - pdev->resource[0].start + 1); | 2995 | pdev->resource[0].end - pdev->resource[0].start + 1); |
2996 | |||
2865 | return status; | 2997 | return status; |
2866 | } | 2998 | } |
2867 | 2999 | ||
@@ -2891,6 +3023,13 @@ static int __exit omap_udc_remove(struct platform_device *pdev) | |||
2891 | free_irq(pdev->resource[2].start, udc); | 3023 | free_irq(pdev->resource[2].start, udc); |
2892 | free_irq(pdev->resource[1].start, udc); | 3024 | free_irq(pdev->resource[1].start, udc); |
2893 | 3025 | ||
3026 | if (udc->dc_clk) { | ||
3027 | if (udc->clk_requested) | ||
3028 | omap_udc_enable_clock(0); | ||
3029 | clk_put(udc->hhc_clk); | ||
3030 | clk_put(udc->dc_clk); | ||
3031 | } | ||
3032 | |||
2894 | release_mem_region(pdev->resource[0].start, | 3033 | release_mem_region(pdev->resource[0].start, |
2895 | pdev->resource[0].end - pdev->resource[0].start + 1); | 3034 | pdev->resource[0].end - pdev->resource[0].start + 1); |
2896 | 3035 | ||
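
The probe changes above extend the driver's cleanup3/cleanup2/cleanup1/cleanup0 chain with clock teardown, following the usual goto-unwind idiom: acquire resources in order, undo them in reverse on any failure. A minimal hedged sketch of that idiom with illustrative resources (not the omap_udc ones):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static irqreturn_t sketch_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int sketch_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int status;

	if (!request_mem_region(pdev->resource[0].start,
				resource_size(&pdev->resource[0]), "sketch"))
		return -EBUSY;

	base = ioremap(pdev->resource[0].start,
		       resource_size(&pdev->resource[0]));
	if (!base) {
		status = -ENOMEM;
		goto err_region;
	}

	status = request_irq(pdev->resource[1].start, sketch_irq, 0,
			     "sketch", pdev);
	if (status)
		goto err_map;

	return 0;

err_map:
	iounmap(base);
err_region:
	release_mem_region(pdev->resource[0].start,
			   resource_size(&pdev->resource[0]));
	return status;		/* each label undoes everything acquired before it */
}
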
diff --git a/drivers/usb/gadget/omap_udc.h b/drivers/usb/gadget/omap_udc.h index 652ee4627344..1dc398bb9ab2 100644 --- a/drivers/usb/gadget/omap_udc.h +++ b/drivers/usb/gadget/omap_udc.h | |||
@@ -175,6 +175,9 @@ struct omap_udc { | |||
175 | unsigned ep0_reset_config:1; | 175 | unsigned ep0_reset_config:1; |
176 | unsigned ep0_setup:1; | 176 | unsigned ep0_setup:1; |
177 | struct completion *done; | 177 | struct completion *done; |
178 | struct clk *dc_clk; | ||
179 | struct clk *hhc_clk; | ||
180 | unsigned clk_requested:1; | ||
178 | }; | 181 | }; |
179 | 182 | ||
180 | /*-------------------------------------------------------------------------*/ | 183 | /*-------------------------------------------------------------------------*/ |
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index acd101caeeeb..e0d4c2358b39 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c | |||
@@ -209,24 +209,16 @@ static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci) | |||
209 | 209 | ||
210 | static int remote_wakeup_is_broken(struct uhci_hcd *uhci) | 210 | static int remote_wakeup_is_broken(struct uhci_hcd *uhci) |
211 | { | 211 | { |
212 | static struct dmi_system_id broken_wakeup_table[] = { | ||
213 | { | ||
214 | .ident = "Asus A7V8X", | ||
215 | .matches = { | ||
216 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK"), | ||
217 | DMI_MATCH(DMI_BOARD_NAME, "A7V8X"), | ||
218 | DMI_MATCH(DMI_BOARD_VERSION, "REV 1.xx"), | ||
219 | } | ||
220 | }, | ||
221 | { } | ||
222 | }; | ||
223 | int port; | 212 | int port; |
213 | char *sys_info; | ||
214 | static char bad_Asus_board[] = "A7V8X"; | ||
224 | 215 | ||
225 | /* One of Asus's motherboards has a bug which causes it to | 216 | /* One of Asus's motherboards has a bug which causes it to |
226 | * wake up immediately from suspend-to-RAM if any of the ports | 217 | * wake up immediately from suspend-to-RAM if any of the ports |
227 | * are connected. In such cases we will not set EGSM. | 218 | * are connected. In such cases we will not set EGSM. |
228 | */ | 219 | */ |
229 | if (dmi_check_system(broken_wakeup_table)) { | 220 | sys_info = dmi_get_system_info(DMI_BOARD_NAME); |
221 | if (sys_info && !strcmp(sys_info, bad_Asus_board)) { | ||
230 | for (port = 0; port < uhci->rh_numports; ++port) { | 222 | for (port = 0; port < uhci->rh_numports; ++port) { |
231 | if (inw(uhci->io_addr + USBPORTSC1 + port * 2) & | 223 | if (inw(uhci->io_addr + USBPORTSC1 + port * 2) & |
232 | USBPORTSC_CCS) | 224 | USBPORTSC_CCS) |
@@ -265,7 +257,9 @@ __acquires(uhci->lock) | |||
265 | int_enable = USBINTR_RESUME; | 257 | int_enable = USBINTR_RESUME; |
266 | if (remote_wakeup_is_broken(uhci)) | 258 | if (remote_wakeup_is_broken(uhci)) |
267 | egsm_enable = 0; | 259 | egsm_enable = 0; |
268 | if (resume_detect_interrupts_are_broken(uhci) || !egsm_enable) | 260 | if (resume_detect_interrupts_are_broken(uhci) || !egsm_enable || |
261 | !device_may_wakeup( | ||
262 | &uhci_to_hcd(uhci)->self.root_hub->dev)) | ||
269 | uhci->working_RD = int_enable = 0; | 263 | uhci->working_RD = int_enable = 0; |
270 | 264 | ||
271 | outw(int_enable, uhci->io_addr + USBINTR); | 265 | outw(int_enable, uhci->io_addr + USBINTR); |
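
The uhci-hcd hunk replaces a one-entry dmi_system_id match table with a direct string lookup of the board name. A minimal sketch of the simpler form (the function name below is illustrative):

#include <linux/dmi.h>
#include <linux/string.h>

static int board_is_asus_a7v8x(void)
{
	const char *board = dmi_get_system_info(DMI_BOARD_NAME);

	return board && !strcmp(board, "A7V8X");
}
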
diff --git a/drivers/usb/input/Kconfig b/drivers/usb/input/Kconfig index f877cd4f317a..258a5d09d3dc 100644 --- a/drivers/usb/input/Kconfig +++ b/drivers/usb/input/Kconfig | |||
@@ -12,10 +12,8 @@ config USB_HID | |||
12 | ---help--- | 12 | ---help--- |
13 | Say Y here if you want full HID support to connect USB keyboards, | 13 | Say Y here if you want full HID support to connect USB keyboards, |
14 | mice, joysticks, graphic tablets, or any other HID based devices | 14 | mice, joysticks, graphic tablets, or any other HID based devices |
15 | to your computer via USB. You also need to select HID Input layer | 15 | to your computer via USB, as well as Uninterruptible Power Supply |
16 | support (below) if you want to use keyboards, mice, joysticks and | 16 | (UPS) and monitor control devices. |
17 | the like ... as well as Uninterruptible Power Supply (UPS) and | ||
18 | monitor control devices. | ||
19 | 17 | ||
20 | You can't use this driver and the HIDBP (Boot Protocol) keyboard | 18 | You can't use this driver and the HIDBP (Boot Protocol) keyboard |
21 | and mouse drivers at the same time. More information is available: | 19 | and mouse drivers at the same time. More information is available: |
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index bf26c3c56990..9148694627d5 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c | |||
@@ -403,7 +403,7 @@ sisusbcon_putc(struct vc_data *c, int ch, int y, int x) | |||
403 | 403 | ||
404 | 404 | ||
405 | sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y), | 405 | sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y), |
406 | (u32)SISUSB_HADDR(x, y), 2, &written); | 406 | (long)SISUSB_HADDR(x, y), 2, &written); |
407 | 407 | ||
408 | mutex_unlock(&sisusb->lock); | 408 | mutex_unlock(&sisusb->lock); |
409 | } | 409 | } |
@@ -438,7 +438,7 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s, | |||
438 | } | 438 | } |
439 | 439 | ||
440 | sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y), | 440 | sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y), |
441 | (u32)SISUSB_HADDR(x, y), count * 2, &written); | 441 | (long)SISUSB_HADDR(x, y), count * 2, &written); |
442 | 442 | ||
443 | mutex_unlock(&sisusb->lock); | 443 | mutex_unlock(&sisusb->lock); |
444 | } | 444 | } |
@@ -492,7 +492,7 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width) | |||
492 | 492 | ||
493 | 493 | ||
494 | sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y), | 494 | sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y), |
495 | (u32)SISUSB_HADDR(x, y), length, &written); | 495 | (long)SISUSB_HADDR(x, y), length, &written); |
496 | 496 | ||
497 | mutex_unlock(&sisusb->lock); | 497 | mutex_unlock(&sisusb->lock); |
498 | } | 498 | } |
@@ -564,7 +564,7 @@ sisusbcon_bmove(struct vc_data *c, int sy, int sx, | |||
564 | 564 | ||
565 | 565 | ||
566 | sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(dx, dy), | 566 | sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(dx, dy), |
567 | (u32)SISUSB_HADDR(dx, dy), length, &written); | 567 | (long)SISUSB_HADDR(dx, dy), length, &written); |
568 | 568 | ||
569 | mutex_unlock(&sisusb->lock); | 569 | mutex_unlock(&sisusb->lock); |
570 | } | 570 | } |
@@ -612,7 +612,7 @@ sisusbcon_switch(struct vc_data *c) | |||
612 | length); | 612 | length); |
613 | 613 | ||
614 | sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin, | 614 | sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin, |
615 | (u32)SISUSB_HADDR(0, 0), | 615 | (long)SISUSB_HADDR(0, 0), |
616 | length, &written); | 616 | length, &written); |
617 | 617 | ||
618 | mutex_unlock(&sisusb->lock); | 618 | mutex_unlock(&sisusb->lock); |
@@ -939,7 +939,7 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb, | |||
939 | } | 939 | } |
940 | 940 | ||
941 | sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t), | 941 | sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t), |
942 | (u32)SISUSB_HADDR(0, t), length, &written); | 942 | (long)SISUSB_HADDR(0, t), length, &written); |
943 | 943 | ||
944 | mutex_unlock(&sisusb->lock); | 944 | mutex_unlock(&sisusb->lock); |
945 | 945 | ||
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c index 95e682e2c9d6..f538013965b0 100644 --- a/drivers/usb/net/asix.c +++ b/drivers/usb/net/asix.c | |||
@@ -920,7 +920,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) | |||
920 | goto out2; | 920 | goto out2; |
921 | 921 | ||
922 | if ((ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, | 922 | if ((ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, |
923 | 0x0000, 0, 0, buf)) < 0) { | 923 | 1, 0, 0, buf)) < 0) { |
924 | dbg("Select PHY #1 failed: %d", ret); | 924 | dbg("Select PHY #1 failed: %d", ret); |
925 | goto out2; | 925 | goto out2; |
926 | } | 926 | } |
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 2f4d303ee36f..c8999ae58652 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig | |||
@@ -170,7 +170,7 @@ config USB_SERIAL_FTDI_SIO | |||
170 | 170 | ||
171 | config USB_SERIAL_FUNSOFT | 171 | config USB_SERIAL_FUNSOFT |
172 | tristate "USB Fundamental Software Dongle Driver" | 172 | tristate "USB Fundamental Software Dongle Driver" |
173 | depends on USB_SERIAL | 173 | depends on USB_SERIAL && !(SPARC || SPARC64) |
174 | ---help--- | 174 | ---help--- |
175 | Say Y here if you want to use the Fundamental Software dongle. | 175 | Say Y here if you want to use the Fundamental Software dongle. |
176 | 176 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 819266b7e2f8..5ca04e82ea19 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -625,6 +625,9 @@ static int option_send_setup(struct usb_serial_port *port) | |||
625 | 625 | ||
626 | dbg("%s", __FUNCTION__); | 626 | dbg("%s", __FUNCTION__); |
627 | 627 | ||
628 | if (port->number != 0) | ||
629 | return 0; | ||
630 | |||
628 | portdata = usb_get_serial_port_data(port); | 631 | portdata = usb_get_serial_port_data(port); |
629 | 632 | ||
630 | if (port->tty) { | 633 | if (port->tty) { |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 5fe7ff441a09..cddef3efba0a 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -728,7 +728,7 @@ UNUSUAL_DEV( 0x05ac, 0x1204, 0x0000, 0x9999, | |||
728 | "Apple", | 728 | "Apple", |
729 | "iPod", | 729 | "iPod", |
730 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 730 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
731 | US_FL_FIX_CAPACITY ), | 731 | US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ), |
732 | 732 | ||
733 | UNUSUAL_DEV( 0x05ac, 0x1205, 0x0000, 0x9999, | 733 | UNUSUAL_DEV( 0x05ac, 0x1205, 0x0000, 0x9999, |
734 | "Apple", | 734 | "Apple", |
@@ -1358,6 +1358,21 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110, | |||
1358 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1358 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
1359 | US_FL_IGNORE_RESIDUE ), | 1359 | US_FL_IGNORE_RESIDUE ), |
1360 | 1360 | ||
1361 | /* Reported by Francesco Foresti <frafore@tiscali.it> */ | ||
1362 | UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, | ||
1363 | "Super Top", | ||
1364 | "IDE DEVICE", | ||
1365 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1366 | US_FL_IGNORE_RESIDUE ), | ||
1367 | |||
1368 | /* Reported by Robert Schedel <r.schedel@yahoo.de> | ||
1369 | * Note: this is a 'super top' device like the above 14cd/6600 device */ | ||
1370 | UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, | ||
1371 | "Teac", | ||
1372 | "HD-35PUK-B", | ||
1373 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1374 | US_FL_IGNORE_RESIDUE ), | ||
1375 | |||
1361 | /* patch submitted by Davide Perini <perini.davide@dpsoftware.org> | 1376 | /* patch submitted by Davide Perini <perini.davide@dpsoftware.org> |
1362 | * and Renato Perini <rperini@email.it> | 1377 | * and Renato Perini <rperini@email.it> |
1363 | */ | 1378 | */ |
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c index 61587ca2cdbb..fde1d9518123 100644 --- a/drivers/video/backlight/corgi_bl.c +++ b/drivers/video/backlight/corgi_bl.c | |||
@@ -121,7 +121,7 @@ static int corgibl_probe(struct platform_device *pdev) | |||
121 | machinfo->limit_mask = -1; | 121 | machinfo->limit_mask = -1; |
122 | 122 | ||
123 | corgi_backlight_device = backlight_device_register ("corgi-bl", | 123 | corgi_backlight_device = backlight_device_register ("corgi-bl", |
124 | NULL, &corgibl_data); | 124 | &pdev->dev, NULL, &corgibl_data); |
125 | if (IS_ERR (corgi_backlight_device)) | 125 | if (IS_ERR (corgi_backlight_device)) |
126 | return PTR_ERR (corgi_backlight_device); | 126 | return PTR_ERR (corgi_backlight_device); |
127 | 127 | ||
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c index 1c569fb543ae..c07d8207fb54 100644 --- a/drivers/video/backlight/hp680_bl.c +++ b/drivers/video/backlight/hp680_bl.c | |||
@@ -105,7 +105,7 @@ static struct backlight_properties hp680bl_data = { | |||
105 | static int __init hp680bl_probe(struct platform_device *dev) | 105 | static int __init hp680bl_probe(struct platform_device *dev) |
106 | { | 106 | { |
107 | hp680_backlight_device = backlight_device_register ("hp680-bl", | 107 | hp680_backlight_device = backlight_device_register ("hp680-bl", |
108 | NULL, &hp680bl_data); | 108 | &dev->dev, NULL, &hp680bl_data); |
109 | if (IS_ERR (hp680_backlight_device)) | 109 | if (IS_ERR (hp680_backlight_device)) |
110 | return PTR_ERR (hp680_backlight_device); | 110 | return PTR_ERR (hp680_backlight_device); |
111 | 111 | ||
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c index 2d7905410b2a..fc812d96c31d 100644 --- a/drivers/video/backlight/locomolcd.c +++ b/drivers/video/backlight/locomolcd.c | |||
@@ -184,7 +184,7 @@ static int locomolcd_probe(struct locomo_dev *ldev) | |||
184 | 184 | ||
185 | local_irq_restore(flags); | 185 | local_irq_restore(flags); |
186 | 186 | ||
187 | locomolcd_bl_device = backlight_device_register("locomo-bl", NULL, &locomobl_data); | 187 | locomolcd_bl_device = backlight_device_register("locomo-bl", &ldev->dev, NULL, &locomobl_data); |
188 | 188 | ||
189 | if (IS_ERR (locomolcd_bl_device)) | 189 | if (IS_ERR (locomolcd_bl_device)) |
190 | return PTR_ERR (locomolcd_bl_device); | 190 | return PTR_ERR (locomolcd_bl_device); |
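
The three backlight hunks above all make the same change: backlight_device_register() is now given the caller's struct device as the parent, so the backlight class device is properly linked to it in sysfs. A hedged sketch of a probe using that four-argument signature as shown in these calls (current kernels add an ops argument; names below are illustrative):

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct backlight_properties sketch_bl_data;	/* brightness callbacks omitted */
static struct backlight_device *sketch_bl;

static int sketch_bl_probe(struct platform_device *pdev)
{
	sketch_bl = backlight_device_register("sketch-bl", &pdev->dev, NULL,
					      &sketch_bl_data);
	if (IS_ERR(sketch_bl))
		return PTR_ERR(sketch_bl);
	return 0;
}
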
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c index bbfc86259272..b9b2b27b68c3 100644 --- a/fs/adfs/dir_f.c +++ b/fs/adfs/dir_f.c | |||
@@ -53,7 +53,7 @@ static inline int adfs_readname(char *buf, char *ptr, int maxlen) | |||
53 | { | 53 | { |
54 | char *old_buf = buf; | 54 | char *old_buf = buf; |
55 | 55 | ||
56 | while (*ptr >= ' ' && maxlen--) { | 56 | while ((unsigned char)*ptr >= ' ' && maxlen--) { |
57 | if (*ptr == '/') | 57 | if (*ptr == '/') |
58 | *buf++ = '.'; | 58 | *buf++ = '.'; |
59 | else | 59 | else |
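
The adfs change matters because plain 'char' is signed on many architectures: a RISC OS filename byte with the top bit set compares as negative and would end the copy loop early. A small user-space illustration of the difference the cast makes:

#include <stdio.h>

int main(void)
{
	char c = (char)0xA0;			/* a top-bit-set filename byte */

	printf("%d %d\n",
	       c >= ' ',			/* 0 where char is signed */
	       (unsigned char)c >= ' ');	/* 1 everywhere */
	return 0;
}
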
diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 34e6d7b220c3..869f5193ecc2 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c | |||
@@ -14,59 +14,307 @@ | |||
14 | #include <linux/time.h> | 14 | #include <linux/time.h> |
15 | #include <linux/smp_lock.h> | 15 | #include <linux/smp_lock.h> |
16 | #include <linux/namei.h> | 16 | #include <linux/namei.h> |
17 | #include <linux/poll.h> | ||
17 | 18 | ||
18 | static int return_EIO(void) | 19 | |
20 | static loff_t bad_file_llseek(struct file *file, loff_t offset, int origin) | ||
21 | { | ||
22 | return -EIO; | ||
23 | } | ||
24 | |||
25 | static ssize_t bad_file_read(struct file *filp, char __user *buf, | ||
26 | size_t size, loff_t *ppos) | ||
27 | { | ||
28 | return -EIO; | ||
29 | } | ||
30 | |||
31 | static ssize_t bad_file_write(struct file *filp, const char __user *buf, | ||
32 | size_t siz, loff_t *ppos) | ||
33 | { | ||
34 | return -EIO; | ||
35 | } | ||
36 | |||
37 | static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov, | ||
38 | unsigned long nr_segs, loff_t pos) | ||
39 | { | ||
40 | return -EIO; | ||
41 | } | ||
42 | |||
43 | static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | ||
44 | unsigned long nr_segs, loff_t pos) | ||
45 | { | ||
46 | return -EIO; | ||
47 | } | ||
48 | |||
49 | static int bad_file_readdir(struct file *filp, void *dirent, filldir_t filldir) | ||
50 | { | ||
51 | return -EIO; | ||
52 | } | ||
53 | |||
54 | static unsigned int bad_file_poll(struct file *filp, poll_table *wait) | ||
55 | { | ||
56 | return POLLERR; | ||
57 | } | ||
58 | |||
59 | static int bad_file_ioctl (struct inode *inode, struct file *filp, | ||
60 | unsigned int cmd, unsigned long arg) | ||
61 | { | ||
62 | return -EIO; | ||
63 | } | ||
64 | |||
65 | static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd, | ||
66 | unsigned long arg) | ||
67 | { | ||
68 | return -EIO; | ||
69 | } | ||
70 | |||
71 | static long bad_file_compat_ioctl(struct file *file, unsigned int cmd, | ||
72 | unsigned long arg) | ||
73 | { | ||
74 | return -EIO; | ||
75 | } | ||
76 | |||
77 | static int bad_file_mmap(struct file *file, struct vm_area_struct *vma) | ||
78 | { | ||
79 | return -EIO; | ||
80 | } | ||
81 | |||
82 | static int bad_file_open(struct inode *inode, struct file *filp) | ||
83 | { | ||
84 | return -EIO; | ||
85 | } | ||
86 | |||
87 | static int bad_file_flush(struct file *file, fl_owner_t id) | ||
88 | { | ||
89 | return -EIO; | ||
90 | } | ||
91 | |||
92 | static int bad_file_release(struct inode *inode, struct file *filp) | ||
93 | { | ||
94 | return -EIO; | ||
95 | } | ||
96 | |||
97 | static int bad_file_fsync(struct file *file, struct dentry *dentry, | ||
98 | int datasync) | ||
99 | { | ||
100 | return -EIO; | ||
101 | } | ||
102 | |||
103 | static int bad_file_aio_fsync(struct kiocb *iocb, int datasync) | ||
104 | { | ||
105 | return -EIO; | ||
106 | } | ||
107 | |||
108 | static int bad_file_fasync(int fd, struct file *filp, int on) | ||
109 | { | ||
110 | return -EIO; | ||
111 | } | ||
112 | |||
113 | static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl) | ||
114 | { | ||
115 | return -EIO; | ||
116 | } | ||
117 | |||
118 | static ssize_t bad_file_sendfile(struct file *in_file, loff_t *ppos, | ||
119 | size_t count, read_actor_t actor, void *target) | ||
120 | { | ||
121 | return -EIO; | ||
122 | } | ||
123 | |||
124 | static ssize_t bad_file_sendpage(struct file *file, struct page *page, | ||
125 | int off, size_t len, loff_t *pos, int more) | ||
126 | { | ||
127 | return -EIO; | ||
128 | } | ||
129 | |||
130 | static unsigned long bad_file_get_unmapped_area(struct file *file, | ||
131 | unsigned long addr, unsigned long len, | ||
132 | unsigned long pgoff, unsigned long flags) | ||
133 | { | ||
134 | return -EIO; | ||
135 | } | ||
136 | |||
137 | static int bad_file_check_flags(int flags) | ||
19 | { | 138 | { |
20 | return -EIO; | 139 | return -EIO; |
21 | } | 140 | } |
22 | 141 | ||
23 | #define EIO_ERROR ((void *) (return_EIO)) | 142 | static int bad_file_dir_notify(struct file *file, unsigned long arg) |
143 | { | ||
144 | return -EIO; | ||
145 | } | ||
146 | |||
147 | static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl) | ||
148 | { | ||
149 | return -EIO; | ||
150 | } | ||
151 | |||
152 | static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe, | ||
153 | struct file *out, loff_t *ppos, size_t len, | ||
154 | unsigned int flags) | ||
155 | { | ||
156 | return -EIO; | ||
157 | } | ||
158 | |||
159 | static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos, | ||
160 | struct pipe_inode_info *pipe, size_t len, | ||
161 | unsigned int flags) | ||
162 | { | ||
163 | return -EIO; | ||
164 | } | ||
24 | 165 | ||
25 | static const struct file_operations bad_file_ops = | 166 | static const struct file_operations bad_file_ops = |
26 | { | 167 | { |
27 | .llseek = EIO_ERROR, | 168 | .llseek = bad_file_llseek, |
28 | .aio_read = EIO_ERROR, | 169 | .read = bad_file_read, |
29 | .read = EIO_ERROR, | 170 | .write = bad_file_write, |
30 | .write = EIO_ERROR, | 171 | .aio_read = bad_file_aio_read, |
31 | .aio_write = EIO_ERROR, | 172 | .aio_write = bad_file_aio_write, |
32 | .readdir = EIO_ERROR, | 173 | .readdir = bad_file_readdir, |
33 | .poll = EIO_ERROR, | 174 | .poll = bad_file_poll, |
34 | .ioctl = EIO_ERROR, | 175 | .ioctl = bad_file_ioctl, |
35 | .mmap = EIO_ERROR, | 176 | .unlocked_ioctl = bad_file_unlocked_ioctl, |
36 | .open = EIO_ERROR, | 177 | .compat_ioctl = bad_file_compat_ioctl, |
37 | .flush = EIO_ERROR, | 178 | .mmap = bad_file_mmap, |
38 | .release = EIO_ERROR, | 179 | .open = bad_file_open, |
39 | .fsync = EIO_ERROR, | 180 | .flush = bad_file_flush, |
40 | .aio_fsync = EIO_ERROR, | 181 | .release = bad_file_release, |
41 | .fasync = EIO_ERROR, | 182 | .fsync = bad_file_fsync, |
42 | .lock = EIO_ERROR, | 183 | .aio_fsync = bad_file_aio_fsync, |
43 | .sendfile = EIO_ERROR, | 184 | .fasync = bad_file_fasync, |
44 | .sendpage = EIO_ERROR, | 185 | .lock = bad_file_lock, |
45 | .get_unmapped_area = EIO_ERROR, | 186 | .sendfile = bad_file_sendfile, |
187 | .sendpage = bad_file_sendpage, | ||
188 | .get_unmapped_area = bad_file_get_unmapped_area, | ||
189 | .check_flags = bad_file_check_flags, | ||
190 | .dir_notify = bad_file_dir_notify, | ||
191 | .flock = bad_file_flock, | ||
192 | .splice_write = bad_file_splice_write, | ||
193 | .splice_read = bad_file_splice_read, | ||
46 | }; | 194 | }; |
47 | 195 | ||
196 | static int bad_inode_create (struct inode *dir, struct dentry *dentry, | ||
197 | int mode, struct nameidata *nd) | ||
198 | { | ||
199 | return -EIO; | ||
200 | } | ||
201 | |||
202 | static struct dentry *bad_inode_lookup(struct inode *dir, | ||
203 | struct dentry *dentry, struct nameidata *nd) | ||
204 | { | ||
205 | return ERR_PTR(-EIO); | ||
206 | } | ||
207 | |||
208 | static int bad_inode_link (struct dentry *old_dentry, struct inode *dir, | ||
209 | struct dentry *dentry) | ||
210 | { | ||
211 | return -EIO; | ||
212 | } | ||
213 | |||
214 | static int bad_inode_unlink(struct inode *dir, struct dentry *dentry) | ||
215 | { | ||
216 | return -EIO; | ||
217 | } | ||
218 | |||
219 | static int bad_inode_symlink (struct inode *dir, struct dentry *dentry, | ||
220 | const char *symname) | ||
221 | { | ||
222 | return -EIO; | ||
223 | } | ||
224 | |||
225 | static int bad_inode_mkdir(struct inode *dir, struct dentry *dentry, | ||
226 | int mode) | ||
227 | { | ||
228 | return -EIO; | ||
229 | } | ||
230 | |||
231 | static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry) | ||
232 | { | ||
233 | return -EIO; | ||
234 | } | ||
235 | |||
236 | static int bad_inode_mknod (struct inode *dir, struct dentry *dentry, | ||
237 | int mode, dev_t rdev) | ||
238 | { | ||
239 | return -EIO; | ||
240 | } | ||
241 | |||
242 | static int bad_inode_rename (struct inode *old_dir, struct dentry *old_dentry, | ||
243 | struct inode *new_dir, struct dentry *new_dentry) | ||
244 | { | ||
245 | return -EIO; | ||
246 | } | ||
247 | |||
248 | static int bad_inode_readlink(struct dentry *dentry, char __user *buffer, | ||
249 | int buflen) | ||
250 | { | ||
251 | return -EIO; | ||
252 | } | ||
253 | |||
254 | static int bad_inode_permission(struct inode *inode, int mask, | ||
255 | struct nameidata *nd) | ||
256 | { | ||
257 | return -EIO; | ||
258 | } | ||
259 | |||
260 | static int bad_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, | ||
261 | struct kstat *stat) | ||
262 | { | ||
263 | return -EIO; | ||
264 | } | ||
265 | |||
266 | static int bad_inode_setattr(struct dentry *direntry, struct iattr *attrs) | ||
267 | { | ||
268 | return -EIO; | ||
269 | } | ||
270 | |||
271 | static int bad_inode_setxattr(struct dentry *dentry, const char *name, | ||
272 | const void *value, size_t size, int flags) | ||
273 | { | ||
274 | return -EIO; | ||
275 | } | ||
276 | |||
277 | static ssize_t bad_inode_getxattr(struct dentry *dentry, const char *name, | ||
278 | void *buffer, size_t size) | ||
279 | { | ||
280 | return -EIO; | ||
281 | } | ||
282 | |||
283 | static ssize_t bad_inode_listxattr(struct dentry *dentry, char *buffer, | ||
284 | size_t buffer_size) | ||
285 | { | ||
286 | return -EIO; | ||
287 | } | ||
288 | |||
289 | static int bad_inode_removexattr(struct dentry *dentry, const char *name) | ||
290 | { | ||
291 | return -EIO; | ||
292 | } | ||
293 | |||
48 | static struct inode_operations bad_inode_ops = | 294 | static struct inode_operations bad_inode_ops = |
49 | { | 295 | { |
50 | .create = EIO_ERROR, | 296 | .create = bad_inode_create, |
51 | .lookup = EIO_ERROR, | 297 | .lookup = bad_inode_lookup, |
52 | .link = EIO_ERROR, | 298 | .link = bad_inode_link, |
53 | .unlink = EIO_ERROR, | 299 | .unlink = bad_inode_unlink, |
54 | .symlink = EIO_ERROR, | 300 | .symlink = bad_inode_symlink, |
55 | .mkdir = EIO_ERROR, | 301 | .mkdir = bad_inode_mkdir, |
56 | .rmdir = EIO_ERROR, | 302 | .rmdir = bad_inode_rmdir, |
57 | .mknod = EIO_ERROR, | 303 | .mknod = bad_inode_mknod, |
58 | .rename = EIO_ERROR, | 304 | .rename = bad_inode_rename, |
59 | .readlink = EIO_ERROR, | 305 | .readlink = bad_inode_readlink, |
60 | /* follow_link must be no-op, otherwise unmounting this inode | 306 | /* follow_link must be no-op, otherwise unmounting this inode |
61 | won't work */ | 307 | won't work */ |
62 | .truncate = EIO_ERROR, | 308 | /* put_link returns void */ |
63 | .permission = EIO_ERROR, | 309 | /* truncate returns void */ |
64 | .getattr = EIO_ERROR, | 310 | .permission = bad_inode_permission, |
65 | .setattr = EIO_ERROR, | 311 | .getattr = bad_inode_getattr, |
66 | .setxattr = EIO_ERROR, | 312 | .setattr = bad_inode_setattr, |
67 | .getxattr = EIO_ERROR, | 313 | .setxattr = bad_inode_setxattr, |
68 | .listxattr = EIO_ERROR, | 314 | .getxattr = bad_inode_getxattr, |
69 | .removexattr = EIO_ERROR, | 315 | .listxattr = bad_inode_listxattr, |
316 | .removexattr = bad_inode_removexattr, | ||
317 | /* truncate_range returns void */ | ||
70 | }; | 318 | }; |
71 | 319 | ||
72 | 320 | ||
@@ -88,7 +336,7 @@ static struct inode_operations bad_inode_ops = | |||
88 | * on it to fail from this point on. | 336 | * on it to fail from this point on. |
89 | */ | 337 | */ |
90 | 338 | ||
91 | void make_bad_inode(struct inode * inode) | 339 | void make_bad_inode(struct inode *inode) |
92 | { | 340 | { |
93 | remove_inode_hash(inode); | 341 | remove_inode_hash(inode); |
94 | 342 | ||
@@ -113,7 +361,7 @@ EXPORT_SYMBOL(make_bad_inode); | |||
113 | * Returns true if the inode in question has been marked as bad. | 361 | * Returns true if the inode in question has been marked as bad. |
114 | */ | 362 | */ |
115 | 363 | ||
116 | int is_bad_inode(struct inode * inode) | 364 | int is_bad_inode(struct inode *inode) |
117 | { | 365 | { |
118 | return (inode->i_op == &bad_inode_ops); | 366 | return (inode->i_op == &bad_inode_ops); |
119 | } | 367 | } |
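A note on the fs/bad_inode.c rewrite above, not part of the patch: the old EIO_ERROR trick stored one int-returning stub, cast through void *, into every operations slot, so every call went through an incompatible function-pointer type. That is undefined behaviour in C and actively wrong for slots whose return type is not int: llseek returns a 64-bit loff_t, and lookup is expected to return a dentry pointer or ERR_PTR(), not a bare errno. Roughly, the shape of the problem and of the fix:

    /* Old scheme (sketch): one stub, cast into every slot. */
    static int return_EIO(void) { return -EIO; }
    #define EIO_ERROR ((void *) (return_EIO))
    /* .llseek = EIO_ERROR reads a 64-bit result from a function that
     * only produced an int; .lookup = EIO_ERROR hands back -EIO where
     * a pointer is expected. */

    /* New scheme: one correctly typed stub per operation, e.g. */
    static struct dentry *bad_inode_lookup(struct inode *dir,
            struct dentry *dentry, struct nameidata *nd)
    {
        return ERR_PTR(-EIO);
    }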
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index d3adfd353ff9..7cb28720f90e 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -854,13 +854,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
854 | * default mmap base, as well as whatever program they | 854 | * default mmap base, as well as whatever program they |
855 | * might try to exec. This is because the brk will | 855 | * might try to exec. This is because the brk will |
856 | * follow the loader, and is not movable. */ | 856 | * follow the loader, and is not movable. */ |
857 | if (current->flags & PF_RANDOMIZE) | 857 | load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); |
858 | load_bias = randomize_range(0x10000, | ||
859 | ELF_ET_DYN_BASE, | ||
860 | 0); | ||
861 | else | ||
862 | load_bias = ELF_ET_DYN_BASE; | ||
863 | load_bias = ELF_PAGESTART(load_bias - vaddr); | ||
864 | } | 858 | } |
865 | 859 | ||
866 | error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, | 860 | error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, |
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index b82381475779..2e0021e8f366 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
@@ -275,6 +275,25 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk, | |||
275 | UFSD("EXIT\n"); | 275 | UFSD("EXIT\n"); |
276 | } | 276 | } |
277 | 277 | ||
278 | static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n, | ||
279 | int sync) | ||
280 | { | ||
281 | struct buffer_head *bh; | ||
282 | sector_t end = beg + n; | ||
283 | |||
284 | for (; beg < end; ++beg) { | ||
285 | bh = sb_getblk(inode->i_sb, beg); | ||
286 | lock_buffer(bh); | ||
287 | memset(bh->b_data, 0, inode->i_sb->s_blocksize); | ||
288 | set_buffer_uptodate(bh); | ||
289 | mark_buffer_dirty(bh); | ||
290 | unlock_buffer(bh); | ||
291 | if (IS_SYNC(inode) || sync) | ||
292 | sync_dirty_buffer(bh); | ||
293 | brelse(bh); | ||
294 | } | ||
295 | } | ||
296 | |||
278 | unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment, | 297 | unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment, |
279 | unsigned goal, unsigned count, int * err, struct page *locked_page) | 298 | unsigned goal, unsigned count, int * err, struct page *locked_page) |
280 | { | 299 | { |
@@ -350,6 +369,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment, | |||
350 | *p = cpu_to_fs32(sb, result); | 369 | *p = cpu_to_fs32(sb, result); |
351 | *err = 0; | 370 | *err = 0; |
352 | UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); | 371 | UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); |
372 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, | ||
373 | locked_page != NULL); | ||
353 | } | 374 | } |
354 | unlock_super(sb); | 375 | unlock_super(sb); |
355 | UFSD("EXIT, result %u\n", result); | 376 | UFSD("EXIT, result %u\n", result); |
@@ -363,6 +384,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment, | |||
363 | if (result) { | 384 | if (result) { |
364 | *err = 0; | 385 | *err = 0; |
365 | UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); | 386 | UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); |
387 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, | ||
388 | locked_page != NULL); | ||
366 | unlock_super(sb); | 389 | unlock_super(sb); |
367 | UFSD("EXIT, result %u\n", result); | 390 | UFSD("EXIT, result %u\n", result); |
368 | return result; | 391 | return result; |
@@ -398,6 +421,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment, | |||
398 | *p = cpu_to_fs32(sb, result); | 421 | *p = cpu_to_fs32(sb, result); |
399 | *err = 0; | 422 | *err = 0; |
400 | UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); | 423 | UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); |
424 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, | ||
425 | locked_page != NULL); | ||
401 | unlock_super(sb); | 426 | unlock_super(sb); |
402 | if (newcount < request) | 427 | if (newcount < request) |
403 | ufs_free_fragments (inode, result + newcount, request - newcount); | 428 | ufs_free_fragments (inode, result + newcount, request - newcount); |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index ee1eaa6f4ec2..2fbab0aab688 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -156,36 +156,6 @@ out: | |||
156 | return ret; | 156 | return ret; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh) | ||
160 | { | ||
161 | lock_buffer(bh); | ||
162 | memset(bh->b_data, 0, inode->i_sb->s_blocksize); | ||
163 | set_buffer_uptodate(bh); | ||
164 | mark_buffer_dirty(bh); | ||
165 | unlock_buffer(bh); | ||
166 | if (IS_SYNC(inode)) | ||
167 | sync_dirty_buffer(bh); | ||
168 | } | ||
169 | |||
170 | static struct buffer_head * | ||
171 | ufs_clear_frags(struct inode *inode, sector_t beg, | ||
172 | unsigned int n, sector_t want) | ||
173 | { | ||
174 | struct buffer_head *res = NULL, *bh; | ||
175 | sector_t end = beg + n; | ||
176 | |||
177 | for (; beg < end; ++beg) { | ||
178 | bh = sb_getblk(inode->i_sb, beg); | ||
179 | ufs_clear_frag(inode, bh); | ||
180 | if (want != beg) | ||
181 | brelse(bh); | ||
182 | else | ||
183 | res = bh; | ||
184 | } | ||
185 | BUG_ON(!res); | ||
186 | return res; | ||
187 | } | ||
188 | |||
189 | /** | 159 | /** |
190 | * ufs_inode_getfrag() - allocate new fragment(s) | 160 | * ufs_inode_getfrag() - allocate new fragment(s) |
191 | * @inode - pointer to inode | 161 | * @inode - pointer to inode |
@@ -302,7 +272,7 @@ repeat: | |||
302 | } | 272 | } |
303 | 273 | ||
304 | if (!phys) { | 274 | if (!phys) { |
305 | result = ufs_clear_frags(inode, tmp, required, tmp + blockoff); | 275 | result = sb_getblk(sb, tmp + blockoff); |
306 | } else { | 276 | } else { |
307 | *phys = tmp + blockoff; | 277 | *phys = tmp + blockoff; |
308 | result = NULL; | 278 | result = NULL; |
@@ -403,8 +373,7 @@ repeat: | |||
403 | 373 | ||
404 | 374 | ||
405 | if (!phys) { | 375 | if (!phys) { |
406 | result = ufs_clear_frags(inode, tmp, uspi->s_fpb, | 376 | result = sb_getblk(sb, tmp + blockoff); |
407 | tmp + blockoff); | ||
408 | } else { | 377 | } else { |
409 | *phys = tmp + blockoff; | 378 | *phys = tmp + blockoff; |
410 | *new = 1; | 379 | *new = 1; |
@@ -471,13 +440,13 @@ int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head | |||
471 | #define GET_INODE_DATABLOCK(x) \ | 440 | #define GET_INODE_DATABLOCK(x) \ |
472 | ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new, bh_result->b_page) | 441 | ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new, bh_result->b_page) |
473 | #define GET_INODE_PTR(x) \ | 442 | #define GET_INODE_PTR(x) \ |
474 | ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL, bh_result->b_page) | 443 | ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL, NULL) |
475 | #define GET_INDIRECT_DATABLOCK(x) \ | 444 | #define GET_INDIRECT_DATABLOCK(x) \ |
476 | ufs_inode_getblock(inode, bh, x, fragment, \ | 445 | ufs_inode_getblock(inode, bh, x, fragment, \ |
477 | &err, &phys, &new, bh_result->b_page); | 446 | &err, &phys, &new, bh_result->b_page) |
478 | #define GET_INDIRECT_PTR(x) \ | 447 | #define GET_INDIRECT_PTR(x) \ |
479 | ufs_inode_getblock(inode, bh, x, fragment, \ | 448 | ufs_inode_getblock(inode, bh, x, fragment, \ |
480 | &err, NULL, NULL, bh_result->b_page); | 449 | &err, NULL, NULL, NULL) |
481 | 450 | ||
482 | if (ptr < UFS_NDIR_FRAGMENT) { | 451 | if (ptr < UFS_NDIR_FRAGMENT) { |
483 | bh = GET_INODE_DATABLOCK(ptr); | 452 | bh = GET_INODE_DATABLOCK(ptr); |
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 9e6c23c360b2..ebc1f697615a 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h | |||
@@ -105,7 +105,7 @@ | |||
105 | 105 | ||
106 | /* Maximum object reference count (detects object deletion issues) */ | 106 | /* Maximum object reference count (detects object deletion issues) */ |
107 | 107 | ||
108 | #define ACPI_MAX_REFERENCE_COUNT 0x800 | 108 | #define ACPI_MAX_REFERENCE_COUNT 0x1000 |
109 | 109 | ||
110 | /* Size of cached memory mapping for system memory operation region */ | 110 | /* Size of cached memory mapping for system memory operation region */ |
111 | 111 | ||
diff --git a/include/asm-arm/arch-iop32x/iop32x.h b/include/asm-arm/arch-iop32x/iop32x.h index 4bbd85f3ed2a..2e9469047eb1 100644 --- a/include/asm-arm/arch-iop32x/iop32x.h +++ b/include/asm-arm/arch-iop32x/iop32x.h | |||
@@ -19,7 +19,7 @@ | |||
19 | * Peripherals that are shared between the iop32x and iop33x but | 19 | * Peripherals that are shared between the iop32x and iop33x but |
20 | * located at different addresses. | 20 | * located at different addresses. |
21 | */ | 21 | */ |
22 | #define IOP3XX_GPIO_REG(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07c0 + (reg)) | 22 | #define IOP3XX_GPIO_REG(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07c4 + (reg)) |
23 | #define IOP3XX_TIMER_REG(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07e0 + (reg)) | 23 | #define IOP3XX_TIMER_REG(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07e0 + (reg)) |
24 | 24 | ||
25 | #include <asm/hardware/iop3xx.h> | 25 | #include <asm/hardware/iop3xx.h> |
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h index d51049522cd0..5f531ea03059 100644 --- a/include/asm-arm/cacheflush.h +++ b/include/asm-arm/cacheflush.h | |||
@@ -357,6 +357,16 @@ extern void flush_dcache_page(struct page *); | |||
357 | 357 | ||
358 | extern void __flush_dcache_page(struct address_space *mapping, struct page *page); | 358 | extern void __flush_dcache_page(struct address_space *mapping, struct page *page); |
359 | 359 | ||
360 | #define ARCH_HAS_FLUSH_ANON_PAGE | ||
361 | static inline void flush_anon_page(struct vm_area_struct *vma, | ||
362 | struct page *page, unsigned long vmaddr) | ||
363 | { | ||
364 | extern void __flush_anon_page(struct vm_area_struct *vma, | ||
365 | struct page *, unsigned long); | ||
366 | if (PageAnon(page)) | ||
367 | __flush_anon_page(vma, page, vmaddr); | ||
368 | } | ||
369 | |||
360 | #define flush_dcache_mmap_lock(mapping) \ | 370 | #define flush_dcache_mmap_lock(mapping) \ |
361 | write_lock_irq(&(mapping)->tree_lock) | 371 | write_lock_irq(&(mapping)->tree_lock) |
362 | #define flush_dcache_mmap_unlock(mapping) \ | 372 | #define flush_dcache_mmap_unlock(mapping) \ |
diff --git a/include/asm-arm/hardware/iop3xx.h b/include/asm-arm/hardware/iop3xx.h index 1018a7486ab7..13ac8a4cd01f 100644 --- a/include/asm-arm/hardware/iop3xx.h +++ b/include/asm-arm/hardware/iop3xx.h | |||
@@ -168,9 +168,9 @@ extern void gpio_line_set(int line, int value); | |||
168 | #define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710) | 168 | #define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710) |
169 | 169 | ||
170 | /* General Purpose I/O */ | 170 | /* General Purpose I/O */ |
171 | #define IOP3XX_GPOE (volatile u32 *)IOP3XX_GPIO_REG(0x0004) | 171 | #define IOP3XX_GPOE (volatile u32 *)IOP3XX_GPIO_REG(0x0000) |
172 | #define IOP3XX_GPID (volatile u32 *)IOP3XX_GPIO_REG(0x0008) | 172 | #define IOP3XX_GPID (volatile u32 *)IOP3XX_GPIO_REG(0x0004) |
173 | #define IOP3XX_GPOD (volatile u32 *)IOP3XX_GPIO_REG(0x000c) | 173 | #define IOP3XX_GPOD (volatile u32 *)IOP3XX_GPIO_REG(0x0008) |
174 | 174 | ||
175 | /* Timers */ | 175 | /* Timers */ |
176 | #define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000) | 176 | #define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000) |
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h index 8ce79a6fa891..e7686d0a8413 100644 --- a/include/asm-i386/boot.h +++ b/include/asm-i386/boot.h | |||
@@ -13,7 +13,8 @@ | |||
13 | #define ASK_VGA 0xfffd /* ask for it at bootup */ | 13 | #define ASK_VGA 0xfffd /* ask for it at bootup */ |
14 | 14 | ||
15 | /* Physical address where kenrel should be loaded. */ | 15 | /* Physical address where kenrel should be loaded. */ |
16 | #define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \ | 16 | #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ |
17 | + (CONFIG_PHYSICAL_ALIGN - 1)) \ | ||
17 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) | 18 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) |
18 | 19 | ||
19 | #endif /* _LINUX_BOOT_H */ | 20 | #endif /* _LINUX_BOOT_H */ |
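The macro in the include/asm-i386/boot.h hunk now rounds CONFIG_PHYSICAL_START up to the next CONFIG_PHYSICAL_ALIGN boundary instead of hard-coding 0x100000 as the base. A small worked example with made-up config values, runnable in userspace:

    #include <stdio.h>

    /* Hypothetical values, only to show the rounding. */
    #define CONFIG_PHYSICAL_START 0x108000
    #define CONFIG_PHYSICAL_ALIGN 0x100000

    #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
                + (CONFIG_PHYSICAL_ALIGN - 1)) \
                & ~(CONFIG_PHYSICAL_ALIGN - 1))

    int main(void)
    {
        /* Prints 0x200000: 0x108000 rounded up to the 1 MiB boundary.
         * An already aligned start (0x100000) would stay unchanged. */
        printf("0x%x\n", (unsigned)LOAD_PHYSICAL_ADDR);
        return 0;
    }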
diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h index 9b768c3b96b3..24cdcc6eaab8 100644 --- a/include/asm-mips/checksum.h +++ b/include/asm-mips/checksum.h | |||
@@ -29,31 +29,38 @@ | |||
29 | */ | 29 | */ |
30 | __wsum csum_partial(const void *buff, int len, __wsum sum); | 30 | __wsum csum_partial(const void *buff, int len, __wsum sum); |
31 | 31 | ||
32 | __wsum __csum_partial_copy_user(const void *src, void *dst, | ||
33 | int len, __wsum sum, int *err_ptr); | ||
34 | |||
32 | /* | 35 | /* |
33 | * this is a new version of the above that records errors it finds in *errp, | 36 | * this is a new version of the above that records errors it finds in *errp, |
34 | * but continues and zeros the rest of the buffer. | 37 | * but continues and zeros the rest of the buffer. |
35 | */ | 38 | */ |
36 | __wsum csum_partial_copy_from_user(const void __user *src, | 39 | static inline |
37 | void *dst, int len, | 40 | __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, |
38 | __wsum sum, int *errp); | 41 | __wsum sum, int *err_ptr) |
42 | { | ||
43 | might_sleep(); | ||
44 | return __csum_partial_copy_user((__force void *)src, dst, | ||
45 | len, sum, err_ptr); | ||
46 | } | ||
39 | 47 | ||
40 | /* | 48 | /* |
41 | * Copy and checksum to user | 49 | * Copy and checksum to user |
42 | */ | 50 | */ |
43 | #define HAVE_CSUM_COPY_USER | 51 | #define HAVE_CSUM_COPY_USER |
44 | static inline __wsum csum_and_copy_to_user (const void *src, void __user *dst, | 52 | static inline |
45 | int len, __wsum sum, | 53 | __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, |
46 | int *err_ptr) | 54 | __wsum sum, int *err_ptr) |
47 | { | 55 | { |
48 | might_sleep(); | 56 | might_sleep(); |
49 | sum = csum_partial(src, len, sum); | 57 | if (access_ok(VERIFY_WRITE, dst, len)) |
50 | 58 | return __csum_partial_copy_user(src, (__force void *)dst, | |
51 | if (copy_to_user(dst, src, len)) { | 59 | len, sum, err_ptr); |
60 | if (len) | ||
52 | *err_ptr = -EFAULT; | 61 | *err_ptr = -EFAULT; |
53 | return (__force __wsum)-1; | ||
54 | } | ||
55 | 62 | ||
56 | return sum; | 63 | return (__force __wsum)-1; /* invalid checksum */ |
57 | } | 64 | } |
58 | 65 | ||
59 | /* | 66 | /* |
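On the include/asm-mips/checksum.h hunk above: the rewrite folds the copy and the checksum into one pass over the data via __csum_partial_copy_user, and adds the access_ok() guard that the generic copy_to_user path used to provide. From the caller's side the error convention is unchanged; a rough sketch of how it is consumed (buffer names and length are placeholders, not from this patch):

    int err = 0;
    __wsum csum;

    /* Copies len bytes from kbuf to the user buffer ubuf while summing
     * them; on a fault err becomes -EFAULT and the returned checksum
     * must be ignored. */
    csum = csum_and_copy_to_user(kbuf, ubuf, len, 0, &err);
    if (err)
        return err;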
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h index 67657089efa7..386da82e5774 100644 --- a/include/asm-mips/irq.h +++ b/include/asm-mips/irq.h | |||
@@ -31,14 +31,14 @@ static inline int irq_canonicalize(int irq) | |||
31 | * functions will take over re-enabling the low-level mask. | 31 | * functions will take over re-enabling the low-level mask. |
32 | * Otherwise it will be done on return from exception. | 32 | * Otherwise it will be done on return from exception. |
33 | */ | 33 | */ |
34 | #define __DO_IRQ_SMTC_HOOK() \ | 34 | #define __DO_IRQ_SMTC_HOOK(irq) \ |
35 | do { \ | 35 | do { \ |
36 | if (irq_hwmask[irq] & 0x0000ff00) \ | 36 | if (irq_hwmask[irq] & 0x0000ff00) \ |
37 | write_c0_tccontext(read_c0_tccontext() & \ | 37 | write_c0_tccontext(read_c0_tccontext() & \ |
38 | ~(irq_hwmask[irq] & 0x0000ff00)); \ | 38 | ~(irq_hwmask[irq] & 0x0000ff00)); \ |
39 | } while (0) | 39 | } while (0) |
40 | #else | 40 | #else |
41 | #define __DO_IRQ_SMTC_HOOK() do { } while (0) | 41 | #define __DO_IRQ_SMTC_HOOK(irq) do { } while (0) |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | /* | 44 | /* |
@@ -52,7 +52,7 @@ do { \ | |||
52 | #define do_IRQ(irq) \ | 52 | #define do_IRQ(irq) \ |
53 | do { \ | 53 | do { \ |
54 | irq_enter(); \ | 54 | irq_enter(); \ |
55 | __DO_IRQ_SMTC_HOOK(); \ | 55 | __DO_IRQ_SMTC_HOOK(irq); \ |
56 | generic_handle_irq(irq); \ | 56 | generic_handle_irq(irq); \ |
57 | irq_exit(); \ | 57 | irq_exit(); \ |
58 | } while (0) | 58 | } while (0) |
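On the include/asm-mips/irq.h hunk: the SMTC hook's body already used the irq value, but the macro did not take it as a parameter, so it silently relied on a variable named irq existing at the expansion site, and the empty non-SMTC stub did not match that shape at all. Passing it explicitly is the usual macro-hygiene fix; schematically (ack() is a stand-in, not a real function):

    /* Before: compiles only if the caller happens to have 'irq' in scope. */
    #define HOOK()         do { ack(irq); } while (0)

    /* After: the dependency is visible, and the real hook and the empty
     * stub can be swapped without touching any call site. */
    #define HOOK(irq)      do { ack(irq); } while (0)
    #define HOOK_STUB(irq) do { } while (0)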
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h index aedb0512cb04..a799dd8ef395 100644 --- a/include/asm-parisc/cacheflush.h +++ b/include/asm-parisc/cacheflush.h | |||
@@ -186,7 +186,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long | |||
186 | } | 186 | } |
187 | 187 | ||
188 | static inline void | 188 | static inline void |
189 | flush_anon_page(struct page *page, unsigned long vmaddr) | 189 | flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) |
190 | { | 190 | { |
191 | if (PageAnon(page)) | 191 | if (PageAnon(page)) |
192 | flush_user_dcache_page(vmaddr); | 192 | flush_user_dcache_page(vmaddr); |
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h index 709568879f73..f6fa39474846 100644 --- a/include/asm-powerpc/bug.h +++ b/include/asm-powerpc/bug.h | |||
@@ -11,10 +11,31 @@ | |||
11 | #define BUG_OPCODE .long 0x00b00b00 /* For asm */ | 11 | #define BUG_OPCODE .long 0x00b00b00 /* For asm */ |
12 | #define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */ | 12 | #define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */ |
13 | 13 | ||
14 | #ifndef __ASSEMBLY__ | ||
15 | |||
16 | #ifdef CONFIG_BUG | 14 | #ifdef CONFIG_BUG |
17 | 15 | ||
16 | #ifdef __ASSEMBLY__ | ||
17 | #ifdef CONFIG_DEBUG_BUGVERBOSE | ||
18 | .macro EMIT_BUG_ENTRY addr,file,line,flags | ||
19 | .section __bug_table,"a" | ||
20 | 5001: PPC_LONG \addr, 5002f | ||
21 | .short \line, \flags | ||
22 | .org 5001b+BUG_ENTRY_SIZE | ||
23 | .previous | ||
24 | .section .rodata,"a" | ||
25 | 5002: .asciz "\file" | ||
26 | .previous | ||
27 | .endm | ||
28 | #else | ||
29 | .macro EMIT_BUG_ENTRY addr,file,line,flags | ||
30 | .section __bug_table,"a" | ||
31 | 5001: PPC_LONG \addr | ||
32 | .short \flags | ||
33 | .org 5001b+BUG_ENTRY_SIZE | ||
34 | .previous | ||
35 | .endm | ||
36 | #endif /* verbose */ | ||
37 | |||
38 | #else /* !__ASSEMBLY__ */ | ||
18 | /* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and | 39 | /* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and |
19 | sizeof(struct bug_entry), respectively */ | 40 | sizeof(struct bug_entry), respectively */ |
20 | #ifdef CONFIG_DEBUG_BUGVERBOSE | 41 | #ifdef CONFIG_DEBUG_BUGVERBOSE |
@@ -91,8 +112,8 @@ | |||
91 | #define HAVE_ARCH_BUG | 112 | #define HAVE_ARCH_BUG |
92 | #define HAVE_ARCH_BUG_ON | 113 | #define HAVE_ARCH_BUG_ON |
93 | #define HAVE_ARCH_WARN_ON | 114 | #define HAVE_ARCH_WARN_ON |
94 | #endif /* CONFIG_BUG */ | ||
95 | #endif /* __ASSEMBLY __ */ | 115 | #endif /* __ASSEMBLY __ */ |
116 | #endif /* CONFIG_BUG */ | ||
96 | 117 | ||
97 | #include <asm-generic/bug.h> | 118 | #include <asm-generic/bug.h> |
98 | 119 | ||
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h index 257d1cecb8c9..7a500732b671 100644 --- a/include/asm-powerpc/hvcall.h +++ b/include/asm-powerpc/hvcall.h | |||
@@ -252,8 +252,6 @@ struct hcall_stats { | |||
252 | unsigned long tb_total; /* total wall time (mftb) of calls. */ | 252 | unsigned long tb_total; /* total wall time (mftb) of calls. */ |
253 | unsigned long purr_total; /* total cpu time (PURR) of calls. */ | 253 | unsigned long purr_total; /* total cpu time (PURR) of calls. */ |
254 | }; | 254 | }; |
255 | void update_hcall_stats(unsigned long opcode, unsigned long tb_delta, | ||
256 | unsigned long purr_delta); | ||
257 | #define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) | 255 | #define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) |
258 | 256 | ||
259 | #endif /* __ASSEMBLY__ */ | 257 | #endif /* __ASSEMBLY__ */ |
diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h index 4a28a850998c..4560d72fc758 100644 --- a/include/asm-powerpc/mpc52xx.h +++ b/include/asm-powerpc/mpc52xx.h | |||
@@ -244,6 +244,7 @@ struct mpc52xx_cdm { | |||
244 | extern void __iomem * mpc52xx_find_and_map(const char *); | 244 | extern void __iomem * mpc52xx_find_and_map(const char *); |
245 | extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node); | 245 | extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node); |
246 | extern void mpc52xx_setup_cpu(void); | 246 | extern void mpc52xx_setup_cpu(void); |
247 | extern void mpc52xx_declare_of_platform_devices(void); | ||
247 | 248 | ||
248 | extern void mpc52xx_init_irq(void); | 249 | extern void mpc52xx_init_irq(void); |
249 | extern unsigned int mpc52xx_get_irq(void); | 250 | extern unsigned int mpc52xx_get_irq(void); |
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h index 5e261e1de671..5c5d02de49e9 100644 --- a/include/asm-s390/futex.h +++ b/include/asm-s390/futex.h | |||
@@ -4,8 +4,8 @@ | |||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | #include <linux/futex.h> | 6 | #include <linux/futex.h> |
7 | #include <linux/uaccess.h> | ||
7 | #include <asm/errno.h> | 8 | #include <asm/errno.h> |
8 | #include <asm/uaccess.h> | ||
9 | 9 | ||
10 | static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 10 | static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) |
11 | { | 11 | { |
@@ -21,7 +21,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
21 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | 21 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) |
22 | return -EFAULT; | 22 | return -EFAULT; |
23 | 23 | ||
24 | pagefault_disable(); | ||
24 | ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval); | 25 | ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval); |
26 | pagefault_enable(); | ||
25 | 27 | ||
26 | if (!ret) { | 28 | if (!ret) { |
27 | switch (cmp) { | 29 | switch (cmp) { |
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index ca9a602cffd7..645d440807c2 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <asm/cacheflush.h> | 8 | #include <asm/cacheflush.h> |
9 | 9 | ||
10 | #ifndef ARCH_HAS_FLUSH_ANON_PAGE | 10 | #ifndef ARCH_HAS_FLUSH_ANON_PAGE |
11 | static inline void flush_anon_page(struct page *page, unsigned long vmaddr) | 11 | static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) |
12 | { | 12 | { |
13 | } | 13 | } |
14 | #endif | 14 | #endif |
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 28fdce1ac1db..bc8b4616bad7 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #include <asm/types.h> | 11 | #include <asm/types.h> |
12 | #include <linux/ioctl.h> | 12 | #include <linux/ioctl.h> |
13 | 13 | ||
14 | #define KVM_API_VERSION 1 | 14 | #define KVM_API_VERSION 2 |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * Architectural interrupt line count, and the size of the bitmap needed | 17 | * Architectural interrupt line count, and the size of the bitmap needed |
@@ -45,6 +45,7 @@ enum kvm_exit_reason { | |||
45 | KVM_EXIT_DEBUG = 4, | 45 | KVM_EXIT_DEBUG = 4, |
46 | KVM_EXIT_HLT = 5, | 46 | KVM_EXIT_HLT = 5, |
47 | KVM_EXIT_MMIO = 6, | 47 | KVM_EXIT_MMIO = 6, |
48 | KVM_EXIT_IRQ_WINDOW_OPEN = 7, | ||
48 | }; | 49 | }; |
49 | 50 | ||
50 | /* for KVM_RUN */ | 51 | /* for KVM_RUN */ |
@@ -53,11 +54,19 @@ struct kvm_run { | |||
53 | __u32 vcpu; | 54 | __u32 vcpu; |
54 | __u32 emulated; /* skip current instruction */ | 55 | __u32 emulated; /* skip current instruction */ |
55 | __u32 mmio_completed; /* mmio request completed */ | 56 | __u32 mmio_completed; /* mmio request completed */ |
57 | __u8 request_interrupt_window; | ||
58 | __u8 padding1[3]; | ||
56 | 59 | ||
57 | /* out */ | 60 | /* out */ |
58 | __u32 exit_type; | 61 | __u32 exit_type; |
59 | __u32 exit_reason; | 62 | __u32 exit_reason; |
60 | __u32 instruction_length; | 63 | __u32 instruction_length; |
64 | __u8 ready_for_interrupt_injection; | ||
65 | __u8 if_flag; | ||
66 | __u16 padding2; | ||
67 | __u64 cr8; | ||
68 | __u64 apic_base; | ||
69 | |||
61 | union { | 70 | union { |
62 | /* KVM_EXIT_UNKNOWN */ | 71 | /* KVM_EXIT_UNKNOWN */ |
63 | struct { | 72 | struct { |
diff --git a/include/linux/magic.h b/include/linux/magic.h index 156c40fc664e..b78bbf42135a 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #define ADFS_SUPER_MAGIC 0xadf5 | 4 | #define ADFS_SUPER_MAGIC 0xadf5 |
5 | #define AFFS_SUPER_MAGIC 0xadff | 5 | #define AFFS_SUPER_MAGIC 0xadff |
6 | #define AFS_SUPER_MAGIC 0x5346414F | ||
6 | #define AUTOFS_SUPER_MAGIC 0x0187 | 7 | #define AUTOFS_SUPER_MAGIC 0x0187 |
7 | #define CODA_SUPER_MAGIC 0x73757245 | 8 | #define CODA_SUPER_MAGIC 0x73757245 |
8 | #define EFS_SUPER_MAGIC 0x414A53 | 9 | #define EFS_SUPER_MAGIC 0x414A53 |
diff --git a/include/linux/swap.h b/include/linux/swap.h index add51cebc8d9..5423559a44a6 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -245,7 +245,7 @@ extern int swap_duplicate(swp_entry_t); | |||
245 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | 245 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
246 | extern void swap_free(swp_entry_t); | 246 | extern void swap_free(swp_entry_t); |
247 | extern void free_swap_and_cache(swp_entry_t); | 247 | extern void free_swap_and_cache(swp_entry_t); |
248 | extern int swap_type_of(dev_t, sector_t); | 248 | extern int swap_type_of(dev_t, sector_t, struct block_device **); |
249 | extern unsigned int count_swap_pages(int, int); | 249 | extern unsigned int count_swap_pages(int, int); |
250 | extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); | 250 | extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); |
251 | extern sector_t swapdev_block(int, pgoff_t); | 251 | extern sector_t swapdev_block(int, pgoff_t); |
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index e6af381e206d..e02d85f56e60 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h | |||
@@ -218,7 +218,7 @@ struct ieee80211_snap_hdr { | |||
218 | #define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE) | 218 | #define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE) |
219 | 219 | ||
220 | #define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG) | 220 | #define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG) |
221 | #define WLAN_GET_SEQ_SEQ(seq) ((seq) & IEEE80211_SCTL_SEQ) | 221 | #define WLAN_GET_SEQ_SEQ(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) |
222 | 222 | ||
223 | /* Authentication algorithms */ | 223 | /* Authentication algorithms */ |
224 | #define WLAN_AUTH_OPEN 0 | 224 | #define WLAN_AUTH_OPEN 0 |
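On the include/net/ieee80211.h hunk: the 802.11 sequence-control field packs a 4-bit fragment number in bits 0-3 and a 12-bit sequence number in bits 4-15, so masking without shifting returned the sequence number multiplied by 16. A small self-contained check of the fixed macro (mask values as in the header):

    #include <stdio.h>

    #define IEEE80211_SCTL_FRAG 0x000F
    #define IEEE80211_SCTL_SEQ  0xFFF0

    #define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
    #define WLAN_GET_SEQ_SEQ(seq)  (((seq) & IEEE80211_SCTL_SEQ) >> 4)

    int main(void)
    {
        unsigned int sctl = (2047 << 4) | 3;   /* seq 2047, fragment 3 */

        /* Prints "seq 2047 frag 3"; without the >> 4 the first value
         * would have come out as 32752. */
        printf("seq %u frag %u\n", WLAN_GET_SEQ_SEQ(sctl),
               WLAN_GET_SEQ_FRAG(sctl));
        return 0;
    }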
diff --git a/include/net/tcp.h b/include/net/tcp.h index b7d8317f22ac..cd8fa0c858ae 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -242,7 +242,7 @@ extern int tcp_memory_pressure; | |||
242 | 242 | ||
243 | static inline int before(__u32 seq1, __u32 seq2) | 243 | static inline int before(__u32 seq1, __u32 seq2) |
244 | { | 244 | { |
245 | return (__s32)(seq2-seq1) > 0; | 245 | return (__s32)(seq1-seq2) < 0; |
246 | } | 246 | } |
247 | #define after(seq2, seq1) before(seq1, seq2) | 247 | #define after(seq2, seq1) before(seq1, seq2) |
248 | 248 | ||
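On the include/net/tcp.h hunk: both forms compute a wrapping 32-bit difference and agree for every pair of sequence numbers except the one where seq1 and seq2 are exactly 2^31 apart; there the old expression reported neither number as before the other, while the new one reports each as before the other, so that halfway case no longer compares as unordered. A quick userspace check (the typedefs are local stand-ins for the kernel's types):

    #include <stdio.h>

    typedef unsigned int u32;
    typedef int s32;

    static int before_old(u32 seq1, u32 seq2) { return (s32)(seq2 - seq1) > 0; }
    static int before_new(u32 seq1, u32 seq2) { return (s32)(seq1 - seq2) < 0; }

    int main(void)
    {
        u32 a = 100, b = a + 0x80000000u;       /* exactly 2^31 apart */

        /* Ordinary cases agree: prints "1 1". */
        printf("%d %d\n", before_old(1, 2), before_new(1, 2));

        /* At the 2^31 boundary they differ: prints "0 1". */
        printf("%d %d\n", before_old(a, b), before_new(a, b));
        return 0;
    }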
diff --git a/include/net/x25.h b/include/net/x25.h index 0ad90ebcf86e..e47fe440d9d7 100644 --- a/include/net/x25.h +++ b/include/net/x25.h | |||
@@ -259,6 +259,7 @@ extern int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int | |||
259 | extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char); | 259 | extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char); |
260 | 260 | ||
261 | /* x25_timer.c */ | 261 | /* x25_timer.c */ |
262 | extern void x25_init_timers(struct sock *sk); | ||
262 | extern void x25_start_heartbeat(struct sock *); | 263 | extern void x25_start_heartbeat(struct sock *); |
263 | extern void x25_start_t2timer(struct sock *); | 264 | extern void x25_start_t2timer(struct sock *); |
264 | extern void x25_start_t21timer(struct sock *); | 265 | extern void x25_start_t21timer(struct sock *); |
diff --git a/include/sound/version.h b/include/sound/version.h index 2949b9b991b5..20f7babad514 100644 --- a/include/sound/version.h +++ b/include/sound/version.h | |||
@@ -1,3 +1,3 @@ | |||
1 | /* include/version.h. Generated by alsa/ksync script. */ | 1 | /* include/version.h. Generated by alsa/ksync script. */ |
2 | #define CONFIG_SND_VERSION "1.0.14rc1" | 2 | #define CONFIG_SND_VERSION "1.0.14rc1" |
3 | #define CONFIG_SND_DATE " (Wed Dec 20 08:11:48 2006 UTC)" | 3 | #define CONFIG_SND_DATE " (Tue Jan 09 09:56:17 2007 UTC)" |
diff --git a/init/main.c b/init/main.c index 2b1cdaab45e6..bc27d72bbb19 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -538,6 +538,11 @@ asmlinkage void __init start_kernel(void) | |||
538 | parse_args("Booting kernel", command_line, __start___param, | 538 | parse_args("Booting kernel", command_line, __start___param, |
539 | __stop___param - __start___param, | 539 | __stop___param - __start___param, |
540 | &unknown_bootoption); | 540 | &unknown_bootoption); |
541 | if (!irqs_disabled()) { | ||
542 | printk(KERN_WARNING "start_kernel(): bug: interrupts were " | ||
543 | "enabled *very* early, fixing it\n"); | ||
544 | local_irq_disable(); | ||
545 | } | ||
541 | sort_main_extable(); | 546 | sort_main_extable(); |
542 | trap_init(); | 547 | trap_init(); |
543 | rcu_init(); | 548 | rcu_init(); |
diff --git a/kernel/module.c b/kernel/module.c index dbce132b354c..d0f2260a0210 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -1148,10 +1148,10 @@ static int mod_sysfs_setup(struct module *mod, | |||
1148 | kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); | 1148 | kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); |
1149 | return 0; | 1149 | return 0; |
1150 | 1150 | ||
1151 | out_unreg_drivers: | ||
1152 | kobject_unregister(mod->drivers_dir); | ||
1153 | out_unreg_param: | 1151 | out_unreg_param: |
1154 | module_param_sysfs_remove(mod); | 1152 | module_param_sysfs_remove(mod); |
1153 | out_unreg_drivers: | ||
1154 | kobject_unregister(mod->drivers_dir); | ||
1155 | out_unreg: | 1155 | out_unreg: |
1156 | kobject_del(&mod->mkobj.kobj); | 1156 | kobject_del(&mod->mkobj.kobj); |
1157 | kobject_put(&mod->mkobj.kobj); | 1157 | kobject_put(&mod->mkobj.kobj); |
@@ -2327,8 +2327,22 @@ void print_modules(void) | |||
2327 | printk("\n"); | 2327 | printk("\n"); |
2328 | } | 2328 | } |
2329 | 2329 | ||
2330 | static char *make_driver_name(struct device_driver *drv) | ||
2331 | { | ||
2332 | char *driver_name; | ||
2333 | |||
2334 | driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2, | ||
2335 | GFP_KERNEL); | ||
2336 | if (!driver_name) | ||
2337 | return NULL; | ||
2338 | |||
2339 | sprintf(driver_name, "%s:%s", drv->bus->name, drv->name); | ||
2340 | return driver_name; | ||
2341 | } | ||
2342 | |||
2330 | void module_add_driver(struct module *mod, struct device_driver *drv) | 2343 | void module_add_driver(struct module *mod, struct device_driver *drv) |
2331 | { | 2344 | { |
2345 | char *driver_name; | ||
2332 | int no_warn; | 2346 | int no_warn; |
2333 | 2347 | ||
2334 | if (!mod || !drv) | 2348 | if (!mod || !drv) |
@@ -2336,17 +2350,31 @@ void module_add_driver(struct module *mod, struct device_driver *drv) | |||
2336 | 2350 | ||
2337 | /* Don't check return codes; these calls are idempotent */ | 2351 | /* Don't check return codes; these calls are idempotent */ |
2338 | no_warn = sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module"); | 2352 | no_warn = sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module"); |
2339 | no_warn = sysfs_create_link(mod->drivers_dir, &drv->kobj, drv->name); | 2353 | driver_name = make_driver_name(drv); |
2354 | if (driver_name) { | ||
2355 | no_warn = sysfs_create_link(mod->drivers_dir, &drv->kobj, | ||
2356 | driver_name); | ||
2357 | kfree(driver_name); | ||
2358 | } | ||
2340 | } | 2359 | } |
2341 | EXPORT_SYMBOL(module_add_driver); | 2360 | EXPORT_SYMBOL(module_add_driver); |
2342 | 2361 | ||
2343 | void module_remove_driver(struct device_driver *drv) | 2362 | void module_remove_driver(struct device_driver *drv) |
2344 | { | 2363 | { |
2364 | char *driver_name; | ||
2365 | |||
2345 | if (!drv) | 2366 | if (!drv) |
2346 | return; | 2367 | return; |
2368 | |||
2347 | sysfs_remove_link(&drv->kobj, "module"); | 2369 | sysfs_remove_link(&drv->kobj, "module"); |
2348 | if (drv->owner && drv->owner->drivers_dir) | 2370 | if (drv->owner && drv->owner->drivers_dir) { |
2349 | sysfs_remove_link(drv->owner->drivers_dir, drv->name); | 2371 | driver_name = make_driver_name(drv); |
2372 | if (driver_name) { | ||
2373 | sysfs_remove_link(drv->owner->drivers_dir, | ||
2374 | driver_name); | ||
2375 | kfree(driver_name); | ||
2376 | } | ||
2377 | } | ||
2350 | } | 2378 | } |
2351 | EXPORT_SYMBOL(module_remove_driver); | 2379 | EXPORT_SYMBOL(module_remove_driver); |
2352 | 2380 | ||
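On the kernel/module.c hunk: the symlinks under a module's drivers/ directory are now named bus:driver rather than just the driver name, which avoids collisions when one module registers same-named drivers on different buses. A userspace re-creation of make_driver_name(), only to show the resulting link names (the bus and driver strings here are made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *make_driver_name(const char *bus, const char *drv)
    {
        char *name = malloc(strlen(bus) + strlen(drv) + 2);

        if (!name)
            return NULL;
        sprintf(name, "%s:%s", bus, drv);      /* e.g. "pci:ehci_hcd" */
        return name;
    }

    int main(void)
    {
        char *name = make_driver_name("pci", "ehci_hcd");

        if (name) {
            printf("%s\n", name);
            free(name);
        }
        return 0;
    }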
diff --git a/kernel/params.c b/kernel/params.c index f406655d6653..718945da8f58 100644 --- a/kernel/params.c +++ b/kernel/params.c | |||
@@ -143,9 +143,15 @@ int parse_args(const char *name, | |||
143 | 143 | ||
144 | while (*args) { | 144 | while (*args) { |
145 | int ret; | 145 | int ret; |
146 | int irq_was_disabled; | ||
146 | 147 | ||
147 | args = next_arg(args, ¶m, &val); | 148 | args = next_arg(args, ¶m, &val); |
149 | irq_was_disabled = irqs_disabled(); | ||
148 | ret = parse_one(param, val, params, num, unknown); | 150 | ret = parse_one(param, val, params, num, unknown); |
151 | if (irq_was_disabled && !irqs_disabled()) { | ||
152 | printk(KERN_WARNING "parse_args(): option '%s' enabled " | ||
153 | "irq's!\n", param); | ||
154 | } | ||
149 | switch (ret) { | 155 | switch (ret) { |
150 | case -ENOENT: | 156 | case -ENOENT: |
151 | printk(KERN_ERR "%s: Unknown parameter `%s'\n", | 157 | printk(KERN_ERR "%s: Unknown parameter `%s'\n", |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index f133d4a6d817..3581f8f86acd 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -165,14 +165,15 @@ static int swsusp_swap_check(void) /* This is called before saving image */ | |||
165 | { | 165 | { |
166 | int res; | 166 | int res; |
167 | 167 | ||
168 | res = swap_type_of(swsusp_resume_device, swsusp_resume_block); | 168 | res = swap_type_of(swsusp_resume_device, swsusp_resume_block, |
169 | &resume_bdev); | ||
169 | if (res < 0) | 170 | if (res < 0) |
170 | return res; | 171 | return res; |
171 | 172 | ||
172 | root_swap = res; | 173 | root_swap = res; |
173 | resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_WRITE); | 174 | res = blkdev_get(resume_bdev, FMODE_WRITE, O_RDWR); |
174 | if (IS_ERR(resume_bdev)) | 175 | if (res) |
175 | return PTR_ERR(resume_bdev); | 176 | return res; |
176 | 177 | ||
177 | res = set_blocksize(resume_bdev, PAGE_SIZE); | 178 | res = set_blocksize(resume_bdev, PAGE_SIZE); |
178 | if (res < 0) | 179 | if (res < 0) |
diff --git a/kernel/power/user.c b/kernel/power/user.c index 89443b85163b..f7b7a785a5c6 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -57,7 +57,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
57 | memset(&data->handle, 0, sizeof(struct snapshot_handle)); | 57 | memset(&data->handle, 0, sizeof(struct snapshot_handle)); |
58 | if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { | 58 | if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { |
59 | data->swap = swsusp_resume_device ? | 59 | data->swap = swsusp_resume_device ? |
60 | swap_type_of(swsusp_resume_device, 0) : -1; | 60 | swap_type_of(swsusp_resume_device, 0, NULL) : -1; |
61 | data->mode = O_RDONLY; | 61 | data->mode = O_RDONLY; |
62 | } else { | 62 | } else { |
63 | data->swap = -1; | 63 | data->swap = -1; |
@@ -268,7 +268,8 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
268 | * so we need to recode them | 268 | * so we need to recode them |
269 | */ | 269 | */ |
270 | if (old_decode_dev(arg)) { | 270 | if (old_decode_dev(arg)) { |
271 | data->swap = swap_type_of(old_decode_dev(arg), 0); | 271 | data->swap = swap_type_of(old_decode_dev(arg), |
272 | 0, NULL); | ||
272 | if (data->swap < 0) | 273 | if (data->swap < 0) |
273 | error = -ENODEV; | 274 | error = -ENODEV; |
274 | } else { | 275 | } else { |
@@ -365,7 +366,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
365 | swdev = old_decode_dev(swap_area.dev); | 366 | swdev = old_decode_dev(swap_area.dev); |
366 | if (swdev) { | 367 | if (swdev) { |
367 | offset = swap_area.offset; | 368 | offset = swap_area.offset; |
368 | data->swap = swap_type_of(swdev, offset); | 369 | data->swap = swap_type_of(swdev, offset, NULL); |
369 | if (data->swap < 0) | 370 | if (data->swap < 0) |
370 | error = -ENODEV; | 371 | error = -ENODEV; |
371 | } else { | 372 | } else { |
diff --git a/kernel/profile.c b/kernel/profile.c index fb5e03d57e9d..11550b2290b6 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -63,7 +63,7 @@ static int __init profile_setup(char * str) | |||
63 | printk(KERN_INFO | 63 | printk(KERN_INFO |
64 | "kernel sleep profiling enabled (shift: %ld)\n", | 64 | "kernel sleep profiling enabled (shift: %ld)\n", |
65 | prof_shift); | 65 | prof_shift); |
66 | } else if (!strncmp(str, sleepstr, strlen(sleepstr))) { | 66 | } else if (!strncmp(str, schedstr, strlen(schedstr))) { |
67 | prof_on = SCHED_PROFILING; | 67 | prof_on = SCHED_PROFILING; |
68 | if (str[strlen(schedstr)] == ',') | 68 | if (str[strlen(schedstr)] == ',') |
69 | str += strlen(schedstr) + 1; | 69 | str += strlen(schedstr) + 1; |
diff --git a/mm/memory.c b/mm/memory.c index 563792f4f687..af227d26e104 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1091,7 +1091,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
1091 | if (pages) { | 1091 | if (pages) { |
1092 | pages[i] = page; | 1092 | pages[i] = page; |
1093 | 1093 | ||
1094 | flush_anon_page(page, start); | 1094 | flush_anon_page(vma, page, start); |
1095 | flush_dcache_page(page); | 1095 | flush_dcache_page(page); |
1096 | } | 1096 | } |
1097 | if (vmas) | 1097 | if (vmas) |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6969cfb33901..b278b8d60eee 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -61,12 +61,6 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) | |||
61 | } | 61 | } |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * swapoff can easily use up all memory, so kill those first. | ||
65 | */ | ||
66 | if (p->flags & PF_SWAPOFF) | ||
67 | return ULONG_MAX; | ||
68 | |||
69 | /* | ||
70 | * The memory size of the process is the basis for the badness. | 64 | * The memory size of the process is the basis for the badness. |
71 | */ | 65 | */ |
72 | points = mm->total_vm; | 66 | points = mm->total_vm; |
@@ -77,6 +71,12 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) | |||
77 | task_unlock(p); | 71 | task_unlock(p); |
78 | 72 | ||
79 | /* | 73 | /* |
74 | * swapoff can easily use up all memory, so kill those first. | ||
75 | */ | ||
76 | if (p->flags & PF_SWAPOFF) | ||
77 | return ULONG_MAX; | ||
78 | |||
79 | /* | ||
80 | * Processes which fork a lot of child processes are likely | 80 | * Processes which fork a lot of child processes are likely |
81 | * a good choice. We add half the vmsize of the children if they | 81 | * a good choice. We add half the vmsize of the children if they |
82 | * have an own mm. This prevents forking servers to flood the | 82 | * have an own mm. This prevents forking servers to flood the |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8c1a116875bc..a49f96b7ea43 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -711,6 +711,9 @@ static void __drain_pages(unsigned int cpu) | |||
711 | for_each_zone(zone) { | 711 | for_each_zone(zone) { |
712 | struct per_cpu_pageset *pset; | 712 | struct per_cpu_pageset *pset; |
713 | 713 | ||
714 | if (!populated_zone(zone)) | ||
715 | continue; | ||
716 | |||
714 | pset = zone_pcp(zone, cpu); | 717 | pset = zone_pcp(zone, cpu); |
715 | for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { | 718 | for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { |
716 | struct per_cpu_pages *pcp; | 719 | struct per_cpu_pages *pcp; |
@@ -3321,6 +3324,10 @@ void *__init alloc_large_system_hash(const char *tablename, | |||
3321 | numentries >>= (scale - PAGE_SHIFT); | 3324 | numentries >>= (scale - PAGE_SHIFT); |
3322 | else | 3325 | else |
3323 | numentries <<= (PAGE_SHIFT - scale); | 3326 | numentries <<= (PAGE_SHIFT - scale); |
3327 | |||
3328 | /* Make sure we've got at least a 0-order allocation.. */ | ||
3329 | if (unlikely((numentries * bucketsize) < PAGE_SIZE)) | ||
3330 | numentries = PAGE_SIZE / bucketsize; | ||
3324 | } | 3331 | } |
3325 | numentries = roundup_pow_of_two(numentries); | 3332 | numentries = roundup_pow_of_two(numentries); |
3326 | 3333 | ||
@@ -3281,7 +3281,7 @@ retry: | |||
3281 | flags | GFP_THISNODE, nid); | 3281 | flags | GFP_THISNODE, nid); |
3282 | } | 3282 | } |
3283 | 3283 | ||
3284 | if (!obj) { | 3284 | if (!obj && !(flags & __GFP_NO_GROW)) { |
3285 | /* | 3285 | /* |
3286 | * This allocation will be performed within the constraints | 3286 | * This allocation will be performed within the constraints |
3287 | * of the current cpuset / memory policy requirements. | 3287 | * of the current cpuset / memory policy requirements. |
@@ -3310,7 +3310,7 @@ retry: | |||
3310 | */ | 3310 | */ |
3311 | goto retry; | 3311 | goto retry; |
3312 | } else { | 3312 | } else { |
3313 | kmem_freepages(cache, obj); | 3313 | /* cache_grow already freed obj */ |
3314 | obj = NULL; | 3314 | obj = NULL; |
3315 | } | 3315 | } |
3316 | } | 3316 | } |
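On the mm/page_alloc.c alloc_large_system_hash() hunk: after scaling by memory size, a small machine could end up requesting less than one page for the hash table, so the new clamp guarantees at least an order-0 allocation. Worked through with made-up numbers (4 KiB pages, 16-byte buckets):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long bucketsize = 16;   /* hypothetical bucket size */
        unsigned long numentries = 64;   /* scaling left only 1 KiB of table */

        /* The clamp added by the patch: never size below one page. */
        if ((numentries * bucketsize) < PAGE_SIZE)
            numentries = PAGE_SIZE / bucketsize;

        /* Prints "256 entries (4096 bytes)". */
        printf("%lu entries (%lu bytes)\n", numentries,
               numentries * bucketsize);
        return 0;
    }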
diff --git a/mm/swapfile.c b/mm/swapfile.c index b9fc0e5de6d5..a2d9bb4e80df 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -434,7 +434,7 @@ void free_swap_and_cache(swp_entry_t entry) | |||
434 | * | 434 | * |
435 | * This is needed for the suspend to disk (aka swsusp). | 435 | * This is needed for the suspend to disk (aka swsusp). |
436 | */ | 436 | */ |
437 | int swap_type_of(dev_t device, sector_t offset) | 437 | int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) |
438 | { | 438 | { |
439 | struct block_device *bdev = NULL; | 439 | struct block_device *bdev = NULL; |
440 | int i; | 440 | int i; |
@@ -450,6 +450,9 @@ int swap_type_of(dev_t device, sector_t offset) | |||
450 | continue; | 450 | continue; |
451 | 451 | ||
452 | if (!bdev) { | 452 | if (!bdev) { |
453 | if (bdev_p) | ||
454 | *bdev_p = sis->bdev; | ||
455 | |||
453 | spin_unlock(&swap_lock); | 456 | spin_unlock(&swap_lock); |
454 | return i; | 457 | return i; |
455 | } | 458 | } |
@@ -459,6 +462,9 @@ int swap_type_of(dev_t device, sector_t offset) | |||
459 | se = list_entry(sis->extent_list.next, | 462 | se = list_entry(sis->extent_list.next, |
460 | struct swap_extent, list); | 463 | struct swap_extent, list); |
461 | if (se->start_block == offset) { | 464 | if (se->start_block == offset) { |
465 | if (bdev_p) | ||
466 | *bdev_p = sis->bdev; | ||
467 | |||
462 | spin_unlock(&swap_lock); | 468 | spin_unlock(&swap_lock); |
463 | bdput(bdev); | 469 | bdput(bdev); |
464 | return i; | 470 | return i; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 40fea4918390..7430df68cb64 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1406,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, | |||
1406 | return ret; | 1406 | return ret; |
1407 | } | 1407 | } |
1408 | 1408 | ||
1409 | static unsigned long count_lru_pages(void) | ||
1410 | { | ||
1411 | struct zone *zone; | ||
1412 | unsigned long ret = 0; | ||
1413 | |||
1414 | for_each_zone(zone) | ||
1415 | ret += zone->nr_active + zone->nr_inactive; | ||
1416 | return ret; | ||
1417 | } | ||
1418 | |||
1409 | /* | 1419 | /* |
1410 | * Try to free `nr_pages' of memory, system-wide, and return the number of | 1420 | * Try to free `nr_pages' of memory, system-wide, and return the number of |
1411 | * freed pages. | 1421 | * freed pages. |
@@ -1420,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
1420 | unsigned long ret = 0; | 1430 | unsigned long ret = 0; |
1421 | int pass; | 1431 | int pass; |
1422 | struct reclaim_state reclaim_state; | 1432 | struct reclaim_state reclaim_state; |
1423 | struct zone *zone; | ||
1424 | struct scan_control sc = { | 1433 | struct scan_control sc = { |
1425 | .gfp_mask = GFP_KERNEL, | 1434 | .gfp_mask = GFP_KERNEL, |
1426 | .may_swap = 0, | 1435 | .may_swap = 0, |
@@ -1431,10 +1440,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
1431 | 1440 | ||
1432 | current->reclaim_state = &reclaim_state; | 1441 | current->reclaim_state = &reclaim_state; |
1433 | 1442 | ||
1434 | lru_pages = 0; | 1443 | lru_pages = count_lru_pages(); |
1435 | for_each_zone(zone) | ||
1436 | lru_pages += zone->nr_active + zone->nr_inactive; | ||
1437 | |||
1438 | nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); | 1444 | nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); |
1439 | /* If slab caches are huge, it's better to hit them first */ | 1445 | /* If slab caches are huge, it's better to hit them first */ |
1440 | while (nr_slab >= lru_pages) { | 1446 | while (nr_slab >= lru_pages) { |
@@ -1461,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
1461 | for (pass = 0; pass < 5; pass++) { | 1467 | for (pass = 0; pass < 5; pass++) { |
1462 | int prio; | 1468 | int prio; |
1463 | 1469 | ||
1464 | /* Needed for shrinking slab caches later on */ | ||
1465 | if (!lru_pages) | ||
1466 | for_each_zone(zone) { | ||
1467 | lru_pages += zone->nr_active; | ||
1468 | lru_pages += zone->nr_inactive; | ||
1469 | } | ||
1470 | |||
1471 | /* Force reclaiming mapped pages in the passes #3 and #4 */ | 1470 | /* Force reclaiming mapped pages in the passes #3 and #4 */ |
1472 | if (pass > 2) { | 1471 | if (pass > 2) { |
1473 | sc.may_swap = 1; | 1472 | sc.may_swap = 1; |
@@ -1483,7 +1482,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
1483 | goto out; | 1482 | goto out; |
1484 | 1483 | ||
1485 | reclaim_state.reclaimed_slab = 0; | 1484 | reclaim_state.reclaimed_slab = 0; |
1486 | shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages); | 1485 | shrink_slab(sc.nr_scanned, sc.gfp_mask, |
1486 | count_lru_pages()); | ||
1487 | ret += reclaim_state.reclaimed_slab; | 1487 | ret += reclaim_state.reclaimed_slab; |
1488 | if (ret >= nr_pages) | 1488 | if (ret >= nr_pages) |
1489 | goto out; | 1489 | goto out; |
@@ -1491,20 +1491,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
1491 | if (sc.nr_scanned && prio < DEF_PRIORITY - 2) | 1491 | if (sc.nr_scanned && prio < DEF_PRIORITY - 2) |
1492 | congestion_wait(WRITE, HZ / 10); | 1492 | congestion_wait(WRITE, HZ / 10); |
1493 | } | 1493 | } |
1494 | |||
1495 | lru_pages = 0; | ||
1496 | } | 1494 | } |
1497 | 1495 | ||
1498 | /* | 1496 | /* |
1499 | * If ret = 0, we could not shrink LRUs, but there may be something | 1497 | * If ret = 0, we could not shrink LRUs, but there may be something |
1500 | * in slab caches | 1498 | * in slab caches |
1501 | */ | 1499 | */ |
1502 | if (!ret) | 1500 | if (!ret) { |
1503 | do { | 1501 | do { |
1504 | reclaim_state.reclaimed_slab = 0; | 1502 | reclaim_state.reclaimed_slab = 0; |
1505 | shrink_slab(nr_pages, sc.gfp_mask, lru_pages); | 1503 | shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages()); |
1506 | ret += reclaim_state.reclaimed_slab; | 1504 | ret += reclaim_state.reclaimed_slab; |
1507 | } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0); | 1505 | } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0); |
1506 | } | ||
1508 | 1507 | ||
1509 | out: | 1508 | out: |
1510 | current->reclaim_state = NULL; | 1509 | current->reclaim_state = NULL; |
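The shrink_all_memory() hunks above replace the open-coded per-zone summation (and its ad-hoc re-initialization inside the pass loop) with a count_lru_pages() helper that is re-evaluated whenever shrink_slab() needs a current LRU total. A minimal user-space sketch of the same refactor follows; the names (zone_stub, count_lru_pages, shrink_slab_stub) and numbers are invented for illustration, not the kernel's actual definitions.

/*
 * Illustrative analogue of the count_lru_pages() refactor: instead of
 * caching a per-zone sum in a local and rebuilding it in several places,
 * a small helper recomputes the total on demand so each consumer sees a
 * fresh value.
 */
#include <stdio.h>

struct zone_stub {
	unsigned long nr_active;
	unsigned long nr_inactive;
};

static struct zone_stub zones[3] = {
	{ 100, 50 }, { 20, 5 }, { 300, 700 },
};

/* Recompute the LRU total every time it is needed. */
static unsigned long count_lru_pages(void)
{
	unsigned long ret = 0;
	size_t i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
		ret += zones[i].nr_active + zones[i].nr_inactive;
	return ret;
}

/* Stand-in for shrink_slab(): only the lru_pages argument matters here. */
static void shrink_slab_stub(unsigned long lru_pages)
{
	printf("shrinking slab against %lu LRU pages\n", lru_pages);
}

int main(void)
{
	int pass;

	for (pass = 0; pass < 3; pass++) {
		/* Simulate reclaim changing the LRU sizes between passes. */
		zones[pass].nr_inactive /= 2;

		/* Fresh value each pass, no stale cached total. */
		shrink_slab_stub(count_lru_pages());
	}
	return 0;
}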
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c index be04e9fb11f6..ab166b48ce8d 100644 --- a/net/bluetooth/cmtp/capi.c +++ b/net/bluetooth/cmtp/capi.c | |||
@@ -196,6 +196,9 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s | |||
196 | 196 | ||
197 | switch (CAPIMSG_SUBCOMMAND(skb->data)) { | 197 | switch (CAPIMSG_SUBCOMMAND(skb->data)) { |
198 | case CAPI_CONF: | 198 | case CAPI_CONF: |
199 | if (skb->len < CAPI_MSG_BASELEN + 10) | ||
200 | break; | ||
201 | |||
199 | func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5); | 202 | func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5); |
200 | info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8); | 203 | info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8); |
201 | 204 | ||
@@ -226,6 +229,9 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s | |||
226 | break; | 229 | break; |
227 | 230 | ||
228 | case CAPI_FUNCTION_GET_PROFILE: | 231 | case CAPI_FUNCTION_GET_PROFILE: |
232 | if (skb->len < CAPI_MSG_BASELEN + 11 + sizeof(capi_profile)) | ||
233 | break; | ||
234 | |||
229 | controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11); | 235 | controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11); |
230 | msgnum = CAPIMSG_MSGID(skb->data); | 236 | msgnum = CAPIMSG_MSGID(skb->data); |
231 | 237 | ||
@@ -246,17 +252,26 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s | |||
246 | break; | 252 | break; |
247 | 253 | ||
248 | case CAPI_FUNCTION_GET_MANUFACTURER: | 254 | case CAPI_FUNCTION_GET_MANUFACTURER: |
255 | if (skb->len < CAPI_MSG_BASELEN + 15) | ||
256 | break; | ||
257 | |||
249 | controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 10); | 258 | controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 10); |
250 | 259 | ||
251 | if (!info && ctrl) { | 260 | if (!info && ctrl) { |
261 | int len = min_t(uint, CAPI_MANUFACTURER_LEN, | ||
262 | skb->data[CAPI_MSG_BASELEN + 14]); | ||
263 | |||
264 | memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN); | ||
252 | strncpy(ctrl->manu, | 265 | strncpy(ctrl->manu, |
253 | skb->data + CAPI_MSG_BASELEN + 15, | 266 | skb->data + CAPI_MSG_BASELEN + 15, len); |
254 | skb->data[CAPI_MSG_BASELEN + 14]); | ||
255 | } | 267 | } |
256 | 268 | ||
257 | break; | 269 | break; |
258 | 270 | ||
259 | case CAPI_FUNCTION_GET_VERSION: | 271 | case CAPI_FUNCTION_GET_VERSION: |
272 | if (skb->len < CAPI_MSG_BASELEN + 32) | ||
273 | break; | ||
274 | |||
260 | controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12); | 275 | controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12); |
261 | 276 | ||
262 | if (!info && ctrl) { | 277 | if (!info && ctrl) { |
@@ -269,13 +284,18 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s | |||
269 | break; | 284 | break; |
270 | 285 | ||
271 | case CAPI_FUNCTION_GET_SERIAL_NUMBER: | 286 | case CAPI_FUNCTION_GET_SERIAL_NUMBER: |
287 | if (skb->len < CAPI_MSG_BASELEN + 17) | ||
288 | break; | ||
289 | |||
272 | controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12); | 290 | controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12); |
273 | 291 | ||
274 | if (!info && ctrl) { | 292 | if (!info && ctrl) { |
293 | int len = min_t(uint, CAPI_SERIAL_LEN, | ||
294 | skb->data[CAPI_MSG_BASELEN + 16]); | ||
295 | |||
275 | memset(ctrl->serial, 0, CAPI_SERIAL_LEN); | 296 | memset(ctrl->serial, 0, CAPI_SERIAL_LEN); |
276 | strncpy(ctrl->serial, | 297 | strncpy(ctrl->serial, |
277 | skb->data + CAPI_MSG_BASELEN + 17, | 298 | skb->data + CAPI_MSG_BASELEN + 17, len); |
278 | skb->data[CAPI_MSG_BASELEN + 16]); | ||
279 | } | 299 | } |
280 | 300 | ||
281 | break; | 301 | break; |
@@ -284,14 +304,18 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s | |||
284 | break; | 304 | break; |
285 | 305 | ||
286 | case CAPI_IND: | 306 | case CAPI_IND: |
307 | if (skb->len < CAPI_MSG_BASELEN + 6) | ||
308 | break; | ||
309 | |||
287 | func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3); | 310 | func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3); |
288 | 311 | ||
289 | if (func == CAPI_FUNCTION_LOOPBACK) { | 312 | if (func == CAPI_FUNCTION_LOOPBACK) { |
313 | int len = min_t(uint, skb->len - CAPI_MSG_BASELEN - 6, | ||
314 | skb->data[CAPI_MSG_BASELEN + 5]); | ||
290 | appl = CAPIMSG_APPID(skb->data); | 315 | appl = CAPIMSG_APPID(skb->data); |
291 | msgnum = CAPIMSG_MSGID(skb->data); | 316 | msgnum = CAPIMSG_MSGID(skb->data); |
292 | cmtp_send_interopmsg(session, CAPI_RESP, appl, msgnum, func, | 317 | cmtp_send_interopmsg(session, CAPI_RESP, appl, msgnum, func, |
293 | skb->data + CAPI_MSG_BASELEN + 6, | 318 | skb->data + CAPI_MSG_BASELEN + 6, len); |
294 | skb->data[CAPI_MSG_BASELEN + 5]); | ||
295 | } | 319 | } |
296 | 320 | ||
297 | break; | 321 | break; |
@@ -309,6 +333,9 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb) | |||
309 | 333 | ||
310 | BT_DBG("session %p skb %p len %d", session, skb, skb->len); | 334 | BT_DBG("session %p skb %p len %d", session, skb, skb->len); |
311 | 335 | ||
336 | if (skb->len < CAPI_MSG_BASELEN) | ||
337 | return; | ||
338 | |||
312 | if (CAPIMSG_COMMAND(skb->data) == CAPI_INTEROPERABILITY) { | 339 | if (CAPIMSG_COMMAND(skb->data) == CAPI_INTEROPERABILITY) { |
313 | cmtp_recv_interopmsg(session, skb); | 340 | cmtp_recv_interopmsg(session, skb); |
314 | return; | 341 | return; |
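The cmtp_recv_interopmsg() hunks above add an explicit skb->len check before each field read and clamp the strncpy lengths with min_t(), so a short or malformed CAPI message can no longer drive reads past the buffer. Below is a hedged user-space sketch of the same validate-then-clamp pattern; MSG_BASELEN, MANU_LEN and the field layout are assumptions for the example, not the real CAPI wire format, and the sketch additionally clamps to the bytes actually present.

/*
 * Validate-then-clamp: check the message is long enough before reading a
 * field, and bound string copies by both the destination size and the
 * payload that is really there.
 */
#include <stdio.h>
#include <string.h>

#define MSG_BASELEN 8
#define MANU_LEN    64

static void parse_manufacturer(const unsigned char *data, size_t len)
{
	char manu[MANU_LEN];
	size_t claimed, copy;

	/* Length byte at offset MSG_BASELEN + 14, string starts at + 15. */
	if (len < MSG_BASELEN + 15)
		return;			/* too short: ignore, don't read */

	claimed = data[MSG_BASELEN + 14];

	/* Never trust the embedded length: clamp to dest and to payload. */
	copy = claimed;
	if (copy > MANU_LEN - 1)
		copy = MANU_LEN - 1;
	if (copy > len - (MSG_BASELEN + 15))
		copy = len - (MSG_BASELEN + 15);

	memset(manu, 0, sizeof(manu));
	memcpy(manu, data + MSG_BASELEN + 15, copy);
	printf("manufacturer: %s\n", manu);
}

int main(void)
{
	unsigned char msg[MSG_BASELEN + 15 + 4] = { 0 };

	msg[MSG_BASELEN + 14] = 200;	/* bogus oversized length byte */
	memcpy(msg + MSG_BASELEN + 15, "ACME", 4);
	parse_manufacturer(msg, sizeof(msg));
	return 0;
}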
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index d4c935692ccf..801d687ea4ef 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -242,7 +242,7 @@ static void add_conn(struct work_struct *work) | |||
242 | struct hci_conn *conn = container_of(work, struct hci_conn, work); | 242 | struct hci_conn *conn = container_of(work, struct hci_conn, work); |
243 | int i; | 243 | int i; |
244 | 244 | ||
245 | if (device_register(&conn->dev) < 0) { | 245 | if (device_add(&conn->dev) < 0) { |
246 | BT_ERR("Failed to register connection device"); | 246 | BT_ERR("Failed to register connection device"); |
247 | return; | 247 | return; |
248 | } | 248 | } |
@@ -272,6 +272,8 @@ void hci_conn_add_sysfs(struct hci_conn *conn) | |||
272 | 272 | ||
273 | dev_set_drvdata(&conn->dev, conn); | 273 | dev_set_drvdata(&conn->dev, conn); |
274 | 274 | ||
275 | device_initialize(&conn->dev); | ||
276 | |||
275 | INIT_WORK(&conn->work, add_conn); | 277 | INIT_WORK(&conn->work, add_conn); |
276 | 278 | ||
277 | schedule_work(&conn->work); | 279 | schedule_work(&conn->work); |
@@ -287,6 +289,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn) | |||
287 | { | 289 | { |
288 | BT_DBG("conn %p", conn); | 290 | BT_DBG("conn %p", conn); |
289 | 291 | ||
292 | if (!device_is_registered(&conn->dev)) | ||
293 | return; | ||
294 | |||
290 | INIT_WORK(&conn->work, del_conn); | 295 | INIT_WORK(&conn->work, del_conn); |
291 | 296 | ||
292 | schedule_work(&conn->work); | 297 | schedule_work(&conn->work); |
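The hci_sysfs.c change above splits registration into device_initialize() when the connection object is set up and a deferred device_add() in the workqueue, with hci_conn_del_sysfs() bailing out via device_is_registered() if the add never ran. A user-space sketch of the same two-phase pattern follows (phase 1 makes the object internally consistent, phase 2 publishes it, teardown checks whether phase 2 happened); the struct and function names are invented for the illustration.

/*
 * Two-phase object setup: initialize so the object is always safe to
 * tear down or query, register later, and guard teardown on whether the
 * registration ever happened.
 */
#include <stdbool.h>
#include <stdio.h>

struct conn_dev {
	bool initialized;
	bool registered;
	const char *name;
};

static void dev_initialize(struct conn_dev *d, const char *name)
{
	d->name = name;
	d->initialized = true;
	d->registered = false;		/* not visible to anyone yet */
}

static int dev_add(struct conn_dev *d)
{
	if (!d->initialized)
		return -1;
	d->registered = true;		/* now published, e.g. in sysfs */
	printf("registered %s\n", d->name);
	return 0;
}

static void dev_del(struct conn_dev *d)
{
	/* Mirrors the device_is_registered() guard in the fix. */
	if (!d->registered) {
		printf("%s was never added, nothing to unregister\n", d->name);
		return;
	}
	d->registered = false;
	printf("unregistered %s\n", d->name);
}

int main(void)
{
	struct conn_dev c;

	dev_initialize(&c, "hci0:42");
	dev_del(&c);			/* add never ran; must not blow up */

	dev_initialize(&c, "hci0:43");
	dev_add(&c);
	dev_del(&c);
	return 0;
}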
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 544d65b7baa7..cb7e855f0828 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -557,7 +557,6 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
557 | struct sock *sk = sock->sk; | 557 | struct sock *sk = sock->sk; |
558 | struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; | 558 | struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; |
559 | struct sk_buff *skb; | 559 | struct sk_buff *skb; |
560 | int err; | ||
561 | int sent = 0; | 560 | int sent = 0; |
562 | 561 | ||
563 | if (msg->msg_flags & MSG_OOB) | 562 | if (msg->msg_flags & MSG_OOB) |
@@ -572,6 +571,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
572 | 571 | ||
573 | while (len) { | 572 | while (len) { |
574 | size_t size = min_t(size_t, len, d->mtu); | 573 | size_t size = min_t(size_t, len, d->mtu); |
574 | int err; | ||
575 | 575 | ||
576 | skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, | 576 | skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, |
577 | msg->msg_flags & MSG_DONTWAIT, &err); | 577 | msg->msg_flags & MSG_DONTWAIT, &err); |
@@ -582,13 +582,16 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
582 | err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); | 582 | err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); |
583 | if (err) { | 583 | if (err) { |
584 | kfree_skb(skb); | 584 | kfree_skb(skb); |
585 | sent = err; | 585 | if (sent == 0) |
586 | sent = err; | ||
586 | break; | 587 | break; |
587 | } | 588 | } |
588 | 589 | ||
589 | err = rfcomm_dlc_send(d, skb); | 590 | err = rfcomm_dlc_send(d, skb); |
590 | if (err < 0) { | 591 | if (err < 0) { |
591 | kfree_skb(skb); | 592 | kfree_skb(skb); |
593 | if (sent == 0) | ||
594 | sent = err; | ||
592 | break; | 595 | break; |
593 | } | 596 | } |
594 | 597 | ||
@@ -598,7 +601,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
598 | 601 | ||
599 | release_sock(sk); | 602 | release_sock(sk); |
600 | 603 | ||
601 | return sent ? sent : err; | 604 | return sent; |
602 | } | 605 | } |
603 | 606 | ||
604 | static long rfcomm_sock_data_wait(struct sock *sk, long timeo) | 607 | static long rfcomm_sock_data_wait(struct sock *sk, long timeo) |
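The rfcomm_sock_sendmsg() change above fixes partial-write semantics: once some bytes have gone out, a later allocation or copy failure must not overwrite the positive byte count, so the error is only recorded while sent is still zero and the function simply returns sent. A small user-space sketch of that convention; send_chunk() and the error value are stand-ins.

/*
 * "Return bytes sent, or the error only if nothing was sent."
 * send_chunk() simulates a mid-stream failure on the third chunk.
 */
#include <stdio.h>

#define EIO_STUB 5

static int send_chunk(int idx, int size)
{
	if (idx == 2)
		return -EIO_STUB;	/* simulated mid-stream failure */
	return size;
}

static int send_all(int len, int mtu)
{
	int sent = 0;

	while (len) {
		int size = len < mtu ? len : mtu;
		int err = send_chunk(sent / mtu, size);

		if (err < 0) {
			/* Keep the partial count if anything went out. */
			if (sent == 0)
				sent = err;
			break;
		}
		sent += err;
		len -= err;
	}
	return sent;
}

int main(void)
{
	printf("sent = %d\n", send_all(1000, 127));	/* partial count */
	printf("sent = %d\n", send_all(100, 127));	/* full transfer */
	return 0;
}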
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index e0e0d09023b2..eb2b52484c70 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -697,9 +697,13 @@ static int rfcomm_tty_write_room(struct tty_struct *tty) | |||
697 | 697 | ||
698 | BT_DBG("tty %p", tty); | 698 | BT_DBG("tty %p", tty); |
699 | 699 | ||
700 | if (!dev || !dev->dlc) | ||
701 | return 0; | ||
702 | |||
700 | room = rfcomm_room(dev->dlc) - atomic_read(&dev->wmem_alloc); | 703 | room = rfcomm_room(dev->dlc) - atomic_read(&dev->wmem_alloc); |
701 | if (room < 0) | 704 | if (room < 0) |
702 | room = 0; | 705 | room = 0; |
706 | |||
703 | return room; | 707 | return room; |
704 | } | 708 | } |
705 | 709 | ||
@@ -915,12 +919,14 @@ static void rfcomm_tty_unthrottle(struct tty_struct *tty) | |||
915 | static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) | 919 | static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) |
916 | { | 920 | { |
917 | struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; | 921 | struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; |
918 | struct rfcomm_dlc *dlc = dev->dlc; | ||
919 | 922 | ||
920 | BT_DBG("tty %p dev %p", tty, dev); | 923 | BT_DBG("tty %p dev %p", tty, dev); |
921 | 924 | ||
922 | if (!skb_queue_empty(&dlc->tx_queue)) | 925 | if (!dev || !dev->dlc) |
923 | return dlc->mtu; | 926 | return 0; |
927 | |||
928 | if (!skb_queue_empty(&dev->dlc->tx_queue)) | ||
929 | return dev->dlc->mtu; | ||
924 | 930 | ||
925 | return 0; | 931 | return 0; |
926 | } | 932 | } |
@@ -928,11 +934,12 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) | |||
928 | static void rfcomm_tty_flush_buffer(struct tty_struct *tty) | 934 | static void rfcomm_tty_flush_buffer(struct tty_struct *tty) |
929 | { | 935 | { |
930 | struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; | 936 | struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; |
931 | if (!dev) | ||
932 | return; | ||
933 | 937 | ||
934 | BT_DBG("tty %p dev %p", tty, dev); | 938 | BT_DBG("tty %p dev %p", tty, dev); |
935 | 939 | ||
940 | if (!dev || !dev->dlc) | ||
941 | return; | ||
942 | |||
936 | skb_queue_purge(&dev->dlc->tx_queue); | 943 | skb_queue_purge(&dev->dlc->tx_queue); |
937 | 944 | ||
938 | if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && tty->ldisc.write_wakeup) | 945 | if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && tty->ldisc.write_wakeup) |
@@ -952,11 +959,12 @@ static void rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) | |||
952 | static void rfcomm_tty_hangup(struct tty_struct *tty) | 959 | static void rfcomm_tty_hangup(struct tty_struct *tty) |
953 | { | 960 | { |
954 | struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; | 961 | struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; |
955 | if (!dev) | ||
956 | return; | ||
957 | 962 | ||
958 | BT_DBG("tty %p dev %p", tty, dev); | 963 | BT_DBG("tty %p dev %p", tty, dev); |
959 | 964 | ||
965 | if (!dev) | ||
966 | return; | ||
967 | |||
960 | rfcomm_tty_flush_buffer(tty); | 968 | rfcomm_tty_flush_buffer(tty); |
961 | 969 | ||
962 | if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) | 970 | if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index bee558a41800..6c84ccb8c9d7 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -610,7 +610,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, | |||
610 | struct ebt_entry_target *t; | 610 | struct ebt_entry_target *t; |
611 | struct ebt_target *target; | 611 | struct ebt_target *target; |
612 | unsigned int i, j, hook = 0, hookmask = 0; | 612 | unsigned int i, j, hook = 0, hookmask = 0; |
613 | size_t gap = e->next_offset - e->target_offset; | 613 | size_t gap; |
614 | int ret; | 614 | int ret; |
615 | 615 | ||
616 | /* don't mess with the struct ebt_entries */ | 616 | /* don't mess with the struct ebt_entries */ |
@@ -660,6 +660,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, | |||
660 | if (ret != 0) | 660 | if (ret != 0) |
661 | goto cleanup_watchers; | 661 | goto cleanup_watchers; |
662 | t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); | 662 | t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); |
663 | gap = e->next_offset - e->target_offset; | ||
663 | target = find_target_lock(t->u.name, &ret, &ebt_mutex); | 664 | target = find_target_lock(t->u.name, &ret, &ebt_mutex); |
664 | if (!target) | 665 | if (!target) |
665 | goto cleanup_watchers; | 666 | goto cleanup_watchers; |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 1897a3a385d8..04d4b93c68eb 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -148,6 +148,7 @@ | |||
148 | #include <linux/seq_file.h> | 148 | #include <linux/seq_file.h> |
149 | #include <linux/wait.h> | 149 | #include <linux/wait.h> |
150 | #include <linux/etherdevice.h> | 150 | #include <linux/etherdevice.h> |
151 | #include <linux/kthread.h> | ||
151 | #include <net/checksum.h> | 152 | #include <net/checksum.h> |
152 | #include <net/ipv6.h> | 153 | #include <net/ipv6.h> |
153 | #include <net/addrconf.h> | 154 | #include <net/addrconf.h> |
@@ -360,8 +361,7 @@ struct pktgen_thread { | |||
360 | spinlock_t if_lock; | 361 | spinlock_t if_lock; |
361 | struct list_head if_list; /* All device here */ | 362 | struct list_head if_list; /* All device here */ |
362 | struct list_head th_list; | 363 | struct list_head th_list; |
363 | int removed; | 364 | struct task_struct *tsk; |
364 | char name[32]; | ||
365 | char result[512]; | 365 | char result[512]; |
366 | u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */ | 366 | u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */ |
367 | 367 | ||
@@ -1689,7 +1689,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v) | |||
1689 | BUG_ON(!t); | 1689 | BUG_ON(!t); |
1690 | 1690 | ||
1691 | seq_printf(seq, "Name: %s max_before_softirq: %d\n", | 1691 | seq_printf(seq, "Name: %s max_before_softirq: %d\n", |
1692 | t->name, t->max_before_softirq); | 1692 | t->tsk->comm, t->max_before_softirq); |
1693 | 1693 | ||
1694 | seq_printf(seq, "Running: "); | 1694 | seq_printf(seq, "Running: "); |
1695 | 1695 | ||
@@ -3112,7 +3112,7 @@ static void pktgen_rem_thread(struct pktgen_thread *t) | |||
3112 | { | 3112 | { |
3113 | /* Remove from the thread list */ | 3113 | /* Remove from the thread list */ |
3114 | 3114 | ||
3115 | remove_proc_entry(t->name, pg_proc_dir); | 3115 | remove_proc_entry(t->tsk->comm, pg_proc_dir); |
3116 | 3116 | ||
3117 | mutex_lock(&pktgen_thread_lock); | 3117 | mutex_lock(&pktgen_thread_lock); |
3118 | 3118 | ||
@@ -3260,58 +3260,40 @@ out:; | |||
3260 | * Main loop of the thread goes here | 3260 | * Main loop of the thread goes here |
3261 | */ | 3261 | */ |
3262 | 3262 | ||
3263 | static void pktgen_thread_worker(struct pktgen_thread *t) | 3263 | static int pktgen_thread_worker(void *arg) |
3264 | { | 3264 | { |
3265 | DEFINE_WAIT(wait); | 3265 | DEFINE_WAIT(wait); |
3266 | struct pktgen_thread *t = arg; | ||
3266 | struct pktgen_dev *pkt_dev = NULL; | 3267 | struct pktgen_dev *pkt_dev = NULL; |
3267 | int cpu = t->cpu; | 3268 | int cpu = t->cpu; |
3268 | sigset_t tmpsig; | ||
3269 | u32 max_before_softirq; | 3269 | u32 max_before_softirq; |
3270 | u32 tx_since_softirq = 0; | 3270 | u32 tx_since_softirq = 0; |
3271 | 3271 | ||
3272 | daemonize("pktgen/%d", cpu); | 3272 | BUG_ON(smp_processor_id() != cpu); |
3273 | |||
3274 | /* Block all signals except SIGKILL, SIGSTOP and SIGTERM */ | ||
3275 | |||
3276 | spin_lock_irq(¤t->sighand->siglock); | ||
3277 | tmpsig = current->blocked; | ||
3278 | siginitsetinv(¤t->blocked, | ||
3279 | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGTERM)); | ||
3280 | |||
3281 | recalc_sigpending(); | ||
3282 | spin_unlock_irq(¤t->sighand->siglock); | ||
3283 | |||
3284 | /* Migrate to the right CPU */ | ||
3285 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
3286 | if (smp_processor_id() != cpu) | ||
3287 | BUG(); | ||
3288 | 3273 | ||
3289 | init_waitqueue_head(&t->queue); | 3274 | init_waitqueue_head(&t->queue); |
3290 | 3275 | ||
3291 | t->control &= ~(T_TERMINATE); | ||
3292 | t->control &= ~(T_RUN); | ||
3293 | t->control &= ~(T_STOP); | ||
3294 | t->control &= ~(T_REMDEVALL); | ||
3295 | t->control &= ~(T_REMDEV); | ||
3296 | |||
3297 | t->pid = current->pid; | 3276 | t->pid = current->pid; |
3298 | 3277 | ||
3299 | PG_DEBUG(printk("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid)); | 3278 | PG_DEBUG(printk("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid)); |
3300 | 3279 | ||
3301 | max_before_softirq = t->max_before_softirq; | 3280 | max_before_softirq = t->max_before_softirq; |
3302 | 3281 | ||
3303 | __set_current_state(TASK_INTERRUPTIBLE); | 3282 | set_current_state(TASK_INTERRUPTIBLE); |
3304 | mb(); | ||
3305 | 3283 | ||
3306 | while (1) { | 3284 | while (!kthread_should_stop()) { |
3307 | 3285 | pkt_dev = next_to_run(t); | |
3308 | __set_current_state(TASK_RUNNING); | ||
3309 | 3286 | ||
3310 | /* | 3287 | if (!pkt_dev && |
3311 | * Get next dev to xmit -- if any. | 3288 | (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV)) |
3312 | */ | 3289 | == 0) { |
3290 | prepare_to_wait(&(t->queue), &wait, | ||
3291 | TASK_INTERRUPTIBLE); | ||
3292 | schedule_timeout(HZ / 10); | ||
3293 | finish_wait(&(t->queue), &wait); | ||
3294 | } | ||
3313 | 3295 | ||
3314 | pkt_dev = next_to_run(t); | 3296 | __set_current_state(TASK_RUNNING); |
3315 | 3297 | ||
3316 | if (pkt_dev) { | 3298 | if (pkt_dev) { |
3317 | 3299 | ||
@@ -3329,21 +3311,8 @@ static void pktgen_thread_worker(struct pktgen_thread *t) | |||
3329 | do_softirq(); | 3311 | do_softirq(); |
3330 | tx_since_softirq = 0; | 3312 | tx_since_softirq = 0; |
3331 | } | 3313 | } |
3332 | } else { | ||
3333 | prepare_to_wait(&(t->queue), &wait, TASK_INTERRUPTIBLE); | ||
3334 | schedule_timeout(HZ / 10); | ||
3335 | finish_wait(&(t->queue), &wait); | ||
3336 | } | 3314 | } |
3337 | 3315 | ||
3338 | /* | ||
3339 | * Back from sleep, either due to the timeout or signal. | ||
3340 | * We check if we have any "posted" work for us. | ||
3341 | */ | ||
3342 | |||
3343 | if (t->control & T_TERMINATE || signal_pending(current)) | ||
3344 | /* we received a request to terminate ourself */ | ||
3345 | break; | ||
3346 | |||
3347 | if (t->control & T_STOP) { | 3316 | if (t->control & T_STOP) { |
3348 | pktgen_stop(t); | 3317 | pktgen_stop(t); |
3349 | t->control &= ~(T_STOP); | 3318 | t->control &= ~(T_STOP); |
@@ -3364,20 +3333,19 @@ static void pktgen_thread_worker(struct pktgen_thread *t) | |||
3364 | t->control &= ~(T_REMDEV); | 3333 | t->control &= ~(T_REMDEV); |
3365 | } | 3334 | } |
3366 | 3335 | ||
3367 | if (need_resched()) | 3336 | set_current_state(TASK_INTERRUPTIBLE); |
3368 | schedule(); | ||
3369 | } | 3337 | } |
3370 | 3338 | ||
3371 | PG_DEBUG(printk("pktgen: %s stopping all device\n", t->name)); | 3339 | PG_DEBUG(printk("pktgen: %s stopping all device\n", t->tsk->comm)); |
3372 | pktgen_stop(t); | 3340 | pktgen_stop(t); |
3373 | 3341 | ||
3374 | PG_DEBUG(printk("pktgen: %s removing all device\n", t->name)); | 3342 | PG_DEBUG(printk("pktgen: %s removing all device\n", t->tsk->comm)); |
3375 | pktgen_rem_all_ifs(t); | 3343 | pktgen_rem_all_ifs(t); |
3376 | 3344 | ||
3377 | PG_DEBUG(printk("pktgen: %s removing thread.\n", t->name)); | 3345 | PG_DEBUG(printk("pktgen: %s removing thread.\n", t->tsk->comm)); |
3378 | pktgen_rem_thread(t); | 3346 | pktgen_rem_thread(t); |
3379 | 3347 | ||
3380 | t->removed = 1; | 3348 | return 0; |
3381 | } | 3349 | } |
3382 | 3350 | ||
3383 | static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, | 3351 | static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, |
@@ -3495,37 +3463,11 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) | |||
3495 | return add_dev_to_thread(t, pkt_dev); | 3463 | return add_dev_to_thread(t, pkt_dev); |
3496 | } | 3464 | } |
3497 | 3465 | ||
3498 | static struct pktgen_thread *__init pktgen_find_thread(const char *name) | 3466 | static int __init pktgen_create_thread(int cpu) |
3499 | { | 3467 | { |
3500 | struct pktgen_thread *t; | 3468 | struct pktgen_thread *t; |
3501 | |||
3502 | mutex_lock(&pktgen_thread_lock); | ||
3503 | |||
3504 | list_for_each_entry(t, &pktgen_threads, th_list) | ||
3505 | if (strcmp(t->name, name) == 0) { | ||
3506 | mutex_unlock(&pktgen_thread_lock); | ||
3507 | return t; | ||
3508 | } | ||
3509 | |||
3510 | mutex_unlock(&pktgen_thread_lock); | ||
3511 | return NULL; | ||
3512 | } | ||
3513 | |||
3514 | static int __init pktgen_create_thread(const char *name, int cpu) | ||
3515 | { | ||
3516 | int err; | ||
3517 | struct pktgen_thread *t = NULL; | ||
3518 | struct proc_dir_entry *pe; | 3469 | struct proc_dir_entry *pe; |
3519 | 3470 | struct task_struct *p; | |
3520 | if (strlen(name) > 31) { | ||
3521 | printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n"); | ||
3522 | return -EINVAL; | ||
3523 | } | ||
3524 | |||
3525 | if (pktgen_find_thread(name)) { | ||
3526 | printk("pktgen: ERROR: thread: %s already exists\n", name); | ||
3527 | return -EINVAL; | ||
3528 | } | ||
3529 | 3471 | ||
3530 | t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); | 3472 | t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); |
3531 | if (!t) { | 3473 | if (!t) { |
@@ -3533,14 +3475,29 @@ static int __init pktgen_create_thread(const char *name, int cpu) | |||
3533 | return -ENOMEM; | 3475 | return -ENOMEM; |
3534 | } | 3476 | } |
3535 | 3477 | ||
3536 | strcpy(t->name, name); | ||
3537 | spin_lock_init(&t->if_lock); | 3478 | spin_lock_init(&t->if_lock); |
3538 | t->cpu = cpu; | 3479 | t->cpu = cpu; |
3539 | 3480 | ||
3540 | pe = create_proc_entry(t->name, 0600, pg_proc_dir); | 3481 | INIT_LIST_HEAD(&t->if_list); |
3482 | |||
3483 | list_add_tail(&t->th_list, &pktgen_threads); | ||
3484 | |||
3485 | p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu); | ||
3486 | if (IS_ERR(p)) { | ||
3487 | printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu); | ||
3488 | list_del(&t->th_list); | ||
3489 | kfree(t); | ||
3490 | return PTR_ERR(p); | ||
3491 | } | ||
3492 | kthread_bind(p, cpu); | ||
3493 | t->tsk = p; | ||
3494 | |||
3495 | pe = create_proc_entry(t->tsk->comm, 0600, pg_proc_dir); | ||
3541 | if (!pe) { | 3496 | if (!pe) { |
3542 | printk("pktgen: cannot create %s/%s procfs entry.\n", | 3497 | printk("pktgen: cannot create %s/%s procfs entry.\n", |
3543 | PG_PROC_DIR, t->name); | 3498 | PG_PROC_DIR, t->tsk->comm); |
3499 | kthread_stop(p); | ||
3500 | list_del(&t->th_list); | ||
3544 | kfree(t); | 3501 | kfree(t); |
3545 | return -EINVAL; | 3502 | return -EINVAL; |
3546 | } | 3503 | } |
@@ -3548,21 +3505,7 @@ static int __init pktgen_create_thread(const char *name, int cpu) | |||
3548 | pe->proc_fops = &pktgen_thread_fops; | 3505 | pe->proc_fops = &pktgen_thread_fops; |
3549 | pe->data = t; | 3506 | pe->data = t; |
3550 | 3507 | ||
3551 | INIT_LIST_HEAD(&t->if_list); | 3508 | wake_up_process(p); |
3552 | |||
3553 | list_add_tail(&t->th_list, &pktgen_threads); | ||
3554 | |||
3555 | t->removed = 0; | ||
3556 | |||
3557 | err = kernel_thread((void *)pktgen_thread_worker, (void *)t, | ||
3558 | CLONE_FS | CLONE_FILES | CLONE_SIGHAND); | ||
3559 | if (err < 0) { | ||
3560 | printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu); | ||
3561 | remove_proc_entry(t->name, pg_proc_dir); | ||
3562 | list_del(&t->th_list); | ||
3563 | kfree(t); | ||
3564 | return err; | ||
3565 | } | ||
3566 | 3509 | ||
3567 | return 0; | 3510 | return 0; |
3568 | } | 3511 | } |
@@ -3643,10 +3586,8 @@ static int __init pg_init(void) | |||
3643 | 3586 | ||
3644 | for_each_online_cpu(cpu) { | 3587 | for_each_online_cpu(cpu) { |
3645 | int err; | 3588 | int err; |
3646 | char buf[30]; | ||
3647 | 3589 | ||
3648 | sprintf(buf, "kpktgend_%i", cpu); | 3590 | err = pktgen_create_thread(cpu); |
3649 | err = pktgen_create_thread(buf, cpu); | ||
3650 | if (err) | 3591 | if (err) |
3651 | printk("pktgen: WARNING: Cannot create thread for cpu %d (%d)\n", | 3592 | printk("pktgen: WARNING: Cannot create thread for cpu %d (%d)\n", |
3652 | cpu, err); | 3593 | cpu, err); |
@@ -3674,9 +3615,8 @@ static void __exit pg_cleanup(void) | |||
3674 | 3615 | ||
3675 | list_for_each_safe(q, n, &pktgen_threads) { | 3616 | list_for_each_safe(q, n, &pktgen_threads) { |
3676 | t = list_entry(q, struct pktgen_thread, th_list); | 3617 | t = list_entry(q, struct pktgen_thread, th_list); |
3677 | t->control |= (T_TERMINATE); | 3618 | kthread_stop(t->tsk); |
3678 | 3619 | kfree(t); | |
3679 | wait_event_interruptible_timeout(queue, (t->removed == 1), HZ); | ||
3680 | } | 3620 | } |
3681 | 3621 | ||
3682 | /* Un-register us from receiving netdevice events */ | 3622 | /* Un-register us from receiving netdevice events */ |
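The pktgen rewrite above drops the hand-rolled kernel_thread()/daemonize() management (signal blocking, manual CPU migration, the T_TERMINATE flag and removed marker) in favour of the kthread API: kthread_create() plus kthread_bind() to pin each worker to its CPU, a kthread_should_stop() main loop, and kthread_stop() at module unload, with the thread name now taken from tsk->comm. Below is a condensed, hedged kernel-module-style sketch of that API shape; it illustrates the pattern only and is not the pktgen code itself.

/*
 * Minimal kthread usage sketch: create, bind to a CPU, loop on
 * kthread_should_stop(), stop from the cleanup path.  Error handling
 * and real work are trimmed for brevity.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *worker;

static int worker_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, or sleep briefly when idle */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}
	return 0;
}

static int __init demo_init(void)
{
	worker = kthread_create(worker_fn, NULL, "demo_worker/%d", 0);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_bind(worker, 0);	/* pin to CPU 0, as pktgen does per CPU */
	wake_up_process(worker);	/* kthread_create leaves it stopped */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);		/* wakes the thread and waits for exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");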
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1144900d37f6..d60fd7321e63 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -305,7 +305,7 @@ lookup_protocol: | |||
305 | sk->sk_reuse = 1; | 305 | sk->sk_reuse = 1; |
306 | 306 | ||
307 | inet = inet_sk(sk); | 307 | inet = inet_sk(sk); |
308 | inet->is_icsk = INET_PROTOSW_ICSK & answer_flags; | 308 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) == INET_PROTOSW_ICSK; |
309 | 309 | ||
310 | if (SOCK_RAW == sock->type) { | 310 | if (SOCK_RAW == sock->type) { |
311 | inet->num = protocol; | 311 | inet->num = protocol; |
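The af_inet.c one-liner above (and the matching af_inet6.c hunk further down) matters because is_icsk is a one-bit field: assigning the raw masked flag value truncates any flag bit above bit 0 to zero, so the comparison form is needed to get a clean 0/1. A small user-space sketch demonstrates the truncation; the 0x4 flag value is an assumption for the example.

/*
 * Why "(flags & FLAG) == FLAG" rather than a plain masked assignment:
 * storing the raw mask result into a 1-bit field silently truncates it.
 */
#include <stdio.h>

#define PROTOSW_ICSK 0x4

struct sock_stub {
	unsigned char is_icsk:1;
};

int main(void)
{
	struct sock_stub a, b;
	unsigned int flags = PROTOSW_ICSK;

	a.is_icsk = flags & PROTOSW_ICSK;			/* 0x4 truncated to 0 */
	b.is_icsk = (flags & PROTOSW_ICSK) == PROTOSW_ICSK;	/* proper 0/1 */

	printf("truncated: %d  compared: %d\n", a.is_icsk, b.is_icsk);
	return 0;
}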
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 84bed40273ad..25c8a42965df 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -165,9 +165,8 @@ struct in_device *inetdev_init(struct net_device *dev) | |||
165 | NET_IPV4_NEIGH, "ipv4", NULL, NULL); | 165 | NET_IPV4_NEIGH, "ipv4", NULL, NULL); |
166 | #endif | 166 | #endif |
167 | 167 | ||
168 | /* Account for reference dev->ip_ptr */ | 168 | /* Account for reference dev->ip_ptr (below) */ |
169 | in_dev_hold(in_dev); | 169 | in_dev_hold(in_dev); |
170 | rcu_assign_pointer(dev->ip_ptr, in_dev); | ||
171 | 170 | ||
172 | #ifdef CONFIG_SYSCTL | 171 | #ifdef CONFIG_SYSCTL |
173 | devinet_sysctl_register(in_dev, &in_dev->cnf); | 172 | devinet_sysctl_register(in_dev, &in_dev->cnf); |
@@ -176,6 +175,8 @@ struct in_device *inetdev_init(struct net_device *dev) | |||
176 | if (dev->flags & IFF_UP) | 175 | if (dev->flags & IFF_UP) |
177 | ip_mc_up(in_dev); | 176 | ip_mc_up(in_dev); |
178 | out: | 177 | out: |
178 | /* we can receive as soon as ip_ptr is set -- do this last */ | ||
179 | rcu_assign_pointer(dev->ip_ptr, in_dev); | ||
179 | return in_dev; | 180 | return in_dev; |
180 | out_kfree: | 181 | out_kfree: |
181 | kfree(in_dev); | 182 | kfree(in_dev); |
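Both the devinet.c hunk above and the addrconf.c hunk further down move rcu_assign_pointer() to the very end of initialization: the receive path can dereference dev->ip_ptr / dev->ip6_ptr as soon as the pointer becomes visible, so the structure must be fully set up before it is published. The following is a hedged user-space analogue of that publish-last rule using C11 atomics in place of the RCU primitives; the struct and field names are placeholders.

/*
 * Initialize fully, then publish: the store that makes the object
 * reachable is a release store and is the last thing done, so readers
 * that see the pointer also see the completed setup.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct in_dev_stub {
	int refcount;
	int mc_ready;
};

static _Atomic(struct in_dev_stub *) ip_ptr;	/* what readers look up */

static struct in_dev_stub *indev_init(void)
{
	struct in_dev_stub *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;

	/* Finish *all* setup before anyone can see the pointer. */
	d->refcount = 1;
	d->mc_ready = 1;

	/* Publish last; release ordering keeps the init stores before it. */
	atomic_store_explicit(&ip_ptr, d, memory_order_release);
	return d;
}

static void reader(void)
{
	struct in_dev_stub *d =
		atomic_load_explicit(&ip_ptr, memory_order_acquire);

	if (d)
		printf("reader sees mc_ready=%d\n", d->mc_ready);
}

int main(void)
{
	indev_init();
	reader();	/* in the kernel this would be the receive path */
	return 0;
}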
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index a68966059b50..c47ce7076bd5 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -15,16 +15,19 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) | |||
15 | struct flowi fl = {}; | 15 | struct flowi fl = {}; |
16 | struct dst_entry *odst; | 16 | struct dst_entry *odst; |
17 | unsigned int hh_len; | 17 | unsigned int hh_len; |
18 | unsigned int type; | ||
18 | 19 | ||
20 | type = inet_addr_type(iph->saddr); | ||
19 | if (addr_type == RTN_UNSPEC) | 21 | if (addr_type == RTN_UNSPEC) |
20 | addr_type = inet_addr_type(iph->saddr); | 22 | addr_type = type; |
21 | 23 | ||
22 | /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause | 24 | /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause |
23 | * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook. | 25 | * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook. |
24 | */ | 26 | */ |
25 | if (addr_type == RTN_LOCAL) { | 27 | if (addr_type == RTN_LOCAL) { |
26 | fl.nl_u.ip4_u.daddr = iph->daddr; | 28 | fl.nl_u.ip4_u.daddr = iph->daddr; |
27 | fl.nl_u.ip4_u.saddr = iph->saddr; | 29 | if (type == RTN_LOCAL) |
30 | fl.nl_u.ip4_u.saddr = iph->saddr; | ||
28 | fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); | 31 | fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); |
29 | fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0; | 32 | fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0; |
30 | fl.mark = (*pskb)->mark; | 33 | fl.mark = (*pskb)->mark; |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index f6026d4ac428..47bd3ad18b71 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
@@ -6,8 +6,8 @@ menu "IP: Netfilter Configuration" | |||
6 | depends on INET && NETFILTER | 6 | depends on INET && NETFILTER |
7 | 7 | ||
8 | config NF_CONNTRACK_IPV4 | 8 | config NF_CONNTRACK_IPV4 |
9 | tristate "IPv4 connection tracking support (required for NAT) (EXPERIMENTAL)" | 9 | tristate "IPv4 connection tracking support (required for NAT)" |
10 | depends on EXPERIMENTAL && NF_CONNTRACK | 10 | depends on NF_CONNTRACK |
11 | ---help--- | 11 | ---help--- |
12 | Connection tracking keeps a record of what packets have passed | 12 | Connection tracking keeps a record of what packets have passed |
13 | through your machine, in order to figure out how they are related | 13 | through your machine, in order to figure out how they are related |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 09696f16aa95..fc1f153c86ba 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -919,13 +919,13 @@ copy_entries_to_user(unsigned int total_size, | |||
919 | #ifdef CONFIG_COMPAT | 919 | #ifdef CONFIG_COMPAT |
920 | struct compat_delta { | 920 | struct compat_delta { |
921 | struct compat_delta *next; | 921 | struct compat_delta *next; |
922 | u_int16_t offset; | 922 | unsigned int offset; |
923 | short delta; | 923 | short delta; |
924 | }; | 924 | }; |
925 | 925 | ||
926 | static struct compat_delta *compat_offsets = NULL; | 926 | static struct compat_delta *compat_offsets = NULL; |
927 | 927 | ||
928 | static int compat_add_offset(u_int16_t offset, short delta) | 928 | static int compat_add_offset(unsigned int offset, short delta) |
929 | { | 929 | { |
930 | struct compat_delta *tmp; | 930 | struct compat_delta *tmp; |
931 | 931 | ||
@@ -957,7 +957,7 @@ static void compat_flush_offsets(void) | |||
957 | } | 957 | } |
958 | } | 958 | } |
959 | 959 | ||
960 | static short compat_calc_jump(u_int16_t offset) | 960 | static short compat_calc_jump(unsigned int offset) |
961 | { | 961 | { |
962 | struct compat_delta *tmp; | 962 | struct compat_delta *tmp; |
963 | short delta; | 963 | short delta; |
@@ -997,7 +997,7 @@ static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info, | |||
997 | void *base, struct xt_table_info *newinfo) | 997 | void *base, struct xt_table_info *newinfo) |
998 | { | 998 | { |
999 | struct ipt_entry_target *t; | 999 | struct ipt_entry_target *t; |
1000 | u_int16_t entry_offset; | 1000 | unsigned int entry_offset; |
1001 | int off, i, ret; | 1001 | int off, i, ret; |
1002 | 1002 | ||
1003 | off = 0; | 1003 | off = 0; |
@@ -1467,7 +1467,7 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e, | |||
1467 | { | 1467 | { |
1468 | struct ipt_entry_target *t; | 1468 | struct ipt_entry_target *t; |
1469 | struct ipt_target *target; | 1469 | struct ipt_target *target; |
1470 | u_int16_t entry_offset; | 1470 | unsigned int entry_offset; |
1471 | int ret, off, h, j; | 1471 | int ret, off, h, j; |
1472 | 1472 | ||
1473 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1473 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 28b9233956b5..d669685afd04 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -127,10 +127,13 @@ masquerade_target(struct sk_buff **pskb, | |||
127 | static inline int | 127 | static inline int |
128 | device_cmp(struct ip_conntrack *i, void *ifindex) | 128 | device_cmp(struct ip_conntrack *i, void *ifindex) |
129 | { | 129 | { |
130 | int ret; | ||
130 | #ifdef CONFIG_NF_NAT_NEEDED | 131 | #ifdef CONFIG_NF_NAT_NEEDED |
131 | struct nf_conn_nat *nat = nfct_nat(i); | 132 | struct nf_conn_nat *nat = nfct_nat(i); |
133 | |||
134 | if (!nat) | ||
135 | return 0; | ||
132 | #endif | 136 | #endif |
133 | int ret; | ||
134 | 137 | ||
135 | read_lock_bh(&masq_lock); | 138 | read_lock_bh(&masq_lock); |
136 | #ifdef CONFIG_NF_NAT_NEEDED | 139 | #ifdef CONFIG_NF_NAT_NEEDED |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bf7a22412bcb..12de90a5047c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -648,7 +648,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, | |||
648 | TCPOLEN_TIMESTAMP); | 648 | TCPOLEN_TIMESTAMP); |
649 | rep.opt[1] = htonl(tcp_time_stamp); | 649 | rep.opt[1] = htonl(tcp_time_stamp); |
650 | rep.opt[2] = htonl(ts); | 650 | rep.opt[2] = htonl(ts); |
651 | arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED; | 651 | arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED; |
652 | } | 652 | } |
653 | 653 | ||
654 | /* Swap the send and the receive. */ | 654 | /* Swap the send and the receive. */ |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 9b0a90643151..171e5b55d7d6 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -413,8 +413,6 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
413 | if (netif_carrier_ok(dev)) | 413 | if (netif_carrier_ok(dev)) |
414 | ndev->if_flags |= IF_READY; | 414 | ndev->if_flags |= IF_READY; |
415 | 415 | ||
416 | /* protected by rtnl_lock */ | ||
417 | rcu_assign_pointer(dev->ip6_ptr, ndev); | ||
418 | 416 | ||
419 | ipv6_mc_init_dev(ndev); | 417 | ipv6_mc_init_dev(ndev); |
420 | ndev->tstamp = jiffies; | 418 | ndev->tstamp = jiffies; |
@@ -425,6 +423,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
425 | NULL); | 423 | NULL); |
426 | addrconf_sysctl_register(ndev, &ndev->cnf); | 424 | addrconf_sysctl_register(ndev, &ndev->cnf); |
427 | #endif | 425 | #endif |
426 | /* protected by rtnl_lock */ | ||
427 | rcu_assign_pointer(dev->ip6_ptr, ndev); | ||
428 | return ndev; | 428 | return ndev; |
429 | } | 429 | } |
430 | 430 | ||
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e5cd83b2205d..832a5e6e2d7e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -171,7 +171,7 @@ lookup_protocol: | |||
171 | sk->sk_reuse = 1; | 171 | sk->sk_reuse = 1; |
172 | 172 | ||
173 | inet = inet_sk(sk); | 173 | inet = inet_sk(sk); |
174 | inet->is_icsk = INET_PROTOSW_ICSK & answer_flags; | 174 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) == INET_PROTOSW_ICSK; |
175 | 175 | ||
176 | if (SOCK_RAW == sock->type) { | 176 | if (SOCK_RAW == sock->type) { |
177 | inet->num = protocol; | 177 | inet->num = protocol; |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 1b853c34d301..cd10e44db015 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -44,8 +44,7 @@ choice | |||
44 | depends on NF_CONNTRACK_ENABLED | 44 | depends on NF_CONNTRACK_ENABLED |
45 | 45 | ||
46 | config NF_CONNTRACK_SUPPORT | 46 | config NF_CONNTRACK_SUPPORT |
47 | bool "Layer 3 Independent Connection tracking (EXPERIMENTAL)" | 47 | bool "Layer 3 Independent Connection tracking" |
48 | depends on EXPERIMENTAL | ||
49 | help | 48 | help |
50 | Layer 3 independent connection tracking is experimental scheme | 49 | Layer 3 independent connection tracking is experimental scheme |
51 | which generalize ip_conntrack to support other layer 3 protocols. | 50 | which generalize ip_conntrack to support other layer 3 protocols. |
@@ -122,7 +121,7 @@ config NF_CONNTRACK_EVENTS | |||
122 | 121 | ||
123 | config NF_CT_PROTO_GRE | 122 | config NF_CT_PROTO_GRE |
124 | tristate | 123 | tristate |
125 | depends on EXPERIMENTAL && NF_CONNTRACK | 124 | depends on NF_CONNTRACK |
126 | 125 | ||
127 | config NF_CT_PROTO_SCTP | 126 | config NF_CT_PROTO_SCTP |
128 | tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)' | 127 | tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)' |
@@ -136,8 +135,8 @@ config NF_CT_PROTO_SCTP | |||
136 | Documentation/modules.txt. If unsure, say `N'. | 135 | Documentation/modules.txt. If unsure, say `N'. |
137 | 136 | ||
138 | config NF_CONNTRACK_AMANDA | 137 | config NF_CONNTRACK_AMANDA |
139 | tristate "Amanda backup protocol support (EXPERIMENTAL)" | 138 | tristate "Amanda backup protocol support" |
140 | depends on EXPERIMENTAL && NF_CONNTRACK | 139 | depends on NF_CONNTRACK |
141 | select TEXTSEARCH | 140 | select TEXTSEARCH |
142 | select TEXTSEARCH_KMP | 141 | select TEXTSEARCH_KMP |
143 | help | 142 | help |
@@ -151,8 +150,8 @@ config NF_CONNTRACK_AMANDA | |||
151 | To compile it as a module, choose M here. If unsure, say N. | 150 | To compile it as a module, choose M here. If unsure, say N. |
152 | 151 | ||
153 | config NF_CONNTRACK_FTP | 152 | config NF_CONNTRACK_FTP |
154 | tristate "FTP protocol support (EXPERIMENTAL)" | 153 | tristate "FTP protocol support" |
155 | depends on EXPERIMENTAL && NF_CONNTRACK | 154 | depends on NF_CONNTRACK |
156 | help | 155 | help |
157 | Tracking FTP connections is problematic: special helpers are | 156 | Tracking FTP connections is problematic: special helpers are |
158 | required for tracking them, and doing masquerading and other forms | 157 | required for tracking them, and doing masquerading and other forms |
@@ -184,8 +183,8 @@ config NF_CONNTRACK_H323 | |||
184 | To compile it as a module, choose M here. If unsure, say N. | 183 | To compile it as a module, choose M here. If unsure, say N. |
185 | 184 | ||
186 | config NF_CONNTRACK_IRC | 185 | config NF_CONNTRACK_IRC |
187 | tristate "IRC protocol support (EXPERIMENTAL)" | 186 | tristate "IRC protocol support" |
188 | depends on EXPERIMENTAL && NF_CONNTRACK | 187 | depends on NF_CONNTRACK |
189 | help | 188 | help |
190 | There is a commonly-used extension to IRC called | 189 | There is a commonly-used extension to IRC called |
191 | Direct Client-to-Client Protocol (DCC). This enables users to send | 190 | Direct Client-to-Client Protocol (DCC). This enables users to send |
@@ -218,8 +217,8 @@ config NF_CONNTRACK_NETBIOS_NS | |||
218 | To compile it as a module, choose M here. If unsure, say N. | 217 | To compile it as a module, choose M here. If unsure, say N. |
219 | 218 | ||
220 | config NF_CONNTRACK_PPTP | 219 | config NF_CONNTRACK_PPTP |
221 | tristate "PPtP protocol support (EXPERIMENTAL)" | 220 | tristate "PPtP protocol support" |
222 | depends on EXPERIMENTAL && NF_CONNTRACK | 221 | depends on NF_CONNTRACK |
223 | select NF_CT_PROTO_GRE | 222 | select NF_CT_PROTO_GRE |
224 | help | 223 | help |
225 | This module adds support for PPTP (Point to Point Tunnelling | 224 | This module adds support for PPTP (Point to Point Tunnelling |
@@ -249,8 +248,8 @@ config NF_CONNTRACK_SIP | |||
249 | To compile it as a module, choose M here. If unsure, say N. | 248 | To compile it as a module, choose M here. If unsure, say N. |
250 | 249 | ||
251 | config NF_CONNTRACK_TFTP | 250 | config NF_CONNTRACK_TFTP |
252 | tristate "TFTP protocol support (EXPERIMENTAL)" | 251 | tristate "TFTP protocol support" |
253 | depends on EXPERIMENTAL && NF_CONNTRACK | 252 | depends on NF_CONNTRACK |
254 | help | 253 | help |
255 | TFTP connection tracking helper, this is required depending | 254 | TFTP connection tracking helper, this is required depending |
256 | on how restrictive your ruleset is. | 255 | on how restrictive your ruleset is. |
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index a5b234e444dc..2a48efdf0d67 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c | |||
@@ -89,6 +89,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff, | |||
89 | 89 | ||
90 | exp->expectfn = NULL; | 90 | exp->expectfn = NULL; |
91 | exp->flags = NF_CT_EXPECT_PERMANENT; | 91 | exp->flags = NF_CT_EXPECT_PERMANENT; |
92 | exp->helper = NULL; | ||
92 | 93 | ||
93 | nf_conntrack_expect_related(exp); | 94 | nf_conntrack_expect_related(exp); |
94 | nf_conntrack_expect_put(exp); | 95 | nf_conntrack_expect_put(exp); |
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index a5a6e192ac2d..f28bf69d3d42 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -745,7 +745,7 @@ static int __init xt_hashlimit_init(void) | |||
745 | } | 745 | } |
746 | hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", proc_net); | 746 | hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", proc_net); |
747 | if (!hashlimit_procdir6) { | 747 | if (!hashlimit_procdir6) { |
748 | printk(KERN_ERR "xt_hashlimit: tnable to create proc dir " | 748 | printk(KERN_ERR "xt_hashlimit: unable to create proc dir " |
749 | "entry\n"); | 749 | "entry\n"); |
750 | goto err4; | 750 | goto err4; |
751 | } | 751 | } |
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index 4afc75f9e377..73e0ff469bff 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c | |||
@@ -130,12 +130,12 @@ static int netlbl_cipsov4_add_common(struct genl_info *info, | |||
130 | 130 | ||
131 | nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem) | 131 | nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem) |
132 | if (nla->nla_type == NLBL_CIPSOV4_A_TAG) { | 132 | if (nla->nla_type == NLBL_CIPSOV4_A_TAG) { |
133 | if (iter > CIPSO_V4_TAG_MAXCNT) | 133 | if (iter >= CIPSO_V4_TAG_MAXCNT) |
134 | return -EINVAL; | 134 | return -EINVAL; |
135 | doi_def->tags[iter++] = nla_get_u8(nla); | 135 | doi_def->tags[iter++] = nla_get_u8(nla); |
136 | } | 136 | } |
137 | if (iter < CIPSO_V4_TAG_MAXCNT) | 137 | while (iter < CIPSO_V4_TAG_MAXCNT) |
138 | doi_def->tags[iter] = CIPSO_V4_TAG_INVALID; | 138 | doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID; |
139 | 139 | ||
140 | return 0; | 140 | return 0; |
141 | } | 141 | } |
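The netlabel_cipso_v4.c fix above has two parts: the bound check becomes iter >= CIPSO_V4_TAG_MAXCNT so the write at tags[iter] can never land one slot past the array, and the single trailing assignment becomes a loop so every unused slot is marked invalid rather than only the first. A small user-space sketch of the corrected fill logic; TAG_MAXCNT and TAG_INVALID are stand-ins for the kernel constants.

/*
 * Corrected tag-array fill: reject the write once the index reaches the
 * array size (>=, not >), and pad all remaining slots with the invalid
 * marker.
 */
#include <stdio.h>

#define TAG_MAXCNT  5
#define TAG_INVALID 31

static int fill_tags(unsigned char *tags, const unsigned char *src, int n)
{
	int iter = 0, i;

	for (i = 0; i < n; i++) {
		if (iter >= TAG_MAXCNT)	/* '>' here would allow tags[TAG_MAXCNT] */
			return -1;
		tags[iter++] = src[i];
	}

	while (iter < TAG_MAXCNT)	/* pad every remaining slot */
		tags[iter++] = TAG_INVALID;
	return 0;
}

int main(void)
{
	unsigned char tags[TAG_MAXCNT];
	unsigned char src[] = { 1, 2 };
	int i;

	if (fill_tags(tags, src, 2) == 0)
		for (i = 0; i < TAG_MAXCNT; i++)
			printf("tags[%d] = %d\n", i, tags[i]);
	return 0;
}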
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 276131fe56dd..383dd4e82ee1 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -472,8 +472,7 @@ static int netlink_release(struct socket *sock) | |||
472 | NETLINK_URELEASE, &n); | 472 | NETLINK_URELEASE, &n); |
473 | } | 473 | } |
474 | 474 | ||
475 | if (nlk->module) | 475 | module_put(nlk->module); |
476 | module_put(nlk->module); | ||
477 | 476 | ||
478 | netlink_table_grab(); | 477 | netlink_table_grab(); |
479 | if (nlk->flags & NETLINK_KERNEL_SOCKET) { | 478 | if (nlk->flags & NETLINK_KERNEL_SOCKET) { |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 52a2726d327f..b5c80b189902 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -484,8 +484,6 @@ out: | |||
484 | return sk; | 484 | return sk; |
485 | } | 485 | } |
486 | 486 | ||
487 | void x25_init_timers(struct sock *sk); | ||
488 | |||
489 | static int x25_create(struct socket *sock, int protocol) | 487 | static int x25_create(struct socket *sock, int protocol) |
490 | { | 488 | { |
491 | struct sock *sk; | 489 | struct sock *sk; |
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 9f42b9c9de37..27f5cc7966f6 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
@@ -254,7 +254,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, | |||
254 | * They want reverse charging, we won't accept it. | 254 | * They want reverse charging, we won't accept it. |
255 | */ | 255 | */ |
256 | if ((theirs.reverse & 0x01 ) && (ours->reverse & 0x01)) { | 256 | if ((theirs.reverse & 0x01 ) && (ours->reverse & 0x01)) { |
257 | SOCK_DEBUG(sk, "X.25: rejecting reverse charging request"); | 257 | SOCK_DEBUG(sk, "X.25: rejecting reverse charging request\n"); |
258 | return -1; | 258 | return -1; |
259 | } | 259 | } |
260 | 260 | ||
@@ -262,29 +262,29 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, | |||
262 | 262 | ||
263 | if (theirs.throughput) { | 263 | if (theirs.throughput) { |
264 | if (theirs.throughput < ours->throughput) { | 264 | if (theirs.throughput < ours->throughput) { |
265 | SOCK_DEBUG(sk, "X.25: throughput negotiated down"); | 265 | SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); |
266 | new->throughput = theirs.throughput; | 266 | new->throughput = theirs.throughput; |
267 | } | 267 | } |
268 | } | 268 | } |
269 | 269 | ||
270 | if (theirs.pacsize_in && theirs.pacsize_out) { | 270 | if (theirs.pacsize_in && theirs.pacsize_out) { |
271 | if (theirs.pacsize_in < ours->pacsize_in) { | 271 | if (theirs.pacsize_in < ours->pacsize_in) { |
272 | SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down"); | 272 | SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down\n"); |
273 | new->pacsize_in = theirs.pacsize_in; | 273 | new->pacsize_in = theirs.pacsize_in; |
274 | } | 274 | } |
275 | if (theirs.pacsize_out < ours->pacsize_out) { | 275 | if (theirs.pacsize_out < ours->pacsize_out) { |
276 | SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down"); | 276 | SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down\n"); |
277 | new->pacsize_out = theirs.pacsize_out; | 277 | new->pacsize_out = theirs.pacsize_out; |
278 | } | 278 | } |
279 | } | 279 | } |
280 | 280 | ||
281 | if (theirs.winsize_in && theirs.winsize_out) { | 281 | if (theirs.winsize_in && theirs.winsize_out) { |
282 | if (theirs.winsize_in < ours->winsize_in) { | 282 | if (theirs.winsize_in < ours->winsize_in) { |
283 | SOCK_DEBUG(sk, "X.25: window size inwards negotiated down"); | 283 | SOCK_DEBUG(sk, "X.25: window size inwards negotiated down\n"); |
284 | new->winsize_in = theirs.winsize_in; | 284 | new->winsize_in = theirs.winsize_in; |
285 | } | 285 | } |
286 | if (theirs.winsize_out < ours->winsize_out) { | 286 | if (theirs.winsize_out < ours->winsize_out) { |
287 | SOCK_DEBUG(sk, "X.25: window size outwards negotiated down"); | 287 | SOCK_DEBUG(sk, "X.25: window size outwards negotiated down\n"); |
288 | new->winsize_out = theirs.winsize_out; | 288 | new->winsize_out = theirs.winsize_out; |
289 | } | 289 | } |
290 | } | 290 | } |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e5372b11fc8f..82f36d396fca 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -434,18 +434,19 @@ error_no_put: | |||
434 | return NULL; | 434 | return NULL; |
435 | } | 435 | } |
436 | 436 | ||
437 | static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 437 | static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, |
438 | struct rtattr **xfrma) | ||
438 | { | 439 | { |
439 | struct xfrm_usersa_info *p = NLMSG_DATA(nlh); | 440 | struct xfrm_usersa_info *p = NLMSG_DATA(nlh); |
440 | struct xfrm_state *x; | 441 | struct xfrm_state *x; |
441 | int err; | 442 | int err; |
442 | struct km_event c; | 443 | struct km_event c; |
443 | 444 | ||
444 | err = verify_newsa_info(p, (struct rtattr **)xfrma); | 445 | err = verify_newsa_info(p, xfrma); |
445 | if (err) | 446 | if (err) |
446 | return err; | 447 | return err; |
447 | 448 | ||
448 | x = xfrm_state_construct(p, (struct rtattr **)xfrma, &err); | 449 | x = xfrm_state_construct(p, xfrma, &err); |
449 | if (!x) | 450 | if (!x) |
450 | return err; | 451 | return err; |
451 | 452 | ||
@@ -507,14 +508,15 @@ static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p, | |||
507 | return x; | 508 | return x; |
508 | } | 509 | } |
509 | 510 | ||
510 | static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 511 | static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, |
512 | struct rtattr **xfrma) | ||
511 | { | 513 | { |
512 | struct xfrm_state *x; | 514 | struct xfrm_state *x; |
513 | int err = -ESRCH; | 515 | int err = -ESRCH; |
514 | struct km_event c; | 516 | struct km_event c; |
515 | struct xfrm_usersa_id *p = NLMSG_DATA(nlh); | 517 | struct xfrm_usersa_id *p = NLMSG_DATA(nlh); |
516 | 518 | ||
517 | x = xfrm_user_state_lookup(p, (struct rtattr **)xfrma, &err); | 519 | x = xfrm_user_state_lookup(p, xfrma, &err); |
518 | if (x == NULL) | 520 | if (x == NULL) |
519 | return err; | 521 | return err; |
520 | 522 | ||
@@ -672,14 +674,15 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, | |||
672 | return skb; | 674 | return skb; |
673 | } | 675 | } |
674 | 676 | ||
675 | static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 677 | static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, |
678 | struct rtattr **xfrma) | ||
676 | { | 679 | { |
677 | struct xfrm_usersa_id *p = NLMSG_DATA(nlh); | 680 | struct xfrm_usersa_id *p = NLMSG_DATA(nlh); |
678 | struct xfrm_state *x; | 681 | struct xfrm_state *x; |
679 | struct sk_buff *resp_skb; | 682 | struct sk_buff *resp_skb; |
680 | int err = -ESRCH; | 683 | int err = -ESRCH; |
681 | 684 | ||
682 | x = xfrm_user_state_lookup(p, (struct rtattr **)xfrma, &err); | 685 | x = xfrm_user_state_lookup(p, xfrma, &err); |
683 | if (x == NULL) | 686 | if (x == NULL) |
684 | goto out_noput; | 687 | goto out_noput; |
685 | 688 | ||
@@ -718,7 +721,8 @@ static int verify_userspi_info(struct xfrm_userspi_info *p) | |||
718 | return 0; | 721 | return 0; |
719 | } | 722 | } |
720 | 723 | ||
721 | static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 724 | static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, |
725 | struct rtattr **xfrma) | ||
722 | { | 726 | { |
723 | struct xfrm_state *x; | 727 | struct xfrm_state *x; |
724 | struct xfrm_userspi_info *p; | 728 | struct xfrm_userspi_info *p; |
@@ -1013,7 +1017,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, | |||
1013 | return NULL; | 1017 | return NULL; |
1014 | } | 1018 | } |
1015 | 1019 | ||
1016 | static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1020 | static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, |
1021 | struct rtattr **xfrma) | ||
1017 | { | 1022 | { |
1018 | struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh); | 1023 | struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh); |
1019 | struct xfrm_policy *xp; | 1024 | struct xfrm_policy *xp; |
@@ -1024,11 +1029,11 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr | |||
1024 | err = verify_newpolicy_info(p); | 1029 | err = verify_newpolicy_info(p); |
1025 | if (err) | 1030 | if (err) |
1026 | return err; | 1031 | return err; |
1027 | err = verify_sec_ctx_len((struct rtattr **)xfrma); | 1032 | err = verify_sec_ctx_len(xfrma); |
1028 | if (err) | 1033 | if (err) |
1029 | return err; | 1034 | return err; |
1030 | 1035 | ||
1031 | xp = xfrm_policy_construct(p, (struct rtattr **)xfrma, &err); | 1036 | xp = xfrm_policy_construct(p, xfrma, &err); |
1032 | if (!xp) | 1037 | if (!xp) |
1033 | return err; | 1038 | return err; |
1034 | 1039 | ||
@@ -1227,7 +1232,8 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, | |||
1227 | return skb; | 1232 | return skb; |
1228 | } | 1233 | } |
1229 | 1234 | ||
1230 | static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1235 | static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, |
1236 | struct rtattr **xfrma) | ||
1231 | { | 1237 | { |
1232 | struct xfrm_policy *xp; | 1238 | struct xfrm_policy *xp; |
1233 | struct xfrm_userpolicy_id *p; | 1239 | struct xfrm_userpolicy_id *p; |
@@ -1239,7 +1245,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr | |||
1239 | p = NLMSG_DATA(nlh); | 1245 | p = NLMSG_DATA(nlh); |
1240 | delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; | 1246 | delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; |
1241 | 1247 | ||
1242 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); | 1248 | err = copy_from_user_policy_type(&type, xfrma); |
1243 | if (err) | 1249 | if (err) |
1244 | return err; | 1250 | return err; |
1245 | 1251 | ||
@@ -1250,11 +1256,10 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr | |||
1250 | if (p->index) | 1256 | if (p->index) |
1251 | xp = xfrm_policy_byid(type, p->dir, p->index, delete); | 1257 | xp = xfrm_policy_byid(type, p->dir, p->index, delete); |
1252 | else { | 1258 | else { |
1253 | struct rtattr **rtattrs = (struct rtattr **)xfrma; | 1259 | struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; |
1254 | struct rtattr *rt = rtattrs[XFRMA_SEC_CTX-1]; | ||
1255 | struct xfrm_policy tmp; | 1260 | struct xfrm_policy tmp; |
1256 | 1261 | ||
1257 | err = verify_sec_ctx_len(rtattrs); | 1262 | err = verify_sec_ctx_len(xfrma); |
1258 | if (err) | 1263 | if (err) |
1259 | return err; | 1264 | return err; |
1260 | 1265 | ||
@@ -1302,7 +1307,8 @@ out: | |||
1302 | return err; | 1307 | return err; |
1303 | } | 1308 | } |
1304 | 1309 | ||
1305 | static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1310 | static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, |
1311 | struct rtattr **xfrma) | ||
1306 | { | 1312 | { |
1307 | struct km_event c; | 1313 | struct km_event c; |
1308 | struct xfrm_usersa_flush *p = NLMSG_DATA(nlh); | 1314 | struct xfrm_usersa_flush *p = NLMSG_DATA(nlh); |
@@ -1367,7 +1373,8 @@ nlmsg_failure: | |||
1367 | return -1; | 1373 | return -1; |
1368 | } | 1374 | } |
1369 | 1375 | ||
1370 | static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1376 | static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, |
1377 | struct rtattr **xfrma) | ||
1371 | { | 1378 | { |
1372 | struct xfrm_state *x; | 1379 | struct xfrm_state *x; |
1373 | struct sk_buff *r_skb; | 1380 | struct sk_buff *r_skb; |
@@ -1415,7 +1422,8 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | |||
1415 | return err; | 1422 | return err; |
1416 | } | 1423 | } |
1417 | 1424 | ||
1418 | static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1425 | static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, |
1426 | struct rtattr **xfrma) | ||
1419 | { | 1427 | { |
1420 | struct xfrm_state *x; | 1428 | struct xfrm_state *x; |
1421 | struct km_event c; | 1429 | struct km_event c; |
@@ -1439,7 +1447,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | |||
1439 | goto out; | 1447 | goto out; |
1440 | 1448 | ||
1441 | spin_lock_bh(&x->lock); | 1449 | spin_lock_bh(&x->lock); |
1442 | err = xfrm_update_ae_params(x,(struct rtattr **)xfrma); | 1450 | err = xfrm_update_ae_params(x, xfrma); |
1443 | spin_unlock_bh(&x->lock); | 1451 | spin_unlock_bh(&x->lock); |
1444 | if (err < 0) | 1452 | if (err < 0) |
1445 | goto out; | 1453 | goto out; |
@@ -1455,14 +1463,15 @@ out: | |||
1455 | return err; | 1463 | return err; |
1456 | } | 1464 | } |
1457 | 1465 | ||
1458 | static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1466 | static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, |
1467 | struct rtattr **xfrma) | ||
1459 | { | 1468 | { |
1460 | struct km_event c; | 1469 | struct km_event c; |
1461 | u8 type = XFRM_POLICY_TYPE_MAIN; | 1470 | u8 type = XFRM_POLICY_TYPE_MAIN; |
1462 | int err; | 1471 | int err; |
1463 | struct xfrm_audit audit_info; | 1472 | struct xfrm_audit audit_info; |
1464 | 1473 | ||
1465 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); | 1474 | err = copy_from_user_policy_type(&type, xfrma); |
1466 | if (err) | 1475 | if (err) |
1467 | return err; | 1476 | return err; |
1468 | 1477 | ||
@@ -1477,7 +1486,8 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **x | |||
1477 | return 0; | 1486 | return 0; |
1478 | } | 1487 | } |
1479 | 1488 | ||
1480 | static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1489 | static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, |
1490 | struct rtattr **xfrma) | ||
1481 | { | 1491 | { |
1482 | struct xfrm_policy *xp; | 1492 | struct xfrm_policy *xp; |
1483 | struct xfrm_user_polexpire *up = NLMSG_DATA(nlh); | 1493 | struct xfrm_user_polexpire *up = NLMSG_DATA(nlh); |
@@ -1485,18 +1495,17 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void * | |||
1485 | u8 type = XFRM_POLICY_TYPE_MAIN; | 1495 | u8 type = XFRM_POLICY_TYPE_MAIN; |
1486 | int err = -ENOENT; | 1496 | int err = -ENOENT; |
1487 | 1497 | ||
1488 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); | 1498 | err = copy_from_user_policy_type(&type, xfrma); |
1489 | if (err) | 1499 | if (err) |
1490 | return err; | 1500 | return err; |
1491 | 1501 | ||
1492 | if (p->index) | 1502 | if (p->index) |
1493 | xp = xfrm_policy_byid(type, p->dir, p->index, 0); | 1503 | xp = xfrm_policy_byid(type, p->dir, p->index, 0); |
1494 | else { | 1504 | else { |
1495 | struct rtattr **rtattrs = (struct rtattr **)xfrma; | 1505 | struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; |
1496 | struct rtattr *rt = rtattrs[XFRMA_SEC_CTX-1]; | ||
1497 | struct xfrm_policy tmp; | 1506 | struct xfrm_policy tmp; |
1498 | 1507 | ||
1499 | err = verify_sec_ctx_len(rtattrs); | 1508 | err = verify_sec_ctx_len(xfrma); |
1500 | if (err) | 1509 | if (err) |
1501 | return err; | 1510 | return err; |
1502 | 1511 | ||
@@ -1537,7 +1546,8 @@ out: | |||
1537 | return err; | 1546 | return err; |
1538 | } | 1547 | } |
1539 | 1548 | ||
1540 | static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1549 | static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, |
1550 | struct rtattr **xfrma) | ||
1541 | { | 1551 | { |
1542 | struct xfrm_state *x; | 1552 | struct xfrm_state *x; |
1543 | int err; | 1553 | int err; |
@@ -1568,7 +1578,8 @@ out: | |||
1568 | return err; | 1578 | return err; |
1569 | } | 1579 | } |
1570 | 1580 | ||
1571 | static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1581 | static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, |
1582 | struct rtattr **xfrma) | ||
1572 | { | 1583 | { |
1573 | struct xfrm_policy *xp; | 1584 | struct xfrm_policy *xp; |
1574 | struct xfrm_user_tmpl *ut; | 1585 | struct xfrm_user_tmpl *ut; |
@@ -1647,7 +1658,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { | |||
1647 | #undef XMSGSIZE | 1658 | #undef XMSGSIZE |
1648 | 1659 | ||
1649 | static struct xfrm_link { | 1660 | static struct xfrm_link { |
1650 | int (*doit)(struct sk_buff *, struct nlmsghdr *, void **); | 1661 | int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **); |
1651 | int (*dump)(struct sk_buff *, struct netlink_callback *); | 1662 | int (*dump)(struct sk_buff *, struct netlink_callback *); |
1652 | } xfrm_dispatch[XFRM_NR_MSGTYPES] = { | 1663 | } xfrm_dispatch[XFRM_NR_MSGTYPES] = { |
1653 | [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, | 1664 | [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, |
@@ -1735,7 +1746,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err | |||
1735 | 1746 | ||
1736 | if (link->doit == NULL) | 1747 | if (link->doit == NULL) |
1737 | goto err_einval; | 1748 | goto err_einval; |
1738 | *errp = link->doit(skb, nlh, (void **) &xfrma); | 1749 | *errp = link->doit(skb, nlh, xfrma); |
1739 | 1750 | ||
1740 | return *errp; | 1751 | return *errp; |
1741 | 1752 | ||
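The xfrm_user hunks above are a type cleanup: every netlink doit handler now takes struct rtattr ** directly, so the (struct rtattr **) and (void **) casts at the call sites disappear and the dispatch table carries the real type. A minimal standalone sketch of that typed-dispatch idea, using toy stand-in types (none of these names are the kernel's):

    #include <stdio.h>

    /* Toy stand-ins; the real struct rtattr / nlmsghdr live in the kernel's
     * netlink headers.  Only the dispatch typing matters here. */
    struct rtattr { unsigned short rta_len, rta_type; };
    struct nlmsghdr { unsigned int nlmsg_len; unsigned short nlmsg_type; };
    struct sk_buff;                         /* opaque in this sketch */

    #define TOY_XFRMA_MAX 4                 /* hypothetical attribute count */

    /* Handlers take struct rtattr ** up front instead of void **. */
    typedef int (*toy_doit_t)(struct sk_buff *skb, struct nlmsghdr *nlh,
                              struct rtattr **xfrma);

    static int toy_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                              struct rtattr **xfrma)
    {
        (void)skb; (void)nlh;
        /* Attributes can be indexed directly, no casts needed. */
        return xfrma[0] ? 0 : -1;
    }

    static const struct { toy_doit_t doit; } toy_dispatch[] = {
        { .doit = toy_get_policy },
    };

    int main(void)
    {
        struct rtattr attr = { .rta_len = 4, .rta_type = 1 };
        struct rtattr *xfrma[TOY_XFRMA_MAX] = { &attr };
        struct nlmsghdr nlh = { .nlmsg_len = 16, .nlmsg_type = 0 };

        printf("doit -> %d\n", toy_dispatch[0].doit(NULL, &nlh, xfrma));
        return 0;
    }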
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index 0b2fcc417f59..a8ffc329666a 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc | |||
@@ -925,6 +925,8 @@ ConfigInfoView::ConfigInfoView(QWidget* parent, const char *name) | |||
925 | configSettings->endGroup(); | 925 | configSettings->endGroup(); |
926 | connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings())); | 926 | connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings())); |
927 | } | 927 | } |
928 | |||
929 | has_dbg_info = 0; | ||
928 | } | 930 | } |
929 | 931 | ||
930 | void ConfigInfoView::saveSettings(void) | 932 | void ConfigInfoView::saveSettings(void) |
@@ -953,10 +955,13 @@ void ConfigInfoView::setInfo(struct menu *m) | |||
953 | if (menu == m) | 955 | if (menu == m) |
954 | return; | 956 | return; |
955 | menu = m; | 957 | menu = m; |
956 | if (!menu) | 958 | if (!menu) { |
959 | has_dbg_info = 0; | ||
957 | clear(); | 960 | clear(); |
958 | else | 961 | } else { |
962 | has_dbg_info = 1; | ||
959 | menuInfo(); | 963 | menuInfo(); |
964 | } | ||
960 | } | 965 | } |
961 | 966 | ||
962 | void ConfigInfoView::setSource(const QString& name) | 967 | void ConfigInfoView::setSource(const QString& name) |
@@ -991,6 +996,9 @@ void ConfigInfoView::symbolInfo(void) | |||
991 | { | 996 | { |
992 | QString str; | 997 | QString str; |
993 | 998 | ||
999 | if (!has_dbg_info) | ||
1000 | return; | ||
1001 | |||
994 | str += "<big>Symbol: <b>"; | 1002 | str += "<big>Symbol: <b>"; |
995 | str += print_filter(sym->name); | 1003 | str += print_filter(sym->name); |
996 | str += "</b></big><br><br>value: "; | 1004 | str += "</b></big><br><br>value: "; |
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h index 6fc1c5f14425..a397edb5adcf 100644 --- a/scripts/kconfig/qconf.h +++ b/scripts/kconfig/qconf.h | |||
@@ -273,6 +273,8 @@ protected: | |||
273 | struct symbol *sym; | 273 | struct symbol *sym; |
274 | struct menu *menu; | 274 | struct menu *menu; |
275 | bool _showDebug; | 275 | bool _showDebug; |
276 | |||
277 | int has_dbg_info; | ||
276 | }; | 278 | }; |
277 | 279 | ||
278 | class ConfigSearchWindow : public QDialog { | 280 | class ConfigSearchWindow : public QDialog { |
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h index 0562bacb7b99..2eee0dab524d 100644 --- a/security/selinux/ss/context.h +++ b/security/selinux/ss/context.h | |||
@@ -55,6 +55,29 @@ out: | |||
55 | return rc; | 55 | return rc; |
56 | } | 56 | } |
57 | 57 | ||
58 | /* | ||
59 | * Sets both levels in the MLS range of 'dst' to the low level of 'src'. | ||
60 | */ | ||
61 | static inline int mls_context_cpy_low(struct context *dst, struct context *src) | ||
62 | { | ||
63 | int rc; | ||
64 | |||
65 | if (!selinux_mls_enabled) | ||
66 | return 0; | ||
67 | |||
68 | dst->range.level[0].sens = src->range.level[0].sens; | ||
69 | rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat); | ||
70 | if (rc) | ||
71 | goto out; | ||
72 | |||
73 | dst->range.level[1].sens = src->range.level[0].sens; | ||
74 | rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[0].cat); | ||
75 | if (rc) | ||
76 | ebitmap_destroy(&dst->range.level[0].cat); | ||
77 | out: | ||
78 | return rc; | ||
79 | } | ||
80 | |||
58 | static inline int mls_context_cmp(struct context *c1, struct context *c2) | 81 | static inline int mls_context_cmp(struct context *c1, struct context *c2) |
59 | { | 82 | { |
60 | if (!selinux_mls_enabled) | 83 | if (!selinux_mls_enabled) |
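The mls_context_cpy_low() helper added above replaces the old open-coded loop in mls.c: it writes the source's low level into both the low and high level of the destination (and unwinds the first ebitmap copy if the second one fails), which is what copying the "effective" MLS range means. A toy sketch of that collapse with plain arrays standing in for ebitmaps (names here are hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Toy stand-ins: a level is a sensitivity plus a tiny category bitmap. */
    struct toy_level { int sens; unsigned char cat[4]; };
    struct toy_range { struct toy_level level[2]; };   /* [0]=low, [1]=high */

    /* Copy src's LOW level into both levels of dst, as the helper does. */
    static void toy_cpy_low(struct toy_range *dst, const struct toy_range *src)
    {
        for (int l = 0; l < 2; l++) {
            dst->level[l].sens = src->level[0].sens;
            memcpy(dst->level[l].cat, src->level[0].cat,
                   sizeof dst->level[l].cat);
        }
    }

    int main(void)
    {
        struct toy_range src = { .level = { { 1, { 0x03 } }, { 5, { 0xff } } } };
        struct toy_range dst = { 0 };

        toy_cpy_low(&dst, &src);
        /* Both print 1: the destination range collapses to src's low level. */
        printf("dst low=%d high=%d\n", dst.level[0].sens, dst.level[1].sens);
        return 0;
    }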
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c index b4f682dc13ff..4a8bab2f3c71 100644 --- a/security/selinux/ss/mls.c +++ b/security/selinux/ss/mls.c | |||
@@ -270,7 +270,7 @@ int mls_context_to_sid(char oldc, | |||
270 | if (!defcon) | 270 | if (!defcon) |
271 | goto out; | 271 | goto out; |
272 | 272 | ||
273 | rc = mls_copy_context(context, defcon); | 273 | rc = mls_context_cpy(context, defcon); |
274 | goto out; | 274 | goto out; |
275 | } | 275 | } |
276 | 276 | ||
@@ -401,26 +401,6 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask) | |||
401 | } | 401 | } |
402 | 402 | ||
403 | /* | 403 | /* |
404 | * Copies the effective MLS range from `src' into `dst'. | ||
405 | */ | ||
406 | static inline int mls_scopy_context(struct context *dst, | ||
407 | struct context *src) | ||
408 | { | ||
409 | int l, rc = 0; | ||
410 | |||
411 | /* Copy the MLS range from the source context */ | ||
412 | for (l = 0; l < 2; l++) { | ||
413 | dst->range.level[l].sens = src->range.level[0].sens; | ||
414 | rc = ebitmap_cpy(&dst->range.level[l].cat, | ||
415 | &src->range.level[0].cat); | ||
416 | if (rc) | ||
417 | break; | ||
418 | } | ||
419 | |||
420 | return rc; | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * Copies the MLS range `range' into `context'. | 404 | * Copies the MLS range `range' into `context'. |
425 | */ | 405 | */ |
426 | static inline int mls_range_set(struct context *context, | 406 | static inline int mls_range_set(struct context *context, |
@@ -552,19 +532,19 @@ int mls_compute_sid(struct context *scontext, | |||
552 | case AVTAB_CHANGE: | 532 | case AVTAB_CHANGE: |
553 | if (tclass == SECCLASS_PROCESS) | 533 | if (tclass == SECCLASS_PROCESS) |
554 | /* Use the process MLS attributes. */ | 534 | /* Use the process MLS attributes. */ |
555 | return mls_copy_context(newcontext, scontext); | 535 | return mls_context_cpy(newcontext, scontext); |
556 | else | 536 | else |
557 | /* Use the process effective MLS attributes. */ | 537 | /* Use the process effective MLS attributes. */ |
558 | return mls_scopy_context(newcontext, scontext); | 538 | return mls_context_cpy_low(newcontext, scontext); |
559 | case AVTAB_MEMBER: | 539 | case AVTAB_MEMBER: |
560 | /* Only polyinstantiate the MLS attributes if | 540 | /* Only polyinstantiate the MLS attributes if |
561 | the type is being polyinstantiated */ | 541 | the type is being polyinstantiated */ |
562 | if (newcontext->type != tcontext->type) { | 542 | if (newcontext->type != tcontext->type) { |
563 | /* Use the process effective MLS attributes. */ | 543 | /* Use the process effective MLS attributes. */ |
564 | return mls_scopy_context(newcontext, scontext); | 544 | return mls_context_cpy_low(newcontext, scontext); |
565 | } else { | 545 | } else { |
566 | /* Use the related object MLS attributes. */ | 546 | /* Use the related object MLS attributes. */ |
567 | return mls_copy_context(newcontext, tcontext); | 547 | return mls_context_cpy(newcontext, tcontext); |
568 | } | 548 | } |
569 | default: | 549 | default: |
570 | return -EINVAL; | 550 | return -EINVAL; |
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h index 661d6fc76966..096d1b4ef7fb 100644 --- a/security/selinux/ss/mls.h +++ b/security/selinux/ss/mls.h | |||
@@ -24,26 +24,6 @@ | |||
24 | #include "context.h" | 24 | #include "context.h" |
25 | #include "policydb.h" | 25 | #include "policydb.h" |
26 | 26 | ||
27 | /* | ||
28 | * Copies the MLS range from `src' into `dst'. | ||
29 | */ | ||
30 | static inline int mls_copy_context(struct context *dst, | ||
31 | struct context *src) | ||
32 | { | ||
33 | int l, rc = 0; | ||
34 | |||
35 | /* Copy the MLS range from the source context */ | ||
36 | for (l = 0; l < 2; l++) { | ||
37 | dst->range.level[l].sens = src->range.level[l].sens; | ||
38 | rc = ebitmap_cpy(&dst->range.level[l].cat, | ||
39 | &src->range.level[l].cat); | ||
40 | if (rc) | ||
41 | break; | ||
42 | } | ||
43 | |||
44 | return rc; | ||
45 | } | ||
46 | |||
47 | int mls_compute_context_len(struct context *context); | 27 | int mls_compute_context_len(struct context *context); |
48 | void mls_sid_to_context(struct context *context, char **scontext); | 28 | void mls_sid_to_context(struct context *context, char **scontext); |
49 | int mls_context_isvalid(struct policydb *p, struct context *c); | 29 | int mls_context_isvalid(struct policydb *p, struct context *c); |
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index ee0581557966..3eb1fa9f0de1 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -1916,11 +1916,10 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) | |||
1916 | newcon.user = context1->user; | 1916 | newcon.user = context1->user; |
1917 | newcon.role = context1->role; | 1917 | newcon.role = context1->role; |
1918 | newcon.type = context1->type; | 1918 | newcon.type = context1->type; |
1919 | rc = mls_copy_context(&newcon, context2); | 1919 | rc = mls_context_cpy(&newcon, context2); |
1920 | if (rc) | 1920 | if (rc) |
1921 | goto out_unlock; | 1921 | goto out_unlock; |
1922 | 1922 | ||
1923 | |||
1924 | /* Check the validity of the new context. */ | 1923 | /* Check the validity of the new context. */ |
1925 | if (!policydb_context_isvalid(&policydb, &newcon)) { | 1924 | if (!policydb_context_isvalid(&policydb, &newcon)) { |
1926 | rc = convert_context_handle_invalid_context(&newcon); | 1925 | rc = convert_context_handle_invalid_context(&newcon); |
@@ -2492,9 +2491,9 @@ static int selinux_netlbl_socket_setsid(struct socket *sock, u32 sid) | |||
2492 | 2491 | ||
2493 | rc = netlbl_socket_setattr(sock, &secattr); | 2492 | rc = netlbl_socket_setattr(sock, &secattr); |
2494 | if (rc == 0) { | 2493 | if (rc == 0) { |
2495 | spin_lock(&sksec->nlbl_lock); | 2494 | spin_lock_bh(&sksec->nlbl_lock); |
2496 | sksec->nlbl_state = NLBL_LABELED; | 2495 | sksec->nlbl_state = NLBL_LABELED; |
2497 | spin_unlock(&sksec->nlbl_lock); | 2496 | spin_unlock_bh(&sksec->nlbl_lock); |
2498 | } | 2497 | } |
2499 | 2498 | ||
2500 | netlbl_socket_setsid_return: | 2499 | netlbl_socket_setsid_return: |
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index 71c58df4af28..70face7e1048 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c | |||
@@ -2198,7 +2198,8 @@ static int _snd_cmipci_uswitch_put(struct snd_kcontrol *kcontrol, | |||
2198 | val = inb(cm->iobase + args->reg); | 2198 | val = inb(cm->iobase + args->reg); |
2199 | else | 2199 | else |
2200 | val = snd_cmipci_read(cm, args->reg); | 2200 | val = snd_cmipci_read(cm, args->reg); |
2201 | change = (val & args->mask) != (ucontrol->value.integer.value[0] ? args->mask : 0); | 2201 | change = (val & args->mask) != (ucontrol->value.integer.value[0] ? |
2202 | args->mask_on : (args->mask & ~args->mask_on)); | ||
2202 | if (change) { | 2203 | if (change) { |
2203 | val &= ~args->mask; | 2204 | val &= ~args->mask; |
2204 | if (ucontrol->value.integer.value[0]) | 2205 | if (ucontrol->value.integer.value[0]) |
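The cmipci fix matters for switches whose "on" pattern is not simply all mask bits set: the change test now compares the register against args->mask_on (or its complement within the mask) instead of against the whole mask. A toy version of that comparison, with made-up field and bit values rather than the driver's real ones:

    #include <stdio.h>

    /* Toy switch: 'mask' covers the register bits owned by the control,
     * 'mask_on' is the pattern those bits take when the switch is on. */
    struct toy_switch { unsigned mask, mask_on; };

    static int would_change(const struct toy_switch *sw, unsigned reg, int want_on)
    {
        unsigned target = want_on ? sw->mask_on : (sw->mask & ~sw->mask_on);
        return (reg & sw->mask) != target;
    }

    int main(void)
    {
        /* Hypothetical inverted switch: "on" means the bit is CLEARED. */
        struct toy_switch sw = { .mask = 0x08, .mask_on = 0x00 };
        unsigned reg = 0x08;            /* bit set -> switch currently off */

        printf("turning on changes reg:  %d\n", would_change(&sw, reg, 1)); /* 1 */
        printf("turning off changes reg: %d\n", would_change(&sw, reg, 0)); /* 0 */
        return 0;
    }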
diff --git a/sound/pci/echoaudio/midi.c b/sound/pci/echoaudio/midi.c index e31f0f11e3a8..91f5bff66d3f 100644 --- a/sound/pci/echoaudio/midi.c +++ b/sound/pci/echoaudio/midi.c | |||
@@ -213,7 +213,7 @@ static void snd_echo_midi_output_write(unsigned long data) | |||
213 | sent = bytes = 0; | 213 | sent = bytes = 0; |
214 | spin_lock_irqsave(&chip->lock, flags); | 214 | spin_lock_irqsave(&chip->lock, flags); |
215 | chip->midi_full = 0; | 215 | chip->midi_full = 0; |
216 | if (chip->midi_out && !snd_rawmidi_transmit_empty(chip->midi_out)) { | 216 | if (!snd_rawmidi_transmit_empty(chip->midi_out)) { |
217 | bytes = snd_rawmidi_transmit_peek(chip->midi_out, buf, | 217 | bytes = snd_rawmidi_transmit_peek(chip->midi_out, buf, |
218 | MIDI_OUT_BUFFER_SIZE - 1); | 218 | MIDI_OUT_BUFFER_SIZE - 1); |
219 | DE_MID(("Try to send %d bytes...\n", bytes)); | 219 | DE_MID(("Try to send %d bytes...\n", bytes)); |
@@ -264,9 +264,11 @@ static void snd_echo_midi_output_trigger(struct snd_rawmidi_substream *substream | |||
264 | } | 264 | } |
265 | } else { | 265 | } else { |
266 | if (chip->tinuse) { | 266 | if (chip->tinuse) { |
267 | del_timer(&chip->timer); | ||
268 | chip->tinuse = 0; | 267 | chip->tinuse = 0; |
268 | spin_unlock_irq(&chip->lock); | ||
269 | del_timer_sync(&chip->timer); | ||
269 | DE_MID(("Timer removed\n")); | 270 | DE_MID(("Timer removed\n")); |
271 | return; | ||
270 | } | 272 | } |
271 | } | 273 | } |
272 | spin_unlock_irq(&chip->lock); | 274 | spin_unlock_irq(&chip->lock); |
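The midi.c change drops chip->lock before calling del_timer_sync(), because the timer callback takes the same lock; waiting for the callback to finish while still holding it could deadlock. A userspace analogy of that ordering, with a pthread standing in for the kernel timer (illustrative only, not the driver code):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int tinuse = 1;               /* "timer armed" flag */

    /* Stand-in for the timer callback: it also needs 'lock'. */
    static void *timer_fn(void *arg)
    {
        (void)arg;
        usleep(1000);
        pthread_mutex_lock(&lock);
        puts("timer ran");
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t timer;
        pthread_create(&timer, NULL, timer_fn, NULL);

        pthread_mutex_lock(&lock);
        tinuse = 0;                      /* mark the timer as stopped ... */
        pthread_mutex_unlock(&lock);     /* ... then DROP the lock ... */

        /* ... before synchronously waiting for the callback to finish
         * (the del_timer_sync() analogue).  Joining while still holding
         * 'lock' could deadlock, since timer_fn blocks on the same lock. */
        pthread_join(timer, NULL);
        printf("timer removed (tinuse=%d)\n", tinuse);
        return 0;
    }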
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 97e9af130b71..1589d2f2917f 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -485,8 +485,9 @@ static const char *get_input_type(struct hda_gnode *node, unsigned int *pinctl) | |||
485 | return "Front Aux"; | 485 | return "Front Aux"; |
486 | return "Aux"; | 486 | return "Aux"; |
487 | case AC_JACK_MIC_IN: | 487 | case AC_JACK_MIC_IN: |
488 | if (node->pin_caps & | 488 | if (pinctl && |
489 | (AC_PINCAP_VREF_80 << AC_PINCAP_VREF_SHIFT)) | 489 | (node->pin_caps & |
490 | (AC_PINCAP_VREF_80 << AC_PINCAP_VREF_SHIFT))) | ||
490 | *pinctl |= AC_PINCTL_VREF_80; | 491 | *pinctl |= AC_PINCTL_VREF_80; |
491 | if ((location & 0x0f) == AC_JACK_LOC_FRONT) | 492 | if ((location & 0x0f) == AC_JACK_LOC_FRONT) |
492 | return "Front Mic"; | 493 | return "Front Mic"; |
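The hda_generic fix is a null check on an optional out-parameter: *pinctl is only updated when the caller actually passed a pointer. A small sketch of that pattern (the capability and control bit values here are made up):

    #include <stdio.h>

    /* Optional out-parameter: only written when the caller supplied it. */
    static const char *toy_input_type(unsigned caps, unsigned *pinctl)
    {
        if (pinctl && (caps & 0x1))      /* hypothetical VREF capability bit */
            *pinctl |= 0x2;              /* hypothetical VREF_80 control bit */
        return "Mic";
    }

    int main(void)
    {
        unsigned ctl = 0;
        printf("%s ctl=%u\n", toy_input_type(0x1, &ctl), ctl);  /* writes ctl */
        printf("%s\n", toy_input_type(0x1, NULL));              /* no deref */
        return 0;
    }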
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 9fd34f85cad5..1a7e82104bb9 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -83,6 +83,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6}," | |||
83 | "{Intel, ICH7}," | 83 | "{Intel, ICH7}," |
84 | "{Intel, ESB2}," | 84 | "{Intel, ESB2}," |
85 | "{Intel, ICH8}," | 85 | "{Intel, ICH8}," |
86 | "{Intel, ICH9}," | ||
86 | "{ATI, SB450}," | 87 | "{ATI, SB450}," |
87 | "{ATI, SB600}," | 88 | "{ATI, SB600}," |
88 | "{ATI, RS600}," | 89 | "{ATI, RS600}," |
@@ -1711,6 +1712,8 @@ static struct pci_device_id azx_ids[] = { | |||
1711 | { 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH7 */ | 1712 | { 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH7 */ |
1712 | { 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */ | 1713 | { 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */ |
1713 | { 0x8086, 0x284b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH8 */ | 1714 | { 0x8086, 0x284b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH8 */ |
1715 | { 0x8086, 0x293e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH9 */ | ||
1716 | { 0x8086, 0x293f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH9 */ | ||
1714 | { 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */ | 1717 | { 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */ |
1715 | { 0x1002, 0x4383, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB600 */ | 1718 | { 0x1002, 0x4383, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB600 */ |
1716 | { 0x1002, 0x793b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATIHDMI }, /* ATI RS600 HDMI */ | 1719 | { 0x1002, 0x793b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATIHDMI }, /* ATI RS600 HDMI */ |
@@ -1718,9 +1721,14 @@ static struct pci_device_id azx_ids[] = { | |||
1718 | { 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */ | 1721 | { 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */ |
1719 | { 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */ | 1722 | { 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */ |
1720 | { 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ULI }, /* ULI M5461 */ | 1723 | { 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ULI }, /* ULI M5461 */ |
1721 | { 0x10de, 0x026c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA 026c */ | 1724 | { 0x10de, 0x026c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP51 */ |
1722 | { 0x10de, 0x0371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA 0371 */ | 1725 | { 0x10de, 0x0371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP55 */ |
1723 | { 0x10de, 0x03f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA 03f0 */ | 1726 | { 0x10de, 0x03e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP61 */ |
1727 | { 0x10de, 0x03f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP61 */ | ||
1728 | { 0x10de, 0x044a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP65 */ | ||
1729 | { 0x10de, 0x044b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP65 */ | ||
1730 | { 0x10de, 0x055c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP67 */ | ||
1731 | { 0x10de, 0x055d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP67 */ | ||
1724 | { 0, } | 1732 | { 0, } |
1725 | }; | 1733 | }; |
1726 | MODULE_DEVICE_TABLE(pci, azx_ids); | 1734 | MODULE_DEVICE_TABLE(pci, azx_ids); |
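The hda_intel additions are new rows in the azx_ids PCI match table, terminated as before by the all-zero sentinel entry. A toy sketch of how such a vendor/device table is scanned (simplified, not the kernel's PCI matching code; the driver_data values are made up):

    #include <stdio.h>

    struct toy_pci_id { unsigned vendor, device, driver_data; };

    /* Table terminated by an all-zero sentinel, like azx_ids. */
    static const struct toy_pci_id ids[] = {
        { 0x8086, 0x293e, 1 },   /* hypothetical: ICH9 -> driver type 1 */
        { 0x10de, 0x044a, 2 },   /* hypothetical: MCP65 -> driver type 2 */
        { 0, 0, 0 },
    };

    static const struct toy_pci_id *toy_match(unsigned vendor, unsigned device)
    {
        for (const struct toy_pci_id *id = ids; id->vendor; id++)
            if (id->vendor == vendor && id->device == device)
                return id;
        return NULL;
    }

    int main(void)
    {
        const struct toy_pci_id *id = toy_match(0x8086, 0x293e);
        printf("match: %s\n", id ? "yes" : "no");
        return 0;
    }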
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c index edeb3d3c4c7e..f5956d557f70 100644 --- a/sound/sparc/cs4231.c +++ b/sound/sparc/cs4231.c | |||
@@ -1268,7 +1268,7 @@ static struct snd_pcm_hardware snd_cs4231_playback = | |||
1268 | .channels_min = 1, | 1268 | .channels_min = 1, |
1269 | .channels_max = 2, | 1269 | .channels_max = 2, |
1270 | .buffer_bytes_max = (32*1024), | 1270 | .buffer_bytes_max = (32*1024), |
1271 | .period_bytes_min = 4096, | 1271 | .period_bytes_min = 64, |
1272 | .period_bytes_max = (32*1024), | 1272 | .period_bytes_max = (32*1024), |
1273 | .periods_min = 1, | 1273 | .periods_min = 1, |
1274 | .periods_max = 1024, | 1274 | .periods_max = 1024, |
@@ -1288,7 +1288,7 @@ static struct snd_pcm_hardware snd_cs4231_capture = | |||
1288 | .channels_min = 1, | 1288 | .channels_min = 1, |
1289 | .channels_max = 2, | 1289 | .channels_max = 2, |
1290 | .buffer_bytes_max = (32*1024), | 1290 | .buffer_bytes_max = (32*1024), |
1291 | .period_bytes_min = 4096, | 1291 | .period_bytes_min = 64, |
1292 | .period_bytes_max = (32*1024), | 1292 | .period_bytes_max = (32*1024), |
1293 | .periods_min = 1, | 1293 | .periods_min = 1, |
1294 | .periods_max = 1024, | 1294 | .periods_max = 1024, |
@@ -1796,7 +1796,7 @@ static irqreturn_t snd_cs4231_sbus_interrupt(int irq, void *dev_id) | |||
1796 | snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0); | 1796 | snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0); |
1797 | spin_unlock_irqrestore(&chip->lock, flags); | 1797 | spin_unlock_irqrestore(&chip->lock, flags); |
1798 | 1798 | ||
1799 | return 0; | 1799 | return IRQ_HANDLED; |
1800 | } | 1800 | } |
1801 | 1801 | ||
1802 | /* | 1802 | /* |
@@ -1821,7 +1821,6 @@ static int sbus_dma_request(struct cs4231_dma_control *dma_cont, dma_addr_t bus_ | |||
1821 | if (!(csr & test)) | 1821 | if (!(csr & test)) |
1822 | goto out; | 1822 | goto out; |
1823 | err = -EBUSY; | 1823 | err = -EBUSY; |
1824 | csr = sbus_readl(base->regs + APCCSR); | ||
1825 | test = APC_XINT_CNVA; | 1824 | test = APC_XINT_CNVA; |
1826 | if ( base->dir == APC_PLAY ) | 1825 | if ( base->dir == APC_PLAY ) |
1827 | test = APC_XINT_PNVA; | 1826 | test = APC_XINT_PNVA; |
@@ -1862,17 +1861,16 @@ static void sbus_dma_enable(struct cs4231_dma_control *dma_cont, int on) | |||
1862 | 1861 | ||
1863 | spin_lock_irqsave(&base->lock, flags); | 1862 | spin_lock_irqsave(&base->lock, flags); |
1864 | if (!on) { | 1863 | if (!on) { |
1865 | if (base->dir == APC_PLAY) { | 1864 | sbus_writel(0, base->regs + base->dir + APCNC); |
1866 | sbus_writel(0, base->regs + base->dir + APCNVA); | 1865 | sbus_writel(0, base->regs + base->dir + APCNVA); |
1867 | sbus_writel(1, base->regs + base->dir + APCC); | 1866 | sbus_writel(0, base->regs + base->dir + APCC); |
1868 | } | 1867 | sbus_writel(0, base->regs + base->dir + APCVA); |
1869 | else | 1868 | |
1870 | { | 1869 | /* ACK any APC interrupts. */ |
1871 | sbus_writel(0, base->regs + base->dir + APCNC); | 1870 | csr = sbus_readl(base->regs + APCCSR); |
1872 | sbus_writel(0, base->regs + base->dir + APCVA); | 1871 | sbus_writel(csr, base->regs + APCCSR); |
1873 | } | ||
1874 | } | 1872 | } |
1875 | udelay(600); | 1873 | udelay(1000); |
1876 | csr = sbus_readl(base->regs + APCCSR); | 1874 | csr = sbus_readl(base->regs + APCCSR); |
1877 | shift = 0; | 1875 | shift = 0; |
1878 | if ( base->dir == APC_PLAY ) | 1876 | if ( base->dir == APC_PLAY ) |
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c index 3d7f36fb4cf0..19bdcc74c96c 100644 --- a/sound/usb/usbaudio.c +++ b/sound/usb/usbaudio.c | |||
@@ -2471,7 +2471,13 @@ static int parse_audio_format_rates(struct snd_usb_audio *chip, struct audioform | |||
2471 | fp->nr_rates = nr_rates; | 2471 | fp->nr_rates = nr_rates; |
2472 | fp->rate_min = fp->rate_max = combine_triple(&fmt[8]); | 2472 | fp->rate_min = fp->rate_max = combine_triple(&fmt[8]); |
2473 | for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) { | 2473 | for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) { |
2474 | unsigned int rate = fp->rate_table[r] = combine_triple(&fmt[idx]); | 2474 | unsigned int rate = combine_triple(&fmt[idx]); |
2475 | /* C-Media CM6501 mislabels its 96 kHz altsetting */ | ||
2476 | if (rate == 48000 && nr_rates == 1 && | ||
2477 | chip->usb_id == USB_ID(0x0d8c, 0x0201) && | ||
2478 | fp->altsetting == 5 && fp->maxpacksize == 392) | ||
2479 | rate = 96000; | ||
2480 | fp->rate_table[r] = rate; | ||
2475 | if (rate < fp->rate_min) | 2481 | if (rate < fp->rate_min) |
2476 | fp->rate_min = rate; | 2482 | fp->rate_min = rate; |
2477 | else if (rate > fp->rate_max) | 2483 | else if (rate > fp->rate_max) |
@@ -3280,6 +3286,7 @@ static void snd_usb_audio_create_proc(struct snd_usb_audio *chip) | |||
3280 | 3286 | ||
3281 | static int snd_usb_audio_free(struct snd_usb_audio *chip) | 3287 | static int snd_usb_audio_free(struct snd_usb_audio *chip) |
3282 | { | 3288 | { |
3289 | usb_chip[chip->index] = NULL; | ||
3283 | kfree(chip); | 3290 | kfree(chip); |
3284 | return 0; | 3291 | return 0; |
3285 | } | 3292 | } |
@@ -3541,7 +3548,6 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) | |||
3541 | list_for_each(p, &chip->mixer_list) { | 3548 | list_for_each(p, &chip->mixer_list) { |
3542 | snd_usb_mixer_disconnect(p); | 3549 | snd_usb_mixer_disconnect(p); |
3543 | } | 3550 | } |
3544 | usb_chip[chip->index] = NULL; | ||
3545 | mutex_unlock(®ister_mutex); | 3551 | mutex_unlock(®ister_mutex); |
3546 | snd_card_free_when_closed(card); | 3552 | snd_card_free_when_closed(card); |
3547 | } else { | 3553 | } else { |
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c index e74eb1bc8d87..7b3bf3545a3b 100644 --- a/sound/usb/usbmixer.c +++ b/sound/usb/usbmixer.c | |||
@@ -1526,7 +1526,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi | |||
1526 | namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL); | 1526 | namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL); |
1527 | if (! namelist[i]) { | 1527 | if (! namelist[i]) { |
1528 | snd_printk(KERN_ERR "cannot malloc\n"); | 1528 | snd_printk(KERN_ERR "cannot malloc\n"); |
1529 | while (--i > 0) | 1529 | while (i--) |
1530 | kfree(namelist[i]); | 1530 | kfree(namelist[i]); |
1531 | kfree(namelist); | 1531 | kfree(namelist); |
1532 | kfree(cval); | 1532 | kfree(cval); |
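The usbmixer fix is an off-by-one in the error unwind: "while (--i > 0)" never freed namelist[0], whereas "while (i--)" walks back over every element allocated before the failure. A standalone sketch of the corrected cleanup loop (not the driver code):

    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate 'n' strings; on failure, free everything allocated so far. */
    static char **alloc_namelist(int n, size_t len)
    {
        char **namelist = calloc(n, sizeof *namelist);
        if (!namelist)
            return NULL;

        for (int i = 0; i < n; i++) {
            namelist[i] = malloc(len);
            if (!namelist[i]) {
                /* "while (i--)" visits i-1 .. 0, so index 0 is freed too;
                 * the old "while (--i > 0)" form leaked namelist[0]. */
                while (i--)
                    free(namelist[i]);
                free(namelist);
                return NULL;
            }
        }
        return namelist;
    }

    int main(void)
    {
        char **names = alloc_namelist(4, 64);
        if (names) {
            puts("allocated 4 names");
            for (int i = 0; i < 4; i++)
                free(names[i]);
            free(names);
        }
        return 0;
    }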