author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-14 12:46:06 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-14 12:46:06 -0500
commit     414f827c46973ba39320cfb43feb55a0eeb9b4e8 (patch)
tree       45e860974ef698e71370a0ebdddcff4f14fbdf9e /arch/x86_64
parent     86a71dbd3e81e8870d0f0e56b87875f57e58222b (diff)
parent     126b1922367fbe5513daa675a2abd13ed3917f4e (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (94 commits)
[PATCH] x86-64: Remove mk_pte_phys()
[PATCH] i386: Fix broken CONFIG_COMPAT_VDSO on i386
[PATCH] i386: fix 32-bit ioctls on x64_32
[PATCH] x86: Unify pcspeaker platform device code between i386/x86-64
[PATCH] i386: Remove extern declaration from mm/discontig.c, put in header.
[PATCH] i386: Rename cpu_gdt_descr and remove extern declaration from smpboot.c
[PATCH] i386: Move mce_disabled to asm/mce.h
[PATCH] i386: paravirt unhandled fallthrough
[PATCH] x86_64: Wire up compat epoll_pwait
[PATCH] x86: Don't require the vDSO for handling a.out signals
[PATCH] i386: Fix Cyrix MediaGX detection
[PATCH] i386: Fix warning in cpu initialization
[PATCH] i386: Fix warning in microcode.c
[PATCH] x86: Enable NMI watchdog for AMD Family 0x10 CPUs
[PATCH] x86: Add new CPUID bits for AMD Family 10 CPUs in /proc/cpuinfo
[PATCH] i386: Remove fastcall in paravirt.[ch]
[PATCH] x86-64: Fix wrong gcc check in bitops.h
[PATCH] x86-64: survive having no irq mapping for a vector
[PATCH] i386: geode configuration fixes
[PATCH] i386: add option to show more code in oops reports
...
Diffstat (limited to 'arch/x86_64')
30 files changed, 761 insertions, 416 deletions
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 02dd39457bcf..7982cbc3bc94 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -152,18 +152,18 @@ config MPSC | |||
152 | Optimize for Intel Pentium 4 and older Nocona/Dempsey Xeon CPUs | 152 | Optimize for Intel Pentium 4 and older Nocona/Dempsey Xeon CPUs |
153 | with Intel Extended Memory 64 Technology(EM64T). For details see | 153 | with Intel Extended Memory 64 Technology(EM64T). For details see |
154 | <http://www.intel.com/technology/64bitextensions/>. | 154 | <http://www.intel.com/technology/64bitextensions/>. |
155 | Note the the latest Xeons (Xeon 51xx and 53xx) are not based on the | 155 | Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the |
156 | Netburst core and shouldn't use this option. You can distingush them | 156 | Netburst core and shouldn't use this option. You can distinguish them |
157 | using the cpu family field | 157 | using the cpu family field |
158 | in /proc/cpuinfo. Family 15 is a older Xeon, Family 6 a newer one | 158 | in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one |
159 | (this rule only applies to system that support EM64T) | 159 | (this rule only applies to systems that support EM64T) |
160 | 160 | ||
161 | config MCORE2 | 161 | config MCORE2 |
162 | bool "Intel Core2 / newer Xeon" | 162 | bool "Intel Core2 / newer Xeon" |
163 | help | 163 | help |
164 | Optimize for Intel Core2 and newer Xeons (51xx) | 164 | Optimize for Intel Core2 and newer Xeons (51xx) |
165 | You can distingush the newer Xeons from the older ones using | 165 | You can distinguish the newer Xeons from the older ones using |
166 | the cpu family field in /proc/cpuinfo. 15 is a older Xeon | 166 | the cpu family field in /proc/cpuinfo. 15 is an older Xeon |
167 | (use CONFIG_MPSC then), 6 is a newer one. This rule only | 167 | (use CONFIG_MPSC then), 6 is a newer one. This rule only |
168 | applies to CPUs that support EM64T. | 168 | applies to CPUs that support EM64T. |
169 | 169 | ||
@@ -458,8 +458,8 @@ config IOMMU | |||
458 | on systems with more than 3GB. This is usually needed for USB, | 458 | on systems with more than 3GB. This is usually needed for USB, |
459 | sound, many IDE/SATA chipsets and some other devices. | 459 | sound, many IDE/SATA chipsets and some other devices. |
460 | Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART | 460 | Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART |
461 | based IOMMU and a software bounce buffer based IOMMU used on Intel | 461 | based hardware IOMMU and a software bounce buffer based IOMMU used |
462 | systems and as fallback. | 462 | on Intel systems and as fallback. |
463 | The code is only active when needed (enough memory and limited | 463 | The code is only active when needed (enough memory and limited |
464 | device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified | 464 | device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified |
465 | too. | 465 | too. |
@@ -496,6 +496,12 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT | |||
496 | # need this always selected by IOMMU for the VIA workaround | 496 | # need this always selected by IOMMU for the VIA workaround |
497 | config SWIOTLB | 497 | config SWIOTLB |
498 | bool | 498 | bool |
499 | help | ||
500 | Support for software bounce buffers used on x86-64 systems | ||
501 | which don't have a hardware IOMMU (e.g. the current generation | ||
502 | of Intel's x86-64 CPUs). Using this PCI devices which can only | ||
503 | access 32-bits of memory can be used on systems with more than | ||
504 | 3 GB of memory. If unsure, say Y. | ||
499 | 505 | ||
500 | config X86_MCE | 506 | config X86_MCE |
501 | bool "Machine check support" if EMBEDDED | 507 | bool "Machine check support" if EMBEDDED |
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 69584c295305..293a4a4c609e 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.20-rc3 | 3 | # Linux kernel version: 2.6.20-git8 |
4 | # Fri Jan 5 11:54:41 2007 | 4 | # Tue Feb 13 11:25:16 2007 |
5 | # | 5 | # |
6 | CONFIG_X86_64=y | 6 | CONFIG_X86_64=y |
7 | CONFIG_64BIT=y | 7 | CONFIG_64BIT=y |
@@ -11,6 +11,7 @@ CONFIG_LOCKDEP_SUPPORT=y | |||
11 | CONFIG_STACKTRACE_SUPPORT=y | 11 | CONFIG_STACKTRACE_SUPPORT=y |
12 | CONFIG_SEMAPHORE_SLEEPERS=y | 12 | CONFIG_SEMAPHORE_SLEEPERS=y |
13 | CONFIG_MMU=y | 13 | CONFIG_MMU=y |
14 | CONFIG_ZONE_DMA=y | ||
14 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 15 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
15 | CONFIG_GENERIC_HWEIGHT=y | 16 | CONFIG_GENERIC_HWEIGHT=y |
16 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 17 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
@@ -153,6 +154,7 @@ CONFIG_NEED_MULTIPLE_NODES=y | |||
153 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 154 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
154 | CONFIG_MIGRATION=y | 155 | CONFIG_MIGRATION=y |
155 | CONFIG_RESOURCES_64BIT=y | 156 | CONFIG_RESOURCES_64BIT=y |
157 | CONFIG_ZONE_DMA_FLAG=1 | ||
156 | CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y | 158 | CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y |
157 | CONFIG_OUT_OF_LINE_PFN_TO_PAGE=y | 159 | CONFIG_OUT_OF_LINE_PFN_TO_PAGE=y |
158 | CONFIG_NR_CPUS=32 | 160 | CONFIG_NR_CPUS=32 |
@@ -201,13 +203,14 @@ CONFIG_ACPI=y | |||
201 | CONFIG_ACPI_SLEEP=y | 203 | CONFIG_ACPI_SLEEP=y |
202 | CONFIG_ACPI_SLEEP_PROC_FS=y | 204 | CONFIG_ACPI_SLEEP_PROC_FS=y |
203 | CONFIG_ACPI_SLEEP_PROC_SLEEP=y | 205 | CONFIG_ACPI_SLEEP_PROC_SLEEP=y |
206 | CONFIG_ACPI_PROCFS=y | ||
204 | CONFIG_ACPI_AC=y | 207 | CONFIG_ACPI_AC=y |
205 | CONFIG_ACPI_BATTERY=y | 208 | CONFIG_ACPI_BATTERY=y |
206 | CONFIG_ACPI_BUTTON=y | 209 | CONFIG_ACPI_BUTTON=y |
207 | # CONFIG_ACPI_VIDEO is not set | ||
208 | # CONFIG_ACPI_HOTKEY is not set | 210 | # CONFIG_ACPI_HOTKEY is not set |
209 | CONFIG_ACPI_FAN=y | 211 | CONFIG_ACPI_FAN=y |
210 | # CONFIG_ACPI_DOCK is not set | 212 | # CONFIG_ACPI_DOCK is not set |
213 | # CONFIG_ACPI_BAY is not set | ||
211 | CONFIG_ACPI_PROCESSOR=y | 214 | CONFIG_ACPI_PROCESSOR=y |
212 | CONFIG_ACPI_HOTPLUG_CPU=y | 215 | CONFIG_ACPI_HOTPLUG_CPU=y |
213 | CONFIG_ACPI_THERMAL=y | 216 | CONFIG_ACPI_THERMAL=y |
@@ -263,7 +266,6 @@ CONFIG_PCI_MMCONFIG=y | |||
263 | CONFIG_PCIEPORTBUS=y | 266 | CONFIG_PCIEPORTBUS=y |
264 | CONFIG_PCIEAER=y | 267 | CONFIG_PCIEAER=y |
265 | CONFIG_PCI_MSI=y | 268 | CONFIG_PCI_MSI=y |
266 | # CONFIG_PCI_MULTITHREAD_PROBE is not set | ||
267 | # CONFIG_PCI_DEBUG is not set | 269 | # CONFIG_PCI_DEBUG is not set |
268 | # CONFIG_HT_IRQ is not set | 270 | # CONFIG_HT_IRQ is not set |
269 | 271 | ||
@@ -398,6 +400,7 @@ CONFIG_STANDALONE=y | |||
398 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 400 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
399 | CONFIG_FW_LOADER=y | 401 | CONFIG_FW_LOADER=y |
400 | # CONFIG_DEBUG_DRIVER is not set | 402 | # CONFIG_DEBUG_DRIVER is not set |
403 | # CONFIG_DEBUG_DEVRES is not set | ||
401 | # CONFIG_SYS_HYPERVISOR is not set | 404 | # CONFIG_SYS_HYPERVISOR is not set |
402 | 405 | ||
403 | # | 406 | # |
@@ -466,6 +469,7 @@ CONFIG_BLK_DEV_IDECD=y | |||
466 | # CONFIG_BLK_DEV_IDETAPE is not set | 469 | # CONFIG_BLK_DEV_IDETAPE is not set |
467 | # CONFIG_BLK_DEV_IDEFLOPPY is not set | 470 | # CONFIG_BLK_DEV_IDEFLOPPY is not set |
468 | # CONFIG_BLK_DEV_IDESCSI is not set | 471 | # CONFIG_BLK_DEV_IDESCSI is not set |
472 | CONFIG_BLK_DEV_IDEACPI=y | ||
469 | # CONFIG_IDE_TASK_IOCTL is not set | 473 | # CONFIG_IDE_TASK_IOCTL is not set |
470 | 474 | ||
471 | # | 475 | # |
@@ -497,6 +501,7 @@ CONFIG_BLK_DEV_ATIIXP=y | |||
497 | # CONFIG_BLK_DEV_JMICRON is not set | 501 | # CONFIG_BLK_DEV_JMICRON is not set |
498 | # CONFIG_BLK_DEV_SC1200 is not set | 502 | # CONFIG_BLK_DEV_SC1200 is not set |
499 | CONFIG_BLK_DEV_PIIX=y | 503 | CONFIG_BLK_DEV_PIIX=y |
504 | # CONFIG_BLK_DEV_IT8213 is not set | ||
500 | # CONFIG_BLK_DEV_IT821X is not set | 505 | # CONFIG_BLK_DEV_IT821X is not set |
501 | # CONFIG_BLK_DEV_NS87415 is not set | 506 | # CONFIG_BLK_DEV_NS87415 is not set |
502 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | 507 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set |
@@ -507,6 +512,7 @@ CONFIG_BLK_DEV_PDC202XX_NEW=y | |||
507 | # CONFIG_BLK_DEV_SLC90E66 is not set | 512 | # CONFIG_BLK_DEV_SLC90E66 is not set |
508 | # CONFIG_BLK_DEV_TRM290 is not set | 513 | # CONFIG_BLK_DEV_TRM290 is not set |
509 | # CONFIG_BLK_DEV_VIA82CXXX is not set | 514 | # CONFIG_BLK_DEV_VIA82CXXX is not set |
515 | # CONFIG_BLK_DEV_TC86C001 is not set | ||
510 | # CONFIG_IDE_ARM is not set | 516 | # CONFIG_IDE_ARM is not set |
511 | CONFIG_BLK_DEV_IDEDMA=y | 517 | CONFIG_BLK_DEV_IDEDMA=y |
512 | # CONFIG_IDEDMA_IVB is not set | 518 | # CONFIG_IDEDMA_IVB is not set |
@@ -599,6 +605,7 @@ CONFIG_MEGARAID_SAS=y | |||
599 | # Serial ATA (prod) and Parallel ATA (experimental) drivers | 605 | # Serial ATA (prod) and Parallel ATA (experimental) drivers |
600 | # | 606 | # |
601 | CONFIG_ATA=y | 607 | CONFIG_ATA=y |
608 | # CONFIG_ATA_NONSTANDARD is not set | ||
602 | CONFIG_SATA_AHCI=y | 609 | CONFIG_SATA_AHCI=y |
603 | CONFIG_SATA_SVW=y | 610 | CONFIG_SATA_SVW=y |
604 | CONFIG_ATA_PIIX=y | 611 | CONFIG_ATA_PIIX=y |
@@ -614,6 +621,7 @@ CONFIG_SATA_SIL=y | |||
614 | # CONFIG_SATA_ULI is not set | 621 | # CONFIG_SATA_ULI is not set |
615 | CONFIG_SATA_VIA=y | 622 | CONFIG_SATA_VIA=y |
616 | # CONFIG_SATA_VITESSE is not set | 623 | # CONFIG_SATA_VITESSE is not set |
624 | # CONFIG_SATA_INIC162X is not set | ||
617 | CONFIG_SATA_INTEL_COMBINED=y | 625 | CONFIG_SATA_INTEL_COMBINED=y |
618 | # CONFIG_PATA_ALI is not set | 626 | # CONFIG_PATA_ALI is not set |
619 | # CONFIG_PATA_AMD is not set | 627 | # CONFIG_PATA_AMD is not set |
@@ -630,6 +638,7 @@ CONFIG_SATA_INTEL_COMBINED=y | |||
630 | # CONFIG_PATA_HPT3X2N is not set | 638 | # CONFIG_PATA_HPT3X2N is not set |
631 | # CONFIG_PATA_HPT3X3 is not set | 639 | # CONFIG_PATA_HPT3X3 is not set |
632 | # CONFIG_PATA_IT821X is not set | 640 | # CONFIG_PATA_IT821X is not set |
641 | # CONFIG_PATA_IT8213 is not set | ||
633 | # CONFIG_PATA_JMICRON is not set | 642 | # CONFIG_PATA_JMICRON is not set |
634 | # CONFIG_PATA_TRIFLEX is not set | 643 | # CONFIG_PATA_TRIFLEX is not set |
635 | # CONFIG_PATA_MARVELL is not set | 644 | # CONFIG_PATA_MARVELL is not set |
@@ -682,9 +691,7 @@ CONFIG_IEEE1394=y | |||
682 | # Subsystem Options | 691 | # Subsystem Options |
683 | # | 692 | # |
684 | # CONFIG_IEEE1394_VERBOSEDEBUG is not set | 693 | # CONFIG_IEEE1394_VERBOSEDEBUG is not set |
685 | # CONFIG_IEEE1394_OUI_DB is not set | ||
686 | # CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set | 694 | # CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set |
687 | # CONFIG_IEEE1394_EXPORT_FULL_API is not set | ||
688 | 695 | ||
689 | # | 696 | # |
690 | # Device Drivers | 697 | # Device Drivers |
@@ -707,6 +714,11 @@ CONFIG_IEEE1394_RAWIO=y | |||
707 | # CONFIG_I2O is not set | 714 | # CONFIG_I2O is not set |
708 | 715 | ||
709 | # | 716 | # |
717 | # Macintosh device drivers | ||
718 | # | ||
719 | # CONFIG_MAC_EMUMOUSEBTN is not set | ||
720 | |||
721 | # | ||
710 | # Network device support | 722 | # Network device support |
711 | # | 723 | # |
712 | CONFIG_NETDEVICES=y | 724 | CONFIG_NETDEVICES=y |
@@ -774,6 +786,7 @@ CONFIG_8139TOO=y | |||
774 | # CONFIG_EPIC100 is not set | 786 | # CONFIG_EPIC100 is not set |
775 | # CONFIG_SUNDANCE is not set | 787 | # CONFIG_SUNDANCE is not set |
776 | # CONFIG_VIA_RHINE is not set | 788 | # CONFIG_VIA_RHINE is not set |
789 | # CONFIG_SC92031 is not set | ||
777 | 790 | ||
778 | # | 791 | # |
779 | # Ethernet (1000 Mbit) | 792 | # Ethernet (1000 Mbit) |
@@ -795,11 +808,13 @@ CONFIG_E1000=y | |||
795 | CONFIG_TIGON3=y | 808 | CONFIG_TIGON3=y |
796 | CONFIG_BNX2=y | 809 | CONFIG_BNX2=y |
797 | # CONFIG_QLA3XXX is not set | 810 | # CONFIG_QLA3XXX is not set |
811 | # CONFIG_ATL1 is not set | ||
798 | 812 | ||
799 | # | 813 | # |
800 | # Ethernet (10000 Mbit) | 814 | # Ethernet (10000 Mbit) |
801 | # | 815 | # |
802 | # CONFIG_CHELSIO_T1 is not set | 816 | # CONFIG_CHELSIO_T1 is not set |
817 | # CONFIG_CHELSIO_T3 is not set | ||
803 | # CONFIG_IXGB is not set | 818 | # CONFIG_IXGB is not set |
804 | CONFIG_S2IO=m | 819 | CONFIG_S2IO=m |
805 | # CONFIG_S2IO_NAPI is not set | 820 | # CONFIG_S2IO_NAPI is not set |
@@ -1115,6 +1130,7 @@ CONFIG_SOUND=y | |||
1115 | # Open Sound System | 1130 | # Open Sound System |
1116 | # | 1131 | # |
1117 | CONFIG_SOUND_PRIME=y | 1132 | CONFIG_SOUND_PRIME=y |
1133 | CONFIG_OBSOLETE_OSS=y | ||
1118 | # CONFIG_SOUND_BT878 is not set | 1134 | # CONFIG_SOUND_BT878 is not set |
1119 | # CONFIG_SOUND_ES1371 is not set | 1135 | # CONFIG_SOUND_ES1371 is not set |
1120 | CONFIG_SOUND_ICH=y | 1136 | CONFIG_SOUND_ICH=y |
@@ -1128,6 +1144,7 @@ CONFIG_SOUND_ICH=y | |||
1128 | # HID Devices | 1144 | # HID Devices |
1129 | # | 1145 | # |
1130 | CONFIG_HID=y | 1146 | CONFIG_HID=y |
1147 | # CONFIG_HID_DEBUG is not set | ||
1131 | 1148 | ||
1132 | # | 1149 | # |
1133 | # USB support | 1150 | # USB support |
@@ -1142,10 +1159,8 @@ CONFIG_USB=y | |||
1142 | # Miscellaneous USB options | 1159 | # Miscellaneous USB options |
1143 | # | 1160 | # |
1144 | CONFIG_USB_DEVICEFS=y | 1161 | CONFIG_USB_DEVICEFS=y |
1145 | # CONFIG_USB_BANDWIDTH is not set | ||
1146 | # CONFIG_USB_DYNAMIC_MINORS is not set | 1162 | # CONFIG_USB_DYNAMIC_MINORS is not set |
1147 | # CONFIG_USB_SUSPEND is not set | 1163 | # CONFIG_USB_SUSPEND is not set |
1148 | # CONFIG_USB_MULTITHREAD_PROBE is not set | ||
1149 | # CONFIG_USB_OTG is not set | 1164 | # CONFIG_USB_OTG is not set |
1150 | 1165 | ||
1151 | # | 1166 | # |
@@ -1155,9 +1170,11 @@ CONFIG_USB_EHCI_HCD=y | |||
1155 | # CONFIG_USB_EHCI_SPLIT_ISO is not set | 1170 | # CONFIG_USB_EHCI_SPLIT_ISO is not set |
1156 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set | 1171 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set |
1157 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | 1172 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set |
1173 | # CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set | ||
1158 | # CONFIG_USB_ISP116X_HCD is not set | 1174 | # CONFIG_USB_ISP116X_HCD is not set |
1159 | CONFIG_USB_OHCI_HCD=y | 1175 | CONFIG_USB_OHCI_HCD=y |
1160 | # CONFIG_USB_OHCI_BIG_ENDIAN is not set | 1176 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set |
1177 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set | ||
1161 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | 1178 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y |
1162 | CONFIG_USB_UHCI_HCD=y | 1179 | CONFIG_USB_UHCI_HCD=y |
1163 | # CONFIG_USB_SL811_HCD is not set | 1180 | # CONFIG_USB_SL811_HCD is not set |
@@ -1208,6 +1225,7 @@ CONFIG_USB_HID=y | |||
1208 | # CONFIG_USB_ATI_REMOTE2 is not set | 1225 | # CONFIG_USB_ATI_REMOTE2 is not set |
1209 | # CONFIG_USB_KEYSPAN_REMOTE is not set | 1226 | # CONFIG_USB_KEYSPAN_REMOTE is not set |
1210 | # CONFIG_USB_APPLETOUCH is not set | 1227 | # CONFIG_USB_APPLETOUCH is not set |
1228 | # CONFIG_USB_GTCO is not set | ||
1211 | 1229 | ||
1212 | # | 1230 | # |
1213 | # USB Imaging devices | 1231 | # USB Imaging devices |
@@ -1313,6 +1331,10 @@ CONFIG_USB_MON=y | |||
1313 | # | 1331 | # |
1314 | 1332 | ||
1315 | # | 1333 | # |
1334 | # Auxiliary Display support | ||
1335 | # | ||
1336 | |||
1337 | # | ||
1316 | # Virtualization | 1338 | # Virtualization |
1317 | # | 1339 | # |
1318 | # CONFIG_KVM is not set | 1340 | # CONFIG_KVM is not set |
@@ -1512,6 +1534,7 @@ CONFIG_UNUSED_SYMBOLS=y | |||
1512 | CONFIG_DEBUG_FS=y | 1534 | CONFIG_DEBUG_FS=y |
1513 | # CONFIG_HEADERS_CHECK is not set | 1535 | # CONFIG_HEADERS_CHECK is not set |
1514 | CONFIG_DEBUG_KERNEL=y | 1536 | CONFIG_DEBUG_KERNEL=y |
1537 | # CONFIG_DEBUG_SHIRQ is not set | ||
1515 | CONFIG_LOG_BUF_SHIFT=18 | 1538 | CONFIG_LOG_BUF_SHIFT=18 |
1516 | CONFIG_DETECT_SOFTLOCKUP=y | 1539 | CONFIG_DETECT_SOFTLOCKUP=y |
1517 | # CONFIG_SCHEDSTATS is not set | 1540 | # CONFIG_SCHEDSTATS is not set |
@@ -1520,7 +1543,6 @@ CONFIG_DETECT_SOFTLOCKUP=y | |||
1520 | # CONFIG_RT_MUTEX_TESTER is not set | 1543 | # CONFIG_RT_MUTEX_TESTER is not set |
1521 | # CONFIG_DEBUG_SPINLOCK is not set | 1544 | # CONFIG_DEBUG_SPINLOCK is not set |
1522 | # CONFIG_DEBUG_MUTEXES is not set | 1545 | # CONFIG_DEBUG_MUTEXES is not set |
1523 | # CONFIG_DEBUG_RWSEMS is not set | ||
1524 | # CONFIG_DEBUG_LOCK_ALLOC is not set | 1546 | # CONFIG_DEBUG_LOCK_ALLOC is not set |
1525 | # CONFIG_PROVE_LOCKING is not set | 1547 | # CONFIG_PROVE_LOCKING is not set |
1526 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | 1548 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set |
@@ -1560,4 +1582,5 @@ CONFIG_CRC32=y | |||
1560 | # CONFIG_LIBCRC32C is not set | 1582 | # CONFIG_LIBCRC32C is not set |
1561 | CONFIG_ZLIB_INFLATE=y | 1583 | CONFIG_ZLIB_INFLATE=y |
1562 | CONFIG_PLIST=y | 1584 | CONFIG_PLIST=y |
1563 | CONFIG_IOMAP_COPY=y | 1585 | CONFIG_HAS_IOMEM=y |
1586 | CONFIG_HAS_IOPORT=y | ||
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index ff499ef2a1ba..359eacc38509 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
22 | #include <linux/personality.h> | 22 | #include <linux/personality.h> |
23 | #include <linux/compat.h> | 23 | #include <linux/compat.h> |
24 | #include <linux/binfmts.h> | ||
24 | #include <asm/ucontext.h> | 25 | #include <asm/ucontext.h> |
25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
26 | #include <asm/i387.h> | 27 | #include <asm/i387.h> |
@@ -449,7 +450,11 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, | |||
449 | 450 | ||
450 | /* Return stub is in 32bit vsyscall page */ | 451 | /* Return stub is in 32bit vsyscall page */ |
451 | { | 452 | { |
452 | void __user *restorer = VSYSCALL32_SIGRETURN; | 453 | void __user *restorer; |
454 | if (current->binfmt->hasvdso) | ||
455 | restorer = VSYSCALL32_SIGRETURN; | ||
456 | else | ||
457 | restorer = (void *)&frame->retcode; | ||
453 | if (ka->sa.sa_flags & SA_RESTORER) | 458 | if (ka->sa.sa_flags & SA_RESTORER) |
454 | restorer = ka->sa.sa_restorer; | 459 | restorer = ka->sa.sa_restorer; |
455 | err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); | 460 | err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); |
@@ -495,7 +500,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, | |||
495 | ptrace_notify(SIGTRAP); | 500 | ptrace_notify(SIGTRAP); |
496 | 501 | ||
497 | #if DEBUG_SIG | 502 | #if DEBUG_SIG |
498 | printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", | 503 | printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", |
499 | current->comm, current->pid, frame, regs->rip, frame->pretcode); | 504 | current->comm, current->pid, frame, regs->rip, frame->pretcode); |
500 | #endif | 505 | #endif |
501 | 506 | ||
@@ -601,7 +606,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
601 | ptrace_notify(SIGTRAP); | 606 | ptrace_notify(SIGTRAP); |
602 | 607 | ||
603 | #if DEBUG_SIG | 608 | #if DEBUG_SIG |
604 | printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", | 609 | printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", |
605 | current->comm, current->pid, frame, regs->rip, frame->pretcode); | 610 | current->comm, current->pid, frame, regs->rip, frame->pretcode); |
606 | #endif | 611 | #endif |
607 | 612 | ||
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 5f32cf4de5fb..eda7a0d4dc15 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -718,4 +718,5 @@ ia32_sys_call_table: | |||
718 | .quad compat_sys_vmsplice | 718 | .quad compat_sys_vmsplice |
719 | .quad compat_sys_move_pages | 719 | .quad compat_sys_move_pages |
720 | .quad sys_getcpu | 720 | .quad sys_getcpu |
721 | .quad sys_epoll_pwait | ||
721 | ia32_syscall_end: | 722 | ia32_syscall_end: |
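Wiring sys_epoll_pwait into the ia32 syscall table lets 32-bit binaries running on a 64-bit kernel use the call. A minimal userspace sketch of what that enables (illustrative, not taken from the kernel tree):

#define _GNU_SOURCE
#include <sys/epoll.h>
#include <signal.h>

/* epoll_pwait() applies the given signal mask atomically for the duration of
 * the wait, closing the race between unblocking a signal and blocking in a
 * plain epoll_wait(). */
int wait_without_sigint(int epfd, struct epoll_event *events, int maxevents)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);	/* keep SIGINT out while we sleep */

	return epoll_pwait(epfd, events, maxevents, -1 /* no timeout */, &mask);
}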
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 3c7cbff04d3d..ae399458024b 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_PCI) += early-quirks.o | |||
43 | 43 | ||
44 | obj-y += topology.o | 44 | obj-y += topology.o |
45 | obj-y += intel_cacheinfo.o | 45 | obj-y += intel_cacheinfo.o |
46 | obj-y += pcspeaker.o | ||
46 | 47 | ||
47 | CFLAGS_vsyscall.o := $(PROFILING) -g0 | 48 | CFLAGS_vsyscall.o := $(PROFILING) -g0 |
48 | 49 | ||
@@ -56,3 +57,4 @@ quirks-y += ../../i386/kernel/quirks.o | |||
56 | i8237-y += ../../i386/kernel/i8237.o | 57 | i8237-y += ../../i386/kernel/i8237.o |
57 | msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o | 58 | msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o |
58 | alternative-y += ../../i386/kernel/alternative.o | 59 | alternative-y += ../../i386/kernel/alternative.o |
60 | pcspeaker-y += ../../i386/kernel/pcspeaker.o | ||
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 5ebf62c7a3d2..23178ce6c783 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -58,7 +58,7 @@ unsigned long acpi_wakeup_address = 0; | |||
58 | unsigned long acpi_video_flags; | 58 | unsigned long acpi_video_flags; |
59 | extern char wakeup_start, wakeup_end; | 59 | extern char wakeup_start, wakeup_end; |
60 | 60 | ||
61 | extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long)); | 61 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); |
62 | 62 | ||
63 | static pgd_t low_ptr; | 63 | static pgd_t low_ptr; |
64 | 64 | ||
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 6fe191c58084..4651fd22b213 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -83,6 +83,13 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size) | |||
83 | return 1; | 83 | return 1; |
84 | } | 84 | } |
85 | 85 | ||
86 | #ifdef CONFIG_NUMA | ||
87 | /* NUMA memory to node map */ | ||
88 | if (last >= nodemap_addr && addr < nodemap_addr + nodemap_size) { | ||
89 | *addrp = nodemap_addr + nodemap_size; | ||
90 | return 1; | ||
91 | } | ||
92 | #endif | ||
86 | /* XXX ramdisk image here? */ | 93 | /* XXX ramdisk image here? */ |
87 | return 0; | 94 | return 0; |
88 | } | 95 | } |
@@ -184,6 +191,37 @@ unsigned long __init e820_end_of_ram(void) | |||
184 | } | 191 | } |
185 | 192 | ||
186 | /* | 193 | /* |
194 | * Find the hole size in the range. | ||
195 | */ | ||
196 | unsigned long __init e820_hole_size(unsigned long start, unsigned long end) | ||
197 | { | ||
198 | unsigned long ram = 0; | ||
199 | int i; | ||
200 | |||
201 | for (i = 0; i < e820.nr_map; i++) { | ||
202 | struct e820entry *ei = &e820.map[i]; | ||
203 | unsigned long last, addr; | ||
204 | |||
205 | if (ei->type != E820_RAM || | ||
206 | ei->addr+ei->size <= start || | ||
207 | ei->addr >= end) | ||
208 | continue; | ||
209 | |||
210 | addr = round_up(ei->addr, PAGE_SIZE); | ||
211 | if (addr < start) | ||
212 | addr = start; | ||
213 | |||
214 | last = round_down(ei->addr + ei->size, PAGE_SIZE); | ||
215 | if (last >= end) | ||
216 | last = end; | ||
217 | |||
218 | if (last > addr) | ||
219 | ram += last - addr; | ||
220 | } | ||
221 | return ((end - start) - ram); | ||
222 | } | ||
223 | |||
224 | /* | ||
187 | * Mark e820 reserved areas as busy for the resource manager. | 225 | * Mark e820 reserved areas as busy for the resource manager. |
188 | */ | 226 | */ |
189 | void __init e820_reserve_resources(void) | 227 | void __init e820_reserve_resources(void) |
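The new e820_hole_size() above returns how many bytes in [start, end) are not covered by E820_RAM entries, after page-aligning each entry and clipping it to the range. A hypothetical caller, just to show the intended use (not part of this patch):

/* Hypothetical helper: accept a candidate memory range only if less than
 * half of it is hole, using the new e820_hole_size(). */
static int __init range_is_mostly_ram(unsigned long start, unsigned long end)
{
	unsigned long hole = e820_hole_size(start, end);

	return hole * 2 < (end - start);
}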
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 1e6f80870679..598a4d0351fc 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -163,6 +163,20 @@ startup_64: | |||
163 | */ | 163 | */ |
164 | lgdt cpu_gdt_descr | 164 | lgdt cpu_gdt_descr |
165 | 165 | ||
166 | /* set up data segments. actually 0 would do too */ | ||
167 | movl $__KERNEL_DS,%eax | ||
168 | movl %eax,%ds | ||
169 | movl %eax,%ss | ||
170 | movl %eax,%es | ||
171 | |||
172 | /* | ||
173 | * We don't really need to load %fs or %gs, but load them anyway | ||
174 | * to kill any stale realmode selectors. This allows execution | ||
175 | * under VT hardware. | ||
176 | */ | ||
177 | movl %eax,%fs | ||
178 | movl %eax,%gs | ||
179 | |||
166 | /* | 180 | /* |
167 | * Setup up a dummy PDA. this is just for some early bootup code | 181 | * Setup up a dummy PDA. this is just for some early bootup code |
168 | * that does in_interrupt() | 182 | * that does in_interrupt() |
@@ -173,12 +187,6 @@ startup_64: | |||
173 | shrq $32,%rdx | 187 | shrq $32,%rdx |
174 | wrmsr | 188 | wrmsr |
175 | 189 | ||
176 | /* set up data segments. actually 0 would do too */ | ||
177 | movl $__KERNEL_DS,%eax | ||
178 | movl %eax,%ds | ||
179 | movl %eax,%ss | ||
180 | movl %eax,%es | ||
181 | |||
182 | /* esi is pointer to real mode structure with interesting info. | 190 | /* esi is pointer to real mode structure with interesting info. |
183 | pass it to C */ | 191 | pass it to C */ |
184 | movl %esi, %edi | 192 | movl %esi, %edi |
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 6be6730acb5c..566e64d966c4 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -831,7 +831,7 @@ static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq) | |||
831 | entry.delivery_mode = INT_DELIVERY_MODE; | 831 | entry.delivery_mode = INT_DELIVERY_MODE; |
832 | entry.dest_mode = INT_DEST_MODE; | 832 | entry.dest_mode = INT_DEST_MODE; |
833 | entry.mask = 0; /* enable IRQ */ | 833 | entry.mask = 0; /* enable IRQ */ |
834 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); | 834 | entry.dest = cpu_mask_to_apicid(TARGET_CPUS); |
835 | 835 | ||
836 | entry.trigger = irq_trigger(idx); | 836 | entry.trigger = irq_trigger(idx); |
837 | entry.polarity = irq_polarity(idx); | 837 | entry.polarity = irq_polarity(idx); |
@@ -839,7 +839,7 @@ static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq) | |||
839 | if (irq_trigger(idx)) { | 839 | if (irq_trigger(idx)) { |
840 | entry.trigger = 1; | 840 | entry.trigger = 1; |
841 | entry.mask = 1; | 841 | entry.mask = 1; |
842 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); | 842 | entry.dest = cpu_mask_to_apicid(TARGET_CPUS); |
843 | } | 843 | } |
844 | 844 | ||
845 | if (!apic && !IO_APIC_IRQ(irq)) | 845 | if (!apic && !IO_APIC_IRQ(irq)) |
@@ -851,7 +851,7 @@ static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq) | |||
851 | if (vector < 0) | 851 | if (vector < 0) |
852 | return; | 852 | return; |
853 | 853 | ||
854 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask); | 854 | entry.dest = cpu_mask_to_apicid(mask); |
855 | entry.vector = vector; | 855 | entry.vector = vector; |
856 | 856 | ||
857 | ioapic_register_intr(irq, vector, IOAPIC_AUTO); | 857 | ioapic_register_intr(irq, vector, IOAPIC_AUTO); |
@@ -920,7 +920,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in | |||
920 | */ | 920 | */ |
921 | entry.dest_mode = INT_DEST_MODE; | 921 | entry.dest_mode = INT_DEST_MODE; |
922 | entry.mask = 0; /* unmask IRQ now */ | 922 | entry.mask = 0; /* unmask IRQ now */ |
923 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); | 923 | entry.dest = cpu_mask_to_apicid(TARGET_CPUS); |
924 | entry.delivery_mode = INT_DELIVERY_MODE; | 924 | entry.delivery_mode = INT_DELIVERY_MODE; |
925 | entry.polarity = 0; | 925 | entry.polarity = 0; |
926 | entry.trigger = 0; | 926 | entry.trigger = 0; |
@@ -1020,18 +1020,17 @@ void __apicdebuginit print_IO_APIC(void) | |||
1020 | 1020 | ||
1021 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); | 1021 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); |
1022 | 1022 | ||
1023 | printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol" | 1023 | printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" |
1024 | " Stat Dest Deli Vect: \n"); | 1024 | " Stat Dmod Deli Vect: \n"); |
1025 | 1025 | ||
1026 | for (i = 0; i <= reg_01.bits.entries; i++) { | 1026 | for (i = 0; i <= reg_01.bits.entries; i++) { |
1027 | struct IO_APIC_route_entry entry; | 1027 | struct IO_APIC_route_entry entry; |
1028 | 1028 | ||
1029 | entry = ioapic_read_entry(apic, i); | 1029 | entry = ioapic_read_entry(apic, i); |
1030 | 1030 | ||
1031 | printk(KERN_DEBUG " %02x %03X %02X ", | 1031 | printk(KERN_DEBUG " %02x %03X ", |
1032 | i, | 1032 | i, |
1033 | entry.dest.logical.logical_dest, | 1033 | entry.dest |
1034 | entry.dest.physical.physical_dest | ||
1035 | ); | 1034 | ); |
1036 | 1035 | ||
1037 | printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", | 1036 | printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", |
@@ -1293,8 +1292,7 @@ void disable_IO_APIC(void) | |||
1293 | entry.dest_mode = 0; /* Physical */ | 1292 | entry.dest_mode = 0; /* Physical */ |
1294 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | 1293 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1295 | entry.vector = 0; | 1294 | entry.vector = 0; |
1296 | entry.dest.physical.physical_dest = | 1295 | entry.dest = GET_APIC_ID(apic_read(APIC_ID)); |
1297 | GET_APIC_ID(apic_read(APIC_ID)); | ||
1298 | 1296 | ||
1299 | /* | 1297 | /* |
1300 | * Add it to the IO-APIC irq-routing table: | 1298 | * Add it to the IO-APIC irq-routing table: |
@@ -1556,7 +1554,7 @@ static inline void unlock_ExtINT_logic(void) | |||
1556 | 1554 | ||
1557 | entry1.dest_mode = 0; /* physical delivery */ | 1555 | entry1.dest_mode = 0; /* physical delivery */ |
1558 | entry1.mask = 0; /* unmask IRQ now */ | 1556 | entry1.mask = 0; /* unmask IRQ now */ |
1559 | entry1.dest.physical.physical_dest = hard_smp_processor_id(); | 1557 | entry1.dest = hard_smp_processor_id(); |
1560 | entry1.delivery_mode = dest_ExtINT; | 1558 | entry1.delivery_mode = dest_ExtINT; |
1561 | entry1.polarity = entry0.polarity; | 1559 | entry1.polarity = entry0.polarity; |
1562 | entry1.trigger = 0; | 1560 | entry1.trigger = 0; |
@@ -2131,7 +2129,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p | |||
2131 | 2129 | ||
2132 | entry.delivery_mode = INT_DELIVERY_MODE; | 2130 | entry.delivery_mode = INT_DELIVERY_MODE; |
2133 | entry.dest_mode = INT_DEST_MODE; | 2131 | entry.dest_mode = INT_DEST_MODE; |
2134 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask); | 2132 | entry.dest = cpu_mask_to_apicid(mask); |
2135 | entry.trigger = triggering; | 2133 | entry.trigger = triggering; |
2136 | entry.polarity = polarity; | 2134 | entry.polarity = polarity; |
2137 | entry.mask = 1; /* Disabled (masked) */ | 2135 | entry.mask = 1; /* Disabled (masked) */ |
diff --git a/arch/x86_64/kernel/ioport.c b/arch/x86_64/kernel/ioport.c
index fe063d3cfe42..745b1f0f494e 100644
--- a/arch/x86_64/kernel/ioport.c
+++ b/arch/x86_64/kernel/ioport.c
@@ -114,6 +114,6 @@ asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs) | |||
114 | if (!capable(CAP_SYS_RAWIO)) | 114 | if (!capable(CAP_SYS_RAWIO)) |
115 | return -EPERM; | 115 | return -EPERM; |
116 | } | 116 | } |
117 | regs->eflags = (regs->eflags &~ 0x3000UL) | (level << 12); | 117 | regs->eflags = (regs->eflags &~ X86_EFLAGS_IOPL) | (level << 12); |
118 | return 0; | 118 | return 0; |
119 | } | 119 | } |
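The ioport.c change only swaps a magic constant for its symbolic name: X86_EFLAGS_IOPL is 0x3000, the two-bit I/O privilege level field at bits 12-13 of EFLAGS. A standalone restatement of the arithmetic (values assumed from the standard EFLAGS layout, not from this diff):

/* With level == 3:
 *   eflags & ~0x3000UL   clears the old IOPL field (bits 12-13),
 *   3UL << 12 == 0x3000  writes the new level into the same bits,
 * so the named-constant form computes exactly what the literal 0x3000 did. */
static unsigned long set_iopl(unsigned long eflags, unsigned int level)
{
	return (eflags & ~0x3000UL) | ((unsigned long)level << 12);
}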
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 0c06af6c13bc..3bc30d2c13d3 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
19 | #include <asm/io_apic.h> | 19 | #include <asm/io_apic.h> |
20 | #include <asm/idle.h> | 20 | #include <asm/idle.h> |
21 | #include <asm/smp.h> | ||
21 | 22 | ||
22 | atomic_t irq_err_count; | 23 | atomic_t irq_err_count; |
23 | 24 | ||
@@ -120,9 +121,14 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs) | |||
120 | 121 | ||
121 | if (likely(irq < NR_IRQS)) | 122 | if (likely(irq < NR_IRQS)) |
122 | generic_handle_irq(irq); | 123 | generic_handle_irq(irq); |
123 | else if (printk_ratelimit()) | 124 | else { |
124 | printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n", | 125 | if (!disable_apic) |
125 | __func__, smp_processor_id(), vector); | 126 | ack_APIC_irq(); |
127 | |||
128 | if (printk_ratelimit()) | ||
129 | printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n", | ||
130 | __func__, smp_processor_id(), vector); | ||
131 | } | ||
126 | 132 | ||
127 | irq_exit(); | 133 | irq_exit(); |
128 | 134 | ||
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bdb54a2c9f18..8011a8e1c7d4 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include <linux/percpu.h> | 20 | #include <linux/percpu.h> |
21 | #include <linux/ctype.h> | 21 | #include <linux/ctype.h> |
22 | #include <linux/kmod.h> | ||
22 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
23 | #include <asm/msr.h> | 24 | #include <asm/msr.h> |
24 | #include <asm/mce.h> | 25 | #include <asm/mce.h> |
@@ -42,6 +43,10 @@ static unsigned long console_logged; | |||
42 | static int notify_user; | 43 | static int notify_user; |
43 | static int rip_msr; | 44 | static int rip_msr; |
44 | static int mce_bootlog = 1; | 45 | static int mce_bootlog = 1; |
46 | static atomic_t mce_events; | ||
47 | |||
48 | static char trigger[128]; | ||
49 | static char *trigger_argv[2] = { trigger, NULL }; | ||
45 | 50 | ||
46 | /* | 51 | /* |
47 | * Lockless MCE logging infrastructure. | 52 | * Lockless MCE logging infrastructure. |
@@ -57,6 +62,7 @@ struct mce_log mcelog = { | |||
57 | void mce_log(struct mce *mce) | 62 | void mce_log(struct mce *mce) |
58 | { | 63 | { |
59 | unsigned next, entry; | 64 | unsigned next, entry; |
65 | atomic_inc(&mce_events); | ||
60 | mce->finished = 0; | 66 | mce->finished = 0; |
61 | wmb(); | 67 | wmb(); |
62 | for (;;) { | 68 | for (;;) { |
@@ -161,6 +167,17 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) | |||
161 | } | 167 | } |
162 | } | 168 | } |
163 | 169 | ||
170 | static void do_mce_trigger(void) | ||
171 | { | ||
172 | static atomic_t mce_logged; | ||
173 | int events = atomic_read(&mce_events); | ||
174 | if (events != atomic_read(&mce_logged) && trigger[0]) { | ||
175 | /* Small race window, but should be harmless. */ | ||
176 | atomic_set(&mce_logged, events); | ||
177 | call_usermodehelper(trigger, trigger_argv, NULL, -1); | ||
178 | } | ||
179 | } | ||
180 | |||
164 | /* | 181 | /* |
165 | * The actual machine check handler | 182 | * The actual machine check handler |
166 | */ | 183 | */ |
@@ -234,8 +251,12 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
234 | } | 251 | } |
235 | 252 | ||
236 | /* Never do anything final in the polling timer */ | 253 | /* Never do anything final in the polling timer */ |
237 | if (!regs) | 254 | if (!regs) { |
255 | /* Normal interrupt context here. Call trigger for any new | ||
256 | events. */ | ||
257 | do_mce_trigger(); | ||
238 | goto out; | 258 | goto out; |
259 | } | ||
239 | 260 | ||
240 | /* If we didn't find an uncorrectable error, pick | 261 | /* If we didn't find an uncorrectable error, pick |
241 | the last one (shouldn't happen, just being safe). */ | 262 | the last one (shouldn't happen, just being safe). */ |
@@ -606,17 +627,42 @@ DEFINE_PER_CPU(struct sys_device, device_mce); | |||
606 | } \ | 627 | } \ |
607 | static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); | 628 | static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); |
608 | 629 | ||
630 | /* TBD should generate these dynamically based on number of available banks */ | ||
609 | ACCESSOR(bank0ctl,bank[0],mce_restart()) | 631 | ACCESSOR(bank0ctl,bank[0],mce_restart()) |
610 | ACCESSOR(bank1ctl,bank[1],mce_restart()) | 632 | ACCESSOR(bank1ctl,bank[1],mce_restart()) |
611 | ACCESSOR(bank2ctl,bank[2],mce_restart()) | 633 | ACCESSOR(bank2ctl,bank[2],mce_restart()) |
612 | ACCESSOR(bank3ctl,bank[3],mce_restart()) | 634 | ACCESSOR(bank3ctl,bank[3],mce_restart()) |
613 | ACCESSOR(bank4ctl,bank[4],mce_restart()) | 635 | ACCESSOR(bank4ctl,bank[4],mce_restart()) |
614 | ACCESSOR(bank5ctl,bank[5],mce_restart()) | 636 | ACCESSOR(bank5ctl,bank[5],mce_restart()) |
615 | static struct sysdev_attribute * bank_attributes[NR_BANKS] = { | 637 | |
616 | &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl, | 638 | static ssize_t show_trigger(struct sys_device *s, char *buf) |
617 | &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl}; | 639 | { |
640 | strcpy(buf, trigger); | ||
641 | strcat(buf, "\n"); | ||
642 | return strlen(trigger) + 1; | ||
643 | } | ||
644 | |||
645 | static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz) | ||
646 | { | ||
647 | char *p; | ||
648 | int len; | ||
649 | strncpy(trigger, buf, sizeof(trigger)); | ||
650 | trigger[sizeof(trigger)-1] = 0; | ||
651 | len = strlen(trigger); | ||
652 | p = strchr(trigger, '\n'); | ||
653 | if (*p) *p = 0; | ||
654 | return len; | ||
655 | } | ||
656 | |||
657 | static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); | ||
618 | ACCESSOR(tolerant,tolerant,) | 658 | ACCESSOR(tolerant,tolerant,) |
619 | ACCESSOR(check_interval,check_interval,mce_restart()) | 659 | ACCESSOR(check_interval,check_interval,mce_restart()) |
660 | static struct sysdev_attribute *mce_attributes[] = { | ||
661 | &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl, | ||
662 | &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl, | ||
663 | &attr_tolerant, &attr_check_interval, &attr_trigger, | ||
664 | NULL | ||
665 | }; | ||
620 | 666 | ||
621 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ | 667 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ |
622 | static __cpuinit int mce_create_device(unsigned int cpu) | 668 | static __cpuinit int mce_create_device(unsigned int cpu) |
@@ -632,11 +678,9 @@ static __cpuinit int mce_create_device(unsigned int cpu) | |||
632 | err = sysdev_register(&per_cpu(device_mce,cpu)); | 678 | err = sysdev_register(&per_cpu(device_mce,cpu)); |
633 | 679 | ||
634 | if (!err) { | 680 | if (!err) { |
635 | for (i = 0; i < banks; i++) | 681 | for (i = 0; mce_attributes[i]; i++) |
636 | sysdev_create_file(&per_cpu(device_mce,cpu), | 682 | sysdev_create_file(&per_cpu(device_mce,cpu), |
637 | bank_attributes[i]); | 683 | mce_attributes[i]); |
638 | sysdev_create_file(&per_cpu(device_mce,cpu), &attr_tolerant); | ||
639 | sysdev_create_file(&per_cpu(device_mce,cpu), &attr_check_interval); | ||
640 | } | 684 | } |
641 | return err; | 685 | return err; |
642 | } | 686 | } |
@@ -645,11 +689,9 @@ static void mce_remove_device(unsigned int cpu) | |||
645 | { | 689 | { |
646 | int i; | 690 | int i; |
647 | 691 | ||
648 | for (i = 0; i < banks; i++) | 692 | for (i = 0; mce_attributes[i]; i++) |
649 | sysdev_remove_file(&per_cpu(device_mce,cpu), | 693 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
650 | bank_attributes[i]); | 694 | mce_attributes[i]); |
651 | sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant); | ||
652 | sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval); | ||
653 | sysdev_unregister(&per_cpu(device_mce,cpu)); | 695 | sysdev_unregister(&per_cpu(device_mce,cpu)); |
654 | memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); | 696 | memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); |
655 | } | 697 | } |
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 93c707257637..d0bd5d66e103 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -37,6 +37,8 @@ | |||
37 | #define THRESHOLD_MAX 0xFFF | 37 | #define THRESHOLD_MAX 0xFFF |
38 | #define INT_TYPE_APIC 0x00020000 | 38 | #define INT_TYPE_APIC 0x00020000 |
39 | #define MASK_VALID_HI 0x80000000 | 39 | #define MASK_VALID_HI 0x80000000 |
40 | #define MASK_CNTP_HI 0x40000000 | ||
41 | #define MASK_LOCKED_HI 0x20000000 | ||
40 | #define MASK_LVTOFF_HI 0x00F00000 | 42 | #define MASK_LVTOFF_HI 0x00F00000 |
41 | #define MASK_COUNT_EN_HI 0x00080000 | 43 | #define MASK_COUNT_EN_HI 0x00080000 |
42 | #define MASK_INT_TYPE_HI 0x00060000 | 44 | #define MASK_INT_TYPE_HI 0x00060000 |
@@ -122,14 +124,17 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
122 | for (block = 0; block < NR_BLOCKS; ++block) { | 124 | for (block = 0; block < NR_BLOCKS; ++block) { |
123 | if (block == 0) | 125 | if (block == 0) |
124 | address = MSR_IA32_MC0_MISC + bank * 4; | 126 | address = MSR_IA32_MC0_MISC + bank * 4; |
125 | else if (block == 1) | 127 | else if (block == 1) { |
126 | address = MCG_XBLK_ADDR | 128 | address = (low & MASK_BLKPTR_LO) >> 21; |
127 | + ((low & MASK_BLKPTR_LO) >> 21); | 129 | if (!address) |
130 | break; | ||
131 | address += MCG_XBLK_ADDR; | ||
132 | } | ||
128 | else | 133 | else |
129 | ++address; | 134 | ++address; |
130 | 135 | ||
131 | if (rdmsr_safe(address, &low, &high)) | 136 | if (rdmsr_safe(address, &low, &high)) |
132 | continue; | 137 | break; |
133 | 138 | ||
134 | if (!(high & MASK_VALID_HI)) { | 139 | if (!(high & MASK_VALID_HI)) { |
135 | if (block) | 140 | if (block) |
@@ -138,8 +143,8 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
138 | break; | 143 | break; |
139 | } | 144 | } |
140 | 145 | ||
141 | if (!(high & MASK_VALID_HI >> 1) || | 146 | if (!(high & MASK_CNTP_HI) || |
142 | (high & MASK_VALID_HI >> 2)) | 147 | (high & MASK_LOCKED_HI)) |
143 | continue; | 148 | continue; |
144 | 149 | ||
145 | if (!block) | 150 | if (!block) |
@@ -187,17 +192,22 @@ asmlinkage void mce_threshold_interrupt(void) | |||
187 | 192 | ||
188 | /* assume first bank caused it */ | 193 | /* assume first bank caused it */ |
189 | for (bank = 0; bank < NR_BANKS; ++bank) { | 194 | for (bank = 0; bank < NR_BANKS; ++bank) { |
195 | if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) | ||
196 | continue; | ||
190 | for (block = 0; block < NR_BLOCKS; ++block) { | 197 | for (block = 0; block < NR_BLOCKS; ++block) { |
191 | if (block == 0) | 198 | if (block == 0) |
192 | address = MSR_IA32_MC0_MISC + bank * 4; | 199 | address = MSR_IA32_MC0_MISC + bank * 4; |
193 | else if (block == 1) | 200 | else if (block == 1) { |
194 | address = MCG_XBLK_ADDR | 201 | address = (low & MASK_BLKPTR_LO) >> 21; |
195 | + ((low & MASK_BLKPTR_LO) >> 21); | 202 | if (!address) |
203 | break; | ||
204 | address += MCG_XBLK_ADDR; | ||
205 | } | ||
196 | else | 206 | else |
197 | ++address; | 207 | ++address; |
198 | 208 | ||
199 | if (rdmsr_safe(address, &low, &high)) | 209 | if (rdmsr_safe(address, &low, &high)) |
200 | continue; | 210 | break; |
201 | 211 | ||
202 | if (!(high & MASK_VALID_HI)) { | 212 | if (!(high & MASK_VALID_HI)) { |
203 | if (block) | 213 | if (block) |
@@ -206,10 +216,14 @@ asmlinkage void mce_threshold_interrupt(void) | |||
206 | break; | 216 | break; |
207 | } | 217 | } |
208 | 218 | ||
209 | if (!(high & MASK_VALID_HI >> 1) || | 219 | if (!(high & MASK_CNTP_HI) || |
210 | (high & MASK_VALID_HI >> 2)) | 220 | (high & MASK_LOCKED_HI)) |
211 | continue; | 221 | continue; |
212 | 222 | ||
223 | /* Log the machine check that caused the threshold | ||
224 | event. */ | ||
225 | do_machine_check(NULL, 0); | ||
226 | |||
213 | if (high & MASK_OVERFLOW_HI) { | 227 | if (high & MASK_OVERFLOW_HI) { |
214 | rdmsrl(address, m.misc); | 228 | rdmsrl(address, m.misc); |
215 | rdmsrl(MSR_IA32_MC0_STATUS + bank * 4, | 229 | rdmsrl(MSR_IA32_MC0_STATUS + bank * 4, |
@@ -385,7 +399,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | |||
385 | return 0; | 399 | return 0; |
386 | 400 | ||
387 | if (rdmsr_safe(address, &low, &high)) | 401 | if (rdmsr_safe(address, &low, &high)) |
388 | goto recurse; | 402 | return 0; |
389 | 403 | ||
390 | if (!(high & MASK_VALID_HI)) { | 404 | if (!(high & MASK_VALID_HI)) { |
391 | if (block) | 405 | if (block) |
@@ -394,8 +408,8 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | |||
394 | return 0; | 408 | return 0; |
395 | } | 409 | } |
396 | 410 | ||
397 | if (!(high & MASK_VALID_HI >> 1) || | 411 | if (!(high & MASK_CNTP_HI) || |
398 | (high & MASK_VALID_HI >> 2)) | 412 | (high & MASK_LOCKED_HI)) |
399 | goto recurse; | 413 | goto recurse; |
400 | 414 | ||
401 | b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL); | 415 | b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL); |
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 9cb42ecb7f89..486f4c61a948 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -172,7 +172,7 @@ static __cpuinit inline int nmi_known_cpu(void) | |||
172 | { | 172 | { |
173 | switch (boot_cpu_data.x86_vendor) { | 173 | switch (boot_cpu_data.x86_vendor) { |
174 | case X86_VENDOR_AMD: | 174 | case X86_VENDOR_AMD: |
175 | return boot_cpu_data.x86 == 15; | 175 | return boot_cpu_data.x86 == 15 || boot_cpu_data.x86 == 16; |
176 | case X86_VENDOR_INTEL: | 176 | case X86_VENDOR_INTEL: |
177 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 177 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
178 | return 1; | 178 | return 1; |
@@ -214,6 +214,23 @@ static __init void nmi_cpu_busy(void *data) | |||
214 | } | 214 | } |
215 | #endif | 215 | #endif |
216 | 216 | ||
217 | static unsigned int adjust_for_32bit_ctr(unsigned int hz) | ||
218 | { | ||
219 | unsigned int retval = hz; | ||
220 | |||
221 | /* | ||
222 | * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter | ||
223 | * are writable, with higher bits sign extending from bit 31. | ||
224 | * So, we can only program the counter with 31 bit values and | ||
225 | * 32nd bit should be 1, for 33.. to be 1. | ||
226 | * Find the appropriate nmi_hz | ||
227 | */ | ||
228 | if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL) { | ||
229 | retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1; | ||
230 | } | ||
231 | return retval; | ||
232 | } | ||
233 | |||
217 | int __init check_nmi_watchdog (void) | 234 | int __init check_nmi_watchdog (void) |
218 | { | 235 | { |
219 | int *counts; | 236 | int *counts; |
@@ -268,17 +285,8 @@ int __init check_nmi_watchdog (void) | |||
268 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | 285 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); |
269 | 286 | ||
270 | nmi_hz = 1; | 287 | nmi_hz = 1; |
271 | /* | 288 | if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) |
272 | * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter | 289 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
273 | * are writable, with higher bits sign extending from bit 31. | ||
274 | * So, we can only program the counter with 31 bit values and | ||
275 | * 32nd bit should be 1, for 33.. to be 1. | ||
276 | * Find the appropriate nmi_hz | ||
277 | */ | ||
278 | if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 && | ||
279 | ((u64)cpu_khz * 1000) > 0x7fffffffULL) { | ||
280 | nmi_hz = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1; | ||
281 | } | ||
282 | } | 290 | } |
283 | 291 | ||
284 | kfree(counts); | 292 | kfree(counts); |
@@ -360,6 +368,33 @@ void enable_timer_nmi_watchdog(void) | |||
360 | } | 368 | } |
361 | } | 369 | } |
362 | 370 | ||
371 | static void __acpi_nmi_disable(void *__unused) | ||
372 | { | ||
373 | apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); | ||
374 | } | ||
375 | |||
376 | /* | ||
377 | * Disable timer based NMIs on all CPUs: | ||
378 | */ | ||
379 | void acpi_nmi_disable(void) | ||
380 | { | ||
381 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | ||
382 | on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); | ||
383 | } | ||
384 | |||
385 | static void __acpi_nmi_enable(void *__unused) | ||
386 | { | ||
387 | apic_write(APIC_LVT0, APIC_DM_NMI); | ||
388 | } | ||
389 | |||
390 | /* | ||
391 | * Enable timer based NMIs on all CPUs: | ||
392 | */ | ||
393 | void acpi_nmi_enable(void) | ||
394 | { | ||
395 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | ||
396 | on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); | ||
397 | } | ||
363 | #ifdef CONFIG_PM | 398 | #ifdef CONFIG_PM |
364 | 399 | ||
365 | static int nmi_pm_active; /* nmi_active before suspend */ | 400 | static int nmi_pm_active; /* nmi_active before suspend */ |
@@ -634,7 +669,9 @@ static int setup_intel_arch_watchdog(void) | |||
634 | 669 | ||
635 | /* setup the timer */ | 670 | /* setup the timer */ |
636 | wrmsr(evntsel_msr, evntsel, 0); | 671 | wrmsr(evntsel_msr, evntsel, 0); |
637 | wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz)); | 672 | |
673 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
674 | wrmsr(perfctr_msr, (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0); | ||
638 | 675 | ||
639 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 676 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
640 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 677 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
@@ -855,15 +892,23 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) | |||
855 | dummy &= ~P4_CCCR_OVF; | 892 | dummy &= ~P4_CCCR_OVF; |
856 | wrmsrl(wd->cccr_msr, dummy); | 893 | wrmsrl(wd->cccr_msr, dummy); |
857 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 894 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
895 | /* start the cycle over again */ | ||
896 | wrmsrl(wd->perfctr_msr, | ||
897 | -((u64)cpu_khz * 1000 / nmi_hz)); | ||
858 | } else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) { | 898 | } else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) { |
859 | /* | 899 | /* |
860 | * ArchPerfom/Core Duo needs to re-unmask | 900 | * ArchPerfom/Core Duo needs to re-unmask |
861 | * the apic vector | 901 | * the apic vector |
862 | */ | 902 | */ |
863 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 903 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
904 | /* ARCH_PERFMON has 32 bit counter writes */ | ||
905 | wrmsr(wd->perfctr_msr, | ||
906 | (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0); | ||
907 | } else { | ||
908 | /* start the cycle over again */ | ||
909 | wrmsrl(wd->perfctr_msr, | ||
910 | -((u64)cpu_khz * 1000 / nmi_hz)); | ||
864 | } | 911 | } |
865 | /* start the cycle over again */ | ||
866 | wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz)); | ||
867 | rc = 1; | 912 | rc = 1; |
868 | } else if (nmi_watchdog == NMI_IO_APIC) { | 913 | } else if (nmi_watchdog == NMI_IO_APIC) { |
869 | /* don't know how to accurately check for this. | 914 | /* don't know how to accurately check for this. |
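The nmi.c changes above work around ARCH_PERFMON counters that only accept 31-bit writes, so the per-second reload value must be clamped by raising nmi_hz. A standalone worked example of the clamp in adjust_for_32bit_ctr(), assuming a 3 GHz CPU (cpu_khz == 3000000; numbers are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long long period = 3000000ULL * 1000;	/* cycles per second */
	unsigned int nmi_hz = 1;

	if (period / nmi_hz > 0x7fffffffULL)		/* 3e9 exceeds 2^31 - 1 */
		nmi_hz = period / 0x7fffffffULL + 1;	/* becomes 2 */

	/* The reload value (about 1.5e9) now fits in the 31 writable bits. */
	printf("nmi_hz=%u reload=%llu\n", nmi_hz, period / nmi_hz);
	return 0;
}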
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index 3d65b1d4c2b3..04480c3b68f5 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -138,6 +138,8 @@ static const unsigned long phb_debug_offsets[] = { | |||
138 | 138 | ||
139 | #define PHB_DEBUG_STUFF_OFFSET 0x0020 | 139 | #define PHB_DEBUG_STUFF_OFFSET 0x0020 |
140 | 140 | ||
141 | #define EMERGENCY_PAGES 32 /* = 128KB */ | ||
142 | |||
141 | unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; | 143 | unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; |
142 | static int translate_empty_slots __read_mostly = 0; | 144 | static int translate_empty_slots __read_mostly = 0; |
143 | static int calgary_detected __read_mostly = 0; | 145 | static int calgary_detected __read_mostly = 0; |
@@ -296,6 +298,16 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, | |||
296 | { | 298 | { |
297 | unsigned long entry; | 299 | unsigned long entry; |
298 | unsigned long badbit; | 300 | unsigned long badbit; |
301 | unsigned long badend; | ||
302 | |||
303 | /* were we called with bad_dma_address? */ | ||
304 | badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); | ||
305 | if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) { | ||
306 | printk(KERN_ERR "Calgary: driver tried unmapping bad DMA " | ||
307 | "address 0x%Lx\n", dma_addr); | ||
308 | WARN_ON(1); | ||
309 | return; | ||
310 | } | ||
299 | 311 | ||
300 | entry = dma_addr >> PAGE_SHIFT; | 312 | entry = dma_addr >> PAGE_SHIFT; |
301 | 313 | ||
@@ -656,8 +668,8 @@ static void __init calgary_reserve_regions(struct pci_dev *dev) | |||
656 | u64 start; | 668 | u64 start; |
657 | struct iommu_table *tbl = dev->sysdata; | 669 | struct iommu_table *tbl = dev->sysdata; |
658 | 670 | ||
659 | /* reserve bad_dma_address in case it's a legal address */ | 671 | /* reserve EMERGENCY_PAGES from bad_dma_address and up */ |
660 | iommu_range_reserve(tbl, bad_dma_address, 1); | 672 | iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES); |
661 | 673 | ||
662 | /* avoid the BIOS/VGA first 640KB-1MB region */ | 674 | /* avoid the BIOS/VGA first 640KB-1MB region */ |
663 | start = (640 * 1024); | 675 | start = (640 * 1024); |
@@ -1176,6 +1188,7 @@ int __init calgary_iommu_init(void) | |||
1176 | } | 1188 | } |
1177 | 1189 | ||
1178 | force_iommu = 1; | 1190 | force_iommu = 1; |
1191 | bad_dma_address = 0x0; | ||
1179 | dma_ops = &calgary_dma_ops; | 1192 | dma_ops = &calgary_dma_ops; |
1180 | 1193 | ||
1181 | return 0; | 1194 | return 0; |
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 683b7a5c1ab3..651ccfb06697 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -223,30 +223,10 @@ int dma_set_mask(struct device *dev, u64 mask) | |||
223 | } | 223 | } |
224 | EXPORT_SYMBOL(dma_set_mask); | 224 | EXPORT_SYMBOL(dma_set_mask); |
225 | 225 | ||
226 | /* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge] | 226 | /* |
227 | [,forcesac][,fullflush][,nomerge][,biomerge] | 227 | * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter |
228 | size set size of iommu (in bytes) | 228 | * documentation. |
229 | noagp don't initialize the AGP driver and use full aperture. | 229 | */ |
230 | off don't use the IOMMU | ||
231 | leak turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on) | ||
232 | memaper[=order] allocate an own aperture over RAM with size 32MB^order. | ||
233 | noforce don't force IOMMU usage. Default. | ||
234 | force Force IOMMU. | ||
235 | merge Do lazy merging. This may improve performance on some block devices. | ||
236 | Implies force (experimental) | ||
237 | biomerge Do merging at the BIO layer. This is more efficient than merge, | ||
238 | but should be only done with very big IOMMUs. Implies merge,force. | ||
239 | nomerge Don't do SG merging. | ||
240 | forcesac For SAC mode for masks <40bits (experimental) | ||
241 | fullflush Flush IOMMU on each allocation (default) | ||
242 | nofullflush Don't use IOMMU fullflush | ||
243 | allowed overwrite iommu off workarounds for specific chipsets. | ||
244 | soft Use software bounce buffering (default for Intel machines) | ||
245 | noaperture Don't touch the aperture for AGP. | ||
246 | allowdac Allow DMA >4GB | ||
247 | nodac Forbid DMA >4GB | ||
248 | panic Force panic when IOMMU overflows | ||
249 | */ | ||
250 | __init int iommu_setup(char *p) | 230 | __init int iommu_setup(char *p) |
251 | { | 231 | { |
252 | iommu_merge = 1; | 232 | iommu_merge = 1; |
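The large in-source comment removed here listed the iommu= boot options; they now live in Documentation/x86_64/boot-options.txt. For orientation, a boot parameter built from options named in the removed text might look like this (illustrative combination, not a recommendation):

    iommu=force,memaper=2,fullflush

or, to force software bounce buffering:

    iommu=soft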
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index fc1960f1f243..030eb3753358 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -185,7 +185,7 @@ static void iommu_full(struct device *dev, size_t size, int dir) | |||
185 | static inline int need_iommu(struct device *dev, unsigned long addr, size_t size) | 185 | static inline int need_iommu(struct device *dev, unsigned long addr, size_t size) |
186 | { | 186 | { |
187 | u64 mask = *dev->dma_mask; | 187 | u64 mask = *dev->dma_mask; |
188 | int high = addr + size >= mask; | 188 | int high = addr + size > mask; |
189 | int mmu = high; | 189 | int mmu = high; |
190 | if (force_iommu) | 190 | if (force_iommu) |
191 | mmu = 1; | 191 | mmu = 1; |
@@ -195,7 +195,7 @@ static inline int need_iommu(struct device *dev, unsigned long addr, size_t size | |||
195 | static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size) | 195 | static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size) |
196 | { | 196 | { |
197 | u64 mask = *dev->dma_mask; | 197 | u64 mask = *dev->dma_mask; |
198 | int high = addr + size >= mask; | 198 | int high = addr + size > mask; |
199 | int mmu = high; | 199 | int mmu = high; |
200 | return mmu; | 200 | return mmu; |
201 | } | 201 | } |
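The pci-gart.c change fixes an off-by-one in the DMA mask comparison: addr + size is one past the last byte of the buffer, so a buffer ending just below the mask was previously forced through the IOMMU for nothing. A small standalone illustration of the boundary case, with made-up values:
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = 0xffffffffULL;          /* 32-bit DMA mask */
        uint64_t addr = 0xfffff000ULL;
        uint64_t size = 0x00000fffULL;          /* last byte sits at mask - 1 */

        /* old test: wrongly reported this reachable buffer as "high" */
        printf("old (>=): %d\n", (int)(addr + size >= mask));   /* 1 */
        /* new test: no IOMMU needed, the whole buffer is below the mask */
        printf("new (>):  %d\n", (int)(addr + size > mask));    /* 0 */
        return 0;
}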
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c index addc14af0c56..4326a690a509 100644 --- a/arch/x86_64/kernel/ptrace.c +++ b/arch/x86_64/kernel/ptrace.c | |||
@@ -536,8 +536,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
536 | } | 536 | } |
537 | ret = 0; | 537 | ret = 0; |
538 | for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) { | 538 | for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) { |
539 | ret |= __get_user(tmp, (unsigned long __user *) data); | 539 | ret = __get_user(tmp, (unsigned long __user *) data); |
540 | putreg(child, ui, tmp); | 540 | if (ret) |
541 | break; | ||
542 | ret = putreg(child, ui, tmp); | ||
543 | if (ret) | ||
544 | break; | ||
541 | data += sizeof(long); | 545 | data += sizeof(long); |
542 | } | 546 | } |
543 | break; | 547 | break; |
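The ptrace hunk replaces the old ret |= __get_user(...) accumulation with an early break, so a faulting read no longer feeds a stale tmp into putreg() and the first error is the one returned. A generic sketch of that stop-on-first-error pattern; read_word() and write_reg() are hypothetical stand-ins, not kernel interfaces.
#include <stdio.h>

/* Hypothetical stand-ins for __get_user()/putreg(); the read fails past index 3. */
static int read_word(unsigned long i, unsigned long *out)
{
        if (i > 3)
                return -1;
        *out = i * 10;
        return 0;
}

static int write_reg(unsigned long i, unsigned long val)
{
        printf("reg %lu = %lu\n", i, val);
        return 0;
}

static int copy_regs(unsigned long n)
{
        unsigned long i, tmp;
        int ret = 0;

        for (i = 0; i < n; i++) {
                ret = read_word(i, &tmp);
                if (ret)
                        break;                  /* don't write a garbage value */
                ret = write_reg(i, tmp);
                if (ret)
                        break;
        }
        return ret;                             /* first error wins, or 0 */
}

int main(void)
{
        return copy_regs(8) ? 1 : 0;
}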
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 60477244d1a3..3d98b696881d 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c | |||
@@ -138,128 +138,6 @@ struct resource code_resource = { | |||
138 | .flags = IORESOURCE_RAM, | 138 | .flags = IORESOURCE_RAM, |
139 | }; | 139 | }; |
140 | 140 | ||
141 | #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM) | ||
142 | |||
143 | static struct resource system_rom_resource = { | ||
144 | .name = "System ROM", | ||
145 | .start = 0xf0000, | ||
146 | .end = 0xfffff, | ||
147 | .flags = IORESOURCE_ROM, | ||
148 | }; | ||
149 | |||
150 | static struct resource extension_rom_resource = { | ||
151 | .name = "Extension ROM", | ||
152 | .start = 0xe0000, | ||
153 | .end = 0xeffff, | ||
154 | .flags = IORESOURCE_ROM, | ||
155 | }; | ||
156 | |||
157 | static struct resource adapter_rom_resources[] = { | ||
158 | { .name = "Adapter ROM", .start = 0xc8000, .end = 0, | ||
159 | .flags = IORESOURCE_ROM }, | ||
160 | { .name = "Adapter ROM", .start = 0, .end = 0, | ||
161 | .flags = IORESOURCE_ROM }, | ||
162 | { .name = "Adapter ROM", .start = 0, .end = 0, | ||
163 | .flags = IORESOURCE_ROM }, | ||
164 | { .name = "Adapter ROM", .start = 0, .end = 0, | ||
165 | .flags = IORESOURCE_ROM }, | ||
166 | { .name = "Adapter ROM", .start = 0, .end = 0, | ||
167 | .flags = IORESOURCE_ROM }, | ||
168 | { .name = "Adapter ROM", .start = 0, .end = 0, | ||
169 | .flags = IORESOURCE_ROM } | ||
170 | }; | ||
171 | |||
172 | static struct resource video_rom_resource = { | ||
173 | .name = "Video ROM", | ||
174 | .start = 0xc0000, | ||
175 | .end = 0xc7fff, | ||
176 | .flags = IORESOURCE_ROM, | ||
177 | }; | ||
178 | |||
179 | static struct resource video_ram_resource = { | ||
180 | .name = "Video RAM area", | ||
181 | .start = 0xa0000, | ||
182 | .end = 0xbffff, | ||
183 | .flags = IORESOURCE_RAM, | ||
184 | }; | ||
185 | |||
186 | #define romsignature(x) (*(unsigned short *)(x) == 0xaa55) | ||
187 | |||
188 | static int __init romchecksum(unsigned char *rom, unsigned long length) | ||
189 | { | ||
190 | unsigned char *p, sum = 0; | ||
191 | |||
192 | for (p = rom; p < rom + length; p++) | ||
193 | sum += *p; | ||
194 | return sum == 0; | ||
195 | } | ||
196 | |||
197 | static void __init probe_roms(void) | ||
198 | { | ||
199 | unsigned long start, length, upper; | ||
200 | unsigned char *rom; | ||
201 | int i; | ||
202 | |||
203 | /* video rom */ | ||
204 | upper = adapter_rom_resources[0].start; | ||
205 | for (start = video_rom_resource.start; start < upper; start += 2048) { | ||
206 | rom = isa_bus_to_virt(start); | ||
207 | if (!romsignature(rom)) | ||
208 | continue; | ||
209 | |||
210 | video_rom_resource.start = start; | ||
211 | |||
212 | /* 0 < length <= 0x7f * 512, historically */ | ||
213 | length = rom[2] * 512; | ||
214 | |||
215 | /* if checksum okay, trust length byte */ | ||
216 | if (length && romchecksum(rom, length)) | ||
217 | video_rom_resource.end = start + length - 1; | ||
218 | |||
219 | request_resource(&iomem_resource, &video_rom_resource); | ||
220 | break; | ||
221 | } | ||
222 | |||
223 | start = (video_rom_resource.end + 1 + 2047) & ~2047UL; | ||
224 | if (start < upper) | ||
225 | start = upper; | ||
226 | |||
227 | /* system rom */ | ||
228 | request_resource(&iomem_resource, &system_rom_resource); | ||
229 | upper = system_rom_resource.start; | ||
230 | |||
231 | /* check for extension rom (ignore length byte!) */ | ||
232 | rom = isa_bus_to_virt(extension_rom_resource.start); | ||
233 | if (romsignature(rom)) { | ||
234 | length = extension_rom_resource.end - extension_rom_resource.start + 1; | ||
235 | if (romchecksum(rom, length)) { | ||
236 | request_resource(&iomem_resource, &extension_rom_resource); | ||
237 | upper = extension_rom_resource.start; | ||
238 | } | ||
239 | } | ||
240 | |||
241 | /* check for adapter roms on 2k boundaries */ | ||
242 | for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; | ||
243 | start += 2048) { | ||
244 | rom = isa_bus_to_virt(start); | ||
245 | if (!romsignature(rom)) | ||
246 | continue; | ||
247 | |||
248 | /* 0 < length <= 0x7f * 512, historically */ | ||
249 | length = rom[2] * 512; | ||
250 | |||
251 | /* but accept any length that fits if checksum okay */ | ||
252 | if (!length || start + length > upper || !romchecksum(rom, length)) | ||
253 | continue; | ||
254 | |||
255 | adapter_rom_resources[i].start = start; | ||
256 | adapter_rom_resources[i].end = start + length - 1; | ||
257 | request_resource(&iomem_resource, &adapter_rom_resources[i]); | ||
258 | |||
259 | start = adapter_rom_resources[i++].end & ~2047UL; | ||
260 | } | ||
261 | } | ||
262 | |||
263 | #ifdef CONFIG_PROC_VMCORE | 141 | #ifdef CONFIG_PROC_VMCORE |
264 | /* elfcorehdr= specifies the location of elf core header | 142 | /* elfcorehdr= specifies the location of elf core header |
265 | * stored by the crashed kernel. This option will be passed | 143 | * stored by the crashed kernel. This option will be passed |
@@ -444,6 +322,11 @@ void __init setup_arch(char **cmdline_p) | |||
444 | /* reserve ebda region */ | 322 | /* reserve ebda region */ |
445 | if (ebda_addr) | 323 | if (ebda_addr) |
446 | reserve_bootmem_generic(ebda_addr, ebda_size); | 324 | reserve_bootmem_generic(ebda_addr, ebda_size); |
325 | #ifdef CONFIG_NUMA | ||
326 | /* reserve nodemap region */ | ||
327 | if (nodemap_addr) | ||
328 | reserve_bootmem_generic(nodemap_addr, nodemap_size); | ||
329 | #endif | ||
447 | 330 | ||
448 | #ifdef CONFIG_SMP | 331 | #ifdef CONFIG_SMP |
449 | /* | 332 | /* |
@@ -519,15 +402,11 @@ void __init setup_arch(char **cmdline_p) | |||
519 | init_apic_mappings(); | 402 | init_apic_mappings(); |
520 | 403 | ||
521 | /* | 404 | /* |
522 | * Request address space for all standard RAM and ROM resources | 405 | * We trust e820 completely. No explicit ROM probing in memory. |
523 | * and also for regions reported as reserved by the e820. | 406 | */ |
524 | */ | ||
525 | probe_roms(); | ||
526 | e820_reserve_resources(); | 407 | e820_reserve_resources(); |
527 | e820_mark_nosave_regions(); | 408 | e820_mark_nosave_regions(); |
528 | 409 | ||
529 | request_resource(&iomem_resource, &video_ram_resource); | ||
530 | |||
531 | { | 410 | { |
532 | unsigned i; | 411 | unsigned i; |
533 | /* request I/O space for devices used on all i[345]86 PCs */ | 412 | /* request I/O space for devices used on all i[345]86 PCs */ |
@@ -1063,7 +942,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1063 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 942 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1064 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, | 943 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, |
1065 | NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL, | 944 | NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL, |
1066 | NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow", | 945 | NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", |
946 | "3dnowext", "3dnow", | ||
1067 | 947 | ||
1068 | /* Transmeta-defined */ | 948 | /* Transmeta-defined */ |
1069 | "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, | 949 | "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, |
@@ -1081,7 +961,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1081 | /* Intel-defined (#2) */ | 961 | /* Intel-defined (#2) */ |
1082 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", | 962 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", |
1083 | "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, | 963 | "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, |
1084 | NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL, | 964 | NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt", |
1085 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 965 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1086 | 966 | ||
1087 | /* VIA/Cyrix/Centaur-defined */ | 967 | /* VIA/Cyrix/Centaur-defined */ |
@@ -1091,8 +971,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1091 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 971 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1092 | 972 | ||
1093 | /* AMD-defined (#2) */ | 973 | /* AMD-defined (#2) */ |
1094 | "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL, | 974 | "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy", |
1095 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 975 | "altmovcr8", "abm", "sse4a", |
976 | "misalignsse", "3dnowprefetch", | ||
977 | "osvw", "ibs", NULL, NULL, NULL, NULL, | ||
1096 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 978 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1097 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 979 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1098 | }; | 980 | }; |
@@ -1103,6 +985,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1103 | "ttp", /* thermal trip */ | 985 | "ttp", /* thermal trip */ |
1104 | "tm", | 986 | "tm", |
1105 | "stc", | 987 | "stc", |
988 | "100mhzsteps", | ||
989 | "hwpstate", | ||
990 | NULL, /* tsc invariant mapped to constant_tsc */ | ||
1106 | NULL, | 991 | NULL, |
1107 | /* nothing */ /* constant_tsc - moved to flags */ | 992 | /* nothing */ /* constant_tsc - moved to flags */ |
1108 | }; | 993 | }; |
@@ -1219,23 +1104,3 @@ struct seq_operations cpuinfo_op = { | |||
1219 | .stop = c_stop, | 1104 | .stop = c_stop, |
1220 | .show = show_cpuinfo, | 1105 | .show = show_cpuinfo, |
1221 | }; | 1106 | }; |
1222 | |||
1223 | #if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE) | ||
1224 | #include <linux/platform_device.h> | ||
1225 | static __init int add_pcspkr(void) | ||
1226 | { | ||
1227 | struct platform_device *pd; | ||
1228 | int ret; | ||
1229 | |||
1230 | pd = platform_device_alloc("pcspkr", -1); | ||
1231 | if (!pd) | ||
1232 | return -ENOMEM; | ||
1233 | |||
1234 | ret = platform_device_add(pd); | ||
1235 | if (ret) | ||
1236 | platform_device_put(pd); | ||
1237 | |||
1238 | return ret; | ||
1239 | } | ||
1240 | device_initcall(add_pcspkr); | ||
1241 | #endif | ||
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index 8c4b80fe71a1..6a70b55f719d 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c | |||
@@ -37,7 +37,6 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | |||
37 | char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); | 37 | char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); |
38 | 38 | ||
39 | unsigned long __supported_pte_mask __read_mostly = ~0UL; | 39 | unsigned long __supported_pte_mask __read_mostly = ~0UL; |
40 | EXPORT_SYMBOL(__supported_pte_mask); | ||
41 | static int do_not_nx __cpuinitdata = 0; | 40 | static int do_not_nx __cpuinitdata = 0; |
42 | 41 | ||
43 | /* noexec=on|off | 42 | /* noexec=on|off |
diff --git a/arch/x86_64/kernel/stacktrace.c b/arch/x86_64/kernel/stacktrace.c index 6026b31d037e..65ac2c6b34a6 100644 --- a/arch/x86_64/kernel/stacktrace.c +++ b/arch/x86_64/kernel/stacktrace.c | |||
@@ -32,7 +32,7 @@ static void save_stack_address(void *data, unsigned long addr) | |||
32 | trace->skip--; | 32 | trace->skip--; |
33 | return; | 33 | return; |
34 | } | 34 | } |
35 | if (trace->nr_entries < trace->max_entries - 1) | 35 | if (trace->nr_entries < trace->max_entries) |
36 | trace->entries[trace->nr_entries++] = addr; | 36 | trace->entries[trace->nr_entries++] = addr; |
37 | } | 37 | } |
38 | 38 | ||
@@ -49,7 +49,8 @@ static struct stacktrace_ops save_stack_ops = { | |||
49 | void save_stack_trace(struct stack_trace *trace, struct task_struct *task) | 49 | void save_stack_trace(struct stack_trace *trace, struct task_struct *task) |
50 | { | 50 | { |
51 | dump_trace(task, NULL, NULL, &save_stack_ops, trace); | 51 | dump_trace(task, NULL, NULL, &save_stack_ops, trace); |
52 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 52 | if (trace->nr_entries < trace->max_entries) |
53 | trace->entries[trace->nr_entries++] = ULONG_MAX; | ||
53 | } | 54 | } |
54 | EXPORT_SYMBOL(save_stack_trace); | 55 | EXPORT_SYMBOL(save_stack_trace); |
55 | 56 | ||
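Both stacktrace fixes guard the store with nr_entries < max_entries, so the save path can use every slot (the old code stopped one short) and the ULONG_MAX terminator is only written when there is still room for it. A minimal sketch of that bounds-checked append, with a made-up array size:
#include <limits.h>
#include <stdio.h>

#define MAX_ENTRIES 4                           /* made-up size */

struct trace {
        unsigned long entries[MAX_ENTRIES];
        unsigned int nr_entries;
        unsigned int max_entries;
};

static void trace_append(struct trace *t, unsigned long addr)
{
        if (t->nr_entries < t->max_entries)     /* never write past the end */
                t->entries[t->nr_entries++] = addr;
}

int main(void)
{
        struct trace t = { .nr_entries = 0, .max_entries = MAX_ENTRIES };
        unsigned long addr;

        for (addr = 1; addr <= 10; addr++)
                trace_append(&t, addr);
        trace_append(&t, ULONG_MAX);            /* terminator only if it fits */
        printf("stored %u entries\n", t.nr_entries);    /* prints 4 */
        return 0;
}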
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 335cc91c49b7..3cc6886f1fb7 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c | |||
@@ -657,6 +657,7 @@ core_initcall(cpufreq_tsc); | |||
657 | 657 | ||
658 | #define TICK_COUNT 100000000 | 658 | #define TICK_COUNT 100000000 |
659 | #define TICK_MIN 5000 | 659 | #define TICK_MIN 5000 |
660 | #define MAX_READ_RETRIES 5 | ||
660 | 661 | ||
661 | /* | 662 | /* |
662 | * Some platforms take periodic SMI interrupts with 5ms duration. Make sure none | 663 | * Some platforms take periodic SMI interrupts with 5ms duration. Make sure none |
@@ -664,13 +665,17 @@ core_initcall(cpufreq_tsc); | |||
664 | */ | 665 | */ |
665 | static void __init read_hpet_tsc(int *hpet, int *tsc) | 666 | static void __init read_hpet_tsc(int *hpet, int *tsc) |
666 | { | 667 | { |
667 | int tsc1, tsc2, hpet1; | 668 | int tsc1, tsc2, hpet1, retries = 0; |
669 | static int msg; | ||
668 | 670 | ||
669 | do { | 671 | do { |
670 | tsc1 = get_cycles_sync(); | 672 | tsc1 = get_cycles_sync(); |
671 | hpet1 = hpet_readl(HPET_COUNTER); | 673 | hpet1 = hpet_readl(HPET_COUNTER); |
672 | tsc2 = get_cycles_sync(); | 674 | tsc2 = get_cycles_sync(); |
673 | } while (tsc2 - tsc1 > TICK_MIN); | 675 | } while (tsc2 - tsc1 > TICK_MIN && retries++ < MAX_READ_RETRIES); |
676 | if (retries >= MAX_READ_RETRIES && !msg++) | ||
677 | printk(KERN_WARNING | ||
678 | "hpet.c: exceeded max retries to read HPET & TSC\n"); | ||
674 | *hpet = hpet1; | 679 | *hpet = hpet1; |
675 | *tsc = tsc2; | 680 | *tsc = tsc2; |
676 | } | 681 | } |
@@ -1221,8 +1226,9 @@ static void hpet_rtc_timer_reinit(void) | |||
1221 | if (PIE_on) | 1226 | if (PIE_on) |
1222 | PIE_count += lost_ints; | 1227 | PIE_count += lost_ints; |
1223 | 1228 | ||
1224 | printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", | 1229 | if (printk_ratelimit()) |
1225 | hpet_rtc_int_freq); | 1230 | printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", |
1231 | hpet_rtc_int_freq); | ||
1226 | } | 1232 | } |
1227 | } | 1233 | } |
1228 | 1234 | ||
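The time.c changes bound the HPET/TSC resync loop with MAX_READ_RETRIES, warn only once via the static msg counter, and rate-limit the RTC message, so a platform with long SMIs can no longer spin forever or flood the log. A sketch of the bounded-retry-with-one-shot-warning shape, using a stand-in read function that never settles:
#include <stdio.h>

#define MAX_READ_RETRIES 5

/* Stand-in for a timestamp-pair read that keeps getting disturbed (e.g. by
 * an SMI), so the retry cap is what ends the loop. */
static long read_pair_delta(void)
{
        return 10000;                           /* always "too long" */
}

int main(void)
{
        static int msg;                         /* print the warning only once */
        int retries = 0;
        long delta;

        do {
                delta = read_pair_delta();
        } while (delta > 5000 && retries++ < MAX_READ_RETRIES);

        if (retries >= MAX_READ_RETRIES && !msg++)
                printf("warning: exceeded max retries reading timestamps\n");
        return 0;
}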
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c index 6d77e4797a47..0dffae69f4ad 100644 --- a/arch/x86_64/kernel/x8664_ksyms.c +++ b/arch/x86_64/kernel/x8664_ksyms.c | |||
@@ -26,6 +26,7 @@ EXPORT_SYMBOL(__put_user_4); | |||
26 | EXPORT_SYMBOL(__put_user_8); | 26 | EXPORT_SYMBOL(__put_user_8); |
27 | 27 | ||
28 | EXPORT_SYMBOL(copy_user_generic); | 28 | EXPORT_SYMBOL(copy_user_generic); |
29 | EXPORT_SYMBOL(__copy_user_nocache); | ||
29 | EXPORT_SYMBOL(copy_from_user); | 30 | EXPORT_SYMBOL(copy_from_user); |
30 | EXPORT_SYMBOL(copy_to_user); | 31 | EXPORT_SYMBOL(copy_to_user); |
31 | EXPORT_SYMBOL(__copy_from_user_inatomic); | 32 | EXPORT_SYMBOL(__copy_from_user_inatomic); |
@@ -34,8 +35,8 @@ EXPORT_SYMBOL(copy_page); | |||
34 | EXPORT_SYMBOL(clear_page); | 35 | EXPORT_SYMBOL(clear_page); |
35 | 36 | ||
36 | #ifdef CONFIG_SMP | 37 | #ifdef CONFIG_SMP |
37 | extern void FASTCALL( __write_lock_failed(rwlock_t *rw)); | 38 | extern void __write_lock_failed(rwlock_t *rw); |
38 | extern void FASTCALL( __read_lock_failed(rwlock_t *rw)); | 39 | extern void __read_lock_failed(rwlock_t *rw); |
39 | EXPORT_SYMBOL(__write_lock_failed); | 40 | EXPORT_SYMBOL(__write_lock_failed); |
40 | EXPORT_SYMBOL(__read_lock_failed); | 41 | EXPORT_SYMBOL(__read_lock_failed); |
41 | #endif | 42 | #endif |
diff --git a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile index b78d4170fce2..8d5f835af481 100644 --- a/arch/x86_64/lib/Makefile +++ b/arch/x86_64/lib/Makefile | |||
@@ -9,4 +9,4 @@ obj-y := io.o iomap_copy.o | |||
9 | lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \ | 9 | lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \ |
10 | usercopy.o getuser.o putuser.o \ | 10 | usercopy.o getuser.o putuser.o \ |
11 | thunk.o clear_page.o copy_page.o bitstr.o bitops.o | 11 | thunk.o clear_page.o copy_page.o bitstr.o bitops.o |
12 | lib-y += memcpy.o memmove.o memset.o copy_user.o rwlock.o | 12 | lib-y += memcpy.o memmove.o memset.o copy_user.o rwlock.o copy_user_nocache.o |
diff --git a/arch/x86_64/lib/copy_user_nocache.S b/arch/x86_64/lib/copy_user_nocache.S new file mode 100644 index 000000000000..4620efb12f13 --- /dev/null +++ b/arch/x86_64/lib/copy_user_nocache.S | |||
@@ -0,0 +1,217 @@ | |||
1 | /* Copyright 2002 Andi Kleen, SuSE Labs. | ||
2 | * Subject to the GNU Public License v2. | ||
3 | * | ||
4 | * Functions to copy from and to user space. | ||
5 | */ | ||
6 | |||
7 | #include <linux/linkage.h> | ||
8 | #include <asm/dwarf2.h> | ||
9 | |||
10 | #define FIX_ALIGNMENT 1 | ||
11 | |||
12 | #include <asm/current.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | #include <asm/cpufeature.h> | ||
16 | |||
17 | /* | ||
18 | * copy_user_nocache - Uncached memory copy with exception handling | ||
19 | * This will force destination/source out of cache for better performance. | ||
20 | * | ||
21 | * Input: | ||
22 | * rdi destination | ||
23 | * rsi source | ||
24 | * rdx count | ||
25 | * rcx zero flag; when 1, zero the rest of the destination on exception | ||
26 | * | ||
27 | * Output: | ||
28 | * eax uncopied bytes or 0 if successful. | ||
29 | */ | ||
30 | ENTRY(__copy_user_nocache) | ||
31 | CFI_STARTPROC | ||
32 | pushq %rbx | ||
33 | CFI_ADJUST_CFA_OFFSET 8 | ||
34 | CFI_REL_OFFSET rbx, 0 | ||
35 | pushq %rcx /* save zero flag */ | ||
36 | CFI_ADJUST_CFA_OFFSET 8 | ||
37 | CFI_REL_OFFSET rcx, 0 | ||
38 | |||
39 | xorl %eax,%eax /* zero for the exception handler */ | ||
40 | |||
41 | #ifdef FIX_ALIGNMENT | ||
42 | /* check for bad alignment of destination */ | ||
43 | movl %edi,%ecx | ||
44 | andl $7,%ecx | ||
45 | jnz .Lbad_alignment | ||
46 | .Lafter_bad_alignment: | ||
47 | #endif | ||
48 | |||
49 | movq %rdx,%rcx | ||
50 | |||
51 | movl $64,%ebx | ||
52 | shrq $6,%rdx | ||
53 | decq %rdx | ||
54 | js .Lhandle_tail | ||
55 | |||
56 | .p2align 4 | ||
57 | .Lloop: | ||
58 | .Ls1: movq (%rsi),%r11 | ||
59 | .Ls2: movq 1*8(%rsi),%r8 | ||
60 | .Ls3: movq 2*8(%rsi),%r9 | ||
61 | .Ls4: movq 3*8(%rsi),%r10 | ||
62 | .Ld1: movnti %r11,(%rdi) | ||
63 | .Ld2: movnti %r8,1*8(%rdi) | ||
64 | .Ld3: movnti %r9,2*8(%rdi) | ||
65 | .Ld4: movnti %r10,3*8(%rdi) | ||
66 | |||
67 | .Ls5: movq 4*8(%rsi),%r11 | ||
68 | .Ls6: movq 5*8(%rsi),%r8 | ||
69 | .Ls7: movq 6*8(%rsi),%r9 | ||
70 | .Ls8: movq 7*8(%rsi),%r10 | ||
71 | .Ld5: movnti %r11,4*8(%rdi) | ||
72 | .Ld6: movnti %r8,5*8(%rdi) | ||
73 | .Ld7: movnti %r9,6*8(%rdi) | ||
74 | .Ld8: movnti %r10,7*8(%rdi) | ||
75 | |||
76 | dec %rdx | ||
77 | |||
78 | leaq 64(%rsi),%rsi | ||
79 | leaq 64(%rdi),%rdi | ||
80 | |||
81 | jns .Lloop | ||
82 | |||
83 | .p2align 4 | ||
84 | .Lhandle_tail: | ||
85 | movl %ecx,%edx | ||
86 | andl $63,%ecx | ||
87 | shrl $3,%ecx | ||
88 | jz .Lhandle_7 | ||
89 | movl $8,%ebx | ||
90 | .p2align 4 | ||
91 | .Lloop_8: | ||
92 | .Ls9: movq (%rsi),%r8 | ||
93 | .Ld9: movnti %r8,(%rdi) | ||
94 | decl %ecx | ||
95 | leaq 8(%rdi),%rdi | ||
96 | leaq 8(%rsi),%rsi | ||
97 | jnz .Lloop_8 | ||
98 | |||
99 | .Lhandle_7: | ||
100 | movl %edx,%ecx | ||
101 | andl $7,%ecx | ||
102 | jz .Lende | ||
103 | .p2align 4 | ||
104 | .Lloop_1: | ||
105 | .Ls10: movb (%rsi),%bl | ||
106 | .Ld10: movb %bl,(%rdi) | ||
107 | incq %rdi | ||
108 | incq %rsi | ||
109 | decl %ecx | ||
110 | jnz .Lloop_1 | ||
111 | |||
112 | CFI_REMEMBER_STATE | ||
113 | .Lende: | ||
114 | popq %rcx | ||
115 | CFI_ADJUST_CFA_OFFSET -8 | ||
116 | CFI_RESTORE %rcx | ||
117 | popq %rbx | ||
118 | CFI_ADJUST_CFA_OFFSET -8 | ||
119 | CFI_RESTORE rbx | ||
120 | ret | ||
121 | CFI_RESTORE_STATE | ||
122 | |||
123 | #ifdef FIX_ALIGNMENT | ||
124 | /* align destination */ | ||
125 | .p2align 4 | ||
126 | .Lbad_alignment: | ||
127 | movl $8,%r9d | ||
128 | subl %ecx,%r9d | ||
129 | movl %r9d,%ecx | ||
130 | cmpq %r9,%rdx | ||
131 | jz .Lhandle_7 | ||
132 | js .Lhandle_7 | ||
133 | .Lalign_1: | ||
134 | .Ls11: movb (%rsi),%bl | ||
135 | .Ld11: movb %bl,(%rdi) | ||
136 | incq %rsi | ||
137 | incq %rdi | ||
138 | decl %ecx | ||
139 | jnz .Lalign_1 | ||
140 | subq %r9,%rdx | ||
141 | jmp .Lafter_bad_alignment | ||
142 | #endif | ||
143 | |||
144 | /* table sorted by exception address */ | ||
145 | .section __ex_table,"a" | ||
146 | .align 8 | ||
147 | .quad .Ls1,.Ls1e | ||
148 | .quad .Ls2,.Ls2e | ||
149 | .quad .Ls3,.Ls3e | ||
150 | .quad .Ls4,.Ls4e | ||
151 | .quad .Ld1,.Ls1e | ||
152 | .quad .Ld2,.Ls2e | ||
153 | .quad .Ld3,.Ls3e | ||
154 | .quad .Ld4,.Ls4e | ||
155 | .quad .Ls5,.Ls5e | ||
156 | .quad .Ls6,.Ls6e | ||
157 | .quad .Ls7,.Ls7e | ||
158 | .quad .Ls8,.Ls8e | ||
159 | .quad .Ld5,.Ls5e | ||
160 | .quad .Ld6,.Ls6e | ||
161 | .quad .Ld7,.Ls7e | ||
162 | .quad .Ld8,.Ls8e | ||
163 | .quad .Ls9,.Le_quad | ||
164 | .quad .Ld9,.Le_quad | ||
165 | .quad .Ls10,.Le_byte | ||
166 | .quad .Ld10,.Le_byte | ||
167 | #ifdef FIX_ALIGNMENT | ||
168 | .quad .Ls11,.Lzero_rest | ||
169 | .quad .Ld11,.Lzero_rest | ||
170 | #endif | ||
171 | .quad .Le5,.Le_zero | ||
172 | .previous | ||
173 | |||
174 | /* compute 64-offset for main loop. 8 bytes accuracy with error on the | ||
175 | pessimistic side. this is gross. it would be better to fix the | ||
176 | interface. */ | ||
177 | /* eax: zero, ebx: 64 */ | ||
178 | .Ls1e: addl $8,%eax | ||
179 | .Ls2e: addl $8,%eax | ||
180 | .Ls3e: addl $8,%eax | ||
181 | .Ls4e: addl $8,%eax | ||
182 | .Ls5e: addl $8,%eax | ||
183 | .Ls6e: addl $8,%eax | ||
184 | .Ls7e: addl $8,%eax | ||
185 | .Ls8e: addl $8,%eax | ||
186 | addq %rbx,%rdi /* +64 */ | ||
187 | subq %rax,%rdi /* correct destination with computed offset */ | ||
188 | |||
189 | shlq $6,%rdx /* loop counter * 64 (stride length) */ | ||
190 | addq %rax,%rdx /* add offset to loopcnt */ | ||
191 | andl $63,%ecx /* remaining bytes */ | ||
192 | addq %rcx,%rdx /* add them */ | ||
193 | jmp .Lzero_rest | ||
194 | |||
195 | /* exception on quad word loop in tail handling */ | ||
196 | /* ecx: loopcnt/8, %edx: length, rdi: correct */ | ||
197 | .Le_quad: | ||
198 | shll $3,%ecx | ||
199 | andl $7,%edx | ||
200 | addl %ecx,%edx | ||
201 | /* edx: bytes to zero, rdi: dest, eax:zero */ | ||
202 | .Lzero_rest: | ||
203 | cmpl $0,(%rsp) /* zero flag set? */ | ||
204 | jz .Le_zero | ||
205 | movq %rdx,%rcx | ||
206 | .Le_byte: | ||
207 | xorl %eax,%eax | ||
208 | .Le5: rep | ||
209 | stosb | ||
210 | /* when there is another exception while zeroing the rest just return */ | ||
211 | .Le_zero: | ||
212 | movq %rdx,%rax | ||
213 | jmp .Lende | ||
214 | CFI_ENDPROC | ||
215 | ENDPROC(__copy_user_nocache) | ||
216 | |||
217 | |||
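The new __copy_user_nocache() splits the byte count into a 64-byte main loop, an 8-byte quad-word tail and a final byte tail, uses movnti for the stores, covers every access with the exception table above, and returns the number of bytes left uncopied (0 on success). A rough userspace sketch of just the chunking, without the non-temporal stores or fault handling:
#include <stdio.h>
#include <string.h>

/* Chunking sketch only; the real routine uses movnti stores, an exception
 * table, and returns the number of uncopied bytes when a fault occurs. */
static size_t copy_chunked(void *dst, const void *src, size_t count)
{
        char *d = dst;
        const char *s = src;
        size_t n;

        for (n = count / 64; n; n--, d += 64, s += 64)
                memcpy(d, s, 64);               /* main 64-byte loop */
        for (n = (count % 64) / 8; n; n--, d += 8, s += 8)
                memcpy(d, s, 8);                /* quad-word tail */
        for (n = count % 8; n; n--)
                *d++ = *s++;                    /* byte tail */
        return 0;                               /* nothing left uncopied */
}

int main(void)
{
        char src[200], dst[200];

        memset(src, 0x5a, sizeof(src));
        copy_chunked(dst, src, sizeof(src));
        return memcmp(dst, src, sizeof(dst)) != 0;
}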
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c index 49e8cf2e06f8..6ada7231f3ab 100644 --- a/arch/x86_64/mm/fault.c +++ b/arch/x86_64/mm/fault.c | |||
@@ -56,17 +56,17 @@ int unregister_page_fault_notifier(struct notifier_block *nb) | |||
56 | } | 56 | } |
57 | EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); | 57 | EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); |
58 | 58 | ||
59 | static inline int notify_page_fault(enum die_val val, const char *str, | 59 | static inline int notify_page_fault(struct pt_regs *regs, long err) |
60 | struct pt_regs *regs, long err, int trap, int sig) | ||
61 | { | 60 | { |
62 | struct die_args args = { | 61 | struct die_args args = { |
63 | .regs = regs, | 62 | .regs = regs, |
64 | .str = str, | 63 | .str = "page fault", |
65 | .err = err, | 64 | .err = err, |
66 | .trapnr = trap, | 65 | .trapnr = 14, |
67 | .signr = sig | 66 | .signr = SIGSEGV |
68 | }; | 67 | }; |
69 | return atomic_notifier_call_chain(¬ify_page_fault_chain, val, &args); | 68 | return atomic_notifier_call_chain(¬ify_page_fault_chain, |
69 | DIE_PAGE_FAULT, &args); | ||
70 | } | 70 | } |
71 | 71 | ||
72 | /* Sometimes the CPU reports invalid exceptions on prefetch. | 72 | /* Sometimes the CPU reports invalid exceptions on prefetch. |
@@ -355,8 +355,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
355 | if (vmalloc_fault(address) >= 0) | 355 | if (vmalloc_fault(address) >= 0) |
356 | return; | 356 | return; |
357 | } | 357 | } |
358 | if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, | 358 | if (notify_page_fault(regs, error_code) == NOTIFY_STOP) |
359 | SIGSEGV) == NOTIFY_STOP) | ||
360 | return; | 359 | return; |
361 | /* | 360 | /* |
362 | * Don't take the mm semaphore here. If we fixup a prefetch | 361 | * Don't take the mm semaphore here. If we fixup a prefetch |
@@ -365,8 +364,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
365 | goto bad_area_nosemaphore; | 364 | goto bad_area_nosemaphore; |
366 | } | 365 | } |
367 | 366 | ||
368 | if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, | 367 | if (notify_page_fault(regs, error_code) == NOTIFY_STOP) |
369 | SIGSEGV) == NOTIFY_STOP) | ||
370 | return; | 368 | return; |
371 | 369 | ||
372 | if (likely(regs->eflags & X86_EFLAGS_IF)) | 370 | if (likely(regs->eflags & X86_EFLAGS_IF)) |
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 2ee2e003606c..41b8fb069924 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c | |||
@@ -36,6 +36,8 @@ unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { | |||
36 | cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly; | 36 | cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly; |
37 | 37 | ||
38 | int numa_off __initdata; | 38 | int numa_off __initdata; |
39 | unsigned long __initdata nodemap_addr; | ||
40 | unsigned long __initdata nodemap_size; | ||
39 | 41 | ||
40 | 42 | ||
41 | /* | 43 | /* |
@@ -52,34 +54,88 @@ populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift) | |||
52 | int res = -1; | 54 | int res = -1; |
53 | unsigned long addr, end; | 55 | unsigned long addr, end; |
54 | 56 | ||
55 | if (shift >= 64) | 57 | memset(memnodemap, 0xff, memnodemapsize); |
56 | return -1; | ||
57 | memset(memnodemap, 0xff, sizeof(memnodemap)); | ||
58 | for (i = 0; i < numnodes; i++) { | 58 | for (i = 0; i < numnodes; i++) { |
59 | addr = nodes[i].start; | 59 | addr = nodes[i].start; |
60 | end = nodes[i].end; | 60 | end = nodes[i].end; |
61 | if (addr >= end) | 61 | if (addr >= end) |
62 | continue; | 62 | continue; |
63 | if ((end >> shift) >= NODEMAPSIZE) | 63 | if ((end >> shift) >= memnodemapsize) |
64 | return 0; | 64 | return 0; |
65 | do { | 65 | do { |
66 | if (memnodemap[addr >> shift] != 0xff) | 66 | if (memnodemap[addr >> shift] != 0xff) |
67 | return -1; | 67 | return -1; |
68 | memnodemap[addr >> shift] = i; | 68 | memnodemap[addr >> shift] = i; |
69 | addr += (1UL << shift); | 69 | addr += (1UL << shift); |
70 | } while (addr < end); | 70 | } while (addr < end); |
71 | res = 1; | 71 | res = 1; |
72 | } | 72 | } |
73 | return res; | 73 | return res; |
74 | } | 74 | } |
75 | 75 | ||
76 | int __init compute_hash_shift(struct bootnode *nodes, int numnodes) | 76 | static int __init allocate_cachealigned_memnodemap(void) |
77 | { | 77 | { |
78 | int shift = 20; | 78 | unsigned long pad, pad_addr; |
79 | |||
80 | memnodemap = memnode.embedded_map; | ||
81 | if (memnodemapsize <= 48) | ||
82 | return 0; | ||
83 | |||
84 | pad = L1_CACHE_BYTES - 1; | ||
85 | pad_addr = 0x8000; | ||
86 | nodemap_size = pad + memnodemapsize; | ||
87 | nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT, | ||
88 | nodemap_size); | ||
89 | if (nodemap_addr == -1UL) { | ||
90 | printk(KERN_ERR | ||
91 | "NUMA: Unable to allocate Memory to Node hash map\n"); | ||
92 | nodemap_addr = nodemap_size = 0; | ||
93 | return -1; | ||
94 | } | ||
95 | pad_addr = (nodemap_addr + pad) & ~pad; | ||
96 | memnodemap = phys_to_virt(pad_addr); | ||
97 | |||
98 | printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n", | ||
99 | nodemap_addr, nodemap_addr + nodemap_size); | ||
100 | return 0; | ||
101 | } | ||
79 | 102 | ||
80 | while (populate_memnodemap(nodes, numnodes, shift + 1) >= 0) | 103 | /* |
81 | shift++; | 104 | * The LSB of all start and end addresses in the node map is the value of the |
105 | * maximum possible shift. | ||
106 | */ | ||
107 | static int __init | ||
108 | extract_lsb_from_nodes (const struct bootnode *nodes, int numnodes) | ||
109 | { | ||
110 | int i, nodes_used = 0; | ||
111 | unsigned long start, end; | ||
112 | unsigned long bitfield = 0, memtop = 0; | ||
113 | |||
114 | for (i = 0; i < numnodes; i++) { | ||
115 | start = nodes[i].start; | ||
116 | end = nodes[i].end; | ||
117 | if (start >= end) | ||
118 | continue; | ||
119 | bitfield |= start; | ||
120 | nodes_used++; | ||
121 | if (end > memtop) | ||
122 | memtop = end; | ||
123 | } | ||
124 | if (nodes_used <= 1) | ||
125 | i = 63; | ||
126 | else | ||
127 | i = find_first_bit(&bitfield, sizeof(unsigned long)*8); | ||
128 | memnodemapsize = (memtop >> i)+1; | ||
129 | return i; | ||
130 | } | ||
131 | |||
132 | int __init compute_hash_shift(struct bootnode *nodes, int numnodes) | ||
133 | { | ||
134 | int shift; | ||
82 | 135 | ||
136 | shift = extract_lsb_from_nodes(nodes, numnodes); | ||
137 | if (allocate_cachealigned_memnodemap()) | ||
138 | return -1; | ||
83 | printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", | 139 | printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", |
84 | shift); | 140 | shift); |
85 | 141 | ||
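extract_lsb_from_nodes() derives the hash shift from the lowest set bit across the node start addresses: if every node starts on a 256MB boundary, bit 28 is the finest granule that still distinguishes them, and memnodemapsize becomes (memtop >> shift) + 1 entries. A simplified worked example with a made-up two-node layout (the kernel also skips empty nodes and falls back to shift 63 for a single node):
#include <stdio.h>

struct bootnode { unsigned long start, end; };

int main(void)
{
        /* made-up layout: two nodes on 256MB boundaries */
        struct bootnode nodes[] = {
                { 0x00000000UL, 0x10000000UL },
                { 0x10000000UL, 0x20000000UL },
        };
        unsigned long bitfield = 0, memtop = 0;
        int i, shift;

        for (i = 0; i < 2; i++) {
                bitfield |= nodes[i].start;
                if (nodes[i].end > memtop)
                        memtop = nodes[i].end;
        }
        shift = __builtin_ctzl(bitfield);       /* lowest set bit -> 28 */
        printf("shift=%d memnodemapsize=%lu\n", shift, (memtop >> shift) + 1);
        return 0;
}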
@@ -216,31 +272,113 @@ void __init numa_init_array(void) | |||
216 | } | 272 | } |
217 | 273 | ||
218 | #ifdef CONFIG_NUMA_EMU | 274 | #ifdef CONFIG_NUMA_EMU |
275 | /* Numa emulation */ | ||
219 | int numa_fake __initdata = 0; | 276 | int numa_fake __initdata = 0; |
220 | 277 | ||
221 | /* Numa emulation */ | 278 | /* |
279 | * This function is used to find out if the start and end correspond to | ||
280 | * different zones. | ||
281 | */ | ||
282 | int zone_cross_over(unsigned long start, unsigned long end) | ||
283 | { | ||
284 | if ((start < (MAX_DMA32_PFN << PAGE_SHIFT)) && | ||
285 | (end >= (MAX_DMA32_PFN << PAGE_SHIFT))) | ||
286 | return 1; | ||
287 | return 0; | ||
288 | } | ||
289 | |||
222 | static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn) | 290 | static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn) |
223 | { | 291 | { |
224 | int i; | 292 | int i, big; |
225 | struct bootnode nodes[MAX_NUMNODES]; | 293 | struct bootnode nodes[MAX_NUMNODES]; |
226 | unsigned long sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake; | 294 | unsigned long sz, old_sz; |
295 | unsigned long hole_size; | ||
296 | unsigned long start, end; | ||
297 | unsigned long max_addr = (end_pfn << PAGE_SHIFT); | ||
298 | |||
299 | start = (start_pfn << PAGE_SHIFT); | ||
300 | hole_size = e820_hole_size(start, max_addr); | ||
301 | sz = (max_addr - start - hole_size) / numa_fake; | ||
227 | 302 | ||
228 | /* Kludge needed for the hash function */ | 303 | /* Kludge needed for the hash function */ |
229 | if (hweight64(sz) > 1) { | ||
230 | unsigned long x = 1; | ||
231 | while ((x << 1) < sz) | ||
232 | x <<= 1; | ||
233 | if (x < sz/2) | ||
234 | printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n"); | ||
235 | sz = x; | ||
236 | } | ||
237 | 304 | ||
305 | old_sz = sz; | ||
306 | /* | ||
307 | * Round down to the nearest FAKE_NODE_MIN_SIZE. | ||
308 | */ | ||
309 | sz &= FAKE_NODE_MIN_HASH_MASK; | ||
310 | |||
311 | /* | ||
312 | * We ensure that each node is at least 64MB big. Anything smaller | ||
313 | * than this can cause VM hiccups. | ||
314 | */ | ||
315 | if (sz == 0) { | ||
316 | printk(KERN_INFO "Not enough memory for %d nodes. Reducing " | ||
317 | "the number of nodes\n", numa_fake); | ||
318 | numa_fake = (max_addr - start - hole_size) / FAKE_NODE_MIN_SIZE; | ||
319 | printk(KERN_INFO "Number of fake nodes will be = %d\n", | ||
320 | numa_fake); | ||
321 | sz = FAKE_NODE_MIN_SIZE; | ||
322 | } | ||
323 | /* | ||
324 | * Find out how many nodes can get an extra FAKE_NODE_MIN_SIZE granule. | ||
325 | * This logic ensures the extra memory gets distributed among as many | ||
326 | * nodes as possible (as compared to one single node getting all that | ||
327 | * extra memory). | ||
328 | */ | ||
329 | big = ((old_sz - sz) * numa_fake) / FAKE_NODE_MIN_SIZE; | ||
330 | printk(KERN_INFO "Fake node Size: %luMB hole_size: %luMB big nodes: " | ||
331 | "%d\n", | ||
332 | (sz >> 20), (hole_size >> 20), big); | ||
238 | memset(&nodes,0,sizeof(nodes)); | 333 | memset(&nodes,0,sizeof(nodes)); |
334 | end = start; | ||
239 | for (i = 0; i < numa_fake; i++) { | 335 | for (i = 0; i < numa_fake; i++) { |
240 | nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz; | 336 | /* |
337 | * In case we are not able to allocate enough memory for all | ||
338 | * the nodes, we reduce the number of fake nodes. | ||
339 | */ | ||
340 | if (end >= max_addr) { | ||
341 | numa_fake = i - 1; | ||
342 | break; | ||
343 | } | ||
344 | start = nodes[i].start = end; | ||
345 | /* | ||
346 | * Final node can have all the remaining memory. | ||
347 | */ | ||
241 | if (i == numa_fake-1) | 348 | if (i == numa_fake-1) |
242 | sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start; | 349 | sz = max_addr - start; |
243 | nodes[i].end = nodes[i].start + sz; | 350 | end = nodes[i].start + sz; |
351 | /* | ||
351 | * The first "big" nodes get an extra granule. | ||
353 | */ | ||
354 | if (i < big) | ||
355 | end += FAKE_NODE_MIN_SIZE; | ||
356 | /* | ||
357 | * Iterate over the range to ensure that this node gets at | ||
358 | * least sz amount of RAM (excluding holes) | ||
359 | */ | ||
360 | while ((end - start - e820_hole_size(start, end)) < sz) { | ||
361 | end += FAKE_NODE_MIN_SIZE; | ||
362 | if (end >= max_addr) | ||
363 | break; | ||
364 | } | ||
365 | /* | ||
366 | * Look at the next node to make sure there is some real memory | ||
367 | * to map. Bad things happen when the only memory present | ||
368 | * in a zone on a fake node is an IO hole. | ||
369 | */ | ||
370 | while (e820_hole_size(end, end + FAKE_NODE_MIN_SIZE) > 0) { | ||
371 | if (zone_cross_over(start, end + sz)) { | ||
372 | end = (MAX_DMA32_PFN << PAGE_SHIFT); | ||
373 | break; | ||
374 | } | ||
375 | if (end >= max_addr) | ||
376 | break; | ||
377 | end += FAKE_NODE_MIN_SIZE; | ||
378 | } | ||
379 | if (end > max_addr) | ||
380 | end = max_addr; | ||
381 | nodes[i].end = end; | ||
244 | printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", | 382 | printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", |
245 | i, | 383 | i, |
246 | nodes[i].start, nodes[i].end, | 384 | nodes[i].start, nodes[i].end, |
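The emulation rework sizes the fake nodes from usable RAM (total minus e820 holes), rounds each share down to a FAKE_NODE_MIN_SIZE granule, and hands one extra granule to the first "big" nodes so the rounding remainder is spread out rather than dumped on the last node. The arithmetic, with assumed values (FAKE_NODE_MIN_SIZE taken to be the 64MB mentioned in the comment above):
#include <stdio.h>

#define FAKE_NODE_MIN_SIZE (64UL << 20)         /* 64MB granule, per the comment */

int main(void)
{
        unsigned long usable = 1000UL << 20;    /* RAM minus holes, made up */
        int numa_fake = 4;                      /* e.g. numa=fake=4 */
        unsigned long old_sz = usable / numa_fake;              /* 250MB */
        unsigned long sz = old_sz & ~(FAKE_NODE_MIN_SIZE - 1);  /* 192MB */
        int big = (int)(((old_sz - sz) * numa_fake) / FAKE_NODE_MIN_SIZE);

        /* first 3 nodes get 256MB each; the final node gets 192MB plus
         * whatever remains after hole skipping */
        printf("sz=%luMB big=%d\n", sz >> 20, big);
        return 0;
}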
@@ -290,6 +428,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) | |||
290 | end_pfn << PAGE_SHIFT); | 428 | end_pfn << PAGE_SHIFT); |
291 | /* setup dummy node covering all memory */ | 429 | /* setup dummy node covering all memory */ |
292 | memnode_shift = 63; | 430 | memnode_shift = 63; |
431 | memnodemap = memnode.embedded_map; | ||
293 | memnodemap[0] = 0; | 432 | memnodemap[0] = 0; |
294 | nodes_clear(node_online_map); | 433 | nodes_clear(node_online_map); |
295 | node_set_online(0); | 434 | node_set_online(0); |
@@ -321,20 +460,6 @@ unsigned long __init numa_free_all_bootmem(void) | |||
321 | return pages; | 460 | return pages; |
322 | } | 461 | } |
323 | 462 | ||
324 | #ifdef CONFIG_SPARSEMEM | ||
325 | static void __init arch_sparse_init(void) | ||
326 | { | ||
327 | int i; | ||
328 | |||
329 | for_each_online_node(i) | ||
330 | memory_present(i, node_start_pfn(i), node_end_pfn(i)); | ||
331 | |||
332 | sparse_init(); | ||
333 | } | ||
334 | #else | ||
335 | #define arch_sparse_init() do {} while (0) | ||
336 | #endif | ||
337 | |||
338 | void __init paging_init(void) | 463 | void __init paging_init(void) |
339 | { | 464 | { |
340 | int i; | 465 | int i; |
@@ -344,7 +469,8 @@ void __init paging_init(void) | |||
344 | max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; | 469 | max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; |
345 | max_zone_pfns[ZONE_NORMAL] = end_pfn; | 470 | max_zone_pfns[ZONE_NORMAL] = end_pfn; |
346 | 471 | ||
347 | arch_sparse_init(); | 472 | sparse_memory_present_with_active_regions(MAX_NUMNODES); |
473 | sparse_init(); | ||
348 | 474 | ||
349 | for_each_online_node(i) { | 475 | for_each_online_node(i) { |
350 | setup_node_zones(i); | 476 | setup_node_zones(i); |
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c index ccb91dd996a9..65c5eaa59905 100644 --- a/arch/x86_64/mm/pageattr.c +++ b/arch/x86_64/mm/pageattr.c | |||
@@ -107,6 +107,7 @@ static void revert_page(unsigned long address, pgprot_t ref_prot) | |||
107 | pud_t *pud; | 107 | pud_t *pud; |
108 | pmd_t *pmd; | 108 | pmd_t *pmd; |
109 | pte_t large_pte; | 109 | pte_t large_pte; |
110 | unsigned long pfn; | ||
110 | 111 | ||
111 | pgd = pgd_offset_k(address); | 112 | pgd = pgd_offset_k(address); |
112 | BUG_ON(pgd_none(*pgd)); | 113 | BUG_ON(pgd_none(*pgd)); |
@@ -114,7 +115,8 @@ static void revert_page(unsigned long address, pgprot_t ref_prot) | |||
114 | BUG_ON(pud_none(*pud)); | 115 | BUG_ON(pud_none(*pud)); |
115 | pmd = pmd_offset(pud, address); | 116 | pmd = pmd_offset(pud, address); |
116 | BUG_ON(pmd_val(*pmd) & _PAGE_PSE); | 117 | BUG_ON(pmd_val(*pmd) & _PAGE_PSE); |
117 | large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot); | 118 | pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT; |
119 | large_pte = pfn_pte(pfn, ref_prot); | ||
118 | large_pte = pte_mkhuge(large_pte); | 120 | large_pte = pte_mkhuge(large_pte); |
119 | set_pte((pte_t *)pmd, large_pte); | 121 | set_pte((pte_t *)pmd, large_pte); |
120 | } | 122 | } |
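With mk_pte_phys() gone, revert_page() builds the huge PTE with pfn_pte(), which takes a page frame number rather than a physical address, so the address is first masked down to its large-page boundary and shifted right by PAGE_SHIFT. A worked example of that conversion, assuming 4KB pages and 2MB large pages:
#include <stdio.h>

#define PAGE_SHIFT      12
#define LARGE_PAGE_MASK (~((1UL << 21) - 1))    /* assumed 2MB large pages */

int main(void)
{
        unsigned long phys = 0x12345678UL;      /* made-up physical address */
        unsigned long pfn = (phys & LARGE_PAGE_MASK) >> PAGE_SHIFT;

        /* 0x12345678 rounds down to the 2MB frame at 0x12200000 -> pfn 0x12200 */
        printf("pfn=%#lx\n", pfn);
        return 0;
}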
diff --git a/arch/x86_64/pci/Makefile b/arch/x86_64/pci/Makefile index 149aba05a5b8..c9eddc8859c0 100644 --- a/arch/x86_64/pci/Makefile +++ b/arch/x86_64/pci/Makefile | |||
@@ -11,7 +11,7 @@ obj-y += fixup.o init.o | |||
11 | obj-$(CONFIG_ACPI) += acpi.o | 11 | obj-$(CONFIG_ACPI) += acpi.o |
12 | obj-y += legacy.o irq.o common.o early.o | 12 | obj-y += legacy.o irq.o common.o early.o |
13 | # mmconfig has a 64bit special | 13 | # mmconfig has a 64bit special |
14 | obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o | 14 | obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o mmconfig-shared.o |
15 | 15 | ||
16 | obj-$(CONFIG_NUMA) += k8-bus.o | 16 | obj-$(CONFIG_NUMA) += k8-bus.o |
17 | 17 | ||
@@ -24,3 +24,4 @@ fixup-y += ../../i386/pci/fixup.o | |||
24 | i386-y += ../../i386/pci/i386.o | 24 | i386-y += ../../i386/pci/i386.o |
25 | init-y += ../../i386/pci/init.o | 25 | init-y += ../../i386/pci/init.o |
26 | early-y += ../../i386/pci/early.o | 26 | early-y += ../../i386/pci/early.o |
27 | mmconfig-shared-y += ../../i386/pci/mmconfig-shared.o | ||
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c index faabb6e87f12..65d82736987e 100644 --- a/arch/x86_64/pci/mmconfig.c +++ b/arch/x86_64/pci/mmconfig.c | |||
@@ -13,16 +13,6 @@ | |||
13 | 13 | ||
14 | #include "pci.h" | 14 | #include "pci.h" |
15 | 15 | ||
16 | /* aperture is up to 256MB but BIOS may reserve less */ | ||
17 | #define MMCONFIG_APER_MIN (2 * 1024*1024) | ||
18 | #define MMCONFIG_APER_MAX (256 * 1024*1024) | ||
19 | |||
20 | /* Verify the first 16 busses. We assume that systems with more busses | ||
21 | get MCFG right. */ | ||
22 | #define MAX_CHECK_BUS 16 | ||
23 | |||
24 | static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS); | ||
25 | |||
26 | /* Static virtual mapping of the MMCONFIG aperture */ | 16 | /* Static virtual mapping of the MMCONFIG aperture */ |
27 | struct mmcfg_virt { | 17 | struct mmcfg_virt { |
28 | struct acpi_mcfg_allocation *cfg; | 18 | struct acpi_mcfg_allocation *cfg; |
@@ -32,30 +22,17 @@ static struct mmcfg_virt *pci_mmcfg_virt; | |||
32 | 22 | ||
33 | static char __iomem *get_virt(unsigned int seg, unsigned bus) | 23 | static char __iomem *get_virt(unsigned int seg, unsigned bus) |
34 | { | 24 | { |
35 | int cfg_num = -1; | ||
36 | struct acpi_mcfg_allocation *cfg; | 25 | struct acpi_mcfg_allocation *cfg; |
26 | int cfg_num; | ||
37 | 27 | ||
38 | while (1) { | 28 | for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) { |
39 | ++cfg_num; | ||
40 | if (cfg_num >= pci_mmcfg_config_num) | ||
41 | break; | ||
42 | cfg = pci_mmcfg_virt[cfg_num].cfg; | 29 | cfg = pci_mmcfg_virt[cfg_num].cfg; |
43 | if (cfg->pci_segment != seg) | 30 | if (cfg->pci_segment == seg && |
44 | continue; | 31 | (cfg->start_bus_number <= bus) && |
45 | if ((cfg->start_bus_number <= bus) && | ||
46 | (cfg->end_bus_number >= bus)) | 32 | (cfg->end_bus_number >= bus)) |
47 | return pci_mmcfg_virt[cfg_num].virt; | 33 | return pci_mmcfg_virt[cfg_num].virt; |
48 | } | 34 | } |
49 | 35 | ||
50 | /* Handle more broken MCFG tables on Asus etc. | ||
51 | They only contain a single entry for bus 0-0. Assume | ||
52 | this applies to all busses. */ | ||
53 | cfg = &pci_mmcfg_config[0]; | ||
54 | if (pci_mmcfg_config_num == 1 && | ||
55 | cfg->pci_segment == 0 && | ||
56 | (cfg->start_bus_number | cfg->end_bus_number) == 0) | ||
57 | return pci_mmcfg_virt[0].virt; | ||
58 | |||
59 | /* Fall back to type 0 */ | 36 | /* Fall back to type 0 */ |
60 | return NULL; | 37 | return NULL; |
61 | } | 38 | } |
@@ -63,8 +40,8 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus) | |||
63 | static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) | 40 | static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) |
64 | { | 41 | { |
65 | char __iomem *addr; | 42 | char __iomem *addr; |
66 | if (seg == 0 && bus < MAX_CHECK_BUS && | 43 | if (seg == 0 && bus < PCI_MMCFG_MAX_CHECK_BUS && |
67 | test_bit(32*bus + PCI_SLOT(devfn), fallback_slots)) | 44 | test_bit(32*bus + PCI_SLOT(devfn), pci_mmcfg_fallback_slots)) |
68 | return NULL; | 45 | return NULL; |
69 | addr = get_virt(seg, bus); | 46 | addr = get_virt(seg, bus); |
70 | if (!addr) | 47 | if (!addr) |
@@ -135,79 +112,46 @@ static struct pci_raw_ops pci_mmcfg = { | |||
135 | .write = pci_mmcfg_write, | 112 | .write = pci_mmcfg_write, |
136 | }; | 113 | }; |
137 | 114 | ||
138 | /* K8 systems have some devices (typically in the builtin northbridge) | 115 | static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg) |
139 | that are only accessible using type1 | ||
140 | Normally this can be expressed in the MCFG by not listing them | ||
141 | and assigning suitable _SEGs, but this isn't implemented in some BIOS. | ||
142 | Instead try to discover all devices on bus 0 that are unreachable using MM | ||
143 | and fallback for them. */ | ||
144 | static __init void unreachable_devices(void) | ||
145 | { | 116 | { |
146 | int i, k; | 117 | void __iomem *addr; |
147 | /* Use the max bus number from ACPI here? */ | 118 | u32 size; |
148 | for (k = 0; k < MAX_CHECK_BUS; k++) { | 119 | |
149 | for (i = 0; i < 32; i++) { | 120 | size = (cfg->end_bus_number + 1) << 20; |
150 | u32 val1; | 121 | addr = ioremap_nocache(cfg->address, size); |
151 | char __iomem *addr; | 122 | if (addr) { |
152 | 123 | printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n", | |
153 | pci_conf1_read(0, k, PCI_DEVFN(i,0), 0, 4, &val1); | 124 | cfg->address, cfg->address + size - 1); |
154 | if (val1 == 0xffffffff) | ||
155 | continue; | ||
156 | addr = pci_dev_base(0, k, PCI_DEVFN(i, 0)); | ||
157 | if (addr == NULL|| readl(addr) != val1) { | ||
158 | set_bit(i + 32*k, fallback_slots); | ||
159 | printk(KERN_NOTICE "PCI: No mmconfig possible" | ||
160 | " on device %02x:%02x\n", k, i); | ||
161 | } | ||
162 | } | ||
163 | } | 125 | } |
126 | return addr; | ||
164 | } | 127 | } |
165 | 128 | ||
166 | void __init pci_mmcfg_init(int type) | 129 | int __init pci_mmcfg_arch_reachable(unsigned int seg, unsigned int bus, |
130 | unsigned int devfn) | ||
167 | { | 131 | { |
168 | int i; | 132 | return pci_dev_base(seg, bus, devfn) != NULL; |
169 | 133 | } | |
170 | if ((pci_probe & PCI_PROBE_MMCONF) == 0) | ||
171 | return; | ||
172 | |||
173 | acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); | ||
174 | if ((pci_mmcfg_config_num == 0) || | ||
175 | (pci_mmcfg_config == NULL) || | ||
176 | (pci_mmcfg_config[0].address == 0)) | ||
177 | return; | ||
178 | |||
179 | /* Only do this check when type 1 works. If it doesn't work | ||
180 | assume we run on a Mac and always use MCFG */ | ||
181 | if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address, | ||
182 | pci_mmcfg_config[0].address + MMCONFIG_APER_MIN, | ||
183 | E820_RESERVED)) { | ||
184 | printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n", | ||
185 | (unsigned long)pci_mmcfg_config[0].address); | ||
186 | printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); | ||
187 | return; | ||
188 | } | ||
189 | 134 | ||
190 | pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL); | 135 | int __init pci_mmcfg_arch_init(void) |
136 | { | ||
137 | int i; | ||
138 | pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * | ||
139 | pci_mmcfg_config_num, GFP_KERNEL); | ||
191 | if (pci_mmcfg_virt == NULL) { | 140 | if (pci_mmcfg_virt == NULL) { |
192 | printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n"); | 141 | printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n"); |
193 | return; | 142 | return 0; |
194 | } | 143 | } |
144 | |||
195 | for (i = 0; i < pci_mmcfg_config_num; ++i) { | 145 | for (i = 0; i < pci_mmcfg_config_num; ++i) { |
196 | pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; | 146 | pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; |
197 | pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address, | 147 | pci_mmcfg_virt[i].virt = mcfg_ioremap(&pci_mmcfg_config[i]); |
198 | MMCONFIG_APER_MAX); | ||
199 | if (!pci_mmcfg_virt[i].virt) { | 148 | if (!pci_mmcfg_virt[i].virt) { |
200 | printk(KERN_ERR "PCI: Cannot map mmconfig aperture for " | 149 | printk(KERN_ERR "PCI: Cannot map mmconfig aperture for " |
201 | "segment %d\n", | 150 | "segment %d\n", |
202 | pci_mmcfg_config[i].pci_segment); | 151 | pci_mmcfg_config[i].pci_segment); |
203 | return; | 152 | return 0; |
204 | } | 153 | } |
205 | printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n", | ||
206 | (unsigned long)pci_mmcfg_config[i].address); | ||
207 | } | 154 | } |
208 | |||
209 | unreachable_devices(); | ||
210 | |||
211 | raw_pci_ops = &pci_mmcfg; | 155 | raw_pci_ops = &pci_mmcfg; |
212 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; | 156 | return 1; |
213 | } | 157 | } |
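mcfg_ioremap() now sizes the mapping from the MCFG entry itself instead of the old fixed MMCONFIG_APER_MAX: each PCI bus needs 1MB of extended configuration space (32 devices x 8 functions x 4KB), so a segment covering buses 0-255 maps 256MB. A worked example of that size computation:
#include <stdio.h>

int main(void)
{
        unsigned int end_bus_number = 255;      /* example MCFG entry */
        unsigned int size = (end_bus_number + 1) << 20;

        /* 256 buses x 1MB (32 devices x 8 functions x 4KB each) */
        printf("aperture size = %uMB\n", size >> 20);
        return 0;
}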