Diffstat (limited to 'arch/powerpc')
32 files changed, 882 insertions, 283 deletions
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index e28e65e7a0e1..7de127e4ceef 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -1,13 +1,14 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.30-rc5 | 3 | # Linux kernel version: 2.6.31-rc7 |
4 | # Fri May 15 10:37:00 2009 | 4 | # Mon Aug 24 17:38:50 2009 |
5 | # | 5 | # |
6 | CONFIG_PPC64=y | 6 | CONFIG_PPC64=y |
7 | 7 | ||
8 | # | 8 | # |
9 | # Processor support | 9 | # Processor support |
10 | # | 10 | # |
11 | CONFIG_PPC_BOOK3S_64=y | ||
11 | CONFIG_PPC_BOOK3S=y | 12 | CONFIG_PPC_BOOK3S=y |
12 | # CONFIG_POWER4_ONLY is not set | 13 | # CONFIG_POWER4_ONLY is not set |
13 | CONFIG_POWER3=y | 14 | CONFIG_POWER3=y |
@@ -20,6 +21,7 @@ CONFIG_PPC_STD_MMU=y | |||
20 | CONFIG_PPC_STD_MMU_64=y | 21 | CONFIG_PPC_STD_MMU_64=y |
21 | CONFIG_PPC_MM_SLICES=y | 22 | CONFIG_PPC_MM_SLICES=y |
22 | CONFIG_VIRT_CPU_ACCOUNTING=y | 23 | CONFIG_VIRT_CPU_ACCOUNTING=y |
24 | CONFIG_PPC_HAVE_PMU_SUPPORT=y | ||
23 | CONFIG_SMP=y | 25 | CONFIG_SMP=y |
24 | CONFIG_NR_CPUS=2 | 26 | CONFIG_NR_CPUS=2 |
25 | CONFIG_64BIT=y | 27 | CONFIG_64BIT=y |
@@ -31,6 +33,7 @@ CONFIG_GENERIC_TIME=y | |||
31 | CONFIG_GENERIC_TIME_VSYSCALL=y | 33 | CONFIG_GENERIC_TIME_VSYSCALL=y |
32 | CONFIG_GENERIC_CLOCKEVENTS=y | 34 | CONFIG_GENERIC_CLOCKEVENTS=y |
33 | CONFIG_GENERIC_HARDIRQS=y | 35 | CONFIG_GENERIC_HARDIRQS=y |
36 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
34 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | 37 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y |
35 | CONFIG_IRQ_PER_CPU=y | 38 | CONFIG_IRQ_PER_CPU=y |
36 | CONFIG_STACKTRACE_SUPPORT=y | 39 | CONFIG_STACKTRACE_SUPPORT=y |
@@ -41,7 +44,6 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y | |||
41 | CONFIG_ARCH_HAS_ILOG2_U32=y | 44 | CONFIG_ARCH_HAS_ILOG2_U32=y |
42 | CONFIG_ARCH_HAS_ILOG2_U64=y | 45 | CONFIG_ARCH_HAS_ILOG2_U64=y |
43 | CONFIG_GENERIC_HWEIGHT=y | 46 | CONFIG_GENERIC_HWEIGHT=y |
44 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
45 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 47 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
46 | CONFIG_ARCH_NO_VIRT_TO_BUS=y | 48 | CONFIG_ARCH_NO_VIRT_TO_BUS=y |
47 | CONFIG_PPC=y | 49 | CONFIG_PPC=y |
@@ -62,6 +64,7 @@ CONFIG_DTC=y | |||
62 | # CONFIG_PPC_DCR_MMIO is not set | 64 | # CONFIG_PPC_DCR_MMIO is not set |
63 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y | 65 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y |
64 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 66 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
67 | CONFIG_CONSTRUCTORS=y | ||
65 | 68 | ||
66 | # | 69 | # |
67 | # General setup | 70 | # General setup |
@@ -113,7 +116,6 @@ CONFIG_SYSCTL_SYSCALL=y | |||
113 | CONFIG_KALLSYMS=y | 116 | CONFIG_KALLSYMS=y |
114 | CONFIG_KALLSYMS_ALL=y | 117 | CONFIG_KALLSYMS_ALL=y |
115 | CONFIG_KALLSYMS_EXTRA_PASS=y | 118 | CONFIG_KALLSYMS_EXTRA_PASS=y |
116 | # CONFIG_STRIP_ASM_SYMS is not set | ||
117 | CONFIG_HOTPLUG=y | 119 | CONFIG_HOTPLUG=y |
118 | CONFIG_PRINTK=y | 120 | CONFIG_PRINTK=y |
119 | CONFIG_BUG=y | 121 | CONFIG_BUG=y |
@@ -126,7 +128,14 @@ CONFIG_TIMERFD=y | |||
126 | CONFIG_EVENTFD=y | 128 | CONFIG_EVENTFD=y |
127 | CONFIG_SHMEM=y | 129 | CONFIG_SHMEM=y |
128 | CONFIG_AIO=y | 130 | CONFIG_AIO=y |
131 | CONFIG_HAVE_PERF_COUNTERS=y | ||
132 | |||
133 | # | ||
134 | # Performance Counters | ||
135 | # | ||
136 | # CONFIG_PERF_COUNTERS is not set | ||
129 | CONFIG_VM_EVENT_COUNTERS=y | 137 | CONFIG_VM_EVENT_COUNTERS=y |
138 | # CONFIG_STRIP_ASM_SYMS is not set | ||
130 | # CONFIG_COMPAT_BRK is not set | 139 | # CONFIG_COMPAT_BRK is not set |
131 | CONFIG_SLAB=y | 140 | CONFIG_SLAB=y |
132 | # CONFIG_SLUB is not set | 141 | # CONFIG_SLUB is not set |
@@ -145,6 +154,11 @@ CONFIG_HAVE_KRETPROBES=y | |||
145 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 154 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
146 | CONFIG_HAVE_DMA_ATTRS=y | 155 | CONFIG_HAVE_DMA_ATTRS=y |
147 | CONFIG_USE_GENERIC_SMP_HELPERS=y | 156 | CONFIG_USE_GENERIC_SMP_HELPERS=y |
157 | |||
158 | # | ||
159 | # GCOV-based kernel profiling | ||
160 | # | ||
161 | # CONFIG_GCOV_KERNEL is not set | ||
148 | # CONFIG_SLOW_WORK is not set | 162 | # CONFIG_SLOW_WORK is not set |
149 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | 163 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set |
150 | CONFIG_SLABINFO=y | 164 | CONFIG_SLABINFO=y |
@@ -210,7 +224,7 @@ CONFIG_PPC_CELL=y | |||
210 | # | 224 | # |
211 | # Cell Broadband Engine options | 225 | # Cell Broadband Engine options |
212 | # | 226 | # |
213 | CONFIG_SPU_FS=y | 227 | CONFIG_SPU_FS=m |
214 | CONFIG_SPU_FS_64K_LS=y | 228 | CONFIG_SPU_FS_64K_LS=y |
215 | # CONFIG_SPU_TRACE is not set | 229 | # CONFIG_SPU_TRACE is not set |
216 | CONFIG_SPU_BASE=y | 230 | CONFIG_SPU_BASE=y |
@@ -255,6 +269,7 @@ CONFIG_BINFMT_MISC=y | |||
255 | CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y | 269 | CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y |
256 | # CONFIG_IOMMU_VMERGE is not set | 270 | # CONFIG_IOMMU_VMERGE is not set |
257 | CONFIG_IOMMU_HELPER=y | 271 | CONFIG_IOMMU_HELPER=y |
272 | # CONFIG_SWIOTLB is not set | ||
258 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | 273 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y |
259 | CONFIG_ARCH_HAS_WALK_MEMORY=y | 274 | CONFIG_ARCH_HAS_WALK_MEMORY=y |
260 | CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y | 275 | CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y |
@@ -285,9 +300,9 @@ CONFIG_MIGRATION=y | |||
285 | CONFIG_PHYS_ADDR_T_64BIT=y | 300 | CONFIG_PHYS_ADDR_T_64BIT=y |
286 | CONFIG_ZONE_DMA_FLAG=1 | 301 | CONFIG_ZONE_DMA_FLAG=1 |
287 | CONFIG_BOUNCE=y | 302 | CONFIG_BOUNCE=y |
288 | CONFIG_UNEVICTABLE_LRU=y | ||
289 | CONFIG_HAVE_MLOCK=y | 303 | CONFIG_HAVE_MLOCK=y |
290 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | 304 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y |
305 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
291 | CONFIG_ARCH_MEMORY_PROBE=y | 306 | CONFIG_ARCH_MEMORY_PROBE=y |
292 | CONFIG_PPC_HAS_HASH_64K=y | 307 | CONFIG_PPC_HAS_HASH_64K=y |
293 | CONFIG_PPC_4K_PAGES=y | 308 | CONFIG_PPC_4K_PAGES=y |
@@ -399,6 +414,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y | |||
399 | # CONFIG_ECONET is not set | 414 | # CONFIG_ECONET is not set |
400 | # CONFIG_WAN_ROUTER is not set | 415 | # CONFIG_WAN_ROUTER is not set |
401 | # CONFIG_PHONET is not set | 416 | # CONFIG_PHONET is not set |
417 | # CONFIG_IEEE802154 is not set | ||
402 | # CONFIG_NET_SCHED is not set | 418 | # CONFIG_NET_SCHED is not set |
403 | # CONFIG_DCB is not set | 419 | # CONFIG_DCB is not set |
404 | 420 | ||
@@ -433,11 +449,14 @@ CONFIG_BT_HCIBTUSB=m | |||
433 | CONFIG_WIRELESS=y | 449 | CONFIG_WIRELESS=y |
434 | CONFIG_CFG80211=m | 450 | CONFIG_CFG80211=m |
435 | # CONFIG_CFG80211_REG_DEBUG is not set | 451 | # CONFIG_CFG80211_REG_DEBUG is not set |
452 | # CONFIG_CFG80211_DEBUGFS is not set | ||
436 | # CONFIG_WIRELESS_OLD_REGULATORY is not set | 453 | # CONFIG_WIRELESS_OLD_REGULATORY is not set |
437 | CONFIG_WIRELESS_EXT=y | 454 | CONFIG_WIRELESS_EXT=y |
438 | # CONFIG_WIRELESS_EXT_SYSFS is not set | 455 | # CONFIG_WIRELESS_EXT_SYSFS is not set |
439 | # CONFIG_LIB80211 is not set | 456 | # CONFIG_LIB80211 is not set |
440 | CONFIG_MAC80211=m | 457 | CONFIG_MAC80211=m |
458 | CONFIG_MAC80211_DEFAULT_PS=y | ||
459 | CONFIG_MAC80211_DEFAULT_PS_VALUE=1 | ||
441 | 460 | ||
442 | # | 461 | # |
443 | # Rate control algorithm selection | 462 | # Rate control algorithm selection |
@@ -447,7 +466,6 @@ CONFIG_MAC80211_RC_PID=y | |||
447 | CONFIG_MAC80211_RC_DEFAULT_PID=y | 466 | CONFIG_MAC80211_RC_DEFAULT_PID=y |
448 | # CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set | 467 | # CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set |
449 | CONFIG_MAC80211_RC_DEFAULT="pid" | 468 | CONFIG_MAC80211_RC_DEFAULT="pid" |
450 | # CONFIG_MAC80211_MESH is not set | ||
451 | # CONFIG_MAC80211_LEDS is not set | 469 | # CONFIG_MAC80211_LEDS is not set |
452 | # CONFIG_MAC80211_DEBUGFS is not set | 470 | # CONFIG_MAC80211_DEBUGFS is not set |
453 | # CONFIG_MAC80211_DEBUG_MENU is not set | 471 | # CONFIG_MAC80211_DEBUG_MENU is not set |
@@ -472,77 +490,7 @@ CONFIG_EXTRA_FIRMWARE="" | |||
472 | # CONFIG_DEBUG_DEVRES is not set | 490 | # CONFIG_DEBUG_DEVRES is not set |
473 | # CONFIG_SYS_HYPERVISOR is not set | 491 | # CONFIG_SYS_HYPERVISOR is not set |
474 | # CONFIG_CONNECTOR is not set | 492 | # CONFIG_CONNECTOR is not set |
475 | CONFIG_MTD=y | 493 | # CONFIG_MTD is not set |
476 | CONFIG_MTD_DEBUG=y | ||
477 | CONFIG_MTD_DEBUG_VERBOSE=0 | ||
478 | # CONFIG_MTD_CONCAT is not set | ||
479 | # CONFIG_MTD_PARTITIONS is not set | ||
480 | # CONFIG_MTD_TESTS is not set | ||
481 | |||
482 | # | ||
483 | # User Modules And Translation Layers | ||
484 | # | ||
485 | # CONFIG_MTD_CHAR is not set | ||
486 | CONFIG_MTD_BLKDEVS=y | ||
487 | CONFIG_MTD_BLOCK=y | ||
488 | # CONFIG_FTL is not set | ||
489 | # CONFIG_NFTL is not set | ||
490 | # CONFIG_INFTL is not set | ||
491 | # CONFIG_RFD_FTL is not set | ||
492 | # CONFIG_SSFDC is not set | ||
493 | # CONFIG_MTD_OOPS is not set | ||
494 | |||
495 | # | ||
496 | # RAM/ROM/Flash chip drivers | ||
497 | # | ||
498 | # CONFIG_MTD_CFI is not set | ||
499 | # CONFIG_MTD_JEDECPROBE is not set | ||
500 | CONFIG_MTD_MAP_BANK_WIDTH_1=y | ||
501 | CONFIG_MTD_MAP_BANK_WIDTH_2=y | ||
502 | CONFIG_MTD_MAP_BANK_WIDTH_4=y | ||
503 | # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set | ||
504 | # CONFIG_MTD_MAP_BANK_WIDTH_16 is not set | ||
505 | # CONFIG_MTD_MAP_BANK_WIDTH_32 is not set | ||
506 | CONFIG_MTD_CFI_I1=y | ||
507 | CONFIG_MTD_CFI_I2=y | ||
508 | # CONFIG_MTD_CFI_I4 is not set | ||
509 | # CONFIG_MTD_CFI_I8 is not set | ||
510 | # CONFIG_MTD_RAM is not set | ||
511 | # CONFIG_MTD_ROM is not set | ||
512 | # CONFIG_MTD_ABSENT is not set | ||
513 | |||
514 | # | ||
515 | # Mapping drivers for chip access | ||
516 | # | ||
517 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set | ||
518 | # CONFIG_MTD_PLATRAM is not set | ||
519 | |||
520 | # | ||
521 | # Self-contained MTD device drivers | ||
522 | # | ||
523 | # CONFIG_MTD_SLRAM is not set | ||
524 | # CONFIG_MTD_PHRAM is not set | ||
525 | # CONFIG_MTD_MTDRAM is not set | ||
526 | # CONFIG_MTD_BLOCK2MTD is not set | ||
527 | |||
528 | # | ||
529 | # Disk-On-Chip Device Drivers | ||
530 | # | ||
531 | # CONFIG_MTD_DOC2000 is not set | ||
532 | # CONFIG_MTD_DOC2001 is not set | ||
533 | # CONFIG_MTD_DOC2001PLUS is not set | ||
534 | # CONFIG_MTD_NAND is not set | ||
535 | # CONFIG_MTD_ONENAND is not set | ||
536 | |||
537 | # | ||
538 | # LPDDR flash memory drivers | ||
539 | # | ||
540 | # CONFIG_MTD_LPDDR is not set | ||
541 | |||
542 | # | ||
543 | # UBI - Unsorted block images | ||
544 | # | ||
545 | # CONFIG_MTD_UBI is not set | ||
546 | CONFIG_OF_DEVICE=y | 494 | CONFIG_OF_DEVICE=y |
547 | # CONFIG_PARPORT is not set | 495 | # CONFIG_PARPORT is not set |
548 | CONFIG_BLK_DEV=y | 496 | CONFIG_BLK_DEV=y |
@@ -590,10 +538,6 @@ CONFIG_BLK_DEV_SR=y | |||
590 | # CONFIG_BLK_DEV_SR_VENDOR is not set | 538 | # CONFIG_BLK_DEV_SR_VENDOR is not set |
591 | CONFIG_CHR_DEV_SG=m | 539 | CONFIG_CHR_DEV_SG=m |
592 | # CONFIG_CHR_DEV_SCH is not set | 540 | # CONFIG_CHR_DEV_SCH is not set |
593 | |||
594 | # | ||
595 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
596 | # | ||
597 | CONFIG_SCSI_MULTI_LUN=y | 541 | CONFIG_SCSI_MULTI_LUN=y |
598 | # CONFIG_SCSI_CONSTANTS is not set | 542 | # CONFIG_SCSI_CONSTANTS is not set |
599 | # CONFIG_SCSI_LOGGING is not set | 543 | # CONFIG_SCSI_LOGGING is not set |
@@ -626,7 +570,6 @@ CONFIG_BLK_DEV_DM=m | |||
626 | # CONFIG_DM_UEVENT is not set | 570 | # CONFIG_DM_UEVENT is not set |
627 | # CONFIG_MACINTOSH_DRIVERS is not set | 571 | # CONFIG_MACINTOSH_DRIVERS is not set |
628 | CONFIG_NETDEVICES=y | 572 | CONFIG_NETDEVICES=y |
629 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
630 | # CONFIG_DUMMY is not set | 573 | # CONFIG_DUMMY is not set |
631 | # CONFIG_BONDING is not set | 574 | # CONFIG_BONDING is not set |
632 | # CONFIG_MACVLAN is not set | 575 | # CONFIG_MACVLAN is not set |
@@ -646,10 +589,11 @@ CONFIG_MII=m | |||
646 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | 589 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set |
647 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | 590 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set |
648 | # CONFIG_B44 is not set | 591 | # CONFIG_B44 is not set |
592 | # CONFIG_KS8842 is not set | ||
649 | CONFIG_NETDEV_1000=y | 593 | CONFIG_NETDEV_1000=y |
650 | CONFIG_GELIC_NET=y | 594 | CONFIG_GELIC_NET=y |
651 | CONFIG_GELIC_WIRELESS=y | 595 | CONFIG_GELIC_WIRELESS=y |
652 | CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE=y | 596 | # CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE is not set |
653 | # CONFIG_NETDEV_10000 is not set | 597 | # CONFIG_NETDEV_10000 is not set |
654 | 598 | ||
655 | # | 599 | # |
@@ -669,8 +613,7 @@ CONFIG_WLAN_80211=y | |||
669 | # CONFIG_HOSTAP is not set | 613 | # CONFIG_HOSTAP is not set |
670 | # CONFIG_B43 is not set | 614 | # CONFIG_B43 is not set |
671 | # CONFIG_B43LEGACY is not set | 615 | # CONFIG_B43LEGACY is not set |
672 | CONFIG_ZD1211RW=m | 616 | # CONFIG_ZD1211RW is not set |
673 | # CONFIG_ZD1211RW_DEBUG is not set | ||
674 | # CONFIG_RT2X00 is not set | 617 | # CONFIG_RT2X00 is not set |
675 | 618 | ||
676 | # | 619 | # |
@@ -682,7 +625,7 @@ CONFIG_ZD1211RW=m | |||
682 | # | 625 | # |
683 | # CONFIG_USB_CATC is not set | 626 | # CONFIG_USB_CATC is not set |
684 | # CONFIG_USB_KAWETH is not set | 627 | # CONFIG_USB_KAWETH is not set |
685 | CONFIG_USB_PEGASUS=m | 628 | # CONFIG_USB_PEGASUS is not set |
686 | # CONFIG_USB_RTL8150 is not set | 629 | # CONFIG_USB_RTL8150 is not set |
687 | CONFIG_USB_USBNET=m | 630 | CONFIG_USB_USBNET=m |
688 | CONFIG_USB_NET_AX8817X=m | 631 | CONFIG_USB_NET_AX8817X=m |
@@ -693,10 +636,11 @@ CONFIG_USB_NET_AX8817X=m | |||
693 | # CONFIG_USB_NET_GL620A is not set | 636 | # CONFIG_USB_NET_GL620A is not set |
694 | # CONFIG_USB_NET_NET1080 is not set | 637 | # CONFIG_USB_NET_NET1080 is not set |
695 | # CONFIG_USB_NET_PLUSB is not set | 638 | # CONFIG_USB_NET_PLUSB is not set |
696 | CONFIG_USB_NET_MCS7830=m | 639 | # CONFIG_USB_NET_MCS7830 is not set |
697 | # CONFIG_USB_NET_RNDIS_HOST is not set | 640 | # CONFIG_USB_NET_RNDIS_HOST is not set |
698 | # CONFIG_USB_NET_CDC_SUBSET is not set | 641 | # CONFIG_USB_NET_CDC_SUBSET is not set |
699 | # CONFIG_USB_NET_ZAURUS is not set | 642 | # CONFIG_USB_NET_ZAURUS is not set |
643 | # CONFIG_USB_NET_INT51X1 is not set | ||
700 | # CONFIG_WAN is not set | 644 | # CONFIG_WAN is not set |
701 | CONFIG_PPP=m | 645 | CONFIG_PPP=m |
702 | CONFIG_PPP_MULTILINK=y | 646 | CONFIG_PPP_MULTILINK=y |
@@ -771,8 +715,7 @@ CONFIG_DEVKMEM=y | |||
771 | # | 715 | # |
772 | CONFIG_UNIX98_PTYS=y | 716 | CONFIG_UNIX98_PTYS=y |
773 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | 717 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set |
774 | CONFIG_LEGACY_PTYS=y | 718 | # CONFIG_LEGACY_PTYS is not set |
775 | CONFIG_LEGACY_PTY_COUNT=16 | ||
776 | # CONFIG_HVC_UDBG is not set | 719 | # CONFIG_HVC_UDBG is not set |
777 | # CONFIG_IPMI_HANDLER is not set | 720 | # CONFIG_IPMI_HANDLER is not set |
778 | # CONFIG_HW_RANDOM is not set | 721 | # CONFIG_HW_RANDOM is not set |
@@ -782,6 +725,11 @@ CONFIG_LEGACY_PTY_COUNT=16 | |||
782 | # CONFIG_TCG_TPM is not set | 725 | # CONFIG_TCG_TPM is not set |
783 | # CONFIG_I2C is not set | 726 | # CONFIG_I2C is not set |
784 | # CONFIG_SPI is not set | 727 | # CONFIG_SPI is not set |
728 | |||
729 | # | ||
730 | # PPS support | ||
731 | # | ||
732 | # CONFIG_PPS is not set | ||
785 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | 733 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y |
786 | # CONFIG_GPIOLIB is not set | 734 | # CONFIG_GPIOLIB is not set |
787 | # CONFIG_W1 is not set | 735 | # CONFIG_W1 is not set |
@@ -805,22 +753,7 @@ CONFIG_SSB_POSSIBLE=y | |||
805 | # CONFIG_HTC_PASIC3 is not set | 753 | # CONFIG_HTC_PASIC3 is not set |
806 | # CONFIG_MFD_TMIO is not set | 754 | # CONFIG_MFD_TMIO is not set |
807 | # CONFIG_REGULATOR is not set | 755 | # CONFIG_REGULATOR is not set |
808 | 756 | # CONFIG_MEDIA_SUPPORT is not set | |
809 | # | ||
810 | # Multimedia devices | ||
811 | # | ||
812 | |||
813 | # | ||
814 | # Multimedia core support | ||
815 | # | ||
816 | # CONFIG_VIDEO_DEV is not set | ||
817 | # CONFIG_DVB_CORE is not set | ||
818 | # CONFIG_VIDEO_MEDIA is not set | ||
819 | |||
820 | # | ||
821 | # Multimedia drivers | ||
822 | # | ||
823 | # CONFIG_DAB is not set | ||
824 | 757 | ||
825 | # | 758 | # |
826 | # Graphics support | 759 | # Graphics support |
@@ -898,6 +831,11 @@ CONFIG_SND_SUPPORT_OLD_API=y | |||
898 | CONFIG_SND_VERBOSE_PROCFS=y | 831 | CONFIG_SND_VERBOSE_PROCFS=y |
899 | # CONFIG_SND_VERBOSE_PRINTK is not set | 832 | # CONFIG_SND_VERBOSE_PRINTK is not set |
900 | # CONFIG_SND_DEBUG is not set | 833 | # CONFIG_SND_DEBUG is not set |
834 | # CONFIG_SND_RAWMIDI_SEQ is not set | ||
835 | # CONFIG_SND_OPL3_LIB_SEQ is not set | ||
836 | # CONFIG_SND_OPL4_LIB_SEQ is not set | ||
837 | # CONFIG_SND_SBAWE_SEQ is not set | ||
838 | # CONFIG_SND_EMU10K1_SEQ is not set | ||
901 | # CONFIG_SND_DRIVERS is not set | 839 | # CONFIG_SND_DRIVERS is not set |
902 | CONFIG_SND_PPC=y | 840 | CONFIG_SND_PPC=y |
903 | CONFIG_SND_PS3=m | 841 | CONFIG_SND_PS3=m |
@@ -930,29 +868,34 @@ CONFIG_USB_HIDDEV=y | |||
930 | # Special HID drivers | 868 | # Special HID drivers |
931 | # | 869 | # |
932 | # CONFIG_HID_A4TECH is not set | 870 | # CONFIG_HID_A4TECH is not set |
933 | # CONFIG_HID_APPLE is not set | 871 | CONFIG_HID_APPLE=m |
934 | # CONFIG_HID_BELKIN is not set | 872 | CONFIG_HID_BELKIN=m |
935 | # CONFIG_HID_CHERRY is not set | 873 | CONFIG_HID_CHERRY=m |
936 | # CONFIG_HID_CHICONY is not set | 874 | # CONFIG_HID_CHICONY is not set |
937 | # CONFIG_HID_CYPRESS is not set | 875 | # CONFIG_HID_CYPRESS is not set |
938 | # CONFIG_DRAGONRISE_FF is not set | 876 | # CONFIG_HID_DRAGONRISE is not set |
939 | # CONFIG_HID_EZKEY is not set | 877 | CONFIG_HID_EZKEY=m |
940 | # CONFIG_HID_KYE is not set | 878 | # CONFIG_HID_KYE is not set |
941 | # CONFIG_HID_GYRATION is not set | 879 | # CONFIG_HID_GYRATION is not set |
942 | # CONFIG_HID_KENSINGTON is not set | 880 | # CONFIG_HID_KENSINGTON is not set |
943 | # CONFIG_HID_LOGITECH is not set | 881 | CONFIG_HID_LOGITECH=m |
944 | # CONFIG_HID_MICROSOFT is not set | 882 | # CONFIG_LOGITECH_FF is not set |
883 | # CONFIG_LOGIRUMBLEPAD2_FF is not set | ||
884 | CONFIG_HID_MICROSOFT=m | ||
945 | # CONFIG_HID_MONTEREY is not set | 885 | # CONFIG_HID_MONTEREY is not set |
946 | # CONFIG_HID_NTRIG is not set | 886 | # CONFIG_HID_NTRIG is not set |
947 | # CONFIG_HID_PANTHERLORD is not set | 887 | # CONFIG_HID_PANTHERLORD is not set |
948 | # CONFIG_HID_PETALYNX is not set | 888 | # CONFIG_HID_PETALYNX is not set |
949 | # CONFIG_HID_SAMSUNG is not set | 889 | # CONFIG_HID_SAMSUNG is not set |
950 | CONFIG_HID_SONY=m | 890 | CONFIG_HID_SONY=m |
951 | # CONFIG_HID_SUNPLUS is not set | 891 | CONFIG_HID_SUNPLUS=m |
952 | # CONFIG_GREENASIA_FF is not set | 892 | # CONFIG_HID_GREENASIA is not set |
893 | CONFIG_HID_SMARTJOYPLUS=m | ||
894 | # CONFIG_SMARTJOYPLUS_FF is not set | ||
953 | # CONFIG_HID_TOPSEED is not set | 895 | # CONFIG_HID_TOPSEED is not set |
954 | # CONFIG_THRUSTMASTER_FF is not set | 896 | # CONFIG_HID_THRUSTMASTER is not set |
955 | # CONFIG_ZEROPLUS_FF is not set | 897 | # CONFIG_HID_WACOM is not set |
898 | # CONFIG_HID_ZEROPLUS is not set | ||
956 | CONFIG_USB_SUPPORT=y | 899 | CONFIG_USB_SUPPORT=y |
957 | CONFIG_USB_ARCH_HAS_HCD=y | 900 | CONFIG_USB_ARCH_HAS_HCD=y |
958 | CONFIG_USB_ARCH_HAS_OHCI=y | 901 | CONFIG_USB_ARCH_HAS_OHCI=y |
@@ -988,6 +931,8 @@ CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y | |||
988 | # CONFIG_USB_ISP116X_HCD is not set | 931 | # CONFIG_USB_ISP116X_HCD is not set |
989 | # CONFIG_USB_ISP1760_HCD is not set | 932 | # CONFIG_USB_ISP1760_HCD is not set |
990 | CONFIG_USB_OHCI_HCD=m | 933 | CONFIG_USB_OHCI_HCD=m |
934 | # CONFIG_USB_OHCI_HCD_PPC_OF_BE is not set | ||
935 | # CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set | ||
991 | # CONFIG_USB_OHCI_HCD_PPC_OF is not set | 936 | # CONFIG_USB_OHCI_HCD_PPC_OF is not set |
992 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set | 937 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set |
993 | CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y | 938 | CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y |
@@ -1115,6 +1060,10 @@ CONFIG_RTC_DRV_PS3=m | |||
1115 | # CONFIG_DMADEVICES is not set | 1060 | # CONFIG_DMADEVICES is not set |
1116 | # CONFIG_AUXDISPLAY is not set | 1061 | # CONFIG_AUXDISPLAY is not set |
1117 | # CONFIG_UIO is not set | 1062 | # CONFIG_UIO is not set |
1063 | |||
1064 | # | ||
1065 | # TI VLYNQ | ||
1066 | # | ||
1118 | # CONFIG_STAGING is not set | 1067 | # CONFIG_STAGING is not set |
1119 | 1068 | ||
1120 | # | 1069 | # |
@@ -1141,11 +1090,12 @@ CONFIG_FS_MBCACHE=y | |||
1141 | # CONFIG_REISERFS_FS is not set | 1090 | # CONFIG_REISERFS_FS is not set |
1142 | # CONFIG_JFS_FS is not set | 1091 | # CONFIG_JFS_FS is not set |
1143 | # CONFIG_FS_POSIX_ACL is not set | 1092 | # CONFIG_FS_POSIX_ACL is not set |
1144 | CONFIG_FILE_LOCKING=y | ||
1145 | # CONFIG_XFS_FS is not set | 1093 | # CONFIG_XFS_FS is not set |
1146 | # CONFIG_GFS2_FS is not set | 1094 | # CONFIG_GFS2_FS is not set |
1147 | # CONFIG_OCFS2_FS is not set | 1095 | # CONFIG_OCFS2_FS is not set |
1148 | # CONFIG_BTRFS_FS is not set | 1096 | # CONFIG_BTRFS_FS is not set |
1097 | CONFIG_FILE_LOCKING=y | ||
1098 | CONFIG_FSNOTIFY=y | ||
1149 | CONFIG_DNOTIFY=y | 1099 | CONFIG_DNOTIFY=y |
1150 | CONFIG_INOTIFY=y | 1100 | CONFIG_INOTIFY=y |
1151 | CONFIG_INOTIFY_USER=y | 1101 | CONFIG_INOTIFY_USER=y |
@@ -1205,7 +1155,6 @@ CONFIG_MISC_FILESYSTEMS=y | |||
1205 | # CONFIG_BEFS_FS is not set | 1155 | # CONFIG_BEFS_FS is not set |
1206 | # CONFIG_BFS_FS is not set | 1156 | # CONFIG_BFS_FS is not set |
1207 | # CONFIG_EFS_FS is not set | 1157 | # CONFIG_EFS_FS is not set |
1208 | # CONFIG_JFFS2_FS is not set | ||
1209 | # CONFIG_CRAMFS is not set | 1158 | # CONFIG_CRAMFS is not set |
1210 | # CONFIG_SQUASHFS is not set | 1159 | # CONFIG_SQUASHFS is not set |
1211 | # CONFIG_VXFS_FS is not set | 1160 | # CONFIG_VXFS_FS is not set |
@@ -1222,6 +1171,7 @@ CONFIG_NFS_FS=y | |||
1222 | CONFIG_NFS_V3=y | 1171 | CONFIG_NFS_V3=y |
1223 | # CONFIG_NFS_V3_ACL is not set | 1172 | # CONFIG_NFS_V3_ACL is not set |
1224 | CONFIG_NFS_V4=y | 1173 | CONFIG_NFS_V4=y |
1174 | # CONFIG_NFS_V4_1 is not set | ||
1225 | CONFIG_ROOT_NFS=y | 1175 | CONFIG_ROOT_NFS=y |
1226 | # CONFIG_NFSD is not set | 1176 | # CONFIG_NFSD is not set |
1227 | CONFIG_LOCKD=y | 1177 | CONFIG_LOCKD=y |
@@ -1359,7 +1309,6 @@ CONFIG_DEBUG_MEMORY_INIT=y | |||
1359 | CONFIG_DEBUG_LIST=y | 1309 | CONFIG_DEBUG_LIST=y |
1360 | # CONFIG_DEBUG_SG is not set | 1310 | # CONFIG_DEBUG_SG is not set |
1361 | # CONFIG_DEBUG_NOTIFIERS is not set | 1311 | # CONFIG_DEBUG_NOTIFIERS is not set |
1362 | # CONFIG_BOOT_PRINTK_DELAY is not set | ||
1363 | # CONFIG_RCU_TORTURE_TEST is not set | 1312 | # CONFIG_RCU_TORTURE_TEST is not set |
1364 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 1313 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
1365 | # CONFIG_BACKTRACE_SELF_TEST is not set | 1314 | # CONFIG_BACKTRACE_SELF_TEST is not set |
@@ -1374,31 +1323,21 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | |||
1374 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 1323 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
1375 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 1324 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
1376 | CONFIG_RING_BUFFER=y | 1325 | CONFIG_RING_BUFFER=y |
1326 | CONFIG_EVENT_TRACING=y | ||
1327 | CONFIG_CONTEXT_SWITCH_TRACER=y | ||
1377 | CONFIG_TRACING=y | 1328 | CONFIG_TRACING=y |
1378 | CONFIG_TRACING_SUPPORT=y | 1329 | CONFIG_TRACING_SUPPORT=y |
1379 | 1330 | # CONFIG_FTRACE is not set | |
1380 | # | ||
1381 | # Tracers | ||
1382 | # | ||
1383 | # CONFIG_FUNCTION_TRACER is not set | ||
1384 | # CONFIG_IRQSOFF_TRACER is not set | ||
1385 | # CONFIG_SCHED_TRACER is not set | ||
1386 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
1387 | # CONFIG_EVENT_TRACER is not set | ||
1388 | # CONFIG_BOOT_TRACER is not set | ||
1389 | # CONFIG_TRACE_BRANCH_PROFILING is not set | ||
1390 | # CONFIG_STACK_TRACER is not set | ||
1391 | # CONFIG_KMEMTRACE is not set | ||
1392 | # CONFIG_WORKQUEUE_TRACER is not set | ||
1393 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
1394 | # CONFIG_FTRACE_STARTUP_TEST is not set | ||
1395 | # CONFIG_DYNAMIC_DEBUG is not set | 1331 | # CONFIG_DYNAMIC_DEBUG is not set |
1396 | # CONFIG_SAMPLES is not set | 1332 | # CONFIG_SAMPLES is not set |
1397 | CONFIG_HAVE_ARCH_KGDB=y | 1333 | CONFIG_HAVE_ARCH_KGDB=y |
1398 | # CONFIG_KGDB is not set | 1334 | # CONFIG_KGDB is not set |
1335 | # CONFIG_PPC_DISABLE_WERROR is not set | ||
1336 | CONFIG_PPC_WERROR=y | ||
1399 | CONFIG_PRINT_STACK_DEPTH=64 | 1337 | CONFIG_PRINT_STACK_DEPTH=64 |
1400 | CONFIG_DEBUG_STACKOVERFLOW=y | 1338 | CONFIG_DEBUG_STACKOVERFLOW=y |
1401 | # CONFIG_DEBUG_STACK_USAGE is not set | 1339 | # CONFIG_DEBUG_STACK_USAGE is not set |
1340 | # CONFIG_PPC_EMULATED_STATS is not set | ||
1402 | # CONFIG_CODE_PATCHING_SELFTEST is not set | 1341 | # CONFIG_CODE_PATCHING_SELFTEST is not set |
1403 | # CONFIG_FTR_FIXUP_SELFTEST is not set | 1342 | # CONFIG_FTR_FIXUP_SELFTEST is not set |
1404 | # CONFIG_MSI_BITMAP_SELFTEST is not set | 1343 | # CONFIG_MSI_BITMAP_SELFTEST is not set |
diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h
index 86455c4c31ee..416e12c2d505 100644
--- a/arch/powerpc/include/asm/agp.h
+++ b/arch/powerpc/include/asm/agp.h
@@ -8,10 +8,6 @@ | |||
8 | #define unmap_page_from_agp(page) | 8 | #define unmap_page_from_agp(page) |
9 | #define flush_agp_cache() mb() | 9 | #define flush_agp_cache() mb() |
10 | 10 | ||
11 | /* Convert a physical address to an address suitable for the GART. */ | ||
12 | #define phys_to_gart(x) (x) | ||
13 | #define gart_to_phys(x) (x) | ||
14 | |||
15 | /* GATT allocation. Returns/accepts GATT kernel virtual address. */ | 11 | /* GATT allocation. Returns/accepts GATT kernel virtual address. */ |
16 | #define alloc_gatt_pages(order) \ | 12 | #define alloc_gatt_pages(order) \ |
17 | ((char *)__get_free_pages(GFP_KERNEL, (order))) | 13 | ((char *)__get_free_pages(GFP_KERNEL, (order))) |
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 7d2277cef09a..e3e06e0f7fc0 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -30,4 +30,7 @@ dev_archdata_get_node(const struct dev_archdata *ad) | |||
30 | return ad->of_node; | 30 | return ad->of_node; |
31 | } | 31 | } |
32 | 32 | ||
33 | struct pdev_archdata { | ||
34 | }; | ||
35 | |||
33 | #endif /* _ASM_POWERPC_DEVICE_H */ | 36 | #endif /* _ASM_POWERPC_DEVICE_H */ |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index b44aaabdd1a6..0c34371ec49c 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
424 | #endif | 424 | #endif |
425 | } | 425 | } |
426 | 426 | ||
427 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
428 | { | ||
429 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
430 | |||
431 | if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size)) | ||
432 | return 0; | ||
433 | |||
434 | if (!dev->dma_mask) | ||
435 | return 0; | ||
436 | |||
437 | return addr + size <= *dev->dma_mask; | ||
438 | } | ||
439 | |||
440 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
441 | { | ||
442 | return paddr + get_dma_direct_offset(dev); | ||
443 | } | ||
444 | |||
445 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | ||
446 | { | ||
447 | return daddr - get_dma_direct_offset(dev); | ||
448 | } | ||
449 | |||
427 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 450 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
428 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 451 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
429 | #ifdef CONFIG_NOT_COHERENT_CACHE | 452 | #ifdef CONFIG_NOT_COHERENT_CACHE |
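The three helpers added above (dma_capable(), phys_to_dma(), dma_to_phys()) replace the old per-arch swiotlb address checks. A minimal sketch of how a caller might combine them is shown below; the wrapper function name is illustrative and not part of the patch.

	/*
	 * Sketch only: check whether a kernel buffer can be handed to a
	 * device directly, i.e. its bus address fits under the device's
	 * DMA mask and needs no bounce buffering.
	 */
	static bool buffer_is_dma_safe(struct device *dev, void *buf, size_t size)
	{
		dma_addr_t bus = phys_to_dma(dev, virt_to_phys(buf));

		return dma_capable(dev, bus, size);
	}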
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fddc3ed715fa..c9c930ed11d7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,7 +34,8 @@ | |||
34 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 34 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
35 | 35 | ||
36 | /* We don't currently support large pages. */ | 36 | /* We don't currently support large pages. */ |
37 | #define KVM_PAGES_PER_HPAGE (1UL << 31) | 37 | #define KVM_NR_PAGE_SIZES 1 |
38 | #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) | ||
38 | 39 | ||
39 | struct kvm; | 40 | struct kvm; |
40 | struct kvm_run; | 41 | struct kvm_run; |
@@ -153,7 +154,6 @@ struct kvm_vcpu_arch { | |||
153 | u32 pid; | 154 | u32 pid; |
154 | u32 swap_pid; | 155 | u32 swap_pid; |
155 | 156 | ||
156 | u32 pvr; | ||
157 | u32 ccr0; | 157 | u32 ccr0; |
158 | u32 ccr1; | 158 | u32 ccr1; |
159 | u32 dbcr0; | 159 | u32 dbcr0; |
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index eb17da781128..2a5da069714e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
104 | else | 104 | else |
105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); | 105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); |
106 | 106 | ||
107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) | 107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) |
108 | /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we | 108 | /* Second case is 32-bit with 64-bit PTE. In this case, we |
109 | * can just store as long as we do the two halves in the right order | 109 | * can just store as long as we do the two halves in the right order |
110 | * with a barrier in between. This is possible because we take care, | 110 | * with a barrier in between. This is possible because we take care, |
111 | * in the hash code, to pre-invalidate if the PTE was already hashed, | 111 | * in the hash code, to pre-invalidate if the PTE was already hashed, |
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
140 | 140 | ||
141 | #else | 141 | #else |
142 | /* Anything else just stores the PTE normally. That covers all 64-bit | 142 | /* Anything else just stores the PTE normally. That covers all 64-bit |
143 | * cases, and 32-bit non-hash with 64-bit PTEs in UP mode | 143 | * cases, and 32-bit non-hash with 32-bit PTEs. |
144 | */ | 144 | */ |
145 | *ptep = pte; | 145 | *ptep = pte; |
146 | #endif | 146 | #endif |
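The comment changed above relies on storing the two halves of a 64-bit PTE in a fixed order with a barrier in between. A simplified C sketch of that ordering follows (kernel-context sketch with a hypothetical helper name and big-endian layout assumed; the real code in __set_pte_at() uses inline assembly):

	/*
	 * Store the half without the valid bit first, then the half that
	 * carries _PAGE_PRESENT, with a write barrier in between, so a
	 * concurrent hash-table walk never sees a present but
	 * half-written PTE.
	 */
	static inline void sketch_set_pte64(u64 *ptep, u64 pte)
	{
		u32 *p = (u32 *)ptep;

		p[0] = (u32)(pte >> 32);	/* physical page number half */
		smp_wmb();			/* order the two word stores */
		p[1] = (u32)pte;		/* flags half, includes _PAGE_PRESENT */
	}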
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index 157c5ca581c8..f388f0ab193f 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -154,6 +154,7 @@ int qe_get_snum(void); | |||
154 | void qe_put_snum(u8 snum); | 154 | void qe_put_snum(u8 snum); |
155 | unsigned int qe_get_num_of_risc(void); | 155 | unsigned int qe_get_num_of_risc(void); |
156 | unsigned int qe_get_num_of_snums(void); | 156 | unsigned int qe_get_num_of_snums(void); |
157 | int qe_alive_during_sleep(void); | ||
157 | 158 | ||
158 | /* we actually use cpm_muram implementation, define this for convenience */ | 159 | /* we actually use cpm_muram implementation, define this for convenience */ |
159 | #define qe_muram_init cpm_muram_init | 160 | #define qe_muram_init cpm_muram_init |
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 1e5cfad0e3f7..3ab8b3e6feb0 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -64,4 +64,7 @@ | |||
64 | #define SO_TIMESTAMPING 37 | 64 | #define SO_TIMESTAMPING 37 |
65 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 65 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
66 | 66 | ||
67 | #define SO_PROTOCOL 38 | ||
68 | #define SO_DOMAIN 39 | ||
69 | |||
67 | #endif /* _ASM_POWERPC_SOCKET_H */ | 70 | #endif /* _ASM_POWERPC_SOCKET_H */ |
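SO_PROTOCOL and SO_DOMAIN let userspace query how a socket was created. A small hypothetical userspace example of the intended use (not part of the patch):

	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		int s = socket(AF_INET, SOCK_STREAM, 0);
		int domain = 0, protocol = 0;
		socklen_t len = sizeof(domain);

		/* SO_DOMAIN returns the address family, SO_PROTOCOL the protocol */
		getsockopt(s, SOL_SOCKET, SO_DOMAIN, &domain, &len);
		len = sizeof(protocol);
		getsockopt(s, SOL_SOCKET, SO_PROTOCOL, &protocol, &len);
		printf("domain=%d protocol=%d\n", domain, protocol);
		return 0;
	}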
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f81..198266cf9e2d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@ | |||
54 | * This returns the old value in the lock, so we succeeded | 54 | * This returns the old value in the lock, so we succeeded |
55 | * in getting the lock if the return value is 0. | 55 | * in getting the lock if the return value is 0. |
56 | */ | 56 | */ |
57 | static inline unsigned long __spin_trylock(raw_spinlock_t *lock) | 57 | static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp, token; | 59 | unsigned long tmp, token; |
60 | 60 | ||
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock) | |||
76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
77 | { | 77 | { |
78 | CLEAR_IO_SYNC; | 78 | CLEAR_IO_SYNC; |
79 | return __spin_trylock(lock) == 0; | 79 | return arch_spin_trylock(lock) == 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | /* | 82 | /* |
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
108 | { | 108 | { |
109 | CLEAR_IO_SYNC; | 109 | CLEAR_IO_SYNC; |
110 | while (1) { | 110 | while (1) { |
111 | if (likely(__spin_trylock(lock) == 0)) | 111 | if (likely(arch_spin_trylock(lock) == 0)) |
112 | break; | 112 | break; |
113 | do { | 113 | do { |
114 | HMT_low(); | 114 | HMT_low(); |
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | |||
126 | 126 | ||
127 | CLEAR_IO_SYNC; | 127 | CLEAR_IO_SYNC; |
128 | while (1) { | 128 | while (1) { |
129 | if (likely(__spin_trylock(lock) == 0)) | 129 | if (likely(arch_spin_trylock(lock) == 0)) |
130 | break; | 130 | break; |
131 | local_save_flags(flags_dis); | 131 | local_save_flags(flags_dis); |
132 | local_irq_restore(flags); | 132 | local_irq_restore(flags); |
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | |||
181 | * This returns the old value in the lock + 1, | 181 | * This returns the old value in the lock + 1, |
182 | * so we got a read lock if the return value is > 0. | 182 | * so we got a read lock if the return value is > 0. |
183 | */ | 183 | */ |
184 | static inline long __read_trylock(raw_rwlock_t *rw) | 184 | static inline long arch_read_trylock(raw_rwlock_t *rw) |
185 | { | 185 | { |
186 | long tmp; | 186 | long tmp; |
187 | 187 | ||
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw) | |||
205 | * This returns the old value in the lock, | 205 | * This returns the old value in the lock, |
206 | * so we got the write lock if the return value is 0. | 206 | * so we got the write lock if the return value is 0. |
207 | */ | 207 | */ |
208 | static inline long __write_trylock(raw_rwlock_t *rw) | 208 | static inline long arch_write_trylock(raw_rwlock_t *rw) |
209 | { | 209 | { |
210 | long tmp, token; | 210 | long tmp, token; |
211 | 211 | ||
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw) | |||
228 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 228 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
229 | { | 229 | { |
230 | while (1) { | 230 | while (1) { |
231 | if (likely(__read_trylock(rw) > 0)) | 231 | if (likely(arch_read_trylock(rw) > 0)) |
232 | break; | 232 | break; |
233 | do { | 233 | do { |
234 | HMT_low(); | 234 | HMT_low(); |
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
242 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 242 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
243 | { | 243 | { |
244 | while (1) { | 244 | while (1) { |
245 | if (likely(__write_trylock(rw) == 0)) | 245 | if (likely(arch_write_trylock(rw) == 0)) |
246 | break; | 246 | break; |
247 | do { | 247 | do { |
248 | HMT_low(); | 248 | HMT_low(); |
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
255 | 255 | ||
256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
257 | { | 257 | { |
258 | return __read_trylock(rw) > 0; | 258 | return arch_read_trylock(rw) > 0; |
259 | } | 259 | } |
260 | 260 | ||
261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
262 | { | 262 | { |
263 | return __write_trylock(rw) == 0; | 263 | return arch_write_trylock(rw) == 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b73396b93905..9619285f64e8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o | |||
97 | 97 | ||
98 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 98 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
99 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 99 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
100 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o | 100 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o |
101 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ | 101 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ |
102 | power5+-pmu.o power6-pmu.o power7-pmu.o | 102 | power5+-pmu.o power6-pmu.o power7-pmu.o |
103 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o | 103 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 561b64652311..197b15646eeb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -67,6 +67,8 @@ int main(void) | |||
67 | DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); | 67 | DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); |
68 | #ifdef CONFIG_PPC64 | 68 | #ifdef CONFIG_PPC64 |
69 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); | 69 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); |
70 | DEFINE(SIGSEGV, SIGSEGV); | ||
71 | DEFINE(NMI_MASK, NMI_MASK); | ||
70 | #else | 72 | #else |
71 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); | 73 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); |
72 | #endif /* CONFIG_PPC64 */ | 74 | #endif /* CONFIG_PPC64 */ |
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 68ccf11e4f19..e8a57de85bcf 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -24,50 +24,12 @@ | |||
24 | int swiotlb __read_mostly; | 24 | int swiotlb __read_mostly; |
25 | unsigned int ppc_swiotlb_enable; | 25 | unsigned int ppc_swiotlb_enable; |
26 | 26 | ||
27 | void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr) | ||
28 | { | ||
29 | unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr)); | ||
30 | void *pageaddr = page_address(pfn_to_page(pfn)); | ||
31 | |||
32 | if (pageaddr != NULL) | ||
33 | return pageaddr + (addr % PAGE_SIZE); | ||
34 | return NULL; | ||
35 | } | ||
36 | |||
37 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
38 | { | ||
39 | return paddr + get_dma_direct_offset(hwdev); | ||
40 | } | ||
41 | |||
42 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
43 | |||
44 | { | ||
45 | return baddr - get_dma_direct_offset(hwdev); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Determine if an address needs bounce buffering via swiotlb. | ||
50 | * Going forward I expect the swiotlb code to generalize on using | ||
51 | * a dma_ops->addr_needs_map, and this function will move from here to the | ||
52 | * generic swiotlb code. | ||
53 | */ | ||
54 | int | ||
55 | swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr, | ||
56 | size_t size) | ||
57 | { | ||
58 | struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev); | ||
59 | |||
60 | BUG_ON(!dma_ops); | ||
61 | return dma_ops->addr_needs_map(hwdev, addr, size); | ||
62 | } | ||
63 | |||
64 | /* | 27 | /* |
65 | * Determine if an address is reachable by a pci device, or if we must bounce. | 28 | * Determine if an address is reachable by a pci device, or if we must bounce. |
66 | */ | 29 | */ |
67 | static int | 30 | static int |
68 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | 31 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) |
69 | { | 32 | { |
70 | u64 mask = dma_get_mask(hwdev); | ||
71 | dma_addr_t max; | 33 | dma_addr_t max; |
72 | struct pci_controller *hose; | 34 | struct pci_controller *hose; |
73 | struct pci_dev *pdev = to_pci_dev(hwdev); | 35 | struct pci_dev *pdev = to_pci_dev(hwdev); |
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | |||
79 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) | 41 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) |
80 | return 1; | 42 | return 1; |
81 | 43 | ||
82 | return !is_buffer_dma_capable(mask, addr, size); | 44 | return 0; |
83 | } | ||
84 | |||
85 | static int | ||
86 | swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | ||
87 | { | ||
88 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | ||
89 | } | 45 | } |
90 | 46 | ||
91 | |||
92 | /* | 47 | /* |
93 | * At the moment, all platforms that use this code only require | 48 | * At the moment, all platforms that use this code only require |
94 | * swiotlb to be used if we're operating on HIGHMEM. Since | 49 | * swiotlb to be used if we're operating on HIGHMEM. Since |
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = { | |||
104 | .dma_supported = swiotlb_dma_supported, | 59 | .dma_supported = swiotlb_dma_supported, |
105 | .map_page = swiotlb_map_page, | 60 | .map_page = swiotlb_map_page, |
106 | .unmap_page = swiotlb_unmap_page, | 61 | .unmap_page = swiotlb_unmap_page, |
107 | .addr_needs_map = swiotlb_addr_needs_map, | ||
108 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | 62 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, |
109 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | 63 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, |
110 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 64 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index eb898112e577..8ac85e08ffae 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION | |||
729 | bne- do_ste_alloc /* If so handle it */ | 729 | bne- do_ste_alloc /* If so handle it */ |
730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | 730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
731 | 731 | ||
732 | clrrdi r11,r1,THREAD_SHIFT | ||
733 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ | ||
734 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ | ||
735 | bne 77f /* then don't call hash_page now */ | ||
736 | |||
732 | /* | 737 | /* |
733 | * On iSeries, we soft-disable interrupts here, then | 738 | * On iSeries, we soft-disable interrupts here, then |
734 | * hard-enable interrupts so that the hash_page code can spin on | 739 | * hard-enable interrupts so that the hash_page code can spin on |
@@ -833,6 +838,20 @@ handle_page_fault: | |||
833 | bl .low_hash_fault | 838 | bl .low_hash_fault |
834 | b .ret_from_except | 839 | b .ret_from_except |
835 | 840 | ||
841 | /* | ||
842 | * We come here as a result of a DSI at a point where we don't want | ||
843 | * to call hash_page, such as when we are accessing memory (possibly | ||
844 | * user memory) inside a PMU interrupt that occurred while interrupts | ||
845 | * were soft-disabled. We want to invoke the exception handler for | ||
846 | * the access, or panic if there isn't a handler. | ||
847 | */ | ||
848 | 77: bl .save_nvgprs | ||
849 | mr r4,r3 | ||
850 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
851 | li r5,SIGSEGV | ||
852 | bl .bad_page_fault | ||
853 | b .ret_from_except | ||
854 | |||
836 | /* here we have a segment miss */ | 855 | /* here we have a segment miss */ |
837 | do_ste_alloc: | 856 | do_ste_alloc: |
838 | bl .ste_allocate /* try to insert stab entry */ | 857 | bl .ste_allocate /* try to insert stab entry */ |
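The assembly added in this file tests the NMI bits of the preempt count (via the NMI_MASK value exported through asm-offsets) before entering hash_page. A rough C equivalent of just that test, as a sketch rather than the patch's actual code:

	#include <linux/hardirq.h>	/* NMI_MASK, preempt_count(), in_nmi() */

	/* lwz r0,TI_PREEMPT(r11); andis. r0,r0,NMI_MASK@h expressed in C */
	static inline bool dsi_in_nmi_context(void)
	{
		return (preempt_count() & NMI_MASK) != 0;	/* same as in_nmi() */
	}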
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
new file mode 100644
index 000000000000..f74b62c67511
--- /dev/null
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -0,0 +1,527 @@ | |||
1 | /* | ||
2 | * Performance counter callchain support - powerpc architecture code | ||
3 | * | ||
4 | * Copyright © 2009 Paul Mackerras, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/perf_counter.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/sigcontext.h> | ||
20 | #include <asm/ucontext.h> | ||
21 | #include <asm/vdso.h> | ||
22 | #ifdef CONFIG_PPC64 | ||
23 | #include "ppc32.h" | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * Store another value in a callchain_entry. | ||
28 | */ | ||
29 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
30 | { | ||
31 | unsigned int nr = entry->nr; | ||
32 | |||
33 | if (nr < PERF_MAX_STACK_DEPTH) { | ||
34 | entry->ip[nr] = ip; | ||
35 | entry->nr = nr + 1; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Is sp valid as the address of the next kernel stack frame after prev_sp? | ||
41 | * The next frame may be in a different stack area but should not go | ||
42 | * back down in the same stack area. | ||
43 | */ | ||
44 | static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | ||
45 | { | ||
46 | if (sp & 0xf) | ||
47 | return 0; /* must be 16-byte aligned */ | ||
48 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | ||
49 | return 0; | ||
50 | if (sp >= prev_sp + STACK_FRAME_OVERHEAD) | ||
51 | return 1; | ||
52 | /* | ||
53 | * sp could decrease when we jump off an interrupt stack | ||
54 | * back to the regular process stack. | ||
55 | */ | ||
56 | if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1))) | ||
57 | return 1; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static void perf_callchain_kernel(struct pt_regs *regs, | ||
62 | struct perf_callchain_entry *entry) | ||
63 | { | ||
64 | unsigned long sp, next_sp; | ||
65 | unsigned long next_ip; | ||
66 | unsigned long lr; | ||
67 | long level = 0; | ||
68 | unsigned long *fp; | ||
69 | |||
70 | lr = regs->link; | ||
71 | sp = regs->gpr[1]; | ||
72 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
73 | callchain_store(entry, regs->nip); | ||
74 | |||
75 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | ||
76 | return; | ||
77 | |||
78 | for (;;) { | ||
79 | fp = (unsigned long *) sp; | ||
80 | next_sp = fp[0]; | ||
81 | |||
82 | if (next_sp == sp + STACK_INT_FRAME_SIZE && | ||
83 | fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { | ||
84 | /* | ||
85 | * This looks like an interrupt frame for an | ||
86 | * interrupt that occurred in the kernel | ||
87 | */ | ||
88 | regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD); | ||
89 | next_ip = regs->nip; | ||
90 | lr = regs->link; | ||
91 | level = 0; | ||
92 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
93 | |||
94 | } else { | ||
95 | if (level == 0) | ||
96 | next_ip = lr; | ||
97 | else | ||
98 | next_ip = fp[STACK_FRAME_LR_SAVE]; | ||
99 | |||
100 | /* | ||
101 | * We can't tell which of the first two addresses | ||
102 | * we get are valid, but we can filter out the | ||
103 | * obviously bogus ones here. We replace them | ||
104 | * with 0 rather than removing them entirely so | ||
105 | * that userspace can tell which is which. | ||
106 | */ | ||
107 | if ((level == 1 && next_ip == lr) || | ||
108 | (level <= 1 && !kernel_text_address(next_ip))) | ||
109 | next_ip = 0; | ||
110 | |||
111 | ++level; | ||
112 | } | ||
113 | |||
114 | callchain_store(entry, next_ip); | ||
115 | if (!valid_next_sp(next_sp, sp)) | ||
116 | return; | ||
117 | sp = next_sp; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | #ifdef CONFIG_PPC64 | ||
122 | |||
123 | #ifdef CONFIG_HUGETLB_PAGE | ||
124 | #define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize]) | ||
125 | #else | ||
126 | #define is_huge_psize(pagesize) 0 | ||
127 | #endif | ||
128 | |||
129 | /* | ||
130 | * On 64-bit we don't want to invoke hash_page on user addresses from | ||
131 | * interrupt context, so if the access faults, we read the page tables | ||
132 | * to find which page (if any) is mapped and access it directly. | ||
133 | */ | ||
134 | static int read_user_stack_slow(void __user *ptr, void *ret, int nb) | ||
135 | { | ||
136 | pgd_t *pgdir; | ||
137 | pte_t *ptep, pte; | ||
138 | int pagesize; | ||
139 | unsigned long addr = (unsigned long) ptr; | ||
140 | unsigned long offset; | ||
141 | unsigned long pfn; | ||
142 | void *kaddr; | ||
143 | |||
144 | pgdir = current->mm->pgd; | ||
145 | if (!pgdir) | ||
146 | return -EFAULT; | ||
147 | |||
148 | pagesize = get_slice_psize(current->mm, addr); | ||
149 | |||
150 | /* align address to page boundary */ | ||
151 | offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1); | ||
152 | addr -= offset; | ||
153 | |||
154 | if (is_huge_psize(pagesize)) | ||
155 | ptep = huge_pte_offset(current->mm, addr); | ||
156 | else | ||
157 | ptep = find_linux_pte(pgdir, addr); | ||
158 | |||
159 | if (ptep == NULL) | ||
160 | return -EFAULT; | ||
161 | pte = *ptep; | ||
162 | if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER)) | ||
163 | return -EFAULT; | ||
164 | pfn = pte_pfn(pte); | ||
165 | if (!page_is_ram(pfn)) | ||
166 | return -EFAULT; | ||
167 | |||
168 | /* no highmem to worry about here */ | ||
169 | kaddr = pfn_to_kaddr(pfn); | ||
170 | memcpy(ret, kaddr + offset, nb); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) | ||
175 | { | ||
176 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) || | ||
177 | ((unsigned long)ptr & 7)) | ||
178 | return -EFAULT; | ||
179 | |||
180 | if (!__get_user_inatomic(*ret, ptr)) | ||
181 | return 0; | ||
182 | |||
183 | return read_user_stack_slow(ptr, ret, 8); | ||
184 | } | ||
185 | |||
186 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | ||
187 | { | ||
188 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | ||
189 | ((unsigned long)ptr & 3)) | ||
190 | return -EFAULT; | ||
191 | |||
192 | if (!__get_user_inatomic(*ret, ptr)) | ||
193 | return 0; | ||
194 | |||
195 | return read_user_stack_slow(ptr, ret, 4); | ||
196 | } | ||
197 | |||
198 | static inline int valid_user_sp(unsigned long sp, int is_64) | ||
199 | { | ||
200 | if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32) | ||
201 | return 0; | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * 64-bit user processes use the same stack frame for RT and non-RT signals. | ||
207 | */ | ||
208 | struct signal_frame_64 { | ||
209 | char dummy[__SIGNAL_FRAMESIZE]; | ||
210 | struct ucontext uc; | ||
211 | unsigned long unused[2]; | ||
212 | unsigned int tramp[6]; | ||
213 | struct siginfo *pinfo; | ||
214 | void *puc; | ||
215 | struct siginfo info; | ||
216 | char abigap[288]; | ||
217 | }; | ||
218 | |||
219 | static int is_sigreturn_64_address(unsigned long nip, unsigned long fp) | ||
220 | { | ||
221 | if (nip == fp + offsetof(struct signal_frame_64, tramp)) | ||
222 | return 1; | ||
223 | if (vdso64_rt_sigtramp && current->mm->context.vdso_base && | ||
224 | nip == current->mm->context.vdso_base + vdso64_rt_sigtramp) | ||
225 | return 1; | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Do some sanity checking on the signal frame pointed to by sp. | ||
231 | * We check the pinfo and puc pointers in the frame. | ||
232 | */ | ||
233 | static int sane_signal_64_frame(unsigned long sp) | ||
234 | { | ||
235 | struct signal_frame_64 __user *sf; | ||
236 | unsigned long pinfo, puc; | ||
237 | |||
238 | sf = (struct signal_frame_64 __user *) sp; | ||
239 | if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) || | ||
240 | read_user_stack_64((unsigned long __user *) &sf->puc, &puc)) | ||
241 | return 0; | ||
242 | return pinfo == (unsigned long) &sf->info && | ||
243 | puc == (unsigned long) &sf->uc; | ||
244 | } | ||
245 | |||
246 | static void perf_callchain_user_64(struct pt_regs *regs, | ||
247 | struct perf_callchain_entry *entry) | ||
248 | { | ||
249 | unsigned long sp, next_sp; | ||
250 | unsigned long next_ip; | ||
251 | unsigned long lr; | ||
252 | long level = 0; | ||
253 | struct signal_frame_64 __user *sigframe; | ||
254 | unsigned long __user *fp, *uregs; | ||
255 | |||
256 | next_ip = regs->nip; | ||
257 | lr = regs->link; | ||
258 | sp = regs->gpr[1]; | ||
259 | callchain_store(entry, PERF_CONTEXT_USER); | ||
260 | callchain_store(entry, next_ip); | ||
261 | |||
262 | for (;;) { | ||
263 | fp = (unsigned long __user *) sp; | ||
264 | if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) | ||
265 | return; | ||
266 | if (level > 0 && read_user_stack_64(&fp[2], &next_ip)) | ||
267 | return; | ||
268 | |||
269 | /* | ||
270 | * Note: the next_sp - sp >= signal frame size check | ||
271 | * is true when next_sp < sp, which can happen when | ||
272 | * transitioning from an alternate signal stack to the | ||
273 | * normal stack. | ||
274 | */ | ||
275 | if (next_sp - sp >= sizeof(struct signal_frame_64) && | ||
276 | (is_sigreturn_64_address(next_ip, sp) || | ||
277 | (level <= 1 && is_sigreturn_64_address(lr, sp))) && | ||
278 | sane_signal_64_frame(sp)) { | ||
279 | /* | ||
280 | * This looks like a signal frame | ||
281 | */ | ||
282 | sigframe = (struct signal_frame_64 __user *) sp; | ||
283 | uregs = sigframe->uc.uc_mcontext.gp_regs; | ||
284 | if (read_user_stack_64(&uregs[PT_NIP], &next_ip) || | ||
285 | read_user_stack_64(&uregs[PT_LNK], &lr) || | ||
286 | read_user_stack_64(&uregs[PT_R1], &sp)) | ||
287 | return; | ||
288 | level = 0; | ||
289 | callchain_store(entry, PERF_CONTEXT_USER); | ||
290 | callchain_store(entry, next_ip); | ||
291 | continue; | ||
292 | } | ||
293 | |||
294 | if (level == 0) | ||
295 | next_ip = lr; | ||
296 | callchain_store(entry, next_ip); | ||
297 | ++level; | ||
298 | sp = next_sp; | ||
299 | } | ||
300 | } | ||
301 | |||
302 | static inline int current_is_64bit(void) | ||
303 | { | ||
304 | /* | ||
305 | * We can't use test_thread_flag() here because we may be on an | ||
306 | * interrupt stack, and the thread flags don't get copied over | ||
307 | * from the thread_info on the main stack to the interrupt stack. | ||
308 | */ | ||
309 | return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT); | ||
310 | } | ||
311 | |||
312 | #else /* CONFIG_PPC64 */ | ||
313 | /* | ||
314 | * On 32-bit we just access the address and let hash_page create a | ||
315 | * HPTE if necessary, so there is no need to fall back to reading | ||
316 | * the page tables. Since this is called at interrupt level, | ||
317 | * do_page_fault() won't treat a DSI as a page fault. | ||
318 | */ | ||
319 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | ||
320 | { | ||
321 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | ||
322 | ((unsigned long)ptr & 3)) | ||
323 | return -EFAULT; | ||
324 | |||
325 | return __get_user_inatomic(*ret, ptr); | ||
326 | } | ||
327 | |||
328 | static inline void perf_callchain_user_64(struct pt_regs *regs, | ||
329 | struct perf_callchain_entry *entry) | ||
330 | { | ||
331 | } | ||
332 | |||
333 | static inline int current_is_64bit(void) | ||
334 | { | ||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | static inline int valid_user_sp(unsigned long sp, int is_64) | ||
339 | { | ||
340 | if (!sp || (sp & 7) || sp > TASK_SIZE - 32) | ||
341 | return 0; | ||
342 | return 1; | ||
343 | } | ||
344 | |||
345 | #define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE | ||
346 | #define sigcontext32 sigcontext | ||
347 | #define mcontext32 mcontext | ||
348 | #define ucontext32 ucontext | ||
349 | #define compat_siginfo_t struct siginfo | ||
350 | |||
351 | #endif /* CONFIG_PPC64 */ | ||
352 | |||
353 | /* | ||
354 | * Layout for non-RT signal frames | ||
355 | */ | ||
356 | struct signal_frame_32 { | ||
357 | char dummy[__SIGNAL_FRAMESIZE32]; | ||
358 | struct sigcontext32 sctx; | ||
359 | struct mcontext32 mctx; | ||
360 | int abigap[56]; | ||
361 | }; | ||
362 | |||
363 | /* | ||
364 | * Layout for RT signal frames | ||
365 | */ | ||
366 | struct rt_signal_frame_32 { | ||
367 | char dummy[__SIGNAL_FRAMESIZE32 + 16]; | ||
368 | compat_siginfo_t info; | ||
369 | struct ucontext32 uc; | ||
370 | int abigap[56]; | ||
371 | }; | ||
372 | |||
373 | static int is_sigreturn_32_address(unsigned int nip, unsigned int fp) | ||
374 | { | ||
375 | if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad)) | ||
376 | return 1; | ||
377 | if (vdso32_sigtramp && current->mm->context.vdso_base && | ||
378 | nip == current->mm->context.vdso_base + vdso32_sigtramp) | ||
379 | return 1; | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp) | ||
384 | { | ||
385 | if (nip == fp + offsetof(struct rt_signal_frame_32, | ||
386 | uc.uc_mcontext.mc_pad)) | ||
387 | return 1; | ||
388 | if (vdso32_rt_sigtramp && current->mm->context.vdso_base && | ||
389 | nip == current->mm->context.vdso_base + vdso32_rt_sigtramp) | ||
390 | return 1; | ||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | static int sane_signal_32_frame(unsigned int sp) | ||
395 | { | ||
396 | struct signal_frame_32 __user *sf; | ||
397 | unsigned int regs; | ||
398 | |||
399 | sf = (struct signal_frame_32 __user *) (unsigned long) sp; | ||
400 | if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs)) | ||
401 | return 0; | ||
402 | return regs == (unsigned long) &sf->mctx; | ||
403 | } | ||
404 | |||
405 | static int sane_rt_signal_32_frame(unsigned int sp) | ||
406 | { | ||
407 | struct rt_signal_frame_32 __user *sf; | ||
408 | unsigned int regs; | ||
409 | |||
410 | sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; | ||
411 | if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs)) | ||
412 | return 0; | ||
413 | return regs == (unsigned long) &sf->uc.uc_mcontext; | ||
414 | } | ||
415 | |||
416 | static unsigned int __user *signal_frame_32_regs(unsigned int sp, | ||
417 | unsigned int next_sp, unsigned int next_ip) | ||
418 | { | ||
419 | struct mcontext32 __user *mctx = NULL; | ||
420 | struct signal_frame_32 __user *sf; | ||
421 | struct rt_signal_frame_32 __user *rt_sf; | ||
422 | |||
423 | /* | ||
424 | * Note: the next_sp - sp >= signal frame size check | ||
425 | * is true when next_sp < sp, for example, when | ||
426 | * transitioning from an alternate signal stack to the | ||
427 | * normal stack. | ||
428 | */ | ||
429 | if (next_sp - sp >= sizeof(struct signal_frame_32) && | ||
430 | is_sigreturn_32_address(next_ip, sp) && | ||
431 | sane_signal_32_frame(sp)) { | ||
432 | sf = (struct signal_frame_32 __user *) (unsigned long) sp; | ||
433 | mctx = &sf->mctx; | ||
434 | } | ||
435 | |||
436 | if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) && | ||
437 | is_rt_sigreturn_32_address(next_ip, sp) && | ||
438 | sane_rt_signal_32_frame(sp)) { | ||
439 | rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; | ||
440 | mctx = &rt_sf->uc.uc_mcontext; | ||
441 | } | ||
442 | |||
443 | if (!mctx) | ||
444 | return NULL; | ||
445 | return mctx->mc_gregs; | ||
446 | } | ||
447 | |||
448 | static void perf_callchain_user_32(struct pt_regs *regs, | ||
449 | struct perf_callchain_entry *entry) | ||
450 | { | ||
451 | unsigned int sp, next_sp; | ||
452 | unsigned int next_ip; | ||
453 | unsigned int lr; | ||
454 | long level = 0; | ||
455 | unsigned int __user *fp, *uregs; | ||
456 | |||
457 | next_ip = regs->nip; | ||
458 | lr = regs->link; | ||
459 | sp = regs->gpr[1]; | ||
460 | callchain_store(entry, PERF_CONTEXT_USER); | ||
461 | callchain_store(entry, next_ip); | ||
462 | |||
463 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | ||
464 | fp = (unsigned int __user *) (unsigned long) sp; | ||
465 | if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp)) | ||
466 | return; | ||
467 | if (level > 0 && read_user_stack_32(&fp[1], &next_ip)) | ||
468 | return; | ||
469 | |||
470 | uregs = signal_frame_32_regs(sp, next_sp, next_ip); | ||
471 | if (!uregs && level <= 1) | ||
472 | uregs = signal_frame_32_regs(sp, next_sp, lr); | ||
473 | if (uregs) { | ||
474 | /* | ||
475 | * This looks like a signal frame, so restart | ||
476 | * the stack trace with the values in it. | ||
477 | */ | ||
478 | if (read_user_stack_32(&uregs[PT_NIP], &next_ip) || | ||
479 | read_user_stack_32(&uregs[PT_LNK], &lr) || | ||
480 | read_user_stack_32(&uregs[PT_R1], &sp)) | ||
481 | return; | ||
482 | level = 0; | ||
483 | callchain_store(entry, PERF_CONTEXT_USER); | ||
484 | callchain_store(entry, next_ip); | ||
485 | continue; | ||
486 | } | ||
487 | |||
488 | if (level == 0) | ||
489 | next_ip = lr; | ||
490 | callchain_store(entry, next_ip); | ||
491 | ++level; | ||
492 | sp = next_sp; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | ||
498 | * we don't need separate irq and nmi entries here. | ||
499 | */ | ||
500 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | ||
501 | |||
502 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
503 | { | ||
504 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | ||
505 | |||
506 | entry->nr = 0; | ||
507 | |||
508 | if (current->pid == 0) /* idle task? */ | ||
509 | return entry; | ||
510 | |||
511 | if (!user_mode(regs)) { | ||
512 | perf_callchain_kernel(regs, entry); | ||
513 | if (current->mm) | ||
514 | regs = task_pt_regs(current); | ||
515 | else | ||
516 | regs = NULL; | ||
517 | } | ||
518 | |||
519 | if (regs) { | ||
520 | if (current_is_64bit()) | ||
521 | perf_callchain_user_64(regs, entry); | ||
522 | else | ||
523 | perf_callchain_user_32(regs, entry); | ||
524 | } | ||
525 | |||
526 | return entry; | ||
527 | } | ||
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 388cf57ad827..018d094d92f9 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -317,7 +317,7 @@ static int power7_generic_events[] = { | |||
317 | */ | 317 | */ |
318 | static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | 318 | static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
319 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | 319 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ |
320 | [C(OP_READ)] = { 0x400f0, 0xc880 }, | 320 | [C(OP_READ)] = { 0xc880, 0x400f0 }, |
321 | [C(OP_WRITE)] = { 0, 0x300f0 }, | 321 | [C(OP_WRITE)] = { 0, 0x300f0 }, |
322 | [C(OP_PREFETCH)] = { 0xd8b8, 0 }, | 322 | [C(OP_PREFETCH)] = { 0xd8b8, 0 }, |
323 | }, | 323 | }, |
@@ -327,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
327 | [C(OP_PREFETCH)] = { 0x408a, 0 }, | 327 | [C(OP_PREFETCH)] = { 0x408a, 0 }, |
328 | }, | 328 | }, |
329 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | 329 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ |
330 | [C(OP_READ)] = { 0x6080, 0x6084 }, | 330 | [C(OP_READ)] = { 0x16080, 0x26080 }, |
331 | [C(OP_WRITE)] = { 0x6082, 0x6086 }, | 331 | [C(OP_WRITE)] = { 0x16082, 0x26082 }, |
332 | [C(OP_PREFETCH)] = { 0, 0 }, | 332 | [C(OP_PREFETCH)] = { 0, 0 }, |
333 | }, | 333 | }, |
334 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 334 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index 0cef809cec21..f4d1b55aa70b 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c | |||
@@ -138,7 +138,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
138 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | 138 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); |
139 | } | 139 | } |
140 | 140 | ||
141 | static int kvmppc_44x_init(void) | 141 | static int __init kvmppc_44x_init(void) |
142 | { | 142 | { |
143 | int r; | 143 | int r; |
144 | 144 | ||
@@ -149,7 +149,7 @@ static int kvmppc_44x_init(void) | |||
149 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE); | 149 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE); |
150 | } | 150 | } |
151 | 151 | ||
152 | static void kvmppc_44x_exit(void) | 152 | static void __exit kvmppc_44x_exit(void) |
153 | { | 153 | { |
154 | kvmppc_booke_exit(); | 154 | kvmppc_booke_exit(); |
155 | } | 155 | } |
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 4a16f472cc18..ff3cb63b8117 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "timing.h" | 30 | #include "timing.h" |
31 | 31 | ||
32 | #include "44x_tlb.h" | 32 | #include "44x_tlb.h" |
33 | #include "trace.h" | ||
33 | 34 | ||
34 | #ifndef PPC44x_TLBE_SIZE | 35 | #ifndef PPC44x_TLBE_SIZE |
35 | #define PPC44x_TLBE_SIZE PPC44x_TLB_4K | 36 | #define PPC44x_TLBE_SIZE PPC44x_TLB_4K |
@@ -263,7 +264,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, | |||
263 | 264 | ||
264 | /* XXX set tlb_44x_index to stlb_index? */ | 265 | /* XXX set tlb_44x_index to stlb_index? */ |
265 | 266 | ||
266 | KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler); | 267 | trace_kvm_stlb_inval(stlb_index); |
267 | } | 268 | } |
268 | 269 | ||
269 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | 270 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
@@ -365,8 +366,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, | |||
365 | /* Insert shadow mapping into hardware TLB. */ | 366 | /* Insert shadow mapping into hardware TLB. */ |
366 | kvmppc_44x_tlbe_set_modified(vcpu_44x, victim); | 367 | kvmppc_44x_tlbe_set_modified(vcpu_44x, victim); |
367 | kvmppc_44x_tlbwe(victim, &stlbe); | 368 | kvmppc_44x_tlbwe(victim, &stlbe); |
368 | KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1, | 369 | trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1, |
369 | stlbe.word2, handler); | 370 | stlbe.word2); |
370 | } | 371 | } |
371 | 372 | ||
372 | /* For a particular guest TLB entry, invalidate the corresponding host TLB | 373 | /* For a particular guest TLB entry, invalidate the corresponding host TLB |
@@ -485,8 +486,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | |||
485 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); | 486 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); |
486 | } | 487 | } |
487 | 488 | ||
488 | KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0, | 489 | trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, |
489 | tlbe->word1, tlbe->word2, handler); | 490 | tlbe->word2); |
490 | 491 | ||
491 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); | 492 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); |
492 | return EMULATE_DONE; | 493 | return EMULATE_DONE; |
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 5a152a52796f..c29926846613 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig | |||
@@ -2,8 +2,7 @@ | |||
2 | # KVM configuration | 2 | # KVM configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | config HAVE_KVM_IRQCHIP | 5 | source "virt/kvm/Kconfig" |
6 | bool | ||
7 | 6 | ||
8 | menuconfig VIRTUALIZATION | 7 | menuconfig VIRTUALIZATION |
9 | bool "Virtualization" | 8 | bool "Virtualization" |
@@ -59,17 +58,6 @@ config KVM_E500 | |||
59 | 58 | ||
60 | If unsure, say N. | 59 | If unsure, say N. |
61 | 60 | ||
62 | config KVM_TRACE | ||
63 | bool "KVM trace support" | ||
64 | depends on KVM && MARKERS && SYSFS | ||
65 | select RELAY | ||
66 | select DEBUG_FS | ||
67 | default n | ||
68 | ---help--- | ||
69 | This option allows reading a trace of kvm-related events through | ||
70 | relayfs. Note the ABI is not considered stable and will be | ||
71 | modified in future updates. | ||
72 | |||
73 | source drivers/virtio/Kconfig | 61 | source drivers/virtio/Kconfig |
74 | 62 | ||
75 | endif # VIRTUALIZATION | 63 | endif # VIRTUALIZATION |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 459c7ee580f7..37655fe19f2f 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile | |||
@@ -8,7 +8,9 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm | |||
8 | 8 | ||
9 | common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | 9 | common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) |
10 | 10 | ||
11 | common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) | 11 | CFLAGS_44x_tlb.o := -I. |
12 | CFLAGS_e500_tlb.o := -I. | ||
13 | CFLAGS_emulate.o := -I. | ||
12 | 14 | ||
13 | kvm-objs := $(common-objs-y) powerpc.o emulate.o | 15 | kvm-objs := $(common-objs-y) powerpc.o emulate.o |
14 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o | 16 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 642e4204cf25..e7bf4d029484 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -520,7 +520,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | |||
520 | return kvmppc_core_vcpu_translate(vcpu, tr); | 520 | return kvmppc_core_vcpu_translate(vcpu, tr); |
521 | } | 521 | } |
522 | 522 | ||
523 | int kvmppc_booke_init(void) | 523 | int __init kvmppc_booke_init(void) |
524 | { | 524 | { |
525 | unsigned long ivor[16]; | 525 | unsigned long ivor[16]; |
526 | unsigned long max_ivor = 0; | 526 | unsigned long max_ivor = 0; |
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index d8067fd81cdd..64949eef43f1 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -60,9 +60,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
60 | 60 | ||
61 | kvmppc_e500_tlb_setup(vcpu_e500); | 61 | kvmppc_e500_tlb_setup(vcpu_e500); |
62 | 62 | ||
63 | /* Use the same core vertion as host's */ | ||
64 | vcpu->arch.pvr = mfspr(SPRN_PVR); | ||
65 | |||
66 | return 0; | 63 | return 0; |
67 | } | 64 | } |
68 | 65 | ||
@@ -132,7 +129,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
132 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | 129 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); |
133 | } | 130 | } |
134 | 131 | ||
135 | static int kvmppc_e500_init(void) | 132 | static int __init kvmppc_e500_init(void) |
136 | { | 133 | { |
137 | int r, i; | 134 | int r, i; |
138 | unsigned long ivor[3]; | 135 | unsigned long ivor[3]; |
@@ -160,7 +157,7 @@ static int kvmppc_e500_init(void) | |||
160 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE); | 157 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE); |
161 | } | 158 | } |
162 | 159 | ||
163 | static void kvmppc_e500_exit(void) | 160 | static void __exit kvmppc_e500_exit(void) 
164 | { | 161 | { |
165 | kvmppc_booke_exit(); | 162 | kvmppc_booke_exit(); |
166 | } | 163 | } |
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 3f760414b9f8..be95b8d8e3b7 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -180,6 +180,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
180 | case SPRN_MMUCSR0: | 180 | case SPRN_MMUCSR0: |
181 | vcpu->arch.gpr[rt] = 0; break; | 181 | vcpu->arch.gpr[rt] = 0; break; |
182 | 182 | ||
183 | case SPRN_MMUCFG: | ||
184 | vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break; | ||
185 | |||
183 | /* extra exceptions */ | 186 | /* extra exceptions */ |
184 | case SPRN_IVOR32: | 187 | case SPRN_IVOR32: |
185 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; | 188 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; |
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 0e773fc2d5e4..fb1e1dc11ba5 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include "../mm/mmu_decl.h" | 23 | #include "../mm/mmu_decl.h" |
24 | #include "e500_tlb.h" | 24 | #include "e500_tlb.h" |
25 | #include "trace.h" | ||
25 | 26 | ||
26 | #define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) | 27 | #define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) |
27 | 28 | ||
@@ -224,9 +225,8 @@ static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
224 | 225 | ||
225 | kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); | 226 | kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); |
226 | stlbe->mas1 = 0; | 227 | stlbe->mas1 = 0; |
227 | KVMTRACE_5D(STLB_INVAL, &vcpu_e500->vcpu, index_of(tlbsel, esel), | 228 | trace_kvm_stlb_inval(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, |
228 | stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7, | 229 | stlbe->mas3, stlbe->mas7); |
229 | handler); | ||
230 | } | 230 | } |
231 | 231 | ||
232 | static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, | 232 | static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, |
@@ -269,7 +269,7 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, | |||
269 | tlbsel = (vcpu_e500->mas4 >> 28) & 0x1; | 269 | tlbsel = (vcpu_e500->mas4 >> 28) & 0x1; |
270 | victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; | 270 | victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; |
271 | pidsel = (vcpu_e500->mas4 >> 16) & 0xf; | 271 | pidsel = (vcpu_e500->mas4 >> 16) & 0xf; |
272 | tsized = (vcpu_e500->mas4 >> 8) & 0xf; | 272 | tsized = (vcpu_e500->mas4 >> 7) & 0x1f; |
273 | 273 | ||
274 | vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | 274 | vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) |
275 | | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); | 275 | | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); |
@@ -309,7 +309,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
309 | vcpu_e500->shadow_pages[tlbsel][esel] = new_page; | 309 | vcpu_e500->shadow_pages[tlbsel][esel] = new_page; |
310 | 310 | ||
311 | /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */ | 311 | /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */ |
312 | stlbe->mas1 = MAS1_TSIZE(BOOKE_PAGESZ_4K) | 312 | stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K) |
313 | | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; | 313 | | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; |
314 | stlbe->mas2 = (gvaddr & MAS2_EPN) | 314 | stlbe->mas2 = (gvaddr & MAS2_EPN) |
315 | | e500_shadow_mas2_attrib(gtlbe->mas2, | 315 | | e500_shadow_mas2_attrib(gtlbe->mas2, |
@@ -319,9 +319,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
319 | vcpu_e500->vcpu.arch.msr & MSR_PR); | 319 | vcpu_e500->vcpu.arch.msr & MSR_PR); |
320 | stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; | 320 | stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; |
321 | 321 | ||
322 | KVMTRACE_5D(STLB_WRITE, &vcpu_e500->vcpu, index_of(tlbsel, esel), | 322 | trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, |
323 | stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7, | 323 | stlbe->mas3, stlbe->mas7); |
324 | handler); | ||
325 | } | 324 | } |
326 | 325 | ||
327 | /* XXX only map the one-one case, for now use TLB0 */ | 326 | /* XXX only map the one-one case, for now use TLB0 */ |
@@ -535,9 +534,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
535 | gtlbe->mas3 = vcpu_e500->mas3; | 534 | gtlbe->mas3 = vcpu_e500->mas3; |
536 | gtlbe->mas7 = vcpu_e500->mas7; | 535 | gtlbe->mas7 = vcpu_e500->mas7; |
537 | 536 | ||
538 | KVMTRACE_5D(GTLB_WRITE, vcpu, vcpu_e500->mas0, | 537 | trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2, |
539 | gtlbe->mas1, gtlbe->mas2, gtlbe->mas3, gtlbe->mas7, | 538 | gtlbe->mas3, gtlbe->mas7); |
540 | handler); | ||
541 | 539 | ||
542 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | 540 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ |
543 | if (tlbe_is_host_safe(vcpu, gtlbe)) { | 541 | if (tlbe_is_host_safe(vcpu, gtlbe)) { |
@@ -545,7 +543,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
545 | case 0: | 543 | case 0: |
546 | /* TLB0 */ | 544 | /* TLB0 */ |
547 | gtlbe->mas1 &= ~MAS1_TSIZE(~0); | 545 | gtlbe->mas1 &= ~MAS1_TSIZE(~0); |
548 | gtlbe->mas1 |= MAS1_TSIZE(BOOKE_PAGESZ_4K); | 546 | gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); |
549 | 547 | ||
550 | stlbsel = 0; | 548 | stlbsel = 0; |
551 | sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel); | 549 | sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel); |
@@ -679,14 +677,14 @@ void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
679 | 677 | ||
680 | /* Insert large initial mapping for guest. */ | 678 | /* Insert large initial mapping for guest. */ |
681 | tlbe = &vcpu_e500->guest_tlb[1][0]; | 679 | tlbe = &vcpu_e500->guest_tlb[1][0]; |
682 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_256M); | 680 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); |
683 | tlbe->mas2 = 0; | 681 | tlbe->mas2 = 0; |
684 | tlbe->mas3 = E500_TLB_SUPER_PERM_MASK; | 682 | tlbe->mas3 = E500_TLB_SUPER_PERM_MASK; |
685 | tlbe->mas7 = 0; | 683 | tlbe->mas7 = 0; |
686 | 684 | ||
687 | /* 4K map for serial output. Used by kernel wrapper. */ | 685 | /* 4K map for serial output. Used by kernel wrapper. */ |
688 | tlbe = &vcpu_e500->guest_tlb[1][1]; | 686 | tlbe = &vcpu_e500->guest_tlb[1][1]; |
689 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_4K); | 687 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); |
690 | tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; | 688 | tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; |
691 | tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; | 689 | tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; |
692 | tlbe->mas7 = 0; | 690 | tlbe->mas7 = 0; |
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h index 45b064b76906..d28e3010a5e2 100644 --- a/arch/powerpc/kvm/e500_tlb.h +++ b/arch/powerpc/kvm/e500_tlb.h | |||
@@ -16,7 +16,7 @@ | |||
16 | #define __KVM_E500_TLB_H__ | 16 | #define __KVM_E500_TLB_H__ |
17 | 17 | ||
18 | #include <linux/kvm_host.h> | 18 | #include <linux/kvm_host.h> |
19 | #include <asm/mmu-fsl-booke.h> | 19 | #include <asm/mmu-book3e.h> |
20 | #include <asm/tlb.h> | 20 | #include <asm/tlb.h> |
21 | #include <asm/kvm_e500.h> | 21 | #include <asm/kvm_e500.h> |
22 | 22 | ||
@@ -59,7 +59,7 @@ extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *); | |||
59 | /* TLB helper functions */ | 59 | /* TLB helper functions */ |
60 | static inline unsigned int get_tlb_size(const struct tlbe *tlbe) | 60 | static inline unsigned int get_tlb_size(const struct tlbe *tlbe) |
61 | { | 61 | { |
62 | return (tlbe->mas1 >> 8) & 0xf; | 62 | return (tlbe->mas1 >> 7) & 0x1f; |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) | 65 | static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) |
@@ -70,7 +70,7 @@ static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) | |||
70 | static inline u64 get_tlb_bytes(const struct tlbe *tlbe) | 70 | static inline u64 get_tlb_bytes(const struct tlbe *tlbe) |
71 | { | 71 | { |
72 | unsigned int pgsize = get_tlb_size(tlbe); | 72 | unsigned int pgsize = get_tlb_size(tlbe); |
73 | return 1ULL << 10 << (pgsize << 1); | 73 | return 1ULL << 10 << pgsize; |
74 | } | 74 | } |
75 | 75 | ||
76 | static inline gva_t get_tlb_end(const struct tlbe *tlbe) | 76 | static inline gva_t get_tlb_end(const struct tlbe *tlbe) |
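For reference, the get_tlb_size()/get_tlb_bytes() change above moves from the old FSL BookE MAS1 layout (4-bit TSIZE, giving 4^TSIZE KB pages) to the Book3E layout (5-bit TSIZE, giving 2^TSIZE KB pages); that interpretation follows directly from the shifts in the hunk. A minimal side-by-side sketch, with the field extraction copied from the diff:

	/* Old mmu-fsl-booke encoding: page bytes = 1024 * 4^TSIZE */
	static inline unsigned long long old_tlb_bytes(unsigned int mas1)
	{
		unsigned int pgsize = (mas1 >> 8) & 0xf;
		return 1ULL << 10 << (pgsize << 1);
	}

	/* New mmu-book3e encoding: page bytes = 1024 * 2^TSIZE */
	static inline unsigned long long new_tlb_bytes(unsigned int mas1)
	{
		unsigned int pgsize = (mas1 >> 7) & 0x1f;
		return 1ULL << 10 << pgsize;
	}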
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index a561d6e8da1c..7737146af3fb 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | #include <asm/disassemble.h> | 30 | #include <asm/disassemble.h> |
31 | #include "timing.h" | 31 | #include "timing.h" |
32 | #include "trace.h" | ||
32 | 33 | ||
33 | #define OP_TRAP 3 | 34 | #define OP_TRAP 3 |
34 | 35 | ||
@@ -187,7 +188,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
187 | case SPRN_SRR1: | 188 | case SPRN_SRR1: |
188 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; | 189 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; |
189 | case SPRN_PVR: | 190 | case SPRN_PVR: |
190 | vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; | 191 | vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break; |
192 | case SPRN_PIR: | ||
193 | vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break; | ||
191 | 194 | ||
192 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 195 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
193 | * the guest can always access the real TB anyways. | 196 | * the guest can always access the real TB anyways. |
@@ -417,7 +420,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
417 | } | 420 | } |
418 | } | 421 | } |
419 | 422 | ||
420 | KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit); | 423 | trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated); |
421 | 424 | ||
422 | if (advance) | 425 | if (advance) |
423 | vcpu->arch.pc += 4; /* Advance past emulated instruction. */ | 426 | vcpu->arch.pc += 4; /* Advance past emulated instruction. */ |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 2cf915e51e7e..2a4551f78f60 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -31,25 +31,17 @@ | |||
31 | #include "timing.h" | 31 | #include "timing.h" |
32 | #include "../mm/mmu_decl.h" | 32 | #include "../mm/mmu_decl.h" |
33 | 33 | ||
34 | #define CREATE_TRACE_POINTS | ||
35 | #include "trace.h" | ||
36 | |||
34 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | 37 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) |
35 | { | 38 | { |
36 | return gfn; | 39 | return gfn; |
37 | } | 40 | } |
38 | 41 | ||
39 | int kvm_cpu_has_interrupt(struct kvm_vcpu *v) | ||
40 | { | ||
41 | return !!(v->arch.pending_exceptions); | ||
42 | } | ||
43 | |||
44 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) | ||
45 | { | ||
46 | /* do real check here */ | ||
47 | return 1; | ||
48 | } | ||
49 | |||
50 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | 42 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
51 | { | 43 | { |
52 | return !(v->arch.msr & MSR_WE); | 44 | return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); |
53 | } | 45 | } |
54 | 46 | ||
55 | 47 | ||
@@ -122,13 +114,17 @@ struct kvm *kvm_arch_create_vm(void) | |||
122 | static void kvmppc_free_vcpus(struct kvm *kvm) | 114 | static void kvmppc_free_vcpus(struct kvm *kvm) |
123 | { | 115 | { |
124 | unsigned int i; | 116 | unsigned int i; |
117 | struct kvm_vcpu *vcpu; | ||
125 | 118 | ||
126 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 119 | kvm_for_each_vcpu(i, vcpu, kvm) |
127 | if (kvm->vcpus[i]) { | 120 | kvm_arch_vcpu_free(vcpu); |
128 | kvm_arch_vcpu_free(kvm->vcpus[i]); | 121 | |
129 | kvm->vcpus[i] = NULL; | 122 | mutex_lock(&kvm->lock); |
130 | } | 123 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) |
131 | } | 124 | kvm->vcpus[i] = NULL; |
125 | |||
126 | atomic_set(&kvm->online_vcpus, 0); | ||
127 | mutex_unlock(&kvm->lock); | ||
132 | } | 128 | } |
133 | 129 | ||
134 | void kvm_arch_sync_events(struct kvm *kvm) | 130 | void kvm_arch_sync_events(struct kvm *kvm) |
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h new file mode 100644 index 000000000000..67f219de0455 --- /dev/null +++ b/arch/powerpc/kvm/trace.h | |||
@@ -0,0 +1,104 @@ | |||
1 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_KVM_H | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | |||
6 | #undef TRACE_SYSTEM | ||
7 | #define TRACE_SYSTEM kvm | ||
8 | #define TRACE_INCLUDE_PATH . | ||
9 | #define TRACE_INCLUDE_FILE trace | ||
10 | |||
11 | /* | ||
12 | * Tracepoint for PPC instruction emulation. | ||
13 | */ | ||
14 | TRACE_EVENT(kvm_ppc_instr, | ||
15 | TP_PROTO(unsigned int inst, unsigned long pc, unsigned int emulate), | ||
16 | TP_ARGS(inst, pc, emulate), | ||
17 | |||
18 | TP_STRUCT__entry( | ||
19 | __field( unsigned int, inst ) | ||
20 | __field( unsigned long, pc ) | ||
21 | __field( unsigned int, emulate ) | ||
22 | ), | ||
23 | |||
24 | TP_fast_assign( | ||
25 | __entry->inst = inst; | ||
26 | __entry->pc = pc; | ||
27 | __entry->emulate = emulate; | ||
28 | ), | ||
29 | |||
30 | TP_printk("inst %u pc 0x%lx emulate %u\n", | ||
31 | __entry->inst, __entry->pc, __entry->emulate) | ||
32 | ); | ||
33 | |||
34 | TRACE_EVENT(kvm_stlb_inval, | ||
35 | TP_PROTO(unsigned int stlb_index), | ||
36 | TP_ARGS(stlb_index), | ||
37 | |||
38 | TP_STRUCT__entry( | ||
39 | __field( unsigned int, stlb_index ) | ||
40 | ), | ||
41 | |||
42 | TP_fast_assign( | ||
43 | __entry->stlb_index = stlb_index; | ||
44 | ), | ||
45 | |||
46 | TP_printk("stlb_index %u", __entry->stlb_index) | ||
47 | ); | ||
48 | |||
49 | TRACE_EVENT(kvm_stlb_write, | ||
50 | TP_PROTO(unsigned int victim, unsigned int tid, unsigned int word0, | ||
51 | unsigned int word1, unsigned int word2), | ||
52 | TP_ARGS(victim, tid, word0, word1, word2), | ||
53 | |||
54 | TP_STRUCT__entry( | ||
55 | __field( unsigned int, victim ) | ||
56 | __field( unsigned int, tid ) | ||
57 | __field( unsigned int, word0 ) | ||
58 | __field( unsigned int, word1 ) | ||
59 | __field( unsigned int, word2 ) | ||
60 | ), | ||
61 | |||
62 | TP_fast_assign( | ||
63 | __entry->victim = victim; | ||
64 | __entry->tid = tid; | ||
65 | __entry->word0 = word0; | ||
66 | __entry->word1 = word1; | ||
67 | __entry->word2 = word2; | ||
68 | ), | ||
69 | |||
70 | TP_printk("victim %u tid %u w0 %u w1 %u w2 %u", | ||
71 | __entry->victim, __entry->tid, __entry->word0, | ||
72 | __entry->word1, __entry->word2) | ||
73 | ); | ||
74 | |||
75 | TRACE_EVENT(kvm_gtlb_write, | ||
76 | TP_PROTO(unsigned int gtlb_index, unsigned int tid, unsigned int word0, | ||
77 | unsigned int word1, unsigned int word2), | ||
78 | TP_ARGS(gtlb_index, tid, word0, word1, word2), | ||
79 | |||
80 | TP_STRUCT__entry( | ||
81 | __field( unsigned int, gtlb_index ) | ||
82 | __field( unsigned int, tid ) | ||
83 | __field( unsigned int, word0 ) | ||
84 | __field( unsigned int, word1 ) | ||
85 | __field( unsigned int, word2 ) | ||
86 | ), | ||
87 | |||
88 | TP_fast_assign( | ||
89 | __entry->gtlb_index = gtlb_index; | ||
90 | __entry->tid = tid; | ||
91 | __entry->word0 = word0; | ||
92 | __entry->word1 = word1; | ||
93 | __entry->word2 = word2; | ||
94 | ), | ||
95 | |||
96 | TP_printk("gtlb_index %u tid %u w0 %u w1 %u w2 %u", | ||
97 | __entry->gtlb_index, __entry->tid, __entry->word0, | ||
98 | __entry->word1, __entry->word2) | ||
99 | ); | ||
100 | |||
101 | #endif /* _TRACE_KVM_H */ | ||
102 | |||
103 | /* This part must be outside protection */ | ||
104 | #include <trace/define_trace.h> | ||
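A minimal sketch (not part of the patch) of how the new tracepoints are instantiated and called; example_shadow_invalidate() is a hypothetical caller, but the pattern matches what the patch does in powerpc.c and the TLB/emulate files:

	/* Exactly one translation unit expands the events into real code
	 * (powerpc.c in this patch): */
	#define CREATE_TRACE_POINTS
	#include "trace.h"

	/* Other callers just include the header (hence the -I. added to
	 * CFLAGS, since TRACE_INCLUDE_PATH is ".") and call the generated
	 * stub, which is effectively a no-op until the kvm:kvm_stlb_inval
	 * event is enabled through the tracing interface. */
	static void example_shadow_invalidate(unsigned int stlb_index)	/* hypothetical */
	{
		trace_kvm_stlb_inval(stlb_index);
	}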
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 5b7038f248b6..a685652effeb 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize, | |||
92 | : "memory" ); | 92 | : "memory" ); |
93 | } | 93 | } |
94 | 94 | ||
95 | void slb_flush_and_rebolt(void) | 95 | static void __slb_flush_and_rebolt(void) |
96 | { | 96 | { |
97 | /* If you change this make sure you change SLB_NUM_BOLTED | 97 | /* If you change this make sure you change SLB_NUM_BOLTED |
98 | * appropriately too. */ | 98 | * appropriately too. */ |
99 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; | 99 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; |
100 | unsigned long ksp_esid_data, ksp_vsid_data; | 100 | unsigned long ksp_esid_data, ksp_vsid_data; |
101 | 101 | ||
102 | WARN_ON(!irqs_disabled()); | ||
103 | |||
104 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; | 102 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; |
105 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; | 103 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; |
106 | lflags = SLB_VSID_KERNEL | linear_llp; | 104 | lflags = SLB_VSID_KERNEL | linear_llp; |
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void) | |||
117 | ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; | 115 | ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; |
118 | } | 116 | } |
119 | 117 | ||
120 | /* | ||
121 | * We can't take a PMU exception in the following code, so hard | ||
122 | * disable interrupts. | ||
123 | */ | ||
124 | hard_irq_disable(); | ||
125 | |||
126 | /* We need to do this all in asm, so we're sure we don't touch | 118 | /* We need to do this all in asm, so we're sure we don't touch |
127 | * the stack between the slbia and rebolting it. */ | 119 | * the stack between the slbia and rebolting it. */ |
128 | asm volatile("isync\n" | 120 | asm volatile("isync\n" |
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void) | |||
139 | : "memory"); | 131 | : "memory"); |
140 | } | 132 | } |
141 | 133 | ||
134 | void slb_flush_and_rebolt(void) | ||
135 | { | ||
136 | |||
137 | WARN_ON(!irqs_disabled()); | ||
138 | |||
139 | /* | ||
140 | * We can't take a PMU exception in the following code, so hard | ||
141 | * disable interrupts. | ||
142 | */ | ||
143 | hard_irq_disable(); | ||
144 | |||
145 | __slb_flush_and_rebolt(); | ||
146 | get_paca()->slb_cache_ptr = 0; | ||
147 | } | ||
148 | |||
142 | void slb_vmalloc_update(void) | 149 | void slb_vmalloc_update(void) |
143 | { | 150 | { |
144 | unsigned long vflags; | 151 | unsigned long vflags; |
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2) | |||
180 | /* Flush all user entries from the segment table of the current processor. */ | 187 | /* Flush all user entries from the segment table of the current processor. */ |
181 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | 188 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) |
182 | { | 189 | { |
183 | unsigned long offset = get_paca()->slb_cache_ptr; | 190 | unsigned long offset; |
184 | unsigned long slbie_data = 0; | 191 | unsigned long slbie_data = 0; |
185 | unsigned long pc = KSTK_EIP(tsk); | 192 | unsigned long pc = KSTK_EIP(tsk); |
186 | unsigned long stack = KSTK_ESP(tsk); | 193 | unsigned long stack = KSTK_ESP(tsk); |
187 | unsigned long unmapped_base; | 194 | unsigned long unmapped_base; |
188 | 195 | ||
196 | /* | ||
197 | * We need interrupts hard-disabled here, not just soft-disabled, | ||
198 | * so that a PMU interrupt can't occur, which might try to access | ||
199 | * user memory (to get a stack trace) and possibly cause an SLB miss | ||
200 | * which would update the slb_cache/slb_cache_ptr fields in the PACA. | ||
201 | */ | ||
202 | hard_irq_disable(); | ||
203 | offset = get_paca()->slb_cache_ptr; | ||
189 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && | 204 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && |
190 | offset <= SLB_CACHE_ENTRIES) { | 205 | offset <= SLB_CACHE_ENTRIES) { |
191 | int i; | 206 | int i; |
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
200 | } | 215 | } |
201 | asm volatile("isync" : : : "memory"); | 216 | asm volatile("isync" : : : "memory"); |
202 | } else { | 217 | } else { |
203 | slb_flush_and_rebolt(); | 218 | __slb_flush_and_rebolt(); |
204 | } | 219 | } |
205 | 220 | ||
206 | /* Workaround POWER5 < DD2.1 issue */ | 221 | /* Workaround POWER5 < DD2.1 issue */ |
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 6e9b69c99856..687fddaa24c5 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c | |||
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
164 | { | 164 | { |
165 | struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; | 165 | struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; |
166 | struct stab_entry *ste; | 166 | struct stab_entry *ste; |
167 | unsigned long offset = __get_cpu_var(stab_cache_ptr); | 167 | unsigned long offset; |
168 | unsigned long pc = KSTK_EIP(tsk); | 168 | unsigned long pc = KSTK_EIP(tsk); |
169 | unsigned long stack = KSTK_ESP(tsk); | 169 | unsigned long stack = KSTK_ESP(tsk); |
170 | unsigned long unmapped_base; | 170 | unsigned long unmapped_base; |
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
172 | /* Force previous translations to complete. DRENG */ | 172 | /* Force previous translations to complete. DRENG */ |
173 | asm volatile("isync" : : : "memory"); | 173 | asm volatile("isync" : : : "memory"); |
174 | 174 | ||
175 | /* | ||
176 | * We need interrupts hard-disabled here, not just soft-disabled, | ||
177 | * so that a PMU interrupt can't occur, which might try to access | ||
178 | * user memory (to get a stack trace) and possibly cause an STAB miss | ||
179 | * which would update the stab_cache/stab_cache_ptr per-cpu variables. | ||
180 | */ | ||
181 | hard_irq_disable(); | ||
182 | |||
183 | offset = __get_cpu_var(stab_cache_ptr); | ||
175 | if (offset <= NR_STAB_CACHE_ENTRIES) { | 184 | if (offset <= NR_STAB_CACHE_ENTRIES) { |
176 | int i; | 185 | int i; |
177 | 186 | ||
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c index b178a1e66c91..40b5cb433005 100644 --- a/arch/powerpc/platforms/ps3/time.c +++ b/arch/powerpc/platforms/ps3/time.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | 23 | ||
24 | #include <asm/firmware.h> | ||
24 | #include <asm/rtc.h> | 25 | #include <asm/rtc.h> |
25 | #include <asm/lv1call.h> | 26 | #include <asm/lv1call.h> |
26 | #include <asm/ps3.h> | 27 | #include <asm/ps3.h> |
@@ -84,6 +85,9 @@ static int __init ps3_rtc_init(void) | |||
84 | { | 85 | { |
85 | struct platform_device *pdev; | 86 | struct platform_device *pdev; |
86 | 87 | ||
88 | if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) | ||
89 | return -ENODEV; | ||
90 | |||
87 | pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0); | 91 | pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0); |
88 | if (IS_ERR(pdev)) | 92 | if (IS_ERR(pdev)) |
89 | return PTR_ERR(pdev); | 93 | return PTR_ERR(pdev); |
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 237e3654f48c..464271bea6c9 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c | |||
@@ -65,6 +65,19 @@ static unsigned int qe_num_of_snum; | |||
65 | 65 | ||
66 | static phys_addr_t qebase = -1; | 66 | static phys_addr_t qebase = -1; |
67 | 67 | ||
68 | int qe_alive_during_sleep(void) | ||
69 | { | ||
70 | static int ret = -1; | ||
71 | |||
72 | if (ret != -1) | ||
73 | return ret; | ||
74 | |||
75 | ret = !of_find_compatible_node(NULL, NULL, "fsl,mpc8569-pmc"); | ||
76 | |||
77 | return ret; | ||
78 | } | ||
79 | EXPORT_SYMBOL(qe_alive_during_sleep); | ||
80 | |||
68 | phys_addr_t get_qe_base(void) | 81 | phys_addr_t get_qe_base(void) |
69 | { | 82 | { |
70 | struct device_node *qe; | 83 | struct device_node *qe; |
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 3ee1fd37bbfc..40edad520770 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c | |||
@@ -234,7 +234,6 @@ static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) | |||
234 | generic_handle_irq(cascade_irq); | 234 | generic_handle_irq(cascade_irq); |
235 | 235 | ||
236 | /* Let xilinx_intc end the interrupt */ | 236 | /* Let xilinx_intc end the interrupt */ |
237 | desc->chip->ack(irq); | ||
238 | desc->chip->unmask(irq); | 237 | desc->chip->unmask(irq); |
239 | } | 238 | } |
240 | 239 | ||