path: root/arch/sparc
author    Haavard Skinnemoen <haavard.skinnemoen@atmel.com>  2009-10-06 11:36:55 -0400
committer Haavard Skinnemoen <haavard.skinnemoen@atmel.com>  2009-10-06 11:36:55 -0400
commit    d94e5fcbf1420366dcb4102bafe04dbcfc0d0d4b (patch)
tree      a9b7de7df6da5c3132cc68169b9c47ba288ccd42 /arch/sparc
parent    d55651168a20078a94597a297d5cdfd807bf07b6 (diff)
parent    374576a8b6f865022c0fd1ca62396889b23d66dd (diff)
Merge commit 'v2.6.32-rc3'
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig | 20
-rw-r--r--  arch/sparc/Makefile | 16
-rw-r--r--  arch/sparc/boot/Makefile | 3
-rw-r--r--  arch/sparc/configs/sparc32_defconfig | 44
-rw-r--r--  arch/sparc/configs/sparc64_defconfig | 51
-rw-r--r--  arch/sparc/include/asm/agp.h | 4
-rw-r--r--  arch/sparc/include/asm/asi.h | 4
-rw-r--r--  arch/sparc/include/asm/device.h | 3
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h | 145
-rw-r--r--  arch/sparc/include/asm/irq_64.h | 4
-rw-r--r--  arch/sparc/include/asm/leon.h | 362
-rw-r--r--  arch/sparc/include/asm/leon_amba.h | 263
-rw-r--r--  arch/sparc/include/asm/machines.h | 6
-rw-r--r--  arch/sparc/include/asm/mman.h | 2
-rw-r--r--  arch/sparc/include/asm/nmi.h | 5
-rw-r--r--  arch/sparc/include/asm/pci.h | 3
-rw-r--r--  arch/sparc/include/asm/pci_32.h | 106
-rw-r--r--  arch/sparc/include/asm/pci_64.h | 89
-rw-r--r--  arch/sparc/include/asm/perf_event.h | 14
-rw-r--r--  arch/sparc/include/asm/pgtsrmmu.h | 4
-rw-r--r--  arch/sparc/include/asm/prom.h | 3
-rw-r--r--  arch/sparc/include/asm/smp_64.h | 1
-rw-r--r--  arch/sparc/include/asm/socket.h | 3
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h | 12
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h | 28
-rw-r--r--  arch/sparc/include/asm/system_32.h | 1
-rw-r--r--  arch/sparc/include/asm/system_64.h | 4
-rw-r--r--  arch/sparc/include/asm/topology_64.h | 23
-rw-r--r--  arch/sparc/include/asm/types.h | 27
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h | 2
-rw-r--r--  arch/sparc/include/asm/unistd.h | 3
-rw-r--r--  arch/sparc/include/asm/vio.h | 2
-rw-r--r--  arch/sparc/kernel/Makefile | 13
-rw-r--r--  arch/sparc/kernel/cpu.c | 5
-rw-r--r--  arch/sparc/kernel/dma.c | 175
-rw-r--r--  arch/sparc/kernel/dma.h | 14
-rw-r--r--  arch/sparc/kernel/head_32.S | 5
-rw-r--r--  arch/sparc/kernel/idprom.c | 2
-rw-r--r--  arch/sparc/kernel/init_task.c | 5
-rw-r--r--  arch/sparc/kernel/iommu.c | 20
-rw-r--r--  arch/sparc/kernel/ioport.c | 222
-rw-r--r--  arch/sparc/kernel/irq_32.c | 5
-rw-r--r--  arch/sparc/kernel/irq_64.c | 4
-rw-r--r--  arch/sparc/kernel/leon_kernel.c | 203
-rw-r--r--  arch/sparc/kernel/nmi.c | 74
-rw-r--r--  arch/sparc/kernel/of_device_32.c | 40
-rw-r--r--  arch/sparc/kernel/pci.c | 2
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 30
-rw-r--r--  arch/sparc/kernel/pcr.c | 16
-rw-r--r--  arch/sparc/kernel/perf_event.c | 556
-rw-r--r--  arch/sparc/kernel/process_64.c | 4
-rw-r--r--  arch/sparc/kernel/prom_32.c | 33
-rw-r--r--  arch/sparc/kernel/prom_common.c | 10
-rw-r--r--  arch/sparc/kernel/setup_32.c | 7
-rw-r--r--  arch/sparc/kernel/setup_64.c | 2
-rw-r--r--  arch/sparc/kernel/signal_32.c | 2
-rw-r--r--  arch/sparc/kernel/signal_64.c | 3
-rw-r--r--  arch/sparc/kernel/smp_64.c | 155
-rw-r--r--  arch/sparc/kernel/sys32.S | 2
-rw-r--r--  arch/sparc/kernel/sys_sparc32.c | 1
-rw-r--r--  arch/sparc/kernel/sysfs.c | 1
-rw-r--r--  arch/sparc/kernel/systbls.h | 3
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 4
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S | 83
-rw-r--r--  arch/sparc/mm/Makefile | 1
-rw-r--r--  arch/sparc/mm/init_32.c | 6
-rw-r--r--  arch/sparc/mm/leon_mm.c | 260
-rw-r--r--  arch/sparc/mm/loadmmu.c | 1
-rw-r--r--  arch/sparc/mm/srmmu.c | 53
-rw-r--r--  arch/sparc/oprofile/init.c | 4
-rw-r--r--  arch/sparc/prom/misc_64.c | 2
-rw-r--r--  arch/sparc/prom/printf.c | 7
73 files changed, 2310 insertions, 984 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3f8b6a92eabd..ac45aab741a5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,6 +25,9 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
+	select HAVE_PERF_EVENTS
+	select HAVE_DMA_ATTRS
+	select HAVE_DMA_API_DEBUG
 
 config SPARC32
 	def_bool !64BIT
@@ -44,6 +47,7 @@ config SPARC64
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
 	select RTC_DRV_STARFIRE
+	select HAVE_PERF_EVENTS
 
 config ARCH_DEFCONFIG
 	string
@@ -95,7 +99,10 @@ config AUDIT_ARCH
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y if SPARC64
 
-config HAVE_DYNAMIC_PER_CPU_AREA
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+	def_bool y if SPARC64
+
+config NEED_PER_CPU_PAGE_FIRST_CHUNK
 	def_bool y if SPARC64
 
 config GENERIC_HARDIRQS_NO__DO_IRQ
@@ -437,6 +444,17 @@ config SERIAL_CONSOLE
 
 	  If unsure, say N.
 
+config SPARC_LEON
+	bool "Sparc Leon processor family"
+	depends on SPARC32
+	---help---
+	  If you say Y here if you are running on a SPARC-LEON processor.
+	  The LEON processor is a synthesizable VHDL model of the
+	  SPARC-v8 standard. LEON is part of the GRLIB collection of
+	  IP cores that are distributed under GPL. GRLIB can be downloaded
+	  from www.gaisler.com. You can download a sparc-linux cross-compilation
+	  toolchain at www.gaisler.com.
+
 endmenu
 
 menu "Bus options (PCI etc.)"
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 2003ded054c2..dfe272d14465 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -31,17 +31,12 @@ export BITS := 32
 #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
 KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
 KBUILD_AFLAGS += -m32
-CPPFLAGS_vmlinux.lds += -m32
 
 #LDFLAGS_vmlinux = -N -Ttext 0xf0004000
 # Since 2.5.40, the first stage is left not btfix-ed.
 # Actual linking is done with "make image".
 LDFLAGS_vmlinux = -r
 
-# Default target
-all: zImage
-
-
 else
 #####
 # sparc64
@@ -49,9 +44,6 @@ else
 
 CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
 
-# Undefine sparc when processing vmlinux.lds - it is used
-# And teach CPP we are doing 64 bit builds (for this case)
-CPPFLAGS_vmlinux.lds += -m64 -Usparc
 LDFLAGS := -m elf64_sparc
 export BITS := 64
 
@@ -91,6 +83,9 @@ endif
 
 boot := arch/sparc/boot
 
+# Default target
+all: zImage
+
 image zImage tftpboot.img vmlinux.aout: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
@@ -109,8 +104,9 @@ define archhelp
 endef
 else
 define archhelp
-  echo '* vmlinux - Standard sparc64 kernel'
-  echo ' vmlinux.aout - a.out kernel for sparc64'
+  echo '* vmlinux - standard sparc64 kernel'
+  echo '* zImage - stripped and compressed sparc64 kernel ($(boot)/zImage)'
+  echo ' vmlinux.aout - a.out kernel for sparc64'
   echo ' tftpboot.img - image prepared for tftp'
 endef
 endif
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 1ff0fd924756..97e3feb9ff1b 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -79,6 +79,9 @@ $(obj)/image: vmlinux FORCE
 	$(call if_changed,strip)
 	@echo ' kernel: $@ is ready'
 
+$(obj)/zImage: $(obj)/image
+	$(call if_changed,gzip)
+
 $(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
 	$(call if_changed,elftoaout)
 	$(call if_changed,piggy)
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index a0f62a808edb..983d59824a28 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31-rc1 3# Linux kernel version: 2.6.31
4# Tue Aug 18 23:45:52 2009 4# Wed Sep 16 00:03:43 2009
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_SPARC=y 7CONFIG_SPARC=y
@@ -39,11 +39,12 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
39# 39#
40# RCU Subsystem 40# RCU Subsystem
41# 41#
42CONFIG_CLASSIC_RCU=y 42CONFIG_TREE_RCU=y
43# CONFIG_TREE_RCU is not set 43# CONFIG_TREE_PREEMPT_RCU is not set
44# CONFIG_PREEMPT_RCU is not set 44# CONFIG_RCU_TRACE is not set
45CONFIG_RCU_FANOUT=32
46# CONFIG_RCU_FANOUT_EXACT is not set
45# CONFIG_TREE_RCU_TRACE is not set 47# CONFIG_TREE_RCU_TRACE is not set
46# CONFIG_PREEMPT_RCU_TRACE is not set
47# CONFIG_IKCONFIG is not set 48# CONFIG_IKCONFIG is not set
48CONFIG_LOG_BUF_SHIFT=14 49CONFIG_LOG_BUF_SHIFT=14
49CONFIG_GROUP_SCHED=y 50CONFIG_GROUP_SCHED=y
@@ -87,10 +88,12 @@ CONFIG_TIMERFD=y
87CONFIG_EVENTFD=y 88CONFIG_EVENTFD=y
88CONFIG_SHMEM=y 89CONFIG_SHMEM=y
89CONFIG_AIO=y 90CONFIG_AIO=y
91CONFIG_HAVE_PERF_COUNTERS=y
90 92
91# 93#
92# Performance Counters 94# Performance Counters
93# 95#
96# CONFIG_PERF_COUNTERS is not set
94CONFIG_VM_EVENT_COUNTERS=y 97CONFIG_VM_EVENT_COUNTERS=y
95CONFIG_PCI_QUIRKS=y 98CONFIG_PCI_QUIRKS=y
96# CONFIG_STRIP_ASM_SYMS is not set 99# CONFIG_STRIP_ASM_SYMS is not set
@@ -102,6 +105,8 @@ CONFIG_SLAB=y
102# CONFIG_MARKERS is not set 105# CONFIG_MARKERS is not set
103CONFIG_HAVE_OPROFILE=y 106CONFIG_HAVE_OPROFILE=y
104CONFIG_HAVE_ARCH_TRACEHOOK=y 107CONFIG_HAVE_ARCH_TRACEHOOK=y
108CONFIG_HAVE_DMA_ATTRS=y
109CONFIG_HAVE_DMA_API_DEBUG=y
105 110
106# 111#
107# GCOV-based kernel profiling 112# GCOV-based kernel profiling
@@ -169,6 +174,7 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
169CONFIG_SUN_PM=y 174CONFIG_SUN_PM=y
170# CONFIG_SPARC_LED is not set 175# CONFIG_SPARC_LED is not set
171CONFIG_SERIAL_CONSOLE=y 176CONFIG_SERIAL_CONSOLE=y
177# CONFIG_SPARC_LEON is not set
172 178
173# 179#
174# Bus options (PCI etc.) 180# Bus options (PCI etc.)
@@ -259,6 +265,7 @@ CONFIG_IPV6_TUNNEL=m
259# CONFIG_NETFILTER is not set 265# CONFIG_NETFILTER is not set
260# CONFIG_IP_DCCP is not set 266# CONFIG_IP_DCCP is not set
261# CONFIG_IP_SCTP is not set 267# CONFIG_IP_SCTP is not set
268# CONFIG_RDS is not set
262# CONFIG_TIPC is not set 269# CONFIG_TIPC is not set
263# CONFIG_ATM is not set 270# CONFIG_ATM is not set
264# CONFIG_BRIDGE is not set 271# CONFIG_BRIDGE is not set
@@ -288,6 +295,7 @@ CONFIG_NET_PKTGEN=m
288# CONFIG_AF_RXRPC is not set 295# CONFIG_AF_RXRPC is not set
289CONFIG_WIRELESS=y 296CONFIG_WIRELESS=y
290# CONFIG_CFG80211 is not set 297# CONFIG_CFG80211 is not set
298CONFIG_CFG80211_DEFAULT_PS_VALUE=0
291CONFIG_WIRELESS_OLD_REGULATORY=y 299CONFIG_WIRELESS_OLD_REGULATORY=y
292# CONFIG_WIRELESS_EXT is not set 300# CONFIG_WIRELESS_EXT is not set
293# CONFIG_LIB80211 is not set 301# CONFIG_LIB80211 is not set
@@ -295,7 +303,6 @@ CONFIG_WIRELESS_OLD_REGULATORY=y
295# 303#
296# CFG80211 needs to be enabled for MAC80211 304# CFG80211 needs to be enabled for MAC80211
297# 305#
298CONFIG_MAC80211_DEFAULT_PS_VALUE=0
299# CONFIG_WIMAX is not set 306# CONFIG_WIMAX is not set
300# CONFIG_RFKILL is not set 307# CONFIG_RFKILL is not set
301# CONFIG_NET_9P is not set 308# CONFIG_NET_9P is not set
@@ -426,6 +433,7 @@ CONFIG_SCSI_QLOGICPTI=m
426# CONFIG_SCSI_NSP32 is not set 433# CONFIG_SCSI_NSP32 is not set
427# CONFIG_SCSI_DEBUG is not set 434# CONFIG_SCSI_DEBUG is not set
428CONFIG_SCSI_SUNESP=y 435CONFIG_SCSI_SUNESP=y
436# CONFIG_SCSI_PMCRAID is not set
429# CONFIG_SCSI_SRP is not set 437# CONFIG_SCSI_SRP is not set
430# CONFIG_SCSI_DH is not set 438# CONFIG_SCSI_DH is not set
431# CONFIG_SCSI_OSD_INITIATOR is not set 439# CONFIG_SCSI_OSD_INITIATOR is not set
@@ -524,12 +532,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
524# CONFIG_SFC is not set 532# CONFIG_SFC is not set
525# CONFIG_BE2NET is not set 533# CONFIG_BE2NET is not set
526# CONFIG_TR is not set 534# CONFIG_TR is not set
527 535# CONFIG_WLAN is not set
528#
529# Wireless LAN
530#
531# CONFIG_WLAN_PRE80211 is not set
532# CONFIG_WLAN_80211 is not set
533 536
534# 537#
535# Enable WiMAX (Networking options) to see the WiMAX drivers 538# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -569,11 +572,11 @@ CONFIG_INPUT_EVBUG=m
569# 572#
570CONFIG_INPUT_KEYBOARD=y 573CONFIG_INPUT_KEYBOARD=y
571CONFIG_KEYBOARD_ATKBD=m 574CONFIG_KEYBOARD_ATKBD=m
572CONFIG_KEYBOARD_SUNKBD=m
573# CONFIG_KEYBOARD_LKKBD is not set 575# CONFIG_KEYBOARD_LKKBD is not set
574# CONFIG_KEYBOARD_XTKBD is not set
575# CONFIG_KEYBOARD_NEWTON is not set 576# CONFIG_KEYBOARD_NEWTON is not set
576# CONFIG_KEYBOARD_STOWAWAY is not set 577# CONFIG_KEYBOARD_STOWAWAY is not set
578CONFIG_KEYBOARD_SUNKBD=m
579# CONFIG_KEYBOARD_XTKBD is not set
577CONFIG_INPUT_MOUSE=y 580CONFIG_INPUT_MOUSE=y
578CONFIG_MOUSE_PS2=m 581CONFIG_MOUSE_PS2=m
579CONFIG_MOUSE_PS2_ALPS=y 582CONFIG_MOUSE_PS2_ALPS=y
@@ -581,6 +584,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
581CONFIG_MOUSE_PS2_SYNAPTICS=y 584CONFIG_MOUSE_PS2_SYNAPTICS=y
582CONFIG_MOUSE_PS2_TRACKPOINT=y 585CONFIG_MOUSE_PS2_TRACKPOINT=y
583# CONFIG_MOUSE_PS2_ELANTECH is not set 586# CONFIG_MOUSE_PS2_ELANTECH is not set
587# CONFIG_MOUSE_PS2_SENTELIC is not set
584# CONFIG_MOUSE_PS2_TOUCHKIT is not set 588# CONFIG_MOUSE_PS2_TOUCHKIT is not set
585CONFIG_MOUSE_SERIAL=m 589CONFIG_MOUSE_SERIAL=m
586# CONFIG_MOUSE_APPLETOUCH is not set 590# CONFIG_MOUSE_APPLETOUCH is not set
@@ -708,12 +712,10 @@ CONFIG_SSB_POSSIBLE=y
708# 712#
709# Console display driver support 713# Console display driver support
710# 714#
711# CONFIG_PROM_CONSOLE is not set
712CONFIG_DUMMY_CONSOLE=y 715CONFIG_DUMMY_CONSOLE=y
713# CONFIG_SOUND is not set 716# CONFIG_SOUND is not set
714CONFIG_HID_SUPPORT=y 717CONFIG_HID_SUPPORT=y
715CONFIG_HID=y 718CONFIG_HID=y
716# CONFIG_HID_DEBUG is not set
717# CONFIG_HIDRAW is not set 719# CONFIG_HIDRAW is not set
718# CONFIG_HID_PID is not set 720# CONFIG_HID_PID is not set
719 721
@@ -814,6 +816,7 @@ CONFIG_FS_POSIX_ACL=y
814# CONFIG_GFS2_FS is not set 816# CONFIG_GFS2_FS is not set
815# CONFIG_OCFS2_FS is not set 817# CONFIG_OCFS2_FS is not set
816# CONFIG_BTRFS_FS is not set 818# CONFIG_BTRFS_FS is not set
819# CONFIG_NILFS2_FS is not set
817CONFIG_FILE_LOCKING=y 820CONFIG_FILE_LOCKING=y
818CONFIG_FSNOTIFY=y 821CONFIG_FSNOTIFY=y
819CONFIG_DNOTIFY=y 822CONFIG_DNOTIFY=y
@@ -877,7 +880,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
877CONFIG_ROMFS_ON_BLOCK=y 880CONFIG_ROMFS_ON_BLOCK=y
878# CONFIG_SYSV_FS is not set 881# CONFIG_SYSV_FS is not set
879# CONFIG_UFS_FS is not set 882# CONFIG_UFS_FS is not set
880# CONFIG_NILFS2_FS is not set
881CONFIG_NETWORK_FILESYSTEMS=y 883CONFIG_NETWORK_FILESYSTEMS=y
882CONFIG_NFS_FS=y 884CONFIG_NFS_FS=y
883# CONFIG_NFS_V3 is not set 885# CONFIG_NFS_V3 is not set
@@ -984,14 +986,17 @@ CONFIG_DEBUG_MEMORY_INIT=y
984# CONFIG_DEBUG_LIST is not set 986# CONFIG_DEBUG_LIST is not set
985# CONFIG_DEBUG_SG is not set 987# CONFIG_DEBUG_SG is not set
986# CONFIG_DEBUG_NOTIFIERS is not set 988# CONFIG_DEBUG_NOTIFIERS is not set
989# CONFIG_DEBUG_CREDENTIALS is not set
987# CONFIG_BOOT_PRINTK_DELAY is not set 990# CONFIG_BOOT_PRINTK_DELAY is not set
988# CONFIG_RCU_TORTURE_TEST is not set 991# CONFIG_RCU_TORTURE_TEST is not set
989# CONFIG_RCU_CPU_STALL_DETECTOR is not set 992# CONFIG_RCU_CPU_STALL_DETECTOR is not set
990# CONFIG_BACKTRACE_SELF_TEST is not set 993# CONFIG_BACKTRACE_SELF_TEST is not set
991# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 994# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
995# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
992# CONFIG_FAULT_INJECTION is not set 996# CONFIG_FAULT_INJECTION is not set
993# CONFIG_SYSCTL_SYSCALL_CHECK is not set 997# CONFIG_SYSCTL_SYSCALL_CHECK is not set
994# CONFIG_PAGE_POISONING is not set 998# CONFIG_PAGE_POISONING is not set
999# CONFIG_DMA_API_DEBUG is not set
995# CONFIG_SAMPLES is not set 1000# CONFIG_SAMPLES is not set
996CONFIG_HAVE_ARCH_KGDB=y 1001CONFIG_HAVE_ARCH_KGDB=y
997CONFIG_KGDB=y 1002CONFIG_KGDB=y
@@ -1014,7 +1019,6 @@ CONFIG_CRYPTO=y
1014# 1019#
1015# Crypto core or helper 1020# Crypto core or helper
1016# 1021#
1017# CONFIG_CRYPTO_FIPS is not set
1018CONFIG_CRYPTO_ALGAPI=y 1022CONFIG_CRYPTO_ALGAPI=y
1019CONFIG_CRYPTO_ALGAPI2=y 1023CONFIG_CRYPTO_ALGAPI2=y
1020CONFIG_CRYPTO_AEAD=y 1024CONFIG_CRYPTO_AEAD=y
@@ -1057,11 +1061,13 @@ CONFIG_CRYPTO_PCBC=m
1057# 1061#
1058CONFIG_CRYPTO_HMAC=y 1062CONFIG_CRYPTO_HMAC=y
1059# CONFIG_CRYPTO_XCBC is not set 1063# CONFIG_CRYPTO_XCBC is not set
1064# CONFIG_CRYPTO_VMAC is not set
1060 1065
1061# 1066#
1062# Digest 1067# Digest
1063# 1068#
1064CONFIG_CRYPTO_CRC32C=m 1069CONFIG_CRYPTO_CRC32C=m
1070# CONFIG_CRYPTO_GHASH is not set
1065CONFIG_CRYPTO_MD4=y 1071CONFIG_CRYPTO_MD4=y
1066CONFIG_CRYPTO_MD5=y 1072CONFIG_CRYPTO_MD5=y
1067CONFIG_CRYPTO_MICHAEL_MIC=m 1073CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index fdddf7a6f725..f80b881dfea7 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31-rc1 3# Linux kernel version: 2.6.31
4# Tue Aug 18 23:56:02 2009 4# Tue Sep 15 17:06:03 2009
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7CONFIG_SPARC=y 7CONFIG_SPARC=y
@@ -19,7 +19,7 @@ CONFIG_LOCKDEP_SUPPORT=y
19CONFIG_HAVE_LATENCYTOP_SUPPORT=y 19CONFIG_HAVE_LATENCYTOP_SUPPORT=y
20CONFIG_AUDIT_ARCH=y 20CONFIG_AUDIT_ARCH=y
21CONFIG_HAVE_SETUP_PER_CPU_AREA=y 21CONFIG_HAVE_SETUP_PER_CPU_AREA=y
22CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y 22CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
23CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 23CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
24CONFIG_MMU=y 24CONFIG_MMU=y
25CONFIG_ARCH_NO_VIRT_TO_BUS=y 25CONFIG_ARCH_NO_VIRT_TO_BUS=y
@@ -48,11 +48,12 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
48# 48#
49# RCU Subsystem 49# RCU Subsystem
50# 50#
51CONFIG_CLASSIC_RCU=y 51CONFIG_TREE_RCU=y
52# CONFIG_TREE_RCU is not set 52# CONFIG_TREE_PREEMPT_RCU is not set
53# CONFIG_PREEMPT_RCU is not set 53# CONFIG_RCU_TRACE is not set
54CONFIG_RCU_FANOUT=64
55# CONFIG_RCU_FANOUT_EXACT is not set
54# CONFIG_TREE_RCU_TRACE is not set 56# CONFIG_TREE_RCU_TRACE is not set
55# CONFIG_PREEMPT_RCU_TRACE is not set
56# CONFIG_IKCONFIG is not set 57# CONFIG_IKCONFIG is not set
57CONFIG_LOG_BUF_SHIFT=18 58CONFIG_LOG_BUF_SHIFT=18
58CONFIG_GROUP_SCHED=y 59CONFIG_GROUP_SCHED=y
@@ -96,10 +97,13 @@ CONFIG_TIMERFD=y
96CONFIG_EVENTFD=y 97CONFIG_EVENTFD=y
97CONFIG_SHMEM=y 98CONFIG_SHMEM=y
98CONFIG_AIO=y 99CONFIG_AIO=y
100CONFIG_HAVE_PERF_COUNTERS=y
99 101
100# 102#
101# Performance Counters 103# Performance Counters
102# 104#
105CONFIG_PERF_COUNTERS=y
106CONFIG_EVENT_PROFILE=y
103CONFIG_VM_EVENT_COUNTERS=y 107CONFIG_VM_EVENT_COUNTERS=y
104CONFIG_PCI_QUIRKS=y 108CONFIG_PCI_QUIRKS=y
105CONFIG_SLUB_DEBUG=y 109CONFIG_SLUB_DEBUG=y
@@ -119,7 +123,9 @@ CONFIG_KRETPROBES=y
119CONFIG_HAVE_KPROBES=y 123CONFIG_HAVE_KPROBES=y
120CONFIG_HAVE_KRETPROBES=y 124CONFIG_HAVE_KRETPROBES=y
121CONFIG_HAVE_ARCH_TRACEHOOK=y 125CONFIG_HAVE_ARCH_TRACEHOOK=y
126CONFIG_HAVE_DMA_ATTRS=y
122CONFIG_USE_GENERIC_SMP_HELPERS=y 127CONFIG_USE_GENERIC_SMP_HELPERS=y
128CONFIG_HAVE_DMA_API_DEBUG=y
123 129
124# 130#
125# GCOV-based kernel profiling 131# GCOV-based kernel profiling
@@ -317,6 +323,7 @@ CONFIG_IPV6_TUNNEL=m
317# CONFIG_NETFILTER is not set 323# CONFIG_NETFILTER is not set
318# CONFIG_IP_DCCP is not set 324# CONFIG_IP_DCCP is not set
319# CONFIG_IP_SCTP is not set 325# CONFIG_IP_SCTP is not set
326# CONFIG_RDS is not set
320# CONFIG_TIPC is not set 327# CONFIG_TIPC is not set
321# CONFIG_ATM is not set 328# CONFIG_ATM is not set
322# CONFIG_BRIDGE is not set 329# CONFIG_BRIDGE is not set
@@ -349,6 +356,7 @@ CONFIG_NET_TCPPROBE=m
349# CONFIG_AF_RXRPC is not set 356# CONFIG_AF_RXRPC is not set
350CONFIG_WIRELESS=y 357CONFIG_WIRELESS=y
351# CONFIG_CFG80211 is not set 358# CONFIG_CFG80211 is not set
359CONFIG_CFG80211_DEFAULT_PS_VALUE=0
352CONFIG_WIRELESS_OLD_REGULATORY=y 360CONFIG_WIRELESS_OLD_REGULATORY=y
353# CONFIG_WIRELESS_EXT is not set 361# CONFIG_WIRELESS_EXT is not set
354# CONFIG_LIB80211 is not set 362# CONFIG_LIB80211 is not set
@@ -356,7 +364,6 @@ CONFIG_WIRELESS_OLD_REGULATORY=y
356# 364#
357# CFG80211 needs to be enabled for MAC80211 365# CFG80211 needs to be enabled for MAC80211
358# 366#
359CONFIG_MAC80211_DEFAULT_PS_VALUE=0
360# CONFIG_WIMAX is not set 367# CONFIG_WIMAX is not set
361# CONFIG_RFKILL is not set 368# CONFIG_RFKILL is not set
362# CONFIG_NET_9P is not set 369# CONFIG_NET_9P is not set
@@ -549,6 +556,7 @@ CONFIG_SCSI_LOWLEVEL=y
549# CONFIG_SCSI_DC390T is not set 556# CONFIG_SCSI_DC390T is not set
550# CONFIG_SCSI_DEBUG is not set 557# CONFIG_SCSI_DEBUG is not set
551# CONFIG_SCSI_SUNESP is not set 558# CONFIG_SCSI_SUNESP is not set
559# CONFIG_SCSI_PMCRAID is not set
552# CONFIG_SCSI_SRP is not set 560# CONFIG_SCSI_SRP is not set
553# CONFIG_SCSI_DH is not set 561# CONFIG_SCSI_DH is not set
554# CONFIG_SCSI_OSD_INITIATOR is not set 562# CONFIG_SCSI_OSD_INITIATOR is not set
@@ -704,12 +712,7 @@ CONFIG_NIU=m
704# CONFIG_SFC is not set 712# CONFIG_SFC is not set
705# CONFIG_BE2NET is not set 713# CONFIG_BE2NET is not set
706# CONFIG_TR is not set 714# CONFIG_TR is not set
707 715# CONFIG_WLAN is not set
708#
709# Wireless LAN
710#
711# CONFIG_WLAN_PRE80211 is not set
712# CONFIG_WLAN_80211 is not set
713 716
714# 717#
715# Enable WiMAX (Networking options) to see the WiMAX drivers 718# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -768,11 +771,11 @@ CONFIG_INPUT_EVDEV=y
768# 771#
769CONFIG_INPUT_KEYBOARD=y 772CONFIG_INPUT_KEYBOARD=y
770CONFIG_KEYBOARD_ATKBD=y 773CONFIG_KEYBOARD_ATKBD=y
771CONFIG_KEYBOARD_SUNKBD=y
772CONFIG_KEYBOARD_LKKBD=m 774CONFIG_KEYBOARD_LKKBD=m
773# CONFIG_KEYBOARD_XTKBD is not set
774# CONFIG_KEYBOARD_NEWTON is not set 775# CONFIG_KEYBOARD_NEWTON is not set
775# CONFIG_KEYBOARD_STOWAWAY is not set 776# CONFIG_KEYBOARD_STOWAWAY is not set
777CONFIG_KEYBOARD_SUNKBD=y
778# CONFIG_KEYBOARD_XTKBD is not set
776CONFIG_INPUT_MOUSE=y 779CONFIG_INPUT_MOUSE=y
777CONFIG_MOUSE_PS2=y 780CONFIG_MOUSE_PS2=y
778CONFIG_MOUSE_PS2_ALPS=y 781CONFIG_MOUSE_PS2_ALPS=y
@@ -780,6 +783,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
780CONFIG_MOUSE_PS2_SYNAPTICS=y 783CONFIG_MOUSE_PS2_SYNAPTICS=y
781CONFIG_MOUSE_PS2_TRACKPOINT=y 784CONFIG_MOUSE_PS2_TRACKPOINT=y
782# CONFIG_MOUSE_PS2_ELANTECH is not set 785# CONFIG_MOUSE_PS2_ELANTECH is not set
786# CONFIG_MOUSE_PS2_SENTELIC is not set
783# CONFIG_MOUSE_PS2_TOUCHKIT is not set 787# CONFIG_MOUSE_PS2_TOUCHKIT is not set
784CONFIG_MOUSE_SERIAL=y 788CONFIG_MOUSE_SERIAL=y
785# CONFIG_MOUSE_APPLETOUCH is not set 789# CONFIG_MOUSE_APPLETOUCH is not set
@@ -883,7 +887,6 @@ CONFIG_I2C_ALGOBIT=y
883# 887#
884# I2C system bus drivers (mostly embedded / system-on-chip) 888# I2C system bus drivers (mostly embedded / system-on-chip)
885# 889#
886# CONFIG_I2C_DESIGNWARE is not set
887# CONFIG_I2C_OCORES is not set 890# CONFIG_I2C_OCORES is not set
888# CONFIG_I2C_SIMTEC is not set 891# CONFIG_I2C_SIMTEC is not set
889 892
@@ -1102,7 +1105,6 @@ CONFIG_FB_ATY_GX=y
1102# 1105#
1103# Console display driver support 1106# Console display driver support
1104# 1107#
1105# CONFIG_PROM_CONSOLE is not set
1106CONFIG_DUMMY_CONSOLE=y 1108CONFIG_DUMMY_CONSOLE=y
1107CONFIG_FRAMEBUFFER_CONSOLE=y 1109CONFIG_FRAMEBUFFER_CONSOLE=y
1108CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y 1110CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -1124,6 +1126,7 @@ CONFIG_LOGO=y
1124CONFIG_LOGO_SUN_CLUT224=y 1126CONFIG_LOGO_SUN_CLUT224=y
1125CONFIG_SOUND=m 1127CONFIG_SOUND=m
1126CONFIG_SOUND_OSS_CORE=y 1128CONFIG_SOUND_OSS_CORE=y
1129CONFIG_SOUND_OSS_CORE_PRECLAIM=y
1127CONFIG_SND=m 1130CONFIG_SND=m
1128CONFIG_SND_TIMER=m 1131CONFIG_SND_TIMER=m
1129CONFIG_SND_PCM=m 1132CONFIG_SND_PCM=m
@@ -1232,7 +1235,6 @@ CONFIG_SND_SUN_CS4231=m
1232CONFIG_AC97_BUS=m 1235CONFIG_AC97_BUS=m
1233CONFIG_HID_SUPPORT=y 1236CONFIG_HID_SUPPORT=y
1234CONFIG_HID=y 1237CONFIG_HID=y
1235# CONFIG_HID_DEBUG is not set
1236# CONFIG_HIDRAW is not set 1238# CONFIG_HIDRAW is not set
1237 1239
1238# 1240#
@@ -1256,6 +1258,7 @@ CONFIG_HID_DRAGONRISE=y
1256CONFIG_HID_EZKEY=y 1258CONFIG_HID_EZKEY=y
1257CONFIG_HID_KYE=y 1259CONFIG_HID_KYE=y
1258CONFIG_HID_GYRATION=y 1260CONFIG_HID_GYRATION=y
1261CONFIG_HID_TWINHAN=y
1259CONFIG_HID_KENSINGTON=y 1262CONFIG_HID_KENSINGTON=y
1260CONFIG_HID_LOGITECH=y 1263CONFIG_HID_LOGITECH=y
1261# CONFIG_LOGITECH_FF is not set 1264# CONFIG_LOGITECH_FF is not set
@@ -1289,6 +1292,7 @@ CONFIG_USB=y
1289# 1292#
1290# Miscellaneous USB options 1293# Miscellaneous USB options
1291# 1294#
1295# CONFIG_USB_DEVICEFS is not set
1292# CONFIG_USB_DEVICE_CLASS is not set 1296# CONFIG_USB_DEVICE_CLASS is not set
1293# CONFIG_USB_DYNAMIC_MINORS is not set 1297# CONFIG_USB_DYNAMIC_MINORS is not set
1294# CONFIG_USB_OTG is not set 1298# CONFIG_USB_OTG is not set
@@ -1379,6 +1383,7 @@ CONFIG_USB_STORAGE=m
1379# CONFIG_USB_LD is not set 1383# CONFIG_USB_LD is not set
1380# CONFIG_USB_TRANCEVIBRATOR is not set 1384# CONFIG_USB_TRANCEVIBRATOR is not set
1381# CONFIG_USB_IOWARRIOR is not set 1385# CONFIG_USB_IOWARRIOR is not set
1386# CONFIG_USB_TEST is not set
1382# CONFIG_USB_ISIGHTFW is not set 1387# CONFIG_USB_ISIGHTFW is not set
1383# CONFIG_USB_VST is not set 1388# CONFIG_USB_VST is not set
1384# CONFIG_USB_GADGET is not set 1389# CONFIG_USB_GADGET is not set
@@ -1493,6 +1498,7 @@ CONFIG_FS_POSIX_ACL=y
1493# CONFIG_GFS2_FS is not set 1498# CONFIG_GFS2_FS is not set
1494# CONFIG_OCFS2_FS is not set 1499# CONFIG_OCFS2_FS is not set
1495# CONFIG_BTRFS_FS is not set 1500# CONFIG_BTRFS_FS is not set
1501# CONFIG_NILFS2_FS is not set
1496CONFIG_FILE_LOCKING=y 1502CONFIG_FILE_LOCKING=y
1497CONFIG_FSNOTIFY=y 1503CONFIG_FSNOTIFY=y
1498CONFIG_DNOTIFY=y 1504CONFIG_DNOTIFY=y
@@ -1553,7 +1559,6 @@ CONFIG_MISC_FILESYSTEMS=y
1553# CONFIG_ROMFS_FS is not set 1559# CONFIG_ROMFS_FS is not set
1554# CONFIG_SYSV_FS is not set 1560# CONFIG_SYSV_FS is not set
1555# CONFIG_UFS_FS is not set 1561# CONFIG_UFS_FS is not set
1556# CONFIG_NILFS2_FS is not set
1557CONFIG_NETWORK_FILESYSTEMS=y 1562CONFIG_NETWORK_FILESYSTEMS=y
1558# CONFIG_NFS_FS is not set 1563# CONFIG_NFS_FS is not set
1559# CONFIG_NFSD is not set 1564# CONFIG_NFSD is not set
@@ -1656,12 +1661,14 @@ CONFIG_DEBUG_MEMORY_INIT=y
1656# CONFIG_DEBUG_LIST is not set 1661# CONFIG_DEBUG_LIST is not set
1657# CONFIG_DEBUG_SG is not set 1662# CONFIG_DEBUG_SG is not set
1658# CONFIG_DEBUG_NOTIFIERS is not set 1663# CONFIG_DEBUG_NOTIFIERS is not set
1664# CONFIG_DEBUG_CREDENTIALS is not set
1659# CONFIG_BOOT_PRINTK_DELAY is not set 1665# CONFIG_BOOT_PRINTK_DELAY is not set
1660# CONFIG_RCU_TORTURE_TEST is not set 1666# CONFIG_RCU_TORTURE_TEST is not set
1661# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1667# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1662# CONFIG_KPROBES_SANITY_TEST is not set 1668# CONFIG_KPROBES_SANITY_TEST is not set
1663# CONFIG_BACKTRACE_SELF_TEST is not set 1669# CONFIG_BACKTRACE_SELF_TEST is not set
1664# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1670# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1671# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1665# CONFIG_LKDTM is not set 1672# CONFIG_LKDTM is not set
1666# CONFIG_FAULT_INJECTION is not set 1673# CONFIG_FAULT_INJECTION is not set
1667# CONFIG_LATENCYTOP is not set 1674# CONFIG_LATENCYTOP is not set
@@ -1692,6 +1699,7 @@ CONFIG_BLK_DEV_IO_TRACE=y
1692# CONFIG_FTRACE_STARTUP_TEST is not set 1699# CONFIG_FTRACE_STARTUP_TEST is not set
1693# CONFIG_RING_BUFFER_BENCHMARK is not set 1700# CONFIG_RING_BUFFER_BENCHMARK is not set
1694# CONFIG_DYNAMIC_DEBUG is not set 1701# CONFIG_DYNAMIC_DEBUG is not set
1702# CONFIG_DMA_API_DEBUG is not set
1695# CONFIG_SAMPLES is not set 1703# CONFIG_SAMPLES is not set
1696CONFIG_HAVE_ARCH_KGDB=y 1704CONFIG_HAVE_ARCH_KGDB=y
1697# CONFIG_KGDB is not set 1705# CONFIG_KGDB is not set
@@ -1716,7 +1724,6 @@ CONFIG_CRYPTO=y
1716# 1724#
1717# Crypto core or helper 1725# Crypto core or helper
1718# 1726#
1719# CONFIG_CRYPTO_FIPS is not set
1720CONFIG_CRYPTO_ALGAPI=y 1727CONFIG_CRYPTO_ALGAPI=y
1721CONFIG_CRYPTO_ALGAPI2=y 1728CONFIG_CRYPTO_ALGAPI2=y
1722CONFIG_CRYPTO_AEAD=y 1729CONFIG_CRYPTO_AEAD=y
@@ -1759,11 +1766,13 @@ CONFIG_CRYPTO_XTS=m
1759# 1766#
1760CONFIG_CRYPTO_HMAC=y 1767CONFIG_CRYPTO_HMAC=y
1761CONFIG_CRYPTO_XCBC=y 1768CONFIG_CRYPTO_XCBC=y
1769# CONFIG_CRYPTO_VMAC is not set
1762 1770
1763# 1771#
1764# Digest 1772# Digest
1765# 1773#
1766CONFIG_CRYPTO_CRC32C=m 1774CONFIG_CRYPTO_CRC32C=m
1775# CONFIG_CRYPTO_GHASH is not set
1767CONFIG_CRYPTO_MD4=y 1776CONFIG_CRYPTO_MD4=y
1768CONFIG_CRYPTO_MD5=y 1777CONFIG_CRYPTO_MD5=y
1769CONFIG_CRYPTO_MICHAEL_MIC=m 1778CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/sparc/include/asm/agp.h b/arch/sparc/include/asm/agp.h
index c2456870b05c..70f52c1661bc 100644
--- a/arch/sparc/include/asm/agp.h
+++ b/arch/sparc/include/asm/agp.h
@@ -7,10 +7,6 @@
 #define unmap_page_from_agp(page)
 #define flush_agp_cache() mb()
 
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order) \
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h
index 74703c5ef985..b2e3db63a64b 100644
--- a/arch/sparc/include/asm/asi.h
+++ b/arch/sparc/include/asm/asi.h
@@ -40,7 +40,11 @@
 #define ASI_M_UNA01 0x01 /* Same here... */
 #define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */
 #define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
+#ifndef CONFIG_SPARC_LEON
 #define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
+#else
+#define ASI_M_MMUREGS 0x19
+#endif /* CONFIG_SPARC_LEON */
 #define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */
 #define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */
 #define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */
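
The hunk above only changes the value behind ASI_M_MMUREGS (0x04 on a regular SRMMU, 0x19 on LEON), so existing accessors that bake the ASI into their load instructions pick up the right number at compile time. A minimal sketch of such an accessor, in the style of the pgtsrmmu.h helpers (the function name is illustrative and not part of this diff; it assumes <asm/asi.h> is included):

/* Read the SRMMU/LEON MMU control register.  The "i" constraint turns
 * ASI_M_MMUREGS into an immediate operand of the lda instruction, so a
 * CONFIG_SPARC_LEON build automatically emits ASI 0x19 here. */
static inline unsigned int mmu_ctrl_read_sketch(void)
{
	unsigned int val;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t"
			     : "=r" (val)
			     : "i" (ASI_M_MMUREGS));
	return val;
}
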
diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h
index 3702e087df2c..f3b85b6b0b76 100644
--- a/arch/sparc/include/asm/device.h
+++ b/arch/sparc/include/asm/device.h
@@ -32,4 +32,7 @@ dev_archdata_get_node(const struct dev_archdata *ad)
 	return ad->prom_node;
 }
 
+struct pdev_archdata {
+};
+
 #endif /* _ASM_SPARC_DEVICE_H */
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 204e4bf64438..5a8c308e2b5c 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/scatterlist.h> 4#include <linux/scatterlist.h>
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/dma-debug.h>
6 7
7#define DMA_ERROR_CODE (~(dma_addr_t)0x0) 8#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
8 9
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 14#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
14#define dma_is_consistent(d, h) (1) 15#define dma_is_consistent(d, h) (1)
15 16
16struct dma_ops { 17extern struct dma_map_ops *dma_ops, pci32_dma_ops;
17 void *(*alloc_coherent)(struct device *dev, size_t size, 18extern struct bus_type pci_bus_type;
18 dma_addr_t *dma_handle, gfp_t flag);
19 void (*free_coherent)(struct device *dev, size_t size,
20 void *cpu_addr, dma_addr_t dma_handle);
21 dma_addr_t (*map_page)(struct device *dev, struct page *page,
22 unsigned long offset, size_t size,
23 enum dma_data_direction direction);
24 void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
25 size_t size,
26 enum dma_data_direction direction);
27 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
28 enum dma_data_direction direction);
29 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
30 int nhwentries,
31 enum dma_data_direction direction);
32 void (*sync_single_for_cpu)(struct device *dev,
33 dma_addr_t dma_handle, size_t size,
34 enum dma_data_direction direction);
35 void (*sync_single_for_device)(struct device *dev,
36 dma_addr_t dma_handle, size_t size,
37 enum dma_data_direction direction);
38 void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
39 int nelems,
40 enum dma_data_direction direction);
41 void (*sync_sg_for_device)(struct device *dev,
42 struct scatterlist *sg, int nents,
43 enum dma_data_direction dir);
44};
45extern const struct dma_ops *dma_ops;
46 19
47static inline void *dma_alloc_coherent(struct device *dev, size_t size, 20static inline struct dma_map_ops *get_dma_ops(struct device *dev)
48 dma_addr_t *dma_handle, gfp_t flag)
49{
50 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
51}
52
53static inline void dma_free_coherent(struct device *dev, size_t size,
54 void *cpu_addr, dma_addr_t dma_handle)
55{
56 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
57}
58
59static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
60 size_t size,
61 enum dma_data_direction direction)
62{
63 return dma_ops->map_page(dev, virt_to_page(cpu_addr),
64 (unsigned long)cpu_addr & ~PAGE_MASK, size,
65 direction);
66}
67
68static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
69 size_t size,
70 enum dma_data_direction direction)
71{
72 dma_ops->unmap_page(dev, dma_addr, size, direction);
73}
74
75static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
76 unsigned long offset, size_t size,
77 enum dma_data_direction direction)
78{
79 return dma_ops->map_page(dev, page, offset, size, direction);
80}
81
82static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
83 size_t size,
84 enum dma_data_direction direction)
85{
86 dma_ops->unmap_page(dev, dma_address, size, direction);
87}
88
89static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
90 int nents, enum dma_data_direction direction)
91{
92 return dma_ops->map_sg(dev, sg, nents, direction);
93}
94
95static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
96 int nents, enum dma_data_direction direction)
97{ 21{
98 dma_ops->unmap_sg(dev, sg, nents, direction); 22#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
99} 23 if (dev->bus == &pci_bus_type)
100 24 return &pci32_dma_ops;
101static inline void dma_sync_single_for_cpu(struct device *dev, 25#endif
102 dma_addr_t dma_handle, size_t size, 26 return dma_ops;
103 enum dma_data_direction direction)
104{
105 dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
106} 27}
107 28
108static inline void dma_sync_single_for_device(struct device *dev, 29#include <asm-generic/dma-mapping-common.h>
109 dma_addr_t dma_handle,
110 size_t size,
111 enum dma_data_direction direction)
112{
113 if (dma_ops->sync_single_for_device)
114 dma_ops->sync_single_for_device(dev, dma_handle, size,
115 direction);
116}
117 30
118static inline void dma_sync_sg_for_cpu(struct device *dev, 31static inline void *dma_alloc_coherent(struct device *dev, size_t size,
119 struct scatterlist *sg, int nelems, 32 dma_addr_t *dma_handle, gfp_t flag)
120 enum dma_data_direction direction)
121{ 33{
122 dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); 34 struct dma_map_ops *ops = get_dma_ops(dev);
123} 35 void *cpu_addr;
124 36
125static inline void dma_sync_sg_for_device(struct device *dev, 37 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
126 struct scatterlist *sg, int nelems, 38 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
127 enum dma_data_direction direction) 39 return cpu_addr;
128{
129 if (dma_ops->sync_sg_for_device)
130 dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
131} 40}
132 41
133static inline void dma_sync_single_range_for_cpu(struct device *dev, 42static inline void dma_free_coherent(struct device *dev, size_t size,
134 dma_addr_t dma_handle, 43 void *cpu_addr, dma_addr_t dma_handle)
135 unsigned long offset,
136 size_t size,
137 enum dma_data_direction dir)
138{ 44{
139 dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir); 45 struct dma_map_ops *ops = get_dma_ops(dev);
140}
141 46
142static inline void dma_sync_single_range_for_device(struct device *dev, 47 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
143 dma_addr_t dma_handle, 48 ops->free_coherent(dev, size, cpu_addr, dma_handle);
144 unsigned long offset,
145 size_t size,
146 enum dma_data_direction dir)
147{
148 dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
149} 49}
150 50
151
152static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 51static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
153{ 52{
154 return (dma_addr == DMA_ERROR_CODE); 53 return (dma_addr == DMA_ERROR_CODE);
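
The dma-mapping.h hunk above drops sparc's private struct dma_ops and its hand-written wrappers in favour of the generic struct dma_map_ops plus asm-generic/dma-mapping-common.h, and adds dma-debug hooks to back the new HAVE_DMA_API_DEBUG selection. A rough, simplified sketch of the wrapper shape the generic header supplies (not a verbatim copy; attribute handling and sanity checks are omitted):

static inline dma_addr_t sketch_dma_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
{
	/* On sparc32 PCI devices this resolves to pci32_dma_ops,
	 * everything else gets the global dma_ops. */
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK,
			     size, dir, NULL);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK,
			   size, dir, addr, true);
	return addr;
}
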
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 1934f2cbf513..a0b443cb3c1f 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
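
The rename above follows the convention the generic NMI/sysrq code keys off: defining arch_trigger_all_cpu_backtrace advertises that the architecture can dump backtraces on all CPUs. Roughly, the generic wrapper looks like the sketch below (reconstructed from memory of include/linux/nmi.h of this era; treat the exact shape as an assumption, it is not part of this diff):

/* Sketch of how generic code keys off the #define added above. */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace();
	return true;
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
#endif
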
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
new file mode 100644
index 000000000000..28a42b73f64f
--- /dev/null
+++ b/arch/sparc/include/asm/leon.h
@@ -0,0 +1,362 @@
1/*
2 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com) Gaisler Research
3 * Copyright (C) 2004 Stefan Holst (mail@s-holst.de) Uni-Stuttgart
4 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
5 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
6 */
7
8#ifndef LEON_H_INCLUDE
9#define LEON_H_INCLUDE
10
11#ifdef CONFIG_SPARC_LEON
12
13#define ASI_LEON_NOCACHE 0x01
14
15#define ASI_LEON_DCACHE_MISS 0x1
16
17#define ASI_LEON_CACHEREGS 0x02
18#define ASI_LEON_IFLUSH 0x10
19#define ASI_LEON_DFLUSH 0x11
20
21#define ASI_LEON_MMUFLUSH 0x18
22#define ASI_LEON_MMUREGS 0x19
23#define ASI_LEON_BYPASS 0x1c
24#define ASI_LEON_FLUSH_PAGE 0x10
25
26/* mmu register access, ASI_LEON_MMUREGS */
27#define LEON_CNR_CTRL 0x000
28#define LEON_CNR_CTXP 0x100
29#define LEON_CNR_CTX 0x200
30#define LEON_CNR_F 0x300
31#define LEON_CNR_FADDR 0x400
32
33#define LEON_CNR_CTX_NCTX 256 /*number of MMU ctx */
34
35#define LEON_CNR_CTRL_TLBDIS 0x80000000
36
37#define LEON_MMUTLB_ENT_MAX 64
38
39/*
40 * diagnostic access from mmutlb.vhd:
41 * 0: pte address
42 * 4: pte
43 * 8: additional flags
44 */
45#define LEON_DIAGF_LVL 0x3
46#define LEON_DIAGF_WR 0x8
47#define LEON_DIAGF_WR_SHIFT 3
48#define LEON_DIAGF_HIT 0x10
49#define LEON_DIAGF_HIT_SHIFT 4
50#define LEON_DIAGF_CTX 0x1fe0
51#define LEON_DIAGF_CTX_SHIFT 5
52#define LEON_DIAGF_VALID 0x2000
53#define LEON_DIAGF_VALID_SHIFT 13
54
55/*
56 * Interrupt Sources
57 *
58 * The interrupt source numbers directly map to the trap type and to
59 * the bits used in the Interrupt Clear, Interrupt Force, Interrupt Mask,
60 * and the Interrupt Pending Registers.
61 */
62#define LEON_INTERRUPT_CORRECTABLE_MEMORY_ERROR 1
63#define LEON_INTERRUPT_UART_1_RX_TX 2
64#define LEON_INTERRUPT_UART_0_RX_TX 3
65#define LEON_INTERRUPT_EXTERNAL_0 4
66#define LEON_INTERRUPT_EXTERNAL_1 5
67#define LEON_INTERRUPT_EXTERNAL_2 6
68#define LEON_INTERRUPT_EXTERNAL_3 7
69#define LEON_INTERRUPT_TIMER1 8
70#define LEON_INTERRUPT_TIMER2 9
71#define LEON_INTERRUPT_EMPTY1 10
72#define LEON_INTERRUPT_EMPTY2 11
73#define LEON_INTERRUPT_OPEN_ETH 12
74#define LEON_INTERRUPT_EMPTY4 13
75#define LEON_INTERRUPT_EMPTY5 14
76#define LEON_INTERRUPT_EMPTY6 15
77
78/* irq masks */
79#define LEON_HARD_INT(x) (1 << (x)) /* irq 0-15 */
80#define LEON_IRQMASK_R 0x0000fffe /* bit 15- 1 of lregs.irqmask */
81#define LEON_IRQPRIO_R 0xfffe0000 /* bit 31-17 of lregs.irqmask */
82
83/* leon uart register definitions */
84#define LEON_OFF_UDATA 0x0
85#define LEON_OFF_USTAT 0x4
86#define LEON_OFF_UCTRL 0x8
87#define LEON_OFF_USCAL 0xc
88
89#define LEON_UCTRL_RE 0x01
90#define LEON_UCTRL_TE 0x02
91#define LEON_UCTRL_RI 0x04
92#define LEON_UCTRL_TI 0x08
93#define LEON_UCTRL_PS 0x10
94#define LEON_UCTRL_PE 0x20
95#define LEON_UCTRL_FL 0x40
96#define LEON_UCTRL_LB 0x80
97
98#define LEON_USTAT_DR 0x01
99#define LEON_USTAT_TS 0x02
100#define LEON_USTAT_TH 0x04
101#define LEON_USTAT_BR 0x08
102#define LEON_USTAT_OV 0x10
103#define LEON_USTAT_PE 0x20
104#define LEON_USTAT_FE 0x40
105
106#define LEON_MCFG2_SRAMDIS 0x00002000
107#define LEON_MCFG2_SDRAMEN 0x00004000
108#define LEON_MCFG2_SRAMBANKSZ 0x00001e00 /* [12-9] */
109#define LEON_MCFG2_SRAMBANKSZ_SHIFT 9
110#define LEON_MCFG2_SDRAMBANKSZ 0x03800000 /* [25-23] */
111#define LEON_MCFG2_SDRAMBANKSZ_SHIFT 23
112
113#define LEON_TCNT0_MASK 0x7fffff
114
115#define LEON_USTAT_ERROR (LEON_USTAT_OV | LEON_USTAT_PE | LEON_USTAT_FE)
116/* no break yet */
117
118#define ASI_LEON3_SYSCTRL 0x02
119#define ASI_LEON3_SYSCTRL_ICFG 0x08
120#define ASI_LEON3_SYSCTRL_DCFG 0x0c
121#define ASI_LEON3_SYSCTRL_CFG_SNOOPING (1 << 27)
122#define ASI_LEON3_SYSCTRL_CFG_SSIZE(c) (1 << ((c >> 20) & 0xf))
123
124#ifndef __ASSEMBLY__
125
126/* do a virtual address read without cache */
127static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
128{
129 unsigned long retval;
130 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
131 "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
132 return retval;
133}
134
135/* do a physical address bypass write, i.e. for 0x80000000 */
136static inline void leon_store_reg(unsigned long paddr, unsigned long value)
137{
138 __asm__ __volatile__("sta %0, [%1] %2\n\t" : : "r"(value), "r"(paddr),
139 "i"(ASI_LEON_BYPASS) : "memory");
140}
141
142/* do a physical address bypass load, i.e. for 0x80000000 */
143static inline unsigned long leon_load_reg(unsigned long paddr)
144{
145 unsigned long retval;
146 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
147 "=r"(retval) : "r"(paddr), "i"(ASI_LEON_BYPASS));
148 return retval;
149}
150
151extern inline void leon_srmmu_disabletlb(void)
152{
153 unsigned int retval;
154 __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
155 "i"(ASI_LEON_MMUREGS));
156 retval |= LEON_CNR_CTRL_TLBDIS;
157 __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
158 "i"(ASI_LEON_MMUREGS) : "memory");
159}
160
161extern inline void leon_srmmu_enabletlb(void)
162{
163 unsigned int retval;
164 __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
165 "i"(ASI_LEON_MMUREGS));
166 retval = retval & ~LEON_CNR_CTRL_TLBDIS;
167 __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
168 "i"(ASI_LEON_MMUREGS) : "memory");
169}
170
171/* macro access for leon_load_reg() and leon_store_reg() */
172#define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x)))
173#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
174#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
175#define LEON3_BYPASS_ORIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
176#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
177#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
178#define LEON_REGLOAD_PA(x) leon_load_reg((unsigned long)(x)+LEON_PREGS)
179#define LEON_REGSTORE_PA(x, v) leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
180#define LEON_REGSTORE_OR_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
181#define LEON_REGSTORE_AND_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
182
183/* macro access for leon_readnobuffer_reg() */
184#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
185
186extern void sparc_leon_eirq_register(int eirq);
187extern void leon_init(void);
188extern void leon_switch_mm(void);
189extern void leon_init_IRQ(void);
190
191extern unsigned long last_valid_pfn;
192
193extern inline unsigned long sparc_leon3_get_dcachecfg(void)
194{
195 unsigned int retval;
196 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
197 "=r"(retval) :
198 "r"(ASI_LEON3_SYSCTRL_DCFG),
199 "i"(ASI_LEON3_SYSCTRL));
200 return retval;
201}
202
203/* enable snooping */
204extern inline void sparc_leon3_enable_snooping(void)
205{
206 __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
207 "set 0x800000, %%l2\n\t"
208 "or %%l2, %%l1, %%l2\n\t"
209 "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
210};
211
212extern inline void sparc_leon3_disable_cache(void)
213{
214 __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
215 "set 0x00000f, %%l2\n\t"
216 "andn %%l2, %%l1, %%l2\n\t"
217 "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
218};
219
220#endif /*!__ASSEMBLY__*/
221
222#ifdef CONFIG_SMP
223# define LEON3_IRQ_RESCHEDULE 13
224# define LEON3_IRQ_TICKER (leon_percpu_timer_dev[0].irq)
225# define LEON3_IRQ_CROSS_CALL 15
226#endif
227
228#if defined(PAGE_SIZE_LEON_8K)
229#define LEON_PAGE_SIZE_LEON 1
230#elif defined(PAGE_SIZE_LEON_16K)
231#define LEON_PAGE_SIZE_LEON 2)
232#else
233#define LEON_PAGE_SIZE_LEON 0
234#endif
235
236#if LEON_PAGE_SIZE_LEON == 0
237/* [ 8, 6, 6 ] + 12 */
238#define LEON_PGD_SH 24
239#define LEON_PGD_M 0xff
240#define LEON_PMD_SH 18
241#define LEON_PMD_SH_V (LEON_PGD_SH-2)
242#define LEON_PMD_M 0x3f
243#define LEON_PTE_SH 12
244#define LEON_PTE_M 0x3f
245#elif LEON_PAGE_SIZE_LEON == 1
246/* [ 7, 6, 6 ] + 13 */
247#define LEON_PGD_SH 25
248#define LEON_PGD_M 0x7f
249#define LEON_PMD_SH 19
250#define LEON_PMD_SH_V (LEON_PGD_SH-1)
251#define LEON_PMD_M 0x3f
252#define LEON_PTE_SH 13
253#define LEON_PTE_M 0x3f
254#elif LEON_PAGE_SIZE_LEON == 2
255/* [ 6, 6, 6 ] + 14 */
256#define LEON_PGD_SH 26
257#define LEON_PGD_M 0x3f
258#define LEON_PMD_SH 20
259#define LEON_PMD_SH_V (LEON_PGD_SH-0)
260#define LEON_PMD_M 0x3f
261#define LEON_PTE_SH 14
262#define LEON_PTE_M 0x3f
263#elif LEON_PAGE_SIZE_LEON == 3
264/* [ 4, 7, 6 ] + 15 */
265#define LEON_PGD_SH 28
266#define LEON_PGD_M 0x0f
267#define LEON_PMD_SH 21
268#define LEON_PMD_SH_V (LEON_PGD_SH-0)
269#define LEON_PMD_M 0x7f
270#define LEON_PTE_SH 15
271#define LEON_PTE_M 0x3f
272#else
273#error cannot determine LEON_PAGE_SIZE_LEON
274#endif
275
276#define PAGE_MIN_SHIFT (12)
277#define PAGE_MIN_SIZE (1UL << PAGE_MIN_SHIFT)
278
279#define LEON3_XCCR_SETS_MASK 0x07000000UL
280#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
281
282#define LEON2_CCR_DSETS_MASK 0x03000000UL
283#define LEON2_CFG_SSIZE_MASK 0x00007000UL
284
285#ifndef __ASSEMBLY__
286extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
287extern void leon_flush_icache_all(void);
288extern void leon_flush_dcache_all(void);
289extern void leon_flush_cache_all(void);
290extern void leon_flush_tlb_all(void);
291extern int leon_flush_during_switch;
292extern int leon_flush_needed(void);
293
294struct vm_area_struct;
295extern void leon_flush_icache_all(void);
296extern void leon_flush_dcache_all(void);
297extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
298extern void leon_flush_cache_all(void);
299extern void leon_flush_tlb_all(void);
300extern int leon_flush_during_switch;
301extern int leon_flush_needed(void);
302extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
303
304/* struct that hold LEON3 cache configuration registers */
305struct leon3_cacheregs {
306 unsigned long ccr; /* 0x00 - Cache Control Register */
307 unsigned long iccr; /* 0x08 - Instruction Cache Configuration Register */
308 unsigned long dccr; /* 0x0c - Data Cache Configuration Register */
309};
310
311/* struct that hold LEON2 cache configuration register
312 * & configuration register
313 */
314struct leon2_cacheregs {
315 unsigned long ccr, cfg;
316};
317
318#ifdef __KERNEL__
319
320#include <linux/interrupt.h>
321
322struct device_node;
323extern int sparc_leon_eirq_get(int eirq, int cpu);
324extern irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id);
325extern void sparc_leon_eirq_register(int eirq);
326extern void leon_clear_clock_irq(void);
327extern void leon_load_profile_irq(int cpu, unsigned int limit);
328extern void leon_init_timers(irq_handler_t counter_fn);
329extern void leon_clear_clock_irq(void);
330extern void leon_load_profile_irq(int cpu, unsigned int limit);
331extern void leon_trans_init(struct device_node *dp);
332extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
333extern void leon_init_IRQ(void);
334extern void leon_init(void);
335extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
336extern void init_leon(void);
337extern void poke_leonsparc(void);
338extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
339extern int leon_flush_needed(void);
340extern void leon_switch_mm(void);
341extern int srmmu_swprobe_trace;
342
343#endif /* __KERNEL__ */
344
345#endif /* __ASSEMBLY__ */
346
347/* macros used in leon_mm.c */
348#define PFN(x) ((x) >> PAGE_SHIFT)
349#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
350#define _SRMMU_PTE_PMASK_LEON 0xffffffff
351
352#else /* defined(CONFIG_SPARC_LEON) */
353
354/* nop definitions for !LEON case */
355#define leon_init() do {} while (0)
356#define leon_switch_mm() do {} while (0)
357#define leon_init_IRQ() do {} while (0)
358#define init_leon() do {} while (0)
359
360#endif /* !defined(CONFIG_SPARC_LEON) */
361
362#endif
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
new file mode 100644
index 000000000000..618e88821795
--- /dev/null
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -0,0 +1,263 @@
1/*
2*Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com), Gaisler Research
3*Copyright (C) 2004 Stefan Holst (mail@s-holst.de), Uni-Stuttgart
4*Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com),Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
5*/
6
7#ifndef LEON_AMBA_H_INCLUDE
8#define LEON_AMBA_H_INCLUDE
9
10#ifndef __ASSEMBLY__
11
12struct amba_prom_registers {
13 unsigned int phys_addr; /* The physical address of this register */
14 unsigned int reg_size; /* How many bytes does this register take up? */
15};
16
17#endif
18
19/*
20 * The following defines the bits in the LEON UART Status Registers.
21 */
22
23#define LEON_REG_UART_STATUS_DR 0x00000001 /* Data Ready */
24#define LEON_REG_UART_STATUS_TSE 0x00000002 /* TX Send Register Empty */
25#define LEON_REG_UART_STATUS_THE 0x00000004 /* TX Hold Register Empty */
26#define LEON_REG_UART_STATUS_BR 0x00000008 /* Break Error */
27#define LEON_REG_UART_STATUS_OE 0x00000010 /* RX Overrun Error */
28#define LEON_REG_UART_STATUS_PE 0x00000020 /* RX Parity Error */
29#define LEON_REG_UART_STATUS_FE 0x00000040 /* RX Framing Error */
30#define LEON_REG_UART_STATUS_ERR 0x00000078 /* Error Mask */
31
32/*
33 * The following defines the bits in the LEON UART Ctrl Registers.
34 */
35
36#define LEON_REG_UART_CTRL_RE 0x00000001 /* Receiver enable */
37#define LEON_REG_UART_CTRL_TE 0x00000002 /* Transmitter enable */
38#define LEON_REG_UART_CTRL_RI 0x00000004 /* Receiver interrupt enable */
39#define LEON_REG_UART_CTRL_TI 0x00000008 /* Transmitter irq */
40#define LEON_REG_UART_CTRL_PS 0x00000010 /* Parity select */
41#define LEON_REG_UART_CTRL_PE 0x00000020 /* Parity enable */
42#define LEON_REG_UART_CTRL_FL 0x00000040 /* Flow control enable */
43#define LEON_REG_UART_CTRL_LB 0x00000080 /* Loop Back enable */
44
45#define LEON3_GPTIMER_EN 1
46#define LEON3_GPTIMER_RL 2
47#define LEON3_GPTIMER_LD 4
48#define LEON3_GPTIMER_IRQEN 8
49#define LEON3_GPTIMER_SEPIRQ 8
50
51#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
52/* 0 = hold scalar and counter */
53#define LEON23_REG_TIMER_CONTROL_RL 0x00000002 /* 1 = reload at 0 */
54 /* 0 = stop at 0 */
55#define LEON23_REG_TIMER_CONTROL_LD 0x00000004 /* 1 = load counter */
56 /* 0 = no function */
57#define LEON23_REG_TIMER_CONTROL_IQ 0x00000008 /* 1 = irq enable */
58 /* 0 = no function */
59
60/*
61 * The following defines the bits in the LEON PS/2 Status Registers.
62 */
63
64#define LEON_REG_PS2_STATUS_DR 0x00000001 /* Data Ready */
65#define LEON_REG_PS2_STATUS_PE 0x00000002 /* Parity error */
66#define LEON_REG_PS2_STATUS_FE 0x00000004 /* Framing error */
67#define LEON_REG_PS2_STATUS_KI 0x00000008 /* Keyboard inhibit */
68#define LEON_REG_PS2_STATUS_RF 0x00000010 /* RX buffer full */
69#define LEON_REG_PS2_STATUS_TF 0x00000020 /* TX buffer full */
70
71/*
72 * The following defines the bits in the LEON PS/2 Ctrl Registers.
73 */
74
75#define LEON_REG_PS2_CTRL_RE 0x00000001 /* Receiver enable */
76#define LEON_REG_PS2_CTRL_TE 0x00000002 /* Transmitter enable */
77#define LEON_REG_PS2_CTRL_RI 0x00000004 /* Keyboard receive irq */
78#define LEON_REG_PS2_CTRL_TI 0x00000008 /* Keyboard transmit irq */
79
80#define LEON3_IRQMPSTATUS_CPUNR 28
81#define LEON3_IRQMPSTATUS_BROADCAST 27
82
83#define GPTIMER_CONFIG_IRQNT(a) (((a) >> 3) & 0x1f)
84#define GPTIMER_CONFIG_ISSEP(a) ((a) & (1 << 8))
85#define GPTIMER_CONFIG_NTIMERS(a) ((a) & (0x7))
86#define LEON3_GPTIMER_CTRL_PENDING 0x10
87#define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
88#define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
89
90#ifdef CONFIG_SPARC_LEON
91
92#ifndef __ASSEMBLY__
93
94struct leon3_irqctrl_regs_map {
95 u32 ilevel;
96 u32 ipend;
97 u32 iforce;
98 u32 iclear;
99 u32 mpstatus;
100 u32 mpbroadcast;
101 u32 notused02;
102 u32 notused03;
103 u32 notused10;
104 u32 notused11;
105 u32 notused12;
106 u32 notused13;
107 u32 notused20;
108 u32 notused21;
109 u32 notused22;
110 u32 notused23;
111 u32 mask[16];
112 u32 force[16];
113 /* Extended IRQ registers */
114 u32 intid[16]; /* 0xc0 */
115};
116
117struct leon3_apbuart_regs_map {
118 u32 data;
119 u32 status;
120 u32 ctrl;
121 u32 scaler;
122};
123
124struct leon3_gptimerelem_regs_map {
125 u32 val;
126 u32 rld;
127 u32 ctrl;
128 u32 unused;
129};
130
131struct leon3_gptimer_regs_map {
132 u32 scalar;
133 u32 scalar_reload;
134 u32 config;
135 u32 unused;
136 struct leon3_gptimerelem_regs_map e[8];
137};
138
139/*
140 * Types and structure used for AMBA Plug & Play bus scanning
141 */
142
143#define AMBA_MAXAPB_DEVS 64
144#define AMBA_MAXAPB_DEVS_PERBUS 16
145
146struct amba_device_table {
147 int devnr; /* number of devices on AHB or APB bus */
148 unsigned int *addr[16]; /* addresses to the devices configuration tables */
149 unsigned int allocbits[1]; /* 0=unallocated, 1=allocated driver */
150};
151
152struct amba_apbslv_device_table {
153 int devnr; /* number of devices on AHB or APB bus */
154 unsigned int *addr[AMBA_MAXAPB_DEVS]; /* addresses to the devices configuration tables */
155 unsigned int apbmst[AMBA_MAXAPB_DEVS]; /* apb master if a entry is a apb slave */
156 unsigned int apbmstidx[AMBA_MAXAPB_DEVS]; /* apb master idx if a entry is a apb slave */
157 unsigned int allocbits[4]; /* 0=unallocated, 1=allocated driver */
158};
159
160struct amba_confarea_type {
161 struct amba_confarea_type *next;/* next bus in chain */
162 struct amba_device_table ahbmst;
163 struct amba_device_table ahbslv;
164 struct amba_apbslv_device_table apbslv;
165 unsigned int apbmst;
166};
167
168/* collect apb slaves */
169struct amba_apb_device {
170 unsigned int start, irq, bus_id;
171 struct amba_confarea_type *bus;
172};
173
174/* collect ahb slaves */
175struct amba_ahb_device {
176 unsigned int start[4], irq, bus_id;
177 struct amba_confarea_type *bus;
178};
179
180struct device_node;
181void _amba_init(struct device_node *dp, struct device_node ***nextp);
182
183extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs;
184extern struct leon3_gptimer_regs_map *leon3_gptimer_regs;
185extern struct amba_apb_device leon_percpu_timer_dev[16];
186extern int leondebug_irq_disable;
187extern int leon_debug_irqout;
188extern unsigned long leon3_gptimer_irq;
189extern unsigned int sparc_leon_eirq;
190
191#endif /* __ASSEMBLY__ */
192
193#define LEON3_IO_AREA 0xfff00000
194#define LEON3_CONF_AREA 0xff000
195#define LEON3_AHB_SLAVE_CONF_AREA (1 << 11)
196
197#define LEON3_AHB_CONF_WORDS 8
198#define LEON3_APB_CONF_WORDS 2
199#define LEON3_AHB_MASTERS 16
200#define LEON3_AHB_SLAVES 16
201#define LEON3_APB_SLAVES 16
202#define LEON3_APBUARTS 8
203
204/* Vendor codes */
205#define VENDOR_GAISLER 1
206#define VENDOR_PENDER 2
207#define VENDOR_ESA 4
208#define VENDOR_OPENCORES 8
209
210/* Gaisler Research device id's */
211#define GAISLER_LEON3 0x003
212#define GAISLER_LEON3DSU 0x004
213#define GAISLER_ETHAHB 0x005
214#define GAISLER_APBMST 0x006
215#define GAISLER_AHBUART 0x007
216#define GAISLER_SRCTRL 0x008
217#define GAISLER_SDCTRL 0x009
218#define GAISLER_APBUART 0x00C
219#define GAISLER_IRQMP 0x00D
220#define GAISLER_AHBRAM 0x00E
221#define GAISLER_GPTIMER 0x011
222#define GAISLER_PCITRG 0x012
223#define GAISLER_PCISBRG 0x013
224#define GAISLER_PCIFBRG 0x014
225#define GAISLER_PCITRACE 0x015
226#define GAISLER_PCIDMA 0x016
227#define GAISLER_AHBTRACE 0x017
228#define GAISLER_ETHDSU 0x018
229#define GAISLER_PIOPORT 0x01A
230#define GAISLER_GRGPIO 0x01A
231#define GAISLER_AHBJTAG 0x01c
232#define GAISLER_ETHMAC 0x01D
233#define GAISLER_AHB2AHB 0x020
234#define GAISLER_USBDC 0x021
235#define GAISLER_ATACTRL 0x024
236#define GAISLER_DDRSPA 0x025
237#define GAISLER_USBEHC 0x026
238#define GAISLER_USBUHC 0x027
239#define GAISLER_I2CMST 0x028
240#define GAISLER_SPICTRL 0x02D
241#define GAISLER_DDR2SPA 0x02E
242#define GAISLER_SPIMCTRL 0x045
243#define GAISLER_LEON4 0x048
244#define GAISLER_LEON4DSU 0x049
245#define GAISLER_AHBSTAT 0x052
246#define GAISLER_FTMCTRL 0x054
247#define GAISLER_KBD 0x060
248#define GAISLER_VGA 0x061
249#define GAISLER_SVGA 0x063
250#define GAISLER_GRSYSMON 0x066
251#define GAISLER_GRACECTRL 0x067
252
253#define GAISLER_L2TIME 0xffd /* internal device: leon2 timer */
254#define GAISLER_L2C 0xffe /* internal device: leon2compat */
255#define GAISLER_PLUGPLAY 0xfff /* internal device: plug & play configarea */
256
257#define amba_vendor(x) (((x) >> 24) & 0xff)
258
259#define amba_device(x) (((x) >> 12) & 0xfff)
260
261#endif /* CONFIG_SPARC_LEON */
262
263#endif
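The amba_vendor() and amba_device() macros above extract the vendor and device fields from an AMBA plug & play identification word (bits 31..24 and 23..12 respectively). A minimal sketch of how a scan loop could use them to recognise a GRLIB APBUART; the helper name and the conf_word variable are illustrative, not part of this patch:

#include <asm/leon_amba.h>

static int is_gaisler_apbuart(unsigned int conf_word)
{
	/* vendor in bits 31..24, device id in bits 23..12 */
	return amba_vendor(conf_word) == VENDOR_GAISLER &&
	       amba_device(conf_word) == GAISLER_APBUART;
}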
diff --git a/arch/sparc/include/asm/machines.h b/arch/sparc/include/asm/machines.h
index c28c2f248794..cd9c099567e4 100644
--- a/arch/sparc/include/asm/machines.h
+++ b/arch/sparc/include/asm/machines.h
@@ -15,7 +15,7 @@ struct Sun_Machine_Models {
15/* Current number of machines we know about that have an IDPROM 15/* Current number of machines we know about that have an IDPROM
16 * machtype entry including one entry for the 0x80 OBP machines. 16 * machtype entry including one entry for the 0x80 OBP machines.
17 */ 17 */
18#define NUM_SUN_MACHINES 15 18#define NUM_SUN_MACHINES 16
19 19
20/* The machine type in the idprom area looks like this: 20/* The machine type in the idprom area looks like this:
21 * 21 *
@@ -30,6 +30,7 @@ struct Sun_Machine_Models {
30 30
31#define SM_ARCH_MASK 0xf0 31#define SM_ARCH_MASK 0xf0
32#define SM_SUN4 0x20 32#define SM_SUN4 0x20
33#define M_LEON 0x30
33#define SM_SUN4C 0x50 34#define SM_SUN4C 0x50
34#define SM_SUN4M 0x70 35#define SM_SUN4M 0x70
35#define SM_SUN4M_OBP 0x80 36#define SM_SUN4M_OBP 0x80
@@ -41,6 +42,9 @@ struct Sun_Machine_Models {
41#define SM_4_330 0x03 /* Sun 4/300 series */ 42#define SM_4_330 0x03 /* Sun 4/300 series */
42#define SM_4_470 0x04 /* Sun 4/400 series */ 43#define SM_4_470 0x04 /* Sun 4/400 series */
43 44
45/* Leon machines */
46#define M_LEON3_SOC 0x02 /* Leon3 SoC */
47
44/* Sun4c machines Full Name - PROM NAME */ 48/* Sun4c machines Full Name - PROM NAME */
45#define SM_4C_SS1 0x01 /* Sun4c SparcStation 1 - Sun 4/60 */ 49#define SM_4C_SS1 0x01 /* Sun4c SparcStation 1 - Sun 4/60 */
46#define SM_4C_IPC 0x02 /* Sun4c SparcStation IPC - Sun 4/40 */ 50#define SM_4C_IPC 0x02 /* Sun4c SparcStation IPC - Sun 4/40 */
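With the new M_LEON architecture code and the M_LEON3_SOC machine number, an idprom machtype byte of 0x32 identifies a LEON3 system-on-chip, mirroring the SM_ARCH_MASK split used for the Sun machine classes. A hedged sketch of the check; the function is invented for illustration:

#include <asm/machines.h>

static int machtype_is_leon3(unsigned char machtype)
{
	return (machtype & SM_ARCH_MASK) == M_LEON &&
	       (machtype & 0x0f) == M_LEON3_SOC;
}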
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index 988192e8e956..c3029ad6619a 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -20,6 +20,8 @@
20 20
21#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 21#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
22#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 22#define MAP_NONBLOCK 0x10000 /* do not block on IO */
23#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
24#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
23 25
24#ifdef __KERNEL__ 26#ifdef __KERNEL__
25#ifndef __ASSEMBLY__ 27#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
index fbd546dd4feb..72e6500e7ab0 100644
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -5,6 +5,9 @@ extern int __init nmi_init(void);
5extern void perfctr_irq(int irq, struct pt_regs *regs); 5extern void perfctr_irq(int irq, struct pt_regs *regs);
6extern void nmi_adjust_hz(unsigned int new_hz); 6extern void nmi_adjust_hz(unsigned int new_hz);
7 7
8extern int nmi_usable; 8extern atomic_t nmi_active;
9
10extern void start_nmi_watchdog(void *unused);
11extern void stop_nmi_watchdog(void *unused);
9 12
10#endif /* __NMI_H */ 13#endif /* __NMI_H */
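The single nmi_usable flag gives way to an atomic counter plus per-cpu start/stop helpers whose void-pointer argument lets them double as SMP callbacks. A minimal sketch of the intended pattern for a caller that needs temporary, exclusive use of the performance counters; the function name is made up:

#include <linux/smp.h>
#include <asm/atomic.h>
#include <asm/nmi.h>

static void example_borrow_pmu(void)
{
	if (atomic_read(&nmi_active) > 0) {
		on_each_cpu(stop_nmi_watchdog, NULL, 1);
		/* ... reprogram the performance counters here ... */
		on_each_cpu(start_nmi_watchdog, NULL, 1);
	}
}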
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index 6e14fd179335..d9c031f9910f 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -5,4 +5,7 @@
5#else 5#else
6#include <asm/pci_32.h> 6#include <asm/pci_32.h>
7#endif 7#endif
8
9#include <asm-generic/pci-dma-compat.h>
10
8#endif 11#endif
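Pulling in asm-generic/pci-dma-compat.h is what allows the hand-written pci_* DMA wrappers to be deleted from pci_32.h and pci_64.h below: the compat header forwards every pci_* call to the generic DMA API. Roughly the shape of two of those shims, simplified and not quoted verbatim from the header:

static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev,
			      ptr, size, (enum dma_data_direction)direction);
}

static inline int
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
	return dma_mapping_error(&pdev->dev, dma_addr);
}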
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index b41c4c198159..e769f668a4b5 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -10,7 +10,6 @@
10 * or architectures with incomplete PCI setup by the loader. 10 * or architectures with incomplete PCI setup by the loader.
11 */ 11 */
12#define pcibios_assign_all_busses() 0 12#define pcibios_assign_all_busses() 0
13#define pcibios_scan_all_fns(a, b) 0
14 13
15#define PCIBIOS_MIN_IO 0UL 14#define PCIBIOS_MIN_IO 0UL
16#define PCIBIOS_MIN_MEM 0UL 15#define PCIBIOS_MIN_MEM 0UL
@@ -31,42 +30,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
31 */ 30 */
32#define PCI_DMA_BUS_IS_PHYS (0) 31#define PCI_DMA_BUS_IS_PHYS (0)
33 32
34#include <asm/scatterlist.h>
35
36struct pci_dev; 33struct pci_dev;
37 34
38/* Allocate and map kernel buffer using consistent mode DMA for a device.
39 * hwdev should be valid struct pci_dev pointer for PCI devices.
40 */
41extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
42
43/* Free and unmap a consistent DMA buffer.
44 * cpu_addr is what was returned from pci_alloc_consistent,
45 * size must be the same as what as passed into pci_alloc_consistent,
46 * and likewise dma_addr must be the same as what *dma_addrp was set to.
47 *
48 * References to the memory and mappings associated with cpu_addr/dma_addr
49 * past this call are illegal.
50 */
51extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
52
53/* Map a single buffer of the indicated size for DMA in streaming mode.
54 * The 32-bit bus address to use is returned.
55 *
56 * Once the device is given the dma address, the device owns this memory
57 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
58 */
59extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
60
61/* Unmap a single streaming mode DMA translation. The dma_addr and size
62 * must match what was provided for in a previous pci_map_single call. All
63 * other usages are undefined.
64 *
65 * After this call, reads by the cpu to the buffer are guaranteed to see
66 * whatever the device wrote there.
67 */
68extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
69
70/* pci_unmap_{single,page} is not a nop, thus... */ 35/* pci_unmap_{single,page} is not a nop, thus... */
71#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ 36#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
72 dma_addr_t ADDR_NAME; 37 dma_addr_t ADDR_NAME;
@@ -81,69 +46,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
81#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ 46#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
82 (((PTR)->LEN_NAME) = (VAL)) 47 (((PTR)->LEN_NAME) = (VAL))
83 48
84/*
85 * Same as above, only with pages instead of mapped addresses.
86 */
87extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
88 unsigned long offset, size_t size, int direction);
89extern void pci_unmap_page(struct pci_dev *hwdev,
90 dma_addr_t dma_address, size_t size, int direction);
91
92/* Map a set of buffers described by scatterlist in streaming
93 * mode for DMA. This is the scatter-gather version of the
94 * above pci_map_single interface. Here the scatter gather list
95 * elements are each tagged with the appropriate dma address
96 * and length. They are obtained via sg_dma_{address,length}(SG).
97 *
98 * NOTE: An implementation may be able to use a smaller number of
99 * DMA address/length pairs than there are SG table elements.
100 * (for example via virtual mapping capabilities)
101 * The routine returns the number of addr/length pairs actually
102 * used, at most nents.
103 *
104 * Device ownership issues as mentioned above for pci_map_single are
105 * the same here.
106 */
107extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
108
109/* Unmap a set of streaming mode DMA translations.
110 * Again, cpu read rules concerning calls here are the same as for
111 * pci_unmap_single() above.
112 */
113extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
114
115/* Make physical memory consistent for a single
116 * streaming mode DMA translation after a transfer.
117 *
118 * If you perform a pci_map_single() but wish to interrogate the
119 * buffer using the cpu, yet do not wish to teardown the PCI dma
120 * mapping, you must call this function before doing so. At the
121 * next point you give the PCI dma address back to the card, you
122 * must first perform a pci_dma_sync_for_device, and then the device
123 * again owns the buffer.
124 */
125extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
126extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
127
128/* Make physical memory consistent for a set of streaming
129 * mode DMA translations after a transfer.
130 *
131 * The same as pci_dma_sync_single_* but for a scatter-gather list,
132 * same rules and usage.
133 */
134extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
135extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
136
137/* Return whether the given PCI device DMA address mask can
138 * be supported properly. For example, if your device can
139 * only drive the low 24-bits during PCI bus mastering, then
140 * you would pass 0x00ffffff as the mask to this function.
141 */
142static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
143{
144 return 1;
145}
146
147#ifdef CONFIG_PCI 49#ifdef CONFIG_PCI
148static inline void pci_dma_burst_advice(struct pci_dev *pdev, 50static inline void pci_dma_burst_advice(struct pci_dev *pdev,
149 enum pci_dma_burst_strategy *strat, 51 enum pci_dma_burst_strategy *strat,
@@ -154,14 +56,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
154} 56}
155#endif 57#endif
156 58
157#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
158
159static inline int pci_dma_mapping_error(struct pci_dev *pdev,
160 dma_addr_t dma_addr)
161{
162 return (dma_addr == PCI_DMA_ERROR_CODE);
163}
164
165struct device_node; 59struct device_node;
166extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); 60extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
167 61
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 7a1e3566e59c..b63e51c3c3ee 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -10,7 +10,6 @@
10 * or architectures with incomplete PCI setup by the loader. 10 * or architectures with incomplete PCI setup by the loader.
11 */ 11 */
12#define pcibios_assign_all_busses() 0 12#define pcibios_assign_all_busses() 0
13#define pcibios_scan_all_fns(a, b) 0
14 13
15#define PCIBIOS_MIN_IO 0UL 14#define PCIBIOS_MIN_IO 0UL
16#define PCIBIOS_MIN_MEM 0UL 15#define PCIBIOS_MIN_MEM 0UL
@@ -35,37 +34,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
35 */ 34 */
36#define PCI_DMA_BUS_IS_PHYS (0) 35#define PCI_DMA_BUS_IS_PHYS (0)
37 36
38static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
39 dma_addr_t *dma_handle)
40{
41 return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
42}
43
44static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
45 void *vaddr, dma_addr_t dma_handle)
46{
47 return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
48}
49
50static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
51 size_t size, int direction)
52{
53 return dma_map_single(&pdev->dev, ptr, size,
54 (enum dma_data_direction) direction);
55}
56
57static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
58 size_t size, int direction)
59{
60 dma_unmap_single(&pdev->dev, dma_addr, size,
61 (enum dma_data_direction) direction);
62}
63
64#define pci_map_page(dev, page, off, size, dir) \
65 pci_map_single(dev, (page_address(page) + (off)), size, dir)
66#define pci_unmap_page(dev,addr,sz,dir) \
67 pci_unmap_single(dev,addr,sz,dir)
68
69/* pci_unmap_{single,page} is not a nop, thus... */ 37/* pci_unmap_{single,page} is not a nop, thus... */
70#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ 38#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
71 dma_addr_t ADDR_NAME; 39 dma_addr_t ADDR_NAME;
@@ -80,57 +48,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
80#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ 48#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
81 (((PTR)->LEN_NAME) = (VAL)) 49 (((PTR)->LEN_NAME) = (VAL))
82 50
83static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
84 int nents, int direction)
85{
86 return dma_map_sg(&pdev->dev, sg, nents,
87 (enum dma_data_direction) direction);
88}
89
90static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
91 int nents, int direction)
92{
93 dma_unmap_sg(&pdev->dev, sg, nents,
94 (enum dma_data_direction) direction);
95}
96
97static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
98 dma_addr_t dma_handle,
99 size_t size, int direction)
100{
101 dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
102 (enum dma_data_direction) direction);
103}
104
105static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
106 dma_addr_t dma_handle,
107 size_t size, int direction)
108{
109 /* No flushing needed to sync cpu writes to the device. */
110}
111
112static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
113 struct scatterlist *sg,
114 int nents, int direction)
115{
116 dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
117 (enum dma_data_direction) direction);
118}
119
120static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
121 struct scatterlist *sg,
122 int nelems, int direction)
123{
124 /* No flushing needed to sync cpu writes to the device. */
125}
126
127/* Return whether the given PCI device DMA address mask can
128 * be supported properly. For example, if your device can
129 * only drive the low 24-bits during PCI bus mastering, then
130 * you would pass 0x00ffffff as the mask to this function.
131 */
132extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
133
134/* PCI IOMMU mapping bypass support. */ 51/* PCI IOMMU mapping bypass support. */
135 52
136/* PCI 64-bit addressing works for all slots on all controller 53/* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +57,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
140#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) 57#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
141#define PCI64_ADDR_BASE 0xfffc000000000000UL 58#define PCI64_ADDR_BASE 0xfffc000000000000UL
142 59
143static inline int pci_dma_mapping_error(struct pci_dev *pdev,
144 dma_addr_t dma_addr)
145{
146 return dma_mapping_error(&pdev->dev, dma_addr);
147}
148
149#ifdef CONFIG_PCI 60#ifdef CONFIG_PCI
150static inline void pci_dma_burst_advice(struct pci_dev *pdev, 61static inline void pci_dma_burst_advice(struct pci_dev *pdev,
151 enum pci_dma_burst_strategy *strat, 62 enum pci_dma_burst_strategy *strat,
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
new file mode 100644
index 000000000000..7e2669894ce8
--- /dev/null
+++ b/arch/sparc/include/asm/perf_event.h
@@ -0,0 +1,14 @@
1#ifndef __ASM_SPARC_PERF_EVENT_H
2#define __ASM_SPARC_PERF_EVENT_H
3
4extern void set_perf_event_pending(void);
5
6#define PERF_EVENT_INDEX_OFFSET 0
7
8#ifdef CONFIG_PERF_EVENTS
9extern void init_hw_perf_events(void);
10#else
11static inline void init_hw_perf_events(void) { }
12#endif
13
14#endif
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 808555fc1d58..1407c07bdade 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -267,6 +267,7 @@ static inline void srmmu_flush_tlb_page(unsigned long page)
267 267
268} 268}
269 269
270#ifndef CONFIG_SPARC_LEON
270static inline unsigned long srmmu_hwprobe(unsigned long vaddr) 271static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
271{ 272{
272 unsigned long retval; 273 unsigned long retval;
@@ -278,6 +279,9 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
278 279
279 return retval; 280 return retval;
280} 281}
282#else
283#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK)
284#endif
281 285
282static inline int 286static inline int
283srmmu_get_pte (unsigned long addr) 287srmmu_get_pte (unsigned long addr)
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index be8d7aaeb60d..82a190d7efc1 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -118,5 +118,8 @@ extern struct device_node *of_console_device;
118extern char *of_console_path; 118extern char *of_console_path;
119extern char *of_console_options; 119extern char *of_console_options;
120 120
121extern void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
122extern char *build_full_name(struct device_node *dp);
123
121#endif /* __KERNEL__ */ 124#endif /* __KERNEL__ */
122#endif /* _SPARC_PROM_H */ 125#endif /* _SPARC_PROM_H */
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index becb6bf353a9..f49e11cd4ded 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -36,7 +36,6 @@ extern int sparc64_multi_core;
36 36
37extern void arch_send_call_function_single_ipi(int cpu); 37extern void arch_send_call_function_single_ipi(int cpu);
38extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 38extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
39#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
40 39
41/* 40/*
42 * General functions that each host system must provide. 41 * General functions that each host system must provide.
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h
index 982a12f959f4..3a5ae3d12088 100644
--- a/arch/sparc/include/asm/socket.h
+++ b/arch/sparc/include/asm/socket.h
@@ -29,6 +29,9 @@
29#define SO_RCVBUFFORCE 0x100b 29#define SO_RCVBUFFORCE 0x100b
30#define SO_ERROR 0x1007 30#define SO_ERROR 0x1007
31#define SO_TYPE 0x1008 31#define SO_TYPE 0x1008
32#define SO_PROTOCOL 0x1028
33#define SO_DOMAIN 0x1029
34
32 35
33/* Linux specific, keep the same. */ 36/* Linux specific, keep the same. */
34#define SO_NO_CHECK 0x000b 37#define SO_NO_CHECK 0x000b
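SO_PROTOCOL and SO_DOMAIN are read-only options reported by getsockopt(). A small userspace sketch of querying the first one; error handling trimmed:

#include <sys/socket.h>

static int socket_protocol(int fd)
{
	int proto = 0;
	socklen_t len = sizeof(proto);

	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len) < 0)
		return -1;
	return proto;	/* e.g. IPPROTO_TCP for a TCP socket */
}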
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 46f91ab66a50..857630cff636 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
76 * 76 *
77 * Unfortunately this scheme limits us to ~16,000,000 cpus. 77 * Unfortunately this scheme limits us to ~16,000,000 cpus.
78 */ 78 */
79static inline void __read_lock(raw_rwlock_t *rw) 79static inline void arch_read_lock(raw_rwlock_t *rw)
80{ 80{
81 register raw_rwlock_t *lp asm("g1"); 81 register raw_rwlock_t *lp asm("g1");
82 lp = rw; 82 lp = rw;
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw)
92#define __raw_read_lock(lock) \ 92#define __raw_read_lock(lock) \
93do { unsigned long flags; \ 93do { unsigned long flags; \
94 local_irq_save(flags); \ 94 local_irq_save(flags); \
95 __read_lock(lock); \ 95 arch_read_lock(lock); \
96 local_irq_restore(flags); \ 96 local_irq_restore(flags); \
97} while(0) 97} while(0)
98 98
99static inline void __read_unlock(raw_rwlock_t *rw) 99static inline void arch_read_unlock(raw_rwlock_t *rw)
100{ 100{
101 register raw_rwlock_t *lp asm("g1"); 101 register raw_rwlock_t *lp asm("g1");
102 lp = rw; 102 lp = rw;
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw)
112#define __raw_read_unlock(lock) \ 112#define __raw_read_unlock(lock) \
113do { unsigned long flags; \ 113do { unsigned long flags; \
114 local_irq_save(flags); \ 114 local_irq_save(flags); \
115 __read_unlock(lock); \ 115 arch_read_unlock(lock); \
116 local_irq_restore(flags); \ 116 local_irq_restore(flags); \
117} while(0) 117} while(0)
118 118
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
150 return (val == 0); 150 return (val == 0);
151} 151}
152 152
153static inline int __read_trylock(raw_rwlock_t *rw) 153static inline int arch_read_trylock(raw_rwlock_t *rw)
154{ 154{
155 register raw_rwlock_t *lp asm("g1"); 155 register raw_rwlock_t *lp asm("g1");
156 register int res asm("o0"); 156 register int res asm("o0");
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw)
169({ unsigned long flags; \ 169({ unsigned long flags; \
170 int res; \ 170 int res; \
171 local_irq_save(flags); \ 171 local_irq_save(flags); \
172 res = __read_trylock(lock); \ 172 res = arch_read_trylock(lock); \
173 local_irq_restore(flags); \ 173 local_irq_restore(flags); \
174 res; \ 174 res; \
175}) 175})
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index f6b2b92ad8d2..43e514783582 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
92 92
93/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ 93/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
94 94
95static void inline __read_lock(raw_rwlock_t *lock) 95static void inline arch_read_lock(raw_rwlock_t *lock)
96{ 96{
97 unsigned long tmp1, tmp2; 97 unsigned long tmp1, tmp2;
98 98
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock)
115 : "memory"); 115 : "memory");
116} 116}
117 117
118static int inline __read_trylock(raw_rwlock_t *lock) 118static int inline arch_read_trylock(raw_rwlock_t *lock)
119{ 119{
120 int tmp1, tmp2; 120 int tmp1, tmp2;
121 121
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock)
136 return tmp1; 136 return tmp1;
137} 137}
138 138
139static void inline __read_unlock(raw_rwlock_t *lock) 139static void inline arch_read_unlock(raw_rwlock_t *lock)
140{ 140{
141 unsigned long tmp1, tmp2; 141 unsigned long tmp1, tmp2;
142 142
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock)
152 : "memory"); 152 : "memory");
153} 153}
154 154
155static void inline __write_lock(raw_rwlock_t *lock) 155static void inline arch_write_lock(raw_rwlock_t *lock)
156{ 156{
157 unsigned long mask, tmp1, tmp2; 157 unsigned long mask, tmp1, tmp2;
158 158
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock)
177 : "memory"); 177 : "memory");
178} 178}
179 179
180static void inline __write_unlock(raw_rwlock_t *lock) 180static void inline arch_write_unlock(raw_rwlock_t *lock)
181{ 181{
182 __asm__ __volatile__( 182 __asm__ __volatile__(
183" stw %%g0, [%0]" 183" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock)
186 : "memory"); 186 : "memory");
187} 187}
188 188
189static int inline __write_trylock(raw_rwlock_t *lock) 189static int inline arch_write_trylock(raw_rwlock_t *lock)
190{ 190{
191 unsigned long mask, tmp1, tmp2, result; 191 unsigned long mask, tmp1, tmp2, result;
192 192
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock)
210 return result; 210 return result;
211} 211}
212 212
213#define __raw_read_lock(p) __read_lock(p) 213#define __raw_read_lock(p) arch_read_lock(p)
214#define __raw_read_lock_flags(p, f) __read_lock(p) 214#define __raw_read_lock_flags(p, f) arch_read_lock(p)
215#define __raw_read_trylock(p) __read_trylock(p) 215#define __raw_read_trylock(p) arch_read_trylock(p)
216#define __raw_read_unlock(p) __read_unlock(p) 216#define __raw_read_unlock(p) arch_read_unlock(p)
217#define __raw_write_lock(p) __write_lock(p) 217#define __raw_write_lock(p) arch_write_lock(p)
218#define __raw_write_lock_flags(p, f) __write_lock(p) 218#define __raw_write_lock_flags(p, f) arch_write_lock(p)
219#define __raw_write_unlock(p) __write_unlock(p) 219#define __raw_write_unlock(p) arch_write_unlock(p)
220#define __raw_write_trylock(p) __write_trylock(p) 220#define __raw_write_trylock(p) arch_write_trylock(p)
221 221
222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
223#define __raw_write_can_lock(rw) (!(rw)->lock) 223#define __raw_write_can_lock(rw) (!(rw)->lock)
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h
index 751c8c17f5a0..890036b3689a 100644
--- a/arch/sparc/include/asm/system_32.h
+++ b/arch/sparc/include/asm/system_32.h
@@ -32,6 +32,7 @@ enum sparc_cpu {
32 sun4u = 0x05, /* V8 ploos ploos */ 32 sun4u = 0x05, /* V8 ploos ploos */
33 sun_unknown = 0x06, 33 sun_unknown = 0x06,
34 ap1000 = 0x07, /* almost a sun4m */ 34 ap1000 = 0x07, /* almost a sun4m */
35 sparc_leon = 0x08, /* Leon SoC */
35}; 36};
36 37
37/* Really, userland should not be looking at any of this... */ 38/* Really, userland should not be looking at any of this... */
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 6c077816ab28..25e848f0cad7 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -29,6 +29,10 @@ enum sparc_cpu {
29/* This cannot ever be a sun4c :) That's just history. */ 29/* This cannot ever be a sun4c :) That's just history. */
30#define ARCH_SUN4C 0 30#define ARCH_SUN4C 0
31 31
32extern const char *sparc_cpu_type;
33extern const char *sparc_fpu_type;
34extern const char *sparc_pmu_type;
35
32extern char reboot_command[]; 36extern char reboot_command[];
33 37
34/* These are here in an effort to more fully work around Spitfire Errata 38/* These are here in an effort to more fully work around Spitfire Errata
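The three strings are exported so code outside the cpu probe can report the detected CPU, FPU and PMU names. A hedged sketch of typical use from a /proc style show routine; the function itself is illustrative:

#include <linux/seq_file.h>
#include <asm/system.h>

static int example_show_cpu(struct seq_file *m, void *v)
{
	seq_printf(m, "cpu\t\t: %s\nfpu\t\t: %s\npmu\t\t: %s\n",
		   sparc_cpu_type, sparc_fpu_type, sparc_pmu_type);
	return 0;
}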
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index e5ea8d332421..600a79035fa1 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -12,22 +12,8 @@ static inline int cpu_to_node(int cpu)
12 12
13#define parent_node(node) (node) 13#define parent_node(node) (node)
14 14
15static inline cpumask_t node_to_cpumask(int node)
16{
17 return numa_cpumask_lookup_table[node];
18}
19#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 15#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
20 16
21/*
22 * Returns a pointer to the cpumask of CPUs on Node 'node'.
23 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
24 */
25#define node_to_cpumask_ptr(v, node) \
26 cpumask_t *v = &(numa_cpumask_lookup_table[node])
27
28#define node_to_cpumask_ptr_next(v, node) \
29 v = &(numa_cpumask_lookup_table[node])
30
31struct pci_bus; 17struct pci_bus;
32#ifdef CONFIG_PCI 18#ifdef CONFIG_PCI
33extern int pcibus_to_node(struct pci_bus *pbus); 19extern int pcibus_to_node(struct pci_bus *pbus);
@@ -52,13 +38,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
52 .busy_idx = 3, \ 38 .busy_idx = 3, \
53 .idle_idx = 2, \ 39 .idle_idx = 2, \
54 .newidle_idx = 0, \ 40 .newidle_idx = 0, \
55 .wake_idx = 1, \ 41 .wake_idx = 0, \
56 .forkexec_idx = 1, \ 42 .forkexec_idx = 0, \
57 .flags = SD_LOAD_BALANCE \ 43 .flags = SD_LOAD_BALANCE \
58 | SD_BALANCE_FORK \ 44 | SD_BALANCE_FORK \
59 | SD_BALANCE_EXEC \ 45 | SD_BALANCE_EXEC \
60 | SD_SERIALIZE \ 46 | SD_SERIALIZE, \
61 | SD_WAKE_BALANCE, \
62 .last_balance = jiffies, \ 47 .last_balance = jiffies, \
63 .balance_interval = 1, \ 48 .balance_interval = 1, \
64} 49}
@@ -72,8 +57,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
72#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
73#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) 58#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
74#define topology_core_id(cpu) (cpu_data(cpu).core_id) 59#define topology_core_id(cpu) (cpu_data(cpu).core_id)
75#define topology_core_siblings(cpu) (cpu_core_map[cpu])
76#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
77#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 60#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
78#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 61#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
79#define mc_capable() (sparc64_multi_core) 62#define mc_capable() (sparc64_multi_core)
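Callers of the removed node_to_cpumask() and node_to_cpumask_ptr() helpers are expected to move to cpumask_of_node(), which hands back a pointer instead of copying the whole mask. A minimal sketch of the converted idiom; the walk function is invented:

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_walk_node(int nid)
{
	int cpu;
	const struct cpumask *mask = cpumask_of_node(nid);

	for_each_cpu(cpu, mask)
		pr_info("node %d: cpu %d\n", nid, cpu);
}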
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h
index de671d73baed..09c79a9c8516 100644
--- a/arch/sparc/include/asm/types.h
+++ b/arch/sparc/include/asm/types.h
@@ -8,9 +8,8 @@
8 * need to be careful to avoid a name clashes. 8 * need to be careful to avoid a name clashes.
9 */ 9 */
10 10
11#if defined(__sparc__) && defined(__arch64__) 11#if defined(__sparc__)
12 12
13/*** SPARC 64 bit ***/
14#include <asm-generic/int-ll64.h> 13#include <asm-generic/int-ll64.h>
15 14
16#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
@@ -26,33 +25,21 @@ typedef unsigned short umode_t;
26/* Dma addresses come in generic and 64-bit flavours. */ 25/* Dma addresses come in generic and 64-bit flavours. */
27 26
28typedef u32 dma_addr_t; 27typedef u32 dma_addr_t;
29typedef u64 dma64_addr_t;
30 28
31#endif /* __ASSEMBLY__ */ 29#if defined(__arch64__)
32 30
33#endif /* __KERNEL__ */ 31/*** SPARC 64 bit ***/
32typedef u64 dma64_addr_t;
34#else 33#else
35
36/*** SPARC 32 bit ***/ 34/*** SPARC 32 bit ***/
37#include <asm-generic/int-ll64.h>
38
39#ifndef __ASSEMBLY__
40
41typedef unsigned short umode_t;
42
43#endif /* __ASSEMBLY__ */
44
45#ifdef __KERNEL__
46
47#ifndef __ASSEMBLY__
48
49typedef u32 dma_addr_t;
50typedef u32 dma64_addr_t; 35typedef u32 dma64_addr_t;
51 36
37#endif /* defined(__arch64__) */
38
52#endif /* __ASSEMBLY__ */ 39#endif /* __ASSEMBLY__ */
53 40
54#endif /* __KERNEL__ */ 41#endif /* __KERNEL__ */
55 42
56#endif /* defined(__sparc__) && defined(__arch64__) */ 43#endif /* defined(__sparc__) */
57 44
58#endif /* defined(_SPARC_TYPES_H) */ 45#endif /* defined(_SPARC_TYPES_H) */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index a38c03238918..9ea271e19c70 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -7,8 +7,8 @@
7 7
8#ifdef __KERNEL__ 8#ifdef __KERNEL__
9#include <linux/compiler.h> 9#include <linux/compiler.h>
10#include <linux/sched.h>
11#include <linux/string.h> 10#include <linux/string.h>
11#include <linux/thread_info.h>
12#include <asm/asi.h> 12#include <asm/asi.h>
13#include <asm/system.h> 13#include <asm/system.h>
14#include <asm/spitfire.h> 14#include <asm/spitfire.h>
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b2c406de7d4f..42f2316c3eaa 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -395,8 +395,9 @@
395#define __NR_preadv 324 395#define __NR_preadv 324
396#define __NR_pwritev 325 396#define __NR_pwritev 325
397#define __NR_rt_tgsigqueueinfo 326 397#define __NR_rt_tgsigqueueinfo 326
398#define __NR_perf_event_open 327
398 399
399#define NR_SYSCALLS 327 400#define NR_SYSCALLS 328
400 401
401#ifdef __32bit_syscall_numbers__ 402#ifdef __32bit_syscall_numbers__
402/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 403/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
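Syscall number 327 becomes perf_event_open and NR_SYSCALLS grows to match. A hedged userspace sketch of invoking it by number; the perf_event_attr setup is omitted and the wrapper name follows the usual convention rather than anything defined here:

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}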
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index d4de32f0f8af..9d83d3bcb494 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -258,8 +258,6 @@ static inline void *vio_dring_entry(struct vio_dring_state *dr,
258static inline u32 vio_dring_avail(struct vio_dring_state *dr, 258static inline u32 vio_dring_avail(struct vio_dring_state *dr,
259 unsigned int ring_size) 259 unsigned int ring_size)
260{ 260{
261 BUILD_BUG_ON(!is_power_of_2(ring_size));
262
263 return (dr->pending - 261 return (dr->pending -
264 ((dr->prod - dr->cons) & (ring_size - 1))); 262 ((dr->prod - dr->cons) & (ring_size - 1)));
265} 263}
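BUILD_BUG_ON() only works on compile-time constants, which ring_size is not guaranteed to be at every call site, hence its removal; the arithmetic still assumes a power-of-two ring because occupancy is computed with a mask rather than a modulo. Illustrative only:

#include <linux/types.h>

/* valid only when ring_size is a power of two */
static u32 example_ring_used(u32 prod, u32 cons, u32 ring_size)
{
	return (prod - cons) & (ring_size - 1);
}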
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 475ce4696acd..5b47fab9966e 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -7,7 +7,11 @@ ccflags-y := -Werror
7 7
8extra-y := head_$(BITS).o 8extra-y := head_$(BITS).o
9extra-y += init_task.o 9extra-y += init_task.o
10extra-y += vmlinux.lds 10
11# Undefine sparc when processing vmlinux.lds - the symbol is used in the script
12# And teach CPP we are doing $(BITS) builds (for this case)
13CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
14extra-y += vmlinux.lds
11 15
12obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o 16obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
13obj-$(CONFIG_SPARC32) += etrap_32.o 17obj-$(CONFIG_SPARC32) += etrap_32.o
@@ -41,6 +45,8 @@ obj-y += of_device_common.o
41obj-y += of_device_$(BITS).o 45obj-y += of_device_$(BITS).o
42obj-$(CONFIG_SPARC64) += prom_irqtrans.o 46obj-$(CONFIG_SPARC64) += prom_irqtrans.o
43 47
48obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
49
44obj-$(CONFIG_SPARC64) += reboot.o 50obj-$(CONFIG_SPARC64) += reboot.o
45obj-$(CONFIG_SPARC64) += sysfs.o 51obj-$(CONFIG_SPARC64) += sysfs.o
46obj-$(CONFIG_SPARC64) += iommu.o 52obj-$(CONFIG_SPARC64) += iommu.o
@@ -61,7 +67,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
61obj-$(CONFIG_SPARC32) += devres.o 67obj-$(CONFIG_SPARC32) += devres.o
62devres-y := ../../../kernel/irq/devres.o 68devres-y := ../../../kernel/irq/devres.o
63 69
64obj-$(CONFIG_SPARC32) += dma.o 70obj-y += dma.o
65 71
66obj-$(CONFIG_SPARC32_PCI) += pcic.o 72obj-$(CONFIG_SPARC32_PCI) += pcic.o
67 73
@@ -101,3 +107,6 @@ obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
101obj-$(CONFIG_AUDIT) += audit.o 107obj-$(CONFIG_AUDIT) += audit.o
102audit--$(CONFIG_AUDIT) := compat_audit.o 108audit--$(CONFIG_AUDIT) := compat_audit.o
103obj-$(CONFIG_COMPAT) += $(audit--y) 109obj-$(CONFIG_COMPAT) += $(audit--y)
110
111pc--$(CONFIG_PERF_EVENTS) := perf_event.o
112obj-$(CONFIG_SPARC64) += $(pc--y)
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index d85c3dc4953a..1446df90ef85 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -312,7 +312,12 @@ void __cpuinit cpu_probe(void)
312 312
313 psr = get_psr(); 313 psr = get_psr();
314 put_psr(psr | PSR_EF); 314 put_psr(psr | PSR_EF);
315#ifdef CONFIG_SPARC_LEON
316 fpu_vers = 7;
317#else
315 fpu_vers = ((get_fsr() >> 17) & 0x7); 318 fpu_vers = ((get_fsr() >> 17) & 0x7);
319#endif
320
316 put_psr(psr); 321 put_psr(psr);
317 322
318 set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); 323 set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index 524c32f97c55..e1ba8ee21b9a 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -1,178 +1,13 @@
1/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc.
2 *
3 * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h> 1#include <linux/kernel.h>
7#include <linux/module.h> 2#include <linux/module.h>
8#include <linux/dma-mapping.h> 3#include <linux/dma-mapping.h>
9#include <linux/scatterlist.h> 4#include <linux/dma-debug.h>
10#include <linux/mm.h>
11
12#ifdef CONFIG_PCI
13#include <linux/pci.h>
14#endif
15 5
16#include "dma.h" 6#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)
17 7
18int dma_supported(struct device *dev, u64 mask) 8static int __init dma_init(void)
19{ 9{
20#ifdef CONFIG_PCI 10 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
21 if (dev->bus == &pci_bus_type)
22 return pci_dma_supported(to_pci_dev(dev), mask);
23#endif
24 return 0; 11 return 0;
25} 12}
26EXPORT_SYMBOL(dma_supported); 13fs_initcall(dma_init);
27
28int dma_set_mask(struct device *dev, u64 dma_mask)
29{
30#ifdef CONFIG_PCI
31 if (dev->bus == &pci_bus_type)
32 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
33#endif
34 return -EOPNOTSUPP;
35}
36EXPORT_SYMBOL(dma_set_mask);
37
38static void *dma32_alloc_coherent(struct device *dev, size_t size,
39 dma_addr_t *dma_handle, gfp_t flag)
40{
41#ifdef CONFIG_PCI
42 if (dev->bus == &pci_bus_type)
43 return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
44#endif
45 return sbus_alloc_consistent(dev, size, dma_handle);
46}
47
48static void dma32_free_coherent(struct device *dev, size_t size,
49 void *cpu_addr, dma_addr_t dma_handle)
50{
51#ifdef CONFIG_PCI
52 if (dev->bus == &pci_bus_type) {
53 pci_free_consistent(to_pci_dev(dev), size,
54 cpu_addr, dma_handle);
55 return;
56 }
57#endif
58 sbus_free_consistent(dev, size, cpu_addr, dma_handle);
59}
60
61static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
62 unsigned long offset, size_t size,
63 enum dma_data_direction direction)
64{
65#ifdef CONFIG_PCI
66 if (dev->bus == &pci_bus_type)
67 return pci_map_page(to_pci_dev(dev), page, offset,
68 size, (int)direction);
69#endif
70 return sbus_map_single(dev, page_address(page) + offset,
71 size, (int)direction);
72}
73
74static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
75 size_t size, enum dma_data_direction direction)
76{
77#ifdef CONFIG_PCI
78 if (dev->bus == &pci_bus_type) {
79 pci_unmap_page(to_pci_dev(dev), dma_address,
80 size, (int)direction);
81 return;
82 }
83#endif
84 sbus_unmap_single(dev, dma_address, size, (int)direction);
85}
86
87static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
88 int nents, enum dma_data_direction direction)
89{
90#ifdef CONFIG_PCI
91 if (dev->bus == &pci_bus_type)
92 return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
93#endif
94 return sbus_map_sg(dev, sg, nents, direction);
95}
96
97void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
98 int nents, enum dma_data_direction direction)
99{
100#ifdef CONFIG_PCI
101 if (dev->bus == &pci_bus_type) {
102 pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
103 return;
104 }
105#endif
106 sbus_unmap_sg(dev, sg, nents, (int)direction);
107}
108
109static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
110 size_t size,
111 enum dma_data_direction direction)
112{
113#ifdef CONFIG_PCI
114 if (dev->bus == &pci_bus_type) {
115 pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
116 size, (int)direction);
117 return;
118 }
119#endif
120 sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
121}
122
123static void dma32_sync_single_for_device(struct device *dev,
124 dma_addr_t dma_handle, size_t size,
125 enum dma_data_direction direction)
126{
127#ifdef CONFIG_PCI
128 if (dev->bus == &pci_bus_type) {
129 pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
130 size, (int)direction);
131 return;
132 }
133#endif
134 sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
135}
136
137static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
138 int nelems, enum dma_data_direction direction)
139{
140#ifdef CONFIG_PCI
141 if (dev->bus == &pci_bus_type) {
142 pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
143 nelems, (int)direction);
144 return;
145 }
146#endif
147 BUG();
148}
149
150static void dma32_sync_sg_for_device(struct device *dev,
151 struct scatterlist *sg, int nelems,
152 enum dma_data_direction direction)
153{
154#ifdef CONFIG_PCI
155 if (dev->bus == &pci_bus_type) {
156 pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
157 nelems, (int)direction);
158 return;
159 }
160#endif
161 BUG();
162}
163
164static const struct dma_ops dma32_dma_ops = {
165 .alloc_coherent = dma32_alloc_coherent,
166 .free_coherent = dma32_free_coherent,
167 .map_page = dma32_map_page,
168 .unmap_page = dma32_unmap_page,
169 .map_sg = dma32_map_sg,
170 .unmap_sg = dma32_unmap_sg,
171 .sync_single_for_cpu = dma32_sync_single_for_cpu,
172 .sync_single_for_device = dma32_sync_single_for_device,
173 .sync_sg_for_cpu = dma32_sync_sg_for_cpu,
174 .sync_sg_for_device = dma32_sync_sg_for_device,
175};
176
177const struct dma_ops *dma_ops = &dma32_dma_ops;
178EXPORT_SYMBOL(dma_ops);
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h
deleted file mode 100644
index f8d8951adb53..000000000000
--- a/arch/sparc/kernel/dma.h
+++ /dev/null
@@ -1,14 +0,0 @@
1void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp);
2void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba);
3dma_addr_t sbus_map_single(struct device *dev, void *va,
4 size_t len, int direction);
5void sbus_unmap_single(struct device *dev, dma_addr_t ba,
6 size_t n, int direction);
7int sbus_map_sg(struct device *dev, struct scatterlist *sg,
8 int n, int direction);
9void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
10 int n, int direction);
11void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
12 size_t size, int direction);
13void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba,
14 size_t size, int direction);
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 6b4d8acc4c83..439d82a95ac9 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -809,6 +809,11 @@ found_version:
809 nop 809 nop
810 810
811got_prop: 811got_prop:
812#ifdef CONFIG_SPARC_LEON
813 /* no cpu-type check is needed, it is a SPARC-LEON */
814 ba sun4c_continue_boot
815 nop
816#endif
812 set cputypval, %o2 817 set cputypval, %o2
813 ldub [%o2 + 0x4], %l1 818 ldub [%o2 + 0x4], %l1
814 819
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
index 57922f69c3f7..52a15fe2db19 100644
--- a/arch/sparc/kernel/idprom.c
+++ b/arch/sparc/kernel/idprom.c
@@ -31,6 +31,8 @@ static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
31{ .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) }, 31{ .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) },
32{ .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) }, 32{ .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) },
33{ .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) }, 33{ .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) },
34/* Now Leon */
35{ .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) },
34/* Now, Sun4c's */ 36/* Now, Sun4c's */
35{ .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) }, 37{ .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) },
36{ .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) }, 38{ .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) },
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index 28125c5b3d3c..5fe3d65581f7 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -18,6 +18,5 @@ EXPORT_SYMBOL(init_task);
18 * If this is not aligned on an 8k boundary, then you should change code 18 * If this is not aligned on an 8k boundary, then you should change code
19 * in etrap.S which assumes it. 19 * in etrap.S which assumes it.
20 */ 20 */
21union thread_union init_thread_union 21union thread_union init_thread_union __init_task_data =
22 __attribute__((section (".data.init_task"))) 22 { INIT_THREAD_INFO(init_task) };
23 = { INIT_THREAD_INFO(init_task) };
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 0aeaefe696b9..7690cc219ecc 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
353 353
354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, 354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
355 unsigned long offset, size_t sz, 355 unsigned long offset, size_t sz,
356 enum dma_data_direction direction) 356 enum dma_data_direction direction,
357 struct dma_attrs *attrs)
357{ 358{
358 struct iommu *iommu; 359 struct iommu *iommu;
359 struct strbuf *strbuf; 360 struct strbuf *strbuf;
@@ -474,7 +475,8 @@ do_flush_sync:
474} 475}
475 476
476static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, 477static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
477 size_t sz, enum dma_data_direction direction) 478 size_t sz, enum dma_data_direction direction,
479 struct dma_attrs *attrs)
478{ 480{
479 struct iommu *iommu; 481 struct iommu *iommu;
480 struct strbuf *strbuf; 482 struct strbuf *strbuf;
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
520} 522}
521 523
522static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, 524static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
523 int nelems, enum dma_data_direction direction) 525 int nelems, enum dma_data_direction direction,
526 struct dma_attrs *attrs)
524{ 527{
525 struct scatterlist *s, *outs, *segstart; 528 struct scatterlist *s, *outs, *segstart;
526 unsigned long flags, handle, prot, ctx; 529 unsigned long flags, handle, prot, ctx;
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
691} 694}
692 695
693static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, 696static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
694 int nelems, enum dma_data_direction direction) 697 int nelems, enum dma_data_direction direction,
698 struct dma_attrs *attrs)
695{ 699{
696 unsigned long flags, ctx; 700 unsigned long flags, ctx;
697 struct scatterlist *sg; 701 struct scatterlist *sg;
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
822 spin_unlock_irqrestore(&iommu->lock, flags); 826 spin_unlock_irqrestore(&iommu->lock, flags);
823} 827}
824 828
825static const struct dma_ops sun4u_dma_ops = { 829static struct dma_map_ops sun4u_dma_ops = {
826 .alloc_coherent = dma_4u_alloc_coherent, 830 .alloc_coherent = dma_4u_alloc_coherent,
827 .free_coherent = dma_4u_free_coherent, 831 .free_coherent = dma_4u_free_coherent,
828 .map_page = dma_4u_map_page, 832 .map_page = dma_4u_map_page,
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = {
833 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, 837 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
834}; 838};
835 839
836const struct dma_ops *dma_ops = &sun4u_dma_ops; 840struct dma_map_ops *dma_ops = &sun4u_dma_ops;
837EXPORT_SYMBOL(dma_ops); 841EXPORT_SYMBOL(dma_ops);
838 842
843extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
844
839int dma_supported(struct device *dev, u64 device_mask) 845int dma_supported(struct device *dev, u64 device_mask)
840{ 846{
841 struct iommu *iommu = dev->archdata.iommu; 847 struct iommu *iommu = dev->archdata.iommu;
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask)
849 855
850#ifdef CONFIG_PCI 856#ifdef CONFIG_PCI
851 if (dev->bus == &pci_bus_type) 857 if (dev->bus == &pci_bus_type)
852 return pci_dma_supported(to_pci_dev(dev), device_mask); 858 return pci64_dma_supported(to_pci_dev(dev), device_mask);
853#endif 859#endif
854 860
855 return 0; 861 return 0;
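dma_ops now carries the generic struct dma_map_ops type and the map/unmap callbacks gain a struct dma_attrs argument, so the common dma-mapping layer can dispatch to them directly. A rough sketch of that dispatch, simplified from the generic helpers rather than copied from them:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static inline dma_addr_t example_map_single(struct device *dev, void *cpu_addr,
					    size_t size,
					    enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, virt_to_page(cpu_addr),
			     offset_in_page(cpu_addr), size, dir, NULL);
}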
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 87ea0d03d975..9f61fd8cbb7b 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -35,6 +35,7 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/pci.h> /* struct pci_dev */ 36#include <linux/pci.h> /* struct pci_dev */
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/seq_file.h>
38#include <linux/scatterlist.h> 39#include <linux/scatterlist.h>
39#include <linux/of_device.h> 40#include <linux/of_device.h>
40 41
@@ -48,8 +49,6 @@
48#include <asm/iommu.h> 49#include <asm/iommu.h>
49#include <asm/io-unit.h> 50#include <asm/io-unit.h>
50 51
51#include "dma.h"
52
53#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ 52#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
54 53
55static struct resource *_sparc_find_resource(struct resource *r, 54static struct resource *_sparc_find_resource(struct resource *r,
@@ -246,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
246 * Typically devices use them for control blocks. 245 * Typically devices use them for control blocks.
247 * CPU may access them without any explicit flushing. 246 * CPU may access them without any explicit flushing.
248 */ 247 */
249void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) 248static void *sbus_alloc_coherent(struct device *dev, size_t len,
249 dma_addr_t *dma_addrp, gfp_t gfp)
250{ 250{
251 struct of_device *op = to_of_device(dev); 251 struct of_device *op = to_of_device(dev);
252 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; 252 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -299,7 +299,8 @@ err_nopages:
299 return NULL; 299 return NULL;
300} 300}
301 301
302void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) 302static void sbus_free_coherent(struct device *dev, size_t n, void *p,
303 dma_addr_t ba)
303{ 304{
304 struct resource *res; 305 struct resource *res;
305 struct page *pgv; 306 struct page *pgv;
@@ -317,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
317 318
318 n = (n + PAGE_SIZE-1) & PAGE_MASK; 319 n = (n + PAGE_SIZE-1) & PAGE_MASK;
319 if ((res->end-res->start)+1 != n) { 320 if ((res->end-res->start)+1 != n) {
320 printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", 321 printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
321 (long)((res->end-res->start)+1), n); 322 (long)((res->end-res->start)+1), n);
322 return; 323 return;
323 } 324 }
@@ -337,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
337 * CPU view of this memory may be inconsistent with 338 * CPU view of this memory may be inconsistent with
338 * a device view and explicit flushing is necessary. 339 * a device view and explicit flushing is necessary.
339 */ 340 */
340dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) 341static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
342 unsigned long offset, size_t len,
343 enum dma_data_direction dir,
344 struct dma_attrs *attrs)
341{ 345{
346 void *va = page_address(page) + offset;
347
342 /* XXX why are some lengths signed, others unsigned? */ 348 /* XXX why are some lengths signed, others unsigned? */
343 if (len <= 0) { 349 if (len <= 0) {
344 return 0; 350 return 0;
@@ -350,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
350 return mmu_get_scsi_one(dev, va, len); 356 return mmu_get_scsi_one(dev, va, len);
351} 357}
352 358
353void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) 359static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
360 enum dma_data_direction dir, struct dma_attrs *attrs)
354{ 361{
355 mmu_release_scsi_one(dev, ba, n); 362 mmu_release_scsi_one(dev, ba, n);
356} 363}
357 364
358int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) 365static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
366 enum dma_data_direction dir, struct dma_attrs *attrs)
359{ 367{
360 mmu_get_scsi_sgl(dev, sg, n); 368 mmu_get_scsi_sgl(dev, sg, n);
361 369
@@ -366,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
366 return n; 374 return n;
367} 375}
368 376
369void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) 377static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
378 enum dma_data_direction dir, struct dma_attrs *attrs)
370{ 379{
371 mmu_release_scsi_sgl(dev, sg, n); 380 mmu_release_scsi_sgl(dev, sg, n);
372} 381}
373 382
374void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) 383static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
384 int n, enum dma_data_direction dir)
375{ 385{
386 BUG();
376} 387}
377 388
378void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) 389static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
390 int n, enum dma_data_direction dir)
379{ 391{
392 BUG();
380} 393}
381 394
395struct dma_map_ops sbus_dma_ops = {
396 .alloc_coherent = sbus_alloc_coherent,
397 .free_coherent = sbus_free_coherent,
398 .map_page = sbus_map_page,
399 .unmap_page = sbus_unmap_page,
400 .map_sg = sbus_map_sg,
401 .unmap_sg = sbus_unmap_sg,
402 .sync_sg_for_cpu = sbus_sync_sg_for_cpu,
403 .sync_sg_for_device = sbus_sync_sg_for_device,
404};
405
406struct dma_map_ops *dma_ops = &sbus_dma_ops;
407EXPORT_SYMBOL(dma_ops);
408
382static int __init sparc_register_ioport(void) 409static int __init sparc_register_ioport(void)
383{ 410{
384 register_proc_sparc_ioport(); 411 register_proc_sparc_ioport();
@@ -395,7 +422,8 @@ arch_initcall(sparc_register_ioport);
395/* Allocate and map kernel buffer using consistent mode DMA for a device. 422/* Allocate and map kernel buffer using consistent mode DMA for a device.
396 * hwdev should be valid struct pci_dev pointer for PCI devices. 423 * hwdev should be valid struct pci_dev pointer for PCI devices.
397 */ 424 */
398void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) 425static void *pci32_alloc_coherent(struct device *dev, size_t len,
426 dma_addr_t *pba, gfp_t gfp)
399{ 427{
400 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; 428 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
401 unsigned long va; 429 unsigned long va;
@@ -439,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
439 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ 467 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
440 return (void *) res->start; 468 return (void *) res->start;
441} 469}
442EXPORT_SYMBOL(pci_alloc_consistent);
443 470
444/* Free and unmap a consistent DMA buffer. 471/* Free and unmap a consistent DMA buffer.
445 * cpu_addr is what was returned from pci_alloc_consistent, 472 * cpu_addr is what was returned from pci_alloc_consistent,
@@ -449,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
449 * References to the memory and mappings associated with cpu_addr/dma_addr 476 * References to the memory and mappings associated with cpu_addr/dma_addr
450 * past this call are illegal. 477 * past this call are illegal.
451 */ 478 */
452void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) 479static void pci32_free_coherent(struct device *dev, size_t n, void *p,
480 dma_addr_t ba)
453{ 481{
454 struct resource *res; 482 struct resource *res;
455 unsigned long pgp; 483 unsigned long pgp;
@@ -481,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
481 509
482 free_pages(pgp, get_order(n)); 510 free_pages(pgp, get_order(n));
483} 511}
484EXPORT_SYMBOL(pci_free_consistent);
485
486/* Map a single buffer of the indicated size for DMA in streaming mode.
487 * The 32-bit bus address to use is returned.
488 *
489 * Once the device is given the dma address, the device owns this memory
490 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
491 */
492dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
493 int direction)
494{
495 BUG_ON(direction == PCI_DMA_NONE);
496 /* IIep is write-through, not flushing. */
497 return virt_to_phys(ptr);
498}
499EXPORT_SYMBOL(pci_map_single);
500
501/* Unmap a single streaming mode DMA translation. The dma_addr and size
502 * must match what was provided for in a previous pci_map_single call. All
503 * other usages are undefined.
504 *
505 * After this call, reads by the cpu to the buffer are guaranteed to see
506 * whatever the device wrote there.
507 */
508void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
509 int direction)
510{
511 BUG_ON(direction == PCI_DMA_NONE);
512 if (direction != PCI_DMA_TODEVICE) {
513 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
514 (size + PAGE_SIZE-1) & PAGE_MASK);
515 }
516}
517EXPORT_SYMBOL(pci_unmap_single);
518 512
519/* 513/*
520 * Same as pci_map_single, but with pages. 514 * Same as pci_map_single, but with pages.
521 */ 515 */
522dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, 516static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
523 unsigned long offset, size_t size, int direction) 517 unsigned long offset, size_t size,
518 enum dma_data_direction dir,
519 struct dma_attrs *attrs)
524{ 520{
525 BUG_ON(direction == PCI_DMA_NONE);
526 /* IIep is write-through, not flushing. */ 521 /* IIep is write-through, not flushing. */
527 return page_to_phys(page) + offset; 522 return page_to_phys(page) + offset;
528} 523}
529EXPORT_SYMBOL(pci_map_page);
530
531void pci_unmap_page(struct pci_dev *hwdev,
532 dma_addr_t dma_address, size_t size, int direction)
533{
534 BUG_ON(direction == PCI_DMA_NONE);
535 /* mmu_inval_dma_area XXX */
536}
537EXPORT_SYMBOL(pci_unmap_page);
538 524
539/* Map a set of buffers described by scatterlist in streaming 525/* Map a set of buffers described by scatterlist in streaming
 540 * mode for DMA. This is the scatter-gather version of the 526 * mode for DMA. This is the scatter-gather version of the
@@ -551,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page);
551 * Device ownership issues as mentioned above for pci_map_single are 537 * Device ownership issues as mentioned above for pci_map_single are
552 * the same here. 538 * the same here.
553 */ 539 */
554int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, 540static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
555 int direction) 541 int nents, enum dma_data_direction dir,
542 struct dma_attrs *attrs)
556{ 543{
557 struct scatterlist *sg; 544 struct scatterlist *sg;
558 int n; 545 int n;
559 546
560 BUG_ON(direction == PCI_DMA_NONE);
561 /* IIep is write-through, not flushing. */ 547 /* IIep is write-through, not flushing. */
562 for_each_sg(sgl, sg, nents, n) { 548 for_each_sg(sgl, sg, nents, n) {
563 BUG_ON(page_address(sg_page(sg)) == NULL); 549 BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -566,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
566 } 552 }
567 return nents; 553 return nents;
568} 554}
569EXPORT_SYMBOL(pci_map_sg);
570 555
571/* Unmap a set of streaming mode DMA translations. 556/* Unmap a set of streaming mode DMA translations.
572 * Again, cpu read rules concerning calls here are the same as for 557 * Again, cpu read rules concerning calls here are the same as for
573 * pci_unmap_single() above. 558 * pci_unmap_single() above.
574 */ 559 */
575void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, 560static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
576 int direction) 561 int nents, enum dma_data_direction dir,
562 struct dma_attrs *attrs)
577{ 563{
578 struct scatterlist *sg; 564 struct scatterlist *sg;
579 int n; 565 int n;
580 566
581 BUG_ON(direction == PCI_DMA_NONE); 567 if (dir != PCI_DMA_TODEVICE) {
582 if (direction != PCI_DMA_TODEVICE) {
583 for_each_sg(sgl, sg, nents, n) { 568 for_each_sg(sgl, sg, nents, n) {
584 BUG_ON(page_address(sg_page(sg)) == NULL); 569 BUG_ON(page_address(sg_page(sg)) == NULL);
585 mmu_inval_dma_area( 570 mmu_inval_dma_area(
@@ -588,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
588 } 573 }
589 } 574 }
590} 575}
591EXPORT_SYMBOL(pci_unmap_sg);
592 576
593/* Make physical memory consistent for a single 577/* Make physical memory consistent for a single
594 * streaming mode DMA translation before or after a transfer. 578 * streaming mode DMA translation before or after a transfer.
@@ -600,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
600 * must first perform a pci_dma_sync_for_device, and then the 584 * must first perform a pci_dma_sync_for_device, and then the
601 * device again owns the buffer. 585 * device again owns the buffer.
602 */ 586 */
603void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) 587static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
588 size_t size, enum dma_data_direction dir)
604{ 589{
605 BUG_ON(direction == PCI_DMA_NONE); 590 if (dir != PCI_DMA_TODEVICE) {
606 if (direction != PCI_DMA_TODEVICE) {
607 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 591 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
608 (size + PAGE_SIZE-1) & PAGE_MASK); 592 (size + PAGE_SIZE-1) & PAGE_MASK);
609 } 593 }
610} 594}
611EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
612 595
613void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) 596static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
597 size_t size, enum dma_data_direction dir)
614{ 598{
615 BUG_ON(direction == PCI_DMA_NONE); 599 if (dir != PCI_DMA_TODEVICE) {
616 if (direction != PCI_DMA_TODEVICE) {
617 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 600 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
618 (size + PAGE_SIZE-1) & PAGE_MASK); 601 (size + PAGE_SIZE-1) & PAGE_MASK);
619 } 602 }
620} 603}
621EXPORT_SYMBOL(pci_dma_sync_single_for_device);
622 604
623/* Make physical memory consistent for a set of streaming 605/* Make physical memory consistent for a set of streaming
624 * mode DMA translations after a transfer. 606 * mode DMA translations after a transfer.
@@ -626,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
626 * The same as pci_dma_sync_single_* but for a scatter-gather list, 608 * The same as pci_dma_sync_single_* but for a scatter-gather list,
627 * same rules and usage. 609 * same rules and usage.
628 */ 610 */
629void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) 611static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
612 int nents, enum dma_data_direction dir)
630{ 613{
631 struct scatterlist *sg; 614 struct scatterlist *sg;
632 int n; 615 int n;
633 616
634 BUG_ON(direction == PCI_DMA_NONE); 617 if (dir != PCI_DMA_TODEVICE) {
635 if (direction != PCI_DMA_TODEVICE) {
636 for_each_sg(sgl, sg, nents, n) { 618 for_each_sg(sgl, sg, nents, n) {
637 BUG_ON(page_address(sg_page(sg)) == NULL); 619 BUG_ON(page_address(sg_page(sg)) == NULL);
638 mmu_inval_dma_area( 620 mmu_inval_dma_area(
@@ -641,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
641 } 623 }
642 } 624 }
643} 625}
644EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
645 626
646void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) 627static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
628 int nents, enum dma_data_direction dir)
647{ 629{
648 struct scatterlist *sg; 630 struct scatterlist *sg;
649 int n; 631 int n;
650 632
651 BUG_ON(direction == PCI_DMA_NONE); 633 if (dir != PCI_DMA_TODEVICE) {
652 if (direction != PCI_DMA_TODEVICE) {
653 for_each_sg(sgl, sg, nents, n) { 634 for_each_sg(sgl, sg, nents, n) {
654 BUG_ON(page_address(sg_page(sg)) == NULL); 635 BUG_ON(page_address(sg_page(sg)) == NULL);
655 mmu_inval_dma_area( 636 mmu_inval_dma_area(
@@ -658,31 +639,78 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
658 } 639 }
659 } 640 }
660} 641}
661EXPORT_SYMBOL(pci_dma_sync_sg_for_device); 642
643struct dma_map_ops pci32_dma_ops = {
644 .alloc_coherent = pci32_alloc_coherent,
645 .free_coherent = pci32_free_coherent,
646 .map_page = pci32_map_page,
647 .map_sg = pci32_map_sg,
648 .unmap_sg = pci32_unmap_sg,
649 .sync_single_for_cpu = pci32_sync_single_for_cpu,
650 .sync_single_for_device = pci32_sync_single_for_device,
651 .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
652 .sync_sg_for_device = pci32_sync_sg_for_device,
653};
654EXPORT_SYMBOL(pci32_dma_ops);
655
662#endif /* CONFIG_PCI */ 656#endif /* CONFIG_PCI */
663 657
658/*
659 * Return whether the given PCI device DMA address mask can be
660 * supported properly. For example, if your device can only drive the
661 * low 24-bits during PCI bus mastering, then you would pass
662 * 0x00ffffff as the mask to this function.
663 */
664int dma_supported(struct device *dev, u64 mask)
665{
666#ifdef CONFIG_PCI
667 if (dev->bus == &pci_bus_type)
668 return 1;
669#endif
670 return 0;
671}
672EXPORT_SYMBOL(dma_supported);
673
674int dma_set_mask(struct device *dev, u64 dma_mask)
675{
676#ifdef CONFIG_PCI
677 if (dev->bus == &pci_bus_type)
678 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
679#endif
680 return -EOPNOTSUPP;
681}
682EXPORT_SYMBOL(dma_set_mask);
683
684
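
An illustrative driver-side view of the two helpers added above: a device that can only drive the low 24 address bits would set its mask once at probe time, before creating any mappings. The probe routine below is a hypothetical sketch, not part of the patch; only dma_set_mask() and DMA_BIT_MASK() are real interfaces.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe routine, for illustration only. */
static int example_probe(struct device *dev)
{
	/*
	 * Ask for 24-bit addressing.  With the sparc32 dma_set_mask()
	 * above this only succeeds for devices on the PCI bus; anything
	 * else gets -EOPNOTSUPP.
	 */
	if (dma_set_mask(dev, DMA_BIT_MASK(24)))
		return -ENODEV;

	return 0;
}
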
664#ifdef CONFIG_PROC_FS 685#ifdef CONFIG_PROC_FS
665 686
666static int 687static int sparc_io_proc_show(struct seq_file *m, void *v)
667_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
668 void *data)
669{ 688{
670 char *p = buf, *e = buf + length; 689 struct resource *root = m->private, *r;
671 struct resource *r;
672 const char *nm; 690 const char *nm;
673 691
674 for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { 692 for (r = root->child; r != NULL; r = r->sibling) {
675 if (p + 32 >= e) /* Better than nothing */
676 break;
677 if ((nm = r->name) == 0) nm = "???"; 693 if ((nm = r->name) == 0) nm = "???";
678 p += sprintf(p, "%016llx-%016llx: %s\n", 694 seq_printf(m, "%016llx-%016llx: %s\n",
679 (unsigned long long)r->start, 695 (unsigned long long)r->start,
680 (unsigned long long)r->end, nm); 696 (unsigned long long)r->end, nm);
681 } 697 }
682 698
683 return p-buf; 699 return 0;
684} 700}
685 701
702static int sparc_io_proc_open(struct inode *inode, struct file *file)
703{
704 return single_open(file, sparc_io_proc_show, PDE(inode)->data);
705}
706
707static const struct file_operations sparc_io_proc_fops = {
708 .owner = THIS_MODULE,
709 .open = sparc_io_proc_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
713};
686#endif /* CONFIG_PROC_FS */ 714#endif /* CONFIG_PROC_FS */
687 715
688/* 716/*
@@ -707,7 +735,7 @@ static struct resource *_sparc_find_resource(struct resource *root,
707static void register_proc_sparc_ioport(void) 735static void register_proc_sparc_ioport(void)
708{ 736{
709#ifdef CONFIG_PROC_FS 737#ifdef CONFIG_PROC_FS
710 create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap); 738 proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
711 create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma); 739 proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
712#endif 740#endif
713} 741}
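
The dma_map_ops tables registered in this file are consumed through the generic DMA API rather than called directly. Below is a rough sketch of what a dma_map_single()-style wrapper does with the dma_ops pointer exported above (initialised to &sbus_dma_ops); the wrapper name and placement are assumptions for illustration, the real wrapper lives in the generic dma-mapping headers.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static inline dma_addr_t example_map_single(struct device *dev, void *cpu_addr,
					    size_t size,
					    enum dma_data_direction dir)
{
	struct dma_map_ops *ops = dma_ops;	/* exported above */

	/* map_page() wants a page + offset, so split the kernel address. */
	return ops->map_page(dev, virt_to_page(cpu_addr),
			     offset_in_page(cpu_addr), size, dir, NULL);
}
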
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index ad800b80c718..e1af43728329 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -45,6 +45,7 @@
45#include <asm/pcic.h> 45#include <asm/pcic.h>
46#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
47#include <asm/irq_regs.h> 47#include <asm/irq_regs.h>
48#include <asm/leon.h>
48 49
49#include "kernel.h" 50#include "kernel.h"
50#include "irq.h" 51#include "irq.h"
@@ -661,6 +662,10 @@ void __init init_IRQ(void)
661 sun4d_init_IRQ(); 662 sun4d_init_IRQ();
662 break; 663 break;
663 664
665 case sparc_leon:
666 leon_init_IRQ();
667 break;
668
664 default: 669 default:
665 prom_printf("Cannot initialize IRQs on this Sun machine..."); 670 prom_printf("Cannot initialize IRQs on this Sun machine...");
666 break; 671 break;
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index f0ee79055409..8ab1d4728a4b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -229,7 +229,7 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
229 tid = ((a << IMAP_AID_SHIFT) | 229 tid = ((a << IMAP_AID_SHIFT) |
230 (n << IMAP_NID_SHIFT)); 230 (n << IMAP_NID_SHIFT));
231 tid &= (IMAP_AID_SAFARI | 231 tid &= (IMAP_AID_SAFARI |
232 IMAP_NID_SAFARI);; 232 IMAP_NID_SAFARI);
233 } 233 }
234 } else { 234 } else {
235 tid = cpuid << IMAP_TID_SHIFT; 235 tid = cpuid << IMAP_TID_SHIFT;
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void)
886 * Therefore you cannot make any OBP calls, not even prom_printf, 886 * Therefore you cannot make any OBP calls, not even prom_printf,
887 * from these two routines. 887 * from these two routines.
888 */ 888 */
889static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) 889static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
890{ 890{
891 unsigned long num_entries = (qmask + 1) / 64; 891 unsigned long num_entries = (qmask + 1) / 64;
892 unsigned long status; 892 unsigned long status;
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
new file mode 100644
index 000000000000..54d8a5bd4824
--- /dev/null
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
3 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/errno.h>
9#include <linux/mutex.h>
10#include <linux/slab.h>
11#include <linux/of.h>
12#include <linux/of_platform.h>
13#include <linux/interrupt.h>
14#include <linux/of_device.h>
15#include <asm/oplib.h>
16#include <asm/timer.h>
17#include <asm/prom.h>
18#include <asm/leon.h>
19#include <asm/leon_amba.h>
20
21#include "prom.h"
22#include "irq.h"
23
24struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */
25struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */
26struct amba_apb_device leon_percpu_timer_dev[16];
27
28int leondebug_irq_disable;
29int leon_debug_irqout;
30static int dummy_master_l10_counter;
31
32unsigned long leon3_gptimer_irq; /* interrupt controller irq number, initialized by amba_init() */
33unsigned int sparc_leon_eirq;
34#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0]))
35
36/* Return the IRQ of the pending IRQ on the extended IRQ controller */
37int sparc_leon_eirq_get(int eirq, int cpu)
38{
39 return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
40}
41
42irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id)
43{
44 printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n");
45 return IRQ_HANDLED;
46}
47
48/* The extended IRQ controller has been found, this function registers it */
49void sparc_leon_eirq_register(int eirq)
50{
51 int irq;
52
53 /* Register a "BAD" handler for this interrupt, it should never happen */
54 irq = request_irq(eirq, sparc_leon_eirq_isr,
55 (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL);
56
57 if (irq) {
58 printk(KERN_ERR
59 "sparc_leon_eirq_register: unable to attach IRQ%d\n",
60 eirq);
61 } else {
62 sparc_leon_eirq = eirq;
63 }
64
65}
66
67static inline unsigned long get_irqmask(unsigned int irq)
68{
69 unsigned long mask;
70
71 if (!irq || ((irq > 0xf) && !sparc_leon_eirq)
72 || ((irq > 0x1f) && sparc_leon_eirq)) {
73 printk(KERN_ERR
74 "leon_get_irqmask: false irq number: %d\n", irq);
75 mask = 0;
76 } else {
77 mask = LEON_HARD_INT(irq);
78 }
79 return mask;
80}
81
82static void leon_enable_irq(unsigned int irq_nr)
83{
84 unsigned long mask, flags;
85 mask = get_irqmask(irq_nr);
86 local_irq_save(flags);
87 LEON3_BYPASS_STORE_PA(LEON_IMASK,
88 (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask)));
89 local_irq_restore(flags);
90}
91
92static void leon_disable_irq(unsigned int irq_nr)
93{
94 unsigned long mask, flags;
95 mask = get_irqmask(irq_nr);
96 local_irq_save(flags);
97 LEON3_BYPASS_STORE_PA(LEON_IMASK,
98 (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask)));
99 local_irq_restore(flags);
100
101}
102
103void __init leon_init_timers(irq_handler_t counter_fn)
104{
105 int irq;
106
107 leondebug_irq_disable = 0;
108 leon_debug_irqout = 0;
109 master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
110 dummy_master_l10_counter = 0;
111
112 if (leon3_gptimer_regs && leon3_irqctrl_regs) {
113 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0);
114 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld,
115 (((1000000 / 100) - 1)));
116 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0);
117
118 } else {
119 printk(KERN_ERR "No Timer/irqctrl found\n");
120 BUG();
121 }
122
123 irq = request_irq(leon3_gptimer_irq,
124 counter_fn,
125 (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
126
127 if (irq) {
128 printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n",
129 LEON_INTERRUPT_TIMER1);
130 prom_halt();
131 }
132
133 if (leon3_gptimer_regs) {
134 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl,
135 LEON3_GPTIMER_EN |
136 LEON3_GPTIMER_RL |
137 LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN);
138 }
139}
140
141void leon_clear_clock_irq(void)
142{
143}
144
145void leon_load_profile_irq(int cpu, unsigned int limit)
146{
147 BUG();
148}
149
150
151
152
153void __init leon_trans_init(struct device_node *dp)
154{
155 if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
156 struct property *p;
157 p = of_find_property(dp, "mid", (void *)0);
158 if (p) {
159 int mid;
160 dp->name = prom_early_alloc(5 + 1);
161 memcpy(&mid, p->value, p->length);
162 sprintf((char *)dp->name, "cpu%.2d", mid);
163 }
164 }
165}
166
167void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0;
168
169void __init leon_node_init(struct device_node *dp, struct device_node ***nextp)
170{
171 if (prom_amba_init &&
172 strcmp(dp->type, "ambapp") == 0 &&
173 strcmp(dp->name, "ambapp0") == 0) {
174 prom_amba_init(dp, nextp);
175 }
176}
177
178void __init leon_init_IRQ(void)
179{
180 sparc_init_timers = leon_init_timers;
181
182 BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
183 BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
184 BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM);
185 BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM);
186
187 BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq,
188 BTFIXUPCALL_NORM);
189 BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq,
190 BTFIXUPCALL_NOP);
191
192#ifdef CONFIG_SMP
193 BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM);
194 BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM);
195 BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM);
196#endif
197
198}
199
200void __init leon_init(void)
201{
202 prom_build_more = &leon_node_init;
203}
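
A quick check of the reload value programmed into gptimer e[0].rld in leon_init_timers() above, under the assumption (not stated in the patch) that the timer ticks at 1 MHz and the kernel wants a 100 Hz clock; the standalone program is illustration only.

#include <stdio.h>

int main(void)
{
	unsigned int timer_hz = 1000000;	/* assumed 1 MHz prescaler output */
	unsigned int tick_hz  = 100;		/* assumed HZ */
	unsigned int reload   = timer_hz / tick_hz - 1;	/* value written to e[0].rld */

	/* prints 9999: the timer underflows every 10000 ticks, i.e. 100 times per second */
	printf("reload = %u\n", reload);
	return 0;
}
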
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 2c0cc72d295b..b129611590a4 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -19,6 +19,7 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21 21
22#include <asm/perf_event.h>
22#include <asm/ptrace.h> 23#include <asm/ptrace.h>
23#include <asm/local.h> 24#include <asm/local.h>
24#include <asm/pcr.h> 25#include <asm/pcr.h>
@@ -31,13 +32,19 @@
31 * level 14 as our IRQ off level. 32 * level 14 as our IRQ off level.
32 */ 33 */
33 34
34static int nmi_watchdog_active;
35static int panic_on_timeout; 35static int panic_on_timeout;
36 36
37int nmi_usable; 37/* nmi_active:
38EXPORT_SYMBOL_GPL(nmi_usable); 38 * >0: the NMI watchdog is active, but can be disabled
39 * <0: the NMI watchdog has not been set up, and cannot be enabled
40 * 0: the NMI watchdog is disabled, but can be enabled
41 */
42atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
43EXPORT_SYMBOL(nmi_active);
39 44
40static unsigned int nmi_hz = HZ; 45static unsigned int nmi_hz = HZ;
46static DEFINE_PER_CPU(short, wd_enabled);
47static int endflag __initdata;
41 48
42static DEFINE_PER_CPU(unsigned int, last_irq_sum); 49static DEFINE_PER_CPU(unsigned int, last_irq_sum);
43static DEFINE_PER_CPU(local_t, alert_counter); 50static DEFINE_PER_CPU(local_t, alert_counter);
@@ -45,7 +52,7 @@ static DEFINE_PER_CPU(int, nmi_touch);
45 52
46void touch_nmi_watchdog(void) 53void touch_nmi_watchdog(void)
47{ 54{
48 if (nmi_watchdog_active) { 55 if (atomic_read(&nmi_active)) {
49 int cpu; 56 int cpu;
50 57
51 for_each_present_cpu(cpu) { 58 for_each_present_cpu(cpu) {
@@ -78,6 +85,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
78 if (do_panic || panic_on_oops) 85 if (do_panic || panic_on_oops)
79 panic("Non maskable interrupt"); 86 panic("Non maskable interrupt");
80 87
88 nmi_exit();
81 local_irq_enable(); 89 local_irq_enable();
82 do_exit(SIGBUS); 90 do_exit(SIGBUS);
83} 91}
@@ -92,6 +100,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
92 100
93 local_cpu_data().__nmi_count++; 101 local_cpu_data().__nmi_count++;
94 102
103 nmi_enter();
104
95 if (notify_die(DIE_NMI, "nmi", regs, 0, 105 if (notify_die(DIE_NMI, "nmi", regs, 0,
96 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) 106 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
97 touched = 1; 107 touched = 1;
@@ -103,17 +113,19 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
103 } 113 }
104 if (!touched && __get_cpu_var(last_irq_sum) == sum) { 114 if (!touched && __get_cpu_var(last_irq_sum) == sum) {
105 local_inc(&__get_cpu_var(alert_counter)); 115 local_inc(&__get_cpu_var(alert_counter));
106 if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) 116 if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
107 die_nmi("BUG: NMI Watchdog detected LOCKUP", 117 die_nmi("BUG: NMI Watchdog detected LOCKUP",
108 regs, panic_on_timeout); 118 regs, panic_on_timeout);
109 } else { 119 } else {
110 __get_cpu_var(last_irq_sum) = sum; 120 __get_cpu_var(last_irq_sum) = sum;
111 local_set(&__get_cpu_var(alert_counter), 0); 121 local_set(&__get_cpu_var(alert_counter), 0);
112 } 122 }
113 if (nmi_usable) { 123 if (__get_cpu_var(wd_enabled)) {
114 write_pic(picl_value(nmi_hz)); 124 write_pic(picl_value(nmi_hz));
115 pcr_ops->write(pcr_enable); 125 pcr_ops->write(pcr_enable);
116 } 126 }
127
128 nmi_exit();
117} 129}
118 130
119static inline unsigned int get_nmi_count(int cpu) 131static inline unsigned int get_nmi_count(int cpu)
@@ -121,8 +133,6 @@ static inline unsigned int get_nmi_count(int cpu)
121 return cpu_data(cpu).__nmi_count; 133 return cpu_data(cpu).__nmi_count;
122} 134}
123 135
124static int endflag __initdata;
125
126static __init void nmi_cpu_busy(void *data) 136static __init void nmi_cpu_busy(void *data)
127{ 137{
128 local_irq_enable_in_hardirq(); 138 local_irq_enable_in_hardirq();
@@ -143,12 +153,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
143 printk(KERN_WARNING 153 printk(KERN_WARNING
144 "and attach the output of the 'dmesg' command.\n"); 154 "and attach the output of the 'dmesg' command.\n");
145 155
146 nmi_usable = 0; 156 per_cpu(wd_enabled, cpu) = 0;
157 atomic_dec(&nmi_active);
147} 158}
148 159
149static void stop_watchdog(void *unused) 160void stop_nmi_watchdog(void *unused)
150{ 161{
151 pcr_ops->write(PCR_PIC_PRIV); 162 pcr_ops->write(PCR_PIC_PRIV);
163 __get_cpu_var(wd_enabled) = 0;
164 atomic_dec(&nmi_active);
152} 165}
153 166
154static int __init check_nmi_watchdog(void) 167static int __init check_nmi_watchdog(void)
@@ -156,6 +169,9 @@ static int __init check_nmi_watchdog(void)
156 unsigned int *prev_nmi_count; 169 unsigned int *prev_nmi_count;
157 int cpu, err; 170 int cpu, err;
158 171
172 if (!atomic_read(&nmi_active))
173 return 0;
174
159 prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); 175 prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
160 if (!prev_nmi_count) { 176 if (!prev_nmi_count) {
161 err = -ENOMEM; 177 err = -ENOMEM;
@@ -172,12 +188,15 @@ static int __init check_nmi_watchdog(void)
172 mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ 188 mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
173 189
174 for_each_online_cpu(cpu) { 190 for_each_online_cpu(cpu) {
191 if (!per_cpu(wd_enabled, cpu))
192 continue;
175 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) 193 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
176 report_broken_nmi(cpu, prev_nmi_count); 194 report_broken_nmi(cpu, prev_nmi_count);
177 } 195 }
178 endflag = 1; 196 endflag = 1;
179 if (!nmi_usable) { 197 if (!atomic_read(&nmi_active)) {
180 kfree(prev_nmi_count); 198 kfree(prev_nmi_count);
199 atomic_set(&nmi_active, -1);
181 err = -ENODEV; 200 err = -ENODEV;
182 goto error; 201 goto error;
183 } 202 }
@@ -188,12 +207,26 @@ static int __init check_nmi_watchdog(void)
188 kfree(prev_nmi_count); 207 kfree(prev_nmi_count);
189 return 0; 208 return 0;
190error: 209error:
191 on_each_cpu(stop_watchdog, NULL, 1); 210 on_each_cpu(stop_nmi_watchdog, NULL, 1);
192 return err; 211 return err;
193} 212}
194 213
195static void start_watchdog(void *unused) 214void start_nmi_watchdog(void *unused)
196{ 215{
216 __get_cpu_var(wd_enabled) = 1;
217 atomic_inc(&nmi_active);
218
219 pcr_ops->write(PCR_PIC_PRIV);
220 write_pic(picl_value(nmi_hz));
221
222 pcr_ops->write(pcr_enable);
223}
224
225static void nmi_adjust_hz_one(void *unused)
226{
227 if (!__get_cpu_var(wd_enabled))
228 return;
229
197 pcr_ops->write(PCR_PIC_PRIV); 230 pcr_ops->write(PCR_PIC_PRIV);
198 write_pic(picl_value(nmi_hz)); 231 write_pic(picl_value(nmi_hz));
199 232
@@ -203,13 +236,13 @@ static void start_watchdog(void *unused)
203void nmi_adjust_hz(unsigned int new_hz) 236void nmi_adjust_hz(unsigned int new_hz)
204{ 237{
205 nmi_hz = new_hz; 238 nmi_hz = new_hz;
206 on_each_cpu(start_watchdog, NULL, 1); 239 on_each_cpu(nmi_adjust_hz_one, NULL, 1);
207} 240}
208EXPORT_SYMBOL_GPL(nmi_adjust_hz); 241EXPORT_SYMBOL_GPL(nmi_adjust_hz);
209 242
210static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) 243static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
211{ 244{
212 on_each_cpu(stop_watchdog, NULL, 1); 245 on_each_cpu(stop_nmi_watchdog, NULL, 1);
213 return 0; 246 return 0;
214} 247}
215 248
@@ -221,18 +254,19 @@ int __init nmi_init(void)
221{ 254{
222 int err; 255 int err;
223 256
224 nmi_usable = 1; 257 on_each_cpu(start_nmi_watchdog, NULL, 1);
225
226 on_each_cpu(start_watchdog, NULL, 1);
227 258
228 err = check_nmi_watchdog(); 259 err = check_nmi_watchdog();
229 if (!err) { 260 if (!err) {
230 err = register_reboot_notifier(&nmi_reboot_notifier); 261 err = register_reboot_notifier(&nmi_reboot_notifier);
231 if (err) { 262 if (err) {
232 nmi_usable = 0; 263 on_each_cpu(stop_nmi_watchdog, NULL, 1);
233 on_each_cpu(stop_watchdog, NULL, 1); 264 atomic_set(&nmi_active, -1);
234 } 265 }
235 } 266 }
267 if (!err)
268 init_hw_perf_events();
269
236 return err; 270 return err;
237} 271}
238 272
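
The nmi_active counter introduced above mirrors the convention used by other architectures: positive while the watchdog runs, zero when stopped, negative once it is known to be unusable. Below is a minimal userspace model of that bookkeeping, with plain ints standing in for atomic_t and the per-cpu wd_enabled flag; purely illustrative.

#include <stdio.h>

#define NR_CPUS 4

static int nmi_active;			/* >0 running, 0 stopped, <0 unusable */
static int wd_enabled[NR_CPUS];		/* stand-in for the per-cpu flag */

static void start_one(int cpu) { wd_enabled[cpu] = 1; nmi_active++; }
static void stop_one(int cpu)  { wd_enabled[cpu] = 0; nmi_active--; }

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		start_one(cpu);
	printf("after start: nmi_active=%d\n", nmi_active);	/* 4 */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		stop_one(cpu);
	nmi_active = -1;	/* what check_nmi_watchdog() does on failure */
	printf("after failure: nmi_active=%d\n", nmi_active);	/* -1 */
	return 0;
}
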
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 90396702ea2c..4c26eb59e742 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -9,6 +9,8 @@
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/of_device.h> 10#include <linux/of_device.h>
11#include <linux/of_platform.h> 11#include <linux/of_platform.h>
12#include <asm/leon.h>
13#include <asm/leon_amba.h>
12 14
13#include "of_device_common.h" 15#include "of_device_common.h"
14 16
@@ -97,6 +99,35 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
97 return IORESOURCE_MEM; 99 return IORESOURCE_MEM;
98} 100}
99 101
102 /*
103 * AMBAPP bus specific translator
104 */
105
106static int of_bus_ambapp_match(struct device_node *np)
107{
108 return !strcmp(np->name, "ambapp");
109}
110
111static void of_bus_ambapp_count_cells(struct device_node *child,
112 int *addrc, int *sizec)
113{
114 if (addrc)
115 *addrc = 1;
116 if (sizec)
117 *sizec = 1;
118}
119
120static int of_bus_ambapp_map(u32 *addr, const u32 *range,
121 int na, int ns, int pna)
122{
123 return of_bus_default_map(addr, range, na, ns, pna);
124}
125
126static unsigned long of_bus_ambapp_get_flags(const u32 *addr,
127 unsigned long flags)
128{
129 return IORESOURCE_MEM;
130}
100 131
101/* 132/*
102 * Array of bus specific translators 133 * Array of bus specific translators
@@ -121,6 +152,15 @@ static struct of_bus of_busses[] = {
121 .map = of_bus_default_map, 152 .map = of_bus_default_map,
122 .get_flags = of_bus_sbus_get_flags, 153 .get_flags = of_bus_sbus_get_flags,
123 }, 154 },
155 /* AMBA */
156 {
157 .name = "ambapp",
158 .addr_prop_name = "reg",
159 .match = of_bus_ambapp_match,
160 .count_cells = of_bus_ambapp_count_cells,
161 .map = of_bus_ambapp_map,
162 .get_flags = of_bus_ambapp_get_flags,
163 },
124 /* Default */ 164 /* Default */
125 { 165 {
126 .name = "default", 166 .name = "default",
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 57859ad23547..c68648662802 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
1039 pci_dev_put(ali_isa_bridge); 1039 pci_dev_put(ali_isa_bridge);
1040} 1040}
1041 1041
1042int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) 1042int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
1043{ 1043{
1044 u64 dma_addr_mask; 1044 u64 dma_addr_mask;
1045 1045
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 2485eaa23101..23c33ff9c31e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
232 232
233static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, 233static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
234 unsigned long offset, size_t sz, 234 unsigned long offset, size_t sz,
235 enum dma_data_direction direction) 235 enum dma_data_direction direction,
236 struct dma_attrs *attrs)
236{ 237{
237 struct iommu *iommu; 238 struct iommu *iommu;
238 unsigned long flags, npages, oaddr; 239 unsigned long flags, npages, oaddr;
@@ -296,7 +297,8 @@ iommu_map_fail:
296} 297}
297 298
298static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, 299static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
299 size_t sz, enum dma_data_direction direction) 300 size_t sz, enum dma_data_direction direction,
301 struct dma_attrs *attrs)
300{ 302{
301 struct pci_pbm_info *pbm; 303 struct pci_pbm_info *pbm;
302 struct iommu *iommu; 304 struct iommu *iommu;
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
336} 338}
337 339
338static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, 340static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
339 int nelems, enum dma_data_direction direction) 341 int nelems, enum dma_data_direction direction,
342 struct dma_attrs *attrs)
340{ 343{
341 struct scatterlist *s, *outs, *segstart; 344 struct scatterlist *s, *outs, *segstart;
342 unsigned long flags, handle, prot; 345 unsigned long flags, handle, prot;
@@ -478,7 +481,8 @@ iommu_map_failed:
478} 481}
479 482
480static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, 483static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
481 int nelems, enum dma_data_direction direction) 484 int nelems, enum dma_data_direction direction,
485 struct dma_attrs *attrs)
482{ 486{
483 struct pci_pbm_info *pbm; 487 struct pci_pbm_info *pbm;
484 struct scatterlist *sg; 488 struct scatterlist *sg;
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
521 spin_unlock_irqrestore(&iommu->lock, flags); 525 spin_unlock_irqrestore(&iommu->lock, flags);
522} 526}
523 527
524static void dma_4v_sync_single_for_cpu(struct device *dev, 528static struct dma_map_ops sun4v_dma_ops = {
525 dma_addr_t bus_addr, size_t sz,
526 enum dma_data_direction direction)
527{
528 /* Nothing to do... */
529}
530
531static void dma_4v_sync_sg_for_cpu(struct device *dev,
532 struct scatterlist *sglist, int nelems,
533 enum dma_data_direction direction)
534{
535 /* Nothing to do... */
536}
537
538static const struct dma_ops sun4v_dma_ops = {
539 .alloc_coherent = dma_4v_alloc_coherent, 529 .alloc_coherent = dma_4v_alloc_coherent,
540 .free_coherent = dma_4v_free_coherent, 530 .free_coherent = dma_4v_free_coherent,
541 .map_page = dma_4v_map_page, 531 .map_page = dma_4v_map_page,
542 .unmap_page = dma_4v_unmap_page, 532 .unmap_page = dma_4v_unmap_page,
543 .map_sg = dma_4v_map_sg, 533 .map_sg = dma_4v_map_sg,
544 .unmap_sg = dma_4v_unmap_sg, 534 .unmap_sg = dma_4v_unmap_sg,
545 .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
546 .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
547}; 535};
548 536
549static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, 537static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 1ae8cdd7e703..2d94e7a03af5 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,13 +7,15 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/irq.h> 8#include <linux/irq.h>
9 9
10#include <linux/perf_event.h>
11
10#include <asm/pil.h> 12#include <asm/pil.h>
11#include <asm/pcr.h> 13#include <asm/pcr.h>
12#include <asm/nmi.h> 14#include <asm/nmi.h>
13 15
14/* This code is shared between various users of the performance 16/* This code is shared between various users of the performance
15 * counters. Users will be oprofile, pseudo-NMI watchdog, and the 17 * counters. Users will be oprofile, pseudo-NMI watchdog, and the
16 * perf_counter support layer. 18 * perf_event support layer.
17 */ 19 */
18 20
19#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE) 21#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
@@ -34,10 +36,20 @@ unsigned int picl_shift;
34 */ 36 */
35void deferred_pcr_work_irq(int irq, struct pt_regs *regs) 37void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
36{ 38{
39 struct pt_regs *old_regs;
40
37 clear_softint(1 << PIL_DEFERRED_PCR_WORK); 41 clear_softint(1 << PIL_DEFERRED_PCR_WORK);
42
43 old_regs = set_irq_regs(regs);
44 irq_enter();
45#ifdef CONFIG_PERF_EVENTS
46 perf_event_do_pending();
47#endif
48 irq_exit();
49 set_irq_regs(old_regs);
38} 50}
39 51
40void schedule_deferred_pcr_work(void) 52void set_perf_event_pending(void)
41{ 53{
42 set_softint(1 << PIL_DEFERRED_PCR_WORK); 54 set_softint(1 << PIL_DEFERRED_PCR_WORK);
43} 55}
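
set_perf_event_pending() above only latches a software interrupt; the expensive perf work then runs from deferred_pcr_work_irq() once the CPU has dropped back below the performance-counter PIL. Below is a simplified userspace model of that two-step hand-off, with a plain flag standing in for the softint bit; illustrative only.

#include <stdio.h>

static int pending;			/* stand-in for the softint bit */

static void set_event_pending(void)	/* called from the "fast" counter-overflow path */
{
	pending = 1;
}

static void deferred_work(void)		/* called later, in normal interrupt context */
{
	if (!pending)
		return;
	pending = 0;
	printf("running deferred perf work\n");
}

int main(void)
{
	set_event_pending();	/* cheap: just mark work as pending */
	deferred_work();	/* heavy lifting happens here       */
	return 0;
}
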
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
new file mode 100644
index 000000000000..2d6a1b10c81d
--- /dev/null
+++ b/arch/sparc/kernel/perf_event.c
@@ -0,0 +1,556 @@
1/* Performance event support for sparc64.
2 *
3 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
4 *
5 * This code is based almost entirely upon the x86 perf event
6 * code, which is:
7 *
8 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
9 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
10 * Copyright (C) 2009 Jaswinder Singh Rajput
11 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
13 */
14
15#include <linux/perf_event.h>
16#include <linux/kprobes.h>
17#include <linux/kernel.h>
18#include <linux/kdebug.h>
19#include <linux/mutex.h>
20
21#include <asm/cpudata.h>
22#include <asm/atomic.h>
23#include <asm/nmi.h>
24#include <asm/pcr.h>
25
26/* Sparc64 chips have two performance counters, 32-bits each, with
27 * overflow interrupts generated on transition from 0xffffffff to 0.
28 * The counters are accessed in one go using a 64-bit register.
29 *
30 * Both counters are controlled using a single control register. The
31 * only way to stop all sampling is to clear all of the context (user,
32 * supervisor, hypervisor) sampling enable bits. But these bits apply
33 * to both counters, thus the two counters can't be enabled/disabled
34 * individually.
35 *
36 * The control register has two event fields, one for each of the two
37 * counters. It's thus nearly impossible to have one counter going
38 * while keeping the other one stopped. Therefore it is possible to
39 * get overflow interrupts for counters not currently "in use" and
40 * that condition must be checked in the overflow interrupt handler.
41 *
42 * So we use a hack, in that we program inactive counters with the
43 * "sw_count0" and "sw_count1" events. These count how many times
44 * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
45 * unusual way to encode a NOP and therefore will not trigger in
46 * normal code.
47 */
48
49#define MAX_HWEVENTS 2
50#define MAX_PERIOD ((1UL << 32) - 1)
51
52#define PIC_UPPER_INDEX 0
53#define PIC_LOWER_INDEX 1
54
55struct cpu_hw_events {
56 struct perf_event *events[MAX_HWEVENTS];
57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
59 int enabled;
60};
61DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
62
63struct perf_event_map {
64 u16 encoding;
65 u8 pic_mask;
66#define PIC_NONE 0x00
67#define PIC_UPPER 0x01
68#define PIC_LOWER 0x02
69};
70
71struct sparc_pmu {
72 const struct perf_event_map *(*event_map)(int);
73 int max_events;
74 int upper_shift;
75 int lower_shift;
76 int event_mask;
77 int hv_bit;
78 int irq_bit;
79 int upper_nop;
80 int lower_nop;
81};
82
83static const struct perf_event_map ultra3i_perfmon_event_map[] = {
84 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
85 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
86 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
87 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
88};
89
90static const struct perf_event_map *ultra3i_event_map(int event_id)
91{
92 return &ultra3i_perfmon_event_map[event_id];
93}
94
95static const struct sparc_pmu ultra3i_pmu = {
96 .event_map = ultra3i_event_map,
97 .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map),
98 .upper_shift = 11,
99 .lower_shift = 4,
100 .event_mask = 0x3f,
101 .upper_nop = 0x1c,
102 .lower_nop = 0x14,
103};
104
105static const struct perf_event_map niagara2_perfmon_event_map[] = {
106 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
107 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
108 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
109 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
110 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
111 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
112};
113
114static const struct perf_event_map *niagara2_event_map(int event_id)
115{
116 return &niagara2_perfmon_event_map[event_id];
117}
118
119static const struct sparc_pmu niagara2_pmu = {
120 .event_map = niagara2_event_map,
121 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
122 .upper_shift = 19,
123 .lower_shift = 6,
124 .event_mask = 0xfff,
125 .hv_bit = 0x8,
126 .irq_bit = 0x03,
127 .upper_nop = 0x220,
128 .lower_nop = 0x220,
129};
130
131static const struct sparc_pmu *sparc_pmu __read_mostly;
132
133static u64 event_encoding(u64 event_id, int idx)
134{
135 if (idx == PIC_UPPER_INDEX)
136 event_id <<= sparc_pmu->upper_shift;
137 else
138 event_id <<= sparc_pmu->lower_shift;
139 return event_id;
140}
141
142static u64 mask_for_index(int idx)
143{
144 return event_encoding(sparc_pmu->event_mask, idx);
145}
146
147static u64 nop_for_index(int idx)
148{
149 return event_encoding(idx == PIC_UPPER_INDEX ?
150 sparc_pmu->upper_nop :
151 sparc_pmu->lower_nop, idx);
152}
153
154static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
155 int idx)
156{
157 u64 val, mask = mask_for_index(idx);
158
159 val = pcr_ops->read();
160 pcr_ops->write((val & ~mask) | hwc->config);
161}
162
163static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
164 int idx)
165{
166 u64 mask = mask_for_index(idx);
167 u64 nop = nop_for_index(idx);
168 u64 val = pcr_ops->read();
169
170 pcr_ops->write((val & ~mask) | nop);
171}
172
173void hw_perf_enable(void)
174{
175 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
176 u64 val;
177 int i;
178
179 if (cpuc->enabled)
180 return;
181
182 cpuc->enabled = 1;
183 barrier();
184
185 val = pcr_ops->read();
186
187 for (i = 0; i < MAX_HWEVENTS; i++) {
188 struct perf_event *cp = cpuc->events[i];
189 struct hw_perf_event *hwc;
190
191 if (!cp)
192 continue;
193 hwc = &cp->hw;
194 val |= hwc->config_base;
195 }
196
197 pcr_ops->write(val);
198}
199
200void hw_perf_disable(void)
201{
202 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
203 u64 val;
204
205 if (!cpuc->enabled)
206 return;
207
208 cpuc->enabled = 0;
209
210 val = pcr_ops->read();
211 val &= ~(PCR_UTRACE | PCR_STRACE |
212 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
213 pcr_ops->write(val);
214}
215
216static u32 read_pmc(int idx)
217{
218 u64 val;
219
220 read_pic(val);
221 if (idx == PIC_UPPER_INDEX)
222 val >>= 32;
223
224 return val & 0xffffffff;
225}
226
227static void write_pmc(int idx, u64 val)
228{
229 u64 shift, mask, pic;
230
231 shift = 0;
232 if (idx == PIC_UPPER_INDEX)
233 shift = 32;
234
235 mask = ((u64) 0xffffffff) << shift;
236 val <<= shift;
237
238 read_pic(pic);
239 pic &= ~mask;
240 pic |= val;
241 write_pic(pic);
242}
243
244static int sparc_perf_event_set_period(struct perf_event *event,
245 struct hw_perf_event *hwc, int idx)
246{
247 s64 left = atomic64_read(&hwc->period_left);
248 s64 period = hwc->sample_period;
249 int ret = 0;
250
251 if (unlikely(left <= -period)) {
252 left = period;
253 atomic64_set(&hwc->period_left, left);
254 hwc->last_period = period;
255 ret = 1;
256 }
257
258 if (unlikely(left <= 0)) {
259 left += period;
260 atomic64_set(&hwc->period_left, left);
261 hwc->last_period = period;
262 ret = 1;
263 }
264 if (left > MAX_PERIOD)
265 left = MAX_PERIOD;
266
267 atomic64_set(&hwc->prev_count, (u64)-left);
268
269 write_pmc(idx, (u64)(-left) & 0xffffffff);
270
271 perf_event_update_userpage(event);
272
273 return ret;
274}
275
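
A worked example of the value sparc_perf_event_set_period() writes to the counter: programming the 32-bit PIC with -left (truncated to 32 bits) makes it overflow after exactly left increments. Standalone arithmetic only, not kernel code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t  left    = 1000;				/* events until next overflow */
	uint32_t start   = (uint64_t)(-left) & 0xffffffff;	/* what write_pmc() receives  */
	uint32_t to_wrap = 0u - start;				/* increments until 0xffffffff -> 0 */

	printf("start   = 0x%08x\n", start);	/* 0xfffffc18 */
	printf("to_wrap = %u\n", to_wrap);	/* 1000 */
	return 0;
}
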
276static int sparc_pmu_enable(struct perf_event *event)
277{
278 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
279 struct hw_perf_event *hwc = &event->hw;
280 int idx = hwc->idx;
281
282 if (test_and_set_bit(idx, cpuc->used_mask))
283 return -EAGAIN;
284
285 sparc_pmu_disable_event(hwc, idx);
286
287 cpuc->events[idx] = event;
288 set_bit(idx, cpuc->active_mask);
289
290 sparc_perf_event_set_period(event, hwc, idx);
291 sparc_pmu_enable_event(hwc, idx);
292 perf_event_update_userpage(event);
293 return 0;
294}
295
296static u64 sparc_perf_event_update(struct perf_event *event,
297 struct hw_perf_event *hwc, int idx)
298{
299 int shift = 64 - 32;
300 u64 prev_raw_count, new_raw_count;
301 s64 delta;
302
303again:
304 prev_raw_count = atomic64_read(&hwc->prev_count);
305 new_raw_count = read_pmc(idx);
306
307 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
308 new_raw_count) != prev_raw_count)
309 goto again;
310
311 delta = (new_raw_count << shift) - (prev_raw_count << shift);
312 delta >>= shift;
313
314 atomic64_add(delta, &event->count);
315 atomic64_sub(delta, &hwc->period_left);
316
317 return new_raw_count;
318}
319
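
The shift trick in sparc_perf_event_update() above turns two raw 32-bit samples into a correctly signed delta even when the counter wraps between reads: prev = 0xfffffff0 followed by a new reading of 0x00000010 must count as 32 events, not as a huge negative jump. A standalone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int shift = 64 - 32;
	uint64_t prev = 0xfffffff0;	/* raw count before the wrap */
	uint64_t curr = 0x00000010;	/* raw count after the wrap  */
	int64_t delta = (curr << shift) - (prev << shift);

	delta >>= shift;		/* arithmetic shift restores the sign */
	printf("delta = %lld\n", (long long)delta);	/* 32 */
	return 0;
}
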
320static void sparc_pmu_disable(struct perf_event *event)
321{
322 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
323 struct hw_perf_event *hwc = &event->hw;
324 int idx = hwc->idx;
325
326 clear_bit(idx, cpuc->active_mask);
327 sparc_pmu_disable_event(hwc, idx);
328
329 barrier();
330
331 sparc_perf_event_update(event, hwc, idx);
332 cpuc->events[idx] = NULL;
333 clear_bit(idx, cpuc->used_mask);
334
335 perf_event_update_userpage(event);
336}
337
338static void sparc_pmu_read(struct perf_event *event)
339{
340 struct hw_perf_event *hwc = &event->hw;
341 sparc_perf_event_update(event, hwc, hwc->idx);
342}
343
344static void sparc_pmu_unthrottle(struct perf_event *event)
345{
346 struct hw_perf_event *hwc = &event->hw;
347 sparc_pmu_enable_event(hwc, hwc->idx);
348}
349
350static atomic_t active_events = ATOMIC_INIT(0);
351static DEFINE_MUTEX(pmc_grab_mutex);
352
353void perf_event_grab_pmc(void)
354{
355 if (atomic_inc_not_zero(&active_events))
356 return;
357
358 mutex_lock(&pmc_grab_mutex);
359 if (atomic_read(&active_events) == 0) {
360 if (atomic_read(&nmi_active) > 0) {
361 on_each_cpu(stop_nmi_watchdog, NULL, 1);
362 BUG_ON(atomic_read(&nmi_active) != 0);
363 }
364 atomic_inc(&active_events);
365 }
366 mutex_unlock(&pmc_grab_mutex);
367}
368
369void perf_event_release_pmc(void)
370{
371 if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
372 if (atomic_read(&nmi_active) == 0)
373 on_each_cpu(start_nmi_watchdog, NULL, 1);
374 mutex_unlock(&pmc_grab_mutex);
375 }
376}
377
378static void hw_perf_event_destroy(struct perf_event *event)
379{
380 perf_event_release_pmc();
381}
382
383static int __hw_perf_event_init(struct perf_event *event)
384{
385 struct perf_event_attr *attr = &event->attr;
386 struct hw_perf_event *hwc = &event->hw;
387 const struct perf_event_map *pmap;
388 u64 enc;
389
390 if (atomic_read(&nmi_active) < 0)
391 return -ENODEV;
392
393 if (attr->type != PERF_TYPE_HARDWARE)
394 return -EOPNOTSUPP;
395
396 if (attr->config >= sparc_pmu->max_events)
397 return -EINVAL;
398
399 perf_event_grab_pmc();
400 event->destroy = hw_perf_event_destroy;
401
402 /* We save the enable bits in the config_base. So to
403 * turn off sampling just write 'config', and to enable
404 * things write 'config | config_base'.
405 */
406 hwc->config_base = sparc_pmu->irq_bit;
407 if (!attr->exclude_user)
408 hwc->config_base |= PCR_UTRACE;
409 if (!attr->exclude_kernel)
410 hwc->config_base |= PCR_STRACE;
411 if (!attr->exclude_hv)
412 hwc->config_base |= sparc_pmu->hv_bit;
413
414 if (!hwc->sample_period) {
415 hwc->sample_period = MAX_PERIOD;
416 hwc->last_period = hwc->sample_period;
417 atomic64_set(&hwc->period_left, hwc->sample_period);
418 }
419
420 pmap = sparc_pmu->event_map(attr->config);
421
422 enc = pmap->encoding;
423 if (pmap->pic_mask & PIC_UPPER) {
424 hwc->idx = PIC_UPPER_INDEX;
425 enc <<= sparc_pmu->upper_shift;
426 } else {
427 hwc->idx = PIC_LOWER_INDEX;
428 enc <<= sparc_pmu->lower_shift;
429 }
430
431 hwc->config |= enc;
432 return 0;
433}
434
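
The config/config_base split described in the comment inside __hw_perf_event_init() keeps the event encoding and the enable bits in separate halves of the eventual PCR value: writing config alone stops sampling, writing config | config_base starts it. A sketch with made-up bit values (the real PCR_UTRACE/PCR_STRACE/hv/irq bits are defined elsewhere and are not reproduced here):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up stand-ins for PCR_UTRACE, PCR_STRACE and the irq bit */
	uint64_t UT = 0x4, ST = 0x2, IRQ = 0x1000;

	uint64_t config      = (uint64_t)0x02ff << 19;	/* event encoding only */
	uint64_t config_base = IRQ | UT | ST;		/* enable bits only    */

	printf("sampling off: PCR = 0x%llx\n", (unsigned long long)config);
	printf("sampling on : PCR = 0x%llx\n",
	       (unsigned long long)(config | config_base));
	return 0;
}
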
435static const struct pmu pmu = {
436 .enable = sparc_pmu_enable,
437 .disable = sparc_pmu_disable,
438 .read = sparc_pmu_read,
439 .unthrottle = sparc_pmu_unthrottle,
440};
441
442const struct pmu *hw_perf_event_init(struct perf_event *event)
443{
444 int err = __hw_perf_event_init(event);
445
446 if (err)
447 return ERR_PTR(err);
448 return &pmu;
449}
450
451void perf_event_print_debug(void)
452{
453 unsigned long flags;
454 u64 pcr, pic;
455 int cpu;
456
457 if (!sparc_pmu)
458 return;
459
460 local_irq_save(flags);
461
462 cpu = smp_processor_id();
463
464 pcr = pcr_ops->read();
465 read_pic(pic);
466
467 pr_info("\n");
468 pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
469 cpu, pcr, pic);
470
471 local_irq_restore(flags);
472}
473
474static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
475 unsigned long cmd, void *__args)
476{
477 struct die_args *args = __args;
478 struct perf_sample_data data;
479 struct cpu_hw_events *cpuc;
480 struct pt_regs *regs;
481 int idx;
482
483 if (!atomic_read(&active_events))
484 return NOTIFY_DONE;
485
486 switch (cmd) {
487 case DIE_NMI:
488 break;
489
490 default:
491 return NOTIFY_DONE;
492 }
493
494 regs = args->regs;
495
496 data.addr = 0;
497
498 cpuc = &__get_cpu_var(cpu_hw_events);
499 for (idx = 0; idx < MAX_HWEVENTS; idx++) {
500 struct perf_event *event = cpuc->events[idx];
501 struct hw_perf_event *hwc;
502 u64 val;
503
504 if (!test_bit(idx, cpuc->active_mask))
505 continue;
506 hwc = &event->hw;
507 val = sparc_perf_event_update(event, hwc, idx);
508 if (val & (1ULL << 31))
509 continue;
510
511 data.period = event->hw.last_period;
512 if (!sparc_perf_event_set_period(event, hwc, idx))
513 continue;
514
515 if (perf_event_overflow(event, 1, &data, regs))
516 sparc_pmu_disable_event(hwc, idx);
517 }
518
519 return NOTIFY_STOP;
520}
521
522static __read_mostly struct notifier_block perf_event_nmi_notifier = {
523 .notifier_call = perf_event_nmi_handler,
524};
525
526static bool __init supported_pmu(void)
527{
528 if (!strcmp(sparc_pmu_type, "ultra3i")) {
529 sparc_pmu = &ultra3i_pmu;
530 return true;
531 }
532 if (!strcmp(sparc_pmu_type, "niagara2")) {
533 sparc_pmu = &niagara2_pmu;
534 return true;
535 }
536 return false;
537}
538
539void __init init_hw_perf_events(void)
540{
541 pr_info("Performance events: ");
542
543 if (!supported_pmu()) {
544 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
545 return;
546 }
547
548 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
549
550 /* All sparc64 PMUs currently have 2 events. But this simple
551 * driver only supports one active event at a time.
552 */
553 perf_max_events = 1;
554
555 register_die_notifier(&perf_event_nmi_notifier);
556}
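
To see what event_encoding() produces from the tables in this file: on niagara2 the cycles event 0x02ff is shifted to bit 19 for the upper counter and bit 6 for the lower one. The shift values below come straight from the niagara2_pmu definition above; the program itself is illustrative only.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cycles      = 0x02ff;	/* niagara2 PERF_COUNT_HW_CPU_CYCLES encoding */
	int      upper_shift = 19;	/* niagara2_pmu.upper_shift */
	int      lower_shift = 6;	/* niagara2_pmu.lower_shift */

	printf("upper field = 0x%llx\n",
	       (unsigned long long)(cycles << upper_shift));	/* 0x17f80000 */
	printf("lower field = 0x%llx\n",
	       (unsigned long long)(cycles << lower_shift));	/* 0xbfc0 */
	return 0;
}
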
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94e7724..18d67854a1b8 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
251 } 251 }
252} 252}
253 253
254void __trigger_all_cpu_backtrace(void) 254void arch_trigger_all_cpu_backtrace(void)
255{ 255{
256 struct thread_info *tp = current_thread_info(); 256 struct thread_info *tp = current_thread_info();
257 struct pt_regs *regs = get_irq_regs(); 257 struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
304 304
305static void sysrq_handle_globreg(int key, struct tty_struct *tty) 305static void sysrq_handle_globreg(int key, struct tty_struct *tty)
306{ 306{
307 __trigger_all_cpu_backtrace(); 307 arch_trigger_all_cpu_backtrace();
308} 308}
309 309
310static struct sysrq_key_op sparc_globalreg_op = { 310static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index fe43e80772db..0a37e8cfd160 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -24,6 +24,8 @@
24 24
25#include <asm/prom.h> 25#include <asm/prom.h>
26#include <asm/oplib.h> 26#include <asm/oplib.h>
27#include <asm/leon.h>
28#include <asm/leon_amba.h>
27 29
28#include "prom.h" 30#include "prom.h"
29 31
@@ -131,6 +133,35 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
131 regs->which_io, regs->phys_addr); 133 regs->which_io, regs->phys_addr);
132} 134}
133 135
136/* "name:vendor:device@irq,addrlo" */
137static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
138{
139 struct amba_prom_registers *regs; unsigned int *intr;
140 unsigned int *device, *vendor;
141 struct property *prop;
142
143 prop = of_find_property(dp, "reg", NULL);
144 if (!prop)
145 return;
146 regs = prop->value;
147 prop = of_find_property(dp, "interrupts", NULL);
148 if (!prop)
149 return;
150 intr = prop->value;
151 prop = of_find_property(dp, "vendor", NULL);
152 if (!prop)
153 return;
154 vendor = prop->value;
155 prop = of_find_property(dp, "device", NULL);
156 if (!prop)
157 return;
158 device = prop->value;
159
160 sprintf(tmp_buf, "%s:%d:%d@%x,%x",
161 dp->name, *vendor, *device,
162 *intr, regs->phys_addr);
163}
164
134static void __init __build_path_component(struct device_node *dp, char *tmp_buf) 165static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
135{ 166{
136 struct device_node *parent = dp->parent; 167 struct device_node *parent = dp->parent;
@@ -143,6 +174,8 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
143 return sbus_path_component(dp, tmp_buf); 174 return sbus_path_component(dp, tmp_buf);
144 if (!strcmp(parent->type, "ebus")) 175 if (!strcmp(parent->type, "ebus"))
145 return ebus_path_component(dp, tmp_buf); 176 return ebus_path_component(dp, tmp_buf);
177 if (!strcmp(parent->type, "ambapp"))
178 return ambapp_path_component(dp, tmp_buf);
146 179
147 /* "isa" is handled with platform naming */ 180 /* "isa" is handled with platform naming */
148 } 181 }
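
The format string in ambapp_path_component() above yields device paths of the form name:vendor:device@irq,addrlo. The values in the snippet below are made-up placeholders, not taken from real AMBA plug&play tables; only the format string matches the patch.

#include <stdio.h>

int main(void)
{
	const char  *name   = "GAISLER_APBUART";	/* hypothetical node name */
	int          vendor = 1;			/* hypothetical vendor id */
	int          device = 12;			/* hypothetical device id */
	unsigned int intr   = 2;			/* hypothetical interrupt  */
	unsigned int addrlo = 0x80000100;		/* hypothetical phys_addr  */
	char buf[64];

	/* same format as the sprintf() in ambapp_path_component() */
	sprintf(buf, "%s:%d:%d@%x,%x", name, vendor, device, intr, addrlo);
	printf("%s\n", buf);	/* GAISLER_APBUART:1:12@2,80000100 */
	return 0;
}
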
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 0fb5789d43c8..138910c67206 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -22,9 +22,12 @@
22#include <linux/of.h> 22#include <linux/of.h>
23#include <asm/prom.h> 23#include <asm/prom.h>
24#include <asm/oplib.h> 24#include <asm/oplib.h>
25#include <asm/leon.h>
25 26
26#include "prom.h" 27#include "prom.h"
27 28
29void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
30
28struct device_node *of_console_device; 31struct device_node *of_console_device;
29EXPORT_SYMBOL(of_console_device); 32EXPORT_SYMBOL(of_console_device);
30 33
@@ -161,7 +164,7 @@ static struct property * __init build_one_prop(phandle node, char *prev,
161 name = prom_nextprop(node, prev, p->name); 164 name = prom_nextprop(node, prev, p->name);
162 } 165 }
163 166
164 if (strlen(name) == 0) { 167 if (!name || strlen(name) == 0) {
165 tmp = p; 168 tmp = p;
166 return NULL; 169 return NULL;
167 } 170 }
@@ -242,7 +245,7 @@ static struct device_node * __init prom_create_node(phandle node,
242 return dp; 245 return dp;
243} 246}
244 247
245static char * __init build_full_name(struct device_node *dp) 248char * __init build_full_name(struct device_node *dp)
246{ 249{
247 int len, ourlen, plen; 250 int len, ourlen, plen;
248 char *n; 251 char *n;
@@ -289,6 +292,9 @@ static struct device_node * __init prom_build_tree(struct device_node *parent,
289 292
290 dp->child = prom_build_tree(dp, prom_getchild(node), nextp); 293 dp->child = prom_build_tree(dp, prom_getchild(node), nextp);
291 294
295 if (prom_build_more)
296 prom_build_more(dp, nextp);
297
292 node = prom_getsibling(node); 298 node = prom_getsibling(node);
293 } 299 }
294 300
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 998cadb4e7f2..9be2af55c5cd 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -235,6 +235,8 @@ void __init setup_arch(char **cmdline_p)
235 sparc_cpu_model = sun4e; 235 sparc_cpu_model = sun4e;
236 if (!strcmp(&cputypval,"sun4u")) 236 if (!strcmp(&cputypval,"sun4u"))
237 sparc_cpu_model = sun4u; 237 sparc_cpu_model = sun4u;
238 if (!strncmp(&cputypval, "leon" , 4))
239 sparc_cpu_model = sparc_leon;
238 240
239 printk("ARCH: "); 241 printk("ARCH: ");
240 switch(sparc_cpu_model) { 242 switch(sparc_cpu_model) {
@@ -256,6 +258,9 @@ void __init setup_arch(char **cmdline_p)
256 case sun4u: 258 case sun4u:
257 printk("SUN4U\n"); 259 printk("SUN4U\n");
258 break; 260 break;
261 case sparc_leon:
262 printk("LEON\n");
263 break;
259 default: 264 default:
260 printk("UNKNOWN!\n"); 265 printk("UNKNOWN!\n");
261 break; 266 break;
@@ -263,8 +268,6 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
-#elif defined(CONFIG_PROM_CONSOLE)
-	conswitchp = &prom_con;
 #endif
 	boot_flags_init(*cmdline_p);
 
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f2bcfd2967d7..21180339cb09 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -295,8 +295,6 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
-#elif defined(CONFIG_PROM_CONSOLE)
-	conswitchp = &prom_con;
 #endif
 
 	idprom_init();
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 181d069a2d44..7ce1a1005b1d 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index ec82d76dc6f2..647afbda7ae1 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 }
+
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 3691907a43b4..aa36223497b9 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1389,8 +1389,8 @@ void smp_send_stop(void)
  * RETURNS:
  * Pointer to the allocated area on success, NULL on failure.
  */
-static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
-					unsigned long align)
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
+					size_t align)
 {
 	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -1415,127 +1415,70 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 #endif
 }
 
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
-
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static void __init pcpu_free_bootmem(void *ptr, size_t size)
 {
-	size_t off = (size_t)pageno << PAGE_SHIFT;
-
-	if (off >= pcpur_size)
-		return NULL;
-
-	return virt_to_page(pcpur_ptrs[cpu] + off);
+	free_bootmem(__pa(ptr), size);
 }
 
-#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
-
-static void __init pcpu_map_range(unsigned long start, unsigned long end,
-				  struct page *page)
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	unsigned long pfn = page_to_pfn(page);
-	unsigned long pte_base;
-
-	BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
-
-	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
-		    _PAGE_CP_4U | _PAGE_CV_4U |
-		    _PAGE_P_4U | _PAGE_W_4U);
-	if (tlb_type == hypervisor)
-		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-			    _PAGE_CP_4V | _PAGE_CV_4V |
-			    _PAGE_P_4V | _PAGE_W_4V);
-
-	while (start < end) {
-		pgd_t *pgd = pgd_offset_k(start);
-		unsigned long this_end;
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
-
-		pud = pud_offset(pgd, start);
-		if (pud_none(*pud)) {
-			pmd_t *new;
-
-			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-			pud_populate(&init_mm, pud, new);
-		}
-
-		pmd = pmd_offset(pud, start);
-		if (!pmd_present(*pmd)) {
-			pte_t *new;
+	if (cpu_to_node(from) == cpu_to_node(to))
+		return LOCAL_DISTANCE;
+	else
+		return REMOTE_DISTANCE;
+}
 
-			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pmd, new);
-		}
+static void __init pcpu_populate_pte(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud;
+	pmd_t *pmd;
 
-		pte = pte_offset_kernel(pmd, start);
-		this_end = (start + PMD_SIZE) & PMD_MASK;
-		if (this_end > end)
-			this_end = end;
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		pmd_t *new;
 
-		while (start < this_end) {
-			unsigned long paddr = pfn << PAGE_SHIFT;
+		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		pud_populate(&init_mm, pud, new);
+	}
 
-			pte_val(*pte) = (paddr | pte_base);
+	pmd = pmd_offset(pud, addr);
+	if (!pmd_present(*pmd)) {
+		pte_t *new;
 
-			start += PAGE_SIZE;
-			pte++;
-			pfn++;
-		}
+		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		pmd_populate_kernel(&init_mm, pmd, new);
 	}
 }
 
 void __init setup_per_cpu_areas(void)
 {
-	size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
-	static struct vm_struct vm;
-	unsigned long delta, cpu;
-	size_t pcpu_unit_size;
-	size_t ptrs_size;
-
-	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
-			       PERCPU_DYNAMIC_RESERVE);
-	dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
-
-
-	ptrs_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpur_ptrs[0]));
-	pcpur_ptrs = alloc_bootmem(ptrs_size);
-
-	for_each_possible_cpu(cpu) {
-		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
-						     PCPU_CHUNK_SIZE);
-
-		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
-			     PCPU_CHUNK_SIZE - pcpur_size);
-
-		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+	unsigned long delta;
+	unsigned int cpu;
+	int rc = -EINVAL;
+
+	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
+					    pcpu_cpu_distance,
+					    pcpu_alloc_bootmem,
+					    pcpu_free_bootmem);
+		if (rc)
+			pr_warning("PERCPU: %s allocator failed (%d), "
+				   "falling back to page size\n",
+				   pcpu_fc_names[pcpu_chosen_fc], rc);
 	}
-
-	/* allocate address and map */
-	vm.flags = VM_ALLOC;
-	vm.size = nr_cpu_ids * PCPU_CHUNK_SIZE;
-	vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
-
-	for_each_possible_cpu(cpu) {
-		unsigned long start = (unsigned long) vm.addr;
-		unsigned long end;
-
-		start += cpu * PCPU_CHUNK_SIZE;
-		end = start + PCPU_CHUNK_SIZE;
-		pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
-	}
-
-	pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
-						PERCPU_MODULE_RESERVE, dyn_size,
-						PCPU_CHUNK_SIZE, vm.addr, NULL);
-
-	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+	if (rc < 0)
+		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
+					   pcpu_alloc_bootmem,
+					   pcpu_free_bootmem,
+					   pcpu_populate_pte);
+	if (rc < 0)
+		panic("cannot initialize percpu area (err=%d)", rc);
 
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
-	for_each_possible_cpu(cpu) {
-		__per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
-	}
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 
 	/* Setup %g5 for the boot cpu. */
 	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
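
After this rework the architecture only supplies the bootmem alloc/free callbacks, a NUMA distance callback and a PTE populate callback; the generic percpu code builds the first chunk. The tail of the function keeps the usual contract: each CPU's offset is the delta between the allocated area and the link-time .percpu section, with the boot CPU's offset cached in %g5. As a rough illustration (generic kernel convention, not code from this patch; pcp_var stands for any DEFINE_PER_CPU variable), a per-cpu variable is then reached at its link-time address plus that offset:

	/* illustrative only */
	void *p = (char *)&pcp_var + __per_cpu_offset(cpu);
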
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index aed94869ad6a..e7061138c98a 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -121,7 +121,7 @@ SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
 SIGN1(sys32_umask, sys_umask, %o0)
 SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
 SIGN1(sys32_sendto, sys_sendto, %o0)
-SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
+SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
 SIGN2(sys32_connect, sys_connect, %o0, %o2)
 SIGN2(sys32_bind, sys_bind, %o0, %o2)
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index f5000a460c05..04e28b2671c8 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -16,7 +16,6 @@
 #include <linux/signal.h>
 #include <linux/resource.h>
 #include <linux/times.h>
-#include <linux/utsname.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/sem.h>
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index d28f496f4669..ca39c606fe8e 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
  */
+#include <linux/sched.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index 15c2d752b2bc..a63c5d2d9849 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -3,10 +3,11 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/utsname.h>
 #include <asm/utrap.h>
 #include <asm/signal.h>
 
+struct new_utsname;
+
 extern asmlinkage unsigned long sys_getpagesize(void);
 extern asmlinkage unsigned long sparc_brk(unsigned long brk);
 extern asmlinkage long sparc_pipe(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 690901657291..0f1658d37490 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,5 @@ sys_call_table:
/*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
 
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 2ee7250ba7ae..009825f6e73c 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -83,7 +83,7 @@ sys_call_table32:
/*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open
 
 #endif /* CONFIG_COMPAT */
 
@@ -158,4 +158,4 @@ sys_call_table:
/*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-	.word sys_pwritev, sys_rt_tgsigqueueinfo
+	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
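
The new entry sits two slots past the 325 marker in each table, i.e. syscall number 327, and the diffstat shows the matching asm/unistd.h change. For illustration only (userspace code, not part of this patch; it assumes __NR_perf_event_open from the updated headers resolves to that slot), the call is then reached the usual way:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	/* open a hardware cycle counter on the calling thread */
	long open_cycle_counter(void)
	{
		struct perf_event_attr attr = {
			.type   = PERF_TYPE_HARDWARE,
			.size   = sizeof(attr),
			.config = PERF_COUNT_HW_CPU_CYCLES,
		};

		return syscall(__NR_perf_event_open, &attr,
			       0 /* this thread */, -1 /* any cpu */,
			       -1 /* no group */, 0 /* no flags */);
	}
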
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index fcbbd000ec08..4e5992593967 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -51,70 +51,27 @@ SECTIONS
 	_etext = .;
 
 	RO_DATA(PAGE_SIZE)
-	.data : {
-		DATA_DATA
-		CONSTRUCTORS
-	}
 	.data1 : {
 		*(.data1)
 	}
-	. = ALIGN(SMP_CACHE_BYTES);
-	.data.cacheline_aligned : {
-		*(.data.cacheline_aligned)
-	}
-	. = ALIGN(SMP_CACHE_BYTES);
-	.data.read_mostly : {
-		*(.data.read_mostly)
-	}
+	RW_DATA_SECTION(SMP_CACHE_BYTES, 0, THREAD_SIZE)
+
 	/* End of data section */
 	_edata = .;
 
-	/* init_task */
-	. = ALIGN(THREAD_SIZE);
-	.data.init_task : {
-		*(.data.init_task)
-	}
 	.fixup : {
 		__start___fixup = .;
 		*(.fixup)
 		__stop___fixup = .;
 	}
-	. = ALIGN(16);
-	__ex_table : {
-		__start___ex_table = .;
-		*(__ex_table)
-		__stop___ex_table = .;
-	}
+	EXCEPTION_TABLE(16)
 	NOTES
 
 	. = ALIGN(PAGE_SIZE);
-	.init.text : {
-		__init_begin = .;
-		_sinittext = .;
-		INIT_TEXT
-		_einittext = .;
-	}
+	__init_begin = ALIGN(PAGE_SIZE);
+	INIT_TEXT_SECTION(PAGE_SIZE)
 	__init_text_end = .;
-	.init.data : {
-		INIT_DATA
-	}
-	. = ALIGN(16);
-	.init.setup : {
-		__setup_start = .;
-		*(.init.setup)
-		__setup_end = .;
-	}
-	.initcall.init : {
-		__initcall_start = .;
-		INITCALLS
-		__initcall_end = .;
-	}
-	.con_initcall.init : {
-		__con_initcall_start = .;
-		*(.con_initcall.init)
-		__con_initcall_end = .;
-	}
-	SECURITY_INIT
+	INIT_DATA_SECTION(16)
 
 	. = ALIGN(4);
 	.tsb_ldquad_phys_patch : {
@@ -146,37 +103,15 @@ SECTIONS
 		__sun4v_2insn_patch_end = .;
 	}
 
-#ifdef CONFIG_BLK_DEV_INITRD
-	. = ALIGN(PAGE_SIZE);
-	.init.ramfs : {
-		__initramfs_start = .;
-		*(.init.ramfs)
-		__initramfs_end = .;
-	}
-#endif
-
 	PERCPU(PAGE_SIZE)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
-	__bss_start = .;
-	.sbss : {
-		*(.sbss)
-		*(.scommon)
-	}
-	.bss : {
-		*(.dynbss)
-		*(.bss)
-		*(COMMON)
-	}
+	BSS_SECTION(0, 0, 0)
 	_end = . ;
 
-	/DISCARD/ : {
-		EXIT_TEXT
-		EXIT_DATA
-		*(.exitcall.exit)
-	}
-
 	STABS_DEBUG
 	DWARF_DEBUG
+
+	DISCARDS
 }
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 681abe0a4594..79836a7dd00c 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_SPARC32) += loadmmu.o
 obj-y                   += generic_$(BITS).o
 obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
+obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
 
 # Only used by sparc64
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 26bb3919ff1f..dc7c3b17a15f 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -34,6 +34,7 @@
 #include <asm/pgalloc.h>	/* bug in asm-generic/tlb.h: check_pgt_cache */
 #include <asm/tlb.h>
 #include <asm/prom.h>
+#include <asm/leon.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -326,6 +327,9 @@ void __init paging_init(void)
 		sparc_unmapped_base = 0xe0000000;
 		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
 		break;
+	case sparc_leon:
+		leon_init();
+		/* fall through */
 	case sun4m:
 	case sun4d:
 		srmmu_paging_init();
@@ -468,7 +472,7 @@ void __init mem_init(void)
 			reservedpages++;
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+	       nr_free_pages() << (PAGE_SHIFT-10),
 	       num_physpages << (PAGE_SHIFT - 10),
 	       codepages << (PAGE_SHIFT-10),
 	       reservedpages << (PAGE_SHIFT - 10),
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
new file mode 100644
index 000000000000..c0e01297e64e
--- /dev/null
+++ b/arch/sparc/mm/leon_mm.c
@@ -0,0 +1,260 @@
1/*
2 * linux/arch/sparc/mm/leon_m.c
3 *
4 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
5 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
6 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
7 *
8 * do srmmu probe in software
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <asm/asi.h>
15#include <asm/leon.h>
16#include <asm/tlbflush.h>
17
18int leon_flush_during_switch = 1;
19int srmmu_swprobe_trace;
20
21unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
22{
23
24 unsigned int ctxtbl;
25 unsigned int pgd, pmd, ped;
26 unsigned int ptr;
27 unsigned int lvl, pte, paddrbase;
28 unsigned int ctx;
29 unsigned int paddr_calc;
30
31 paddrbase = 0;
32
33 if (srmmu_swprobe_trace)
34 printk(KERN_INFO "swprobe: trace on\n");
35
36 ctxtbl = srmmu_get_ctable_ptr();
37 if (!(ctxtbl)) {
38 if (srmmu_swprobe_trace)
39 printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
40 return 0;
41 }
42 if (!_pfn_valid(PFN(ctxtbl))) {
43 if (srmmu_swprobe_trace)
44 printk(KERN_INFO
45 "swprobe: !_pfn_valid(%x)=>0\n",
46 PFN(ctxtbl));
47 return 0;
48 }
49
50 ctx = srmmu_get_context();
51 if (srmmu_swprobe_trace)
52 printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx);
53
54 pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));
55
56 if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
57 if (srmmu_swprobe_trace)
58 printk(KERN_INFO "swprobe: pgd is entry level 3\n");
59 lvl = 3;
60 pte = pgd;
61 paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
62 goto ready;
63 }
64 if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
65 if (srmmu_swprobe_trace)
66 printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
67 return 0;
68 }
69
70 if (srmmu_swprobe_trace)
71 printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd);
72
73 ptr = (pgd & SRMMU_PTD_PMASK) << 4;
74 ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
75 if (!_pfn_valid(PFN(ptr)))
76 return 0;
77
78 pmd = LEON_BYPASS_LOAD_PA(ptr);
79 if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
80 if (srmmu_swprobe_trace)
81 printk(KERN_INFO "swprobe: pmd is entry level 2\n");
82 lvl = 2;
83 pte = pmd;
84 paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
85 goto ready;
86 }
87 if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
88 if (srmmu_swprobe_trace)
89 printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
90 return 0;
91 }
92
93 if (srmmu_swprobe_trace)
94 printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);
95
96 ptr = (pmd & SRMMU_PTD_PMASK) << 4;
97 ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
98 if (!_pfn_valid(PFN(ptr))) {
99 if (srmmu_swprobe_trace)
100 printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
101 PFN(ptr));
102 return 0;
103 }
104
105 ped = LEON_BYPASS_LOAD_PA(ptr);
106
107 if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
108 if (srmmu_swprobe_trace)
109 printk(KERN_INFO "swprobe: ped is entry level 1\n");
110 lvl = 1;
111 pte = ped;
112 paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
113 goto ready;
114 }
115 if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
116 if (srmmu_swprobe_trace)
117 printk(KERN_INFO "swprobe: ped is invalid => 0\n");
118 return 0;
119 }
120
121 if (srmmu_swprobe_trace)
122 printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped);
123
124 ptr = (ped & SRMMU_PTD_PMASK) << 4;
125 ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
126 if (!_pfn_valid(PFN(ptr)))
127 return 0;
128
129 ptr = LEON_BYPASS_LOAD_PA(ptr);
130 if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
131 if (srmmu_swprobe_trace)
132 printk(KERN_INFO "swprobe: ptr is entry level 0\n");
133 lvl = 0;
134 pte = ptr;
135 paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
136 goto ready;
137 }
138 if (srmmu_swprobe_trace)
139 printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
140 return 0;
141
142ready:
143 switch (lvl) {
144 case 0:
145 paddr_calc =
146 (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
147 break;
148 case 1:
149 paddr_calc =
150 (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
151 break;
152 case 2:
153 paddr_calc =
154 (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
155 break;
156 default:
157 case 3:
158 paddr_calc = vaddr;
159 break;
160 }
161 if (srmmu_swprobe_trace)
162 printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
163 if (paddr)
164 *paddr = paddr_calc;
165 return paddrbase;
166}
167
168void leon_flush_icache_all(void)
169{
170 __asm__ __volatile__(" flush "); /*iflush*/
171}
172
173void leon_flush_dcache_all(void)
174{
175 __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
176 "i"(ASI_LEON_DFLUSH) : "memory");
177}
178
179void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
180{
181 if (vma->vm_flags & VM_EXEC)
182 leon_flush_icache_all();
183 leon_flush_dcache_all();
184}
185
186void leon_flush_cache_all(void)
187{
188 __asm__ __volatile__(" flush "); /*iflush*/
189 __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
190 "i"(ASI_LEON_DFLUSH) : "memory");
191}
192
193void leon_flush_tlb_all(void)
194{
195 leon_flush_cache_all();
196 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
197 "i"(ASI_LEON_MMUFLUSH) : "memory");
198}
199
200/* get all cache regs */
201void leon3_getCacheRegs(struct leon3_cacheregs *regs)
202{
203 unsigned long ccr, iccr, dccr;
204
205 if (!regs)
206 return;
207 /* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
208 __asm__ __volatile__("lda [%%g0] %3, %0\n\t"
209 "mov 0x08, %%g1\n\t"
210 "lda [%%g1] %3, %1\n\t"
211 "mov 0x0c, %%g1\n\t"
212 "lda [%%g1] %3, %2\n\t"
213 : "=r"(ccr), "=r"(iccr), "=r"(dccr)
214 /* output */
215 : "i"(ASI_LEON_CACHEREGS) /* input */
216 : "g1" /* clobber list */
217 );
218 regs->ccr = ccr;
219 regs->iccr = iccr;
220 regs->dccr = dccr;
221}
222
223/* Due to virtual cache we need to check cache configuration if
224 * it is possible to skip flushing in some cases.
225 *
226 * Leon2 and Leon3 differ in their way of telling cache information
227 *
228 */
229int leon_flush_needed(void)
230{
231 int flush_needed = -1;
232 unsigned int ssize, sets;
233 char *setStr[4] =
234 { "direct mapped", "2-way associative", "3-way associative",
235 "4-way associative"
236 };
237 /* leon 3 */
238 struct leon3_cacheregs cregs;
239 leon3_getCacheRegs(&cregs);
240 sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
241 /* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
242 ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
243
244 printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
245 sets > 3 ? "unknown" : setStr[sets], ssize);
246 if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
247 /* Set Size <= Page size ==>
248 flush on every context switch not needed. */
249 flush_needed = 0;
250 printk(KERN_INFO "CACHE: not flushing on every context switch\n");
251 }
252 return flush_needed;
253}
254
255void leon_switch_mm(void)
256{
257 flush_tlb_mm((void *)0);
258 if (leon_flush_during_switch)
259 leon_flush_cache_all();
260}
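
srmmu_swprobe() above walks the SRMMU context and page tables in software, using bypass loads, and returns the matched PTE's physical base field (0 when no valid mapping is found), optionally writing the fully resolved physical address through its second argument. A minimal sketch of a caller (illustrative only, not from this patch; show_mapping is a hypothetical helper):

	/* hypothetical debug helper, for illustration only */
	static void __init show_mapping(unsigned long vaddr)
	{
		unsigned long paddr = 0;

		srmmu_swprobe_trace = 1;	/* optional: log each table level */
		if (srmmu_swprobe(vaddr, &paddr))
			printk(KERN_INFO "0x%08lx -> phys 0x%08lx\n", vaddr, paddr);
		else
			printk(KERN_INFO "0x%08lx: no valid mapping\n", vaddr);
	}
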
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
index 652be05acbea..82ec8f666036 100644
--- a/arch/sparc/mm/loadmmu.c
+++ b/arch/sparc/mm/loadmmu.c
@@ -33,6 +33,7 @@ void __init load_mmu(void)
 		break;
 	case sun4m:
 	case sun4d:
+	case sparc_leon:
 		ld_mmu_srmmu();
 		break;
 	default:
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index ade4eb373bdd..509b1ffeba66 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -46,6 +46,7 @@
 #include <asm/tsunami.h>
 #include <asm/swift.h>
 #include <asm/turbosparc.h>
+#include <asm/leon.h>
 
 #include <asm/btfixup.h>
 
@@ -569,6 +570,9 @@ static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
 		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
 	}
 
+	if (sparc_cpu_model == sparc_leon)
+		leon_switch_mm();
+
 	if (is_hypersparc)
 		hyper_flush_whole_icache();
 
@@ -1977,6 +1981,45 @@ static void __init init_viking(void)
1977 poke_srmmu = poke_viking; 1981 poke_srmmu = poke_viking;
1978} 1982}
1979 1983
1984#ifdef CONFIG_SPARC_LEON
1985
1986void __init poke_leonsparc(void)
1987{
1988}
1989
1990void __init init_leon(void)
1991{
1992
1993 srmmu_name = "Leon";
1994
1995 BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
1996 BTFIXUPCALL_NORM);
1997 BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
1998 BTFIXUPCALL_NORM);
1999 BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
2000 BTFIXUPCALL_NORM);
2001 BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
2002 BTFIXUPCALL_NORM);
2003 BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
2004 BTFIXUPCALL_NORM);
2005
2006 BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
2007 BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
2008 BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
2009 BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
2010
2011 BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
2012 BTFIXUPCALL_NOP);
2013 BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
2014
2015 poke_srmmu = poke_leonsparc;
2016
2017 srmmu_cache_pagetables = 0;
2018
2019 leon_flush_during_switch = leon_flush_needed();
2020}
2021#endif
2022
1980/* Probe for the srmmu chip version. */ 2023/* Probe for the srmmu chip version. */
1981static void __init get_srmmu_type(void) 2024static void __init get_srmmu_type(void)
1982{ 2025{
@@ -1992,7 +2035,15 @@ static void __init get_srmmu_type(void)
 	psr_typ = (psr >> 28) & 0xf;
 	psr_vers = (psr >> 24) & 0xf;
 
-	/* First, check for HyperSparc or Cypress. */
+	/* First, check for sparc-leon. */
+	if (sparc_cpu_model == sparc_leon) {
+		psr_typ = 0xf;	/* hardcoded ids for older models/simulators */
+		psr_vers = 2;
+		init_leon();
+		return;
+	}
+
+	/* Second, check for HyperSparc or Cypress. */
 	if(mod_typ == 1) {
 		switch(mod_rev) {
 		case 7:
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index d172f86439b1..f97cb8b6ee5f 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -21,7 +21,7 @@
 static int profile_timer_exceptions_notify(struct notifier_block *self,
 					   unsigned long val, void *data)
 {
-	struct die_args *args = (struct die_args *)data;
+	struct die_args *args = data;
 	int ret = NOTIFY_DONE;
 
 	switch (val) {
@@ -57,7 +57,7 @@ static void timer_stop(void)
 
 static int op_nmi_timer_init(struct oprofile_operations *ops)
 {
-	if (!nmi_usable)
+	if (atomic_read(&nmi_active) <= 0)
 		return -ENODEV;
 
 	ops->start = timer_start;
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
index eedffb4fec2d..39fc6af21b7c 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -88,7 +88,7 @@ void prom_cmdline(void)
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
-void prom_halt(void)
+void notrace prom_halt(void)
 {
 #ifdef CONFIG_SUN_LDOMS
 	if (ldom_domaining_enabled)
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
index 660943ee4c2a..ca869266b9f3 100644
--- a/arch/sparc/prom/printf.c
+++ b/arch/sparc/prom/printf.c
@@ -14,14 +14,14 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 
 static char ppbuf[1024];
 
-void
-prom_write(const char *buf, unsigned int n)
+void notrace prom_write(const char *buf, unsigned int n)
 {
 	char ch;
 
@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n)
 	}
 }
 
-void
-prom_printf(const char *fmt, ...)
+void notrace prom_printf(const char *fmt, ...)
 {
 	va_list args;
 	int i;