author		Ingo Molnar <mingo@elte.hu>	2009-06-17 02:59:01 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-17 02:59:10 -0400
commit		cc4949e1fdade5d063e9f8783cf0e2cc92041ce5 (patch)
tree		4023bd641bfe464efbde518fb504d6865c9df014 /arch/sparc
parent		28b4868820a56de661f54742ff91b78e12f1e582 (diff)
parent		300df7dc89cc276377fc020704e34875d5c473b6 (diff)

Merge branch 'linus' into x86/urgent

Merge reason: pull in latest to fix a bug in it.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                       |    5
-rw-r--r--  arch/sparc/configs/sparc64_defconfig     |   63
-rw-r--r--  arch/sparc/include/asm/atomic_32.h       |    2
-rw-r--r--  arch/sparc/include/asm/atomic_64.h       |    2
-rw-r--r--  arch/sparc/include/asm/bitsperlong.h     |   13
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h      |  197
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h     |  168
-rw-r--r--  arch/sparc/include/asm/dma-mapping_32.h  |   60
-rw-r--r--  arch/sparc/include/asm/dma-mapping_64.h  |  154
-rw-r--r--  arch/sparc/include/asm/errno.h           |    2
-rw-r--r--  arch/sparc/include/asm/ftrace.h          |   11
-rw-r--r--  arch/sparc/include/asm/mdesc.h           |    3
-rw-r--r--  arch/sparc/include/asm/mman.h            |    2
-rw-r--r--  arch/sparc/include/asm/page_32.h         |    2
-rw-r--r--  arch/sparc/include/asm/page_64.h         |    2
-rw-r--r--  arch/sparc/include/asm/percpu_64.h       |    8
-rw-r--r--  arch/sparc/include/asm/prom.h            |    2
-rw-r--r--  arch/sparc/include/asm/signal.h          |    2
-rw-r--r--  arch/sparc/include/asm/trap_block.h      |  207
-rw-r--r--  arch/sparc/include/asm/types.h           |    4
-rw-r--r--  arch/sparc/include/asm/uaccess_32.h      |    3
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h      |    2
-rw-r--r--  arch/sparc/include/asm/unistd.h          |    3
-rw-r--r--  arch/sparc/kernel/Makefile               |    2
-rw-r--r--  arch/sparc/kernel/cpumap.c               |  431
-rw-r--r--  arch/sparc/kernel/cpumap.h               |   16
-rw-r--r--  arch/sparc/kernel/dma.c                  |  127
-rw-r--r--  arch/sparc/kernel/ds.c                   |    3
-rw-r--r--  arch/sparc/kernel/ftrace.c               |   47
-rw-r--r--  arch/sparc/kernel/head_64.S              |   22
-rw-r--r--  arch/sparc/kernel/iommu.c                |   15
-rw-r--r--  arch/sparc/kernel/irq_64.c               |   29
-rw-r--r--  arch/sparc/kernel/mdesc.c                |  149
-rw-r--r--  arch/sparc/kernel/module.c               |    2
-rw-r--r--  arch/sparc/kernel/of_device_32.c         |  195
-rw-r--r--  arch/sparc/kernel/of_device_64.c         |  188
-rw-r--r--  arch/sparc/kernel/of_device_common.c     |  174
-rw-r--r--  arch/sparc/kernel/of_device_common.h     |   36
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c            |   15
-rw-r--r--  arch/sparc/kernel/prom.h                 |    1
-rw-r--r--  arch/sparc/kernel/prom_64.c              |  232
-rw-r--r--  arch/sparc/kernel/prom_common.c          |    2
-rw-r--r--  arch/sparc/kernel/smp_64.c               |  196
-rw-r--r--  arch/sparc/kernel/systbls_32.S           |    4
-rw-r--r--  arch/sparc/kernel/systbls_64.S           |    6
-rw-r--r--  arch/sparc/kernel/traps_64.c             |  170
-rw-r--r--  arch/sparc/mm/extable.c                  |   29
-rw-r--r--  arch/sparc/mm/init_32.c                  |    1
-rw-r--r--  arch/sparc/mm/init_64.c                  |   16
-rw-r--r--  arch/sparc/mm/srmmu.c                    |    3
50 files changed, 1735 insertions(+), 1293 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cc12cd48bbc5..3f8b6a92eabd 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,8 @@ config SPARC64
 	select HAVE_KPROBES
 	select HAVE_LMB
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select RTC_DRV_CMOS
 	select RTC_DRV_BQ4802
@@ -93,6 +95,9 @@ config AUDIT_ARCH
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y if SPARC64
 
+config HAVE_DYNAMIC_PER_CPU_AREA
+	def_bool y if SPARC64
+
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	bool
 	def_bool y if SPARC64
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index b5d63bd8716e..0123a4c596ce 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
-# Fri Apr 17 02:03:07 2009
+# Linux kernel version: 2.6.30
+# Tue Jun 16 04:59:36 2009
 #
 CONFIG_64BIT=y
 CONFIG_SPARC=y
@@ -19,6 +19,7 @@ CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_HAVE_LATENCYTOP_SUPPORT=y
 CONFIG_AUDIT_ARCH=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_MMU=y
 CONFIG_ARCH_NO_VIRT_TO_BUS=y
@@ -82,7 +83,6 @@ CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
@@ -95,16 +95,21 @@ CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_AIO=y
+
+#
+# Performance Counters
+#
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_PCI_QUIRKS=y
 CONFIG_SLUB_DEBUG=y
+# CONFIG_STRIP_ASM_SYMS is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_SLAB is not set
 CONFIG_SLUB=y
 # CONFIG_SLOB is not set
 CONFIG_PROFILING=y
 CONFIG_TRACEPOINTS=y
-# CONFIG_MARKERS is not set
+CONFIG_MARKERS=y
 CONFIG_OPROFILE=m
 CONFIG_HAVE_OPROFILE=y
 CONFIG_KPROBES=y
@@ -202,6 +207,7 @@ CONFIG_NR_QUICK=1
 CONFIG_UNEVICTABLE_LRU=y
 CONFIG_HAVE_MLOCK=y
 CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
 CONFIG_SCHED_SMT=y
 CONFIG_SCHED_MC=y
 # CONFIG_PREEMPT_NONE is not set
@@ -321,6 +327,7 @@ CONFIG_VLAN_8021Q=m
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
 # CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
 # CONFIG_NET_SCHED is not set
 # CONFIG_DCB is not set
 
@@ -340,7 +347,11 @@ CONFIG_WIRELESS=y
 CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_LIB80211 is not set
-# CONFIG_MAC80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+CONFIG_MAC80211_DEFAULT_PS_VALUE=0
 # CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
@@ -364,6 +375,7 @@ CONFIG_EXTRA_FIRMWARE=""
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
 CONFIG_OF_DEVICE=y
+CONFIG_OF_MDIO=m
 # CONFIG_PARPORT is not set
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_FD is not set
@@ -399,6 +411,7 @@ CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_AT24 is not set
 # CONFIG_EEPROM_LEGACY is not set
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
 CONFIG_HAVE_IDE=y
 CONFIG_IDE=y
 
@@ -477,10 +490,6 @@ CONFIG_BLK_DEV_SR=m
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 # CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 # CONFIG_SCSI_LOGGING is not set
@@ -499,6 +508,7 @@ CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_ISCSI_TCP is not set
 # CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
 # CONFIG_SCSI_3W_9XXX is not set
 # CONFIG_SCSI_ACARD is not set
@@ -507,6 +517,7 @@ CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 # CONFIG_SCSI_AIC79XX is not set
 # CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
 # CONFIG_SCSI_ARCMSR is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
@@ -521,7 +532,6 @@ CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_SCSI_IPS is not set
 # CONFIG_SCSI_INITIO is not set
 # CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
 # CONFIG_SCSI_STEX is not set
 # CONFIG_SCSI_SYM53C8XX_2 is not set
 # CONFIG_SCSI_QLOGIC_1280 is not set
@@ -569,7 +579,6 @@ CONFIG_DM_ZERO=m
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -635,6 +644,7 @@ CONFIG_NET_PCI=y
 # CONFIG_SMSC9420 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
 # CONFIG_VIA_RHINE is not set
 # CONFIG_SC92031 is not set
 # CONFIG_ATL2 is not set
@@ -1127,6 +1137,11 @@ CONFIG_SND_VERBOSE_PROCFS=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
 CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
 CONFIG_SND_MPU401_UART=m
 CONFIG_SND_AC97_CODEC=m
 CONFIG_SND_DRIVERS=y
@@ -1153,6 +1168,7 @@ CONFIG_SND_ALI5451=m
 # CONFIG_SND_OXYGEN is not set
 # CONFIG_SND_CS4281 is not set
 # CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CTXFI is not set
 # CONFIG_SND_DARLA20 is not set
 # CONFIG_SND_GINA20 is not set
 # CONFIG_SND_LAYLA20 is not set
@@ -1183,6 +1199,7 @@ CONFIG_SND_ALI5451=m
 # CONFIG_SND_INTEL8X0 is not set
 # CONFIG_SND_INTEL8X0M is not set
 # CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
 # CONFIG_SND_MAESTRO3 is not set
 # CONFIG_SND_MIXART is not set
 # CONFIG_SND_NM256 is not set
@@ -1229,6 +1246,7 @@ CONFIG_HID_BELKIN=y
 CONFIG_HID_CHERRY=y
 CONFIG_HID_CHICONY=y
 CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
 # CONFIG_DRAGONRISE_FF is not set
 CONFIG_HID_EZKEY=y
 CONFIG_HID_KYE=y
@@ -1246,9 +1264,14 @@ CONFIG_HID_PETALYNX=y
 CONFIG_HID_SAMSUNG=y
 CONFIG_HID_SONY=y
 CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
 # CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=y
+# CONFIG_SMARTJOYPLUS_FF is not set
 CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
 # CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_ZEROPLUS=y
 # CONFIG_ZEROPLUS_FF is not set
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
@@ -1462,6 +1485,7 @@ CONFIG_FILE_LOCKING=y
 # CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_BTRFS_FS is not set
+CONFIG_FSNOTIFY=y
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
@@ -1636,25 +1660,28 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_DEBUG_PAGEALLOC is not set
 CONFIG_NOP_TRACER=y
 CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
 CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
 CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
 CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
 # CONFIG_FUNCTION_TRACER is not set
 # CONFIG_IRQSOFF_TRACER is not set
 # CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
 # CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
 # CONFIG_STACK_TRACER is not set
 # CONFIG_KMEMTRACE is not set
 # CONFIG_WORKQUEUE_TRACER is not set
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
 # CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index bb91b1248cd1..f0d343c3b956 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -161,5 +161,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v)
 
 #endif /* !(__KERNEL__) */
 
-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index a0a706492696..f2e48009989e 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -114,5 +114,5 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/bitsperlong.h b/arch/sparc/include/asm/bitsperlong.h
new file mode 100644
index 000000000000..40dcaa3aaa56
--- /dev/null
+++ b/arch/sparc/include/asm/bitsperlong.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ALPHA_BITSPERLONG_H
+#define __ASM_ALPHA_BITSPERLONG_H
+
+#if defined(__sparc__) && defined(__arch64__)
+#define __BITS_PER_LONG 64
+#else
+#define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_ALPHA_BITSPERLONG_H */
+
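Note: the new arch header only pins down __BITS_PER_LONG and defers everything else to asm-generic. Roughly, the generic side derives the kernel's BITS_PER_LONG and cross-checks the two (a simplified sketch of <asm-generic/bitsperlong.h>, not part of this diff):

/* Simplified sketch: the generic header picks BITS_PER_LONG from
 * CONFIG_64BIT and verifies it against the arch's __BITS_PER_LONG. */
#ifdef CONFIG_64BIT
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif

#if !defined(__ASSEMBLY__) && BITS_PER_LONG != __BITS_PER_LONG
#error Inconsistent word size. Check asm/bitsperlong.h
#endif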
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a11b89ee9ef8..926397d345ff 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -6,9 +6,6 @@
 #ifndef _SPARC64_CPUDATA_H
 #define _SPARC64_CPUDATA_H
 
-#include <asm/hypervisor.h>
-#include <asm/asi.h>
-
 #ifndef __ASSEMBLY__
 
 #include <linux/percpu.h>
@@ -38,202 +35,10 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu)		per_cpu(__cpu_data, (__cpu))
 #define local_cpu_data()	__get_cpu_var(__cpu_data)
 
-/* Trap handling code needs to get at a few critical values upon
- * trap entry and to process TSB misses.  These cannot be in the
- * per_cpu() area as we really need to lock them into the TLB and
- * thus make them part of the main kernel image.  As a result we
- * try to make this as small as possible.
- *
- * This is padded out and aligned to 64-bytes to avoid false sharing
- * on SMP.
- */
-
-/* If you modify the size of this structure, please update
- * TRAP_BLOCK_SZ_SHIFT below.
- */
-struct thread_info;
-struct trap_per_cpu {
-/* D-cache line 1: Basic thread information, cpu and device mondo queues */
-	struct thread_info	*thread;
-	unsigned long		pgd_paddr;
-	unsigned long		cpu_mondo_pa;
-	unsigned long		dev_mondo_pa;
-
-/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
-	unsigned long		resum_mondo_pa;
-	unsigned long		resum_kernel_buf_pa;
-	unsigned long		nonresum_mondo_pa;
-	unsigned long		nonresum_kernel_buf_pa;
-
-/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
-	struct hv_fault_status	fault_info;
-
-/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
-	unsigned long		cpu_mondo_block_pa;
-	unsigned long		cpu_list_pa;
-	unsigned long		tsb_huge;
-	unsigned long		tsb_huge_temp;
-
-/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
-	unsigned long		irq_worklist_pa;
-	unsigned int		cpu_mondo_qmask;
-	unsigned int		dev_mondo_qmask;
-	unsigned int		resum_qmask;
-	unsigned int		nonresum_qmask;
-	void			*hdesc;
-} __attribute__((aligned(64)));
-extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
-extern int ncpus_probed;
 extern const struct seq_operations cpuinfo_op;
 
-extern unsigned long real_hard_smp_processor_id(void);
-
-struct cpuid_patch_entry {
-	unsigned int	addr;
-	unsigned int	cheetah_safari[4];
-	unsigned int	cheetah_jbus[4];
-	unsigned int	starfire[4];
-	unsigned int	sun4v[4];
-};
-extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
-
-struct sun4v_1insn_patch_entry {
-	unsigned int	addr;
-	unsigned int	insn;
-};
-extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
-	__sun4v_1insn_patch_end;
-
-struct sun4v_2insn_patch_entry {
-	unsigned int	addr;
-	unsigned int	insns[2];
-};
-extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
-	__sun4v_2insn_patch_end;
-
 #endif /* !(__ASSEMBLY__) */
 
-#define TRAP_PER_CPU_THREAD		0x00
-#define TRAP_PER_CPU_PGD_PADDR		0x08
-#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
-#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
-#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
-#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
-#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
-#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
-#define TRAP_PER_CPU_FAULT_INFO		0x40
-#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
-#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
-#define TRAP_PER_CPU_TSB_HUGE		0xd0
-#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
-#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
-#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
-#define TRAP_PER_CPU_RESUM_QMASK	0xf0
-#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
-
-#define TRAP_BLOCK_SZ_SHIFT		8
-
-#include <asm/scratchpad.h>
-
-#define __GET_CPUID(REG)				\
-	/* Spitfire implementation (default). */	\
-661:	ldxa	[%g0] ASI_UPA_CONFIG, REG;		\
-	srlx	REG, 17, REG;				\
-	and	REG, 0x1f, REG;				\
-	nop;						\
-	.section	.cpuid_patch, "ax";		\
-	/* Instruction location. */			\
-	.word	661b;					\
-	/* Cheetah Safari implementation. */		\
-	ldxa	[%g0] ASI_SAFARI_CONFIG, REG;		\
-	srlx	REG, 17, REG;				\
-	and	REG, 0x3ff, REG;			\
-	nop;						\
-	/* Cheetah JBUS implementation. */		\
-	ldxa	[%g0] ASI_JBUS_CONFIG, REG;		\
-	srlx	REG, 17, REG;				\
-	and	REG, 0x1f, REG;				\
-	nop;						\
-	/* Starfire implementation. */			\
-	sethi	%hi(0x1fff40000d0 >> 9), REG;		\
-	sllx	REG, 9, REG;				\
-	or	REG, 0xd0, REG;				\
-	lduwa	[REG] ASI_PHYS_BYPASS_EC_E, REG;	\
-	/* sun4v implementation. */			\
-	mov	SCRATCHPAD_CPUID, REG;			\
-	ldxa	[REG] ASI_SCRATCHPAD, REG;		\
-	nop;						\
-	nop;						\
-	.previous;
-
-#ifdef CONFIG_SMP
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	__GET_CPUID(TMP)			\
-	sethi	%hi(trap_block), DEST;		\
-	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
-	or	DEST, %lo(trap_block), DEST;	\
-	add	DEST, TMP, DEST;		\
-
-/* Clobbers TMP, current address space PGD phys address into DEST. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-/* Clobbers TMP, loads DEST with current thread info pointer. */
-#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* Given the current thread info pointer in THR, load the per-cpu
- * area base of the current processor into DEST.  REG1, REG2, and REG3 are
- * clobbered.
- *
- * You absolutely cannot use DEST as a temporary in this code.  The
- * reason is that traps can happen during execution, and return from
- * trap will load the fully resolved DEST per-cpu base.  This can corrupt
- * the calculations done by the macro mid-stream.
- */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
-	lduh	[THR + TI_CPU], REG1;			\
-	sethi	%hi(__per_cpu_shift), REG3;		\
-	sethi	%hi(__per_cpu_base), REG2;		\
-	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
-	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
-	sllx	REG1, REG3, REG3;			\
-	add	REG3, REG2, DEST;
-
-#else
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	sethi	%hi(trap_block), DEST;		\
-	or	DEST, %lo(trap_block), DEST;	\
-
-/* Uniprocessor versions, we know the cpuid is zero. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* No per-cpu areas on uniprocessor, so no need to load DEST. */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
-
-#endif /* !(CONFIG_SMP) */
+#include <asm/trap_block.h>
 
 #endif /* _SPARC64_CPUDATA_H */
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 0f4150e26619..204e4bf64438 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -1,8 +1,166 @@
 #ifndef ___ASM_SPARC_DMA_MAPPING_H
 #define ___ASM_SPARC_DMA_MAPPING_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/dma-mapping_64.h>
-#else
-#include <asm/dma-mapping_32.h>
-#endif
+
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h)	(1)
+
+struct dma_ops {
+	void *(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void (*free_coherent)(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t dma_handle);
+	dma_addr_t (*map_page)(struct device *dev, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction direction);
+	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
+			   size_t size,
+			   enum dma_data_direction direction);
+	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+		      enum dma_data_direction direction);
+	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+			 int nhwentries,
+			 enum dma_data_direction direction);
+	void (*sync_single_for_cpu)(struct device *dev,
+				    dma_addr_t dma_handle, size_t size,
+				    enum dma_data_direction direction);
+	void (*sync_single_for_device)(struct device *dev,
+				       dma_addr_t dma_handle, size_t size,
+				       enum dma_data_direction direction);
+	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+				int nelems,
+				enum dma_data_direction direction);
+	void (*sync_sg_for_device)(struct device *dev,
+				   struct scatterlist *sg, int nents,
+				   enum dma_data_direction dir);
+};
+extern const struct dma_ops *dma_ops;
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
+{
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
+				 direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
+{
+	dma_ops->unmap_page(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
+{
+	return dma_ops->map_page(dev, page, offset, size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size,
+				  enum dma_data_direction direction)
+{
+	dma_ops->unmap_page(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
+{
+	return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction)
+{
+	dma_ops->unmap_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction direction)
+{
+	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t dma_handle,
+					      size_t size,
+					      enum dma_data_direction direction)
+{
+	if (dma_ops->sync_single_for_device)
+		dma_ops->sync_single_for_device(dev, dma_handle, size,
+						direction);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction direction)
+{
+	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sg, int nelems,
+					  enum dma_data_direction direction)
+{
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+						 dma_addr_t dma_handle,
+						 unsigned long offset,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+						    dma_addr_t dma_handle,
+						    unsigned long offset,
+						    size_t size,
+						    enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+}
+
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return (dma_addr == DMA_ERROR_CODE);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	/*
+	 * no easy way to get cache size on all processors, so return
+	 * the maximum possible, to be safe
+	 */
+	return (1 << INTERNODE_CACHE_SHIFT);
+}
+
 #endif
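Note: with the 32-bit and 64-bit variants folded into this single header, drivers on both sparc flavours now go through the same dma_ops indirection. A minimal streaming-DMA usage sketch against the wrappers above (hypothetical driver function and buffer names; error handling trimmed to the essentials):

#include <linux/errno.h>
#include <linux/dma-mapping.h>

static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* dma_map_single() above funnels into dma_ops->map_page()
	 * on both sparc32 and sparc64. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with "handle" and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}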
diff --git a/arch/sparc/include/asm/dma-mapping_32.h b/arch/sparc/include/asm/dma-mapping_32.h
deleted file mode 100644
index 8a57ea0573e6..000000000000
--- a/arch/sparc/include/asm/dma-mapping_32.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_SPARC_DMA_MAPPING_H
-#define _ASM_SPARC_DMA_MAPPING_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct page;
-
-#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-				 size_t size,
-				 enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			     size_t size,
-			     enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-			   size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg,
-		      int nents, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-			 int nents, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				    size_t size,
-				    enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
-				       dma_addr_t dma_handle,
-				       size_t size,
-				       enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
-					  dma_addr_t dma_handle,
-					  unsigned long offset,
-					  size_t size,
-					  enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
-					     dma_addr_t dma_handle,
-					     unsigned long offset, size_t size,
-					     enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev,
-				   struct scatterlist *sg, int nelems,
-				   enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_get_cache_alignment(void);
-
-#define dma_alloc_noncoherent dma_alloc_coherent
-#define dma_free_noncoherent dma_free_coherent
-
-#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h
deleted file mode 100644
index bfa64f9702d5..000000000000
--- a/arch/sparc/include/asm/dma-mapping_64.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifndef _ASM_SPARC64_DMA_MAPPING_H
-#define _ASM_SPARC64_DMA_MAPPING_H
-
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-struct dma_ops {
-	void *(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
-				 size_t size,
-				 enum dma_data_direction direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-			     size_t size,
-			     enum dma_data_direction direction);
-	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction direction);
-	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-			 int nhwentries,
-			 enum dma_data_direction direction);
-	void (*sync_single_for_cpu)(struct device *dev,
-				    dma_addr_t dma_handle, size_t size,
-				    enum dma_data_direction direction);
-	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
-				int nelems,
-				enum dma_data_direction direction);
-};
-extern const struct dma_ops *dma_ops;
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
-{
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	return dma_ops->map_single(dev, page_address(page) + offset,
-				   size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	dma_ops->unmap_single(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
-{
-	return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction)
-{
-	dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t dma_handle, size_t size,
-					   enum dma_data_direction direction)
-{
-	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t dma_handle,
-					      size_t size,
-					      enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t dma_handle,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction direction)
-{
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t dma_handle,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg, int nelems,
-				       enum dma_data_direction direction)
-{
-	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg, int nelems,
-					  enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return (dma_addr == DMA_ERROR_CODE);
-}
-
-static inline int dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h)	(1)
-
-#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/errno.h b/arch/sparc/include/asm/errno.h
index a9ef172977de..4e2bc490d714 100644
--- a/arch/sparc/include/asm/errno.h
+++ b/arch/sparc/include/asm/errno.h
@@ -110,4 +110,6 @@
 #define	EOWNERDEAD	132	/* Owner died */
 #define	ENOTRECOVERABLE	133	/* State not recoverable */
 
+#define	ERFKILL		134	/* Operation not possible due to RF-kill */
+
 #endif
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index d27716cd38c1..b0f18e9893db 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -11,4 +11,15 @@ extern void _mcount(void);
 
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* relocation of mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 #endif /* _ASM_SPARC64_FTRACE */
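Note: with dynamic ftrace, the core tracer collects every mcount call site recorded in the __mcount_loc section and passes each address through the arch's ftrace_call_adjust() before storing it; sparc needs no correction, hence the identity function above. A rough sketch of that call pattern (the consumer-side helper name here is hypothetical, simplified from the generic ftrace internals):

static void example_scan_mcount_locs(unsigned long *start, unsigned long *end)
{
	unsigned long *p;

	for (p = start; p < end; p++) {
		/* each arch may need to adjust the recorded address;
		 * on sparc ftrace_call_adjust() is the identity */
		unsigned long ip = ftrace_call_adjust(*p);

		record_mcount_site(ip);	/* hypothetical recording step */
	}
}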
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index 1acc7272e537..9faa046713fb 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -71,7 +71,8 @@ struct mdesc_notifier_client {
 
 extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
 
-extern void mdesc_fill_in_cpu_data(cpumask_t mask);
+extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
+extern void mdesc_populate_present_mask(cpumask_t *mask);
 
 extern void sun4v_mdesc_init(void);
 
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index fdfbbf0a4736..988192e8e956 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -1,7 +1,7 @@
 #ifndef __SPARC_MMAN_H__
 #define __SPARC_MMAN_H__
 
-#include <asm-generic/mman.h>
+#include <asm-generic/mman-common.h>
 
 /* SunOS'ified... */
 
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index d1806edc0958..f72080bdda94 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -152,6 +152,6 @@ extern unsigned long pfn_base;
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>
 
 #endif /* _SPARC_PAGE_H */
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 4274ed13ddb2..f0d09b401036 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -132,6 +132,6 @@ typedef struct page *pgtable_t;
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>
 
 #endif /* _SPARC64_PAGE_H */
diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h
index bee64593023e..007aafb4ae97 100644
--- a/arch/sparc/include/asm/percpu_64.h
+++ b/arch/sparc/include/asm/percpu_64.h
@@ -7,20 +7,16 @@ register unsigned long __local_per_cpu_offset asm("g5");
 
 #ifdef CONFIG_SMP
 
-extern void real_setup_per_cpu_areas(void);
+#include <asm/trap_block.h>
 
-extern unsigned long __per_cpu_base;
-extern unsigned long __per_cpu_shift;
 #define __per_cpu_offset(__cpu) \
-	(__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
+	(trap_block[(__cpu)].__per_cpu_base)
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
 #define __my_cpu_offset __local_per_cpu_offset
 
 #else /* ! SMP */
 
-#define real_setup_per_cpu_areas()	do { } while (0)
-
 #endif	/* SMP */
 
 #include <asm-generic/percpu.h>
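Note: the per-cpu base pointer now lives directly in each CPU's trap_block[] entry (see the new asm/trap_block.h later in this diff), so the offset lookup becomes a plain array index instead of a global base-plus-shift computation. Conceptually the new macro expands to (illustrative C only, hypothetical helper name):

/* Illustrative expansion of the new __per_cpu_offset() on sparc64:
 * no more global __per_cpu_base/__per_cpu_shift pair, just the
 * per-CPU base stashed in the trap block. */
static inline unsigned long example_per_cpu_offset(int cpu)
{
	return trap_block[cpu].__per_cpu_base;
}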
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 900d44714f8d..be8d7aaeb60d 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -86,6 +86,8 @@ extern int of_node_to_nid(struct device_node *dp);
 #endif
 
 extern void prom_build_devicetree(void);
+extern void of_populate_present_mask(void);
+extern void of_fill_in_cpu_data(void);
 
 /* Dummy ref counting routines - to be implemented later */
 static inline struct device_node *of_node_get(struct device_node *node)
diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
index cba45206b7f2..e49b828a2471 100644
--- a/arch/sparc/include/asm/signal.h
+++ b/arch/sparc/include/asm/signal.h
@@ -176,7 +176,7 @@ struct sigstack {
 #define SA_STATIC_ALLOC		0x8000
 #endif
 
-#include <asm-generic/signal.h>
+#include <asm-generic/signal-defs.h>
 
 struct __new_sigaction {
 	__sighandler_t		sa_handler;
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
new file mode 100644
index 000000000000..7e26b2db6211
--- /dev/null
+++ b/arch/sparc/include/asm/trap_block.h
@@ -0,0 +1,207 @@
+#ifndef _SPARC_TRAP_BLOCK_H
+#define _SPARC_TRAP_BLOCK_H
+
+#include <asm/hypervisor.h>
+#include <asm/asi.h>
+
+#ifndef __ASSEMBLY__
+
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses.  These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image.  As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64-bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
+	struct thread_info	*thread;
+	unsigned long		pgd_paddr;
+	unsigned long		cpu_mondo_pa;
+	unsigned long		dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
+	unsigned long		resum_mondo_pa;
+	unsigned long		resum_kernel_buf_pa;
+	unsigned long		nonresum_mondo_pa;
+	unsigned long		nonresum_kernel_buf_pa;
+
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
+	struct hv_fault_status	fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
+	unsigned long		cpu_mondo_block_pa;
+	unsigned long		cpu_list_pa;
+	unsigned long		tsb_huge;
+	unsigned long		tsb_huge_temp;
+
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
+	unsigned long		irq_worklist_pa;
+	unsigned int		cpu_mondo_qmask;
+	unsigned int		dev_mondo_qmask;
+	unsigned int		resum_qmask;
+	unsigned int		nonresum_qmask;
+	unsigned long		__per_cpu_base;
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(struct thread_info *);
+extern void setup_tba(void);
+extern int ncpus_probed;
+
+extern unsigned long real_hard_smp_processor_id(void);
+
+struct cpuid_patch_entry {
+	unsigned int	addr;
+	unsigned int	cheetah_safari[4];
+	unsigned int	cheetah_jbus[4];
+	unsigned int	starfire[4];
+	unsigned int	sun4v[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
+struct sun4v_1insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+	__sun4v_1insn_patch_end;
+
+struct sun4v_2insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[2];
+};
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+	__sun4v_2insn_patch_end;
+
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD		0x00
+#define TRAP_PER_CPU_PGD_PADDR		0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
+#define TRAP_PER_CPU_FAULT_INFO		0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+#define TRAP_PER_CPU_TSB_HUGE		0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
+#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
+#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
+#define TRAP_PER_CPU_RESUM_QMASK	0xf0
+#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
+#define TRAP_PER_CPU_PER_CPU_BASE	0xf8
+
+#define TRAP_BLOCK_SZ_SHIFT		8
+
+#include <asm/scratchpad.h>
+
+#define __GET_CPUID(REG)				\
+	/* Spitfire implementation (default). */	\
+661:	ldxa	[%g0] ASI_UPA_CONFIG, REG;		\
+	srlx	REG, 17, REG;				\
+	and	REG, 0x1f, REG;				\
+	nop;						\
+	.section	.cpuid_patch, "ax";		\
+	/* Instruction location. */			\
+	.word	661b;					\
+	/* Cheetah Safari implementation. */		\
+	ldxa	[%g0] ASI_SAFARI_CONFIG, REG;		\
+	srlx	REG, 17, REG;				\
+	and	REG, 0x3ff, REG;			\
+	nop;						\
+	/* Cheetah JBUS implementation. */		\
+	ldxa	[%g0] ASI_JBUS_CONFIG, REG;		\
+	srlx	REG, 17, REG;				\
+	and	REG, 0x1f, REG;				\
+	nop;						\
+	/* Starfire implementation. */			\
+	sethi	%hi(0x1fff40000d0 >> 9), REG;		\
+	sllx	REG, 9, REG;				\
+	or	REG, 0xd0, REG;				\
+	lduwa	[REG] ASI_PHYS_BYPASS_EC_E, REG;	\
+	/* sun4v implementation. */			\
+	mov	SCRATCHPAD_CPUID, REG;			\
+	ldxa	[REG] ASI_SCRATCHPAD, REG;		\
+	nop;						\
+	nop;						\
+	.previous;
+
+#ifdef CONFIG_SMP
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(trap_block), DEST;		\
+	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
+	or	DEST, %lo(trap_block), DEST;	\
+	add	DEST, TMP, DEST;		\
+
+/* Clobbers TMP, current address space PGD phys address into DEST. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer. */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST.  REG1, REG2, and REG3 are
+ * clobbered.
+ *
+ * You absolutely cannot use DEST as a temporary in this code.  The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved DEST per-cpu base.  This can corrupt
+ * the calculations done by the macro mid-stream.
+ */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
+	lduh	[THR + TI_CPU], REG1;			\
+	sethi	%hi(trap_block), REG2;			\
+	sllx	REG1, TRAP_BLOCK_SZ_SHIFT, REG1;	\
+	or	REG2, %lo(trap_block), REG2;		\
+	add	REG2, REG1, REG2;			\
+	ldx	[REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST;
+
+#else
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	sethi	%hi(trap_block), DEST;		\
+	or	DEST, %lo(trap_block), DEST;	\
+
+/* Uniprocessor versions, we know the cpuid is zero. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* No per-cpu areas on uniprocessor, so no need to load DEST. */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
+
+#endif /* !(CONFIG_SMP) */
+
+#endif /* _SPARC_TRAP_BLOCK_H */
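Note: trap_per_cpu is deliberately padded to a power of two (TRAP_BLOCK_SZ_SHIFT = 8, i.e. 256 bytes per entry) so the assembly above can index the array with one shift and one add. In C terms, TRAP_LOAD_TRAP_BLOCK computes roughly (illustrative helper name only):

/* Illustrative C rendering of TRAP_LOAD_TRAP_BLOCK: the per-CPU
 * trap block lives at a fixed power-of-2 stride from trap_block[0]. */
static inline struct trap_per_cpu *example_trap_block_of(int cpuid)
{
	unsigned long base = (unsigned long)&trap_block[0];

	return (struct trap_per_cpu *)
		(base + ((unsigned long)cpuid << TRAP_BLOCK_SZ_SHIFT));
}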
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h
index 2237118825d0..de671d73baed 100644
--- a/arch/sparc/include/asm/types.h
+++ b/arch/sparc/include/asm/types.h
@@ -21,8 +21,6 @@ typedef unsigned short umode_t;
 
 #ifdef __KERNEL__
 
-#define BITS_PER_LONG 64
-
 #ifndef __ASSEMBLY__
 
 /* Dma addresses come in generic and 64-bit flavours. */
@@ -46,8 +44,6 @@ typedef unsigned short umode_t;
 
 #ifdef __KERNEL__
 
-#define BITS_PER_LONG 32
-
 #ifndef __ASSEMBLY__
 
 typedef u32 dma_addr_t;
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 47d5619d43fa..8303ac481034 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -17,6 +17,9 @@
 
 #ifndef __ASSEMBLY__
 
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
+
 /* Sparc is not segmented, however we need to be able to fool access_ok()
  * when doing system calls from kernel mode legitimately.
  *
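Note: these two defines tell the generic lib/extable.c to compile out its default sort/search helpers, because sparc32 supplies its own implementations (the arch/sparc/mm/extable.c change in this merge's diffstat). A sketch of the generic-side guard, assuming the lib/extable.c convention of that era:

/* Sketch: the generic helper is built only when the arch does not
 * claim its own implementation via ARCH_HAS_SEARCH_EXTABLE. */
#ifndef ARCH_HAS_SEARCH_EXTABLE
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	/* ... generic binary search over the sorted entries ... */
	return NULL;
}
#endif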
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index c64e767a3e4b..a38c03238918 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -12,7 +12,7 @@
 #include <asm/asi.h>
 #include <asm/system.h>
 #include <asm/spitfire.h>
-#include <asm-generic/uaccess.h>
+#include <asm-generic/uaccess-unaligned.h>
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b8eb71ef3163..b2c406de7d4f 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -394,8 +394,9 @@
 #define __NR_accept4		323
 #define __NR_preadv		324
 #define __NR_pwritev		325
+#define __NR_rt_tgsigqueueinfo	326
 
-#define NR_SYSCALLS		326
+#define NR_SYSCALLS		327
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 54742e58831c..475ce4696acd 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -37,6 +37,7 @@ obj-y += una_asm_$(BITS).o
 obj-$(CONFIG_SPARC32) += muldiv.o
 obj-y                 += prom_common.o
 obj-y                 += prom_$(BITS).o
+obj-y                 += of_device_common.o
 obj-y                 += of_device_$(BITS).o
 obj-$(CONFIG_SPARC64) += prom_irqtrans.o
 
@@ -54,6 +55,7 @@ obj-$(CONFIG_SPARC64) += sstate.o
 obj-$(CONFIG_SPARC64) += mdesc.o
 obj-$(CONFIG_SPARC64) += pcr.o
 obj-$(CONFIG_SPARC64) += nmi.o
+obj-$(CONFIG_SPARC64_SMP) += cpumap.o
 
 # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
 obj-$(CONFIG_SPARC32) += devres.o
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
new file mode 100644
index 000000000000..7430ed080b23
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.c
@@ -0,0 +1,431 @@
+/* cpumap.c: used for optimizing CPU assignment
+ *
+ * Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <asm/cpudata.h>
+#include "cpumap.h"
+
+
+enum {
+	CPUINFO_LVL_ROOT = 0,
+	CPUINFO_LVL_NODE,
+	CPUINFO_LVL_CORE,
+	CPUINFO_LVL_PROC,
+	CPUINFO_LVL_MAX,
+};
+
+enum {
+	ROVER_NO_OP              = 0,
+	/* Increment rover every time level is visited */
+	ROVER_INC_ON_VISIT       = 1 << 0,
+	/* Increment parent's rover every time rover wraps around */
+	ROVER_INC_PARENT_ON_LOOP = 1 << 1,
+};
+
+struct cpuinfo_node {
+	int id;
+	int level;
+	int num_cpus;    /* Number of CPUs in this hierarchy */
+	int parent_index;
+	int child_start; /* Array index of the first child node */
+	int child_end;   /* Array index of the last child node */
+	int rover;       /* Child node iterator */
+};
+
+struct cpuinfo_level {
+	int start_index; /* Index of first node of a level in a cpuinfo tree */
+	int end_index;   /* Index of last node of a level in a cpuinfo tree */
+	int num_nodes;   /* Number of nodes in a level in a cpuinfo tree */
+};
+
+struct cpuinfo_tree {
+	int total_nodes;
+
+	/* Offsets into nodes[] for each level of the tree */
+	struct cpuinfo_level level[CPUINFO_LVL_MAX];
+	struct cpuinfo_node  nodes[0];
+};
+
+
+static struct cpuinfo_tree *cpuinfo_tree;
+
+static u16 cpu_distribution_map[NR_CPUS];
+static DEFINE_SPINLOCK(cpu_map_lock);
+
+
+/* Niagara optimized cpuinfo tree traversal. */
+static const int niagara_iterate_method[] = {
+	[CPUINFO_LVL_ROOT] = ROVER_NO_OP,
+
+	/* Strands (or virtual CPUs) within a core may not run concurrently
+	 * on the Niagara, as instruction pipeline(s) are shared.  Distribute
+	 * work to strands in different cores first for better concurrency.
+	 * Go to next NUMA node when all cores are used.
+	 */
+	[CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+
+	/* Strands are grouped together by proc_id in cpuinfo_sparc, i.e.
+	 * a proc_id represents an instruction pipeline.  Distribute work to
+	 * strands in different proc_id groups if the core has multiple
+	 * instruction pipelines (e.g. the Niagara 2/2+ has two).
+	 */
+	[CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT,
+
+	/* Pick the next strand in the proc_id group. */
+	[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT,
+};
+
+/* Generic cpuinfo tree traversal.  Distribute work round robin across NUMA
+ * nodes.
+ */
+static const int generic_iterate_method[] = {
+	[CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT,
+	[CPUINFO_LVL_NODE] = ROVER_NO_OP,
+	[CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP,
+	[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+};
+
+
+static int cpuinfo_id(int cpu, int level)
+{
+	int id;
+
+	switch (level) {
+	case CPUINFO_LVL_ROOT:
+		id = 0;
+		break;
+	case CPUINFO_LVL_NODE:
+		id = cpu_to_node(cpu);
+		break;
+	case CPUINFO_LVL_CORE:
+		id = cpu_data(cpu).core_id;
+		break;
+	case CPUINFO_LVL_PROC:
+		id = cpu_data(cpu).proc_id;
+		break;
+	default:
+		id = -EINVAL;
+	}
+	return id;
+}
+
+/*
+ * Enumerate the CPU information in __cpu_data to determine the start index,
+ * end index, and number of nodes for each level in the cpuinfo tree.  The
+ * total number of cpuinfo nodes required to build the tree is returned.
+ */
+static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
+{
+	int prev_id[CPUINFO_LVL_MAX];
+	int i, n, num_nodes;
+
+	for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) {
+		struct cpuinfo_level *lv = &tree_level[i];
+
+		prev_id[i] = -1;
+		lv->start_index = lv->end_index = lv->num_nodes = 0;
+	}
+
+	num_nodes = 1; /* Include the root node */
+
+	for (i = 0; i < num_possible_cpus(); i++) {
+		if (!cpu_online(i))
+			continue;
+
+		n = cpuinfo_id(i, CPUINFO_LVL_NODE);
+		if (n > prev_id[CPUINFO_LVL_NODE]) {
+			tree_level[CPUINFO_LVL_NODE].num_nodes++;
+			prev_id[CPUINFO_LVL_NODE] = n;
+			num_nodes++;
+		}
+		n = cpuinfo_id(i, CPUINFO_LVL_CORE);
+		if (n > prev_id[CPUINFO_LVL_CORE]) {
+			tree_level[CPUINFO_LVL_CORE].num_nodes++;
+			prev_id[CPUINFO_LVL_CORE] = n;
+			num_nodes++;
+		}
+		n = cpuinfo_id(i, CPUINFO_LVL_PROC);
+		if (n > prev_id[CPUINFO_LVL_PROC]) {
+			tree_level[CPUINFO_LVL_PROC].num_nodes++;
+			prev_id[CPUINFO_LVL_PROC] = n;
+			num_nodes++;
+		}
+	}
+
+	tree_level[CPUINFO_LVL_ROOT].num_nodes = 1;
+
+	n = tree_level[CPUINFO_LVL_NODE].num_nodes;
+	tree_level[CPUINFO_LVL_NODE].start_index = 1;
+	tree_level[CPUINFO_LVL_NODE].end_index   = n;
+
+	n++;
+	tree_level[CPUINFO_LVL_CORE].start_index = n;
+	n += tree_level[CPUINFO_LVL_CORE].num_nodes;
+	tree_level[CPUINFO_LVL_CORE].end_index   = n - 1;
+
+	tree_level[CPUINFO_LVL_PROC].start_index = n;
+	n += tree_level[CPUINFO_LVL_PROC].num_nodes;
+	tree_level[CPUINFO_LVL_PROC].end_index   = n - 1;
+
+	return num_nodes;
+}
+
+/* Build a tree representation of the CPU hierarchy using the per CPU
+ * information in __cpu_data.  Entries in __cpu_data[0..NR_CPUS] are
181 * assumed to be sorted in ascending order based on node, core_id, and
182 * proc_id (in order of significance).
183 */
184static struct cpuinfo_tree *build_cpuinfo_tree(void)
185{
186 struct cpuinfo_tree *new_tree;
187 struct cpuinfo_node *node;
188 struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX];
189 int num_cpus[CPUINFO_LVL_MAX];
190 int level_rover[CPUINFO_LVL_MAX];
191 int prev_id[CPUINFO_LVL_MAX];
192 int n, id, cpu, prev_cpu, last_cpu, level;
193
194 n = enumerate_cpuinfo_nodes(tmp_level);
195
196 new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
197 (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
198 if (!new_tree)
199 return NULL;
200
201 new_tree->total_nodes = n;
202 memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
203
204 prev_cpu = cpu = first_cpu(cpu_online_map);
205
206 /* Initialize all levels in the tree with the first CPU */
207 for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
208 n = new_tree->level[level].start_index;
209
210 level_rover[level] = n;
211 node = &new_tree->nodes[n];
212
213 id = cpuinfo_id(cpu, level);
214 if (unlikely(id < 0)) {
215 kfree(new_tree);
216 return NULL;
217 }
218 node->id = id;
219 node->level = level;
220 node->num_cpus = 1;
221
222 node->parent_index = (level > CPUINFO_LVL_ROOT)
223 ? new_tree->level[level - 1].start_index : -1;
224
225 node->child_start = node->child_end = node->rover =
226 (level == CPUINFO_LVL_PROC)
227 ? cpu : new_tree->level[level + 1].start_index;
228
229 prev_id[level] = node->id;
230 num_cpus[level] = 1;
231 }
232
233 for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
234 if (cpu_online(last_cpu))
235 break;
236 }
237
238 while (++cpu <= last_cpu) {
239 if (!cpu_online(cpu))
240 continue;
241
242 for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT;
243 level--) {
244 id = cpuinfo_id(cpu, level);
245 if (unlikely(id < 0)) {
246 kfree(new_tree);
247 return NULL;
248 }
249
250 if ((id != prev_id[level]) || (cpu == last_cpu)) {
251 prev_id[level] = id;
252 node = &new_tree->nodes[level_rover[level]];
253 node->num_cpus = num_cpus[level];
254 num_cpus[level] = 1;
255
256 if (cpu == last_cpu)
257 node->num_cpus++;
258
259 /* Connect tree node to parent */
260 if (level == CPUINFO_LVL_ROOT)
261 node->parent_index = -1;
262 else
263 node->parent_index =
264 level_rover[level - 1];
265
266 if (level == CPUINFO_LVL_PROC) {
267 node->child_end =
268 (cpu == last_cpu) ? cpu : prev_cpu;
269 } else {
270 node->child_end =
271 level_rover[level + 1] - 1;
272 }
273
274 /* Initialize the next node in the same level */
275 n = ++level_rover[level];
276 if (n <= new_tree->level[level].end_index) {
277 node = &new_tree->nodes[n];
278 node->id = id;
279 node->level = level;
280
281 /* Connect node to child */
282 node->child_start = node->child_end =
283 node->rover =
284 (level == CPUINFO_LVL_PROC)
285 ? cpu : level_rover[level + 1];
286 }
287 } else
288 num_cpus[level]++;
289 }
290 prev_cpu = cpu;
291 }
292
293 return new_tree;
294}
295
296static void increment_rover(struct cpuinfo_tree *t, int node_index,
297 int root_index, const int *rover_inc_table)
298{
299 struct cpuinfo_node *node = &t->nodes[node_index];
300 int top_level, level;
301
302 top_level = t->nodes[root_index].level;
303 for (level = node->level; level >= top_level; level--) {
304 node->rover++;
305 if (node->rover <= node->child_end)
306 return;
307
308 node->rover = node->child_start;
309 /* If parent's rover does not need to be adjusted, stop here. */
310 if ((level == top_level) ||
311 !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
312 return;
313
314 node = &t->nodes[node->parent_index];
315 }
316}
317
318static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
319{
320 const int *rover_inc_table;
321 int level, new_index, index = root_index;
322
323 switch (sun4v_chip_type) {
324 case SUN4V_CHIP_NIAGARA1:
325 case SUN4V_CHIP_NIAGARA2:
326 rover_inc_table = niagara_iterate_method;
327 break;
328 default:
329 rover_inc_table = generic_iterate_method;
330 }
331
332 for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
333 level++) {
334 new_index = t->nodes[index].rover;
335 if (rover_inc_table[level] & ROVER_INC_ON_VISIT)
336 increment_rover(t, index, root_index, rover_inc_table);
337
338 index = new_index;
339 }
340 return index;
341}
342
343static void _cpu_map_rebuild(void)
344{
345 int i;
346
347 if (cpuinfo_tree) {
348 kfree(cpuinfo_tree);
349 cpuinfo_tree = NULL;
350 }
351
352 cpuinfo_tree = build_cpuinfo_tree();
353 if (!cpuinfo_tree)
354 return;
355
356 /* Build CPU distribution map that spans all online CPUs. No need
357 * to check if the CPU is online, as that is done when the cpuinfo
358 * tree is being built.
359 */
360 for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
361 cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
362}
363
364/* Fallback if the cpuinfo tree could not be built. CPU mapping is linear
365 * round robin.
366 */
367static int simple_map_to_cpu(unsigned int index)
368{
369 int i, end, cpu_rover;
370
371 cpu_rover = 0;
372 end = index % num_online_cpus();
373 for (i = 0; i < num_possible_cpus(); i++) {
374 if (cpu_online(cpu_rover)) {
375 if (cpu_rover >= end)
376 return cpu_rover;
377
378 cpu_rover++;
379 }
380 }
381
382 /* Impossible, since num_online_cpus() <= num_possible_cpus() */
383 return first_cpu(cpu_online_map);
384}
385
386static int _map_to_cpu(unsigned int index)
387{
388 struct cpuinfo_node *root_node;
389
390 if (unlikely(!cpuinfo_tree)) {
391 _cpu_map_rebuild();
392 if (!cpuinfo_tree)
393 return simple_map_to_cpu(index);
394 }
395
396 root_node = &cpuinfo_tree->nodes[0];
397#ifdef CONFIG_HOTPLUG_CPU
398 if (unlikely(root_node->num_cpus != num_online_cpus())) {
399 _cpu_map_rebuild();
400 if (!cpuinfo_tree)
401 return simple_map_to_cpu(index);
402 }
403#endif
404 return cpu_distribution_map[index % root_node->num_cpus];
405}
406
407int map_to_cpu(unsigned int index)
408{
409 int mapped_cpu;
410 unsigned long flag;
411
412 spin_lock_irqsave(&cpu_map_lock, flag);
413 mapped_cpu = _map_to_cpu(index);
414
415#ifdef CONFIG_HOTPLUG_CPU
416 while (unlikely(!cpu_online(mapped_cpu)))
417 mapped_cpu = _map_to_cpu(index);
418#endif
419 spin_unlock_irqrestore(&cpu_map_lock, flag);
420 return mapped_cpu;
421}
422EXPORT_SYMBOL(map_to_cpu);
423
424void cpu_map_rebuild(void)
425{
426 unsigned long flag;
427
428 spin_lock_irqsave(&cpu_map_lock, flag);
429 _cpu_map_rebuild();
430 spin_unlock_irqrestore(&cpu_map_lock, flag);
431}
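
The rover mechanics are easier to see in isolation. Below is a minimal
user-space sketch (invented topology constants, not the kernel code) of
the ROVER_INC_ON_VISIT policy at two levels: every lookup advances both
the node-level rover and the chosen core's strand rover, so consecutive
requests land on different cores before any core's second strand is
reused. ROVER_INC_PARENT_ON_LOOP (not modelled here) instead advances the
parent's rover only when a child rover wraps around:

    #include <stdio.h>

    #define NCORES   2      /* toy topology: two cores...    */
    #define NSTRANDS 4      /* ...with four strands per core */

    static int core_rover;                  /* node-level rover   */
    static int strand_rover[NCORES];        /* one rover per core */

    static int pick_cpu(void)
    {
            int core = core_rover;
            int strand = strand_rover[core];

            core_rover = (core_rover + 1) % NCORES;
            strand_rover[core] = (strand + 1) % NSTRANDS;
            return core * NSTRANDS + strand;
    }

    int main(void)
    {
            int i;

            /* Prints cpus 0 4 1 5 2 6 3 7: cores alternate first. */
            for (i = 0; i < NCORES * NSTRANDS; i++)
                    printf("request %d -> cpu %d\n", i, pick_cpu());
            return 0;
    }
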
diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h
new file mode 100644
index 000000000000..e639880ab864
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.h
@@ -0,0 +1,16 @@
1#ifndef _CPUMAP_H
2#define _CPUMAP_H
3
4#ifdef CONFIG_SMP
5extern void cpu_map_rebuild(void);
6extern int map_to_cpu(unsigned int index);
7#define cpu_map_init() cpu_map_rebuild()
8#else
9#define cpu_map_init() do {} while (0)
10static inline int map_to_cpu(unsigned int index)
11{
12 return raw_smp_processor_id();
13}
14#endif
15
16#endif
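
A hypothetical in-kernel caller (names invented) shows the intended use:
the index is any stable small integer, and map_to_cpu() folds it onto an
online CPU with good spread; on !SMP it degenerates to the current CPU:

    #include <linux/kernel.h>
    #include "cpumap.h"

    /* Sketch: give each device queue a topology-aware home CPU. */
    static void bind_queues(int nqueues)
    {
            int q;

            for (q = 0; q < nqueues; q++)
                    pr_info("queue %d -> cpu %d\n", q, map_to_cpu(q));
    }
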
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index ebc8403b035e..524c32f97c55 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -35,8 +35,8 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
35} 35}
36EXPORT_SYMBOL(dma_set_mask); 36EXPORT_SYMBOL(dma_set_mask);
37 37
38void *dma_alloc_coherent(struct device *dev, size_t size, 38static void *dma32_alloc_coherent(struct device *dev, size_t size,
39 dma_addr_t *dma_handle, gfp_t flag) 39 dma_addr_t *dma_handle, gfp_t flag)
40{ 40{
41#ifdef CONFIG_PCI 41#ifdef CONFIG_PCI
42 if (dev->bus == &pci_bus_type) 42 if (dev->bus == &pci_bus_type)
@@ -44,10 +44,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
44#endif 44#endif
45 return sbus_alloc_consistent(dev, size, dma_handle); 45 return sbus_alloc_consistent(dev, size, dma_handle);
46} 46}
47EXPORT_SYMBOL(dma_alloc_coherent);
48 47
49void dma_free_coherent(struct device *dev, size_t size, 48static void dma32_free_coherent(struct device *dev, size_t size,
50 void *cpu_addr, dma_addr_t dma_handle) 49 void *cpu_addr, dma_addr_t dma_handle)
51{ 50{
52#ifdef CONFIG_PCI 51#ifdef CONFIG_PCI
53 if (dev->bus == &pci_bus_type) { 52 if (dev->bus == &pci_bus_type) {
@@ -58,38 +57,10 @@ void dma_free_coherent(struct device *dev, size_t size,
58#endif 57#endif
59 sbus_free_consistent(dev, size, cpu_addr, dma_handle); 58 sbus_free_consistent(dev, size, cpu_addr, dma_handle);
60} 59}
61EXPORT_SYMBOL(dma_free_coherent);
62 60
63dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, 61static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
64 size_t size, enum dma_data_direction direction) 62 unsigned long offset, size_t size,
65{ 63 enum dma_data_direction direction)
66#ifdef CONFIG_PCI
67 if (dev->bus == &pci_bus_type)
68 return pci_map_single(to_pci_dev(dev), cpu_addr,
69 size, (int)direction);
70#endif
71 return sbus_map_single(dev, cpu_addr, size, (int)direction);
72}
73EXPORT_SYMBOL(dma_map_single);
74
75void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
76 size_t size,
77 enum dma_data_direction direction)
78{
79#ifdef CONFIG_PCI
80 if (dev->bus == &pci_bus_type) {
81 pci_unmap_single(to_pci_dev(dev), dma_addr,
82 size, (int)direction);
83 return;
84 }
85#endif
86 sbus_unmap_single(dev, dma_addr, size, (int)direction);
87}
88EXPORT_SYMBOL(dma_unmap_single);
89
90dma_addr_t dma_map_page(struct device *dev, struct page *page,
91 unsigned long offset, size_t size,
92 enum dma_data_direction direction)
93{ 64{
94#ifdef CONFIG_PCI 65#ifdef CONFIG_PCI
95 if (dev->bus == &pci_bus_type) 66 if (dev->bus == &pci_bus_type)
@@ -99,10 +70,9 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
99 return sbus_map_single(dev, page_address(page) + offset, 70 return sbus_map_single(dev, page_address(page) + offset,
100 size, (int)direction); 71 size, (int)direction);
101} 72}
102EXPORT_SYMBOL(dma_map_page);
103 73
104void dma_unmap_page(struct device *dev, dma_addr_t dma_address, 74static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
105 size_t size, enum dma_data_direction direction) 75 size_t size, enum dma_data_direction direction)
106{ 76{
107#ifdef CONFIG_PCI 77#ifdef CONFIG_PCI
108 if (dev->bus == &pci_bus_type) { 78 if (dev->bus == &pci_bus_type) {
@@ -113,10 +83,9 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
113#endif 83#endif
114 sbus_unmap_single(dev, dma_address, size, (int)direction); 84 sbus_unmap_single(dev, dma_address, size, (int)direction);
115} 85}
116EXPORT_SYMBOL(dma_unmap_page);
117 86
118int dma_map_sg(struct device *dev, struct scatterlist *sg, 87static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
119 int nents, enum dma_data_direction direction) 88 int nents, enum dma_data_direction direction)
120{ 89{
121#ifdef CONFIG_PCI 90#ifdef CONFIG_PCI
122 if (dev->bus == &pci_bus_type) 91 if (dev->bus == &pci_bus_type)
@@ -124,10 +93,9 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg,
124#endif 93#endif
125 return sbus_map_sg(dev, sg, nents, direction); 94 return sbus_map_sg(dev, sg, nents, direction);
126} 95}
127EXPORT_SYMBOL(dma_map_sg);
128 96
129void dma_unmap_sg(struct device *dev, struct scatterlist *sg, 97static void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
130 int nents, enum dma_data_direction direction) 98 int nents, enum dma_data_direction direction)
131{ 99{
132#ifdef CONFIG_PCI 100#ifdef CONFIG_PCI
133 if (dev->bus == &pci_bus_type) { 101 if (dev->bus == &pci_bus_type) {
@@ -137,10 +105,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
137#endif 105#endif
138 sbus_unmap_sg(dev, sg, nents, (int)direction); 106 sbus_unmap_sg(dev, sg, nents, (int)direction);
139} 107}
140EXPORT_SYMBOL(dma_unmap_sg);
141 108
142void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 109static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
143 size_t size, enum dma_data_direction direction) 110 size_t size,
111 enum dma_data_direction direction)
144{ 112{
145#ifdef CONFIG_PCI 113#ifdef CONFIG_PCI
146 if (dev->bus == &pci_bus_type) { 114 if (dev->bus == &pci_bus_type) {
@@ -151,10 +119,10 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
151#endif 119#endif
152 sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); 120 sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
153} 121}
154EXPORT_SYMBOL(dma_sync_single_for_cpu);
155 122
156void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 123static void dma32_sync_single_for_device(struct device *dev,
157 size_t size, enum dma_data_direction direction) 124 dma_addr_t dma_handle, size_t size,
125 enum dma_data_direction direction)
158{ 126{
159#ifdef CONFIG_PCI 127#ifdef CONFIG_PCI
160 if (dev->bus == &pci_bus_type) { 128 if (dev->bus == &pci_bus_type) {
@@ -165,28 +133,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
165#endif 133#endif
166 sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); 134 sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
167} 135}
168EXPORT_SYMBOL(dma_sync_single_for_device);
169
170void dma_sync_single_range_for_cpu(struct device *dev,
171 dma_addr_t dma_handle,
172 unsigned long offset,
173 size_t size,
174 enum dma_data_direction direction)
175{
176 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
177}
178EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
179
180void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
181 unsigned long offset, size_t size,
182 enum dma_data_direction direction)
183{
184 dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
185}
186EXPORT_SYMBOL(dma_sync_single_range_for_device);
187 136
188void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 137static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
189 int nelems, enum dma_data_direction direction) 138 int nelems, enum dma_data_direction direction)
190{ 139{
191#ifdef CONFIG_PCI 140#ifdef CONFIG_PCI
192 if (dev->bus == &pci_bus_type) { 141 if (dev->bus == &pci_bus_type) {
@@ -197,11 +146,10 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
197#endif 146#endif
198 BUG(); 147 BUG();
199} 148}
200EXPORT_SYMBOL(dma_sync_sg_for_cpu);
201 149
202void dma_sync_sg_for_device(struct device *dev, 150static void dma32_sync_sg_for_device(struct device *dev,
203 struct scatterlist *sg, int nelems, 151 struct scatterlist *sg, int nelems,
204 enum dma_data_direction direction) 152 enum dma_data_direction direction)
205{ 153{
206#ifdef CONFIG_PCI 154#ifdef CONFIG_PCI
207 if (dev->bus == &pci_bus_type) { 155 if (dev->bus == &pci_bus_type) {
@@ -212,16 +160,19 @@ void dma_sync_sg_for_device(struct device *dev,
212#endif 160#endif
213 BUG(); 161 BUG();
214} 162}
215EXPORT_SYMBOL(dma_sync_sg_for_device);
216 163
217int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 164static const struct dma_ops dma32_dma_ops = {
218{ 165 .alloc_coherent = dma32_alloc_coherent,
219 return (dma_addr == DMA_ERROR_CODE); 166 .free_coherent = dma32_free_coherent,
220} 167 .map_page = dma32_map_page,
221EXPORT_SYMBOL(dma_mapping_error); 168 .unmap_page = dma32_unmap_page,
222 169 .map_sg = dma32_map_sg,
223int dma_get_cache_alignment(void) 170 .unmap_sg = dma32_unmap_sg,
224{ 171 .sync_single_for_cpu = dma32_sync_single_for_cpu,
225 return 32; 172 .sync_single_for_device = dma32_sync_single_for_device,
226} 173 .sync_sg_for_cpu = dma32_sync_sg_for_cpu,
227EXPORT_SYMBOL(dma_get_cache_alignment); 174 .sync_sg_for_device = dma32_sync_sg_for_device,
175};
176
177const struct dma_ops *dma_ops = &dma32_dma_ops;
178EXPORT_SYMBOL(dma_ops);
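
The shape of this conversion: instead of one exported symbol per
operation, the file now exports a single ops table, and the dma_* API
becomes header-level dispatch through it. A condensed sketch of the
pattern (signatures abbreviated; the real struct dma_ops and its inline
wrappers live in asm/dma-mapping.h):

    struct dma_ops {
            void *(*alloc_coherent)(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flag);
            /* ...remaining operations elided... */
    };

    extern const struct dma_ops *dma_ops;

    /* Former exported functions become static inline wrappers: */
    static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                           dma_addr_t *dma_handle, gfp_t flag)
    {
            return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
    }
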
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 90350f838f05..4a700f4b79ce 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -544,7 +544,8 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
544 resp_len, ncpus, mask, 544 resp_len, ncpus, mask,
545 DR_CPU_STAT_CONFIGURED); 545 DR_CPU_STAT_CONFIGURED);
546 546
547 mdesc_fill_in_cpu_data(*mask); 547 mdesc_populate_present_mask(mask);
548 mdesc_fill_in_cpu_data(mask);
548 549
549 for_each_cpu_mask(cpu, *mask) { 550 for_each_cpu_mask(cpu, *mask) {
550 int err; 551 int err;
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d0218e73f982..d3b1a3076569 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -7,14 +7,10 @@
7 7
8#include <asm/ftrace.h> 8#include <asm/ftrace.h>
9 9
10#ifdef CONFIG_DYNAMIC_FTRACE
10static const u32 ftrace_nop = 0x01000000; 11static const u32 ftrace_nop = 0x01000000;
11 12
12unsigned char *ftrace_nop_replace(void) 13static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
13{
14 return (char *)&ftrace_nop;
15}
16
17unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
18{ 14{
19 static u32 call; 15 static u32 call;
20 s32 off; 16 s32 off;
@@ -22,15 +18,11 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
22 off = ((s32)addr - (s32)ip); 18 off = ((s32)addr - (s32)ip);
23 call = 0x40000000 | ((u32)off >> 2); 19 call = 0x40000000 | ((u32)off >> 2);
24 20
25 return (unsigned char *) &call; 21 return call;
26} 22}
27 23
28int 24static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
29ftrace_modify_code(unsigned long ip, unsigned char *old_code,
30 unsigned char *new_code)
31{ 25{
32 u32 old = *(u32 *)old_code;
33 u32 new = *(u32 *)new_code;
34 u32 replaced; 26 u32 replaced;
35 int faulted; 27 int faulted;
36 28
@@ -59,18 +51,43 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
59 return faulted; 51 return faulted;
60} 52}
61 53
54int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
55{
56 unsigned long ip = rec->ip;
57 u32 old, new;
58
59 old = ftrace_call_replace(ip, addr);
60 new = ftrace_nop;
61 return ftrace_modify_code(ip, old, new);
62}
63
64int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
65{
66 unsigned long ip = rec->ip;
67 u32 old, new;
68
69 old = ftrace_nop;
70 new = ftrace_call_replace(ip, addr);
71 return ftrace_modify_code(ip, old, new);
72}
73
62int ftrace_update_ftrace_func(ftrace_func_t func) 74int ftrace_update_ftrace_func(ftrace_func_t func)
63{ 75{
64 unsigned long ip = (unsigned long)(&ftrace_call); 76 unsigned long ip = (unsigned long)(&ftrace_call);
65 unsigned char old[MCOUNT_INSN_SIZE], *new; 77 u32 old, new;
66 78
67 memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); 79 old = *(u32 *) &ftrace_call;
68 new = ftrace_call_replace(ip, (unsigned long)func); 80 new = ftrace_call_replace(ip, (unsigned long)func);
69 return ftrace_modify_code(ip, old, new); 81 return ftrace_modify_code(ip, old, new);
70} 82}
71 83
72int __init ftrace_dyn_arch_init(void *data) 84int __init ftrace_dyn_arch_init(void *data)
73{ 85{
74 ftrace_mcount_set(data); 86 unsigned long *p = data;
87
88 *p = 0;
89
75 return 0; 90 return 0;
76} 91}
92#endif
93
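
The patching relies on two fixed encodings: 0x01000000 is the sparc nop
(sethi %hi(0), %g0), and a call is opcode 01 in the top two bits over a
30-bit signed word displacement, hence 0x40000000 | ((u32)off >> 2). A
standalone sketch of the arithmetic; note how decoding with << 2 shifts
the opcode bits out and restores the displacement's sign bit:

    #include <stdint.h>
    #include <stdio.h>

    /* Encode a sparc 'call' from ip to addr. */
    static uint32_t sparc_call(uint32_t ip, uint32_t addr)
    {
            int32_t off = (int32_t)addr - (int32_t)ip;

            return 0x40000000u | ((uint32_t)off >> 2);
    }

    /* Recover the branch target from an encoded call at ip. */
    static uint32_t sparc_call_target(uint32_t ip, uint32_t insn)
    {
            return ip + (int32_t)(insn << 2);
    }

    int main(void)
    {
            uint32_t ip = 0x1000, target = 0x2000;
            uint32_t insn = sparc_call(ip, target);

            /* Prints: insn=40000400 target=00002000 */
            printf("insn=%08x target=%08x\n", insn,
                   sparc_call_target(ip, insn));
            return 0;
    }
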
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 91bf4c7f79b9..f8f21050448b 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -641,28 +641,6 @@ tlb_fixup_done:
641 /* Not reached... */ 641 /* Not reached... */
642 642
6431: 6431:
644 /* If we boot on a non-zero cpu, all of the per-cpu
645 * variable references we make before setting up the
646 * per-cpu areas will use a bogus offset. Put a
647 * compensating factor into __per_cpu_base to handle
648 * this cleanly.
649 *
650 * What the per-cpu code calculates is:
651 *
652 * __per_cpu_base + (cpu << __per_cpu_shift)
653 *
654 * These two variables are zero initially, so to
655 * make it all cancel out to zero we need to put
656 * "0 - (cpu << 0)" into __per_cpu_base so that the
657 * above formula evaluates to zero.
658 *
659 * We cannot even perform a printk() until this stuff
660 * is setup as that calls cpu_clock() which uses
661 * per-cpu variables.
662 */
663 sub %g0, %o0, %o1
664 sethi %hi(__per_cpu_base), %o2
665 stx %o1, [%o2 + %lo(__per_cpu_base)]
666#else 644#else
667 mov 0, %o0 645 mov 0, %o0
668#endif 646#endif
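
The deleted compensation only makes sense against the old offset formula
__per_cpu_base + (cpu << __per_cpu_shift). A toy model of the arithmetic
(plain user-space C, illustration only):

    #include <stdio.h>

    static unsigned long pcpu_off(unsigned long base, unsigned long shift,
                                  unsigned long cpu)
    {
            return base + (cpu << shift);
    }

    int main(void)
    {
            unsigned long cpu = 5;  /* pretend we booted on CPU 5 */

            /* Early boot: base == shift == 0, so every per-cpu
             * reference would be off by 'cpu'...
             */
            printf("uncompensated: %lu\n", pcpu_off(0, 0, cpu));
            /* ...which storing base = 0 - (cpu << 0) cancelled out. */
            printf("compensated:   %lu\n", pcpu_off(0UL - cpu, 0, cpu));
            return 0;
    }

With the dynamic per-cpu allocator the early setup no longer depends on
this identity, so the boot-time hack can go.
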
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index d8900e1d5aad..0aeaefe696b9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -351,8 +351,9 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
351 free_pages((unsigned long)cpu, order); 351 free_pages((unsigned long)cpu, order);
352} 352}
353 353
354static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz, 354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
355 enum dma_data_direction direction) 355 unsigned long offset, size_t sz,
356 enum dma_data_direction direction)
356{ 357{
357 struct iommu *iommu; 358 struct iommu *iommu;
358 struct strbuf *strbuf; 359 struct strbuf *strbuf;
@@ -368,7 +369,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
368 if (unlikely(direction == DMA_NONE)) 369 if (unlikely(direction == DMA_NONE))
369 goto bad_no_ctx; 370 goto bad_no_ctx;
370 371
371 oaddr = (unsigned long)ptr; 372 oaddr = (unsigned long)(page_address(page) + offset);
372 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 373 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
373 npages >>= IO_PAGE_SHIFT; 374 npages >>= IO_PAGE_SHIFT;
374 375
@@ -472,8 +473,8 @@ do_flush_sync:
472 vaddr, ctx, npages); 473 vaddr, ctx, npages);
473} 474}
474 475
475static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr, 476static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
476 size_t sz, enum dma_data_direction direction) 477 size_t sz, enum dma_data_direction direction)
477{ 478{
478 struct iommu *iommu; 479 struct iommu *iommu;
479 struct strbuf *strbuf; 480 struct strbuf *strbuf;
@@ -824,8 +825,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
824static const struct dma_ops sun4u_dma_ops = { 825static const struct dma_ops sun4u_dma_ops = {
825 .alloc_coherent = dma_4u_alloc_coherent, 826 .alloc_coherent = dma_4u_alloc_coherent,
826 .free_coherent = dma_4u_free_coherent, 827 .free_coherent = dma_4u_free_coherent,
827 .map_single = dma_4u_map_single, 828 .map_page = dma_4u_map_page,
828 .unmap_single = dma_4u_unmap_single, 829 .unmap_page = dma_4u_unmap_page,
829 .map_sg = dma_4u_map_sg, 830 .map_sg = dma_4u_map_sg,
830 .unmap_sg = dma_4u_unmap_sg, 831 .unmap_sg = dma_4u_unmap_sg,
831 .sync_single_for_cpu = dma_4u_sync_single_for_cpu, 832 .sync_single_for_cpu = dma_4u_sync_single_for_cpu,
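
Nothing is lost by dropping the map_single entry point: any linear-mapped
kernel address decomposes into a backing page plus an offset, so map_page
subsumes it. A sketch of the equivalence, mirroring the generic-header
wrappers of this era (kernel context assumed):

    static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
                                            size_t size,
                                            enum dma_data_direction dir)
    {
            return dma_ops->map_page(dev, virt_to_page(ptr),
                                     offset_in_page(ptr), size, dir);
    }
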
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e5e78f9cfc95..bd075054942b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -45,6 +45,7 @@
45#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
46 46
47#include "entry.h" 47#include "entry.h"
48#include "cpumap.h"
48 49
49#define NUM_IVECS (IMAP_INR + 1) 50#define NUM_IVECS (IMAP_INR + 1)
50 51
@@ -256,35 +257,13 @@ static int irq_choose_cpu(unsigned int virt_irq)
256 int cpuid; 257 int cpuid;
257 258
258 cpumask_copy(&mask, irq_desc[virt_irq].affinity); 259 cpumask_copy(&mask, irq_desc[virt_irq].affinity);
259 if (cpus_equal(mask, CPU_MASK_ALL)) { 260 if (cpus_equal(mask, cpu_online_map)) {
260 static int irq_rover; 261 cpuid = map_to_cpu(virt_irq);
261 static DEFINE_SPINLOCK(irq_rover_lock);
262 unsigned long flags;
263
264 /* Round-robin distribution... */
265 do_round_robin:
266 spin_lock_irqsave(&irq_rover_lock, flags);
267
268 while (!cpu_online(irq_rover)) {
269 if (++irq_rover >= nr_cpu_ids)
270 irq_rover = 0;
271 }
272 cpuid = irq_rover;
273 do {
274 if (++irq_rover >= nr_cpu_ids)
275 irq_rover = 0;
276 } while (!cpu_online(irq_rover));
277
278 spin_unlock_irqrestore(&irq_rover_lock, flags);
279 } else { 262 } else {
280 cpumask_t tmp; 263 cpumask_t tmp;
281 264
282 cpus_and(tmp, cpu_online_map, mask); 265 cpus_and(tmp, cpu_online_map, mask);
283 266 cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
284 if (cpus_empty(tmp))
285 goto do_round_robin;
286
287 cpuid = first_cpu(tmp);
288 } 267 }
289 268
290 return cpuid; 269 return cpuid;
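
Since the two-column rendering makes the new control flow hard to read,
here is the post-patch body of irq_choose_cpu() condensed (no new logic,
just the resulting shape in one place):

    cpumask_copy(&mask, irq_desc[virt_irq].affinity);
    if (cpus_equal(mask, cpu_online_map)) {
            /* No explicit affinity: let cpumap spread it. */
            cpuid = map_to_cpu(virt_irq);
    } else {
            cpumask_t tmp;

            /* Honour the mask; fall back to cpumap if no
             * requested CPU is online.
             */
            cpus_and(tmp, cpu_online_map, mask);
            cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq)
                                    : first_cpu(tmp);
    }

The private irq_rover and its spinlock disappear because map_to_cpu()
already serializes behind cpu_map_lock.
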
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index f0e6ed23a468..938da19dc065 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -574,7 +574,7 @@ static void __init report_platform_properties(void)
574 mdesc_release(hp); 574 mdesc_release(hp);
575} 575}
576 576
577static void __devinit fill_in_one_cache(cpuinfo_sparc *c, 577static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
578 struct mdesc_handle *hp, 578 struct mdesc_handle *hp,
579 u64 mp) 579 u64 mp)
580{ 580{
@@ -619,8 +619,7 @@ static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
619 } 619 }
620} 620}
621 621
622static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp, 622static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
623 int core_id)
624{ 623{
625 u64 a; 624 u64 a;
626 625
@@ -653,7 +652,7 @@ static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
653 } 652 }
654} 653}
655 654
656static void __devinit set_core_ids(struct mdesc_handle *hp) 655static void __cpuinit set_core_ids(struct mdesc_handle *hp)
657{ 656{
658 int idx; 657 int idx;
659 u64 mp; 658 u64 mp;
@@ -678,8 +677,7 @@ static void __devinit set_core_ids(struct mdesc_handle *hp)
678 } 677 }
679} 678}
680 679
681static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, 680static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
682 int proc_id)
683{ 681{
684 u64 a; 682 u64 a;
685 683
@@ -698,8 +696,7 @@ static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
698 } 696 }
699} 697}
700 698
701static void __devinit __set_proc_ids(struct mdesc_handle *hp, 699static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
702 const char *exec_unit_name)
703{ 700{
704 int idx; 701 int idx;
705 u64 mp; 702 u64 mp;
@@ -720,13 +717,13 @@ static void __devinit __set_proc_ids(struct mdesc_handle *hp,
720 } 717 }
721} 718}
722 719
723static void __devinit set_proc_ids(struct mdesc_handle *hp) 720static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
724{ 721{
725 __set_proc_ids(hp, "exec_unit"); 722 __set_proc_ids(hp, "exec_unit");
726 __set_proc_ids(hp, "exec-unit"); 723 __set_proc_ids(hp, "exec-unit");
727} 724}
728 725
729static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask, 726static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
730 unsigned char def) 727 unsigned char def)
731{ 728{
732 u64 val; 729 u64 val;
@@ -745,7 +742,7 @@ use_default:
745 *mask = ((1U << def) * 64U) - 1U; 742 *mask = ((1U << def) * 64U) - 1U;
746} 743}
747 744
748static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp, 745static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
749 struct trap_per_cpu *tb) 746 struct trap_per_cpu *tb)
750{ 747{
751 const u64 *val; 748 const u64 *val;
@@ -763,23 +760,15 @@ static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
763 get_one_mondo_bits(val, &tb->nonresum_qmask, 2); 760 get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
764} 761}
765 762
766void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask) 763static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
767{ 764{
768 struct mdesc_handle *hp = mdesc_grab(); 765 struct mdesc_handle *hp = mdesc_grab();
766 void *ret = NULL;
769 u64 mp; 767 u64 mp;
770 768
771 ncpus_probed = 0;
772 mdesc_for_each_node_by_name(hp, mp, "cpu") { 769 mdesc_for_each_node_by_name(hp, mp, "cpu") {
773 const u64 *id = mdesc_get_property(hp, mp, "id", NULL); 770 const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
774 const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); 771 int cpuid = *id;
775 struct trap_per_cpu *tb;
776 cpuinfo_sparc *c;
777 int cpuid;
778 u64 a;
779
780 ncpus_probed++;
781
782 cpuid = *id;
783 772
784#ifdef CONFIG_SMP 773#ifdef CONFIG_SMP
785 if (cpuid >= NR_CPUS) { 774 if (cpuid >= NR_CPUS) {
@@ -788,62 +777,104 @@ void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
788 cpuid, NR_CPUS); 777 cpuid, NR_CPUS);
789 continue; 778 continue;
790 } 779 }
791 if (!cpu_isset(cpuid, mask)) 780 if (!cpu_isset(cpuid, *mask))
792 continue; 781 continue;
793#else
794 /* On uniprocessor we only want the values for the
795 * real physical cpu the kernel booted onto, however
796 * cpu_data() only has one entry at index 0.
797 */
798 if (cpuid != real_hard_smp_processor_id())
799 continue;
800 cpuid = 0;
801#endif 782#endif
802 783
803 c = &cpu_data(cpuid); 784 ret = func(hp, mp, cpuid, arg);
804 c->clock_tick = *cfreq; 785 if (ret)
786 goto out;
787 }
788out:
789 mdesc_release(hp);
790 return ret;
791}
805 792
806 tb = &trap_block[cpuid]; 793static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
807 get_mondo_data(hp, mp, tb); 794{
795 ncpus_probed++;
796#ifdef CONFIG_SMP
797 set_cpu_present(cpuid, true);
798#endif
799 return NULL;
800}
808 801
809 mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { 802void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
810 u64 j, t = mdesc_arc_target(hp, a); 803{
811 const char *t_name; 804 if (tlb_type != hypervisor)
805 return;
812 806
813 t_name = mdesc_node_name(hp, t); 807 ncpus_probed = 0;
814 if (!strcmp(t_name, "cache")) { 808 mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
815 fill_in_one_cache(c, hp, t); 809}
816 continue;
817 }
818 810
819 mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) { 811static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
820 u64 n = mdesc_arc_target(hp, j); 812{
821 const char *n_name; 813 const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
814 struct trap_per_cpu *tb;
815 cpuinfo_sparc *c;
816 u64 a;
822 817
823 n_name = mdesc_node_name(hp, n); 818#ifndef CONFIG_SMP
824 if (!strcmp(n_name, "cache")) 819 /* On uniprocessor we only want the values for the
825 fill_in_one_cache(c, hp, n); 820 * real physical cpu the kernel booted onto, however
826 } 821 * cpu_data() only has one entry at index 0.
822 */
823 if (cpuid != real_hard_smp_processor_id())
824 return NULL;
825 cpuid = 0;
826#endif
827
828 c = &cpu_data(cpuid);
829 c->clock_tick = *cfreq;
830
831 tb = &trap_block[cpuid];
832 get_mondo_data(hp, mp, tb);
833
834 mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
835 u64 j, t = mdesc_arc_target(hp, a);
836 const char *t_name;
837
838 t_name = mdesc_node_name(hp, t);
839 if (!strcmp(t_name, "cache")) {
840 fill_in_one_cache(c, hp, t);
841 continue;
827 } 842 }
828 843
829#ifdef CONFIG_SMP 844 mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
830 cpu_set(cpuid, cpu_present_map); 845 u64 n = mdesc_arc_target(hp, j);
831#endif 846 const char *n_name;
832 847
833 c->core_id = 0; 848 n_name = mdesc_node_name(hp, n);
834 c->proc_id = -1; 849 if (!strcmp(n_name, "cache"))
850 fill_in_one_cache(c, hp, n);
851 }
835 } 852 }
836 853
854 c->core_id = 0;
855 c->proc_id = -1;
856
857 return NULL;
858}
859
860void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
861{
862 struct mdesc_handle *hp;
863
864 mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
865
837#ifdef CONFIG_SMP 866#ifdef CONFIG_SMP
838 sparc64_multi_core = 1; 867 sparc64_multi_core = 1;
839#endif 868#endif
840 869
870 hp = mdesc_grab();
871
841 set_core_ids(hp); 872 set_core_ids(hp);
842 set_proc_ids(hp); 873 set_proc_ids(hp);
843 874
844 smp_fill_in_sib_core_maps();
845
846 mdesc_release(hp); 875 mdesc_release(hp);
876
877 smp_fill_in_sib_core_maps();
847} 878}
848 879
849static ssize_t mdesc_read(struct file *file, char __user *buf, 880static ssize_t mdesc_read(struct file *file, char __user *buf,
@@ -887,7 +918,6 @@ void __init sun4v_mdesc_init(void)
887{ 918{
888 struct mdesc_handle *hp; 919 struct mdesc_handle *hp;
889 unsigned long len, real_len, status; 920 unsigned long len, real_len, status;
890 cpumask_t mask;
891 921
892 (void) sun4v_mach_desc(0UL, 0UL, &len); 922 (void) sun4v_mach_desc(0UL, 0UL, &len);
893 923
@@ -911,7 +941,4 @@ void __init sun4v_mdesc_init(void)
911 cur_mdesc = hp; 941 cur_mdesc = hp;
912 942
913 report_platform_properties(); 943 report_platform_properties();
914
915 cpus_setall(mask);
916 mdesc_fill_in_cpu_data(mask);
917} 944}
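
The core of this refactor is a single guarded iterator both passes now
share: grab the MD, walk the "cpu" nodes, apply a callback, and let a
non-NULL return short-circuit the walk. Stripped of MD specifics, the
pattern is (standalone sketch, invented names):

    #include <stddef.h>
    #include <stdio.h>

    typedef void *(*visit_fn)(int cpuid, void *arg);

    static void *iterate(const int *cpus, int n, visit_fn fn, void *arg)
    {
            void *ret = NULL;
            int i;

            for (i = 0; i < n && !ret; i++)
                    ret = fn(cpus[i], arg); /* non-NULL stops the walk */
            return ret;
    }

    /* Like record_one_cpu(): count a cpu, keep walking. */
    static void *count_cb(int cpuid, void *arg)
    {
            (void)cpuid;
            ++*(int *)arg;
            return NULL;
    }

    int main(void)
    {
            int cpus[] = { 0, 1, 8, 9 }, n = 0;

            iterate(cpus, 4, count_cb, &n);
            printf("probed %d cpus\n", n);
            return 0;
    }

Splitting mdesc_populate_present_mask() out of mdesc_fill_in_cpu_data()
is what lets the DS hotplug path (see the ds.c hunk above) mark CPUs
present before filling in their data.
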
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 90273765e81f..0ee642f63234 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -75,8 +75,6 @@ void *module_alloc(unsigned long size)
75void module_free(struct module *mod, void *module_region) 75void module_free(struct module *mod, void *module_region)
76{ 76{
77 vfree(module_region); 77 vfree(module_region);
78 /* FIXME: If module_region == mod->init_region, trim exception
79 table entries. */
80} 78}
81 79
82/* Make generic code ignore STT_REGISTER dummy undefined symbols. */ 80/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index c8f14c1dc521..90396702ea2c 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -6,159 +6,11 @@
6#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/irq.h>
9#include <linux/of_device.h> 10#include <linux/of_device.h>
10#include <linux/of_platform.h> 11#include <linux/of_platform.h>
11 12
12static int node_match(struct device *dev, void *data) 13#include "of_device_common.h"
13{
14 struct of_device *op = to_of_device(dev);
15 struct device_node *dp = data;
16
17 return (op->node == dp);
18}
19
20struct of_device *of_find_device_by_node(struct device_node *dp)
21{
22 struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
23 dp, node_match);
24
25 if (dev)
26 return to_of_device(dev);
27
28 return NULL;
29}
30EXPORT_SYMBOL(of_find_device_by_node);
31
32unsigned int irq_of_parse_and_map(struct device_node *node, int index)
33{
34 struct of_device *op = of_find_device_by_node(node);
35
36 if (!op || index >= op->num_irqs)
37 return 0;
38
39 return op->irqs[index];
40}
41EXPORT_SYMBOL(irq_of_parse_and_map);
42
43/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
44 * BUS and propagate to all child of_device objects.
45 */
46void of_propagate_archdata(struct of_device *bus)
47{
48 struct dev_archdata *bus_sd = &bus->dev.archdata;
49 struct device_node *bus_dp = bus->node;
50 struct device_node *dp;
51
52 for (dp = bus_dp->child; dp; dp = dp->sibling) {
53 struct of_device *op = of_find_device_by_node(dp);
54
55 op->dev.archdata.iommu = bus_sd->iommu;
56 op->dev.archdata.stc = bus_sd->stc;
57 op->dev.archdata.host_controller = bus_sd->host_controller;
58 op->dev.archdata.numa_node = bus_sd->numa_node;
59
60 if (dp->child)
61 of_propagate_archdata(op);
62 }
63}
64
65struct bus_type of_platform_bus_type;
66EXPORT_SYMBOL(of_platform_bus_type);
67
68static inline u64 of_read_addr(const u32 *cell, int size)
69{
70 u64 r = 0;
71 while (size--)
72 r = (r << 32) | *(cell++);
73 return r;
74}
75
76static void __init get_cells(struct device_node *dp,
77 int *addrc, int *sizec)
78{
79 if (addrc)
80 *addrc = of_n_addr_cells(dp);
81 if (sizec)
82 *sizec = of_n_size_cells(dp);
83}
84
85/* Max address size we deal with */
86#define OF_MAX_ADDR_CELLS 4
87
88struct of_bus {
89 const char *name;
90 const char *addr_prop_name;
91 int (*match)(struct device_node *parent);
92 void (*count_cells)(struct device_node *child,
93 int *addrc, int *sizec);
94 int (*map)(u32 *addr, const u32 *range,
95 int na, int ns, int pna);
96 unsigned long (*get_flags)(const u32 *addr, unsigned long);
97};
98
99/*
100 * Default translator (generic bus)
101 */
102
103static void of_bus_default_count_cells(struct device_node *dev,
104 int *addrc, int *sizec)
105{
106 get_cells(dev, addrc, sizec);
107}
108
109/* Make sure the least significant 64-bits are in-range. Even
110 * for 3 or 4 cell values it is a good enough approximation.
111 */
112static int of_out_of_range(const u32 *addr, const u32 *base,
113 const u32 *size, int na, int ns)
114{
115 u64 a = of_read_addr(addr, na);
116 u64 b = of_read_addr(base, na);
117
118 if (a < b)
119 return 1;
120
121 b += of_read_addr(size, ns);
122 if (a >= b)
123 return 1;
124
125 return 0;
126}
127
128static int of_bus_default_map(u32 *addr, const u32 *range,
129 int na, int ns, int pna)
130{
131 u32 result[OF_MAX_ADDR_CELLS];
132 int i;
133
134 if (ns > 2) {
135 printk("of_device: Cannot handle size cells (%d) > 2.", ns);
136 return -EINVAL;
137 }
138
139 if (of_out_of_range(addr, range, range + na + pna, na, ns))
140 return -EINVAL;
141
142 /* Start with the parent range base. */
143 memcpy(result, range + na, pna * 4);
144
145 /* Add in the child address offset. */
146 for (i = 0; i < na; i++)
147 result[pna - 1 - i] +=
148 (addr[na - 1 - i] -
149 range[na - 1 - i]);
150
151 memcpy(addr, result, pna * 4);
152
153 return 0;
154}
155
156static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
157{
158 if (flags)
159 return flags;
160 return IORESOURCE_MEM;
161}
162 14
163/* 15/*
164 * PCI bus specific translator 16 * PCI bus specific translator
@@ -240,47 +92,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
240 return flags; 92 return flags;
241} 93}
242 94
243/*
244 * SBUS bus specific translator
245 */
246
247static int of_bus_sbus_match(struct device_node *np)
248{
249 struct device_node *dp = np;
250
251 while (dp) {
252 if (!strcmp(dp->name, "sbus") ||
253 !strcmp(dp->name, "sbi"))
254 return 1;
255
256 /* Have a look at use_1to1_mapping(). We're trying
257 * to match SBUS if that's the top-level bus and we
258 * don't have some intervening real bus that provides
259 * ranges based translations.
260 */
261 if (of_find_property(dp, "ranges", NULL) != NULL)
262 break;
263
264 dp = dp->parent;
265 }
266
267 return 0;
268}
269
270static void of_bus_sbus_count_cells(struct device_node *child,
271 int *addrc, int *sizec)
272{
273 if (addrc)
274 *addrc = 2;
275 if (sizec)
276 *sizec = 1;
277}
278
279static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna)
280{
281 return of_bus_default_map(addr, range, na, ns, pna);
282}
283
284static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) 95static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
285{ 96{
286 return IORESOURCE_MEM; 97 return IORESOURCE_MEM;
@@ -307,7 +118,7 @@ static struct of_bus of_busses[] = {
307 .addr_prop_name = "reg", 118 .addr_prop_name = "reg",
308 .match = of_bus_sbus_match, 119 .match = of_bus_sbus_match,
309 .count_cells = of_bus_sbus_count_cells, 120 .count_cells = of_bus_sbus_count_cells,
310 .map = of_bus_sbus_map, 121 .map = of_bus_default_map,
311 .get_flags = of_bus_sbus_get_flags, 122 .get_flags = of_bus_sbus_get_flags,
312 }, 123 },
313 /* Default */ 124 /* Default */
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 5ac287ac03de..881947e59e95 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -10,6 +10,8 @@
10#include <linux/of_device.h> 10#include <linux/of_device.h>
11#include <linux/of_platform.h> 11#include <linux/of_platform.h>
12 12
13#include "of_device_common.h"
14
13void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name) 15void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
14{ 16{
15 unsigned long ret = res->start + offset; 17 unsigned long ret = res->start + offset;
@@ -35,156 +37,6 @@ void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
35} 37}
36EXPORT_SYMBOL(of_iounmap); 38EXPORT_SYMBOL(of_iounmap);
37 39
38static int node_match(struct device *dev, void *data)
39{
40 struct of_device *op = to_of_device(dev);
41 struct device_node *dp = data;
42
43 return (op->node == dp);
44}
45
46struct of_device *of_find_device_by_node(struct device_node *dp)
47{
48 struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
49 dp, node_match);
50
51 if (dev)
52 return to_of_device(dev);
53
54 return NULL;
55}
56EXPORT_SYMBOL(of_find_device_by_node);
57
58unsigned int irq_of_parse_and_map(struct device_node *node, int index)
59{
60 struct of_device *op = of_find_device_by_node(node);
61
62 if (!op || index >= op->num_irqs)
63 return 0;
64
65 return op->irqs[index];
66}
67EXPORT_SYMBOL(irq_of_parse_and_map);
68
69/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
70 * BUS and propagate to all child of_device objects.
71 */
72void of_propagate_archdata(struct of_device *bus)
73{
74 struct dev_archdata *bus_sd = &bus->dev.archdata;
75 struct device_node *bus_dp = bus->node;
76 struct device_node *dp;
77
78 for (dp = bus_dp->child; dp; dp = dp->sibling) {
79 struct of_device *op = of_find_device_by_node(dp);
80
81 op->dev.archdata.iommu = bus_sd->iommu;
82 op->dev.archdata.stc = bus_sd->stc;
83 op->dev.archdata.host_controller = bus_sd->host_controller;
84 op->dev.archdata.numa_node = bus_sd->numa_node;
85
86 if (dp->child)
87 of_propagate_archdata(op);
88 }
89}
90
91struct bus_type of_platform_bus_type;
92EXPORT_SYMBOL(of_platform_bus_type);
93
94static inline u64 of_read_addr(const u32 *cell, int size)
95{
96 u64 r = 0;
97 while (size--)
98 r = (r << 32) | *(cell++);
99 return r;
100}
101
102static void get_cells(struct device_node *dp, int *addrc, int *sizec)
103{
104 if (addrc)
105 *addrc = of_n_addr_cells(dp);
106 if (sizec)
107 *sizec = of_n_size_cells(dp);
108}
109
110/* Max address size we deal with */
111#define OF_MAX_ADDR_CELLS 4
112
113struct of_bus {
114 const char *name;
115 const char *addr_prop_name;
116 int (*match)(struct device_node *parent);
117 void (*count_cells)(struct device_node *child,
118 int *addrc, int *sizec);
119 int (*map)(u32 *addr, const u32 *range,
120 int na, int ns, int pna);
121 unsigned long (*get_flags)(const u32 *addr, unsigned long);
122};
123
124/*
125 * Default translator (generic bus)
126 */
127
128static void of_bus_default_count_cells(struct device_node *dev,
129 int *addrc, int *sizec)
130{
131 get_cells(dev, addrc, sizec);
132}
133
134/* Make sure the least significant 64-bits are in-range. Even
135 * for 3 or 4 cell values it is a good enough approximation.
136 */
137static int of_out_of_range(const u32 *addr, const u32 *base,
138 const u32 *size, int na, int ns)
139{
140 u64 a = of_read_addr(addr, na);
141 u64 b = of_read_addr(base, na);
142
143 if (a < b)
144 return 1;
145
146 b += of_read_addr(size, ns);
147 if (a >= b)
148 return 1;
149
150 return 0;
151}
152
153static int of_bus_default_map(u32 *addr, const u32 *range,
154 int na, int ns, int pna)
155{
156 u32 result[OF_MAX_ADDR_CELLS];
157 int i;
158
159 if (ns > 2) {
160 printk("of_device: Cannot handle size cells (%d) > 2.", ns);
161 return -EINVAL;
162 }
163
164 if (of_out_of_range(addr, range, range + na + pna, na, ns))
165 return -EINVAL;
166
167 /* Start with the parent range base. */
168 memcpy(result, range + na, pna * 4);
169
170 /* Add in the child address offset. */
171 for (i = 0; i < na; i++)
172 result[pna - 1 - i] +=
173 (addr[na - 1 - i] -
174 range[na - 1 - i]);
175
176 memcpy(addr, result, pna * 4);
177
178 return 0;
179}
180
181static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
182{
183 if (flags)
184 return flags;
185 return IORESOURCE_MEM;
186}
187
188/* 40/*
189 * PCI bus specific translator 41 * PCI bus specific translator
190 */ 42 */
@@ -295,42 +147,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
295} 147}
296 148
297/* 149/*
298 * SBUS bus specific translator
299 */
300
301static int of_bus_sbus_match(struct device_node *np)
302{
303 struct device_node *dp = np;
304
305 while (dp) {
306 if (!strcmp(dp->name, "sbus") ||
307 !strcmp(dp->name, "sbi"))
308 return 1;
309
310 /* Have a look at use_1to1_mapping(). We're trying
311 * to match SBUS if that's the top-level bus and we
312 * don't have some intervening real bus that provides
313 * ranges based translations.
314 */
315 if (of_find_property(dp, "ranges", NULL) != NULL)
316 break;
317
318 dp = dp->parent;
319 }
320
321 return 0;
322}
323
324static void of_bus_sbus_count_cells(struct device_node *child,
325 int *addrc, int *sizec)
326{
327 if (addrc)
328 *addrc = 2;
329 if (sizec)
330 *sizec = 1;
331}
332
333/*
334 * FHC/Central bus specific translator. 150 * FHC/Central bus specific translator.
335 * 151 *
336 * This is just needed to hard-code the address and size cell 152 * This is just needed to hard-code the address and size cell
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
new file mode 100644
index 000000000000..cb8eb799bb6c
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.c
@@ -0,0 +1,174 @@
1#include <linux/string.h>
2#include <linux/kernel.h>
3#include <linux/of.h>
4#include <linux/init.h>
5#include <linux/module.h>
6#include <linux/mod_devicetable.h>
7#include <linux/slab.h>
8#include <linux/errno.h>
9#include <linux/irq.h>
10#include <linux/of_device.h>
11#include <linux/of_platform.h>
12
13#include "of_device_common.h"
14
15static int node_match(struct device *dev, void *data)
16{
17 struct of_device *op = to_of_device(dev);
18 struct device_node *dp = data;
19
20 return (op->node == dp);
21}
22
23struct of_device *of_find_device_by_node(struct device_node *dp)
24{
25 struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
26 dp, node_match);
27
28 if (dev)
29 return to_of_device(dev);
30
31 return NULL;
32}
33EXPORT_SYMBOL(of_find_device_by_node);
34
35unsigned int irq_of_parse_and_map(struct device_node *node, int index)
36{
37 struct of_device *op = of_find_device_by_node(node);
38
39 if (!op || index >= op->num_irqs)
40 return 0;
41
42 return op->irqs[index];
43}
44EXPORT_SYMBOL(irq_of_parse_and_map);
45
46/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
47 * BUS and propagate to all child of_device objects.
48 */
49void of_propagate_archdata(struct of_device *bus)
50{
51 struct dev_archdata *bus_sd = &bus->dev.archdata;
52 struct device_node *bus_dp = bus->node;
53 struct device_node *dp;
54
55 for (dp = bus_dp->child; dp; dp = dp->sibling) {
56 struct of_device *op = of_find_device_by_node(dp);
57
58 op->dev.archdata.iommu = bus_sd->iommu;
59 op->dev.archdata.stc = bus_sd->stc;
60 op->dev.archdata.host_controller = bus_sd->host_controller;
61 op->dev.archdata.numa_node = bus_sd->numa_node;
62
63 if (dp->child)
64 of_propagate_archdata(op);
65 }
66}
67
68struct bus_type of_platform_bus_type;
69EXPORT_SYMBOL(of_platform_bus_type);
70
71static void get_cells(struct device_node *dp, int *addrc, int *sizec)
72{
73 if (addrc)
74 *addrc = of_n_addr_cells(dp);
75 if (sizec)
76 *sizec = of_n_size_cells(dp);
77}
78
79/*
80 * Default translator (generic bus)
81 */
82
83void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec)
84{
85 get_cells(dev, addrc, sizec);
86}
87
88/* Make sure the least significant 64-bits are in-range. Even
89 * for 3 or 4 cell values it is a good enough approximation.
90 */
91int of_out_of_range(const u32 *addr, const u32 *base,
92 const u32 *size, int na, int ns)
93{
94 u64 a = of_read_addr(addr, na);
95 u64 b = of_read_addr(base, na);
96
97 if (a < b)
98 return 1;
99
100 b += of_read_addr(size, ns);
101 if (a >= b)
102 return 1;
103
104 return 0;
105}
106
107int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
108{
109 u32 result[OF_MAX_ADDR_CELLS];
110 int i;
111
112 if (ns > 2) {
113 printk("of_device: Cannot handle size cells (%d) > 2.", ns);
114 return -EINVAL;
115 }
116
117 if (of_out_of_range(addr, range, range + na + pna, na, ns))
118 return -EINVAL;
119
120 /* Start with the parent range base. */
121 memcpy(result, range + na, pna * 4);
122
123 /* Add in the child address offset. */
124 for (i = 0; i < na; i++)
125 result[pna - 1 - i] +=
126 (addr[na - 1 - i] -
127 range[na - 1 - i]);
128
129 memcpy(addr, result, pna * 4);
130
131 return 0;
132}
133
134unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
135{
136 if (flags)
137 return flags;
138 return IORESOURCE_MEM;
139}
140
141/*
142 * SBUS bus specific translator
143 */
144
145int of_bus_sbus_match(struct device_node *np)
146{
147 struct device_node *dp = np;
148
149 while (dp) {
150 if (!strcmp(dp->name, "sbus") ||
151 !strcmp(dp->name, "sbi"))
152 return 1;
153
154 /* Have a look at use_1to1_mapping(). We're trying
155 * to match SBUS if that's the top-level bus and we
156 * don't have some intervening real bus that provides
157 * ranges based translations.
158 */
159 if (of_find_property(dp, "ranges", NULL) != NULL)
160 break;
161
162 dp = dp->parent;
163 }
164
165 return 0;
166}
167
168void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec)
169{
170 if (addrc)
171 *addrc = 2;
172 if (sizec)
173 *sizec = 1;
174}
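
of_read_addr() and of_bus_default_map() are big-endian multi-cell
arithmetic. A standalone sketch of translating a two-cell child address
through one (child, parent, size) ranges row, with made-up cell values:

    #include <stdint.h>
    #include <stdio.h>

    /* Combine 'size' 32-bit cells, most significant first. */
    static uint64_t read_addr(const uint32_t *cell, int size)
    {
            uint64_t r = 0;

            while (size--)
                    r = (r << 32) | *cell++;
            return r;
    }

    int main(void)
    {
            uint32_t child_base[]  = { 0x0, 0x00010000 };
            uint32_t parent_base[] = { 0x8, 0x00000000 };
            uint32_t child_addr[]  = { 0x0, 0x00010040 };

            uint64_t off = read_addr(child_addr, 2) -
                           read_addr(child_base, 2);

            /* Prints: parent addr = 0x800000040 */
            printf("parent addr = 0x%llx\n",
                   (unsigned long long)(read_addr(parent_base, 2) + off));
            return 0;
    }

The real of_bus_default_map() performs the same subtraction cell by cell
so the result stays in the parent's cell format.
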
diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h
new file mode 100644
index 000000000000..cdfd23992841
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.h
@@ -0,0 +1,36 @@
1#ifndef _OF_DEVICE_COMMON_H
2#define _OF_DEVICE_COMMON_H
3
4static inline u64 of_read_addr(const u32 *cell, int size)
5{
6 u64 r = 0;
7 while (size--)
8 r = (r << 32) | *(cell++);
9 return r;
10}
11
12void of_bus_default_count_cells(struct device_node *dev, int *addrc,
13 int *sizec);
14int of_out_of_range(const u32 *addr, const u32 *base,
15 const u32 *size, int na, int ns);
16int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna);
17unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags);
18
19int of_bus_sbus_match(struct device_node *np);
20void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec);
21
22/* Max address size we deal with */
23#define OF_MAX_ADDR_CELLS 4
24
25struct of_bus {
26 const char *name;
27 const char *addr_prop_name;
28 int (*match)(struct device_node *parent);
29 void (*count_cells)(struct device_node *child,
30 int *addrc, int *sizec);
31 int (*map)(u32 *addr, const u32 *range,
32 int na, int ns, int pna);
33 unsigned long (*get_flags)(const u32 *addr, unsigned long);
34};
35
36#endif /* _OF_DEVICE_COMMON_H */
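
With the helpers shared, a bus translator is just a row in of_busses[];
only genuinely bus-specific callbacks need their own code. A hypothetical
row (bus name invented) showing the shape:

    /* Fragment of an of_busses[] initializer, illustration only. */
    {
            .name           = "mybus",
            .addr_prop_name = "reg",
            .match          = of_bus_sbus_match,         /* or custom */
            .count_cells    = of_bus_default_count_cells,
            .map            = of_bus_default_map,
            .get_flags      = of_bus_default_get_flags,
    },
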
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 5db5ebed35da..2485eaa23101 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -230,8 +230,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
230 free_pages((unsigned long)cpu, order); 230 free_pages((unsigned long)cpu, order);
231} 231}
232 232
233static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz, 233static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
234 enum dma_data_direction direction) 234 unsigned long offset, size_t sz,
235 enum dma_data_direction direction)
235{ 236{
236 struct iommu *iommu; 237 struct iommu *iommu;
237 unsigned long flags, npages, oaddr; 238 unsigned long flags, npages, oaddr;
@@ -245,7 +246,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
245 if (unlikely(direction == DMA_NONE)) 246 if (unlikely(direction == DMA_NONE))
246 goto bad; 247 goto bad;
247 248
248 oaddr = (unsigned long)ptr; 249 oaddr = (unsigned long)(page_address(page) + offset);
249 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 250 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
250 npages >>= IO_PAGE_SHIFT; 251 npages >>= IO_PAGE_SHIFT;
251 252
@@ -294,8 +295,8 @@ iommu_map_fail:
294 return DMA_ERROR_CODE; 295 return DMA_ERROR_CODE;
295} 296}
296 297
297static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr, 298static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
298 size_t sz, enum dma_data_direction direction) 299 size_t sz, enum dma_data_direction direction)
299{ 300{
300 struct pci_pbm_info *pbm; 301 struct pci_pbm_info *pbm;
301 struct iommu *iommu; 302 struct iommu *iommu;
@@ -537,8 +538,8 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev,
537static const struct dma_ops sun4v_dma_ops = { 538static const struct dma_ops sun4v_dma_ops = {
538 .alloc_coherent = dma_4v_alloc_coherent, 539 .alloc_coherent = dma_4v_alloc_coherent,
539 .free_coherent = dma_4v_free_coherent, 540 .free_coherent = dma_4v_free_coherent,
540 .map_single = dma_4v_map_single, 541 .map_page = dma_4v_map_page,
541 .unmap_single = dma_4v_unmap_single, 542 .unmap_page = dma_4v_unmap_page,
542 .map_sg = dma_4v_map_sg, 543 .map_sg = dma_4v_map_sg,
543 .unmap_sg = dma_4v_unmap_sg, 544 .unmap_sg = dma_4v_unmap_sg,
544 .sync_single_for_cpu = dma_4v_sync_single_for_cpu, 545 .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index bb0f0fda6cab..453397fe5e14 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -22,7 +22,6 @@ static inline int is_root_node(const struct device_node *dp)
 
 extern char *build_path_component(struct device_node *dp);
 extern void of_console_init(void);
-extern void of_fill_in_cpu_data(void);
 
 extern unsigned int prom_early_allocated;
 
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index ca55c7012f77..fb06ac2bd38f 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -374,75 +374,26 @@ static const char *get_mid_prop(void)
 	return (tlb_type == spitfire ? "upa-portid" : "portid");
 }
 
-struct device_node *of_find_node_by_cpuid(int cpuid)
-{
-	struct device_node *dp;
-	const char *mid_prop = get_mid_prop();
-
-	for_each_node_by_type(dp, "cpu") {
-		int id = of_getintprop_default(dp, mid_prop, -1);
-		const char *this_mid_prop = mid_prop;
-
-		if (id < 0) {
-			this_mid_prop = "cpuid";
-			id = of_getintprop_default(dp, this_mid_prop, -1);
-		}
-
-		if (id < 0) {
-			prom_printf("OF: Serious problem, cpu lacks "
-				    "%s property", this_mid_prop);
-			prom_halt();
-		}
-		if (cpuid == id)
-			return dp;
-	}
-	return NULL;
-}
-
-void __init of_fill_in_cpu_data(void)
+static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg)
 {
 	struct device_node *dp;
 	const char *mid_prop;
 
-	if (tlb_type == hypervisor)
-		return;
-
 	mid_prop = get_mid_prop();
-	ncpus_probed = 0;
 	for_each_node_by_type(dp, "cpu") {
 		int cpuid = of_getintprop_default(dp, mid_prop, -1);
 		const char *this_mid_prop = mid_prop;
-		struct device_node *portid_parent;
-		int portid = -1;
+		void *ret;
 
-		portid_parent = NULL;
 		if (cpuid < 0) {
 			this_mid_prop = "cpuid";
 			cpuid = of_getintprop_default(dp, this_mid_prop, -1);
-			if (cpuid >= 0) {
-				int limit = 2;
-
-				portid_parent = dp;
-				while (limit--) {
-					portid_parent = portid_parent->parent;
-					if (!portid_parent)
-						break;
-					portid = of_getintprop_default(portid_parent,
-								       "portid", -1);
-					if (portid >= 0)
-						break;
-				}
-			}
 		}
-
 		if (cpuid < 0) {
 			prom_printf("OF: Serious problem, cpu lacks "
 				    "%s property", this_mid_prop);
 			prom_halt();
 		}
-
-		ncpus_probed++;
-
 #ifdef CONFIG_SMP
 		if (cpuid >= NR_CPUS) {
 			printk(KERN_WARNING "Ignoring CPU %d which is "
@@ -450,79 +401,142 @@ void __init of_fill_in_cpu_data(void)
 			       cpuid, NR_CPUS);
 			continue;
 		}
-#else
-		/* On uniprocessor we only want the values for the
-		 * real physical cpu the kernel booted onto, however
-		 * cpu_data() only has one entry at index 0.
-		 */
-		if (cpuid != real_hard_smp_processor_id())
-			continue;
-		cpuid = 0;
 #endif
+		ret = func(dp, cpuid, arg);
+		if (ret)
+			return ret;
+	}
+	return NULL;
+}
 
-		cpu_data(cpuid).clock_tick =
-			of_getintprop_default(dp, "clock-frequency", 0);
-
-		if (portid_parent) {
-			cpu_data(cpuid).dcache_size =
-				of_getintprop_default(dp, "l1-dcache-size",
-						      16 * 1024);
-			cpu_data(cpuid).dcache_line_size =
-				of_getintprop_default(dp, "l1-dcache-line-size",
-						      32);
-			cpu_data(cpuid).icache_size =
-				of_getintprop_default(dp, "l1-icache-size",
-						      8 * 1024);
-			cpu_data(cpuid).icache_line_size =
-				of_getintprop_default(dp, "l1-icache-line-size",
-						      32);
-			cpu_data(cpuid).ecache_size =
-				of_getintprop_default(dp, "l2-cache-size", 0);
-			cpu_data(cpuid).ecache_line_size =
-				of_getintprop_default(dp, "l2-cache-line-size", 0);
-			if (!cpu_data(cpuid).ecache_size ||
-			    !cpu_data(cpuid).ecache_line_size) {
-				cpu_data(cpuid).ecache_size =
-					of_getintprop_default(portid_parent,
-							      "l2-cache-size",
-							      (4 * 1024 * 1024));
-				cpu_data(cpuid).ecache_line_size =
-					of_getintprop_default(portid_parent,
-							      "l2-cache-line-size", 64);
-			}
-
-			cpu_data(cpuid).core_id = portid + 1;
-			cpu_data(cpuid).proc_id = portid;
+static void *check_cpu_node(struct device_node *dp, int cpuid, int id)
+{
+	if (id == cpuid)
+		return dp;
+	return NULL;
+}
+
+struct device_node *of_find_node_by_cpuid(int cpuid)
+{
+	return of_iterate_over_cpus(check_cpu_node, cpuid);
+}
+
+static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+	ncpus_probed++;
 #ifdef CONFIG_SMP
-			sparc64_multi_core = 1;
+	set_cpu_present(cpuid, true);
+	set_cpu_possible(cpuid, true);
 #endif
-		} else {
-			cpu_data(cpuid).dcache_size =
-				of_getintprop_default(dp, "dcache-size", 16 * 1024);
-			cpu_data(cpuid).dcache_line_size =
-				of_getintprop_default(dp, "dcache-line-size", 32);
+	return NULL;
+}
 
-			cpu_data(cpuid).icache_size =
-				of_getintprop_default(dp, "icache-size", 16 * 1024);
-			cpu_data(cpuid).icache_line_size =
-				of_getintprop_default(dp, "icache-line-size", 32);
+void __init of_populate_present_mask(void)
+{
+	if (tlb_type == hypervisor)
+		return;
+
+	ncpus_probed = 0;
+	of_iterate_over_cpus(record_one_cpu, 0);
+}
 
+static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+	struct device_node *portid_parent = NULL;
+	int portid = -1;
+
+	if (of_find_property(dp, "cpuid", NULL)) {
+		int limit = 2;
+
+		portid_parent = dp;
+		while (limit--) {
+			portid_parent = portid_parent->parent;
+			if (!portid_parent)
+				break;
+			portid = of_getintprop_default(portid_parent,
+						       "portid", -1);
+			if (portid >= 0)
+				break;
+		}
+	}
+
+#ifndef CONFIG_SMP
+	/* On uniprocessor we only want the values for the
+	 * real physical cpu the kernel booted onto, however
+	 * cpu_data() only has one entry at index 0.
+	 */
+	if (cpuid != real_hard_smp_processor_id())
+		return NULL;
+	cpuid = 0;
+#endif
+
+	cpu_data(cpuid).clock_tick =
+		of_getintprop_default(dp, "clock-frequency", 0);
+
+	if (portid_parent) {
+		cpu_data(cpuid).dcache_size =
+			of_getintprop_default(dp, "l1-dcache-size",
+					      16 * 1024);
+		cpu_data(cpuid).dcache_line_size =
+			of_getintprop_default(dp, "l1-dcache-line-size",
+					      32);
+		cpu_data(cpuid).icache_size =
+			of_getintprop_default(dp, "l1-icache-size",
+					      8 * 1024);
+		cpu_data(cpuid).icache_line_size =
+			of_getintprop_default(dp, "l1-icache-line-size",
+					      32);
+		cpu_data(cpuid).ecache_size =
+			of_getintprop_default(dp, "l2-cache-size", 0);
+		cpu_data(cpuid).ecache_line_size =
+			of_getintprop_default(dp, "l2-cache-line-size", 0);
+		if (!cpu_data(cpuid).ecache_size ||
+		    !cpu_data(cpuid).ecache_line_size) {
 			cpu_data(cpuid).ecache_size =
-				of_getintprop_default(dp, "ecache-size",
+				of_getintprop_default(portid_parent,
+						      "l2-cache-size",
 						      (4 * 1024 * 1024));
 			cpu_data(cpuid).ecache_line_size =
-				of_getintprop_default(dp, "ecache-line-size", 64);
-
-			cpu_data(cpuid).core_id = 0;
-			cpu_data(cpuid).proc_id = -1;
+				of_getintprop_default(portid_parent,
+						      "l2-cache-line-size", 64);
 		}
 
+		cpu_data(cpuid).core_id = portid + 1;
+		cpu_data(cpuid).proc_id = portid;
 #ifdef CONFIG_SMP
-		set_cpu_present(cpuid, true);
-		set_cpu_possible(cpuid, true);
+		sparc64_multi_core = 1;
 #endif
+	} else {
+		cpu_data(cpuid).dcache_size =
+			of_getintprop_default(dp, "dcache-size", 16 * 1024);
+		cpu_data(cpuid).dcache_line_size =
+			of_getintprop_default(dp, "dcache-line-size", 32);
+
+		cpu_data(cpuid).icache_size =
+			of_getintprop_default(dp, "icache-size", 16 * 1024);
+		cpu_data(cpuid).icache_line_size =
+			of_getintprop_default(dp, "icache-line-size", 32);
+
+		cpu_data(cpuid).ecache_size =
+			of_getintprop_default(dp, "ecache-size",
+					      (4 * 1024 * 1024));
+		cpu_data(cpuid).ecache_line_size =
+			of_getintprop_default(dp, "ecache-line-size", 64);
+
+		cpu_data(cpuid).core_id = 0;
+		cpu_data(cpuid).proc_id = -1;
 	}
 
+	return NULL;
+}
+
+void __init of_fill_in_cpu_data(void)
+{
+	if (tlb_type == hypervisor)
+		return;
+
+	of_iterate_over_cpus(fill_in_one_cpu, 0);
+
 	smp_fill_in_sib_core_maps();
 }
 
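The prom_64.c change is a classic loop-to-iterator refactor: two nearly
identical for_each_node_by_type() walks become one of_iterate_over_cpus()
that resolves the cpuid-property quirks once, with each former loop body
reduced to a callback whose non-NULL return value stops the walk.  The
shape of the pattern, boiled down to a stand-alone sketch (walk_items()
and the bound of 16 are invented for illustration):

	static void *walk_items(void *(*func)(int item, int arg), int arg)
	{
		int item;

		for (item = 0; item < 16; item++) {
			void *ret = func(item, arg);

			if (ret)	/* callback short-circuits the walk */
				return ret;
		}
		return NULL;
	}
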
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index ff7b591c8946..0fb5789d43c8 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -313,6 +313,4 @@ void __init prom_build_devicetree(void)
 
 	printk("PROM: Built device tree with %u bytes of memory.\n",
 	       prom_early_allocated);
-
-	of_fill_in_cpu_data();
 }
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f7642e5a94db..fa44eaf8d897 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -20,7 +20,8 @@
 #include <linux/cache.h>
 #include <linux/jiffies.h>
 #include <linux/profile.h>
-#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
 #include <linux/cpu.h>
 
 #include <asm/head.h>
@@ -47,6 +48,8 @@
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
 
+#include "cpumap.h"
+
 int sparc64_multi_core __read_mostly;
 
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
@@ -278,7 +281,7 @@ static unsigned long kimage_addr_to_ra(void *p)
 	return kern_base + (val - KERNBASE);
 }
 
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
 {
 	extern unsigned long sparc64_ttable_tl0;
 	extern unsigned long kern_locked_tte_data;
@@ -298,12 +301,12 @@ static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread
298 "hvtramp_descr.\n"); 301 "hvtramp_descr.\n");
299 return; 302 return;
300 } 303 }
304 *descrp = hdesc;
301 305
302 hdesc->cpu = cpu; 306 hdesc->cpu = cpu;
303 hdesc->num_mappings = num_kernel_image_mappings; 307 hdesc->num_mappings = num_kernel_image_mappings;
304 308
305 tb = &trap_block[cpu]; 309 tb = &trap_block[cpu];
306 tb->hdesc = hdesc;
307 310
308 hdesc->fault_info_va = (unsigned long) &tb->fault_info; 311 hdesc->fault_info_va = (unsigned long) &tb->fault_info;
309 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); 312 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
@@ -341,12 +344,12 @@ static struct thread_info *cpu_new_thread = NULL;
 
 static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 {
-	struct trap_per_cpu *tb = &trap_block[cpu];
 	unsigned long entry =
 		(unsigned long)(&sparc64_cpu_startup);
 	unsigned long cookie =
 		(unsigned long)(&cpu_new_thread);
 	struct task_struct *p;
+	void *descr = NULL;
 	int timeout, ret;
 
 	p = fork_idle(cpu);
@@ -359,7 +362,8 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 	if (ldom_domaining_enabled)
 		ldom_startcpu_cpuid(cpu,
-				    (unsigned long) cpu_new_thread);
+				    (unsigned long) cpu_new_thread,
+				    &descr);
 	else
 #endif
 		prom_startcpu_cpuid(cpu, entry, cookie);
@@ -383,10 +387,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 	}
 	cpu_new_thread = NULL;
 
-	if (tb->hdesc) {
-		kfree(tb->hdesc);
-		tb->hdesc = NULL;
-	}
+	kfree(descr);
 
 	return ret;
 }
@@ -1315,6 +1316,8 @@ int __cpu_disable(void)
 	cpu_clear(cpu, cpu_online_map);
 	ipi_call_unlock();
 
+	cpu_map_rebuild();
+
 	return 0;
 }
 
@@ -1373,36 +1376,171 @@ void smp_send_stop(void)
 {
 }
 
-unsigned long __per_cpu_base __read_mostly;
-unsigned long __per_cpu_shift __read_mostly;
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+					unsigned long align)
+{
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = cpu_to_node(cpu);
+	void *ptr;
+
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = __alloc_bootmem(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = __alloc_bootmem_node(NODE_DATA(node),
+					   size, align, goal);
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+			 "%016lx\n", cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return __alloc_bootmem(size, align, goal);
+#endif
+}
 
-EXPORT_SYMBOL(__per_cpu_base);
-EXPORT_SYMBOL(__per_cpu_shift);
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
 
-void __init real_setup_per_cpu_areas(void)
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
 {
-	unsigned long paddr, goal, size, i;
-	char *ptr;
+	size_t off = (size_t)pageno << PAGE_SHIFT;
 
-	/* Copy section for each CPU (we discard the original) */
-	goal = PERCPU_ENOUGH_ROOM;
+	if (off >= pcpur_size)
+		return NULL;
 
-	__per_cpu_shift = PAGE_SHIFT;
-	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
-		__per_cpu_shift++;
+	return virt_to_page(pcpur_ptrs[cpu] + off);
+}
+
+#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
+
+static void __init pcpu_map_range(unsigned long start, unsigned long end,
+				  struct page *page)
+{
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long pte_base;
+
+	BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
+
+	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+		    _PAGE_CP_4U | _PAGE_CV_4U |
+		    _PAGE_P_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+			    _PAGE_CP_4V | _PAGE_CV_4V |
+			    _PAGE_P_4V | _PAGE_W_4V);
+
+	while (start < end) {
+		pgd_t *pgd = pgd_offset_k(start);
+		unsigned long this_end;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pud = pud_offset(pgd, start);
+		if (pud_none(*pud)) {
+			pmd_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			pud_populate(&init_mm, pud, new);
+		}
+
+		pmd = pmd_offset(pud, start);
+		if (!pmd_present(*pmd)) {
+			pte_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			pmd_populate_kernel(&init_mm, pmd, new);
+		}
 
-	paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
-	if (!paddr) {
-		prom_printf("Cannot allocate per-cpu memory.\n");
-		prom_halt();
+		pte = pte_offset_kernel(pmd, start);
+		this_end = (start + PMD_SIZE) & PMD_MASK;
+		if (this_end > end)
+			this_end = end;
+
+		while (start < this_end) {
+			unsigned long paddr = pfn << PAGE_SHIFT;
+
+			pte_val(*pte) = (paddr | pte_base);
+
+			start += PAGE_SIZE;
+			pte++;
+			pfn++;
+		}
+	}
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
+	static struct vm_struct vm;
+	unsigned long delta, cpu;
+	size_t pcpu_unit_size;
+	size_t ptrs_size;
+
+	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+			       PERCPU_DYNAMIC_RESERVE);
+	dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
+
+
+	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+	pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+	for_each_possible_cpu(cpu) {
+		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
+						     PCPU_CHUNK_SIZE);
+
+		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+			     PCPU_CHUNK_SIZE - pcpur_size);
+
+		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
 	}
 
-	ptr = __va(paddr);
-	__per_cpu_base = ptr - __per_cpu_start;
+	/* allocate address and map */
+	vm.flags = VM_ALLOC;
+	vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE;
+	vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
+
+	for_each_possible_cpu(cpu) {
+		unsigned long start = (unsigned long) vm.addr;
+		unsigned long end;
+
+		start += cpu * PCPU_CHUNK_SIZE;
+		end = start + PCPU_CHUNK_SIZE;
+		pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+	}
 
-	for (i = 0; i < NR_CPUS; i++, ptr += size)
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+	pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+						PERCPU_MODULE_RESERVE, dyn_size,
+						PCPU_CHUNK_SIZE, vm.addr, NULL);
+
+	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu) {
+		__per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+	}
 
 	/* Setup %g5 for the boot cpu. */
 	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
+
+	of_fill_in_cpu_data();
+	if (tlb_type == hypervisor)
+		mdesc_fill_in_cpu_data(cpu_all_mask);
 }
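The new setup_per_cpu_areas() gives every possible cpu one 4MB chunk
(PCPU_CHUNK_SIZE), remaps the chunks contiguously with pcpu_map_range(),
and hands them to the generic first-chunk code; after that a cpu's
per-cpu variables live at a fixed linear offset from the remapped base.
The offset arithmetic the final loop performs, as a sketch (the helper
name is invented):

	static unsigned long percpu_offset(unsigned long delta,
					   unsigned long cpu,
					   unsigned long unit_size)
	{
		/* the delta + cpu * unit value stored into
		 * __per_cpu_offset(cpu) above */
		return delta + cpu * unit_size;
	}
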
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 00ec3b15f38c..690901657291 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -81,4 +81,6 @@ sys_call_table:
 /*305*/	.long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
 /*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo
+
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 82b5bf85b9d2..6b3ee88e253c 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -82,7 +82,8 @@ sys_call_table32:
 	.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
 /*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv, compat_sys_pwritev
+/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
 
 #endif /* CONFIG_COMPAT */
 
@@ -156,4 +157,5 @@ sys_call_table:
 	.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
 /*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+	.word sys_pwritev, sys_rt_tgsigqueueinfo
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d809c4ebb48f..10f7bb9fc140 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2509,6 +2509,7 @@ void do_getpsr(struct pt_regs *regs)
 }
 
 struct trap_per_cpu trap_block[NR_CPUS];
+EXPORT_SYMBOL(trap_block);
 
 /* This can get invoked before sched_init() so play it super safe
  * and use hard_smp_processor_id().
@@ -2530,84 +2531,97 @@ extern void tsb_config_offsets_are_bolixed_dave(void);
 void __init trap_init(void)
 {
 	/* Compile time sanity check. */
-	if (TI_TASK != offsetof(struct thread_info, task) ||
-	    TI_FLAGS != offsetof(struct thread_info, flags) ||
-	    TI_CPU != offsetof(struct thread_info, cpu) ||
-	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
-	    TI_KSP != offsetof(struct thread_info, ksp) ||
-	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
-	    TI_KREGS != offsetof(struct thread_info, kregs) ||
-	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
-	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
-	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
-	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
-	    TI_GSR != offsetof(struct thread_info, gsr) ||
-	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
-	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
-	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
-	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
-	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
-	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
-	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
-	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
-	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
-	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
-	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
-	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
-	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
-	    (TI_FPREGS & (64 - 1)))
-		thread_info_offsets_are_bolixed_dave();
-
-	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
-	    (TRAP_PER_CPU_PGD_PADDR !=
-	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
-	    (TRAP_PER_CPU_CPU_MONDO_PA !=
-	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
-	    (TRAP_PER_CPU_DEV_MONDO_PA !=
-	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
-	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
-	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
-	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
-	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
-	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
-	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
-	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
-	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
-	    (TRAP_PER_CPU_FAULT_INFO !=
-	     offsetof(struct trap_per_cpu, fault_info)) ||
-	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
-	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
-	    (TRAP_PER_CPU_CPU_LIST_PA !=
-	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
-	    (TRAP_PER_CPU_TSB_HUGE !=
-	     offsetof(struct trap_per_cpu, tsb_huge)) ||
-	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
-	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
-	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
-	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
-	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
-	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
-	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
-	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
-	    (TRAP_PER_CPU_RESUM_QMASK !=
-	     offsetof(struct trap_per_cpu, resum_qmask)) ||
-	    (TRAP_PER_CPU_NONRESUM_QMASK !=
-	     offsetof(struct trap_per_cpu, nonresum_qmask)))
-		trap_per_cpu_offsets_are_bolixed_dave();
-
-	if ((TSB_CONFIG_TSB !=
-	     offsetof(struct tsb_config, tsb)) ||
-	    (TSB_CONFIG_RSS_LIMIT !=
-	     offsetof(struct tsb_config, tsb_rss_limit)) ||
-	    (TSB_CONFIG_NENTRIES !=
-	     offsetof(struct tsb_config, tsb_nentries)) ||
-	    (TSB_CONFIG_REG_VAL !=
-	     offsetof(struct tsb_config, tsb_reg_val)) ||
-	    (TSB_CONFIG_MAP_VADDR !=
-	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
-	    (TSB_CONFIG_MAP_PTE !=
-	     offsetof(struct tsb_config, tsb_map_pte)))
-		tsb_config_offsets_are_bolixed_dave();
+	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
+		     TI_FLAGS != offsetof(struct thread_info, flags) ||
+		     TI_CPU != offsetof(struct thread_info, cpu) ||
+		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
+		     TI_KSP != offsetof(struct thread_info, ksp) ||
+		     TI_FAULT_ADDR != offsetof(struct thread_info,
+					       fault_address) ||
+		     TI_KREGS != offsetof(struct thread_info, kregs) ||
+		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
+		     TI_EXEC_DOMAIN != offsetof(struct thread_info,
+						exec_domain) ||
+		     TI_REG_WINDOW != offsetof(struct thread_info,
+					       reg_window) ||
+		     TI_RWIN_SPTRS != offsetof(struct thread_info,
+					       rwbuf_stkptrs) ||
+		     TI_GSR != offsetof(struct thread_info, gsr) ||
+		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
+		     TI_USER_CNTD0 != offsetof(struct thread_info,
+					       user_cntd0) ||
+		     TI_USER_CNTD1 != offsetof(struct thread_info,
+					       user_cntd1) ||
+		     TI_KERN_CNTD0 != offsetof(struct thread_info,
+					       kernel_cntd0) ||
+		     TI_KERN_CNTD1 != offsetof(struct thread_info,
+					       kernel_cntd1) ||
+		     TI_PCR != offsetof(struct thread_info, pcr_reg) ||
+		     TI_PRE_COUNT != offsetof(struct thread_info,
+					      preempt_count) ||
+		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
+		     TI_SYS_NOERROR != offsetof(struct thread_info,
+						syscall_noerror) ||
+		     TI_RESTART_BLOCK != offsetof(struct thread_info,
+						  restart_block) ||
+		     TI_KUNA_REGS != offsetof(struct thread_info,
+					      kern_una_regs) ||
+		     TI_KUNA_INSN != offsetof(struct thread_info,
+					      kern_una_insn) ||
+		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
+		     (TI_FPREGS & (64 - 1)));
+
+	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
+						     thread) ||
+		     (TRAP_PER_CPU_PGD_PADDR !=
+		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
+		     (TRAP_PER_CPU_CPU_MONDO_PA !=
+		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
+		     (TRAP_PER_CPU_DEV_MONDO_PA !=
+		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
+		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
+		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
+		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
+		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
+		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
+		     (TRAP_PER_CPU_FAULT_INFO !=
+		      offsetof(struct trap_per_cpu, fault_info)) ||
+		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+		     (TRAP_PER_CPU_CPU_LIST_PA !=
+		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+		     (TRAP_PER_CPU_TSB_HUGE !=
+		      offsetof(struct trap_per_cpu, tsb_huge)) ||
+		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
+		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
+		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
+		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
+		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
+		     (TRAP_PER_CPU_RESUM_QMASK !=
+		      offsetof(struct trap_per_cpu, resum_qmask)) ||
+		     (TRAP_PER_CPU_NONRESUM_QMASK !=
+		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
+		     (TRAP_PER_CPU_PER_CPU_BASE !=
+		      offsetof(struct trap_per_cpu, __per_cpu_base)));
+
+	BUILD_BUG_ON((TSB_CONFIG_TSB !=
+		      offsetof(struct tsb_config, tsb)) ||
+		     (TSB_CONFIG_RSS_LIMIT !=
+		      offsetof(struct tsb_config, tsb_rss_limit)) ||
+		     (TSB_CONFIG_NENTRIES !=
+		      offsetof(struct tsb_config, tsb_nentries)) ||
+		     (TSB_CONFIG_REG_VAL !=
+		      offsetof(struct tsb_config, tsb_reg_val)) ||
+		     (TSB_CONFIG_MAP_VADDR !=
+		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
+		     (TSB_CONFIG_MAP_PTE !=
+		      offsetof(struct tsb_config, tsb_map_pte)));
 
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
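Converting the old "bolixed" checks to BUILD_BUG_ON() turns a runtime
call to an intentionally undefined function into a compile-time failure.
The macro of this era works roughly like the sketch below (a paraphrase,
not quoted from <linux/kernel.h>): a true condition yields a negative
array size, which no compiler will accept.

	/* Hypothetical re-derivation of the idiom behind BUILD_BUG_ON(). */
	#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
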
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
index 16cc28935e39..a61c349448e1 100644
--- a/arch/sparc/mm/extable.c
+++ b/arch/sparc/mm/extable.c
@@ -28,6 +28,10 @@ search_extable(const struct exception_table_entry *start,
  * word 3: last insn address + 4 bytes
  * word 4: fixup code address
  *
+ * Deleted entries are encoded as:
+ * word 1: unused
+ * word 2: -1
+ *
  * See asm/uaccess.h for more details.
  */
 
@@ -39,6 +43,10 @@ search_extable(const struct exception_table_entry *start,
 			continue;
 		}
 
+		/* A deleted entry; see trim_init_extable */
+		if (walk->fixup == -1)
+			continue;
+
 		if (walk->insn == value)
 			return walk;
 	}
@@ -57,6 +65,27 @@ search_extable(const struct exception_table_entry *start,
 	return NULL;
 }
 
+#ifdef CONFIG_MODULES
+/* We could memmove them around; easier to mark the trimmed ones. */
+void trim_init_extable(struct module *m)
+{
+	unsigned int i;
+	bool range;
+
+	for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
+		range = m->extable[i].fixup == 0;
+
+		if (within_module_init(m->extable[i].insn, m)) {
+			m->extable[i].fixup = -1;
+			if (range)
+				m->extable[i+1].fixup = -1;
+		}
+		if (range)
+			i++;
+	}
+}
+#endif /* CONFIG_MODULES */
+
 /* Special extable search, which handles ranges.  Returns fixup */
 unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
 {
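
trim_init_extable() deliberately marks discarded __init entries with a
fixup of -1 instead of compacting the table, so the sorted-table layout
is preserved; range entries (recognized by fixup == 0) are invalidated
as a pair.  A sketch of a lookup that honours the sentinel, mirroring
the search_extable() change above (all names here are invented):

	struct ex_entry { unsigned long insn, fixup; };

	static const struct ex_entry *find_entry(const struct ex_entry *tab,
						 unsigned int n,
						 unsigned long ip)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			if (tab[i].fixup == (unsigned long)-1)
				continue;	/* trimmed __init entry */
			if (tab[i].insn == ip)
				return &tab[i];
		}
		return NULL;
	}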
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index cbb282dab5a7..26bb3919ff1f 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -358,6 +358,7 @@ void __init paging_init(void)
 	protection_map[15] = PAGE_SHARED;
 	btfixup();
 	prom_build_devicetree();
+	of_fill_in_cpu_data();
 	device_scan();
 }
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f26a352c08a0..ca92e2f54e4d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1679,11 +1679,6 @@ pgd_t swapper_pg_dir[2048];
 static void sun4u_pgprot_init(void);
 static void sun4v_pgprot_init(void);
 
-/* Dummy function */
-void __init setup_per_cpu_areas(void)
-{
-}
-
 void __init paging_init(void)
 {
 	unsigned long end_pfn, shift, phys_base;
@@ -1799,16 +1794,13 @@ void __init paging_init(void)
 	if (tlb_type == hypervisor)
 		sun4v_ktsb_register();
 
-	/* We must setup the per-cpu areas before we pull in the
-	 * PROM and the MDESC.  The code there fills in cpu and
-	 * other information into per-cpu data structures.
-	 */
-	real_setup_per_cpu_areas();
-
 	prom_build_devicetree();
+	of_populate_present_mask();
 
-	if (tlb_type == hypervisor)
+	if (tlb_type == hypervisor) {
 		sun4v_mdesc_init();
+		mdesc_populate_present_mask(cpu_all_mask);
+	}
 
 	/* Once the OF device tree and MDESC have been setup, we know
 	 * the list of possible cpus.  Therefore we can allocate the
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 06c9a7d98206..ade4eb373bdd 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/kdebug.h>
+#include <linux/log2.h>
 
 #include <asm/bitext.h>
 #include <asm/page.h>
@@ -349,7 +350,7 @@ static void srmmu_free_nocache(unsigned long vaddr, int size)
 		       vaddr, srmmu_nocache_end);
 		BUG();
 	}
-	if (size & (size-1)) {
+	if (!is_power_of_2(size)) {
 		printk("Size 0x%x is not a power of 2\n", size);
 		BUG();
 	}
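
is_power_of_2() is the same bit trick the open-coded test used, plus an
explicit zero check; roughly what <linux/log2.h> provides (paraphrased,
helper name invented to avoid clashing with the real one):

	static inline bool my_is_power_of_2(unsigned long n)
	{
		/* n & (n - 1) clears the lowest set bit; only powers of
		 * two (and zero) become 0, so reject zero explicitly. */
		return n != 0 && (n & (n - 1)) == 0;
	}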