author    Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-29 17:03:42 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-29 17:03:42 -0400
commit    6e98ee75c3ab99db48ecc0615c2246dc193111a9 (patch)
tree      afdad9dc968ebef3787e7dc16a41290a525f18f5
parent    486b4ce13221aa6cd0cbc9fff6993f444d8a52b5 (diff)
parent    7db35f31cbb8ca1dbaba03d74b7db79ace084358 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [SPARC64]: Fill holes in hypervisor APIs and fix KTSB registry.
  [SPARC64]: Fix two bugs wrt. kernel 4MB TSB.
  [SPARC]: Mark as emulating cmpxchg, add appropriate depends for DRM.
  [SPARC]: Emulate cmpxchg like parisc
  [SPARC64]: Fix _PAGE_EXEC_4U check in sun4u I-TLB miss handler.
  [SPARC]: Linux always started with 9600 8N1
  [SPARC64]: arch/sparc64/time.c doesn't compile on Ultra 1 (no PCI)
  [SPARC64]: Eliminate NR_CPUS limitations.
  [SPARC64]: Use machine description and OBP properly for cpu probing.
  [SPARC64]: Negotiate hypervisor API for PCI services.
  [SPARC64]: Report proper system soft state to the hypervisor.
  [SPARC64]: Fix typo in sun4v_hvapi_register error handling.
  [SCSI] ESP: Kill SCSI_ESP_CORE and link directly just like jazz_esp
  [SCSI] jazz_esp: Converted to use esp_core.
  [SPARC64]: PCI device scan is way too verbose by default.
  [SERIAL] sunzilog: section mismatch fix
  [SPARC32]: Removes mismatch section warnigs in sparc time.c file
  [SPARC64]: Don't be picky about virtual-dma values on sun4v.
  [SPARC64]: Kill unused DIE_PAGE_FAULT enum value.
  [SCSI] pluto: Use wait_for_completion_timeout.
-rw-r--r--  arch/sparc/Kconfig | 7
-rw-r--r--  arch/sparc/kernel/time.c | 4
-rw-r--r--  arch/sparc/lib/atomic32.c | 15
-rw-r--r--  arch/sparc64/Kconfig | 6
-rw-r--r--  arch/sparc64/kernel/Makefile | 4
-rw-r--r--  arch/sparc64/kernel/devices.c | 196
-rw-r--r--  arch/sparc64/kernel/entry.S | 575
-rw-r--r--  arch/sparc64/kernel/head.S | 31
-rw-r--r--  arch/sparc64/kernel/hvapi.c | 5
-rw-r--r--  arch/sparc64/kernel/irq.c | 83
-rw-r--r--  arch/sparc64/kernel/itlb_miss.S | 4
-rw-r--r--  arch/sparc64/kernel/mdesc.c | 619
-rw-r--r--  arch/sparc64/kernel/pci.c | 54
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c | 7
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c | 54
-rw-r--r--  arch/sparc64/kernel/power.c | 2
-rw-r--r--  arch/sparc64/kernel/process.c | 4
-rw-r--r--  arch/sparc64/kernel/prom.c | 148
-rw-r--r--  arch/sparc64/kernel/setup.c | 18
-rw-r--r--  arch/sparc64/kernel/smp.c | 155
-rw-r--r--  arch/sparc64/kernel/sstate.c | 104
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S | 30
-rw-r--r--  arch/sparc64/kernel/time.c | 47
-rw-r--r--  arch/sparc64/kernel/traps.c | 27
-rw-r--r--  arch/sparc64/mm/init.c | 90
-rw-r--r--  arch/sparc64/prom/misc.c | 19
-rw-r--r--  drivers/char/drm/Kconfig | 2
-rw-r--r--  drivers/scsi/Kconfig | 14
-rw-r--r--  drivers/scsi/Makefile | 5
-rw-r--r--  drivers/scsi/jazz_esp.c | 429
-rw-r--r--  drivers/scsi/pluto.c | 18
-rw-r--r--  drivers/serial/suncore.c | 6
-rw-r--r--  drivers/serial/sunzilog.c | 4
-rw-r--r--  include/asm-sparc/atomic.h | 38
-rw-r--r--  include/asm-sparc64/bugs.h | 8
-rw-r--r--  include/asm-sparc64/cpudata.h | 24
-rw-r--r--  include/asm-sparc64/hypervisor.h | 643
-rw-r--r--  include/asm-sparc64/kdebug.h | 1
-rw-r--r--  include/asm-sparc64/mdesc.h | 39
-rw-r--r--  include/asm-sparc64/oplib.h | 7
-rw-r--r--  include/asm-sparc64/percpu.h | 4
-rw-r--r--  include/asm-sparc64/prom.h | 1
-rw-r--r--  include/asm-sparc64/smp.h | 4
-rw-r--r--  include/asm-sparc64/sstate.h | 13
-rw-r--r--  include/asm-sparc64/thread_info.h | 8
-rw-r--r--  include/asm-sparc64/topology.h | 3
-rw-r--r--  include/asm-sparc64/tsb.h | 2
47 files changed, 2728 insertions, 853 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index bd992c0048f0..fbcc00c6c06e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -178,6 +178,13 @@ config ARCH_HAS_ILOG2_U64
178 bool 178 bool
179 default n 179 default n
180 180
181config EMULATED_CMPXCHG
182 bool
183 default y
184 help
185 Sparc32 does not have a CAS instruction like sparc64. cmpxchg()
186 is emulated, and therefore it is not completely atomic.
187
181config SUN_PM 188config SUN_PM
182 bool 189 bool
183 default y 190 default y
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index f1401b57ccc7..7b4612da74a6 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -148,7 +148,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
148} 148}
149 149
150/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ 150/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
151static void __init kick_start_clock(void) 151static void __devinit kick_start_clock(void)
152{ 152{
153 struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs; 153 struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
154 unsigned char sec; 154 unsigned char sec;
@@ -223,7 +223,7 @@ static __inline__ int has_low_battery(void)
223 return (data1 == data2); /* Was the write blocked? */ 223 return (data1 == data2); /* Was the write blocked? */
224} 224}
225 225
226static void __init mostek_set_system_time(void) 226static void __devinit mostek_set_system_time(void)
227{ 227{
228 unsigned int year, mon, day, hour, min, sec; 228 unsigned int year, mon, day, hour, min, sec;
229 struct mostek48t02 *mregs; 229 struct mostek48t02 *mregs;
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 559335f4917d..617d29832e19 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -2,6 +2,7 @@
2 * atomic32.c: 32-bit atomic_t implementation 2 * atomic32.c: 32-bit atomic_t implementation
3 * 3 *
4 * Copyright (C) 2004 Keith M Wesolowski 4 * Copyright (C) 2004 Keith M Wesolowski
5 * Copyright (C) 2007 Kyle McMartin
5 * 6 *
6 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf 7 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
7 */ 8 */
@@ -117,3 +118,17 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
117 return old & mask; 118 return old & mask;
118} 119}
119EXPORT_SYMBOL(___change_bit); 120EXPORT_SYMBOL(___change_bit);
121
122unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
123{
124 unsigned long flags;
125 u32 prev;
126
 127 spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
128 if ((prev = *ptr) == old)
129 *ptr = new;
 130 spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
131
132 return (unsigned long)prev;
133}
134EXPORT_SYMBOL(__cmpxchg_u32);
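The new __cmpxchg_u32() above, together with the EMULATED_CMPXCHG help text earlier in this merge, implements the parisc-style scheme: lacking a CAS instruction, sparc32 makes the read-compare-write sequence atomic by taking one of a small set of spinlocks chosen by hashing the target address (the existing ATOMIC_HASH() used throughout this file). Below is a minimal user-space model of the idea, assuming only pthreads; emu_cmpxchg_u32, emu_hash and EMU_HASH_SIZE are invented names for illustration:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* A tiny pool of locks; hashing the address spreads unrelated words
 * across different locks so they rarely contend.
 */
#define EMU_HASH_SIZE 4
static pthread_mutex_t emu_locks[EMU_HASH_SIZE] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static pthread_mutex_t *emu_hash(volatile uint32_t *ptr)
{
        return &emu_locks[((uintptr_t)ptr >> 2) % EMU_HASH_SIZE];
}

/* Compare-and-swap emulated under a lock: returns the previous value,
 * mirroring the kernel routine above.
 */
static uint32_t emu_cmpxchg_u32(volatile uint32_t *ptr, uint32_t old, uint32_t newval)
{
        pthread_mutex_t *lock = emu_hash(ptr);
        uint32_t prev;

        pthread_mutex_lock(lock);
        if ((prev = *ptr) == old)
                *ptr = newval;
        pthread_mutex_unlock(lock);

        return prev;
}

int main(void)
{
        volatile uint32_t v = 1;
        uint32_t prev;

        prev = emu_cmpxchg_u32(&v, 1, 2);       /* succeeds: v becomes 2 */
        printf("prev=%u v=%u\n", (unsigned)prev, (unsigned)v);

        prev = emu_cmpxchg_u32(&v, 1, 3);       /* fails: v is 2, left unchanged */
        printf("prev=%u v=%u\n", (unsigned)prev, (unsigned)v);
        return 0;
}

As the Kconfig help text warns, this is only atomic with respect to code that takes the same locks, which is why the architecture advertises EMULATED_CMPXCHG for consumers such as DRM.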
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 831781cab271..bd00f89eed1e 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -147,10 +147,10 @@ config SMP
147 If you don't know what to do here, say N. 147 If you don't know what to do here, say N.
148 148
149config NR_CPUS 149config NR_CPUS
150 int "Maximum number of CPUs (2-64)" 150 int "Maximum number of CPUs (2-1024)"
151 range 2 64 151 range 2 1024
152 depends on SMP 152 depends on SMP
153 default "32" 153 default "64"
154 154
155source "drivers/cpufreq/Kconfig" 155source "drivers/cpufreq/Kconfig"
156 156
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index c749dccacc32..d8d19093d12f 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -8,11 +8,11 @@ EXTRA_CFLAGS := -Werror
8extra-y := head.o init_task.o vmlinux.lds 8extra-y := head.o init_task.o vmlinux.lds
9 9
10obj-y := process.o setup.o cpu.o idprom.o \ 10obj-y := process.o setup.o cpu.o idprom.o \
11 traps.o devices.o auxio.o una_asm.o \ 11 traps.o auxio.o una_asm.o \
12 irq.o ptrace.o time.o sys_sparc.o signal.o \ 12 irq.o ptrace.o time.o sys_sparc.o signal.o \
13 unaligned.o central.o pci.o starfire.o semaphore.o \ 13 unaligned.o central.o pci.o starfire.o semaphore.o \
14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ 14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
15 visemul.o prom.o of_device.o hvapi.o 15 visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
16 16
17obj-$(CONFIG_STACKTRACE) += stacktrace.o 17obj-$(CONFIG_STACKTRACE) += stacktrace.o
18obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ 18obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
deleted file mode 100644
index 0e03c8e218cd..000000000000
--- a/arch/sparc64/kernel/devices.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/* devices.c: Initial scan of the prom device tree for important
2 * Sparc device nodes which we need to find.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/kernel.h>
8#include <linux/threads.h>
9#include <linux/init.h>
10#include <linux/ioport.h>
11#include <linux/string.h>
12#include <linux/spinlock.h>
13#include <linux/errno.h>
14#include <linux/bootmem.h>
15
16#include <asm/page.h>
17#include <asm/oplib.h>
18#include <asm/system.h>
19#include <asm/smp.h>
20#include <asm/spitfire.h>
21#include <asm/timer.h>
22#include <asm/cpudata.h>
23
24/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
25 * operations in asm/ns87303.h
26 */
27DEFINE_SPINLOCK(ns87303_lock);
28
29extern void cpu_probe(void);
30extern void central_probe(void);
31
32static const char *cpu_mid_prop(void)
33{
34 if (tlb_type == spitfire)
35 return "upa-portid";
36 return "portid";
37}
38
39static int get_cpu_mid(struct device_node *dp)
40{
41 struct property *prop;
42
43 if (tlb_type == hypervisor) {
44 struct linux_prom64_registers *reg;
45 int len;
46
47 prop = of_find_property(dp, "cpuid", &len);
48 if (prop && len == 4)
49 return *(int *) prop->value;
50
51 prop = of_find_property(dp, "reg", NULL);
52 reg = prop->value;
53 return (reg[0].phys_addr >> 32) & 0x0fffffffUL;
54 } else {
55 const char *prop_name = cpu_mid_prop();
56
57 prop = of_find_property(dp, prop_name, NULL);
58 if (prop)
59 return *(int *) prop->value;
60 return 0;
61 }
62}
63
64static int check_cpu_node(struct device_node *dp, int *cur_inst,
65 int (*compare)(struct device_node *, int, void *),
66 void *compare_arg,
67 struct device_node **dev_node, int *mid)
68{
69 if (!compare(dp, *cur_inst, compare_arg)) {
70 if (dev_node)
71 *dev_node = dp;
72 if (mid)
73 *mid = get_cpu_mid(dp);
74 return 0;
75 }
76
77 (*cur_inst)++;
78
79 return -ENODEV;
80}
81
82static int __cpu_find_by(int (*compare)(struct device_node *, int, void *),
83 void *compare_arg,
84 struct device_node **dev_node, int *mid)
85{
86 struct device_node *dp;
87 int cur_inst;
88
89 cur_inst = 0;
90 for_each_node_by_type(dp, "cpu") {
91 int err = check_cpu_node(dp, &cur_inst,
92 compare, compare_arg,
93 dev_node, mid);
94 if (err == 0)
95 return 0;
96 }
97
98 return -ENODEV;
99}
100
101static int cpu_instance_compare(struct device_node *dp, int instance, void *_arg)
102{
103 int desired_instance = (int) (long) _arg;
104
105 if (instance == desired_instance)
106 return 0;
107 return -ENODEV;
108}
109
110int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid)
111{
112 return __cpu_find_by(cpu_instance_compare, (void *)(long)instance,
113 dev_node, mid);
114}
115
116static int cpu_mid_compare(struct device_node *dp, int instance, void *_arg)
117{
118 int desired_mid = (int) (long) _arg;
119 int this_mid;
120
121 this_mid = get_cpu_mid(dp);
122 if (this_mid == desired_mid)
123 return 0;
124 return -ENODEV;
125}
126
127int cpu_find_by_mid(int mid, struct device_node **dev_node)
128{
129 return __cpu_find_by(cpu_mid_compare, (void *)(long)mid,
130 dev_node, NULL);
131}
132
133void __init device_scan(void)
134{
135 /* FIX ME FAST... -DaveM */
136 ioport_resource.end = 0xffffffffffffffffUL;
137
138 prom_printf("Booting Linux...\n");
139
140#ifndef CONFIG_SMP
141 {
142 struct device_node *dp;
143 int err, def;
144
145 err = cpu_find_by_instance(0, &dp, NULL);
146 if (err) {
147 prom_printf("No cpu nodes, cannot continue\n");
148 prom_halt();
149 }
150 cpu_data(0).clock_tick =
151 of_getintprop_default(dp, "clock-frequency", 0);
152
153 def = ((tlb_type == hypervisor) ?
154 (8 * 1024) :
155 (16 * 1024));
156 cpu_data(0).dcache_size = of_getintprop_default(dp,
157 "dcache-size",
158 def);
159
160 def = 32;
161 cpu_data(0).dcache_line_size =
162 of_getintprop_default(dp, "dcache-line-size", def);
163
164 def = 16 * 1024;
165 cpu_data(0).icache_size = of_getintprop_default(dp,
166 "icache-size",
167 def);
168
169 def = 32;
170 cpu_data(0).icache_line_size =
171 of_getintprop_default(dp, "icache-line-size", def);
172
173 def = ((tlb_type == hypervisor) ?
174 (3 * 1024 * 1024) :
175 (4 * 1024 * 1024));
176 cpu_data(0).ecache_size = of_getintprop_default(dp,
177 "ecache-size",
178 def);
179
180 def = 64;
181 cpu_data(0).ecache_line_size =
182 of_getintprop_default(dp, "ecache-line-size", def);
183 printk("CPU[0]: Caches "
184 "D[sz(%d):line_sz(%d)] "
185 "I[sz(%d):line_sz(%d)] "
186 "E[sz(%d):line_sz(%d)]\n",
187 cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
188 cpu_data(0).icache_size, cpu_data(0).icache_line_size,
189 cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
190 }
191#endif
192
193 central_probe();
194
195 cpu_probe();
196}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 732b77cb71f8..8f10dda0f5c0 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1725,96 +1725,142 @@ real_hard_smp_processor_id:
1725 * returns %o0: sysino 1725 * returns %o0: sysino
1726 */ 1726 */
1727 .globl sun4v_devino_to_sysino 1727 .globl sun4v_devino_to_sysino
1728 .type sun4v_devino_to_sysino,#function
1728sun4v_devino_to_sysino: 1729sun4v_devino_to_sysino:
1729 mov HV_FAST_INTR_DEVINO2SYSINO, %o5 1730 mov HV_FAST_INTR_DEVINO2SYSINO, %o5
1730 ta HV_FAST_TRAP 1731 ta HV_FAST_TRAP
1731 retl 1732 retl
1732 mov %o1, %o0 1733 mov %o1, %o0
1734 .size sun4v_devino_to_sysino, .-sun4v_devino_to_sysino
1733 1735
1734 /* %o0: sysino 1736 /* %o0: sysino
1735 * 1737 *
1736 * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED}) 1738 * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1737 */ 1739 */
1738 .globl sun4v_intr_getenabled 1740 .globl sun4v_intr_getenabled
1741 .type sun4v_intr_getenabled,#function
1739sun4v_intr_getenabled: 1742sun4v_intr_getenabled:
1740 mov HV_FAST_INTR_GETENABLED, %o5 1743 mov HV_FAST_INTR_GETENABLED, %o5
1741 ta HV_FAST_TRAP 1744 ta HV_FAST_TRAP
1742 retl 1745 retl
1743 mov %o1, %o0 1746 mov %o1, %o0
1747 .size sun4v_intr_getenabled, .-sun4v_intr_getenabled
1744 1748
1745 /* %o0: sysino 1749 /* %o0: sysino
1746 * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) 1750 * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1747 */ 1751 */
1748 .globl sun4v_intr_setenabled 1752 .globl sun4v_intr_setenabled
1753 .type sun4v_intr_setenabled,#function
1749sun4v_intr_setenabled: 1754sun4v_intr_setenabled:
1750 mov HV_FAST_INTR_SETENABLED, %o5 1755 mov HV_FAST_INTR_SETENABLED, %o5
1751 ta HV_FAST_TRAP 1756 ta HV_FAST_TRAP
1752 retl 1757 retl
1753 nop 1758 nop
1759 .size sun4v_intr_setenabled, .-sun4v_intr_setenabled
1754 1760
1755 /* %o0: sysino 1761 /* %o0: sysino
1756 * 1762 *
1757 * returns %o0: intr_state (HV_INTR_STATE_*) 1763 * returns %o0: intr_state (HV_INTR_STATE_*)
1758 */ 1764 */
1759 .globl sun4v_intr_getstate 1765 .globl sun4v_intr_getstate
1766 .type sun4v_intr_getstate,#function
1760sun4v_intr_getstate: 1767sun4v_intr_getstate:
1761 mov HV_FAST_INTR_GETSTATE, %o5 1768 mov HV_FAST_INTR_GETSTATE, %o5
1762 ta HV_FAST_TRAP 1769 ta HV_FAST_TRAP
1763 retl 1770 retl
1764 mov %o1, %o0 1771 mov %o1, %o0
1772 .size sun4v_intr_getstate, .-sun4v_intr_getstate
1765 1773
1766 /* %o0: sysino 1774 /* %o0: sysino
1767 * %o1: intr_state (HV_INTR_STATE_*) 1775 * %o1: intr_state (HV_INTR_STATE_*)
1768 */ 1776 */
1769 .globl sun4v_intr_setstate 1777 .globl sun4v_intr_setstate
1778 .type sun4v_intr_setstate,#function
1770sun4v_intr_setstate: 1779sun4v_intr_setstate:
1771 mov HV_FAST_INTR_SETSTATE, %o5 1780 mov HV_FAST_INTR_SETSTATE, %o5
1772 ta HV_FAST_TRAP 1781 ta HV_FAST_TRAP
1773 retl 1782 retl
1774 nop 1783 nop
1784 .size sun4v_intr_setstate, .-sun4v_intr_setstate
1775 1785
1776 /* %o0: sysino 1786 /* %o0: sysino
1777 * 1787 *
1778 * returns %o0: cpuid 1788 * returns %o0: cpuid
1779 */ 1789 */
1780 .globl sun4v_intr_gettarget 1790 .globl sun4v_intr_gettarget
1791 .type sun4v_intr_gettarget,#function
1781sun4v_intr_gettarget: 1792sun4v_intr_gettarget:
1782 mov HV_FAST_INTR_GETTARGET, %o5 1793 mov HV_FAST_INTR_GETTARGET, %o5
1783 ta HV_FAST_TRAP 1794 ta HV_FAST_TRAP
1784 retl 1795 retl
1785 mov %o1, %o0 1796 mov %o1, %o0
1797 .size sun4v_intr_gettarget, .-sun4v_intr_gettarget
1786 1798
1787 /* %o0: sysino 1799 /* %o0: sysino
1788 * %o1: cpuid 1800 * %o1: cpuid
1789 */ 1801 */
1790 .globl sun4v_intr_settarget 1802 .globl sun4v_intr_settarget
1803 .type sun4v_intr_settarget,#function
1791sun4v_intr_settarget: 1804sun4v_intr_settarget:
1792 mov HV_FAST_INTR_SETTARGET, %o5 1805 mov HV_FAST_INTR_SETTARGET, %o5
1793 ta HV_FAST_TRAP 1806 ta HV_FAST_TRAP
1794 retl 1807 retl
1795 nop 1808 nop
1809 .size sun4v_intr_settarget, .-sun4v_intr_settarget
1796 1810
1797 /* %o0: type 1811 /* %o0: cpuid
1798 * %o1: queue paddr 1812 * %o1: pc
1799 * %o2: num queue entries 1813 * %o2: rtba
1814 * %o3: arg0
1800 * 1815 *
1801 * returns %o0: status 1816 * returns %o0: status
1802 */ 1817 */
1803 .globl sun4v_cpu_qconf 1818 .globl sun4v_cpu_start
1804sun4v_cpu_qconf: 1819 .type sun4v_cpu_start,#function
1805 mov HV_FAST_CPU_QCONF, %o5 1820sun4v_cpu_start:
1821 mov HV_FAST_CPU_START, %o5
1806 ta HV_FAST_TRAP 1822 ta HV_FAST_TRAP
1807 retl 1823 retl
1808 nop 1824 nop
1825 .size sun4v_cpu_start, .-sun4v_cpu_start
1809 1826
1810 /* returns %o0: status 1827 /* %o0: cpuid
1828 *
1829 * returns %o0: status
1811 */ 1830 */
1831 .globl sun4v_cpu_stop
1832 .type sun4v_cpu_stop,#function
1833sun4v_cpu_stop:
1834 mov HV_FAST_CPU_STOP, %o5
1835 ta HV_FAST_TRAP
1836 retl
1837 nop
1838 .size sun4v_cpu_stop, .-sun4v_cpu_stop
1839
1840 /* returns %o0: status */
1812 .globl sun4v_cpu_yield 1841 .globl sun4v_cpu_yield
1842 .type sun4v_cpu_yield, #function
1813sun4v_cpu_yield: 1843sun4v_cpu_yield:
1814 mov HV_FAST_CPU_YIELD, %o5 1844 mov HV_FAST_CPU_YIELD, %o5
1815 ta HV_FAST_TRAP 1845 ta HV_FAST_TRAP
1816 retl 1846 retl
1817 nop 1847 nop
1848 .size sun4v_cpu_yield, .-sun4v_cpu_yield
1849
1850 /* %o0: type
1851 * %o1: queue paddr
1852 * %o2: num queue entries
1853 *
1854 * returns %o0: status
1855 */
1856 .globl sun4v_cpu_qconf
1857 .type sun4v_cpu_qconf,#function
1858sun4v_cpu_qconf:
1859 mov HV_FAST_CPU_QCONF, %o5
1860 ta HV_FAST_TRAP
1861 retl
1862 nop
1863 .size sun4v_cpu_qconf, .-sun4v_cpu_qconf
1818 1864
1819 /* %o0: num cpus in cpu list 1865 /* %o0: num cpus in cpu list
1820 * %o1: cpu list paddr 1866 * %o1: cpu list paddr
@@ -1823,11 +1869,13 @@ sun4v_cpu_yield:
1823 * returns %o0: status 1869 * returns %o0: status
1824 */ 1870 */
1825 .globl sun4v_cpu_mondo_send 1871 .globl sun4v_cpu_mondo_send
1872 .type sun4v_cpu_mondo_send,#function
1826sun4v_cpu_mondo_send: 1873sun4v_cpu_mondo_send:
1827 mov HV_FAST_CPU_MONDO_SEND, %o5 1874 mov HV_FAST_CPU_MONDO_SEND, %o5
1828 ta HV_FAST_TRAP 1875 ta HV_FAST_TRAP
1829 retl 1876 retl
1830 nop 1877 nop
1878 .size sun4v_cpu_mondo_send, .-sun4v_cpu_mondo_send
1831 1879
1832 /* %o0: CPU ID 1880 /* %o0: CPU ID
1833 * 1881 *
@@ -1835,6 +1883,7 @@ sun4v_cpu_mondo_send:
1835 * %o0: cpu state as HV_CPU_STATE_* 1883 * %o0: cpu state as HV_CPU_STATE_*
1836 */ 1884 */
1837 .globl sun4v_cpu_state 1885 .globl sun4v_cpu_state
1886 .type sun4v_cpu_state,#function
1838sun4v_cpu_state: 1887sun4v_cpu_state:
1839 mov HV_FAST_CPU_STATE, %o5 1888 mov HV_FAST_CPU_STATE, %o5
1840 ta HV_FAST_TRAP 1889 ta HV_FAST_TRAP
@@ -1843,6 +1892,37 @@ sun4v_cpu_state:
1843 mov %o1, %o0 1892 mov %o1, %o0
18441: retl 18931: retl
1845 nop 1894 nop
1895 .size sun4v_cpu_state, .-sun4v_cpu_state
1896
1897 /* %o0: virtual address
1898 * %o1: must be zero
1899 * %o2: TTE
1900 * %o3: HV_MMU_* flags
1901 *
1902 * returns %o0: status
1903 */
1904 .globl sun4v_mmu_map_perm_addr
1905 .type sun4v_mmu_map_perm_addr,#function
1906sun4v_mmu_map_perm_addr:
1907 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
1908 ta HV_FAST_TRAP
1909 retl
1910 nop
1911 .size sun4v_mmu_map_perm_addr, .-sun4v_mmu_map_perm_addr
1912
1913 /* %o0: number of TSB descriptions
1914 * %o1: TSB descriptions real address
1915 *
1916 * returns %o0: status
1917 */
1918 .globl sun4v_mmu_tsb_ctx0
1919 .type sun4v_mmu_tsb_ctx0,#function
1920sun4v_mmu_tsb_ctx0:
1921 mov HV_FAST_MMU_TSB_CTX0, %o5
1922 ta HV_FAST_TRAP
1923 retl
1924 nop
1925 .size sun4v_mmu_tsb_ctx0, .-sun4v_mmu_tsb_ctx0
1846 1926
1847 /* %o0: API group number 1927 /* %o0: API group number
1848 * %o1: pointer to unsigned long major number storage 1928 * %o1: pointer to unsigned long major number storage
@@ -1851,6 +1931,7 @@ sun4v_cpu_state:
1851 * returns %o0: status 1931 * returns %o0: status
1852 */ 1932 */
1853 .globl sun4v_get_version 1933 .globl sun4v_get_version
1934 .type sun4v_get_version,#function
1854sun4v_get_version: 1935sun4v_get_version:
1855 mov HV_CORE_GET_VER, %o5 1936 mov HV_CORE_GET_VER, %o5
1856 mov %o1, %o3 1937 mov %o1, %o3
@@ -1859,6 +1940,7 @@ sun4v_get_version:
1859 stx %o1, [%o3] 1940 stx %o1, [%o3]
1860 retl 1941 retl
1861 stx %o2, [%o4] 1942 stx %o2, [%o4]
1943 .size sun4v_get_version, .-sun4v_get_version
1862 1944
1863 /* %o0: API group number 1945 /* %o0: API group number
1864 * %o1: desired major number 1946 * %o1: desired major number
@@ -1868,18 +1950,49 @@ sun4v_get_version:
1868 * returns %o0: status 1950 * returns %o0: status
1869 */ 1951 */
1870 .globl sun4v_set_version 1952 .globl sun4v_set_version
1953 .type sun4v_set_version,#function
1871sun4v_set_version: 1954sun4v_set_version:
1872 mov HV_CORE_SET_VER, %o5 1955 mov HV_CORE_SET_VER, %o5
1873 mov %o3, %o4 1956 mov %o3, %o4
1874 ta HV_CORE_TRAP 1957 ta HV_CORE_TRAP
1875 retl 1958 retl
1876 stx %o1, [%o4] 1959 stx %o1, [%o4]
1960 .size sun4v_set_version, .-sun4v_set_version
1961
1962 /* %o0: pointer to unsigned long time
1963 *
1964 * returns %o0: status
1965 */
1966 .globl sun4v_tod_get
1967 .type sun4v_tod_get,#function
1968sun4v_tod_get:
1969 mov %o0, %o4
1970 mov HV_FAST_TOD_GET, %o5
1971 ta HV_FAST_TRAP
1972 stx %o1, [%o4]
1973 retl
1974 nop
1975 .size sun4v_tod_get, .-sun4v_tod_get
1976
1977 /* %o0: time
1978 *
1979 * returns %o0: status
1980 */
1981 .globl sun4v_tod_set
1982 .type sun4v_tod_set,#function
1983sun4v_tod_set:
1984 mov HV_FAST_TOD_SET, %o5
1985 ta HV_FAST_TRAP
1986 retl
1987 nop
1988 .size sun4v_tod_set, .-sun4v_tod_set
1877 1989
1878 /* %o0: pointer to unsigned long status 1990 /* %o0: pointer to unsigned long status
1879 * 1991 *
1880 * returns %o0: signed character 1992 * returns %o0: signed character
1881 */ 1993 */
1882 .globl sun4v_con_getchar 1994 .globl sun4v_con_getchar
1995 .type sun4v_con_getchar,#function
1883sun4v_con_getchar: 1996sun4v_con_getchar:
1884 mov %o0, %o4 1997 mov %o0, %o4
1885 mov HV_FAST_CONS_GETCHAR, %o5 1998 mov HV_FAST_CONS_GETCHAR, %o5
@@ -1889,17 +2002,20 @@ sun4v_con_getchar:
1889 stx %o0, [%o4] 2002 stx %o0, [%o4]
1890 retl 2003 retl
1891 sra %o1, 0, %o0 2004 sra %o1, 0, %o0
2005 .size sun4v_con_getchar, .-sun4v_con_getchar
1892 2006
1893 /* %o0: signed long character 2007 /* %o0: signed long character
1894 * 2008 *
1895 * returns %o0: status 2009 * returns %o0: status
1896 */ 2010 */
1897 .globl sun4v_con_putchar 2011 .globl sun4v_con_putchar
2012 .type sun4v_con_putchar,#function
1898sun4v_con_putchar: 2013sun4v_con_putchar:
1899 mov HV_FAST_CONS_PUTCHAR, %o5 2014 mov HV_FAST_CONS_PUTCHAR, %o5
1900 ta HV_FAST_TRAP 2015 ta HV_FAST_TRAP
1901 retl 2016 retl
1902 sra %o0, 0, %o0 2017 sra %o0, 0, %o0
2018 .size sun4v_con_putchar, .-sun4v_con_putchar
1903 2019
1904 /* %o0: buffer real address 2020 /* %o0: buffer real address
1905 * %o1: buffer size 2021 * %o1: buffer size
@@ -1908,6 +2024,7 @@ sun4v_con_putchar:
1908 * returns %o0: status 2024 * returns %o0: status
1909 */ 2025 */
1910 .globl sun4v_con_read 2026 .globl sun4v_con_read
2027 .type sun4v_con_read,#function
1911sun4v_con_read: 2028sun4v_con_read:
1912 mov %o2, %o4 2029 mov %o2, %o4
1913 mov HV_FAST_CONS_READ, %o5 2030 mov HV_FAST_CONS_READ, %o5
@@ -1922,6 +2039,7 @@ sun4v_con_read:
1922 stx %o1, [%o4] 2039 stx %o1, [%o4]
19231: retl 20401: retl
1924 nop 2041 nop
2042 .size sun4v_con_read, .-sun4v_con_read
1925 2043
1926 /* %o0: buffer real address 2044 /* %o0: buffer real address
1927 * %o1: buffer size 2045 * %o1: buffer size
@@ -1930,6 +2048,7 @@ sun4v_con_read:
1930 * returns %o0: status 2048 * returns %o0: status
1931 */ 2049 */
1932 .globl sun4v_con_write 2050 .globl sun4v_con_write
2051 .type sun4v_con_write,#function
1933sun4v_con_write: 2052sun4v_con_write:
1934 mov %o2, %o4 2053 mov %o2, %o4
1935 mov HV_FAST_CONS_WRITE, %o5 2054 mov HV_FAST_CONS_WRITE, %o5
@@ -1937,3 +2056,445 @@ sun4v_con_write:
1937 stx %o1, [%o4] 2056 stx %o1, [%o4]
1938 retl 2057 retl
1939 nop 2058 nop
2059 .size sun4v_con_write, .-sun4v_con_write
2060
2061 /* %o0: soft state
2062 * %o1: address of description string
2063 *
2064 * returns %o0: status
2065 */
2066 .globl sun4v_mach_set_soft_state
2067 .type sun4v_mach_set_soft_state,#function
2068sun4v_mach_set_soft_state:
2069 mov HV_FAST_MACH_SET_SOFT_STATE, %o5
2070 ta HV_FAST_TRAP
2071 retl
2072 nop
2073 .size sun4v_mach_set_soft_state, .-sun4v_mach_set_soft_state
2074
2075 /* %o0: exit code
2076 *
2077 * Does not return.
2078 */
2079 .globl sun4v_mach_exit
2080 .type sun4v_mach_exit,#function
2081sun4v_mach_exit:
2082 mov HV_FAST_MACH_EXIT, %o5
2083 ta HV_FAST_TRAP
2084 retl
2085 nop
2086 .size sun4v_mach_exit, .-sun4v_mach_exit
2087
2088 /* %o0: buffer real address
2089 * %o1: buffer length
2090 * %o2: pointer to unsigned long real_buf_len
2091 *
2092 * returns %o0: status
2093 */
2094 .globl sun4v_mach_desc
2095 .type sun4v_mach_desc,#function
2096sun4v_mach_desc:
2097 mov %o2, %o4
2098 mov HV_FAST_MACH_DESC, %o5
2099 ta HV_FAST_TRAP
2100 stx %o1, [%o4]
2101 retl
2102 nop
2103 .size sun4v_mach_desc, .-sun4v_mach_desc
2104
2105 /* %o0: new timeout in milliseconds
2106 * %o1: pointer to unsigned long orig_timeout
2107 *
2108 * returns %o0: status
2109 */
2110 .globl sun4v_mach_set_watchdog
2111 .type sun4v_mach_set_watchdog,#function
2112sun4v_mach_set_watchdog:
2113 mov %o1, %o4
2114 mov HV_FAST_MACH_SET_WATCHDOG, %o5
2115 ta HV_FAST_TRAP
2116 stx %o1, [%o4]
2117 retl
2118 nop
2119 .size sun4v_mach_set_watchdog, .-sun4v_mach_set_watchdog
2120
2121 /* No inputs and does not return. */
2122 .globl sun4v_mach_sir
2123 .type sun4v_mach_sir,#function
2124sun4v_mach_sir:
2125 mov %o1, %o4
2126 mov HV_FAST_MACH_SIR, %o5
2127 ta HV_FAST_TRAP
2128 stx %o1, [%o4]
2129 retl
2130 nop
2131 .size sun4v_mach_sir, .-sun4v_mach_sir
2132
2133 /* %o0: channel
2134 * %o1: ra
2135 * %o2: num_entries
2136 *
2137 * returns %o0: status
2138 */
2139 .globl sun4v_ldc_tx_qconf
2140 .type sun4v_ldc_tx_qconf,#function
2141sun4v_ldc_tx_qconf:
2142 mov HV_FAST_LDC_TX_QCONF, %o5
2143 ta HV_FAST_TRAP
2144 retl
2145 nop
2146 .size sun4v_ldc_tx_qconf, .-sun4v_ldc_tx_qconf
2147
2148 /* %o0: channel
2149 * %o1: pointer to unsigned long ra
2150 * %o2: pointer to unsigned long num_entries
2151 *
2152 * returns %o0: status
2153 */
2154 .globl sun4v_ldc_tx_qinfo
2155 .type sun4v_ldc_tx_qinfo,#function
2156sun4v_ldc_tx_qinfo:
2157 mov %o1, %g1
2158 mov %o2, %g2
2159 mov HV_FAST_LDC_TX_QINFO, %o5
2160 ta HV_FAST_TRAP
2161 stx %o1, [%g1]
2162 stx %o2, [%g2]
2163 retl
2164 nop
2165 .size sun4v_ldc_tx_qinfo, .-sun4v_ldc_tx_qinfo
2166
2167 /* %o0: channel
2168 * %o1: pointer to unsigned long head_off
2169 * %o2: pointer to unsigned long tail_off
 2170 * %o3: pointer to unsigned long chan_state
2171 *
2172 * returns %o0: status
2173 */
2174 .globl sun4v_ldc_tx_get_state
2175 .type sun4v_ldc_tx_get_state,#function
2176sun4v_ldc_tx_get_state:
2177 mov %o1, %g1
2178 mov %o2, %g2
2179 mov %o3, %g3
2180 mov HV_FAST_LDC_TX_GET_STATE, %o5
2181 ta HV_FAST_TRAP
2182 stx %o1, [%g1]
2183 stx %o2, [%g2]
2184 stx %o3, [%g3]
2185 retl
2186 nop
2187 .size sun4v_ldc_tx_get_state, .-sun4v_ldc_tx_get_state
2188
2189 /* %o0: channel
2190 * %o1: tail_off
2191 *
2192 * returns %o0: status
2193 */
2194 .globl sun4v_ldc_tx_set_qtail
2195 .type sun4v_ldc_tx_set_qtail,#function
2196sun4v_ldc_tx_set_qtail:
2197 mov HV_FAST_LDC_TX_SET_QTAIL, %o5
2198 ta HV_FAST_TRAP
2199 retl
2200 nop
2201 .size sun4v_ldc_tx_set_qtail, .-sun4v_ldc_tx_set_qtail
2202
2203 /* %o0: channel
2204 * %o1: ra
2205 * %o2: num_entries
2206 *
2207 * returns %o0: status
2208 */
2209 .globl sun4v_ldc_rx_qconf
2210 .type sun4v_ldc_rx_qconf,#function
2211sun4v_ldc_rx_qconf:
2212 mov HV_FAST_LDC_RX_QCONF, %o5
2213 ta HV_FAST_TRAP
2214 retl
2215 nop
2216 .size sun4v_ldc_rx_qconf, .-sun4v_ldc_rx_qconf
2217
2218 /* %o0: channel
2219 * %o1: pointer to unsigned long ra
2220 * %o2: pointer to unsigned long num_entries
2221 *
2222 * returns %o0: status
2223 */
2224 .globl sun4v_ldc_rx_qinfo
2225 .type sun4v_ldc_rx_qinfo,#function
2226sun4v_ldc_rx_qinfo:
2227 mov %o1, %g1
2228 mov %o2, %g2
2229 mov HV_FAST_LDC_RX_QINFO, %o5
2230 ta HV_FAST_TRAP
2231 stx %o1, [%g1]
2232 stx %o2, [%g2]
2233 retl
2234 nop
2235 .size sun4v_ldc_rx_qinfo, .-sun4v_ldc_rx_qinfo
2236
2237 /* %o0: channel
2238 * %o1: pointer to unsigned long head_off
2239 * %o2: pointer to unsigned long tail_off
 2240 * %o3: pointer to unsigned long chan_state
2241 *
2242 * returns %o0: status
2243 */
2244 .globl sun4v_ldc_rx_get_state
2245 .type sun4v_ldc_rx_get_state,#function
2246sun4v_ldc_rx_get_state:
2247 mov %o1, %g1
2248 mov %o2, %g2
2249 mov %o3, %g3
2250 mov HV_FAST_LDC_RX_GET_STATE, %o5
2251 ta HV_FAST_TRAP
2252 stx %o1, [%g1]
2253 stx %o2, [%g2]
2254 stx %o3, [%g3]
2255 retl
2256 nop
2257 .size sun4v_ldc_rx_get_state, .-sun4v_ldc_rx_get_state
2258
2259 /* %o0: channel
2260 * %o1: head_off
2261 *
2262 * returns %o0: status
2263 */
2264 .globl sun4v_ldc_rx_set_qhead
2265 .type sun4v_ldc_rx_set_qhead,#function
2266sun4v_ldc_rx_set_qhead:
2267 mov HV_FAST_LDC_RX_SET_QHEAD, %o5
2268 ta HV_FAST_TRAP
2269 retl
2270 nop
2271 .size sun4v_ldc_rx_set_qhead, .-sun4v_ldc_rx_set_qhead
2272
2273 /* %o0: channel
2274 * %o1: ra
2275 * %o2: num_entries
2276 *
2277 * returns %o0: status
2278 */
2279 .globl sun4v_ldc_set_map_table
2280 .type sun4v_ldc_set_map_table,#function
2281sun4v_ldc_set_map_table:
2282 mov HV_FAST_LDC_SET_MAP_TABLE, %o5
2283 ta HV_FAST_TRAP
2284 retl
2285 nop
2286 .size sun4v_ldc_set_map_table, .-sun4v_ldc_set_map_table
2287
2288 /* %o0: channel
2289 * %o1: pointer to unsigned long ra
2290 * %o2: pointer to unsigned long num_entries
2291 *
2292 * returns %o0: status
2293 */
2294 .globl sun4v_ldc_get_map_table
2295 .type sun4v_ldc_get_map_table,#function
2296sun4v_ldc_get_map_table:
2297 mov %o1, %g1
2298 mov %o2, %g2
2299 mov HV_FAST_LDC_GET_MAP_TABLE, %o5
2300 ta HV_FAST_TRAP
2301 stx %o1, [%g1]
2302 stx %o2, [%g2]
2303 retl
2304 nop
2305 .size sun4v_ldc_get_map_table, .-sun4v_ldc_get_map_table
2306
2307 /* %o0: channel
2308 * %o1: dir_code
2309 * %o2: tgt_raddr
2310 * %o3: lcl_raddr
2311 * %o4: len
2312 * %o5: pointer to unsigned long actual_len
2313 *
2314 * returns %o0: status
2315 */
2316 .globl sun4v_ldc_copy
2317 .type sun4v_ldc_copy,#function
2318sun4v_ldc_copy:
2319 mov %o5, %g1
2320 mov HV_FAST_LDC_COPY, %o5
2321 ta HV_FAST_TRAP
2322 stx %o1, [%g1]
2323 retl
2324 nop
2325 .size sun4v_ldc_copy, .-sun4v_ldc_copy
2326
2327 /* %o0: channel
2328 * %o1: cookie
2329 * %o2: pointer to unsigned long ra
2330 * %o3: pointer to unsigned long perm
2331 *
2332 * returns %o0: status
2333 */
2334 .globl sun4v_ldc_mapin
2335 .type sun4v_ldc_mapin,#function
2336sun4v_ldc_mapin:
2337 mov %o2, %g1
2338 mov %o3, %g2
2339 mov HV_FAST_LDC_MAPIN, %o5
2340 ta HV_FAST_TRAP
2341 stx %o1, [%g1]
2342 stx %o2, [%g2]
2343 retl
2344 nop
2345 .size sun4v_ldc_mapin, .-sun4v_ldc_mapin
2346
2347 /* %o0: ra
2348 *
2349 * returns %o0: status
2350 */
2351 .globl sun4v_ldc_unmap
2352 .type sun4v_ldc_unmap,#function
2353sun4v_ldc_unmap:
2354 mov HV_FAST_LDC_UNMAP, %o5
2355 ta HV_FAST_TRAP
2356 retl
2357 nop
2358 .size sun4v_ldc_unmap, .-sun4v_ldc_unmap
2359
2360 /* %o0: cookie
2361 * %o1: mte_cookie
2362 *
2363 * returns %o0: status
2364 */
2365 .globl sun4v_ldc_revoke
2366 .type sun4v_ldc_revoke,#function
2367sun4v_ldc_revoke:
2368 mov HV_FAST_LDC_REVOKE, %o5
2369 ta HV_FAST_TRAP
2370 retl
2371 nop
2372 .size sun4v_ldc_revoke, .-sun4v_ldc_revoke
2373
2374 /* %o0: device handle
2375 * %o1: device INO
2376 * %o2: pointer to unsigned long cookie
2377 *
2378 * returns %o0: status
2379 */
2380 .globl sun4v_vintr_get_cookie
2381 .type sun4v_vintr_get_cookie,#function
2382sun4v_vintr_get_cookie:
2383 mov %o2, %g1
2384 mov HV_FAST_VINTR_GET_COOKIE, %o5
2385 ta HV_FAST_TRAP
2386 stx %o1, [%g1]
2387 retl
2388 nop
2389 .size sun4v_vintr_get_cookie, .-sun4v_vintr_get_cookie
2390
2391 /* %o0: device handle
2392 * %o1: device INO
2393 * %o2: cookie
2394 *
2395 * returns %o0: status
2396 */
2397 .globl sun4v_vintr_set_cookie
2398 .type sun4v_vintr_set_cookie,#function
2399sun4v_vintr_set_cookie:
2400 mov HV_FAST_VINTR_SET_COOKIE, %o5
2401 ta HV_FAST_TRAP
2402 retl
2403 nop
2404 .size sun4v_vintr_set_cookie, .-sun4v_vintr_set_cookie
2405
2406 /* %o0: device handle
2407 * %o1: device INO
2408 * %o2: pointer to unsigned long valid_state
2409 *
2410 * returns %o0: status
2411 */
2412 .globl sun4v_vintr_get_valid
2413 .type sun4v_vintr_get_valid,#function
2414sun4v_vintr_get_valid:
2415 mov %o2, %g1
2416 mov HV_FAST_VINTR_GET_VALID, %o5
2417 ta HV_FAST_TRAP
2418 stx %o1, [%g1]
2419 retl
2420 nop
2421 .size sun4v_vintr_get_valid, .-sun4v_vintr_get_valid
2422
2423 /* %o0: device handle
2424 * %o1: device INO
2425 * %o2: valid_state
2426 *
2427 * returns %o0: status
2428 */
2429 .globl sun4v_vintr_set_valid
2430 .type sun4v_vintr_set_valid,#function
2431sun4v_vintr_set_valid:
2432 mov HV_FAST_VINTR_SET_VALID, %o5
2433 ta HV_FAST_TRAP
2434 retl
2435 nop
2436 .size sun4v_vintr_set_valid, .-sun4v_vintr_set_valid
2437
2438 /* %o0: device handle
2439 * %o1: device INO
2440 * %o2: pointer to unsigned long state
2441 *
2442 * returns %o0: status
2443 */
2444 .globl sun4v_vintr_get_state
2445 .type sun4v_vintr_get_state,#function
2446sun4v_vintr_get_state:
2447 mov %o2, %g1
2448 mov HV_FAST_VINTR_GET_STATE, %o5
2449 ta HV_FAST_TRAP
2450 stx %o1, [%g1]
2451 retl
2452 nop
2453 .size sun4v_vintr_get_state, .-sun4v_vintr_get_state
2454
2455 /* %o0: device handle
2456 * %o1: device INO
2457 * %o2: state
2458 *
2459 * returns %o0: status
2460 */
2461 .globl sun4v_vintr_set_state
2462 .type sun4v_vintr_set_state,#function
2463sun4v_vintr_set_state:
2464 mov HV_FAST_VINTR_SET_STATE, %o5
2465 ta HV_FAST_TRAP
2466 retl
2467 nop
2468 .size sun4v_vintr_set_state, .-sun4v_vintr_set_state
2469
2470 /* %o0: device handle
2471 * %o1: device INO
2472 * %o2: pointer to unsigned long cpuid
2473 *
2474 * returns %o0: status
2475 */
2476 .globl sun4v_vintr_get_target
2477 .type sun4v_vintr_get_target,#function
2478sun4v_vintr_get_target:
2479 mov %o2, %g1
2480 mov HV_FAST_VINTR_GET_TARGET, %o5
2481 ta HV_FAST_TRAP
2482 stx %o1, [%g1]
2483 retl
2484 nop
2485 .size sun4v_vintr_get_target, .-sun4v_vintr_get_target
2486
2487 /* %o0: device handle
2488 * %o1: device INO
2489 * %o2: cpuid
2490 *
2491 * returns %o0: status
2492 */
2493 .globl sun4v_vintr_set_target
2494 .type sun4v_vintr_set_target,#function
2495sun4v_vintr_set_target:
2496 mov HV_FAST_VINTR_SET_TARGET, %o5
2497 ta HV_FAST_TRAP
2498 retl
2499 nop
2500 .size sun4v_vintr_set_target, .-sun4v_vintr_set_target
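Every stub added to entry.S above follows the single calling convention the block comments describe: the hypervisor function number is loaded into %o5, the arguments stay in %o0-%o4, "ta HV_FAST_TRAP" (or HV_CORE_TRAP for the version calls) enters the hypervisor, the status comes back in %o0 and any single return value in %o1, which the stub either copies into %o0 or stores through a pointer argument saved beforehand. From C these are therefore ordinary functions returning a status. A hedged usage sketch for the new TOD pair, with the prototypes inferred from the register comments above and the caller invented for illustration (0 is the hypervisor's HV_EOK success status):

/* Prototypes as implied by the %o0/%o1 comments on sun4v_tod_get/set. */
extern unsigned long sun4v_tod_get(unsigned long *time);
extern unsigned long sun4v_tod_set(unsigned long time);

/* Illustrative caller: read the hypervisor time-of-day in seconds. */
static int example_read_tod(unsigned long *secs)
{
        unsigned long status = sun4v_tod_get(secs);     /* HV_FAST_TOD_GET */

        return (status == 0) ? 0 : -1;                  /* 0 == HV_EOK */
}

Writing the clock is symmetrical: sun4v_tod_set(new_secs) returns the same kind of status word.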
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index baea10a98196..77259526cb15 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -523,7 +523,7 @@ tlb_fixup_done:
523#else 523#else
524 mov 0, %o0 524 mov 0, %o0
525#endif 525#endif
526 stb %o0, [%g6 + TI_CPU] 526 sth %o0, [%g6 + TI_CPU]
527 527
528 /* Off we go.... */ 528 /* Off we go.... */
529 call start_kernel 529 call start_kernel
@@ -653,33 +653,54 @@ setup_tba:
653 restore 653 restore
654sparc64_boot_end: 654sparc64_boot_end:
655 655
656#include "ktlb.S"
657#include "tsb.S"
658#include "etrap.S" 656#include "etrap.S"
659#include "rtrap.S" 657#include "rtrap.S"
660#include "winfixup.S" 658#include "winfixup.S"
661#include "entry.S" 659#include "entry.S"
662#include "sun4v_tlb_miss.S" 660#include "sun4v_tlb_miss.S"
663#include "sun4v_ivec.S" 661#include "sun4v_ivec.S"
662#include "ktlb.S"
663#include "tsb.S"
664 664
665/* 665/*
666 * The following skip makes sure the trap table in ttable.S is aligned 666 * The following skip makes sure the trap table in ttable.S is aligned
667 * on a 32K boundary as required by the v9 specs for TBA register. 667 * on a 32K boundary as required by the v9 specs for TBA register.
668 * 668 *
669 * We align to a 32K boundary, then we have the 32K kernel TSB, 669 * We align to a 32K boundary, then we have the 32K kernel TSB,
670 * then the 32K aligned trap table. 670 * the 64K kernel 4MB TSB, and then the 32K aligned trap table.
671 */ 671 */
6721: 6721:
673 .skip 0x4000 + _start - 1b 673 .skip 0x4000 + _start - 1b
674 674
675! 0x0000000000408000
676
675 .globl swapper_tsb 677 .globl swapper_tsb
676swapper_tsb: 678swapper_tsb:
677 .skip (32 * 1024) 679 .skip (32 * 1024)
678 680
679! 0x0000000000408000 681 .globl swapper_4m_tsb
682swapper_4m_tsb:
683 .skip (64 * 1024)
684
685! 0x0000000000420000
680 686
687 /* Some care needs to be exercised if you try to move the
688 * location of the trap table relative to other things. For
689 * one thing there are br* instructions in some of the
 690 * trap table entries which branch back to code in ktlb.S
691 * Those instructions can only handle a signed 16-bit
692 * displacement.
693 *
694 * There is a binutils bug (bugzilla #4558) which causes
695 * the relocation overflow checks for such instructions to
 696 * not be done correctly. So binutils will not notice the
697 * error and will instead write junk into the relocation and
698 * you'll have an unbootable kernel.
699 */
681#include "ttable.S" 700#include "ttable.S"
682 701
702! 0x0000000000428000
703
683#include "systbls.S" 704#include "systbls.S"
684 705
685 .data 706 .data
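The address annotations added in this hunk line up with the .skip directives: starting from the 32K-aligned 0x408000 noted above swapper_tsb, the 32K kernel TSB, the new 64K 4MB TSB and the 32K trap table follow back to back, which keeps the trap table on the 32K boundary the TBA register requires:

    0x408000 + 0x08000 (32K swapper_tsb)    = 0x410000
    0x410000 + 0x10000 (64K swapper_4m_tsb) = 0x420000   (ttable.S, 32K aligned)
    0x420000 + 0x08000 (32K trap table)     = 0x428000   (systbls.S)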
diff --git a/arch/sparc64/kernel/hvapi.c b/arch/sparc64/kernel/hvapi.c
index f03ffc829c7a..f34f5d6181ef 100644
--- a/arch/sparc64/kernel/hvapi.c
+++ b/arch/sparc64/kernel/hvapi.c
@@ -9,6 +9,7 @@
9 9
10#include <asm/hypervisor.h> 10#include <asm/hypervisor.h>
11#include <asm/oplib.h> 11#include <asm/oplib.h>
12#include <asm/sstate.h>
12 13
13/* If the hypervisor indicates that the API setting 14/* If the hypervisor indicates that the API setting
14 * calls are unsupported, by returning HV_EBADTRAP or 15 * calls are unsupported, by returning HV_EBADTRAP or
@@ -107,7 +108,7 @@ int sun4v_hvapi_register(unsigned long group, unsigned long major,
107 p->minor = actual_minor; 108 p->minor = actual_minor;
108 ret = 0; 109 ret = 0;
109 } else if (hv_ret == HV_EBADTRAP || 110 } else if (hv_ret == HV_EBADTRAP ||
110 HV_ENOTSUPPORTED) { 111 hv_ret == HV_ENOTSUPPORTED) {
111 if (p->flags & FLAG_PRE_API) { 112 if (p->flags & FLAG_PRE_API) {
112 if (major == 1) { 113 if (major == 1) {
113 p->major = 1; 114 p->major = 1;
@@ -179,6 +180,8 @@ void __init sun4v_hvapi_init(void)
179 if (sun4v_hvapi_register(group, major, &minor)) 180 if (sun4v_hvapi_register(group, major, &minor))
180 goto bad; 181 goto bad;
181 182
183 sun4v_sstate_init();
184
182 return; 185 return;
183 186
184bad: 187bad:
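With the corrected comparison, the pre-API fallback now triggers on either HV_EBADTRAP or HV_ENOTSUPPORTED as intended. For context, kernel code negotiates an API group with sun4v_hvapi_register() roughly as sketched below; the prototype is the one this file exports, while example_negotiate_group() and its error handling are invented for illustration:

/* Registers a group/major version with the hypervisor; the minor
 * argument carries the requested minor number in and the granted
 * minor number back out.
 */
extern int sun4v_hvapi_register(unsigned long group, unsigned long major,
                                unsigned long *minor);

static int example_negotiate_group(unsigned long group)
{
        unsigned long minor = 1;        /* lowest minor this caller can use */
        int err = sun4v_hvapi_register(group, 1, &minor);

        if (err)
                return err;     /* group or major version not offered */

        /* minor now holds whatever the hypervisor actually granted. */
        return 0;
}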
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3edc18e1b818..a36f8dd0c021 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -171,8 +171,6 @@ skip:
171 return 0; 171 return 0;
172} 172}
173 173
174extern unsigned long real_hard_smp_processor_id(void);
175
176static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) 174static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
177{ 175{
178 unsigned int tid; 176 unsigned int tid;
@@ -694,9 +692,20 @@ void init_irqwork_curcpu(void)
694 trap_block[cpu].irq_worklist = 0; 692 trap_block[cpu].irq_worklist = 0;
695} 693}
696 694
697static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) 695/* Please be very careful with register_one_mondo() and
696 * sun4v_register_mondo_queues().
697 *
698 * On SMP this gets invoked from the CPU trampoline before
699 * the cpu has fully taken over the trap table from OBP,
 700 * and its kernel stack + %g6 thread register state is
701 * not fully cooked yet.
702 *
703 * Therefore you cannot make any OBP calls, not even prom_printf,
704 * from these two routines.
705 */
706static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
698{ 707{
699 unsigned long num_entries = 128; 708 unsigned long num_entries = (qmask + 1) / 64;
700 unsigned long status; 709 unsigned long status;
701 710
702 status = sun4v_cpu_qconf(type, paddr, num_entries); 711 status = sun4v_cpu_qconf(type, paddr, num_entries);
@@ -711,44 +720,58 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
711{ 720{
712 struct trap_per_cpu *tb = &trap_block[this_cpu]; 721 struct trap_per_cpu *tb = &trap_block[this_cpu];
713 722
714 register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); 723 register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
715 register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); 724 tb->cpu_mondo_qmask);
716 register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); 725 register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
717 register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); 726 tb->dev_mondo_qmask);
727 register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
728 tb->resum_qmask);
729 register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
730 tb->nonresum_qmask);
718} 731}
719 732
720static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem) 733static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
721{ 734{
722 void *page; 735 unsigned long size = PAGE_ALIGN(qmask + 1);
736 unsigned long order = get_order(size);
737 void *p = NULL;
723 738
724 if (use_bootmem) 739 if (use_bootmem) {
725 page = alloc_bootmem_low_pages(PAGE_SIZE); 740 p = __alloc_bootmem_low(size, size, 0);
726 else 741 } else {
727 page = (void *) get_zeroed_page(GFP_ATOMIC); 742 struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
743 if (page)
744 p = page_address(page);
745 }
728 746
729 if (!page) { 747 if (!p) {
730 prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); 748 prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
731 prom_halt(); 749 prom_halt();
732 } 750 }
733 751
734 *pa_ptr = __pa(page); 752 *pa_ptr = __pa(p);
735} 753}
736 754
737static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem) 755static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
738{ 756{
739 void *page; 757 unsigned long size = PAGE_ALIGN(qmask + 1);
758 unsigned long order = get_order(size);
759 void *p = NULL;
740 760
741 if (use_bootmem) 761 if (use_bootmem) {
742 page = alloc_bootmem_low_pages(PAGE_SIZE); 762 p = __alloc_bootmem_low(size, size, 0);
743 else 763 } else {
744 page = (void *) get_zeroed_page(GFP_ATOMIC); 764 struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
765 if (page)
766 p = page_address(page);
767 }
745 768
746 if (!page) { 769 if (!p) {
747 prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); 770 prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
748 prom_halt(); 771 prom_halt();
749 } 772 }
750 773
751 *pa_ptr = __pa(page); 774 *pa_ptr = __pa(p);
752} 775}
753 776
754static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) 777static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
@@ -779,12 +802,12 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int
779 struct trap_per_cpu *tb = &trap_block[cpu]; 802 struct trap_per_cpu *tb = &trap_block[cpu];
780 803
781 if (alloc) { 804 if (alloc) {
782 alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); 805 alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
783 alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); 806 alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
784 alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); 807 alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
785 alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); 808 alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
786 alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); 809 alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
787 alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); 810 alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
788 811
789 init_cpu_send_mondo_info(tb, use_bootmem); 812 init_cpu_send_mondo_info(tb, use_bootmem);
790 } 813 }
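The queue geometry is no longer hard-coded: each sun4v mondo queue entry is 64 bytes, so register_one_mondo() recovers the entry count as (qmask + 1) / 64, and the allocators size the buffer as PAGE_ALIGN(qmask + 1), with the qmask values coming from the machine description (see get_one_mondo_bits() in mdesc.c later in this diff). A quick check with the default of 7 "bits" used for the CPU mondo queue shows the new code reduces to the old constants:

    qmask       = (1 << 7) * 64 - 1     = 8191
    num_entries = (qmask + 1) / 64      = 128    (the previously hard-coded value)
    allocation  = PAGE_ALIGN(qmask + 1) = 8192 bytes, i.e. one 8K sparc64 page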
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index ad46e2024f4b..5a8377b54955 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -11,12 +11,12 @@
11/* ITLB ** ICACHE line 2: TSB compare and TLB load */ 11/* ITLB ** ICACHE line 2: TSB compare and TLB load */
12 bne,pn %xcc, tsb_miss_itlb ! Miss 12 bne,pn %xcc, tsb_miss_itlb ! Miss
13 mov FAULT_CODE_ITLB, %g3 13 mov FAULT_CODE_ITLB, %g3
14 andcc %g5, _PAGE_EXEC_4U, %g0 ! Executable? 14 sethi %hi(_PAGE_EXEC_4U), %g4
15 andcc %g5, %g4, %g0 ! Executable?
15 be,pn %xcc, tsb_do_fault 16 be,pn %xcc, tsb_do_fault
16 nop ! Delay slot, fill me 17 nop ! Delay slot, fill me
17 stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB 18 stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB
18 retry ! Trap done 19 retry ! Trap done
19 nop
20 20
21/* ITLB ** ICACHE line 3: */ 21/* ITLB ** ICACHE line 3: */
22 nop 22 nop
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
new file mode 100644
index 000000000000..9246c2cf9574
--- /dev/null
+++ b/arch/sparc64/kernel/mdesc.c
@@ -0,0 +1,619 @@
1/* mdesc.c: Sun4V machine description handling.
2 *
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
4 */
5#include <linux/kernel.h>
6#include <linux/types.h>
7#include <linux/bootmem.h>
8#include <linux/log2.h>
9
10#include <asm/hypervisor.h>
11#include <asm/mdesc.h>
12#include <asm/prom.h>
13#include <asm/oplib.h>
14#include <asm/smp.h>
15
16/* Unlike the OBP device tree, the machine description is a full-on
17 * DAG. An arbitrary number of ARCs are possible from one
18 * node to other nodes and thus we can't use the OBP device_node
19 * data structure to represent these nodes inside of the kernel.
20 *
21 * Actually, it isn't even a DAG, because there are back pointers
22 * which create cycles in the graph.
23 *
24 * mdesc_hdr and mdesc_elem describe the layout of the data structure
25 * we get from the Hypervisor.
26 */
27struct mdesc_hdr {
28 u32 version; /* Transport version */
29 u32 node_sz; /* node block size */
30 u32 name_sz; /* name block size */
31 u32 data_sz; /* data block size */
32};
33
34struct mdesc_elem {
35 u8 tag;
36#define MD_LIST_END 0x00
37#define MD_NODE 0x4e
38#define MD_NODE_END 0x45
39#define MD_NOOP 0x20
40#define MD_PROP_ARC 0x61
41#define MD_PROP_VAL 0x76
42#define MD_PROP_STR 0x73
43#define MD_PROP_DATA 0x64
44 u8 name_len;
45 u16 resv;
46 u32 name_offset;
47 union {
48 struct {
49 u32 data_len;
50 u32 data_offset;
51 } data;
52 u64 val;
53 } d;
54};
55
56static struct mdesc_hdr *main_mdesc;
57static struct mdesc_node *allnodes;
58
59static struct mdesc_node *allnodes_tail;
60static unsigned int unique_id;
61
62static struct mdesc_node **mdesc_hash;
63static unsigned int mdesc_hash_size;
64
65static inline unsigned int node_hashfn(u64 node)
66{
67 return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16)))
68 & (mdesc_hash_size - 1);
69}
70
71static inline void hash_node(struct mdesc_node *mp)
72{
73 struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)];
74
75 mp->hash_next = *head;
76 *head = mp;
77
78 if (allnodes_tail) {
79 allnodes_tail->allnodes_next = mp;
80 allnodes_tail = mp;
81 } else {
82 allnodes = allnodes_tail = mp;
83 }
84}
85
86static struct mdesc_node *find_node(u64 node)
87{
88 struct mdesc_node *mp = mdesc_hash[node_hashfn(node)];
89
90 while (mp) {
91 if (mp->node == node)
92 return mp;
93
94 mp = mp->hash_next;
95 }
96 return NULL;
97}
98
99struct property *md_find_property(const struct mdesc_node *mp,
100 const char *name,
101 int *lenp)
102{
103 struct property *pp;
104
105 for (pp = mp->properties; pp != 0; pp = pp->next) {
106 if (strcasecmp(pp->name, name) == 0) {
107 if (lenp)
108 *lenp = pp->length;
109 break;
110 }
111 }
112 return pp;
113}
114EXPORT_SYMBOL(md_find_property);
115
116/*
117 * Find a property with a given name for a given node
118 * and return the value.
119 */
120const void *md_get_property(const struct mdesc_node *mp, const char *name,
121 int *lenp)
122{
123 struct property *pp = md_find_property(mp, name, lenp);
124 return pp ? pp->value : NULL;
125}
126EXPORT_SYMBOL(md_get_property);
127
128struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
129 const char *name)
130{
131 struct mdesc_node *mp;
132
133 mp = from ? from->allnodes_next : allnodes;
134 for (; mp != NULL; mp = mp->allnodes_next) {
135 if (strcmp(mp->name, name) == 0)
136 break;
137 }
138 return mp;
139}
140EXPORT_SYMBOL(md_find_node_by_name);
141
142static unsigned int mdesc_early_allocated;
143
144static void * __init mdesc_early_alloc(unsigned long size)
145{
146 void *ret;
147
148 ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
149 if (ret == NULL) {
150 prom_printf("MDESC: alloc of %lu bytes failed.\n", size);
151 prom_halt();
152 }
153
154 memset(ret, 0, size);
155
156 mdesc_early_allocated += size;
157
158 return ret;
159}
160
161static unsigned int __init count_arcs(struct mdesc_elem *ep)
162{
163 unsigned int ret = 0;
164
165 ep++;
166 while (ep->tag != MD_NODE_END) {
167 if (ep->tag == MD_PROP_ARC)
168 ret++;
169 ep++;
170 }
171 return ret;
172}
173
174static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names)
175{
176 unsigned int num_arcs = count_arcs(ep);
177 struct mdesc_node *mp;
178
179 mp = mdesc_early_alloc(sizeof(*mp) +
180 (num_arcs * sizeof(struct mdesc_arc)));
181 mp->name = names + ep->name_offset;
182 mp->node = node;
183 mp->unique_id = unique_id++;
184 mp->num_arcs = num_arcs;
185
186 hash_node(mp);
187}
188
189static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
190{
191 return (struct mdesc_elem *) (mdesc + 1);
192}
193
194static inline void *name_block(struct mdesc_hdr *mdesc)
195{
196 return ((void *) node_block(mdesc)) + mdesc->node_sz;
197}
198
199static inline void *data_block(struct mdesc_hdr *mdesc)
200{
201 return ((void *) name_block(mdesc)) + mdesc->name_sz;
202}
203
204/* In order to avoid recursion (the graph can be very deep) we use a
205 * two pass algorithm. First we allocate all the nodes and hash them.
206 * Then we iterate over each node, filling in the arcs and properties.
207 */
208static void __init build_all_nodes(struct mdesc_hdr *mdesc)
209{
210 struct mdesc_elem *start, *ep;
211 struct mdesc_node *mp;
212 const char *names;
213 void *data;
214 u64 last_node;
215
216 start = ep = node_block(mdesc);
217 last_node = mdesc->node_sz / 16;
218
219 names = name_block(mdesc);
220
221 while (1) {
222 u64 node = ep - start;
223
224 if (ep->tag == MD_LIST_END)
225 break;
226
227 if (ep->tag != MD_NODE) {
228 prom_printf("MDESC: Inconsistent element list.\n");
229 prom_halt();
230 }
231
232 mdesc_node_alloc(node, ep, names);
233
234 if (ep->d.val >= last_node) {
235 printk("MDESC: Warning, early break out of node scan.\n");
236 printk("MDESC: Next node [%lu] last_node [%lu].\n",
237 node, last_node);
238 break;
239 }
240
241 ep = start + ep->d.val;
242 }
243
244 data = data_block(mdesc);
245 for (mp = allnodes; mp; mp = mp->allnodes_next) {
246 struct mdesc_elem *ep = start + mp->node;
247 struct property **link = &mp->properties;
248 unsigned int this_arc = 0;
249
250 ep++;
251 while (ep->tag != MD_NODE_END) {
252 switch (ep->tag) {
253 case MD_PROP_ARC: {
254 struct mdesc_node *target;
255
256 if (this_arc >= mp->num_arcs) {
257 prom_printf("MDESC: ARC overrun [%u:%u]\n",
258 this_arc, mp->num_arcs);
259 prom_halt();
260 }
261 target = find_node(ep->d.val);
262 if (!target) {
263 printk("MDESC: Warning, arc points to "
264 "missing node, ignoring.\n");
265 break;
266 }
267 mp->arcs[this_arc].name =
268 (names + ep->name_offset);
269 mp->arcs[this_arc].arc = target;
270 this_arc++;
271 break;
272 }
273
274 case MD_PROP_VAL:
275 case MD_PROP_STR:
276 case MD_PROP_DATA: {
277 struct property *p = mdesc_early_alloc(sizeof(*p));
278
279 p->unique_id = unique_id++;
280 p->name = (char *) names + ep->name_offset;
281 if (ep->tag == MD_PROP_VAL) {
282 p->value = &ep->d.val;
283 p->length = 8;
284 } else {
285 p->value = data + ep->d.data.data_offset;
286 p->length = ep->d.data.data_len;
287 }
288 *link = p;
289 link = &p->next;
290 break;
291 }
292
293 case MD_NOOP:
294 break;
295
296 default:
297 printk("MDESC: Warning, ignoring unknown tag type %02x\n",
298 ep->tag);
299 }
300 ep++;
301 }
302 }
303}
304
305static unsigned int __init count_nodes(struct mdesc_hdr *mdesc)
306{
307 struct mdesc_elem *ep = node_block(mdesc);
308 struct mdesc_elem *end;
309 unsigned int cnt = 0;
310
311 end = ((void *)ep) + mdesc->node_sz;
312 while (ep < end) {
313 if (ep->tag == MD_NODE)
314 cnt++;
315 ep++;
316 }
317 return cnt;
318}
319
320static void __init report_platform_properties(void)
321{
322 struct mdesc_node *pn = md_find_node_by_name(NULL, "platform");
323 const char *s;
324 const u64 *v;
325
326 if (!pn) {
327 prom_printf("No platform node in machine-description.\n");
328 prom_halt();
329 }
330
331 s = md_get_property(pn, "banner-name", NULL);
332 printk("PLATFORM: banner-name [%s]\n", s);
333 s = md_get_property(pn, "name", NULL);
334 printk("PLATFORM: name [%s]\n", s);
335
336 v = md_get_property(pn, "hostid", NULL);
337 if (v)
338 printk("PLATFORM: hostid [%08lx]\n", *v);
339 v = md_get_property(pn, "serial#", NULL);
340 if (v)
341 printk("PLATFORM: serial# [%08lx]\n", *v);
342 v = md_get_property(pn, "stick-frequency", NULL);
343 printk("PLATFORM: stick-frequency [%08lx]\n", *v);
344 v = md_get_property(pn, "mac-address", NULL);
345 if (v)
346 printk("PLATFORM: mac-address [%lx]\n", *v);
347 v = md_get_property(pn, "watchdog-resolution", NULL);
348 if (v)
349 printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v);
350 v = md_get_property(pn, "watchdog-max-timeout", NULL);
351 if (v)
352 printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v);
353 v = md_get_property(pn, "max-cpus", NULL);
354 if (v)
355 printk("PLATFORM: max-cpus [%lu]\n", *v);
356}
357
358static int inline find_in_proplist(const char *list, const char *match, int len)
359{
360 while (len > 0) {
361 int l;
362
363 if (!strcmp(list, match))
364 return 1;
365 l = strlen(list) + 1;
366 list += l;
367 len -= l;
368 }
369 return 0;
370}
371
372static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp)
373{
374 const u64 *level = md_get_property(mp, "level", NULL);
375 const u64 *size = md_get_property(mp, "size", NULL);
376 const u64 *line_size = md_get_property(mp, "line-size", NULL);
377 const char *type;
378 int type_len;
379
380 type = md_get_property(mp, "type", &type_len);
381
382 switch (*level) {
383 case 1:
384 if (find_in_proplist(type, "instn", type_len)) {
385 c->icache_size = *size;
386 c->icache_line_size = *line_size;
387 } else if (find_in_proplist(type, "data", type_len)) {
388 c->dcache_size = *size;
389 c->dcache_line_size = *line_size;
390 }
391 break;
392
393 case 2:
394 c->ecache_size = *size;
395 c->ecache_line_size = *line_size;
396 break;
397
398 default:
399 break;
400 }
401
402 if (*level == 1) {
403 unsigned int i;
404
405 for (i = 0; i < mp->num_arcs; i++) {
406 struct mdesc_node *t = mp->arcs[i].arc;
407
408 if (strcmp(mp->arcs[i].name, "fwd"))
409 continue;
410
411 if (!strcmp(t->name, "cache"))
412 fill_in_one_cache(c, t);
413 }
414 }
415}
416
417static void __init mark_core_ids(struct mdesc_node *mp, int core_id)
418{
419 unsigned int i;
420
421 for (i = 0; i < mp->num_arcs; i++) {
422 struct mdesc_node *t = mp->arcs[i].arc;
423 const u64 *id;
424
425 if (strcmp(mp->arcs[i].name, "back"))
426 continue;
427
428 if (!strcmp(t->name, "cpu")) {
429 id = md_get_property(t, "id", NULL);
430 if (*id < NR_CPUS)
431 cpu_data(*id).core_id = core_id;
432 } else {
433 unsigned int j;
434
435 for (j = 0; j < t->num_arcs; j++) {
436 struct mdesc_node *n = t->arcs[j].arc;
437
438 if (strcmp(t->arcs[j].name, "back"))
439 continue;
440
441 if (strcmp(n->name, "cpu"))
442 continue;
443
444 id = md_get_property(n, "id", NULL);
445 if (*id < NR_CPUS)
446 cpu_data(*id).core_id = core_id;
447 }
448 }
449 }
450}
451
452static void __init set_core_ids(void)
453{
454 struct mdesc_node *mp;
455 int idx;
456
457 idx = 1;
458 md_for_each_node_by_name(mp, "cache") {
459 const u64 *level = md_get_property(mp, "level", NULL);
460 const char *type;
461 int len;
462
463 if (*level != 1)
464 continue;
465
466 type = md_get_property(mp, "type", &len);
467 if (!find_in_proplist(type, "instn", len))
468 continue;
469
470 mark_core_ids(mp, idx);
471
472 idx++;
473 }
474}
475
476static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
477{
478 u64 val;
479
480 if (!p)
481 goto use_default;
482 val = *p;
483
484 if (!val || val >= 64)
485 goto use_default;
486
487 *mask = ((1U << val) * 64U) - 1U;
488 return;
489
490use_default:
491 *mask = ((1U << def) * 64U) - 1U;
492}
493
494static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb)
495{
496 const u64 *val;
497
498 val = md_get_property(mp, "q-cpu-mondo-#bits", NULL);
499 get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);
500
501 val = md_get_property(mp, "q-dev-mondo-#bits", NULL);
502 get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);
503
504 val = md_get_property(mp, "q-resumable-#bits", NULL);
505 get_one_mondo_bits(val, &tb->resum_qmask, 6);
506
507 val = md_get_property(mp, "q-nonresumable-#bits", NULL);
508 get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
509}
510
511static void __init mdesc_fill_in_cpu_data(void)
512{
513 struct mdesc_node *mp;
514
515 ncpus_probed = 0;
516 md_for_each_node_by_name(mp, "cpu") {
517 const u64 *id = md_get_property(mp, "id", NULL);
518 const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL);
519 struct trap_per_cpu *tb;
520 cpuinfo_sparc *c;
521 unsigned int i;
522 int cpuid;
523
524 ncpus_probed++;
525
526 cpuid = *id;
527
528#ifdef CONFIG_SMP
529 if (cpuid >= NR_CPUS)
530 continue;
531#else
532 /* On uniprocessor we only want the values for the
533 * real physical cpu the kernel booted onto, however
534 * cpu_data() only has one entry at index 0.
535 */
536 if (cpuid != real_hard_smp_processor_id())
537 continue;
538 cpuid = 0;
539#endif
540
541 c = &cpu_data(cpuid);
542 c->clock_tick = *cfreq;
543
544 tb = &trap_block[cpuid];
545 get_mondo_data(mp, tb);
546
547 for (i = 0; i < mp->num_arcs; i++) {
548 struct mdesc_node *t = mp->arcs[i].arc;
549 unsigned int j;
550
551 if (strcmp(mp->arcs[i].name, "fwd"))
552 continue;
553
554 if (!strcmp(t->name, "cache")) {
555 fill_in_one_cache(c, t);
556 continue;
557 }
558
559 for (j = 0; j < t->num_arcs; j++) {
560 struct mdesc_node *n;
561
562 n = t->arcs[j].arc;
563 if (strcmp(t->arcs[j].name, "fwd"))
564 continue;
565
566 if (!strcmp(n->name, "cache"))
567 fill_in_one_cache(c, n);
568 }
569 }
570
571#ifdef CONFIG_SMP
572 cpu_set(cpuid, cpu_present_map);
573 cpu_set(cpuid, phys_cpu_present_map);
574#endif
575
576 c->core_id = 0;
577 }
578
579 set_core_ids();
580
581 smp_fill_in_sib_core_maps();
582}
583
584void __init sun4v_mdesc_init(void)
585{
586 unsigned long len, real_len, status;
587
588 (void) sun4v_mach_desc(0UL, 0UL, &len);
589
590 printk("MDESC: Size is %lu bytes.\n", len);
591
592 main_mdesc = mdesc_early_alloc(len);
593
594 status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len);
595 if (status != HV_EOK || real_len > len) {
596 prom_printf("sun4v_mach_desc fails, err(%lu), "
597 "len(%lu), real_len(%lu)\n",
598 status, len, real_len);
599 prom_halt();
600 }
601
602 len = count_nodes(main_mdesc);
603 printk("MDESC: %lu nodes.\n", len);
604
605 len = roundup_pow_of_two(len);
606
607 mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *));
608 mdesc_hash_size = len;
609
610 printk("MDESC: Hash size %lu entries.\n", len);
611
612 build_all_nodes(main_mdesc);
613
614 printk("MDESC: Built graph with %u bytes of memory.\n",
615 mdesc_early_allocated);
616
617 report_platform_properties();
618 mdesc_fill_in_cpu_data();
619}
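(Editor's sketch, not part of the patch.) The queue-mask arithmetic in get_one_mondo_bits() above can be checked in isolation: the machine description reports each interrupt queue size as a bit count, each queue entry is 64 bytes, so the byte-offset mask is ((1 << bits) * 64) - 1. With the default of 7 bits for the cpu and device mondo queues this evaluates to 8191, i.e. the 8192 - 1 value that the sun4v_ivec.S hunks further down stop hard-coding. The stand-alone helper below is hypothetical user-space code, not kernel code:

#include <stdio.h>

/* Mirrors the logic of get_one_mondo_bits(): fall back to a default
 * bit count when the property is missing or out of range, then turn
 * the entry count (1 << bits) into a byte-offset mask (64 bytes/entry).
 */
static unsigned int mondo_qmask(const unsigned long *p, unsigned char def)
{
	unsigned long bits = (p && *p && *p < 64) ? *p : def;

	return ((1U << bits) * 64U) - 1U;
}

int main(void)
{
	unsigned long seven = 7;

	/* Default cpu/dev mondo queues: 128 entries * 64 bytes -> 8191. */
	printf("%u\n", mondo_qmask(&seven, 7));
	/* Missing property: fall back to the default bit count (here 6). */
	printf("%u\n", mondo_qmask(NULL, 6));
	return 0;
}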
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index d4c077dc5e85..38a32bc95d22 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -306,6 +306,20 @@ static void __init pci_controller_probe(void)
306 pci_controller_scan(pci_controller_init); 306 pci_controller_scan(pci_controller_init);
307} 307}
308 308
309static int ofpci_verbose;
310
311static int __init ofpci_debug(char *str)
312{
313 int val = 0;
314
315 get_option(&str, &val);
316 if (val)
317 ofpci_verbose = 1;
318 return 1;
319}
320
321__setup("ofpci_debug=", ofpci_debug);
322
309static unsigned long pci_parse_of_flags(u32 addr0) 323static unsigned long pci_parse_of_flags(u32 addr0)
310{ 324{
311 unsigned long flags = 0; 325 unsigned long flags = 0;
@@ -337,7 +351,9 @@ static void pci_parse_of_addrs(struct of_device *op,
337 addrs = of_get_property(node, "assigned-addresses", &proplen); 351 addrs = of_get_property(node, "assigned-addresses", &proplen);
338 if (!addrs) 352 if (!addrs)
339 return; 353 return;
340 printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs); 354 if (ofpci_verbose)
355 printk(" parse addresses (%d bytes) @ %p\n",
356 proplen, addrs);
341 op_res = &op->resource[0]; 357 op_res = &op->resource[0];
342 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { 358 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
343 struct resource *res; 359 struct resource *res;
@@ -348,8 +364,9 @@ static void pci_parse_of_addrs(struct of_device *op,
348 if (!flags) 364 if (!flags)
349 continue; 365 continue;
350 i = addrs[0] & 0xff; 366 i = addrs[0] & 0xff;
351 printk(" start: %lx, end: %lx, i: %x\n", 367 if (ofpci_verbose)
352 op_res->start, op_res->end, i); 368 printk(" start: %lx, end: %lx, i: %x\n",
369 op_res->start, op_res->end, i);
353 370
354 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { 371 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
355 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; 372 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -393,8 +410,9 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
393 if (type == NULL) 410 if (type == NULL)
394 type = ""; 411 type = "";
395 412
396 printk(" create device, devfn: %x, type: %s hostcontroller(%d)\n", 413 if (ofpci_verbose)
397 devfn, type, host_controller); 414 printk(" create device, devfn: %x, type: %s\n",
415 devfn, type);
398 416
399 dev->bus = bus; 417 dev->bus = bus;
400 dev->sysdata = node; 418 dev->sysdata = node;
@@ -434,8 +452,9 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
434 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), 452 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
435 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 453 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
436 } 454 }
437 printk(" class: 0x%x device name: %s\n", 455 if (ofpci_verbose)
438 dev->class, pci_name(dev)); 456 printk(" class: 0x%x device name: %s\n",
457 dev->class, pci_name(dev));
439 458
440 /* I have seen IDE devices which will not respond to 459 /* I have seen IDE devices which will not respond to
441 * the bmdma simplex check reads if bus mastering is 460 * the bmdma simplex check reads if bus mastering is
@@ -469,7 +488,8 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
469 } 488 }
470 pci_parse_of_addrs(sd->op, node, dev); 489 pci_parse_of_addrs(sd->op, node, dev);
471 490
472 printk(" adding to system ...\n"); 491 if (ofpci_verbose)
492 printk(" adding to system ...\n");
473 493
474 pci_device_add(dev, bus); 494 pci_device_add(dev, bus);
475 495
@@ -547,7 +567,8 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
547 unsigned int flags; 567 unsigned int flags;
548 u64 size; 568 u64 size;
549 569
550 printk("of_scan_pci_bridge(%s)\n", node->full_name); 570 if (ofpci_verbose)
571 printk("of_scan_pci_bridge(%s)\n", node->full_name);
551 572
552 /* parse bus-range property */ 573 /* parse bus-range property */
553 busrange = of_get_property(node, "bus-range", &len); 574 busrange = of_get_property(node, "bus-range", &len);
@@ -632,7 +653,8 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
632simba_cont: 653simba_cont:
633 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 654 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
634 bus->number); 655 bus->number);
635 printk(" bus name: %s\n", bus->name); 656 if (ofpci_verbose)
657 printk(" bus name: %s\n", bus->name);
636 658
637 pci_of_scan_bus(pbm, node, bus); 659 pci_of_scan_bus(pbm, node, bus);
638} 660}
@@ -646,12 +668,14 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
646 int reglen, devfn; 668 int reglen, devfn;
647 struct pci_dev *dev; 669 struct pci_dev *dev;
648 670
649 printk("PCI: scan_bus[%s] bus no %d\n", 671 if (ofpci_verbose)
650 node->full_name, bus->number); 672 printk("PCI: scan_bus[%s] bus no %d\n",
673 node->full_name, bus->number);
651 674
652 child = NULL; 675 child = NULL;
653 while ((child = of_get_next_child(node, child)) != NULL) { 676 while ((child = of_get_next_child(node, child)) != NULL) {
654 printk(" * %s\n", child->full_name); 677 if (ofpci_verbose)
678 printk(" * %s\n", child->full_name);
655 reg = of_get_property(child, "reg", &reglen); 679 reg = of_get_property(child, "reg", &reglen);
656 if (reg == NULL || reglen < 20) 680 if (reg == NULL || reglen < 20)
657 continue; 681 continue;
@@ -661,7 +685,9 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
661 dev = of_create_pci_dev(pbm, child, bus, devfn, 0); 685 dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
662 if (!dev) 686 if (!dev)
663 continue; 687 continue;
664 printk("PCI: dev header type: %x\n", dev->hdr_type); 688 if (ofpci_verbose)
689 printk("PCI: dev header type: %x\n",
690 dev->hdr_type);
665 691
666 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 692 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
667 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 693 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
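(Editor's note, not part of the patch.) All of the scan-time printk() calls above are now gated on ofpci_verbose, which is only set by the early __setup() parameter registered here. A hypothetical boot command-line fragment that re-enables the old verbose output (get_option() parses the value, so any non-zero value turns it on):

	ofpci_debug=1

Booting without the parameter leaves the PCI device scan quiet by default.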
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index e2377796de89..323d6c278518 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -762,9 +762,10 @@ void sabre_init(struct device_node *dp, char *model_name)
762 /* Of course, Sun has to encode things a thousand 762 /* Of course, Sun has to encode things a thousand
763 * different ways, inconsistently. 763 * different ways, inconsistently.
764 */ 764 */
765 cpu_find_by_instance(0, &dp, NULL); 765 for_each_node_by_type(dp, "cpu") {
766 if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) 766 if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe"))
767 hummingbird_p = 1; 767 hummingbird_p = 1;
768 }
768 } 769 }
769 } 770 }
770 771
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 044e8ec4c0f5..6b3fe2c1d65e 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -12,6 +12,7 @@
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/msi.h> 14#include <linux/msi.h>
15#include <linux/log2.h>
15 16
16#include <asm/iommu.h> 17#include <asm/iommu.h>
17#include <asm/irq.h> 18#include <asm/irq.h>
@@ -26,6 +27,9 @@
26 27
27#include "pci_sun4v.h" 28#include "pci_sun4v.h"
28 29
30static unsigned long vpci_major = 1;
31static unsigned long vpci_minor = 1;
32
29#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) 33#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
30 34
31struct iommu_batch { 35struct iommu_batch {
@@ -638,9 +642,8 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
638{ 642{
639 struct iommu *iommu = pbm->iommu; 643 struct iommu *iommu = pbm->iommu;
640 struct property *prop; 644 struct property *prop;
641 unsigned long num_tsb_entries, sz; 645 unsigned long num_tsb_entries, sz, tsbsize;
642 u32 vdma[2], dma_mask, dma_offset; 646 u32 vdma[2], dma_mask, dma_offset;
643 int tsbsize;
644 647
645 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL); 648 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
646 if (prop) { 649 if (prop) {
@@ -654,31 +657,15 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
654 vdma[1] = 0x80000000; 657 vdma[1] = 0x80000000;
655 } 658 }
656 659
657 dma_mask = vdma[0]; 660 if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
658 switch (vdma[1]) { 661 prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
659 case 0x20000000: 662 vdma[0], vdma[1]);
660 dma_mask |= 0x1fffffff; 663 prom_halt();
661 tsbsize = 64;
662 break;
663
664 case 0x40000000:
665 dma_mask |= 0x3fffffff;
666 tsbsize = 128;
667 break;
668
669 case 0x80000000:
670 dma_mask |= 0x7fffffff;
671 tsbsize = 256;
672 break;
673
674 default:
675 prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
676 prom_halt();
677 }; 664 };
678 665
679 tsbsize *= (8 * 1024); 666 dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
680 667 num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
681 num_tsb_entries = tsbsize / sizeof(iopte_t); 668 tsbsize = num_tsb_entries * sizeof(iopte_t);
682 669
683 dma_offset = vdma[0]; 670 dma_offset = vdma[0];
684 671
@@ -689,7 +676,7 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
689 iommu->dma_addr_mask = dma_mask; 676 iommu->dma_addr_mask = dma_mask;
690 677
691 /* Allocate and initialize the free area map. */ 678 /* Allocate and initialize the free area map. */
692 sz = num_tsb_entries / 8; 679 sz = (num_tsb_entries + 7) / 8;
693 sz = (sz + 7UL) & ~7UL; 680 sz = (sz + 7UL) & ~7UL;
694 iommu->arena.map = kzalloc(sz, GFP_KERNEL); 681 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
695 if (!iommu->arena.map) { 682 if (!iommu->arena.map) {
@@ -1178,6 +1165,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
1178 1165
1179void sun4v_pci_init(struct device_node *dp, char *model_name) 1166void sun4v_pci_init(struct device_node *dp, char *model_name)
1180{ 1167{
1168 static int hvapi_negotiated = 0;
1181 struct pci_controller_info *p; 1169 struct pci_controller_info *p;
1182 struct pci_pbm_info *pbm; 1170 struct pci_pbm_info *pbm;
1183 struct iommu *iommu; 1171 struct iommu *iommu;
@@ -1186,6 +1174,20 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
1186 u32 devhandle; 1174 u32 devhandle;
1187 int i; 1175 int i;
1188 1176
1177 if (!hvapi_negotiated++) {
1178 int err = sun4v_hvapi_register(HV_GRP_PCI,
1179 vpci_major,
1180 &vpci_minor);
1181
1182 if (err) {
1183 prom_printf("SUN4V_PCI: Could not register hvapi, "
1184 "err=%d\n", err);
1185 prom_halt();
1186 }
1187 printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
1188 vpci_major, vpci_minor);
1189 }
1190
1189 prop = of_find_property(dp, "reg", NULL); 1191 prop = of_find_property(dp, "reg", NULL);
1190 regs = prop->value; 1192 regs = prop->value;
1191 1193
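(Editor's sketch, not part of the patch.) The reworked sizing in pci_sun4v_iommu_init() above derives everything from the virtual-dma size instead of a fixed switch. A worked check, using stand-in definitions for IO_PAGE_SIZE (8K) and sizeof(iopte_t) (8 bytes) and the driver's fallback vdma values; roundup_pow_of_two_ul() is a user-space stand-in for the kernel's roundup_pow_of_two():

#include <stdio.h>

#define IO_PAGE_SIZE	8192UL	/* stand-in for the sparc64 constant */
#define IOPTE_SIZE	8UL	/* stand-in for sizeof(iopte_t) */

static unsigned long roundup_pow_of_two_ul(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long vdma[2] = { 0x80000000UL, 0x80000000UL }; /* base, size */
	unsigned long dma_mask = roundup_pow_of_two_ul(vdma[1]) - 1UL;
	unsigned long num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	unsigned long tsbsize = num_tsb_entries * IOPTE_SIZE;

	printf("dma_mask=%#lx entries=%lu tsb=%lu bytes\n",
	       dma_mask, num_tsb_entries, tsbsize);
	/* Prints dma_mask=0x7fffffff entries=262144 tsb=2097152 bytes:
	 * the same 2MB TSB the old 0x80000000 switch case produced, with
	 * the DMA address mask now taken from the size alone.
	 */
	return 0;
}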
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 699b24b890df..5d6adea3967f 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -19,6 +19,7 @@
19#include <asm/prom.h> 19#include <asm/prom.h>
20#include <asm/of_device.h> 20#include <asm/of_device.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sstate.h>
22 23
23#include <linux/unistd.h> 24#include <linux/unistd.h>
24 25
@@ -53,6 +54,7 @@ static void (*poweroff_method)(void) = machine_alt_power_off;
53 54
54void machine_power_off(void) 55void machine_power_off(void)
55{ 56{
57 sstate_poweroff();
56 if (!serial_console || scons_pwroff) { 58 if (!serial_console || scons_pwroff) {
57#ifdef CONFIG_PCI 59#ifdef CONFIG_PCI
58 if (power_reg) { 60 if (power_reg) {
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 952762bfb4c0..f5f97e2c669c 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -45,6 +45,7 @@
45#include <asm/mmu_context.h> 45#include <asm/mmu_context.h>
46#include <asm/unistd.h> 46#include <asm/unistd.h>
47#include <asm/hypervisor.h> 47#include <asm/hypervisor.h>
48#include <asm/sstate.h>
48 49
49/* #define VERBOSE_SHOWREGS */ 50/* #define VERBOSE_SHOWREGS */
50 51
@@ -106,6 +107,7 @@ extern void (*prom_keyboard)(void);
106 107
107void machine_halt(void) 108void machine_halt(void)
108{ 109{
110 sstate_halt();
109 if (!serial_console && prom_palette) 111 if (!serial_console && prom_palette)
110 prom_palette (1); 112 prom_palette (1);
111 if (prom_keyboard) 113 if (prom_keyboard)
@@ -116,6 +118,7 @@ void machine_halt(void)
116 118
117void machine_alt_power_off(void) 119void machine_alt_power_off(void)
118{ 120{
121 sstate_poweroff();
119 if (!serial_console && prom_palette) 122 if (!serial_console && prom_palette)
120 prom_palette(1); 123 prom_palette(1);
121 if (prom_keyboard) 124 if (prom_keyboard)
@@ -128,6 +131,7 @@ void machine_restart(char * cmd)
128{ 131{
129 char *p; 132 char *p;
130 133
134 sstate_reboot();
131 p = strchr (reboot_command, '\n'); 135 p = strchr (reboot_command, '\n');
132 if (p) *p = 0; 136 if (p) *p = 0;
133 if (!serial_console && prom_palette) 137 if (!serial_console && prom_palette)
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 02830e4671f5..dad4b3ba705f 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -28,6 +28,7 @@
28#include <asm/irq.h> 28#include <asm/irq.h>
29#include <asm/asi.h> 29#include <asm/asi.h>
30#include <asm/upa.h> 30#include <asm/upa.h>
31#include <asm/smp.h>
31 32
32static struct device_node *allnodes; 33static struct device_node *allnodes;
33 34
@@ -1665,6 +1666,150 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl
1665 return ret; 1666 return ret;
1666} 1667}
1667 1668
1669static const char *get_mid_prop(void)
1670{
1671 return (tlb_type == spitfire ? "upa-portid" : "portid");
1672}
1673
1674struct device_node *of_find_node_by_cpuid(int cpuid)
1675{
1676 struct device_node *dp;
1677 const char *mid_prop = get_mid_prop();
1678
1679 for_each_node_by_type(dp, "cpu") {
1680 int id = of_getintprop_default(dp, mid_prop, -1);
1681 const char *this_mid_prop = mid_prop;
1682
1683 if (id < 0) {
1684 this_mid_prop = "cpuid";
1685 id = of_getintprop_default(dp, this_mid_prop, -1);
1686 }
1687
1688 if (id < 0) {
1689 prom_printf("OF: Serious problem, cpu lacks "
1690 "%s property", this_mid_prop);
1691 prom_halt();
1692 }
1693 if (cpuid == id)
1694 return dp;
1695 }
1696 return NULL;
1697}
1698
1699static void __init of_fill_in_cpu_data(void)
1700{
1701 struct device_node *dp;
1702 const char *mid_prop = get_mid_prop();
1703
1704 ncpus_probed = 0;
1705 for_each_node_by_type(dp, "cpu") {
1706 int cpuid = of_getintprop_default(dp, mid_prop, -1);
1707 const char *this_mid_prop = mid_prop;
1708 struct device_node *portid_parent;
1709 int portid = -1;
1710
1711 portid_parent = NULL;
1712 if (cpuid < 0) {
1713 this_mid_prop = "cpuid";
1714 cpuid = of_getintprop_default(dp, this_mid_prop, -1);
1715 if (cpuid >= 0) {
1716 int limit = 2;
1717
1718 portid_parent = dp;
1719 while (limit--) {
1720 portid_parent = portid_parent->parent;
1721 if (!portid_parent)
1722 break;
1723 portid = of_getintprop_default(portid_parent,
1724 "portid", -1);
1725 if (portid >= 0)
1726 break;
1727 }
1728 }
1729 }
1730
1731 if (cpuid < 0) {
1732 prom_printf("OF: Serious problem, cpu lacks "
1733 "%s property", this_mid_prop);
1734 prom_halt();
1735 }
1736
1737 ncpus_probed++;
1738
1739#ifdef CONFIG_SMP
1740 if (cpuid >= NR_CPUS)
1741 continue;
1742#else
1743 /* On uniprocessor we only want the values for the
1744 * real physical cpu the kernel booted onto, however
1745 * cpu_data() only has one entry at index 0.
1746 */
1747 if (cpuid != real_hard_smp_processor_id())
1748 continue;
1749 cpuid = 0;
1750#endif
1751
1752 cpu_data(cpuid).clock_tick =
1753 of_getintprop_default(dp, "clock-frequency", 0);
1754
1755 if (portid_parent) {
1756 cpu_data(cpuid).dcache_size =
1757 of_getintprop_default(dp, "l1-dcache-size",
1758 16 * 1024);
1759 cpu_data(cpuid).dcache_line_size =
1760 of_getintprop_default(dp, "l1-dcache-line-size",
1761 32);
1762 cpu_data(cpuid).icache_size =
1763 of_getintprop_default(dp, "l1-icache-size",
1764 8 * 1024);
1765 cpu_data(cpuid).icache_line_size =
1766 of_getintprop_default(dp, "l1-icache-line-size",
1767 32);
1768 cpu_data(cpuid).ecache_size =
1769 of_getintprop_default(dp, "l2-cache-size", 0);
1770 cpu_data(cpuid).ecache_line_size =
1771 of_getintprop_default(dp, "l2-cache-line-size", 0);
1772 if (!cpu_data(cpuid).ecache_size ||
1773 !cpu_data(cpuid).ecache_line_size) {
1774 cpu_data(cpuid).ecache_size =
1775 of_getintprop_default(portid_parent,
1776 "l2-cache-size",
1777 (4 * 1024 * 1024));
1778 cpu_data(cpuid).ecache_line_size =
1779 of_getintprop_default(portid_parent,
1780 "l2-cache-line-size", 64);
1781 }
1782
1783 cpu_data(cpuid).core_id = portid + 1;
1784 } else {
1785 cpu_data(cpuid).dcache_size =
1786 of_getintprop_default(dp, "dcache-size", 16 * 1024);
1787 cpu_data(cpuid).dcache_line_size =
1788 of_getintprop_default(dp, "dcache-line-size", 32);
1789
1790 cpu_data(cpuid).icache_size =
1791 of_getintprop_default(dp, "icache-size", 16 * 1024);
1792 cpu_data(cpuid).icache_line_size =
1793 of_getintprop_default(dp, "icache-line-size", 32);
1794
1795 cpu_data(cpuid).ecache_size =
1796 of_getintprop_default(dp, "ecache-size",
1797 (4 * 1024 * 1024));
1798 cpu_data(cpuid).ecache_line_size =
1799 of_getintprop_default(dp, "ecache-line-size", 64);
1800
1801 cpu_data(cpuid).core_id = 0;
1802 }
1803
1804#ifdef CONFIG_SMP
1805 cpu_set(cpuid, cpu_present_map);
1806 cpu_set(cpuid, phys_cpu_present_map);
1807#endif
1808 }
1809
1810 smp_fill_in_sib_core_maps();
1811}
1812
1668void __init prom_build_devicetree(void) 1813void __init prom_build_devicetree(void)
1669{ 1814{
1670 struct device_node **nextp; 1815 struct device_node **nextp;
@@ -1679,4 +1824,7 @@ void __init prom_build_devicetree(void)
1679 &nextp); 1824 &nextp);
1680 printk("PROM: Built device tree with %u bytes of memory.\n", 1825 printk("PROM: Built device tree with %u bytes of memory.\n",
1681 prom_early_allocated); 1826 prom_early_allocated);
1827
1828 if (tlb_type != hypervisor)
1829 of_fill_in_cpu_data();
1682} 1830}
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index dea9c3c9ec5f..de9b4c13f1c7 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -46,11 +46,17 @@
46#include <asm/sections.h> 46#include <asm/sections.h>
47#include <asm/setup.h> 47#include <asm/setup.h>
48#include <asm/mmu.h> 48#include <asm/mmu.h>
49#include <asm/ns87303.h>
49 50
50#ifdef CONFIG_IP_PNP 51#ifdef CONFIG_IP_PNP
51#include <net/ipconfig.h> 52#include <net/ipconfig.h>
52#endif 53#endif
53 54
55/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
56 * operations in asm/ns87303.h
57 */
58DEFINE_SPINLOCK(ns87303_lock);
59
54struct screen_info screen_info = { 60struct screen_info screen_info = {
55 0, 0, /* orig-x, orig-y */ 61 0, 0, /* orig-x, orig-y */
56 0, /* unused */ 62 0, /* unused */
@@ -370,8 +376,6 @@ void __init setup_arch(char **cmdline_p)
370 init_cur_cpu_trap(current_thread_info()); 376 init_cur_cpu_trap(current_thread_info());
371 377
372 paging_init(); 378 paging_init();
373
374 smp_setup_cpu_possible_map();
375} 379}
376 380
377static int __init set_preferred_console(void) 381static int __init set_preferred_console(void)
@@ -424,7 +428,7 @@ extern void mmu_info(struct seq_file *);
424unsigned int dcache_parity_tl1_occurred; 428unsigned int dcache_parity_tl1_occurred;
425unsigned int icache_parity_tl1_occurred; 429unsigned int icache_parity_tl1_occurred;
426 430
427static int ncpus_probed; 431int ncpus_probed;
428 432
429static int show_cpuinfo(struct seq_file *m, void *__unused) 433static int show_cpuinfo(struct seq_file *m, void *__unused)
430{ 434{
@@ -516,14 +520,6 @@ static int __init topology_init(void)
516 520
517 err = -ENOMEM; 521 err = -ENOMEM;
518 522
519 /* Count the number of physically present processors in
520 * the machine, even on uniprocessor, so that /proc/cpuinfo
521 * output is consistent with 2.4.x
522 */
523 ncpus_probed = 0;
524 while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
525 ncpus_probed++;
526
527 for_each_possible_cpu(i) { 523 for_each_possible_cpu(i) {
528 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); 524 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
529 if (p) { 525 if (p) {
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 24fdf1d0adc5..c550bba3490a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -40,6 +40,7 @@
40#include <asm/tlb.h> 40#include <asm/tlb.h>
41#include <asm/sections.h> 41#include <asm/sections.h>
42#include <asm/prom.h> 42#include <asm/prom.h>
43#include <asm/mdesc.h>
43 44
44extern void calibrate_delay(void); 45extern void calibrate_delay(void);
45 46
@@ -75,53 +76,6 @@ void smp_bogo(struct seq_file *m)
75 i, cpu_data(i).clock_tick); 76 i, cpu_data(i).clock_tick);
76} 77}
77 78
78void __init smp_store_cpu_info(int id)
79{
80 struct device_node *dp;
81 int def;
82
83 cpu_data(id).udelay_val = loops_per_jiffy;
84
85 cpu_find_by_mid(id, &dp);
86 cpu_data(id).clock_tick =
87 of_getintprop_default(dp, "clock-frequency", 0);
88
89 def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
90 cpu_data(id).dcache_size =
91 of_getintprop_default(dp, "dcache-size", def);
92
93 def = 32;
94 cpu_data(id).dcache_line_size =
95 of_getintprop_default(dp, "dcache-line-size", def);
96
97 def = 16 * 1024;
98 cpu_data(id).icache_size =
99 of_getintprop_default(dp, "icache-size", def);
100
101 def = 32;
102 cpu_data(id).icache_line_size =
103 of_getintprop_default(dp, "icache-line-size", def);
104
105 def = ((tlb_type == hypervisor) ?
106 (3 * 1024 * 1024) :
107 (4 * 1024 * 1024));
108 cpu_data(id).ecache_size =
109 of_getintprop_default(dp, "ecache-size", def);
110
111 def = 64;
112 cpu_data(id).ecache_line_size =
113 of_getintprop_default(dp, "ecache-line-size", def);
114
115 printk("CPU[%d]: Caches "
116 "D[sz(%d):line_sz(%d)] "
117 "I[sz(%d):line_sz(%d)] "
118 "E[sz(%d):line_sz(%d)]\n",
119 id,
120 cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
121 cpu_data(id).icache_size, cpu_data(id).icache_line_size,
122 cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
123}
124
125extern void setup_sparc64_timer(void); 79extern void setup_sparc64_timer(void);
126 80
127static volatile unsigned long callin_flag = 0; 81static volatile unsigned long callin_flag = 0;
@@ -145,7 +99,7 @@ void __init smp_callin(void)
145 local_irq_enable(); 99 local_irq_enable();
146 100
147 calibrate_delay(); 101 calibrate_delay();
148 smp_store_cpu_info(cpuid); 102 cpu_data(cpuid).udelay_val = loops_per_jiffy;
149 callin_flag = 1; 103 callin_flag = 1;
150 __asm__ __volatile__("membar #Sync\n\t" 104 __asm__ __volatile__("membar #Sync\n\t"
151 "flush %%g6" : : : "memory"); 105 "flush %%g6" : : : "memory");
@@ -340,9 +294,8 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
340 294
341 prom_startcpu_cpuid(cpu, entry, cookie); 295 prom_startcpu_cpuid(cpu, entry, cookie);
342 } else { 296 } else {
343 struct device_node *dp; 297 struct device_node *dp = of_find_node_by_cpuid(cpu);
344 298
345 cpu_find_by_mid(cpu, &dp);
346 prom_startcpu(dp->node, entry, cookie); 299 prom_startcpu(dp->node, entry, cookie);
347 } 300 }
348 301
@@ -447,7 +400,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
447static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) 400static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
448{ 401{
449 u64 pstate, ver; 402 u64 pstate, ver;
450 int nack_busy_id, is_jbus; 403 int nack_busy_id, is_jbus, need_more;
451 404
452 if (cpus_empty(mask)) 405 if (cpus_empty(mask))
453 return; 406 return;
@@ -463,6 +416,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
463 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); 416 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
464 417
465retry: 418retry:
419 need_more = 0;
466 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" 420 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
467 : : "r" (pstate), "i" (PSTATE_IE)); 421 : : "r" (pstate), "i" (PSTATE_IE));
468 422
@@ -491,6 +445,10 @@ retry:
491 : /* no outputs */ 445 : /* no outputs */
492 : "r" (target), "i" (ASI_INTR_W)); 446 : "r" (target), "i" (ASI_INTR_W));
493 nack_busy_id++; 447 nack_busy_id++;
448 if (nack_busy_id == 32) {
449 need_more = 1;
450 break;
451 }
494 } 452 }
495 } 453 }
496 454
@@ -507,6 +465,16 @@ retry:
507 if (dispatch_stat == 0UL) { 465 if (dispatch_stat == 0UL) {
508 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 466 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
509 : : "r" (pstate)); 467 : : "r" (pstate));
468 if (unlikely(need_more)) {
469 int i, cnt = 0;
470 for_each_cpu_mask(i, mask) {
471 cpu_clear(i, mask);
472 cnt++;
473 if (cnt == 32)
474 break;
475 }
476 goto retry;
477 }
510 return; 478 return;
511 } 479 }
512 if (!--stuck) 480 if (!--stuck)
@@ -544,6 +512,8 @@ retry:
544 if ((dispatch_stat & check_mask) == 0) 512 if ((dispatch_stat & check_mask) == 0)
545 cpu_clear(i, mask); 513 cpu_clear(i, mask);
546 this_busy_nack += 2; 514 this_busy_nack += 2;
515 if (this_busy_nack == 64)
516 break;
547 } 517 }
548 518
549 goto retry; 519 goto retry;
@@ -1191,23 +1161,14 @@ int setup_profiling_timer(unsigned int multiplier)
1191 1161
1192static void __init smp_tune_scheduling(void) 1162static void __init smp_tune_scheduling(void)
1193{ 1163{
1194 struct device_node *dp; 1164 unsigned int smallest = ~0U;
1195 int instance; 1165 int i;
1196 unsigned int def, smallest = ~0U;
1197
1198 def = ((tlb_type == hypervisor) ?
1199 (3 * 1024 * 1024) :
1200 (4 * 1024 * 1024));
1201 1166
1202 instance = 0; 1167 for (i = 0; i < NR_CPUS; i++) {
1203 while (!cpu_find_by_instance(instance, &dp, NULL)) { 1168 unsigned int val = cpu_data(i).ecache_size;
1204 unsigned int val;
1205 1169
1206 val = of_getintprop_default(dp, "ecache-size", def); 1170 if (val && val < smallest)
1207 if (val < smallest)
1208 smallest = val; 1171 smallest = val;
1209
1210 instance++;
1211 } 1172 }
1212 1173
1213 /* Any value less than 256K is nonsense. */ 1174 /* Any value less than 256K is nonsense. */
@@ -1230,58 +1191,42 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1230 int i; 1191 int i;
1231 1192
1232 if (num_possible_cpus() > max_cpus) { 1193 if (num_possible_cpus() > max_cpus) {
1233 int instance, mid; 1194 for_each_possible_cpu(i) {
1234 1195 if (i != boot_cpu_id) {
1235 instance = 0; 1196 cpu_clear(i, phys_cpu_present_map);
1236 while (!cpu_find_by_instance(instance, NULL, &mid)) { 1197 cpu_clear(i, cpu_present_map);
1237 if (mid != boot_cpu_id) {
1238 cpu_clear(mid, phys_cpu_present_map);
1239 cpu_clear(mid, cpu_present_map);
1240 if (num_possible_cpus() <= max_cpus) 1198 if (num_possible_cpus() <= max_cpus)
1241 break; 1199 break;
1242 } 1200 }
1243 instance++;
1244 } 1201 }
1245 } 1202 }
1246 1203
1247 for_each_possible_cpu(i) { 1204 cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
1248 if (tlb_type == hypervisor) {
1249 int j;
1250
1251 /* XXX get this mapping from machine description */
1252 for_each_possible_cpu(j) {
1253 if ((j >> 2) == (i >> 2))
1254 cpu_set(j, cpu_sibling_map[i]);
1255 }
1256 } else {
1257 cpu_set(i, cpu_sibling_map[i]);
1258 }
1259 }
1260
1261 smp_store_cpu_info(boot_cpu_id);
1262 smp_tune_scheduling(); 1205 smp_tune_scheduling();
1263} 1206}
1264 1207
1265/* Set this up early so that things like the scheduler can init 1208void __devinit smp_prepare_boot_cpu(void)
1266 * properly. We use the same cpu mask for both the present and
1267 * possible cpu map.
1268 */
1269void __init smp_setup_cpu_possible_map(void)
1270{ 1209{
1271 int instance, mid;
1272
1273 instance = 0;
1274 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1275 if (mid < NR_CPUS) {
1276 cpu_set(mid, phys_cpu_present_map);
1277 cpu_set(mid, cpu_present_map);
1278 }
1279 instance++;
1280 }
1281} 1210}
1282 1211
1283void __devinit smp_prepare_boot_cpu(void) 1212void __devinit smp_fill_in_sib_core_maps(void)
1284{ 1213{
1214 unsigned int i;
1215
1216 for_each_possible_cpu(i) {
1217 unsigned int j;
1218
1219 if (cpu_data(i).core_id == 0) {
1220 cpu_set(i, cpu_sibling_map[i]);
1221 continue;
1222 }
1223
1224 for_each_possible_cpu(j) {
1225 if (cpu_data(i).core_id ==
1226 cpu_data(j).core_id)
1227 cpu_set(j, cpu_sibling_map[i]);
1228 }
1229 }
1285} 1230}
1286 1231
1287int __cpuinit __cpu_up(unsigned int cpu) 1232int __cpuinit __cpu_up(unsigned int cpu)
@@ -1337,7 +1282,7 @@ unsigned long __per_cpu_shift __read_mostly;
1337EXPORT_SYMBOL(__per_cpu_base); 1282EXPORT_SYMBOL(__per_cpu_base);
1338EXPORT_SYMBOL(__per_cpu_shift); 1283EXPORT_SYMBOL(__per_cpu_shift);
1339 1284
1340void __init setup_per_cpu_areas(void) 1285void __init real_setup_per_cpu_areas(void)
1341{ 1286{
1342 unsigned long goal, size, i; 1287 unsigned long goal, size, i;
1343 char *ptr; 1288 char *ptr;
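(Editor's sketch, not part of the patch.) smp_fill_in_sib_core_maps() above derives sibling sets purely from the core_id values that the machine description (or the OF fallback) fills in: cpus sharing a non-zero core_id become siblings of each other, while a cpu with core_id 0 is only its own sibling. The toy program below uses made-up core ids and a tiny cpu count just to show the grouping:

#include <stdio.h>

#define TOY_NR_CPUS 8

int main(void)
{
	/* Hypothetical core ids: a 4-thread core, a 2-thread core, and
	 * two cpus with unknown (0) core id.
	 */
	int core_id[TOY_NR_CPUS] = { 1, 1, 1, 1, 2, 2, 0, 0 };
	unsigned int sibling_map[TOY_NR_CPUS] = { 0 };
	int i, j;

	for (i = 0; i < TOY_NR_CPUS; i++) {
		if (core_id[i] == 0) {
			sibling_map[i] |= 1U << i;	/* only itself */
			continue;
		}
		for (j = 0; j < TOY_NR_CPUS; j++)
			if (core_id[i] == core_id[j])
				sibling_map[i] |= 1U << j;
	}

	for (i = 0; i < TOY_NR_CPUS; i++)
		printf("cpu%d siblings: %#x\n", i, sibling_map[i]);
	return 0;
}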
diff --git a/arch/sparc64/kernel/sstate.c b/arch/sparc64/kernel/sstate.c
new file mode 100644
index 000000000000..5b6e75b7f052
--- /dev/null
+++ b/arch/sparc64/kernel/sstate.c
@@ -0,0 +1,104 @@
1/* sstate.c: System soft state support.
2 *
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h>
7#include <linux/notifier.h>
8#include <linux/init.h>
9
10#include <asm/hypervisor.h>
11#include <asm/sstate.h>
12#include <asm/oplib.h>
13#include <asm/head.h>
14#include <asm/io.h>
15
16static int hv_supports_soft_state;
17
18static unsigned long kimage_addr_to_ra(const char *p)
19{
20 unsigned long val = (unsigned long) p;
21
22 return kern_base + (val - KERNBASE);
23}
24
25static void do_set_sstate(unsigned long state, const char *msg)
26{
27 unsigned long err;
28
29 if (!hv_supports_soft_state)
30 return;
31
32 err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg));
33 if (err) {
34 printk(KERN_WARNING "SSTATE: Failed to set soft-state to "
35 "state[%lx] msg[%s], err=%lu\n",
36 state, msg, err);
37 }
38}
39
40static const char booting_msg[32] __attribute__((aligned(32))) =
41 "Linux booting";
42static const char running_msg[32] __attribute__((aligned(32))) =
43 "Linux running";
44static const char halting_msg[32] __attribute__((aligned(32))) =
45 "Linux halting";
46static const char poweroff_msg[32] __attribute__((aligned(32))) =
47 "Linux powering off";
48static const char rebooting_msg[32] __attribute__((aligned(32))) =
49 "Linux rebooting";
50static const char panicing_msg[32] __attribute__((aligned(32))) =
51 "Linux panicing";
52
53void sstate_booting(void)
54{
55 do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg);
56}
57
58void sstate_running(void)
59{
60 do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg);
61}
62
63void sstate_halt(void)
64{
65 do_set_sstate(HV_SOFT_STATE_TRANSITION, halting_msg);
66}
67
68void sstate_poweroff(void)
69{
70 do_set_sstate(HV_SOFT_STATE_TRANSITION, poweroff_msg);
71}
72
73void sstate_reboot(void)
74{
75 do_set_sstate(HV_SOFT_STATE_TRANSITION, rebooting_msg);
76}
77
78static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
79{
80 do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
81
82 return NOTIFY_DONE;
83}
84
85static struct notifier_block sstate_panic_block = {
86 .notifier_call = sstate_panic_event,
87 .priority = INT_MAX,
88};
89
90void __init sun4v_sstate_init(void)
91{
92 unsigned long major, minor;
93
94 major = 1;
95 minor = 0;
96 if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor))
97 return;
98
99 hv_supports_soft_state = 1;
100
101 prom_sun4v_guest_soft_state();
102 atomic_notifier_chain_register(&panic_notifier_list,
103 &sstate_panic_block);
104}
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index 405855dd886b..574bc248bca6 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -22,12 +22,12 @@ sun4v_cpu_mondo:
22 be,pn %xcc, sun4v_cpu_mondo_queue_empty 22 be,pn %xcc, sun4v_cpu_mondo_queue_empty
23 nop 23 nop
24 24
25 /* Get &trap_block[smp_processor_id()] into %g3. */ 25 /* Get &trap_block[smp_processor_id()] into %g4. */
26 ldxa [%g0] ASI_SCRATCHPAD, %g3 26 ldxa [%g0] ASI_SCRATCHPAD, %g4
27 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 27 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
28 28
29 /* Get CPU mondo queue base phys address into %g7. */ 29 /* Get CPU mondo queue base phys address into %g7. */
30 ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 30 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
31 31
32 /* Now get the cross-call arguments and handler PC, same 32 /* Now get the cross-call arguments and handler PC, same
33 * layout as sun4u: 33 * layout as sun4u:
@@ -47,8 +47,7 @@ sun4v_cpu_mondo:
47 add %g2, 0x40 - 0x8 - 0x8, %g2 47 add %g2, 0x40 - 0x8 - 0x8, %g2
48 48
49 /* Update queue head pointer. */ 49 /* Update queue head pointer. */
50 sethi %hi(8192 - 1), %g4 50 lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
51 or %g4, %lo(8192 - 1), %g4
52 and %g2, %g4, %g2 51 and %g2, %g4, %g2
53 52
54 mov INTRQ_CPU_MONDO_HEAD, %g4 53 mov INTRQ_CPU_MONDO_HEAD, %g4
@@ -71,12 +70,12 @@ sun4v_dev_mondo:
71 be,pn %xcc, sun4v_dev_mondo_queue_empty 70 be,pn %xcc, sun4v_dev_mondo_queue_empty
72 nop 71 nop
73 72
74 /* Get &trap_block[smp_processor_id()] into %g3. */ 73 /* Get &trap_block[smp_processor_id()] into %g4. */
75 ldxa [%g0] ASI_SCRATCHPAD, %g3 74 ldxa [%g0] ASI_SCRATCHPAD, %g4
76 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 75 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
77 76
78 /* Get DEV mondo queue base phys address into %g5. */ 77 /* Get DEV mondo queue base phys address into %g5. */
79 ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 78 ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
80 79
81 /* Load IVEC into %g3. */ 80 /* Load IVEC into %g3. */
82 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 81 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
@@ -90,8 +89,7 @@ sun4v_dev_mondo:
90 */ 89 */
91 90
92 /* Update queue head pointer, this frees up some registers. */ 91 /* Update queue head pointer, this frees up some registers. */
93 sethi %hi(8192 - 1), %g4 92 lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
94 or %g4, %lo(8192 - 1), %g4
95 and %g2, %g4, %g2 93 and %g2, %g4, %g2
96 94
97 mov INTRQ_DEVICE_MONDO_HEAD, %g4 95 mov INTRQ_DEVICE_MONDO_HEAD, %g4
@@ -143,6 +141,8 @@ sun4v_res_mondo:
143 brnz,pn %g1, sun4v_res_mondo_queue_full 141 brnz,pn %g1, sun4v_res_mondo_queue_full
144 nop 142 nop
145 143
144 lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
145
146 /* Remember this entry's offset in %g1. */ 146 /* Remember this entry's offset in %g1. */
147 mov %g2, %g1 147 mov %g2, %g1
148 148
@@ -173,8 +173,6 @@ sun4v_res_mondo:
173 add %g2, 0x08, %g2 173 add %g2, 0x08, %g2
174 174
175 /* Update queue head pointer. */ 175 /* Update queue head pointer. */
176 sethi %hi(8192 - 1), %g4
177 or %g4, %lo(8192 - 1), %g4
178 and %g2, %g4, %g2 176 and %g2, %g4, %g2
179 177
180 mov INTRQ_RESUM_MONDO_HEAD, %g4 178 mov INTRQ_RESUM_MONDO_HEAD, %g4
@@ -254,6 +252,8 @@ sun4v_nonres_mondo:
254 brnz,pn %g1, sun4v_nonres_mondo_queue_full 252 brnz,pn %g1, sun4v_nonres_mondo_queue_full
255 nop 253 nop
256 254
255 lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
256
257 /* Remember this entry's offset in %g1. */ 257 /* Remember this entry's offset in %g1. */
258 mov %g2, %g1 258 mov %g2, %g1
259 259
@@ -284,8 +284,6 @@ sun4v_nonres_mondo:
284 add %g2, 0x08, %g2 284 add %g2, 0x08, %g2
285 285
286 /* Update queue head pointer. */ 286 /* Update queue head pointer. */
287 sethi %hi(8192 - 1), %g4
288 or %g4, %lo(8192 - 1), %g4
289 and %g2, %g4, %g2 287 and %g2, %g4, %g2
290 288
291 mov INTRQ_NONRESUM_MONDO_HEAD, %g4 289 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 2d63d7689962..a31a0439244f 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -680,22 +680,14 @@ static int starfire_set_time(u32 val)
680 680
681static u32 hypervisor_get_time(void) 681static u32 hypervisor_get_time(void)
682{ 682{
683 register unsigned long func asm("%o5"); 683 unsigned long ret, time;
684 register unsigned long arg0 asm("%o0");
685 register unsigned long arg1 asm("%o1");
686 int retries = 10000; 684 int retries = 10000;
687 685
688retry: 686retry:
689 func = HV_FAST_TOD_GET; 687 ret = sun4v_tod_get(&time);
690 arg0 = 0; 688 if (ret == HV_EOK)
691 arg1 = 0; 689 return time;
692 __asm__ __volatile__("ta %6" 690 if (ret == HV_EWOULDBLOCK) {
693 : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
694 : "0" (func), "1" (arg0), "2" (arg1),
695 "i" (HV_FAST_TRAP));
696 if (arg0 == HV_EOK)
697 return arg1;
698 if (arg0 == HV_EWOULDBLOCK) {
699 if (--retries > 0) { 691 if (--retries > 0) {
700 udelay(100); 692 udelay(100);
701 goto retry; 693 goto retry;
@@ -709,20 +701,14 @@ retry:
709 701
710static int hypervisor_set_time(u32 secs) 702static int hypervisor_set_time(u32 secs)
711{ 703{
712 register unsigned long func asm("%o5"); 704 unsigned long ret;
713 register unsigned long arg0 asm("%o0");
714 int retries = 10000; 705 int retries = 10000;
715 706
716retry: 707retry:
717 func = HV_FAST_TOD_SET; 708 ret = sun4v_tod_set(secs);
718 arg0 = secs; 709 if (ret == HV_EOK)
719 __asm__ __volatile__("ta %4"
720 : "=&r" (func), "=&r" (arg0)
721 : "0" (func), "1" (arg0),
722 "i" (HV_FAST_TRAP));
723 if (arg0 == HV_EOK)
724 return 0; 710 return 0;
725 if (arg0 == HV_EWOULDBLOCK) { 711 if (ret == HV_EWOULDBLOCK) {
726 if (--retries > 0) { 712 if (--retries > 0) {
727 udelay(100); 713 udelay(100);
728 goto retry; 714 goto retry;
@@ -862,7 +848,6 @@ fs_initcall(clock_init);
862static unsigned long sparc64_init_timers(void) 848static unsigned long sparc64_init_timers(void)
863{ 849{
864 struct device_node *dp; 850 struct device_node *dp;
865 struct property *prop;
866 unsigned long clock; 851 unsigned long clock;
867#ifdef CONFIG_SMP 852#ifdef CONFIG_SMP
868 extern void smp_tick_init(void); 853 extern void smp_tick_init(void);
@@ -879,17 +864,15 @@ static unsigned long sparc64_init_timers(void)
879 if (manuf == 0x17 && impl == 0x13) { 864 if (manuf == 0x17 && impl == 0x13) {
880 /* Hummingbird, aka Ultra-IIe */ 865 /* Hummingbird, aka Ultra-IIe */
881 tick_ops = &hbtick_operations; 866 tick_ops = &hbtick_operations;
882 prop = of_find_property(dp, "stick-frequency", NULL); 867 clock = of_getintprop_default(dp, "stick-frequency", 0);
883 } else { 868 } else {
884 tick_ops = &tick_operations; 869 tick_ops = &tick_operations;
885 cpu_find_by_instance(0, &dp, NULL); 870 clock = local_cpu_data().clock_tick;
886 prop = of_find_property(dp, "clock-frequency", NULL);
887 } 871 }
888 } else { 872 } else {
889 tick_ops = &stick_operations; 873 tick_ops = &stick_operations;
890 prop = of_find_property(dp, "stick-frequency", NULL); 874 clock = of_getintprop_default(dp, "stick-frequency", 0);
891 } 875 }
892 clock = *(unsigned int *) prop->value;
893 876
894#ifdef CONFIG_SMP 877#ifdef CONFIG_SMP
895 smp_tick_init(); 878 smp_tick_init();
@@ -1365,6 +1348,7 @@ static int hypervisor_set_rtc_time(struct rtc_time *time)
1365 return hypervisor_set_time(seconds); 1348 return hypervisor_set_time(seconds);
1366} 1349}
1367 1350
1351#ifdef CONFIG_PCI
1368static void bq4802_get_rtc_time(struct rtc_time *time) 1352static void bq4802_get_rtc_time(struct rtc_time *time)
1369{ 1353{
1370 unsigned char val = readb(bq4802_regs + 0x0e); 1354 unsigned char val = readb(bq4802_regs + 0x0e);
@@ -1436,6 +1420,7 @@ static int bq4802_set_rtc_time(struct rtc_time *time)
1436 1420
1437 return 0; 1421 return 0;
1438} 1422}
1423#endif /* CONFIG_PCI */
1439 1424
1440struct mini_rtc_ops { 1425struct mini_rtc_ops {
1441 void (*get_rtc_time)(struct rtc_time *); 1426 void (*get_rtc_time)(struct rtc_time *);
@@ -1452,10 +1437,12 @@ static struct mini_rtc_ops hypervisor_rtc_ops = {
1452 .set_rtc_time = hypervisor_set_rtc_time, 1437 .set_rtc_time = hypervisor_set_rtc_time,
1453}; 1438};
1454 1439
1440#ifdef CONFIG_PCI
1455static struct mini_rtc_ops bq4802_rtc_ops = { 1441static struct mini_rtc_ops bq4802_rtc_ops = {
1456 .get_rtc_time = bq4802_get_rtc_time, 1442 .get_rtc_time = bq4802_get_rtc_time,
1457 .set_rtc_time = bq4802_set_rtc_time, 1443 .set_rtc_time = bq4802_set_rtc_time,
1458}; 1444};
1445#endif /* CONFIG_PCI */
1459 1446
1460static struct mini_rtc_ops *mini_rtc_ops; 1447static struct mini_rtc_ops *mini_rtc_ops;
1461 1448
@@ -1579,8 +1566,10 @@ static int __init rtc_mini_init(void)
1579 mini_rtc_ops = &hypervisor_rtc_ops; 1566 mini_rtc_ops = &hypervisor_rtc_ops;
1580 else if (this_is_starfire) 1567 else if (this_is_starfire)
1581 mini_rtc_ops = &starfire_rtc_ops; 1568 mini_rtc_ops = &starfire_rtc_ops;
1569#ifdef CONFIG_PCI
1582 else if (bq4802_regs) 1570 else if (bq4802_regs)
1583 mini_rtc_ops = &bq4802_rtc_ops; 1571 mini_rtc_ops = &bq4802_rtc_ops;
1572#endif /* CONFIG_PCI */
1584 else 1573 else
1585 return -ENODEV; 1574 return -ENODEV;
1586 1575
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index d0fde36395b4..00a9e3286c83 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -795,8 +795,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector
795void __init cheetah_ecache_flush_init(void) 795void __init cheetah_ecache_flush_init(void)
796{ 796{
797 unsigned long largest_size, smallest_linesize, order, ver; 797 unsigned long largest_size, smallest_linesize, order, ver;
798 struct device_node *dp; 798 int i, sz;
799 int i, instance, sz;
800 799
801 /* Scan all cpu device tree nodes, note two values: 800 /* Scan all cpu device tree nodes, note two values:
802 * 1) largest E-cache size 801 * 1) largest E-cache size
@@ -805,18 +804,20 @@ void __init cheetah_ecache_flush_init(void)
805 largest_size = 0UL; 804 largest_size = 0UL;
806 smallest_linesize = ~0UL; 805 smallest_linesize = ~0UL;
807 806
808 instance = 0; 807 for (i = 0; i < NR_CPUS; i++) {
809 while (!cpu_find_by_instance(instance, &dp, NULL)) {
810 unsigned long val; 808 unsigned long val;
811 809
812 val = of_getintprop_default(dp, "ecache-size", 810 val = cpu_data(i).ecache_size;
813 (2 * 1024 * 1024)); 811 if (!val)
812 continue;
813
814 if (val > largest_size) 814 if (val > largest_size)
815 largest_size = val; 815 largest_size = val;
816 val = of_getintprop_default(dp, "ecache-line-size", 64); 816
817 val = cpu_data(i).ecache_line_size;
817 if (val < smallest_linesize) 818 if (val < smallest_linesize)
818 smallest_linesize = val; 819 smallest_linesize = val;
819 instance++; 820
820 } 821 }
821 822
822 if (largest_size == 0UL || smallest_linesize == ~0UL) { 823 if (largest_size == 0UL || smallest_linesize == ~0UL) {
@@ -2564,7 +2565,15 @@ void __init trap_init(void)
2564 (TRAP_PER_CPU_TSB_HUGE_TEMP != 2565 (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2565 offsetof(struct trap_per_cpu, tsb_huge_temp)) || 2566 offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2566 (TRAP_PER_CPU_IRQ_WORKLIST != 2567 (TRAP_PER_CPU_IRQ_WORKLIST !=
2567 offsetof(struct trap_per_cpu, irq_worklist))) 2568 offsetof(struct trap_per_cpu, irq_worklist)) ||
2569 (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2570 offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2571 (TRAP_PER_CPU_DEV_MONDO_QMASK !=
2572 offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
2573 (TRAP_PER_CPU_RESUM_QMASK !=
2574 offsetof(struct trap_per_cpu, resum_qmask)) ||
2575 (TRAP_PER_CPU_NONRESUM_QMASK !=
2576 offsetof(struct trap_per_cpu, nonresum_qmask)))
2568 trap_per_cpu_offsets_are_bolixed_dave(); 2577 trap_per_cpu_offsets_are_bolixed_dave();
2569 2578
2570 if ((TSB_CONFIG_TSB != 2579 if ((TSB_CONFIG_TSB !=
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 6e5b01d779d2..3010227fe243 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -23,6 +23,7 @@
23#include <linux/kprobes.h> 23#include <linux/kprobes.h>
24#include <linux/cache.h> 24#include <linux/cache.h>
25#include <linux/sort.h> 25#include <linux/sort.h>
26#include <linux/percpu.h>
26 27
27#include <asm/head.h> 28#include <asm/head.h>
28#include <asm/system.h> 29#include <asm/system.h>
@@ -43,8 +44,8 @@
43#include <asm/tsb.h> 44#include <asm/tsb.h>
44#include <asm/hypervisor.h> 45#include <asm/hypervisor.h>
45#include <asm/prom.h> 46#include <asm/prom.h>
46 47#include <asm/sstate.h>
47extern void device_scan(void); 48#include <asm/mdesc.h>
48 49
49#define MAX_PHYS_ADDRESS (1UL << 42UL) 50#define MAX_PHYS_ADDRESS (1UL << 42UL)
50#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) 51#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -60,8 +61,11 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
60unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; 61unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
61 62
62#ifndef CONFIG_DEBUG_PAGEALLOC 63#ifndef CONFIG_DEBUG_PAGEALLOC
63/* A special kernel TSB for 4MB and 256MB linear mappings. */ 64/* A special kernel TSB for 4MB and 256MB linear mappings.
64struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; 65 * Space is allocated for this right after the trap table
66 * in arch/sparc64/kernel/head.S
67 */
68extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
65#endif 69#endif
66 70
67#define MAX_BANKS 32 71#define MAX_BANKS 32
@@ -190,12 +194,9 @@ inline void flush_dcache_page_impl(struct page *page)
190} 194}
191 195
192#define PG_dcache_dirty PG_arch_1 196#define PG_dcache_dirty PG_arch_1
193#define PG_dcache_cpu_shift 24UL 197#define PG_dcache_cpu_shift 32UL
194#define PG_dcache_cpu_mask (256UL - 1UL) 198#define PG_dcache_cpu_mask \
195 199 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
196#if NR_CPUS > 256
197#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
198#endif
199 200
200#define dcache_dirty_cpu(page) \ 201#define dcache_dirty_cpu(page) \
201 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) 202 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
@@ -557,26 +558,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
557 unsigned long pte, 558 unsigned long pte,
558 unsigned long mmu) 559 unsigned long mmu)
559{ 560{
560 register unsigned long func asm("%o5"); 561 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
561 register unsigned long arg0 asm("%o0"); 562
562 register unsigned long arg1 asm("%o1"); 563 if (ret != 0) {
563 register unsigned long arg2 asm("%o2");
564 register unsigned long arg3 asm("%o3");
565
566 func = HV_FAST_MMU_MAP_PERM_ADDR;
567 arg0 = vaddr;
568 arg1 = 0;
569 arg2 = pte;
570 arg3 = mmu;
571 __asm__ __volatile__("ta 0x80"
572 : "=&r" (func), "=&r" (arg0),
573 "=&r" (arg1), "=&r" (arg2),
574 "=&r" (arg3)
575 : "0" (func), "1" (arg0), "2" (arg1),
576 "3" (arg2), "4" (arg3));
577 if (arg0 != 0) {
578 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " 564 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
579 "errors with %lx\n", vaddr, 0, pte, mmu, arg0); 565 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
580 prom_halt(); 566 prom_halt();
581 } 567 }
582} 568}
@@ -1313,20 +1299,16 @@ static void __init sun4v_ktsb_init(void)
1313 1299
1314void __cpuinit sun4v_ktsb_register(void) 1300void __cpuinit sun4v_ktsb_register(void)
1315{ 1301{
1316 register unsigned long func asm("%o5"); 1302 unsigned long pa, ret;
1317 register unsigned long arg0 asm("%o0");
1318 register unsigned long arg1 asm("%o1");
1319 unsigned long pa;
1320 1303
1321 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); 1304 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1322 1305
1323 func = HV_FAST_MMU_TSB_CTX0; 1306 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1324 arg0 = NUM_KTSB_DESCR; 1307 if (ret != 0) {
1325 arg1 = pa; 1308 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1326 __asm__ __volatile__("ta %6" 1309 "errors with %lx\n", pa, ret);
1327 : "=&r" (func), "=&r" (arg0), "=&r" (arg1) 1310 prom_halt();
1328 : "0" (func), "1" (arg0), "2" (arg1), 1311 }
1329 "i" (HV_FAST_TRAP));
1330} 1312}
1331 1313
1332/* paging_init() sets up the page tables */ 1314/* paging_init() sets up the page tables */
@@ -1334,6 +1316,9 @@ void __cpuinit sun4v_ktsb_register(void)
1334extern void cheetah_ecache_flush_init(void); 1316extern void cheetah_ecache_flush_init(void);
1335extern void sun4v_patch_tlb_handlers(void); 1317extern void sun4v_patch_tlb_handlers(void);
1336 1318
1319extern void cpu_probe(void);
1320extern void central_probe(void);
1321
1337static unsigned long last_valid_pfn; 1322static unsigned long last_valid_pfn;
1338pgd_t swapper_pg_dir[2048]; 1323pgd_t swapper_pg_dir[2048];
1339 1324
@@ -1345,9 +1330,24 @@ void __init paging_init(void)
1345 unsigned long end_pfn, pages_avail, shift, phys_base; 1330 unsigned long end_pfn, pages_avail, shift, phys_base;
1346 unsigned long real_end, i; 1331 unsigned long real_end, i;
1347 1332
1333 /* These build time checks make sure that the dcache_dirty_cpu()
1334 * page->flags usage will work.
1335 *
1336 * When a page gets marked as dcache-dirty, we store the
1337 * cpu number starting at bit 32 in the page->flags. Also,
1338 * functions like clear_dcache_dirty_cpu use the cpu mask
1339 * in 13-bit signed-immediate instruction fields.
1340 */
1341 BUILD_BUG_ON(FLAGS_RESERVED != 32);
1342 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
1343 ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
1344 BUILD_BUG_ON(NR_CPUS > 4096);
1345
1348 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 1346 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1349 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; 1347 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1350 1348
1349 sstate_booting();
1350
1351 /* Invalidate both kernel TSBs. */ 1351 /* Invalidate both kernel TSBs. */
1352 memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); 1352 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
1353#ifndef CONFIG_DEBUG_PAGEALLOC 1353#ifndef CONFIG_DEBUG_PAGEALLOC
@@ -1416,8 +1416,13 @@ void __init paging_init(void)
1416 1416
1417 kernel_physical_mapping_init(); 1417 kernel_physical_mapping_init();
1418 1418
1419 real_setup_per_cpu_areas();
1420
1419 prom_build_devicetree(); 1421 prom_build_devicetree();
1420 1422
1423 if (tlb_type == hypervisor)
1424 sun4v_mdesc_init();
1425
1421 { 1426 {
1422 unsigned long zones_size[MAX_NR_ZONES]; 1427 unsigned long zones_size[MAX_NR_ZONES];
1423 unsigned long zholes_size[MAX_NR_ZONES]; 1428 unsigned long zholes_size[MAX_NR_ZONES];
@@ -1434,7 +1439,10 @@ void __init paging_init(void)
1434 zholes_size); 1439 zholes_size);
1435 } 1440 }
1436 1441
1437 device_scan(); 1442 prom_printf("Booting Linux...\n");
1443
1444 central_probe();
1445 cpu_probe();
1438} 1446}
1439 1447
1440static void __init taint_real_pages(void) 1448static void __init taint_real_pages(void)
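(Editor's sketch, not part of the patch.) The BUILD_BUG_ON checks added to paging_init() above guard the new page->flags layout: the dcache-dirty owner cpu now lives at bit 32 in a field just wide enough for roundup_pow_of_two(NR_CPUS) values, instead of the old fixed 8-bit field at bit 24. The program below only illustrates the field-width arithmetic; ilog2_roundup() is a user-space stand-in for ilog2(roundup_pow_of_two(n)):

#include <stdio.h>

#define PG_DCACHE_CPU_SHIFT 32UL	/* matches the new shift above */

/* Smallest number of bits that can hold cpu numbers 0..n-1. */
static unsigned long ilog2_roundup(unsigned long n)
{
	unsigned long bits = 0;

	while ((1UL << bits) < n)
		bits++;
	return bits;
}

int main(void)
{
	unsigned long nr_cpus[] = { 64, 256, 4096 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long bits = ilog2_roundup(nr_cpus[i]);
		unsigned long mask = (1UL << bits) - 1UL;

		/* e.g. NR_CPUS=4096 -> 12 bits, mask 0xfff at bit 32. */
		printf("NR_CPUS=%4lu -> %2lu bits, mask %#lx at bit %lu\n",
		       nr_cpus[i], bits, mask, PG_DCACHE_CPU_SHIFT);
	}
	return 0;
}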
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 0b4213720d43..f3e0c14e9eef 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -15,6 +15,25 @@
15#include <asm/oplib.h> 15#include <asm/oplib.h>
16#include <asm/system.h> 16#include <asm/system.h>
17 17
18int prom_service_exists(const char *service_name)
19{
20 int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
21 P1275_INOUT(1, 1), service_name);
22
23 if (err)
24 return 0;
25 return 1;
26}
27
28void prom_sun4v_guest_soft_state(void)
29{
30 const char *svc = "SUNW,soft-state-supported";
31
32 if (!prom_service_exists(svc))
33 return;
34 p1275_cmd(svc, P1275_INOUT(0, 0));
35}
36
18/* Reset and reboot the machine with the command 'bcommand'. */ 37/* Reset and reboot the machine with the command 'bcommand'. */
19void prom_reboot(const char *bcommand) 38void prom_reboot(const char *bcommand)
20{ 39{
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index ef833a1c27eb..0b7ffa5191c6 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -6,7 +6,7 @@
6# 6#
7config DRM 7config DRM
8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
9 depends on (AGP || AGP=n) && PCI 9 depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
10 help 10 help
11 Kernel-level support for the Direct Rendering Infrastructure (DRI) 11 Kernel-level support for the Direct Rendering Infrastructure (DRI)
12 introduced in XFree86 4.0. If you say Y here, you need to select 12 introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d28c14e23c32..572034ceb143 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1753,23 +1753,9 @@ config SUN3X_ESP
1753 The ESP was an on-board SCSI controller used on Sun 3/80 1753 The ESP was an on-board SCSI controller used on Sun 3/80
1754 machines. Say Y here to compile in support for it. 1754 machines. Say Y here to compile in support for it.
1755 1755
1756config SCSI_ESP_CORE
1757 tristate "ESP Scsi Driver Core"
1758 depends on SCSI
1759 select SCSI_SPI_ATTRS
1760 help
1761 This is a core driver for NCR53c9x based scsi chipsets,
1762 also known as "ESP" for Emulex Scsi Processor or
1763 Enhanced Scsi Processor. This driver does not exist by
1764 itself, there are front-end drivers which, when enabled,
1765 select and enable this driver. One example is SCSI_SUNESP.
1766 These front-end drivers provide probing, DMA, and register
1767 access support for the core driver.
1768
1769config SCSI_SUNESP 1756config SCSI_SUNESP
1770 tristate "Sparc ESP Scsi Driver" 1757 tristate "Sparc ESP Scsi Driver"
1771 depends on SBUS && SCSI 1758 depends on SBUS && SCSI
1772 select SCSI_ESP_CORE
1773 help 1759 help
1774 This is the driver for the Sun ESP SCSI host adapter. The ESP 1760 This is the driver for the Sun ESP SCSI host adapter. The ESP
1775 chipset is present in most SPARC SBUS-based computers. 1761 chipset is present in most SPARC SBUS-based computers.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 51e884fa10b0..b1b632791580 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -106,8 +106,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
106obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 106obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
107obj-$(CONFIG_MEGARAID_SAS) += megaraid/ 107obj-$(CONFIG_MEGARAID_SAS) += megaraid/
108obj-$(CONFIG_SCSI_ACARD) += atp870u.o 108obj-$(CONFIG_SCSI_ACARD) += atp870u.o
109obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o 109obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
110obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o
111obj-$(CONFIG_SCSI_GDTH) += gdth.o 110obj-$(CONFIG_SCSI_GDTH) += gdth.o
112obj-$(CONFIG_SCSI_INITIO) += initio.o 111obj-$(CONFIG_SCSI_INITIO) += initio.o
113obj-$(CONFIG_SCSI_INIA100) += a100u2w.o 112obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
@@ -121,7 +120,7 @@ obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
121obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o 120obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
122obj-$(CONFIG_SCSI_PPA) += ppa.o 121obj-$(CONFIG_SCSI_PPA) += ppa.o
123obj-$(CONFIG_SCSI_IMM) += imm.o 122obj-$(CONFIG_SCSI_IMM) += imm.o
124obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o 123obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
125obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o 124obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
126obj-$(CONFIG_SCSI_FCAL) += fcal.o 125obj-$(CONFIG_SCSI_FCAL) += fcal.o
127obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o 126obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 19dd4b962e18..81e497d9eae0 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,307 +1,244 @@
1/* 1/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
2 * jazz_esp.c: Driver for SCSI chip on Mips Magnum Boards (JAZZ architecture)
3 * 2 *
 4 * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de) 3 * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
5 *
6 * jazz_esp is based on David S. Miller's ESP driver and cyber_esp
7 */ 4 */
8 5
9#include <linux/init.h>
10#include <linux/kernel.h> 6#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/types.h> 7#include <linux/types.h>
13#include <linux/string.h> 8#include <linux/module.h>
14#include <linux/slab.h> 9#include <linux/init.h>
15#include <linux/blkdev.h> 10#include <linux/interrupt.h>
16#include <linux/proc_fs.h> 11#include <linux/platform_device.h>
17#include <linux/stat.h> 12#include <linux/dma-mapping.h>
18
19#include "scsi.h"
20#include <scsi/scsi_host.h>
21#include "NCR53C9x.h"
22 13
23#include <asm/irq.h> 14#include <asm/irq.h>
15#include <asm/io.h>
16#include <asm/dma.h>
17
24#include <asm/jazz.h> 18#include <asm/jazz.h>
25#include <asm/jazzdma.h> 19#include <asm/jazzdma.h>
26#include <asm/dma.h>
27 20
28#include <asm/pgtable.h> 21#include <scsi/scsi_host.h>
29
30static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
31static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
32static void dma_dump_state(struct NCR_ESP *esp);
33static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
34static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
35static void dma_ints_off(struct NCR_ESP *esp);
36static void dma_ints_on(struct NCR_ESP *esp);
37static int dma_irq_p(struct NCR_ESP *esp);
38static int dma_ports_p(struct NCR_ESP *esp);
39static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
40static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp);
41static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp);
42static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp);
43static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp);
44static void dma_advance_sg (struct scsi_cmnd *sp);
45static void dma_led_off(struct NCR_ESP *);
46static void dma_led_on(struct NCR_ESP *);
47
48
49static volatile unsigned char cmd_buffer[16];
50 /* This is where all commands are put
51 * before they are trasfered to the ESP chip
52 * via PIO.
53 */
54
55static int jazz_esp_release(struct Scsi_Host *shost)
56{
57 if (shost->irq)
58 free_irq(shost->irq, NULL);
59 if (shost->dma_channel != 0xff)
60 free_dma(shost->dma_channel);
61 if (shost->io_port && shost->n_io_port)
62 release_region(shost->io_port, shost->n_io_port);
63 scsi_unregister(shost);
64 return 0;
65}
66 22
67/***************************************************************** Detection */ 23#include "esp_scsi.h"
68static int jazz_esp_detect(struct scsi_host_template *tpnt)
69{
70 struct NCR_ESP *esp;
71 struct ConfigDev *esp_dev;
72
73 /*
74 * first assumption it is there:-)
75 */
76 if (1) {
77 esp_dev = NULL;
78 esp = esp_allocate(tpnt, esp_dev, 0);
79
80 /* Do command transfer with programmed I/O */
81 esp->do_pio_cmds = 1;
82
83 /* Required functions */
84 esp->dma_bytes_sent = &dma_bytes_sent;
85 esp->dma_can_transfer = &dma_can_transfer;
86 esp->dma_dump_state = &dma_dump_state;
87 esp->dma_init_read = &dma_init_read;
88 esp->dma_init_write = &dma_init_write;
89 esp->dma_ints_off = &dma_ints_off;
90 esp->dma_ints_on = &dma_ints_on;
91 esp->dma_irq_p = &dma_irq_p;
92 esp->dma_ports_p = &dma_ports_p;
93 esp->dma_setup = &dma_setup;
94
95 /* Optional functions */
96 esp->dma_barrier = NULL;
97 esp->dma_drain = NULL;
98 esp->dma_invalidate = NULL;
99 esp->dma_irq_entry = NULL;
100 esp->dma_irq_exit = NULL;
101 esp->dma_poll = NULL;
102 esp->dma_reset = NULL;
103 esp->dma_led_off = &dma_led_off;
104 esp->dma_led_on = &dma_led_on;
105
106 /* virtual DMA functions */
107 esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
108 esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
109 esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
110 esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
111 esp->dma_advance_sg = &dma_advance_sg;
112
113
114 /* SCSI chip speed */
115 esp->cfreq = 40000000;
116 24
117 /* 25#define DRV_MODULE_NAME "jazz_esp"
118 * we don't give the address of DMA channel, but the number 26#define PFX DRV_MODULE_NAME ": "
119 * of DMA channel, so we can use the jazz DMA functions 27#define DRV_VERSION "1.000"
120 * 28#define DRV_MODULE_RELDATE "May 19, 2007"
121 */
122 esp->dregs = (void *) JAZZ_SCSI_DMA;
123
124 /* ESP register base */
125 esp->eregs = (struct ESP_regs *)(JAZZ_SCSI_BASE);
126
127 /* Set the command buffer */
128 esp->esp_command = (volatile unsigned char *)cmd_buffer;
129
130 /* get virtual dma address for command buffer */
131 esp->esp_command_dvma = vdma_alloc(CPHYSADDR(cmd_buffer), sizeof (cmd_buffer));
132
133 esp->irq = JAZZ_SCSI_IRQ;
134 request_irq(JAZZ_SCSI_IRQ, esp_intr, IRQF_DISABLED, "JAZZ SCSI",
135 esp->ehost);
136
137 /*
138 * FIXME, look if the scsi id is available from NVRAM
139 */
140 esp->scsi_id = 7;
141
142 /* Check for differential SCSI-bus */
143 /* What is this stuff? */
144 esp->diff = 0;
145
146 esp_initialize(esp);
147
148 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,esps_in_use);
149 esps_running = esps_in_use;
150 return esps_in_use;
151 }
152 return 0;
153}
154 29
155/************************************************************* DMA Functions */ 30static void jazz_esp_write8(struct esp *esp, u8 val, unsigned long reg)
156static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
157{ 31{
158 return fifo_count; 32 *(volatile u8 *)(esp->regs + reg) = val;
159} 33}
160 34
161static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp) 35static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
162{ 36{
163 /* 37 return *(volatile u8 *)(esp->regs + reg);
164 * maximum DMA size is 1MB
165 */
166 unsigned long sz = sp->SCp.this_residual;
167 if(sz > 0x100000)
168 sz = 0x100000;
169 return sz;
170} 38}
171 39
172static void dma_dump_state(struct NCR_ESP *esp) 40static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
41 size_t sz, int dir)
173{ 42{
174 43 return dma_map_single(esp->dev, buf, sz, dir);
175 ESPLOG(("esp%d: dma -- enable <%08x> residue <%08x\n",
176 esp->esp_id, vdma_get_enable((int)esp->dregs), vdma_get_residue((int)esp->dregs)));
177} 44}
178 45
179static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length) 46static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
47 int num_sg, int dir)
180{ 48{
181 dma_cache_wback_inv ((unsigned long)phys_to_virt(vdma_log2phys(vaddress)), length); 49 return dma_map_sg(esp->dev, sg, num_sg, dir);
182 vdma_disable ((int)esp->dregs);
183 vdma_set_mode ((int)esp->dregs, DMA_MODE_READ);
184 vdma_set_addr ((int)esp->dregs, vaddress);
185 vdma_set_count ((int)esp->dregs, length);
186 vdma_enable ((int)esp->dregs);
187} 50}
188 51
189static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length) 52static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
53 size_t sz, int dir)
190{ 54{
191 dma_cache_wback_inv ((unsigned long)phys_to_virt(vdma_log2phys(vaddress)), length); 55 dma_unmap_single(esp->dev, addr, sz, dir);
192 vdma_disable ((int)esp->dregs);
193 vdma_set_mode ((int)esp->dregs, DMA_MODE_WRITE);
194 vdma_set_addr ((int)esp->dregs, vaddress);
195 vdma_set_count ((int)esp->dregs, length);
196 vdma_enable ((int)esp->dregs);
197} 56}
198 57
199static void dma_ints_off(struct NCR_ESP *esp) 58static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
59 int num_sg, int dir)
200{ 60{
201 disable_irq(esp->irq); 61 dma_unmap_sg(esp->dev, sg, num_sg, dir);
202} 62}
203 63
204static void dma_ints_on(struct NCR_ESP *esp) 64static int jazz_esp_irq_pending(struct esp *esp)
205{ 65{
206 enable_irq(esp->irq); 66 if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
67 return 1;
68 return 0;
207} 69}
208 70
209static int dma_irq_p(struct NCR_ESP *esp) 71static void jazz_esp_reset_dma(struct esp *esp)
210{ 72{
211 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR); 73 vdma_disable ((int)esp->dma_regs);
212} 74}
213 75
214static int dma_ports_p(struct NCR_ESP *esp) 76static void jazz_esp_dma_drain(struct esp *esp)
215{ 77{
216 int enable = vdma_get_enable((int)esp->dregs); 78 /* nothing to do */
217
218 return (enable & R4030_CHNL_ENABLE);
219} 79}
220 80
221static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 81static void jazz_esp_dma_invalidate(struct esp *esp)
222{ 82{
223 /* 83 vdma_disable ((int)esp->dma_regs);
224 * On the Sparc, DMA_ST_WRITE means "move data from device to memory"
225 * so when (write) is true, it actually means READ!
226 */
227 if(write){
228 dma_init_read(esp, addr, count);
229 } else {
230 dma_init_write(esp, addr, count);
231 }
232} 84}
233 85
234static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp) 86static void jazz_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
87 u32 dma_count, int write, u8 cmd)
235{ 88{
236 sp->SCp.have_data_in = vdma_alloc(CPHYSADDR(sp->SCp.buffer), sp->SCp.this_residual); 89 BUG_ON(!(cmd & ESP_CMD_DMA));
237 sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in); 90
91 jazz_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
92 jazz_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
93 vdma_disable ((int)esp->dma_regs);
94 if (write)
95 vdma_set_mode ((int)esp->dma_regs, DMA_MODE_READ);
96 else
97 vdma_set_mode ((int)esp->dma_regs, DMA_MODE_WRITE);
98
99 vdma_set_addr ((int)esp->dma_regs, addr);
100 vdma_set_count ((int)esp->dma_regs, dma_count);
101 vdma_enable ((int)esp->dma_regs);
102
103 scsi_esp_cmd(esp, cmd);
238} 104}
239 105
240static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 106static int jazz_esp_dma_error(struct esp *esp)
241{
242 int sz = sp->SCp.buffers_residual;
243 struct scatterlist *sg = (struct scatterlist *) sp->SCp.buffer;
244
245 while (sz >= 0) {
246 sg[sz].dma_address = vdma_alloc(CPHYSADDR(page_address(sg[sz].page) + sg[sz].offset), sg[sz].length);
247 sz--;
248 }
249 sp->SCp.ptr=(char *)(sp->SCp.buffer->dma_address);
250}
251
252static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
253{ 107{
254 vdma_free(sp->SCp.have_data_in); 108 u32 enable = vdma_get_enable((int)esp->dma_regs);
109
110 if (enable & (R4030_MEM_INTR|R4030_ADDR_INTR))
111 return 1;
112
113 return 0;
255} 114}
256 115
257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 116static const struct esp_driver_ops jazz_esp_ops = {
117 .esp_write8 = jazz_esp_write8,
118 .esp_read8 = jazz_esp_read8,
119 .map_single = jazz_esp_map_single,
120 .map_sg = jazz_esp_map_sg,
121 .unmap_single = jazz_esp_unmap_single,
122 .unmap_sg = jazz_esp_unmap_sg,
123 .irq_pending = jazz_esp_irq_pending,
124 .reset_dma = jazz_esp_reset_dma,
125 .dma_drain = jazz_esp_dma_drain,
126 .dma_invalidate = jazz_esp_dma_invalidate,
127 .send_dma_cmd = jazz_esp_send_dma_cmd,
128 .dma_error = jazz_esp_dma_error,
129};
130
131static int __devinit esp_jazz_probe(struct platform_device *dev)
258{ 132{
259 int sz = sp->use_sg - 1; 133 struct scsi_host_template *tpnt = &scsi_esp_template;
260 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; 134 struct Scsi_Host *host;
261 135 struct esp *esp;
262 while(sz >= 0) { 136 struct resource *res;
263 vdma_free(sg[sz].dma_address); 137 int err;
264 sz--; 138
265 } 139 host = scsi_host_alloc(tpnt, sizeof(struct esp));
140
141 err = -ENOMEM;
142 if (!host)
143 goto fail;
144
145 host->max_id = 8;
146 esp = host_to_esp(host);
147
148 esp->host = host;
149 esp->dev = dev;
150 esp->ops = &jazz_esp_ops;
151
152 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
153 if (!res)
154 goto fail_unlink;
155
156 esp->regs = (void __iomem *)res->start;
157 if (!esp->regs)
158 goto fail_unlink;
159
160 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
161 if (!res)
162 goto fail_unlink;
163
164 esp->dma_regs = (void __iomem *)res->start;
165
166 esp->command_block = dma_alloc_coherent(esp->dev, 16,
167 &esp->command_block_dma,
168 GFP_KERNEL);
169 if (!esp->command_block)
170 goto fail_unmap_regs;
171
172 host->irq = platform_get_irq(dev, 0);
173 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
174 if (err < 0)
175 goto fail_unmap_command_block;
176
177 esp->scsi_id = 7;
178 esp->host->this_id = esp->scsi_id;
179 esp->scsi_id_mask = (1 << esp->scsi_id);
180 esp->cfreq = 40000000;
181
182 dev_set_drvdata(&dev->dev, esp);
183
184 err = scsi_esp_register(esp, &dev->dev);
185 if (err)
186 goto fail_free_irq;
187
188 return 0;
189
190fail_free_irq:
191 free_irq(host->irq, esp);
192fail_unmap_command_block:
193 dma_free_coherent(esp->dev, 16,
194 esp->command_block,
195 esp->command_block_dma);
196fail_unmap_regs:
197fail_unlink:
198 scsi_host_put(host);
199fail:
200 return err;
266} 201}
267 202
268static void dma_advance_sg (struct scsi_cmnd *sp) 203static int __devexit esp_jazz_remove(struct platform_device *dev)
269{ 204{
270 sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address); 205 struct esp *esp = dev_get_drvdata(&dev->dev);
206 unsigned int irq = esp->host->irq;
207
208 scsi_esp_unregister(esp);
209
210 free_irq(irq, esp);
211 dma_free_coherent(esp->dev, 16,
212 esp->command_block,
213 esp->command_block_dma);
214
215 scsi_host_put(esp->host);
216
217 return 0;
271} 218}
272 219
273#define JAZZ_HDC_LED 0xe000d100 /* FIXME, find correct address */ 220static struct platform_driver esp_jazz_driver = {
221 .probe = esp_jazz_probe,
222 .remove = __devexit_p(esp_jazz_remove),
223 .driver = {
224 .name = "jazz_esp",
225 },
226};
274 227
275static void dma_led_off(struct NCR_ESP *esp) 228static int __init jazz_esp_init(void)
276{ 229{
277#if 0 230 return platform_driver_register(&esp_jazz_driver);
278 *(unsigned char *)JAZZ_HDC_LED = 0;
279#endif
280} 231}
281 232
282static void dma_led_on(struct NCR_ESP *esp) 233static void __exit jazz_esp_exit(void)
283{ 234{
284#if 0 235 platform_driver_unregister(&esp_jazz_driver);
285 *(unsigned char *)JAZZ_HDC_LED = 1;
286#endif
287} 236}
288 237
289static struct scsi_host_template driver_template = { 238MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
290 .proc_name = "jazz_esp", 239MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
291 .proc_info = esp_proc_info, 240MODULE_LICENSE("GPL");
292 .name = "ESP 100/100a/200", 241MODULE_VERSION(DRV_VERSION);
293 .detect = jazz_esp_detect, 242
294 .slave_alloc = esp_slave_alloc, 243module_init(jazz_esp_init);
295 .slave_destroy = esp_slave_destroy, 244module_exit(jazz_esp_exit);
296 .release = jazz_esp_release,
297 .info = esp_info,
298 .queuecommand = esp_queue,
299 .eh_abort_handler = esp_abort,
300 .eh_bus_reset_handler = esp_reset,
301 .can_queue = 7,
302 .this_id = 7,
303 .sg_tablesize = SG_ALL,
304 .cmd_per_lun = 1,
305 .use_clustering = DISABLE_CLUSTERING,
306};
307#include "scsi_module.c"
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index 3b2e1a53e6e2..d953d43fe2e6 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -4,6 +4,7 @@
4 * 4 *
5 */ 5 */
6 6
7#include <linux/completion.h>
7#include <linux/kernel.h> 8#include <linux/kernel.h>
8#include <linux/delay.h> 9#include <linux/delay.h>
9#include <linux/types.h> 10#include <linux/types.h>
@@ -50,16 +51,10 @@ static struct ctrl_inquiry {
50} *fcs __initdata; 51} *fcs __initdata;
51static int fcscount __initdata = 0; 52static int fcscount __initdata = 0;
52static atomic_t fcss __initdata = ATOMIC_INIT(0); 53static atomic_t fcss __initdata = ATOMIC_INIT(0);
53DECLARE_MUTEX_LOCKED(fc_sem); 54static DECLARE_COMPLETION(fc_detect_complete);
54 55
55static int pluto_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd); 56static int pluto_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd);
56 57
57static void __init pluto_detect_timeout(unsigned long data)
58{
59 PLND(("Timeout\n"))
60 up(&fc_sem);
61}
62
63static void __init pluto_detect_done(Scsi_Cmnd *SCpnt) 58static void __init pluto_detect_done(Scsi_Cmnd *SCpnt)
64{ 59{
65 /* Do nothing */ 60 /* Do nothing */
@@ -69,7 +64,7 @@ static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt)
69{ 64{
70 PLND(("Detect done %08lx\n", (long)SCpnt)) 65 PLND(("Detect done %08lx\n", (long)SCpnt))
71 if (atomic_dec_and_test (&fcss)) 66 if (atomic_dec_and_test (&fcss))
72 up(&fc_sem); 67 complete(&fc_detect_complete);
73} 68}
74 69
75int pluto_slave_configure(struct scsi_device *device) 70int pluto_slave_configure(struct scsi_device *device)
@@ -96,7 +91,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
96 int i, retry, nplutos; 91 int i, retry, nplutos;
97 fc_channel *fc; 92 fc_channel *fc;
98 struct scsi_device dev; 93 struct scsi_device dev;
99 DEFINE_TIMER(fc_timer, pluto_detect_timeout, 0, 0);
100 94
101 tpnt->proc_name = "pluto"; 95 tpnt->proc_name = "pluto";
102 fcscount = 0; 96 fcscount = 0;
@@ -187,15 +181,11 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
187 } 181 }
188 } 182 }
189 183
190 fc_timer.expires = jiffies + 10 * HZ; 184 wait_for_completion_timeout(&fc_detect_complete, 10 * HZ);
191 add_timer(&fc_timer);
192
193 down(&fc_sem);
194 PLND(("Woken up\n")) 185 PLND(("Woken up\n"))
195 if (!atomic_read(&fcss)) 186 if (!atomic_read(&fcss))
196 break; /* All fc channels have answered us */ 187 break; /* All fc channels have answered us */
197 } 188 }
198 del_timer_sync(&fc_timer);
199 189
200 PLND(("Finished search\n")) 190 PLND(("Finished search\n"))
201 for (i = 0, nplutos = 0; i < fcscount; i++) { 191 for (i = 0, nplutos = 0; i < fcscount; i++) {
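
For readers less familiar with the completion API this hunk converts to, here is a minimal, generic sketch of the same pattern (not pluto-specific; all names are invented): one side signals the completion from its callback, the other waits with a bounded timeout, exactly as pluto_detect() now does with fc_detect_complete.

#include <linux/completion.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(example_done);

/* Runs from interrupt or callback context once the work finishes. */
static void example_callback(void)
{
	complete(&example_done);
}

/* Waits at most 10 seconds; returns 0 on success, -ETIMEDOUT otherwise. */
static int example_wait(void)
{
	if (!wait_for_completion_timeout(&example_done, 10 * HZ))
		return -ETIMEDOUT;
	return 0;
}
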
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index e35d9ab359f1..b45ba5392dd3 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -30,9 +30,9 @@ void
30sunserial_console_termios(struct console *con) 30sunserial_console_termios(struct console *con)
31{ 31{
32 char mode[16], buf[16], *s; 32 char mode[16], buf[16], *s;
33 char *mode_prop = "ttyX-mode"; 33 char mode_prop[] = "ttyX-mode";
34 char *cd_prop = "ttyX-ignore-cd"; 34 char cd_prop[] = "ttyX-ignore-cd";
35 char *dtr_prop = "ttyX-rts-dtr-off"; 35 char dtr_prop[] = "ttyX-rts-dtr-off";
36 char *ssp_console_modes_prop = "ssp-console-modes"; 36 char *ssp_console_modes_prop = "ssp-console-modes";
37 int baud, bits, stop, cflag; 37 int baud, bits, stop, cflag;
38 char parity; 38 char parity;
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 0985193dc57d..15b6e1cb040b 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1239,7 +1239,7 @@ static inline struct console *SUNZILOG_CONSOLE(void)
1239#define SUNZILOG_CONSOLE() (NULL) 1239#define SUNZILOG_CONSOLE() (NULL)
1240#endif 1240#endif
1241 1241
1242static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel) 1242static void __devinit sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel)
1243{ 1243{
1244 int baud, brg; 1244 int baud, brg;
1245 1245
@@ -1259,7 +1259,7 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
1259} 1259}
1260 1260
1261#ifdef CONFIG_SERIO 1261#ifdef CONFIG_SERIO
1262static void __init sunzilog_register_serio(struct uart_sunzilog_port *up) 1262static void __devinit sunzilog_register_serio(struct uart_sunzilog_port *up)
1263{ 1263{
1264 struct serio *serio = &up->serio; 1264 struct serio *serio = &up->serio;
1265 1265
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 731fa56e0c37..bdca5416d8b0 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) 4 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
5 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
5 * 6 *
6 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based 7 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
7 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. 8 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
@@ -10,11 +11,48 @@
10#ifndef __ARCH_SPARC_ATOMIC__ 11#ifndef __ARCH_SPARC_ATOMIC__
11#define __ARCH_SPARC_ATOMIC__ 12#define __ARCH_SPARC_ATOMIC__
12 13
14#include <linux/types.h>
13 15
14typedef struct { volatile int counter; } atomic_t; 16typedef struct { volatile int counter; } atomic_t;
15 17
16#ifdef __KERNEL__ 18#ifdef __KERNEL__
17 19
20/* Emulate cmpxchg() the same way we emulate atomics,
21 * by hashing the object address and indexing into an array
22 * of spinlocks to get a bit of performance...
23 *
24 * See arch/sparc/lib/atomic32.c for implementation.
25 *
26 * Cribbed from <asm-parisc/atomic.h>
27 */
28#define __HAVE_ARCH_CMPXCHG 1
29
30/* bug catcher for when unsupported size is used - won't link */
31extern void __cmpxchg_called_with_bad_pointer(void);
32/* we only need to support cmpxchg of a u32 on sparc */
33extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
34
35/* don't worry...optimizer will get rid of most of this */
36static __inline__ unsigned long
37__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
38{
39 switch(size) {
40 case 4:
41 return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
42 default:
43 __cmpxchg_called_with_bad_pointer();
44 break;
45 }
46 return old;
47}
48
49#define cmpxchg(ptr,o,n) ({ \
50 __typeof__(*(ptr)) _o_ = (o); \
51 __typeof__(*(ptr)) _n_ = (n); \
52 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
53 (unsigned long)_n_, sizeof(*(ptr))); \
54})
55
18#define ATOMIC_INIT(i) { (i) } 56#define ATOMIC_INIT(i) { (i) }
19 57
20extern int __atomic_add_return(int, atomic_t *); 58extern int __atomic_add_return(int, atomic_t *);
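
A hedged usage sketch of the new cmpxchg() macro (not from the patch): a lock-free increment that retries until the compare-and-swap succeeds. On sparc32 every iteration lands in the hashed-spinlock emulation in arch/sparc/lib/atomic32.c.

/* Illustrative only; relies solely on the cmpxchg() macro defined above. */
static inline void example_atomic_inc_u32(volatile u32 *ctr)
{
	u32 old, new_;

	do {
		old = *ctr;
		new_ = old + 1;
	} while (cmpxchg(ctr, old, new_) != old);
}
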
diff --git a/include/asm-sparc64/bugs.h b/include/asm-sparc64/bugs.h
index 120422fdb02f..bf39d86c0c9e 100644
--- a/include/asm-sparc64/bugs.h
+++ b/include/asm-sparc64/bugs.h
@@ -1,9 +1,8 @@
1/* $Id: bugs.h,v 1.1 1996/12/26 13:25:20 davem Exp $ 1/* bugs.h: Sparc64 probes for various bugs.
2 * include/asm-sparc64/bugs.h: Sparc probes for various bugs.
3 * 2 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 3 * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
5 */ 4 */
6 5#include <asm/sstate.h>
7 6
8extern unsigned long loops_per_jiffy; 7extern unsigned long loops_per_jiffy;
9 8
@@ -12,4 +11,5 @@ static void __init check_bugs(void)
12#ifndef CONFIG_SMP 11#ifndef CONFIG_SMP
13 cpu_data(0).udelay_val = loops_per_jiffy; 12 cpu_data(0).udelay_val = loops_per_jiffy;
14#endif 13#endif
14 sstate_running();
15} 15}
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index e89922d6718c..03c385de7619 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -17,11 +17,11 @@
17typedef struct { 17typedef struct {
18 /* Dcache line 1 */ 18 /* Dcache line 1 */
19 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ 19 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
20 unsigned int __pad0_1; 20 unsigned int __pad0;
21 unsigned int __pad0_2;
22 unsigned int __pad1;
23 unsigned long clock_tick; /* %tick's per second */ 21 unsigned long clock_tick; /* %tick's per second */
24 unsigned long udelay_val; 22 unsigned long udelay_val;
23 unsigned int __pad1;
24 unsigned int __pad2;
25 25
26 /* Dcache line 2, rarely used */ 26 /* Dcache line 2, rarely used */
27 unsigned int dcache_size; 27 unsigned int dcache_size;
@@ -30,8 +30,8 @@ typedef struct {
30 unsigned int icache_line_size; 30 unsigned int icache_line_size;
31 unsigned int ecache_size; 31 unsigned int ecache_size;
32 unsigned int ecache_line_size; 32 unsigned int ecache_line_size;
33 int core_id;
33 unsigned int __pad3; 34 unsigned int __pad3;
34 unsigned int __pad4;
35} cpuinfo_sparc; 35} cpuinfo_sparc;
36 36
37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); 37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
@@ -76,12 +76,18 @@ struct trap_per_cpu {
76 76
77/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */ 77/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
78 unsigned int irq_worklist; 78 unsigned int irq_worklist;
79 unsigned int __pad1; 79 unsigned int cpu_mondo_qmask;
80 unsigned long __pad2[3]; 80 unsigned int dev_mondo_qmask;
81 unsigned int resum_qmask;
82 unsigned int nonresum_qmask;
83 unsigned int __pad2[3];
81} __attribute__((aligned(64))); 84} __attribute__((aligned(64)));
82extern struct trap_per_cpu trap_block[NR_CPUS]; 85extern struct trap_per_cpu trap_block[NR_CPUS];
83extern void init_cur_cpu_trap(struct thread_info *); 86extern void init_cur_cpu_trap(struct thread_info *);
84extern void setup_tba(void); 87extern void setup_tba(void);
88extern int ncpus_probed;
89
90extern unsigned long real_hard_smp_processor_id(void);
85 91
86struct cpuid_patch_entry { 92struct cpuid_patch_entry {
87 unsigned int addr; 93 unsigned int addr;
@@ -122,6 +128,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
122#define TRAP_PER_CPU_TSB_HUGE 0xd0 128#define TRAP_PER_CPU_TSB_HUGE 0xd0
123#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 129#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
124#define TRAP_PER_CPU_IRQ_WORKLIST 0xe0 130#define TRAP_PER_CPU_IRQ_WORKLIST 0xe0
131#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe4
132#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xe8
133#define TRAP_PER_CPU_RESUM_QMASK 0xec
134#define TRAP_PER_CPU_NONRESUM_QMASK 0xf0
125 135
126#define TRAP_BLOCK_SZ_SHIFT 8 136#define TRAP_BLOCK_SZ_SHIFT 8
127 137
@@ -192,7 +202,7 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
192 * the calculations done by the macro mid-stream. 202 * the calculations done by the macro mid-stream.
193 */ 203 */
194#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ 204#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
195 ldub [THR + TI_CPU], REG1; \ 205 lduh [THR + TI_CPU], REG1; \
196 sethi %hi(__per_cpu_shift), REG3; \ 206 sethi %hi(__per_cpu_shift), REG3; \
197 sethi %hi(__per_cpu_base), REG2; \ 207 sethi %hi(__per_cpu_base), REG2; \
198 ldx [REG3 + %lo(__per_cpu_shift)], REG3; \ 208 ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index a5558c87556d..5cdb1ff04838 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -73,6 +73,8 @@
73#define HV_ENOTSUPPORTED 13 /* Function not supported */ 73#define HV_ENOTSUPPORTED 13 /* Function not supported */
74#define HV_ENOMAP 14 /* No mapping found */ 74#define HV_ENOMAP 14 /* No mapping found */
75#define HV_ETOOMANY 15 /* Too many items specified */ 75#define HV_ETOOMANY 15 /* Too many items specified */
76#define HV_ECHANNEL 16 /* Invalid LDC channel */
77#define HV_EBUSY 17 /* Resource busy */
76 78
77/* mach_exit() 79/* mach_exit()
78 * TRAP: HV_FAST_TRAP 80 * TRAP: HV_FAST_TRAP
@@ -95,6 +97,10 @@
95 */ 97 */
96#define HV_FAST_MACH_EXIT 0x00 98#define HV_FAST_MACH_EXIT 0x00
97 99
100#ifndef __ASSEMBLY__
101extern void sun4v_mach_exit(unsigned long exit_core);
102#endif
103
98/* Domain services. */ 104/* Domain services. */
99 105
100/* mach_desc() 106/* mach_desc()
@@ -120,7 +126,13 @@
120 */ 126 */
121#define HV_FAST_MACH_DESC 0x01 127#define HV_FAST_MACH_DESC 0x01
122 128
123/* mach_exit() 129#ifndef __ASSEMBLY__
130extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
131 unsigned long buf_len,
132 unsigned long *real_buf_len);
133#endif
134
135/* mach_sir()
124 * TRAP: HV_FAST_TRAP 136 * TRAP: HV_FAST_TRAP
125 * FUNCTION: HV_FAST_MACH_SIR 137 * FUNCTION: HV_FAST_MACH_SIR
126 * ERRORS: This service does not return. 138 * ERRORS: This service does not return.
@@ -135,53 +147,66 @@
135 */ 147 */
136#define HV_FAST_MACH_SIR 0x02 148#define HV_FAST_MACH_SIR 0x02
137 149
138/* mach_set_soft_state() 150#ifndef __ASSEMBLY__
151extern void sun4v_mach_sir(void);
152#endif
153
154/* mach_set_watchdog()
139 * TRAP: HV_FAST_TRAP 155 * TRAP: HV_FAST_TRAP
140 * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE 156 * FUNCTION: HV_FAST_MACH_SET_WATCHDOG
141 * ARG0: software state 157 * ARG0: timeout in milliseconds
142 * ARG1: software state description pointer
143 * RET0: status 158 * RET0: status
144 * ERRORS: EINVAL software state not valid or software state 159 * RET1: time remaining in milliseconds
145 * description is not NULL terminated
146 * ENORADDR software state description pointer is not a
147 * valid real address
148 * EBADALIGNED software state description is not correctly
149 * aligned
150 * 160 *
151 * This allows the guest to report its soft state to the hypervisor. There 161 * A guest uses this API to set a watchdog timer. Once the guest has set
152 * are two primary components to this state. The first part states whether 162 * the timer, it must call the timer service again either to disable or
153 * the guest software is running or not. The second contains optional 163 * postpone the expiration. If the timer expires before being reset or
154 * details specific to the software. 164 * disabled, then the hypervisor takes a platform-specific action leading
165 * to guest termination within a bounded time period. The platform action
166 * may include recovery actions such as reporting the expiration to a
 167 * Service Processor, and/or automatically restarting the guest.
155 * 168 *
156 * The software state argument is defined below in HV_SOFT_STATE_*, and 169 * The 'timeout' parameter is specified in milliseconds, however the
157 * indicates whether the guest is operating normally or in a transitional 170 * implemented granularity is given by the 'watchdog-resolution'
158 * state. 171 * property in the 'platform' node of the guest's machine description.
172 * The largest allowed timeout value is specified by the
173 * 'watchdog-max-timeout' property of the 'platform' node.
159 * 174 *
160 * The software state description argument is a real address of a data buffer 175 * If the 'timeout' argument is not zero, the watchdog timer is set to
161 * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL 176 * expire after a minimum of 'timeout' milliseconds.
162 * terminated 7-bit ASCII string of up to 31 characters not including the
163 * NULL termination.
164 */
165#define HV_FAST_MACH_SET_SOFT_STATE 0x03
166#define HV_SOFT_STATE_NORMAL 0x01
167#define HV_SOFT_STATE_TRANSITION 0x02
168
169/* mach_get_soft_state()
170 * TRAP: HV_FAST_TRAP
171 * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE
172 * ARG0: software state description pointer
173 * RET0: status
174 * RET1: software state
175 * ERRORS: ENORADDR software state description pointer is not a
176 * valid real address
177 * EBADALIGNED software state description is not correctly
178 * aligned
179 * 177 *
180 * Retrieve the current value of the guest's software state. The rules 178 * If the 'timeout' argument is zero, the watchdog timer is disabled.
181 * for the software state pointer are the same as for mach_set_soft_state() 179 *
182 * above. 180 * If the 'timeout' value exceeds the value of the 'max-watchdog-timeout'
181 * property, the hypervisor leaves the watchdog timer state unchanged,
182 * and returns a status of EINVAL.
183 *
184 * The 'time remaining' return value is valid regardless of whether the
185 * return status is EOK or EINVAL. A non-zero return value indicates the
186 * number of milliseconds that were remaining until the timer was to expire.
187 * If less than one millisecond remains, the return value is '1'. If the
188 * watchdog timer was disabled at the time of the call, the return value is
189 * zero.
190 *
191 * If the hypervisor cannot support the exact timeout value requested, but
192 * can support a larger timeout value, the hypervisor may round the actual
 193 * timeout to a value larger than the requested timeout; consequently, the
194 * 'time remaining' return value may be larger than the previously requested
195 * timeout value.
196 *
197 * Any guest OS debugger should be aware that the watchdog service may be in
198 * use. Consequently, it is recommended that the watchdog service is
199 * disabled upon debugger entry (e.g. reaching a breakpoint), and then
200 * re-enabled upon returning to normal execution. The API has been designed
201 * with this in mind, and the 'time remaining' result of the disable call may
202 * be used directly as the timeout argument of the re-enable call.
183 */ 203 */
184#define HV_FAST_MACH_GET_SOFT_STATE 0x04 204#define HV_FAST_MACH_SET_WATCHDOG 0x05
205
206#ifndef __ASSEMBLY__
207extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
208 unsigned long *orig_timeout);
209#endif
185 210
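
The debugger recommendation above translates into a small save/restore pair. The sketch below is illustrative only (names invented, error handling dropped); it relies solely on the sun4v_mach_set_watchdog() wrapper declared in this patch.

static unsigned long example_saved_wd_timeout;

/* On debugger entry: disable the timer, remembering the time left. */
static void example_watchdog_suspend(void)
{
	sun4v_mach_set_watchdog(0, &example_saved_wd_timeout);
}

/* On debugger exit: re-arm with the remaining time, if it was running. */
static void example_watchdog_resume(void)
{
	unsigned long unused;

	if (example_saved_wd_timeout)
		sun4v_mach_set_watchdog(example_saved_wd_timeout, &unused);
}
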
186/* CPU services. 211/* CPU services.
187 * 212 *
@@ -206,8 +231,8 @@
206 * FUNCTION: HV_FAST_CPU_START 231 * FUNCTION: HV_FAST_CPU_START
207 * ARG0: CPU ID 232 * ARG0: CPU ID
208 * ARG1: PC 233 * ARG1: PC
209 * ARG1: RTBA 234 * ARG2: RTBA
210 * ARG1: target ARG0 235 * ARG3: target ARG0
211 * RET0: status 236 * RET0: status
212 * ERRORS: ENOCPU Invalid CPU ID 237 * ERRORS: ENOCPU Invalid CPU ID
213 * EINVAL Target CPU ID is not in the stopped state 238 * EINVAL Target CPU ID is not in the stopped state
@@ -224,6 +249,13 @@
224 */ 249 */
225#define HV_FAST_CPU_START 0x10 250#define HV_FAST_CPU_START 0x10
226 251
252#ifndef __ASSEMBLY__
253extern unsigned long sun4v_cpu_start(unsigned long cpuid,
254 unsigned long pc,
255 unsigned long rtba,
256 unsigned long arg0);
257#endif
258
227/* cpu_stop() 259/* cpu_stop()
228 * TRAP: HV_FAST_TRAP 260 * TRAP: HV_FAST_TRAP
229 * FUNCTION: HV_FAST_CPU_STOP 261 * FUNCTION: HV_FAST_CPU_STOP
@@ -245,6 +277,10 @@
245 */ 277 */
246#define HV_FAST_CPU_STOP 0x11 278#define HV_FAST_CPU_STOP 0x11
247 279
280#ifndef __ASSEMBLY__
281extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
282#endif
283
248/* cpu_yield() 284/* cpu_yield()
249 * TRAP: HV_FAST_TRAP 285 * TRAP: HV_FAST_TRAP
250 * FUNCTION: HV_FAST_CPU_YIELD 286 * FUNCTION: HV_FAST_CPU_YIELD
@@ -588,6 +624,11 @@ struct hv_fault_status {
588 */ 624 */
589#define HV_FAST_MMU_TSB_CTX0 0x20 625#define HV_FAST_MMU_TSB_CTX0 0x20
590 626
627#ifndef __ASSEMBLY__
628extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
629 unsigned long tsb_desc_ra);
630#endif
631
591/* mmu_tsb_ctxnon0() 632/* mmu_tsb_ctxnon0()
592 * TRAP: HV_FAST_TRAP 633 * TRAP: HV_FAST_TRAP
593 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0 634 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0
@@ -694,6 +735,13 @@ struct hv_fault_status {
694 */ 735 */
695#define HV_FAST_MMU_MAP_PERM_ADDR 0x25 736#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
696 737
738#ifndef __ASSEMBLY__
739extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
740 unsigned long set_to_zero,
741 unsigned long tte,
742 unsigned long flags);
743#endif
744
697/* mmu_fault_area_conf() 745/* mmu_fault_area_conf()
698 * TRAP: HV_FAST_TRAP 746 * TRAP: HV_FAST_TRAP
699 * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF 747 * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF
@@ -892,6 +940,10 @@ struct hv_fault_status {
892 */ 940 */
893#define HV_FAST_TOD_GET 0x50 941#define HV_FAST_TOD_GET 0x50
894 942
943#ifndef __ASSEMBLY__
944extern unsigned long sun4v_tod_get(unsigned long *time);
945#endif
946
895/* tod_set() 947/* tod_set()
896 * TRAP: HV_FAST_TRAP 948 * TRAP: HV_FAST_TRAP
897 * FUNCTION: HV_FAST_TOD_SET 949 * FUNCTION: HV_FAST_TOD_SET
@@ -905,6 +957,10 @@ struct hv_fault_status {
905 */ 957 */
906#define HV_FAST_TOD_SET 0x51 958#define HV_FAST_TOD_SET 0x51
907 959
960#ifndef __ASSEMBLY__
961extern unsigned long sun4v_tod_set(unsigned long time);
962#endif
963
908/* Console services */ 964/* Console services */
909 965
910/* con_getchar() 966/* con_getchar()
@@ -988,6 +1044,59 @@ extern unsigned long sun4v_con_write(unsigned long buffer,
988 unsigned long *bytes_written); 1044 unsigned long *bytes_written);
989#endif 1045#endif
990 1046
1047/* mach_set_soft_state()
1048 * TRAP: HV_FAST_TRAP
1049 * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE
1050 * ARG0: software state
1051 * ARG1: software state description pointer
1052 * RET0: status
1053 * ERRORS: EINVAL software state not valid or software state
1054 * description is not NULL terminated
1055 * ENORADDR software state description pointer is not a
1056 * valid real address
1057 * EBADALIGNED software state description is not correctly
1058 * aligned
1059 *
 1060 * This allows the guest to report its soft state to the hypervisor. There
 1061 * are two primary components to this state. The first part states whether
 1062 * the guest software is running or not. The second contains optional
1063 * details specific to the software.
1064 *
1065 * The software state argument is defined below in HV_SOFT_STATE_*, and
1066 * indicates whether the guest is operating normally or in a transitional
1067 * state.
1068 *
1069 * The software state description argument is a real address of a data buffer
1070 * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL
1071 * terminated 7-bit ASCII string of up to 31 characters not including the
1072 * NULL termination.
1073 */
1074#define HV_FAST_MACH_SET_SOFT_STATE 0x70
1075#define HV_SOFT_STATE_NORMAL 0x01
1076#define HV_SOFT_STATE_TRANSITION 0x02
1077
1078#ifndef __ASSEMBLY__
1079extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
1080 unsigned long msg_string_ra);
1081#endif
1082
1083/* mach_get_soft_state()
1084 * TRAP: HV_FAST_TRAP
1085 * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE
1086 * ARG0: software state description pointer
1087 * RET0: status
1088 * RET1: software state
1089 * ERRORS: ENORADDR software state description pointer is not a
1090 * valid real address
1091 * EBADALIGNED software state description is not correctly
1092 * aligned
1093 *
1094 * Retrieve the current value of the guest's software state. The rules
1095 * for the software state pointer are the same as for mach_set_soft_state()
1096 * above.
1097 */
1098#define HV_FAST_MACH_GET_SOFT_STATE 0x71
1099
991/* Trap trace services. 1100/* Trap trace services.
992 * 1101 *
993 * The hypervisor provides a trap tracing capability for privileged 1102 * The hypervisor provides a trap tracing capability for privileged
@@ -1379,6 +1488,113 @@ extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
1379extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); 1488extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
1380#endif 1489#endif
1381 1490
1491/* vintr_get_cookie()
1492 * TRAP: HV_FAST_TRAP
1493 * FUNCTION: HV_FAST_VINTR_GET_COOKIE
1494 * ARG0: device handle
1495 * ARG1: device ino
1496 * RET0: status
1497 * RET1: cookie
1498 */
1499#define HV_FAST_VINTR_GET_COOKIE 0xa7
1500
1501/* vintr_set_cookie()
1502 * TRAP: HV_FAST_TRAP
1503 * FUNCTION: HV_FAST_VINTR_SET_COOKIE
1504 * ARG0: device handle
1505 * ARG1: device ino
1506 * ARG2: cookie
1507 * RET0: status
1508 */
1509#define HV_FAST_VINTR_SET_COOKIE 0xa8
1510
1511/* vintr_get_valid()
1512 * TRAP: HV_FAST_TRAP
1513 * FUNCTION: HV_FAST_VINTR_GET_VALID
1514 * ARG0: device handle
1515 * ARG1: device ino
1516 * RET0: status
1517 * RET1: valid state
1518 */
1519#define HV_FAST_VINTR_GET_VALID 0xa9
1520
1521/* vintr_set_valid()
1522 * TRAP: HV_FAST_TRAP
1523 * FUNCTION: HV_FAST_VINTR_SET_VALID
1524 * ARG0: device handle
1525 * ARG1: device ino
1526 * ARG2: valid state
1527 * RET0: status
1528 */
1529#define HV_FAST_VINTR_SET_VALID 0xaa
1530
1531/* vintr_get_state()
1532 * TRAP: HV_FAST_TRAP
1533 * FUNCTION: HV_FAST_VINTR_GET_STATE
1534 * ARG0: device handle
1535 * ARG1: device ino
1536 * RET0: status
1537 * RET1: state
1538 */
1539#define HV_FAST_VINTR_GET_STATE 0xab
1540
1541/* vintr_set_state()
1542 * TRAP: HV_FAST_TRAP
1543 * FUNCTION: HV_FAST_VINTR_SET_STATE
1544 * ARG0: device handle
1545 * ARG1: device ino
1546 * ARG2: state
1547 * RET0: status
1548 */
1549#define HV_FAST_VINTR_SET_STATE 0xac
1550
1551/* vintr_get_target()
1552 * TRAP: HV_FAST_TRAP
1553 * FUNCTION: HV_FAST_VINTR_GET_TARGET
1554 * ARG0: device handle
1555 * ARG1: device ino
1556 * RET0: status
1557 * RET1: cpuid
1558 */
1559#define HV_FAST_VINTR_GET_TARGET 0xad
1560
1561/* vintr_set_target()
1562 * TRAP: HV_FAST_TRAP
1563 * FUNCTION: HV_FAST_VINTR_SET_TARGET
1564 * ARG0: device handle
1565 * ARG1: device ino
1566 * ARG2: cpuid
1567 * RET0: status
1568 */
1569#define HV_FAST_VINTR_SET_TARGET 0xae
1570
1571#ifndef __ASSEMBLY__
1572extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
1573 unsigned long dev_ino,
1574 unsigned long *cookie);
1575extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
1576 unsigned long dev_ino,
1577 unsigned long cookie);
1578extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
1579 unsigned long dev_ino,
1580 unsigned long *valid);
1581extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
1582 unsigned long dev_ino,
1583 unsigned long valid);
1584extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
1585 unsigned long dev_ino,
1586 unsigned long *state);
1587extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
1588 unsigned long dev_ino,
1589 unsigned long state);
1590extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
1591 unsigned long dev_ino,
1592 unsigned long *cpuid);
1593extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
1594 unsigned long dev_ino,
1595 unsigned long cpuid);
1596#endif
1597
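
A possible combination of the new cookie-interrupt wrappers, purely as an illustration: point the interrupt at a cpu, then mark it valid. The literal 1 used for the valid state is an assumption of this sketch, not something specified by the patch.

static unsigned long example_route_vintr(unsigned long dev_handle,
					 unsigned long dev_ino,
					 unsigned long cpuid)
{
	unsigned long hv_err;

	hv_err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (hv_err)
		return hv_err;

	return sun4v_vintr_set_valid(dev_handle, dev_ino, 1);
}
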
1382/* PCI IO services. 1598/* PCI IO services.
1383 * 1599 *
1384 * See the terminology descriptions in the device interrupt services 1600 * See the terminology descriptions in the device interrupt services
@@ -2037,6 +2253,346 @@ extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cp
2037 */ 2253 */
2038#define HV_FAST_PCI_MSG_SETVALID 0xd3 2254#define HV_FAST_PCI_MSG_SETVALID 0xd3
2039 2255
2256/* Logical Domain Channel services. */
2257
2258#define LDC_CHANNEL_DOWN 0
2259#define LDC_CHANNEL_UP 1
2260#define LDC_CHANNEL_RESETTING 2
2261
2262/* ldc_tx_qconf()
2263 * TRAP: HV_FAST_TRAP
2264 * FUNCTION: HV_FAST_LDC_TX_QCONF
2265 * ARG0: channel ID
2266 * ARG1: real address base of queue
2267 * ARG2: num entries in queue
2268 * RET0: status
2269 *
2270 * Configure transmit queue for the LDC endpoint specified by the
2271 * given channel ID, to be placed at the given real address, and
2272 * be of the given num entries. Num entries must be a power of two.
2273 * The real address base of the queue must be aligned on the queue
2274 * size. Each queue entry is 64-bytes, so for example, a 32 entry
2275 * queue must be aligned on a 2048 byte real address boundary.
2276 *
2277 * Upon configuration of a valid transmit queue the head and tail
2278 * pointers are set to a hypervisor specific identical value indicating
2279 * that the queue initially is empty.
2280 *
2281 * The endpoint's transmit queue is un-configured if num entries is zero.
2282 *
2283 * The maximum number of entries for each queue for a specific cpu may be
2284 * determined from the machine description. A transmit queue may be
2285 * specified even in the event that the LDC is down (peer endpoint has no
2286 * receive queue specified). Transmission will begin as soon as the peer
2287 * endpoint defines a receive queue.
2288 *
2289 * It is recommended that a guest wait for a transmit queue to empty prior
 2290 * to reconfiguring it, or un-configuring it. Re- or un-configuring of a
2291 * non-empty transmit queue behaves exactly as defined above, however it
2292 * is undefined as to how many of the pending entries in the original queue
2293 * will be delivered prior to the re-configuration taking effect.
2294 * Furthermore, as the queue configuration causes a reset of the head and
2295 * tail pointers there is no way for a guest to determine how many entries
2296 * have been sent after the configuration operation.
2297 */
2298#define HV_FAST_LDC_TX_QCONF 0xe0
2299
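
To make the alignment rule concrete: with 64-byte entries, a queue of N entries (N a power of two) occupies N * 64 bytes and its real address must be aligned on that size. The sketch below is not from the patch; the constant and error choice are arbitrary, and the page-allocator memory is assumed to satisfy the alignment because the queue size works out to a power-of-two number of pages.

#define LDC_EXAMPLE_NENTRIES	128	/* must be a power of two */

static unsigned long example_ldc_tx_setup(unsigned long channel)
{
	unsigned long size = LDC_EXAMPLE_NENTRIES * 64;
	unsigned long q = __get_free_pages(GFP_KERNEL, get_order(size));

	if (!q)
		return HV_ENOMAP;	/* arbitrary error for the sketch */

	/* The hypervisor expects the queue's real address. */
	return sun4v_ldc_tx_qconf(channel, __pa(q), LDC_EXAMPLE_NENTRIES);
}
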
2300/* ldc_tx_qinfo()
2301 * TRAP: HV_FAST_TRAP
2302 * FUNCTION: HV_FAST_LDC_TX_QINFO
2303 * ARG0: channel ID
2304 * RET0: status
2305 * RET1: real address base of queue
2306 * RET2: num entries in queue
2307 *
2308 * Return the configuration info for the transmit queue of LDC endpoint
2309 * defined by the given channel ID. The real address is the currently
2310 * defined real address base of the defined queue, and num entries is the
2311 * size of the queue in terms of number of entries.
2312 *
2313 * If the specified channel ID is a valid endpoint number, but no transmit
2314 * queue has been defined this service will return success, but with num
2315 * entries set to zero and the real address will have an undefined value.
2316 */
2317#define HV_FAST_LDC_TX_QINFO 0xe1
2318
2319/* ldc_tx_get_state()
2320 * TRAP: HV_FAST_TRAP
2321 * FUNCTION: HV_FAST_LDC_TX_GET_STATE
2322 * ARG0: channel ID
2323 * RET0: status
2324 * RET1: head offset
2325 * RET2: tail offset
2326 * RET3: channel state
2327 *
2328 * Return the transmit state, and the head and tail queue pointers, for
2329 * the transmit queue of the LDC endpoint defined by the given channel ID.
2330 * The head and tail values are the byte offset of the head and tail
2331 * positions of the transmit queue for the specified endpoint.
2332 */
2333#define HV_FAST_LDC_TX_GET_STATE 0xe2
2334
2335/* ldc_tx_set_qtail()
2336 * TRAP: HV_FAST_TRAP
2337 * FUNCTION: HV_FAST_LDC_TX_SET_QTAIL
2338 * ARG0: channel ID
2339 * ARG1: tail offset
2340 * RET0: status
2341 *
2342 * Update the tail pointer for the transmit queue associated with the LDC
2343 * endpoint defined by the given channel ID. The tail offset specified
2344 * must be aligned on a 64 byte boundary, and calculated so as to increase
2345 * the number of pending entries on the transmit queue. Any attempt to
 2346 * decrease the number of pending transmit queue entries is considered
2347 * an invalid tail offset and will result in an EINVAL error.
2348 *
2349 * Since the tail of the transmit queue may not be moved backwards, the
2350 * transmit queue may be flushed by configuring a new transmit queue,
2351 * whereupon the hypervisor will configure the initial transmit head and
2352 * tail pointers to be equal.
2353 */
2354#define HV_FAST_LDC_TX_SET_QTAIL 0xe3
2355
2356/* ldc_rx_qconf()
2357 * TRAP: HV_FAST_TRAP
2358 * FUNCTION: HV_FAST_LDC_RX_QCONF
2359 * ARG0: channel ID
2360 * ARG1: real address base of queue
2361 * ARG2: num entries in queue
2362 * RET0: status
2363 *
2364 * Configure receive queue for the LDC endpoint specified by the
2365 * given channel ID, to be placed at the given real address, and
2366 * be of the given num entries. Num entries must be a power of two.
2367 * The real address base of the queue must be aligned on the queue
2368 * size. Each queue entry is 64-bytes, so for example, a 32 entry
2369 * queue must be aligned on a 2048 byte real address boundary.
2370 *
 2371 * The endpoint's receive queue is un-configured if num entries is zero.
2372 *
2373 * If a valid receive queue is specified for a local endpoint the LDC is
2374 * in the up state for the purpose of transmission to this endpoint.
2375 *
2376 * The maximum number of entries for each queue for a specific cpu may be
2377 * determined from the machine description.
2378 *
2379 * As receive queue configuration causes a reset of the queue's head and
 2380 * tail pointers there is no way for a guest to determine how many entries
 2381 * have been received between a preceding ldc_rx_get_state() API call
 2382 * and the completion of the configuration operation. It should be noted
 2383 * that datagram delivery is not guaranteed via domain channels anyway,
2384 * and therefore any higher protocol should be resilient to datagram
2385 * loss if necessary. However, to overcome this specific race potential
2386 * it is recommended, for example, that a higher level protocol be employed
2387 * to ensure either retransmission, or ensure that no datagrams are pending
2388 * on the peer endpoint's transmit queue prior to the configuration process.
2389 */
2390#define HV_FAST_LDC_RX_QCONF 0xe4
2391
2392/* ldc_rx_qinfo()
2393 * TRAP: HV_FAST_TRAP
2394 * FUNCTION: HV_FAST_LDC_RX_QINFO
2395 * ARG0: channel ID
2396 * RET0: status
2397 * RET1: real address base of queue
2398 * RET2: num entries in queue
2399 *
2400 * Return the configuration info for the receive queue of LDC endpoint
2401 * defined by the given channel ID. The real address is the currently
2402 * defined real address base of the defined queue, and num entries is the
2403 * size of the queue in terms of number of entries.
2404 *
2405 * If the specified channel ID is a valid endpoint number, but no receive
2406 * queue has been defined this service will return success, but with num
2407 * entries set to zero and the real address will have an undefined value.
2408 */
2409#define HV_FAST_LDC_RX_QINFO 0xe5
2410
2411/* ldc_rx_get_state()
2412 * TRAP: HV_FAST_TRAP
2413 * FUNCTION: HV_FAST_LDC_RX_GET_STATE
2414 * ARG0: channel ID
2415 * RET0: status
2416 * RET1: head offset
2417 * RET2: tail offset
2418 * RET3: channel state
2419 *
2420 * Return the receive state, and the head and tail queue pointers, for
2421 * the receive queue of the LDC endpoint defined by the given channel ID.
2422 * The head and tail values are the byte offset of the head and tail
2423 * positions of the receive queue for the specified endpoint.
2424 */
2425#define HV_FAST_LDC_RX_GET_STATE 0xe6
2426
2427/* ldc_rx_set_qhead()
2428 * TRAP: HV_FAST_TRAP
2429 * FUNCTION: HV_FAST_LDC_RX_SET_QHEAD
2430 * ARG0: channel ID
2431 * ARG1: head offset
2432 * RET0: status
2433 *
2434 * Update the head pointer for the receive queue associated with the LDC
2435 * endpoint defined by the given channel ID. The head offset specified
2436 * must be aligned on a 64 byte boundary, and calculated so as to decrease
2437 * the number of pending entries on the receive queue. Any attempt to
 2438 * increase the number of pending receive queue entries is considered
2439 * an invalid head offset and will result in an EINVAL error.
2440 *
2441 * The receive queue may be flushed by setting the head offset equal
2442 * to the current tail offset.
2443 */
2444#define HV_FAST_LDC_RX_SET_QHEAD 0xe7
2445
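
The "flush by setting head equal to tail" rule above can be expressed with the wrappers declared later in this header; the following is a hedged sketch, not code from the patch.

static unsigned long example_ldc_rx_flush(unsigned long channel)
{
	unsigned long head, tail, state, hv_err;

	hv_err = sun4v_ldc_rx_get_state(channel, &head, &tail, &state);
	if (hv_err)
		return hv_err;

	/* Discard everything pending: head := tail empties the queue. */
	return sun4v_ldc_rx_set_qhead(channel, tail);
}
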
2446/* LDC Map Table Entry. Each slot is defined by a translation table
2447 * entry, as specified by the LDC_MTE_* bits below, and a 64-bit
2448 * hypervisor invalidation cookie.
2449 */
2450#define LDC_MTE_PADDR 0x0fffffffffffe000 /* pa[55:13] */
2451#define LDC_MTE_COPY_W 0x0000000000000400 /* copy write access */
2452#define LDC_MTE_COPY_R 0x0000000000000200 /* copy read access */
2453#define LDC_MTE_IOMMU_W 0x0000000000000100 /* IOMMU write access */
2454#define LDC_MTE_IOMMU_R 0x0000000000000080 /* IOMMU read access */
2455#define LDC_MTE_EXEC 0x0000000000000040 /* execute */
 2456#define LDC_MTE_WRITE 0x0000000000000020 /* write */
 2457#define LDC_MTE_READ 0x0000000000000010 /* read */
2458#define LDC_MTE_SZALL 0x000000000000000f /* page size bits */
2459#define LDC_MTE_SZ16GB 0x0000000000000007 /* 16GB page */
2460#define LDC_MTE_SZ2GB 0x0000000000000006 /* 2GB page */
2461#define LDC_MTE_SZ256MB 0x0000000000000005 /* 256MB page */
2462#define LDC_MTE_SZ32MB 0x0000000000000004 /* 32MB page */
2463#define LDC_MTE_SZ4MB 0x0000000000000003 /* 4MB page */
2464#define LDC_MTE_SZ512K 0x0000000000000002 /* 512K page */
2465#define LDC_MTE_SZ64K 0x0000000000000001 /* 64K page */
2466#define LDC_MTE_SZ8K 0x0000000000000000 /* 8K page */
2467
2468#ifndef __ASSEMBLY__
2469struct ldc_mtable_entry {
2470 unsigned long mte;
2471 unsigned long cookie;
2472};
2473#endif
2474
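
Packing one map-table entry from these bits might look like the sketch below. It is illustrative only: the permission mix is an assumption, and the cookie is left zeroed because that field is managed by the hypervisor.

static void example_fill_mte(struct ldc_mtable_entry *ent,
			     unsigned long paddr)
{
	ent->mte = (paddr & LDC_MTE_PADDR) |
		   LDC_MTE_IOMMU_R | LDC_MTE_IOMMU_W |
		   LDC_MTE_SZ8K;		/* 8K page, IOMMU read/write */
	ent->cookie = 0UL;
}
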
2475/* ldc_set_map_table()
2476 * TRAP: HV_FAST_TRAP
2477 * FUNCTION: HV_FAST_LDC_SET_MAP_TABLE
2478 * ARG0: channel ID
2479 * ARG1: table real address
2480 * ARG2: num entries
2481 * RET0: status
2482 *
2483 * Register the MTE table at the given table real address, with the
2484 * specified num entries, for the LDC indicated by the given channel
2485 * ID.
2486 */
2487#define HV_FAST_LDC_SET_MAP_TABLE 0xea
2488
2489/* ldc_get_map_table()
2490 * TRAP: HV_FAST_TRAP
2491 * FUNCTION: HV_FAST_LDC_GET_MAP_TABLE
2492 * ARG0: channel ID
2493 * RET0: status
2494 * RET1: table real address
2495 * RET2: num entries
2496 *
2497 * Return the configuration of the current mapping table registered
2498 * for the given channel ID.
2499 */
2500#define HV_FAST_LDC_GET_MAP_TABLE 0xeb
2501
2502#define LDC_COPY_IN 0
2503#define LDC_COPY_OUT 1
2504
2505/* ldc_copy()
2506 * TRAP: HV_FAST_TRAP
2507 * FUNCTION: HV_FAST_LDC_COPY
2508 * ARG0: channel ID
2509 * ARG1: LDC_COPY_* direction code
2510 * ARG2: target real address
2511 * ARG3: local real address
2512 * ARG4: length in bytes
2513 * RET0: status
2514 * RET1: actual length in bytes
2515 */
2516#define HV_FAST_LDC_COPY 0xec
2517
2518#define LDC_MEM_READ 1
2519#define LDC_MEM_WRITE 2
2520#define LDC_MEM_EXEC 4
2521
2522/* ldc_mapin()
2523 * TRAP: HV_FAST_TRAP
2524 * FUNCTION: HV_FAST_LDC_MAPIN
2525 * ARG0: channel ID
2526 * ARG1: cookie
2527 * RET0: status
2528 * RET1: real address
2529 * RET2: LDC_MEM_* permissions
2530 */
2531#define HV_FAST_LDC_MAPIN 0xed
2532
2533/* ldc_unmap()
2534 * TRAP: HV_FAST_TRAP
2535 * FUNCTION: HV_FAST_LDC_UNMAP
2536 * ARG0: real address
2537 * RET0: status
2538 */
2539#define HV_FAST_LDC_UNMAP 0xee
2540
2541/* ldc_revoke()
2542 * TRAP: HV_FAST_TRAP
2543 * FUNCTION: HV_FAST_LDC_REVOKE
2544 * ARG0: cookie
2545 * ARG1: ldc_mtable_entry cookie
2546 * RET0: status
2547 */
2548#define HV_FAST_LDC_REVOKE 0xef
2549
2550#ifndef __ASSEMBLY__
2551extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
2552 unsigned long ra,
2553 unsigned long num_entries);
2554extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
2555 unsigned long *ra,
2556 unsigned long *num_entries);
2557extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
2558 unsigned long *head_off,
2559 unsigned long *tail_off,
2560 unsigned long *chan_state);
2561extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
2562 unsigned long tail_off);
2563extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
2564 unsigned long ra,
2565 unsigned long num_entries);
2566extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
2567 unsigned long *ra,
2568 unsigned long *num_entries);
2569extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
2570 unsigned long *head_off,
2571 unsigned long *tail_off,
2572 unsigned long *chan_state);
2573extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
2574 unsigned long head_off);
2575extern unsigned long sun4v_ldc_set_map_table(unsigned long channel,
2576 unsigned long ra,
2577 unsigned long num_entries);
2578extern unsigned long sun4v_ldc_get_map_table(unsigned long channel,
2579 unsigned long *ra,
2580 unsigned long *num_entries);
2581extern unsigned long sun4v_ldc_copy(unsigned long channel,
2582 unsigned long dir_code,
2583 unsigned long tgt_raddr,
2584 unsigned long lcl_raddr,
2585 unsigned long len,
2586 unsigned long *actual_len);
2587extern unsigned long sun4v_ldc_mapin(unsigned long channel,
2588 unsigned long cookie,
2589 unsigned long *ra,
2590 unsigned long *perm);
2591extern unsigned long sun4v_ldc_unmap(unsigned long ra);
2592extern unsigned long sun4v_ldc_revoke(unsigned long cookie,
2593 unsigned long mte_cookie);
2594#endif
2595
2040/* Performance counter services. */ 2596/* Performance counter services. */
2041 2597
2042#define HV_PERF_JBUS_PERF_CTRL_REG 0x00 2598#define HV_PERF_JBUS_PERF_CTRL_REG 0x00
@@ -2204,6 +2760,7 @@ extern void sun4v_hvapi_unregister(unsigned long group);
 extern int sun4v_hvapi_get(unsigned long group,
 			   unsigned long *major,
 			   unsigned long *minor);
+extern void sun4v_hvapi_init(void);
 #endif
 
 #endif /* !(_SPARC64_HYPERVISOR_H) */
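The LDC fast-trap wrappers added above are declarations only, so the following is a minimal sketch of how a sun4v guest driver might chain two of them. The helper name, channel handling, and error convention are hypothetical; only sun4v_ldc_set_map_table(), sun4v_ldc_copy(), LDC_COPY_OUT, and HV_EOK (defined earlier in this header) come from hypervisor.h.

/* Hypothetical helper: register an export map table for a channel and
 * push 'len' bytes to the peer.  Real addresses are assumed to have been
 * obtained elsewhere (e.g. via __pa() on suitably aligned buffers). */
#include <asm/hypervisor.h>

static int example_ldc_push(unsigned long channel,
			    unsigned long tbl_ra, unsigned long tbl_entries,
			    unsigned long peer_ra, unsigned long local_ra,
			    unsigned long len)
{
	unsigned long hv_err, actual;

	/* ldc_set_map_table(): ARG0 channel, ARG1 table RA, ARG2 entries. */
	hv_err = sun4v_ldc_set_map_table(channel, tbl_ra, tbl_entries);
	if (hv_err != HV_EOK)
		return -1;

	/* ldc_copy(): copy out of local memory into the target real address. */
	hv_err = sun4v_ldc_copy(channel, LDC_COPY_OUT,
				peer_ra, local_ra, len, &actual);
	if (hv_err != HV_EOK)
		return -1;

	/* RET1 is the actual byte count; the hypervisor may copy less. */
	return actual == len ? 0 : 1;
}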
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index 627e3396a5f0..9974c7b0aebc 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -32,7 +32,6 @@ enum die_val {
 	DIE_TRAP,
 	DIE_TRAP_TL1,
 	DIE_CALL,
-	DIE_PAGE_FAULT,
 };
 
 #endif
diff --git a/include/asm-sparc64/mdesc.h b/include/asm-sparc64/mdesc.h
new file mode 100644
index 000000000000..124eb8ca2378
--- /dev/null
+++ b/include/asm-sparc64/mdesc.h
@@ -0,0 +1,39 @@
+#ifndef _SPARC64_MDESC_H
+#define _SPARC64_MDESC_H
+
+#include <linux/types.h>
+#include <asm/prom.h>
+
+struct mdesc_node;
+struct mdesc_arc {
+	const char *name;
+	struct mdesc_node *arc;
+};
+
+struct mdesc_node {
+	const char *name;
+	u64 node;
+	unsigned int unique_id;
+	unsigned int num_arcs;
+	struct property *properties;
+	struct mdesc_node *hash_next;
+	struct mdesc_node *allnodes_next;
+	struct mdesc_arc arcs[0];
+};
+
+extern struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
+					       const char *name);
+#define md_for_each_node_by_name(__mn, __name) \
+	for (__mn = md_find_node_by_name(NULL, __name); __mn; \
+	     __mn = md_find_node_by_name(__mn, __name))
+
+extern struct property *md_find_property(const struct mdesc_node *mp,
+					 const char *name,
+					 int *lenp);
+extern const void *md_get_property(const struct mdesc_node *mp,
+				   const char *name,
+				   int *lenp);
+
+extern void sun4v_mdesc_init(void);
+
+#endif
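Since the new mdesc.h only declares the walker API, here is a small illustrative sketch of how a caller might iterate machine-description nodes. The node name "cpu" and property name "id" are assumptions about the MD contents, not something this header guarantees.

/* Sketch: report the "id" property of every "cpu" node in the machine
 * description.  Node and property names are illustrative. */
#include <linux/kernel.h>
#include <asm/mdesc.h>

static void example_walk_mdesc_cpus(void)
{
	struct mdesc_node *mp;

	md_for_each_node_by_name(mp, "cpu") {
		int len;
		const u64 *id = md_get_property(mp, "id", &len);

		if (id && len == sizeof(*id))
			printk(KERN_INFO "mdesc node %u: cpu id %llu\n",
			       mp->unique_id,
			       (unsigned long long) *id);
	}
}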
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 6a0da3b1695c..992f9f7a476c 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -316,11 +316,8 @@ extern int prom_setprop(int node, const char *prop_name, char *prop_value,
 
 extern int prom_pathtoinode(const char *path);
 extern int prom_inst2pkg(int);
-
-/* CPU probing helpers. */
-struct device_node;
-int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid);
-int cpu_find_by_mid(int mid, struct device_node **prom_node);
+extern int prom_service_exists(const char *service_name);
+extern void prom_sun4v_guest_soft_state(void);
 
 /* Client interface level routines. */
 extern void prom_set_trap_table(unsigned long tba);
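prom_service_exists() and prom_sun4v_guest_soft_state() replace the old CPU-probing helpers here; a hedged usage sketch follows. The service name string is only an example of the kind of OBP service a caller might test for before relying on it.

/* Sketch: probe for an optional OBP service before depending on it. */
#include <linux/kernel.h>
#include <asm/oplib.h>

static void example_probe_soft_state(void)
{
	if (prom_service_exists("SUNW,soft-state-supported"))
		prom_sun4v_guest_soft_state();
	else
		printk(KERN_INFO "OBP soft-state service not present\n");
}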
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index ced8cbde046d..88db872ce2f8 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -5,7 +5,8 @@
 
 #ifdef CONFIG_SMP
 
-extern void setup_per_cpu_areas(void);
+#define setup_per_cpu_areas() do { } while (0)
+extern void real_setup_per_cpu_areas(void);
 
 extern unsigned long __per_cpu_base;
 extern unsigned long __per_cpu_shift;
@@ -34,6 +35,7 @@ do { \
 } while (0)
 #else /* ! SMP */
 
+#define real_setup_per_cpu_areas() do { } while (0)
 #define DEFINE_PER_CPU(type, name) \
 	__typeof__(type) per_cpu__##name
 
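With setup_per_cpu_areas() turned into a no-op, the per-cpu allocation is evidently deferred to real_setup_per_cpu_areas(), presumably so it can run after CPU probing has settled the possible-CPU set. The surrounding boot function below is a hypothetical placeholder, not the kernel's actual call site.

/* Sketch of the intended ordering on sparc64 after this change. */
#include <linux/init.h>
#include <asm/percpu.h>

static void __init example_arch_cpu_setup(void)
{
	/* ... probe CPUs (OBP or machine description) first ... */
	real_setup_per_cpu_areas();	/* then size the per-cpu areas */
}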
diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h
index ddad5f99ac7f..b4df3042add0 100644
--- a/include/asm-sparc64/prom.h
+++ b/include/asm-sparc64/prom.h
@@ -90,6 +90,7 @@ extern struct device_node *of_find_compatible_node(struct device_node *from,
 					const char *type, const char *compat);
 extern struct device_node *of_find_node_by_path(const char *path);
 extern struct device_node *of_find_node_by_phandle(phandle handle);
+extern struct device_node *of_find_node_by_cpuid(int cpuid);
 extern struct device_node *of_get_parent(const struct device_node *node);
 extern struct device_node *of_get_next_child(const struct device_node *node,
 					struct device_node *prev);
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 869d16fb907b..f76e1492add5 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -41,7 +41,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
 extern int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-extern void smp_setup_cpu_possible_map(void);
+extern void smp_fill_in_sib_core_maps(void);
 extern unsigned char boot_cpu_id;
 
 #endif /* !(__ASSEMBLY__) */
@@ -49,7 +49,7 @@ extern unsigned char boot_cpu_id;
 #else
 
 #define hard_smp_processor_id() 0
-#define smp_setup_cpu_possible_map() do { } while (0)
+#define smp_fill_in_sib_core_maps() do { } while (0)
 #define boot_cpu_id (0)
 
 #endif /* !(CONFIG_SMP) */
diff --git a/include/asm-sparc64/sstate.h b/include/asm-sparc64/sstate.h
new file mode 100644
index 000000000000..a7c35dbcb281
--- /dev/null
+++ b/include/asm-sparc64/sstate.h
@@ -0,0 +1,13 @@
+#ifndef _SPARC64_SSTATE_H
+#define _SPARC64_SSTATE_H
+
+extern void sstate_booting(void);
+extern void sstate_running(void);
+extern void sstate_halt(void);
+extern void sstate_poweroff(void);
+extern void sstate_panic(void);
+extern void sstate_reboot(void);
+
+extern void sun4v_sstate_init(void);
+
+#endif /* _SPARC64_SSTATE_H */
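The header above is declarations only; the sketch below shows one plausible ordering of the soft-state hooks over a guest's lifetime. The wrapper function is hypothetical and the comments describe intent, not the exact strings reported to the hypervisor.

/* Sketch: soft-state notifications across the machine lifecycle. */
#include <asm/sstate.h>

static void example_sstate_lifecycle(void)
{
	sun4v_sstate_init();	/* one-time setup of the service */
	sstate_booting();	/* still coming up */
	sstate_running();	/* normal operation */
	/* ... later, exactly one terminal state applies ... */
	sstate_poweroff();	/* or sstate_halt()/sstate_reboot()/sstate_panic() */
}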
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index 2ebf7f27bf91..98252cd44dd6 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -38,8 +38,8 @@ struct thread_info {
 	/* D$ line 1 */
 	struct task_struct *task;
 	unsigned long flags;
-	__u8 cpu;
 	__u8 fpsaved[7];
+	__u8 pad;
 	unsigned long ksp;
 
 	/* D$ line 2 */
@@ -49,7 +49,7 @@ struct thread_info {
 	int preempt_count;	/* 0 => preemptable, <0 => BUG */
 	__u8 new_child;
 	__u8 syscall_noerror;
-	__u16 __pad;
+	__u16 cpu;
 
 	unsigned long *utraps;
 
@@ -83,8 +83,7 @@ struct thread_info {
 #define TI_CURRENT_DS	(TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS)
 #define TI_FPDEPTH	(TI_FLAGS + TI_FLAG_BYTE_FPDEPTH)
 #define TI_WSAVED	(TI_FLAGS + TI_FLAG_BYTE_WSAVED)
-#define TI_CPU		0x00000010
-#define TI_FPSAVED	0x00000011
+#define TI_FPSAVED	0x00000010
 #define TI_KSP		0x00000018
 #define TI_FAULT_ADDR	0x00000020
 #define TI_KREGS	0x00000028
@@ -92,6 +91,7 @@ struct thread_info {
 #define TI_PRE_COUNT	0x00000038
 #define TI_NEW_CHILD	0x0000003c
 #define TI_SYS_NOERROR	0x0000003d
+#define TI_CPU		0x0000003e
 #define TI_UTRAPS	0x00000040
 #define TI_REG_WINDOW	0x00000048
 #define TI_RWIN_SPTRS	0x000003c8
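The cpu field moves out of D$ line 1 and widens from __u8 to __u16, and the hand-maintained TI_* offsets move with it. A compile-time cross-check like the one below (illustrative, not part of this patch) is one way to keep such assembly offsets honest.

/* Sketch: assert that the hand-written TI_CPU offset matches the C layout. */
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <asm/thread_info.h>

static inline void example_check_thread_info_layout(void)
{
	BUILD_BUG_ON(offsetof(struct thread_info, cpu) != TI_CPU);
}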
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index 98a6c613589d..e0d450d600ec 100644
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -6,4 +6,7 @@
 
 #include <asm-generic/topology.h>
 
+#define topology_core_id(cpu)		(cpu_data(cpu).core_id)
+#define topology_thread_siblings(cpu)	(cpu_sibling_map[cpu])
+
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index ab55ffcb7bf4..76e4299dd9bc 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -271,7 +271,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
 	sethi		%hi(swapper_4m_tsb), REG1; \
 	or		REG1, %lo(swapper_4m_tsb), REG1; \
-	and		TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \
+	and		TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
 	sllx		REG2, 4, REG2; \
 	add		REG1, REG2, REG2; \
 	KTSB_LOAD_QUAD(REG2, REG3); \
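The one-line tsb.h change swaps the index mask from the 8K kernel TSB's entry count to the 4MB TSB's own entry count; when the two tables are sized differently, masking with the wrong count can index past the 4MB TSB (or only ever touch a slice of it). A standalone C illustration of the arithmetic, with made-up entry counts:

/* Illustration only: entry counts are hypothetical, not the kernel's. */
#include <stdio.h>

#define TSB4M_NENTRIES	256UL	/* pretend 4MB-page kernel TSB size */
#define TSB8K_NENTRIES	4096UL	/* pretend 8K-page kernel TSB size  */

/* Each TSB entry is 16 bytes, hence the shift by 4. */
static unsigned long tsb_byte_index(unsigned long tag, unsigned long nentries)
{
	return (tag & (nentries - 1)) << 4;
}

int main(void)
{
	unsigned long tag = 0x1234;

	printf("mask with 4M table: %#lx\n",
	       tsb_byte_index(tag, TSB4M_NENTRIES));	/* stays in bounds */
	printf("mask with 8K table: %#lx\n",
	       tsb_byte_index(tag, TSB8K_NENTRIES));	/* overruns a 256-entry table */
	return 0;
}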