Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/Kconfig               |   6
-rw-r--r--  arch/sparc64/kernel/Makefile       |   4
-rw-r--r--  arch/sparc64/kernel/devices.c      | 196
-rw-r--r--  arch/sparc64/kernel/entry.S        | 669
-rw-r--r--  arch/sparc64/kernel/head.S         |  31
-rw-r--r--  arch/sparc64/kernel/hvapi.c        | 192
-rw-r--r--  arch/sparc64/kernel/irq.c          |  83
-rw-r--r--  arch/sparc64/kernel/itlb_miss.S    |   4
-rw-r--r--  arch/sparc64/kernel/mdesc.c        | 619
-rw-r--r--  arch/sparc64/kernel/pci.c          |  54
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c    |   7
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c    |  54
-rw-r--r--  arch/sparc64/kernel/power.c        |   2
-rw-r--r--  arch/sparc64/kernel/process.c      |   4
-rw-r--r--  arch/sparc64/kernel/prom.c         | 148
-rw-r--r--  arch/sparc64/kernel/setup.c        |  21
-rw-r--r--  arch/sparc64/kernel/smp.c          | 155
-rw-r--r--  arch/sparc64/kernel/sstate.c       | 104
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S   |  30
-rw-r--r--  arch/sparc64/kernel/time.c         |  49
-rw-r--r--  arch/sparc64/kernel/traps.c        |  27
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S  |   4
-rw-r--r--  arch/sparc64/mm/init.c             |  90
-rw-r--r--  arch/sparc64/prom/misc.c           |  19
24 files changed, 2070 insertions(+), 502 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 831781cab271..bd00f89eed1e 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -147,10 +147,10 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-64)"
-	range 2 64
+	int "Maximum number of CPUs (2-1024)"
+	range 2 1024
 	depends on SMP
-	default "32"
+	default "64"
 
 source "drivers/cpufreq/Kconfig"
 
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 6bf6fb65bc20..d8d19093d12f 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -8,11 +8,11 @@ EXTRA_CFLAGS := -Werror
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		:= process.o setup.o cpu.o idprom.o \
-		   traps.o devices.o auxio.o una_asm.o \
+		   traps.o auxio.o una_asm.o \
 		   irq.o ptrace.o time.o sys_sparc.o signal.o \
 		   unaligned.o central.o pci.o starfire.o semaphore.o \
 		   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
-		   visemul.o prom.o of_device.o
+		   visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
 
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o pci_iommu.o \
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
deleted file mode 100644
index 0e03c8e218cd..000000000000
--- a/arch/sparc64/kernel/devices.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/* devices.c: Initial scan of the prom device tree for important
2 * Sparc device nodes which we need to find.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/kernel.h>
8#include <linux/threads.h>
9#include <linux/init.h>
10#include <linux/ioport.h>
11#include <linux/string.h>
12#include <linux/spinlock.h>
13#include <linux/errno.h>
14#include <linux/bootmem.h>
15
16#include <asm/page.h>
17#include <asm/oplib.h>
18#include <asm/system.h>
19#include <asm/smp.h>
20#include <asm/spitfire.h>
21#include <asm/timer.h>
22#include <asm/cpudata.h>
23
24/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
25 * operations in asm/ns87303.h
26 */
27DEFINE_SPINLOCK(ns87303_lock);
28
29extern void cpu_probe(void);
30extern void central_probe(void);
31
32static const char *cpu_mid_prop(void)
33{
34 if (tlb_type == spitfire)
35 return "upa-portid";
36 return "portid";
37}
38
39static int get_cpu_mid(struct device_node *dp)
40{
41 struct property *prop;
42
43 if (tlb_type == hypervisor) {
44 struct linux_prom64_registers *reg;
45 int len;
46
47 prop = of_find_property(dp, "cpuid", &len);
48 if (prop && len == 4)
49 return *(int *) prop->value;
50
51 prop = of_find_property(dp, "reg", NULL);
52 reg = prop->value;
53 return (reg[0].phys_addr >> 32) & 0x0fffffffUL;
54 } else {
55 const char *prop_name = cpu_mid_prop();
56
57 prop = of_find_property(dp, prop_name, NULL);
58 if (prop)
59 return *(int *) prop->value;
60 return 0;
61 }
62}
63
64static int check_cpu_node(struct device_node *dp, int *cur_inst,
65 int (*compare)(struct device_node *, int, void *),
66 void *compare_arg,
67 struct device_node **dev_node, int *mid)
68{
69 if (!compare(dp, *cur_inst, compare_arg)) {
70 if (dev_node)
71 *dev_node = dp;
72 if (mid)
73 *mid = get_cpu_mid(dp);
74 return 0;
75 }
76
77 (*cur_inst)++;
78
79 return -ENODEV;
80}
81
82static int __cpu_find_by(int (*compare)(struct device_node *, int, void *),
83 void *compare_arg,
84 struct device_node **dev_node, int *mid)
85{
86 struct device_node *dp;
87 int cur_inst;
88
89 cur_inst = 0;
90 for_each_node_by_type(dp, "cpu") {
91 int err = check_cpu_node(dp, &cur_inst,
92 compare, compare_arg,
93 dev_node, mid);
94 if (err == 0)
95 return 0;
96 }
97
98 return -ENODEV;
99}
100
101static int cpu_instance_compare(struct device_node *dp, int instance, void *_arg)
102{
103 int desired_instance = (int) (long) _arg;
104
105 if (instance == desired_instance)
106 return 0;
107 return -ENODEV;
108}
109
110int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid)
111{
112 return __cpu_find_by(cpu_instance_compare, (void *)(long)instance,
113 dev_node, mid);
114}
115
116static int cpu_mid_compare(struct device_node *dp, int instance, void *_arg)
117{
118 int desired_mid = (int) (long) _arg;
119 int this_mid;
120
121 this_mid = get_cpu_mid(dp);
122 if (this_mid == desired_mid)
123 return 0;
124 return -ENODEV;
125}
126
127int cpu_find_by_mid(int mid, struct device_node **dev_node)
128{
129 return __cpu_find_by(cpu_mid_compare, (void *)(long)mid,
130 dev_node, NULL);
131}
132
133void __init device_scan(void)
134{
135 /* FIX ME FAST... -DaveM */
136 ioport_resource.end = 0xffffffffffffffffUL;
137
138 prom_printf("Booting Linux...\n");
139
140#ifndef CONFIG_SMP
141 {
142 struct device_node *dp;
143 int err, def;
144
145 err = cpu_find_by_instance(0, &dp, NULL);
146 if (err) {
147 prom_printf("No cpu nodes, cannot continue\n");
148 prom_halt();
149 }
150 cpu_data(0).clock_tick =
151 of_getintprop_default(dp, "clock-frequency", 0);
152
153 def = ((tlb_type == hypervisor) ?
154 (8 * 1024) :
155 (16 * 1024));
156 cpu_data(0).dcache_size = of_getintprop_default(dp,
157 "dcache-size",
158 def);
159
160 def = 32;
161 cpu_data(0).dcache_line_size =
162 of_getintprop_default(dp, "dcache-line-size", def);
163
164 def = 16 * 1024;
165 cpu_data(0).icache_size = of_getintprop_default(dp,
166 "icache-size",
167 def);
168
169 def = 32;
170 cpu_data(0).icache_line_size =
171 of_getintprop_default(dp, "icache-line-size", def);
172
173 def = ((tlb_type == hypervisor) ?
174 (3 * 1024 * 1024) :
175 (4 * 1024 * 1024));
176 cpu_data(0).ecache_size = of_getintprop_default(dp,
177 "ecache-size",
178 def);
179
180 def = 64;
181 cpu_data(0).ecache_line_size =
182 of_getintprop_default(dp, "ecache-line-size", def);
183 printk("CPU[0]: Caches "
184 "D[sz(%d):line_sz(%d)] "
185 "I[sz(%d):line_sz(%d)] "
186 "E[sz(%d):line_sz(%d)]\n",
187 cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
188 cpu_data(0).icache_size, cpu_data(0).icache_line_size,
189 cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
190 }
191#endif
192
193 central_probe();
194
195 cpu_probe();
196}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index c15a3edcb826..8f10dda0f5c0 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1725,96 +1725,142 @@ real_hard_smp_processor_id:
 	 * returns %o0:	sysino
 	 */
 	.globl	sun4v_devino_to_sysino
+	.type	sun4v_devino_to_sysino,#function
 sun4v_devino_to_sysino:
 	mov	HV_FAST_INTR_DEVINO2SYSINO, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 mov	%o1, %o0
+	.size	sun4v_devino_to_sysino, .-sun4v_devino_to_sysino
 
 	/* %o0:	sysino
 	 *
 	 * returns %o0:	intr_enabled (HV_INTR_{DISABLED,ENABLED})
 	 */
 	.globl	sun4v_intr_getenabled
+	.type	sun4v_intr_getenabled,#function
 sun4v_intr_getenabled:
 	mov	HV_FAST_INTR_GETENABLED, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 mov	%o1, %o0
+	.size	sun4v_intr_getenabled, .-sun4v_intr_getenabled
 
 	/* %o0:	sysino
 	 * %o1:	intr_enabled (HV_INTR_{DISABLED,ENABLED})
 	 */
 	.globl	sun4v_intr_setenabled
+	.type	sun4v_intr_setenabled,#function
 sun4v_intr_setenabled:
 	mov	HV_FAST_INTR_SETENABLED, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 nop
+	.size	sun4v_intr_setenabled, .-sun4v_intr_setenabled
 
 	/* %o0:	sysino
 	 *
 	 * returns %o0:	intr_state (HV_INTR_STATE_*)
 	 */
 	.globl	sun4v_intr_getstate
+	.type	sun4v_intr_getstate,#function
 sun4v_intr_getstate:
 	mov	HV_FAST_INTR_GETSTATE, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 mov	%o1, %o0
+	.size	sun4v_intr_getstate, .-sun4v_intr_getstate
 
 	/* %o0:	sysino
 	 * %o1:	intr_state (HV_INTR_STATE_*)
 	 */
 	.globl	sun4v_intr_setstate
+	.type	sun4v_intr_setstate,#function
 sun4v_intr_setstate:
 	mov	HV_FAST_INTR_SETSTATE, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 nop
+	.size	sun4v_intr_setstate, .-sun4v_intr_setstate
 
 	/* %o0:	sysino
 	 *
 	 * returns %o0:	cpuid
 	 */
 	.globl	sun4v_intr_gettarget
+	.type	sun4v_intr_gettarget,#function
 sun4v_intr_gettarget:
 	mov	HV_FAST_INTR_GETTARGET, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 mov	%o1, %o0
+	.size	sun4v_intr_gettarget, .-sun4v_intr_gettarget
 
 	/* %o0:	sysino
 	 * %o1:	cpuid
 	 */
 	.globl	sun4v_intr_settarget
+	.type	sun4v_intr_settarget,#function
 sun4v_intr_settarget:
 	mov	HV_FAST_INTR_SETTARGET, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 nop
+	.size	sun4v_intr_settarget, .-sun4v_intr_settarget
 
-	/* %o0:	type
-	 * %o1:	queue paddr
-	 * %o2:	num queue entries
+	/* %o0:	cpuid
+	 * %o1:	pc
+	 * %o2:	rtba
+	 * %o3:	arg0
 	 *
 	 * returns %o0:	status
 	 */
-	.globl	sun4v_cpu_qconf
-sun4v_cpu_qconf:
-	mov	HV_FAST_CPU_QCONF, %o5
+	.globl	sun4v_cpu_start
+	.type	sun4v_cpu_start,#function
+sun4v_cpu_start:
+	mov	HV_FAST_CPU_START, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 nop
+	.size	sun4v_cpu_start, .-sun4v_cpu_start
 
-	/* returns %o0:	status
+	/* %o0:	cpuid
+	 *
+	 * returns %o0:	status
 	 */
+	.globl	sun4v_cpu_stop
+	.type	sun4v_cpu_stop,#function
+sun4v_cpu_stop:
+	mov	HV_FAST_CPU_STOP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+	.size	sun4v_cpu_stop, .-sun4v_cpu_stop
+
+	/* returns %o0:	status */
 	.globl	sun4v_cpu_yield
+	.type	sun4v_cpu_yield, #function
 sun4v_cpu_yield:
 	mov	HV_FAST_CPU_YIELD, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 nop
+	.size	sun4v_cpu_yield, .-sun4v_cpu_yield
+
+	/* %o0:	type
+	 * %o1:	queue paddr
+	 * %o2:	num queue entries
+	 *
+	 * returns %o0:	status
+	 */
+	.globl	sun4v_cpu_qconf
+	.type	sun4v_cpu_qconf,#function
+sun4v_cpu_qconf:
+	mov	HV_FAST_CPU_QCONF, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+	.size	sun4v_cpu_qconf, .-sun4v_cpu_qconf
 
 	/* %o0:	num cpus in cpu list
 	 * %o1:	cpu list paddr
@@ -1823,11 +1869,13 @@ sun4v_cpu_yield:
 	 * returns %o0:	status
 	 */
 	.globl	sun4v_cpu_mondo_send
+	.type	sun4v_cpu_mondo_send,#function
 sun4v_cpu_mondo_send:
 	mov	HV_FAST_CPU_MONDO_SEND, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 nop
+	.size	sun4v_cpu_mondo_send, .-sun4v_cpu_mondo_send
 
 	/* %o0:	CPU ID
 	 *
@@ -1835,6 +1883,7 @@ sun4v_cpu_mondo_send:
 	 * %o0: cpu state as HV_CPU_STATE_*
 	 */
 	.globl	sun4v_cpu_state
+	.type	sun4v_cpu_state,#function
 sun4v_cpu_state:
 	mov	HV_FAST_CPU_STATE, %o5
 	ta	HV_FAST_TRAP
@@ -1843,3 +1892,609 @@ sun4v_cpu_state:
 	 mov	%o1, %o0
 1:	retl
 	 nop
1895 .size sun4v_cpu_state, .-sun4v_cpu_state
1896
1897 /* %o0: virtual address
1898 * %o1: must be zero
1899 * %o2: TTE
1900 * %o3: HV_MMU_* flags
1901 *
1902 * returns %o0: status
1903 */
1904 .globl sun4v_mmu_map_perm_addr
1905 .type sun4v_mmu_map_perm_addr,#function
1906sun4v_mmu_map_perm_addr:
1907 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
1908 ta HV_FAST_TRAP
1909 retl
1910 nop
1911 .size sun4v_mmu_map_perm_addr, .-sun4v_mmu_map_perm_addr
1912
1913 /* %o0: number of TSB descriptions
1914 * %o1: TSB descriptions real address
1915 *
1916 * returns %o0: status
1917 */
1918 .globl sun4v_mmu_tsb_ctx0
1919 .type sun4v_mmu_tsb_ctx0,#function
1920sun4v_mmu_tsb_ctx0:
1921 mov HV_FAST_MMU_TSB_CTX0, %o5
1922 ta HV_FAST_TRAP
1923 retl
1924 nop
1925 .size sun4v_mmu_tsb_ctx0, .-sun4v_mmu_tsb_ctx0
1926
1927 /* %o0: API group number
1928 * %o1: pointer to unsigned long major number storage
1929 * %o2: pointer to unsigned long minor number storage
1930 *
1931 * returns %o0: status
1932 */
1933 .globl sun4v_get_version
1934 .type sun4v_get_version,#function
1935sun4v_get_version:
1936 mov HV_CORE_GET_VER, %o5
1937 mov %o1, %o3
1938 mov %o2, %o4
1939 ta HV_CORE_TRAP
1940 stx %o1, [%o3]
1941 retl
1942 stx %o2, [%o4]
1943 .size sun4v_get_version, .-sun4v_get_version
1944
1945 /* %o0: API group number
1946 * %o1: desired major number
1947 * %o2: desired minor number
1948 * %o3: pointer to unsigned long actual minor number storage
1949 *
1950 * returns %o0: status
1951 */
1952 .globl sun4v_set_version
1953 .type sun4v_set_version,#function
1954sun4v_set_version:
1955 mov HV_CORE_SET_VER, %o5
1956 mov %o3, %o4
1957 ta HV_CORE_TRAP
1958 retl
1959 stx %o1, [%o4]
1960 .size sun4v_set_version, .-sun4v_set_version
1961
1962 /* %o0: pointer to unsigned long time
1963 *
1964 * returns %o0: status
1965 */
1966 .globl sun4v_tod_get
1967 .type sun4v_tod_get,#function
1968sun4v_tod_get:
1969 mov %o0, %o4
1970 mov HV_FAST_TOD_GET, %o5
1971 ta HV_FAST_TRAP
1972 stx %o1, [%o4]
1973 retl
1974 nop
1975 .size sun4v_tod_get, .-sun4v_tod_get
1976
1977 /* %o0: time
1978 *
1979 * returns %o0: status
1980 */
1981 .globl sun4v_tod_set
1982 .type sun4v_tod_set,#function
1983sun4v_tod_set:
1984 mov HV_FAST_TOD_SET, %o5
1985 ta HV_FAST_TRAP
1986 retl
1987 nop
1988 .size sun4v_tod_set, .-sun4v_tod_set
1989
1990 /* %o0: pointer to unsigned long status
1991 *
1992 * returns %o0: signed character
1993 */
1994 .globl sun4v_con_getchar
1995 .type sun4v_con_getchar,#function
1996sun4v_con_getchar:
1997 mov %o0, %o4
1998 mov HV_FAST_CONS_GETCHAR, %o5
1999 clr %o0
2000 clr %o1
2001 ta HV_FAST_TRAP
2002 stx %o0, [%o4]
2003 retl
2004 sra %o1, 0, %o0
2005 .size sun4v_con_getchar, .-sun4v_con_getchar
2006
2007 /* %o0: signed long character
2008 *
2009 * returns %o0: status
2010 */
2011 .globl sun4v_con_putchar
2012 .type sun4v_con_putchar,#function
2013sun4v_con_putchar:
2014 mov HV_FAST_CONS_PUTCHAR, %o5
2015 ta HV_FAST_TRAP
2016 retl
2017 sra %o0, 0, %o0
2018 .size sun4v_con_putchar, .-sun4v_con_putchar
2019
2020 /* %o0: buffer real address
2021 * %o1: buffer size
2022 * %o2: pointer to unsigned long bytes_read
2023 *
2024 * returns %o0: status
2025 */
2026 .globl sun4v_con_read
2027 .type sun4v_con_read,#function
2028sun4v_con_read:
2029 mov %o2, %o4
2030 mov HV_FAST_CONS_READ, %o5
2031 ta HV_FAST_TRAP
2032 brnz %o0, 1f
2033 cmp %o1, -1 /* break */
2034 be,a,pn %icc, 1f
2035 mov %o1, %o0
2036 cmp %o1, -2 /* hup */
2037 be,a,pn %icc, 1f
2038 mov %o1, %o0
2039 stx %o1, [%o4]
20401: retl
2041 nop
2042 .size sun4v_con_read, .-sun4v_con_read
2043
2044 /* %o0: buffer real address
2045 * %o1: buffer size
2046 * %o2: pointer to unsigned long bytes_written
2047 *
2048 * returns %o0: status
2049 */
2050 .globl sun4v_con_write
2051 .type sun4v_con_write,#function
2052sun4v_con_write:
2053 mov %o2, %o4
2054 mov HV_FAST_CONS_WRITE, %o5
2055 ta HV_FAST_TRAP
2056 stx %o1, [%o4]
2057 retl
2058 nop
2059 .size sun4v_con_write, .-sun4v_con_write
2060
2061 /* %o0: soft state
2062 * %o1: address of description string
2063 *
2064 * returns %o0: status
2065 */
2066 .globl sun4v_mach_set_soft_state
2067 .type sun4v_mach_set_soft_state,#function
2068sun4v_mach_set_soft_state:
2069 mov HV_FAST_MACH_SET_SOFT_STATE, %o5
2070 ta HV_FAST_TRAP
2071 retl
2072 nop
2073 .size sun4v_mach_set_soft_state, .-sun4v_mach_set_soft_state
2074
2075 /* %o0: exit code
2076 *
2077 * Does not return.
2078 */
2079 .globl sun4v_mach_exit
2080 .type sun4v_mach_exit,#function
2081sun4v_mach_exit:
2082 mov HV_FAST_MACH_EXIT, %o5
2083 ta HV_FAST_TRAP
2084 retl
2085 nop
2086 .size sun4v_mach_exit, .-sun4v_mach_exit
2087
2088 /* %o0: buffer real address
2089 * %o1: buffer length
2090 * %o2: pointer to unsigned long real_buf_len
2091 *
2092 * returns %o0: status
2093 */
2094 .globl sun4v_mach_desc
2095 .type sun4v_mach_desc,#function
2096sun4v_mach_desc:
2097 mov %o2, %o4
2098 mov HV_FAST_MACH_DESC, %o5
2099 ta HV_FAST_TRAP
2100 stx %o1, [%o4]
2101 retl
2102 nop
2103 .size sun4v_mach_desc, .-sun4v_mach_desc
2104
2105 /* %o0: new timeout in milliseconds
2106 * %o1: pointer to unsigned long orig_timeout
2107 *
2108 * returns %o0: status
2109 */
2110 .globl sun4v_mach_set_watchdog
2111 .type sun4v_mach_set_watchdog,#function
2112sun4v_mach_set_watchdog:
2113 mov %o1, %o4
2114 mov HV_FAST_MACH_SET_WATCHDOG, %o5
2115 ta HV_FAST_TRAP
2116 stx %o1, [%o4]
2117 retl
2118 nop
2119 .size sun4v_mach_set_watchdog, .-sun4v_mach_set_watchdog
2120
2121 /* No inputs and does not return. */
2122 .globl sun4v_mach_sir
2123 .type sun4v_mach_sir,#function
2124sun4v_mach_sir:
2125 mov %o1, %o4
2126 mov HV_FAST_MACH_SIR, %o5
2127 ta HV_FAST_TRAP
2128 stx %o1, [%o4]
2129 retl
2130 nop
2131 .size sun4v_mach_sir, .-sun4v_mach_sir
2132
2133 /* %o0: channel
2134 * %o1: ra
2135 * %o2: num_entries
2136 *
2137 * returns %o0: status
2138 */
2139 .globl sun4v_ldc_tx_qconf
2140 .type sun4v_ldc_tx_qconf,#function
2141sun4v_ldc_tx_qconf:
2142 mov HV_FAST_LDC_TX_QCONF, %o5
2143 ta HV_FAST_TRAP
2144 retl
2145 nop
2146 .size sun4v_ldc_tx_qconf, .-sun4v_ldc_tx_qconf
2147
2148 /* %o0: channel
2149 * %o1: pointer to unsigned long ra
2150 * %o2: pointer to unsigned long num_entries
2151 *
2152 * returns %o0: status
2153 */
2154 .globl sun4v_ldc_tx_qinfo
2155 .type sun4v_ldc_tx_qinfo,#function
2156sun4v_ldc_tx_qinfo:
2157 mov %o1, %g1
2158 mov %o2, %g2
2159 mov HV_FAST_LDC_TX_QINFO, %o5
2160 ta HV_FAST_TRAP
2161 stx %o1, [%g1]
2162 stx %o2, [%g2]
2163 retl
2164 nop
2165 .size sun4v_ldc_tx_qinfo, .-sun4v_ldc_tx_qinfo
2166
2167 /* %o0: channel
2168 * %o1: pointer to unsigned long head_off
2169 * %o2: pointer to unsigned long tail_off
2170 * %o2: pointer to unsigned long chan_state
2171 *
2172 * returns %o0: status
2173 */
2174 .globl sun4v_ldc_tx_get_state
2175 .type sun4v_ldc_tx_get_state,#function
2176sun4v_ldc_tx_get_state:
2177 mov %o1, %g1
2178 mov %o2, %g2
2179 mov %o3, %g3
2180 mov HV_FAST_LDC_TX_GET_STATE, %o5
2181 ta HV_FAST_TRAP
2182 stx %o1, [%g1]
2183 stx %o2, [%g2]
2184 stx %o3, [%g3]
2185 retl
2186 nop
2187 .size sun4v_ldc_tx_get_state, .-sun4v_ldc_tx_get_state
2188
2189 /* %o0: channel
2190 * %o1: tail_off
2191 *
2192 * returns %o0: status
2193 */
2194 .globl sun4v_ldc_tx_set_qtail
2195 .type sun4v_ldc_tx_set_qtail,#function
2196sun4v_ldc_tx_set_qtail:
2197 mov HV_FAST_LDC_TX_SET_QTAIL, %o5
2198 ta HV_FAST_TRAP
2199 retl
2200 nop
2201 .size sun4v_ldc_tx_set_qtail, .-sun4v_ldc_tx_set_qtail
2202
2203 /* %o0: channel
2204 * %o1: ra
2205 * %o2: num_entries
2206 *
2207 * returns %o0: status
2208 */
2209 .globl sun4v_ldc_rx_qconf
2210 .type sun4v_ldc_rx_qconf,#function
2211sun4v_ldc_rx_qconf:
2212 mov HV_FAST_LDC_RX_QCONF, %o5
2213 ta HV_FAST_TRAP
2214 retl
2215 nop
2216 .size sun4v_ldc_rx_qconf, .-sun4v_ldc_rx_qconf
2217
2218 /* %o0: channel
2219 * %o1: pointer to unsigned long ra
2220 * %o2: pointer to unsigned long num_entries
2221 *
2222 * returns %o0: status
2223 */
2224 .globl sun4v_ldc_rx_qinfo
2225 .type sun4v_ldc_rx_qinfo,#function
2226sun4v_ldc_rx_qinfo:
2227 mov %o1, %g1
2228 mov %o2, %g2
2229 mov HV_FAST_LDC_RX_QINFO, %o5
2230 ta HV_FAST_TRAP
2231 stx %o1, [%g1]
2232 stx %o2, [%g2]
2233 retl
2234 nop
2235 .size sun4v_ldc_rx_qinfo, .-sun4v_ldc_rx_qinfo
2236
2237 /* %o0: channel
2238 * %o1: pointer to unsigned long head_off
2239 * %o2: pointer to unsigned long tail_off
2240 * %o2: pointer to unsigned long chan_state
2241 *
2242 * returns %o0: status
2243 */
2244 .globl sun4v_ldc_rx_get_state
2245 .type sun4v_ldc_rx_get_state,#function
2246sun4v_ldc_rx_get_state:
2247 mov %o1, %g1
2248 mov %o2, %g2
2249 mov %o3, %g3
2250 mov HV_FAST_LDC_RX_GET_STATE, %o5
2251 ta HV_FAST_TRAP
2252 stx %o1, [%g1]
2253 stx %o2, [%g2]
2254 stx %o3, [%g3]
2255 retl
2256 nop
2257 .size sun4v_ldc_rx_get_state, .-sun4v_ldc_rx_get_state
2258
2259 /* %o0: channel
2260 * %o1: head_off
2261 *
2262 * returns %o0: status
2263 */
2264 .globl sun4v_ldc_rx_set_qhead
2265 .type sun4v_ldc_rx_set_qhead,#function
2266sun4v_ldc_rx_set_qhead:
2267 mov HV_FAST_LDC_RX_SET_QHEAD, %o5
2268 ta HV_FAST_TRAP
2269 retl
2270 nop
2271 .size sun4v_ldc_rx_set_qhead, .-sun4v_ldc_rx_set_qhead
2272
2273 /* %o0: channel
2274 * %o1: ra
2275 * %o2: num_entries
2276 *
2277 * returns %o0: status
2278 */
2279 .globl sun4v_ldc_set_map_table
2280 .type sun4v_ldc_set_map_table,#function
2281sun4v_ldc_set_map_table:
2282 mov HV_FAST_LDC_SET_MAP_TABLE, %o5
2283 ta HV_FAST_TRAP
2284 retl
2285 nop
2286 .size sun4v_ldc_set_map_table, .-sun4v_ldc_set_map_table
2287
2288 /* %o0: channel
2289 * %o1: pointer to unsigned long ra
2290 * %o2: pointer to unsigned long num_entries
2291 *
2292 * returns %o0: status
2293 */
2294 .globl sun4v_ldc_get_map_table
2295 .type sun4v_ldc_get_map_table,#function
2296sun4v_ldc_get_map_table:
2297 mov %o1, %g1
2298 mov %o2, %g2
2299 mov HV_FAST_LDC_GET_MAP_TABLE, %o5
2300 ta HV_FAST_TRAP
2301 stx %o1, [%g1]
2302 stx %o2, [%g2]
2303 retl
2304 nop
2305 .size sun4v_ldc_get_map_table, .-sun4v_ldc_get_map_table
2306
2307 /* %o0: channel
2308 * %o1: dir_code
2309 * %o2: tgt_raddr
2310 * %o3: lcl_raddr
2311 * %o4: len
2312 * %o5: pointer to unsigned long actual_len
2313 *
2314 * returns %o0: status
2315 */
2316 .globl sun4v_ldc_copy
2317 .type sun4v_ldc_copy,#function
2318sun4v_ldc_copy:
2319 mov %o5, %g1
2320 mov HV_FAST_LDC_COPY, %o5
2321 ta HV_FAST_TRAP
2322 stx %o1, [%g1]
2323 retl
2324 nop
2325 .size sun4v_ldc_copy, .-sun4v_ldc_copy
2326
2327 /* %o0: channel
2328 * %o1: cookie
2329 * %o2: pointer to unsigned long ra
2330 * %o3: pointer to unsigned long perm
2331 *
2332 * returns %o0: status
2333 */
2334 .globl sun4v_ldc_mapin
2335 .type sun4v_ldc_mapin,#function
2336sun4v_ldc_mapin:
2337 mov %o2, %g1
2338 mov %o3, %g2
2339 mov HV_FAST_LDC_MAPIN, %o5
2340 ta HV_FAST_TRAP
2341 stx %o1, [%g1]
2342 stx %o2, [%g2]
2343 retl
2344 nop
2345 .size sun4v_ldc_mapin, .-sun4v_ldc_mapin
2346
2347 /* %o0: ra
2348 *
2349 * returns %o0: status
2350 */
2351 .globl sun4v_ldc_unmap
2352 .type sun4v_ldc_unmap,#function
2353sun4v_ldc_unmap:
2354 mov HV_FAST_LDC_UNMAP, %o5
2355 ta HV_FAST_TRAP
2356 retl
2357 nop
2358 .size sun4v_ldc_unmap, .-sun4v_ldc_unmap
2359
2360 /* %o0: cookie
2361 * %o1: mte_cookie
2362 *
2363 * returns %o0: status
2364 */
2365 .globl sun4v_ldc_revoke
2366 .type sun4v_ldc_revoke,#function
2367sun4v_ldc_revoke:
2368 mov HV_FAST_LDC_REVOKE, %o5
2369 ta HV_FAST_TRAP
2370 retl
2371 nop
2372 .size sun4v_ldc_revoke, .-sun4v_ldc_revoke
2373
2374 /* %o0: device handle
2375 * %o1: device INO
2376 * %o2: pointer to unsigned long cookie
2377 *
2378 * returns %o0: status
2379 */
2380 .globl sun4v_vintr_get_cookie
2381 .type sun4v_vintr_get_cookie,#function
2382sun4v_vintr_get_cookie:
2383 mov %o2, %g1
2384 mov HV_FAST_VINTR_GET_COOKIE, %o5
2385 ta HV_FAST_TRAP
2386 stx %o1, [%g1]
2387 retl
2388 nop
2389 .size sun4v_vintr_get_cookie, .-sun4v_vintr_get_cookie
2390
2391 /* %o0: device handle
2392 * %o1: device INO
2393 * %o2: cookie
2394 *
2395 * returns %o0: status
2396 */
2397 .globl sun4v_vintr_set_cookie
2398 .type sun4v_vintr_set_cookie,#function
2399sun4v_vintr_set_cookie:
2400 mov HV_FAST_VINTR_SET_COOKIE, %o5
2401 ta HV_FAST_TRAP
2402 retl
2403 nop
2404 .size sun4v_vintr_set_cookie, .-sun4v_vintr_set_cookie
2405
2406 /* %o0: device handle
2407 * %o1: device INO
2408 * %o2: pointer to unsigned long valid_state
2409 *
2410 * returns %o0: status
2411 */
2412 .globl sun4v_vintr_get_valid
2413 .type sun4v_vintr_get_valid,#function
2414sun4v_vintr_get_valid:
2415 mov %o2, %g1
2416 mov HV_FAST_VINTR_GET_VALID, %o5
2417 ta HV_FAST_TRAP
2418 stx %o1, [%g1]
2419 retl
2420 nop
2421 .size sun4v_vintr_get_valid, .-sun4v_vintr_get_valid
2422
2423 /* %o0: device handle
2424 * %o1: device INO
2425 * %o2: valid_state
2426 *
2427 * returns %o0: status
2428 */
2429 .globl sun4v_vintr_set_valid
2430 .type sun4v_vintr_set_valid,#function
2431sun4v_vintr_set_valid:
2432 mov HV_FAST_VINTR_SET_VALID, %o5
2433 ta HV_FAST_TRAP
2434 retl
2435 nop
2436 .size sun4v_vintr_set_valid, .-sun4v_vintr_set_valid
2437
2438 /* %o0: device handle
2439 * %o1: device INO
2440 * %o2: pointer to unsigned long state
2441 *
2442 * returns %o0: status
2443 */
2444 .globl sun4v_vintr_get_state
2445 .type sun4v_vintr_get_state,#function
2446sun4v_vintr_get_state:
2447 mov %o2, %g1
2448 mov HV_FAST_VINTR_GET_STATE, %o5
2449 ta HV_FAST_TRAP
2450 stx %o1, [%g1]
2451 retl
2452 nop
2453 .size sun4v_vintr_get_state, .-sun4v_vintr_get_state
2454
2455 /* %o0: device handle
2456 * %o1: device INO
2457 * %o2: state
2458 *
2459 * returns %o0: status
2460 */
2461 .globl sun4v_vintr_set_state
2462 .type sun4v_vintr_set_state,#function
2463sun4v_vintr_set_state:
2464 mov HV_FAST_VINTR_SET_STATE, %o5
2465 ta HV_FAST_TRAP
2466 retl
2467 nop
2468 .size sun4v_vintr_set_state, .-sun4v_vintr_set_state
2469
2470 /* %o0: device handle
2471 * %o1: device INO
2472 * %o2: pointer to unsigned long cpuid
2473 *
2474 * returns %o0: status
2475 */
2476 .globl sun4v_vintr_get_target
2477 .type sun4v_vintr_get_target,#function
2478sun4v_vintr_get_target:
2479 mov %o2, %g1
2480 mov HV_FAST_VINTR_GET_TARGET, %o5
2481 ta HV_FAST_TRAP
2482 stx %o1, [%g1]
2483 retl
2484 nop
2485 .size sun4v_vintr_get_target, .-sun4v_vintr_get_target
2486
2487 /* %o0: device handle
2488 * %o1: device INO
2489 * %o2: cpuid
2490 *
2491 * returns %o0: status
2492 */
2493 .globl sun4v_vintr_set_target
2494 .type sun4v_vintr_set_target,#function
2495sun4v_vintr_set_target:
2496 mov HV_FAST_VINTR_SET_TARGET, %o5
2497 ta HV_FAST_TRAP
2498 retl
2499 nop
2500 .size sun4v_vintr_set_target, .-sun4v_vintr_set_target
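The wrappers above all follow the same pattern: C arguments arrive in %o0-%o4, the hypercall function number is loaded into %o5, the trap is taken, and the status comes back in %o0, with any output value stored through a pointer the caller passed in. A minimal sketch of the C side, using sun4v_tod_get() as the example (the example function and where the prototype lives are assumptions, not part of this patch):

/* Sketch only: C view of the sun4v_tod_get() stub added above. */
extern unsigned long sun4v_tod_get(unsigned long *time);

static void example_print_tod(void)
{
	unsigned long secs, hv_err;

	hv_err = sun4v_tod_get(&secs);	/* status returned in %o0 */
	if (hv_err == HV_EOK)
		printk("TOD: %lu seconds\n", secs);
}
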
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index baea10a98196..77259526cb15 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -523,7 +523,7 @@ tlb_fixup_done:
 #else
 	mov	0, %o0
 #endif
-	stb	%o0, [%g6 + TI_CPU]
+	sth	%o0, [%g6 + TI_CPU]
 
 	/* Off we go.... */
 	call	start_kernel
@@ -653,33 +653,54 @@ setup_tba:
 	restore
 sparc64_boot_end:
 
-#include "ktlb.S"
-#include "tsb.S"
 #include "etrap.S"
 #include "rtrap.S"
 #include "winfixup.S"
 #include "entry.S"
 #include "sun4v_tlb_miss.S"
 #include "sun4v_ivec.S"
+#include "ktlb.S"
+#include "tsb.S"
 
 /*
  * The following skip makes sure the trap table in ttable.S is aligned
  * on a 32K boundary as required by the v9 specs for TBA register.
  *
- * We align to a 32K boundary, then we have the 32K kernel TSB,
- * then the 32K aligned trap table.
+ * We align to a 32K boundary, then we have the 32K kernel TSB,
+ * the 64K kernel 4MB TSB, and then the 32K aligned trap table.
  */
 1:
 	.skip	0x4000 + _start - 1b
 
+! 0x0000000000408000
+
 	.globl	swapper_tsb
 swapper_tsb:
 	.skip	(32 * 1024)
 
-! 0x0000000000408000
+	.globl	swapper_4m_tsb
+swapper_4m_tsb:
+	.skip	(64 * 1024)
+
+! 0x0000000000420000
 
+	/* Some care needs to be exercised if you try to move the
+	 * location of the trap table relative to other things.  For
+	 * one thing there are br* instructions in some of the
+	 * trap table entries which branch back to code in ktlb.S
+	 * Those instructions can only handle a signed 16-bit
+	 * displacement.
+	 *
+	 * There is a binutils bug (bugzilla #4558) which causes
+	 * the relocation overflow checks for such instructions to
+	 * not be done correctly.  So binutils will not notice the
+	 * error and will instead write junk into the relocation and
+	 * you'll have an unbootable kernel.
+	 */
 #include "ttable.S"
 
+! 0x0000000000428000
+
 #include "systbls.S"
 
 	.data
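As a quick consistency check on the address annotations added above (assuming ttable.S itself occupies the 32K trap table): 0x408000 + 32K (swapper_tsb) = 0x410000, + 64K (swapper_4m_tsb) = 0x420000 for the trap table, and 0x420000 + 32K = 0x428000, matching the "! 0x..." markers in the hunk.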
diff --git a/arch/sparc64/kernel/hvapi.c b/arch/sparc64/kernel/hvapi.c
new file mode 100644
index 000000000000..f34f5d6181ef
--- /dev/null
+++ b/arch/sparc64/kernel/hvapi.c
@@ -0,0 +1,192 @@
1/* hvapi.c: Hypervisor API management.
2 *
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
4 */
5#include <linux/kernel.h>
6#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/slab.h>
9
10#include <asm/hypervisor.h>
11#include <asm/oplib.h>
12#include <asm/sstate.h>
13
14/* If the hypervisor indicates that the API setting
15 * calls are unsupported, by returning HV_EBADTRAP or
16 * HV_ENOTSUPPORTED, we assume that API groups with the
17 * PRE_API flag set are major 1 minor 0.
18 */
19struct api_info {
20 unsigned long group;
21 unsigned long major;
22 unsigned long minor;
23 unsigned int refcnt;
24 unsigned int flags;
25#define FLAG_PRE_API 0x00000001
26};
27
28static struct api_info api_table[] = {
29 { .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API },
30 { .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
31 { .group = HV_GRP_INTR, },
32 { .group = HV_GRP_SOFT_STATE, },
33 { .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
34 { .group = HV_GRP_LDOM, },
35 { .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
36 { .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
37 { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
38 { .group = HV_GRP_FIRE_PERF, },
39 { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
40};
41
42static DEFINE_SPINLOCK(hvapi_lock);
43
44static struct api_info *__get_info(unsigned long group)
45{
46 int i;
47
48 for (i = 0; i < ARRAY_SIZE(api_table); i++) {
49 if (api_table[i].group == group)
50 return &api_table[i];
51 }
52 return NULL;
53}
54
55static void __get_ref(struct api_info *p)
56{
57 p->refcnt++;
58}
59
60static void __put_ref(struct api_info *p)
61{
62 if (--p->refcnt == 0) {
63 unsigned long ignore;
64
65 sun4v_set_version(p->group, 0, 0, &ignore);
66 p->major = p->minor = 0;
67 }
68}
69
70/* Register a hypervisor API specification. It indicates the
71 * API group and desired major+minor.
72 *
73 * If an existing API registration exists '0' (success) will
74 * be returned if it is compatible with the one being registered.
75 * Otherwise a negative error code will be returned.
76 *
77 * Otherwise an attempt will be made to negotiate the requested
78 * API group/major/minor with the hypervisor, and errors returned
79 * if that does not succeed.
80 */
81int sun4v_hvapi_register(unsigned long group, unsigned long major,
82 unsigned long *minor)
83{
84 struct api_info *p;
85 unsigned long flags;
86 int ret;
87
88 spin_lock_irqsave(&hvapi_lock, flags);
89 p = __get_info(group);
90 ret = -EINVAL;
91 if (p) {
92 if (p->refcnt) {
93 ret = -EINVAL;
94 if (p->major == major) {
95 *minor = p->minor;
96 ret = 0;
97 }
98 } else {
99 unsigned long actual_minor;
100 unsigned long hv_ret;
101
102 hv_ret = sun4v_set_version(group, major, *minor,
103 &actual_minor);
104 ret = -EINVAL;
105 if (hv_ret == HV_EOK) {
106 *minor = actual_minor;
107 p->major = major;
108 p->minor = actual_minor;
109 ret = 0;
110 } else if (hv_ret == HV_EBADTRAP ||
111 hv_ret == HV_ENOTSUPPORTED) {
112 if (p->flags & FLAG_PRE_API) {
113 if (major == 1) {
114 p->major = 1;
115 p->minor = 0;
116 *minor = 0;
117 ret = 0;
118 }
119 }
120 }
121 }
122
123 if (ret == 0)
124 __get_ref(p);
125 }
126 spin_unlock_irqrestore(&hvapi_lock, flags);
127
128 return ret;
129}
130EXPORT_SYMBOL(sun4v_hvapi_register);
131
132void sun4v_hvapi_unregister(unsigned long group)
133{
134 struct api_info *p;
135 unsigned long flags;
136
137 spin_lock_irqsave(&hvapi_lock, flags);
138 p = __get_info(group);
139 if (p)
140 __put_ref(p);
141 spin_unlock_irqrestore(&hvapi_lock, flags);
142}
143EXPORT_SYMBOL(sun4v_hvapi_unregister);
144
145int sun4v_hvapi_get(unsigned long group,
146 unsigned long *major,
147 unsigned long *minor)
148{
149 struct api_info *p;
150 unsigned long flags;
151 int ret;
152
153 spin_lock_irqsave(&hvapi_lock, flags);
154 ret = -EINVAL;
155 p = __get_info(group);
156 if (p && p->refcnt) {
157 *major = p->major;
158 *minor = p->minor;
159 ret = 0;
160 }
161 spin_unlock_irqrestore(&hvapi_lock, flags);
162
163 return ret;
164}
165EXPORT_SYMBOL(sun4v_hvapi_get);
166
167void __init sun4v_hvapi_init(void)
168{
169 unsigned long group, major, minor;
170
171 group = HV_GRP_SUN4V;
172 major = 1;
173 minor = 0;
174 if (sun4v_hvapi_register(group, major, &minor))
175 goto bad;
176
177 group = HV_GRP_CORE;
178 major = 1;
179 minor = 1;
180 if (sun4v_hvapi_register(group, major, &minor))
181 goto bad;
182
183 sun4v_sstate_init();
184
185 return;
186
187bad:
188 prom_printf("HVAPI: Cannot register API group "
189 "%lx with major(%u) minor(%u)\n",
190 group, major, minor);
191 prom_halt();
192}
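For context, a minimal sketch of how a sun4v driver might use the registration interface above to negotiate an API group; the function name and the choice of HV_GRP_INTR with major 1 are illustrative assumptions, not part of this patch:

/* Sketch only: negotiate and later release a hypervisor API group. */
static int example_negotiate_intr_api(void)
{
	unsigned long major = 1, minor = 0;
	int err;

	err = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
	if (err)
		return err;	/* group/major not supported by this hypervisor */

	printk(KERN_INFO "example: HV_GRP_INTR negotiated as %lu.%lu\n",
	       major, minor);

	/* Drop the reference once the driver no longer needs the group. */
	sun4v_hvapi_unregister(HV_GRP_INTR);
	return 0;
}
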
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3edc18e1b818..a36f8dd0c021 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -171,8 +171,6 @@ skip:
 	return 0;
 }
 
-extern unsigned long real_hard_smp_processor_id(void);
-
 static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
 {
 	unsigned int tid;
@@ -694,9 +692,20 @@ void init_irqwork_curcpu(void)
 	trap_block[cpu].irq_worklist = 0;
 }
 
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
+/* Please be very careful with register_one_mondo() and
+ * sun4v_register_mondo_queues().
+ *
+ * On SMP this gets invoked from the CPU trampoline before
+ * the cpu has fully taken over the trap table from OBP,
+ * and its kernel stack + %g6 thread register state is
+ * not fully cooked yet.
+ *
+ * Therefore you cannot make any OBP calls, not even prom_printf,
+ * from these two routines.
+ */
+static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
-	unsigned long num_entries = 128;
+	unsigned long num_entries = (qmask + 1) / 64;
 	unsigned long status;
 
 	status = sun4v_cpu_qconf(type, paddr, num_entries);
@@ -711,44 +720,58 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
 {
 	struct trap_per_cpu *tb = &trap_block[this_cpu];
 
-	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
-	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
-	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
-	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
+	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
+			   tb->cpu_mondo_qmask);
+	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
+			   tb->dev_mondo_qmask);
+	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
+			   tb->resum_qmask);
+	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
+			   tb->nonresum_qmask);
 }
 
-static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
+static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
 {
-	void *page;
+	unsigned long size = PAGE_ALIGN(qmask + 1);
+	unsigned long order = get_order(size);
+	void *p = NULL;
 
-	if (use_bootmem)
-		page = alloc_bootmem_low_pages(PAGE_SIZE);
-	else
-		page = (void *) get_zeroed_page(GFP_ATOMIC);
+	if (use_bootmem) {
+		p = __alloc_bootmem_low(size, size, 0);
+	} else {
+		struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
+		if (page)
+			p = page_address(page);
+	}
 
-	if (!page) {
+	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
 		prom_halt();
 	}
 
-	*pa_ptr = __pa(page);
+	*pa_ptr = __pa(p);
 }
 
-static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
+static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
 {
-	void *page;
+	unsigned long size = PAGE_ALIGN(qmask + 1);
+	unsigned long order = get_order(size);
+	void *p = NULL;
 
-	if (use_bootmem)
-		page = alloc_bootmem_low_pages(PAGE_SIZE);
-	else
-		page = (void *) get_zeroed_page(GFP_ATOMIC);
+	if (use_bootmem) {
+		p = __alloc_bootmem_low(size, size, 0);
+	} else {
+		struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
+		if (page)
+			p = page_address(page);
+	}
 
-	if (!page) {
+	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
 		prom_halt();
 	}
 
-	*pa_ptr = __pa(page);
+	*pa_ptr = __pa(p);
 }
 
 static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
@@ -779,12 +802,12 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int
 	struct trap_per_cpu *tb = &trap_block[cpu];
 
 	if (alloc) {
-		alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
-		alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
-		alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
-		alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
-		alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
-		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
+		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
+		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
+		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
+		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
+		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
+		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
 
 		init_cpu_send_mondo_info(tb, use_bootmem);
 	}
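A worked example ties the two ends of this change together: with the default "q-cpu-mondo-#bits" value of 7 used by get_one_mondo_bits() in mdesc.c later in this patch, the mask becomes (1 << 7) * 64 - 1 = 8191, so alloc_one_mondo() allocates qmask + 1 = 8192 bytes and register_one_mondo() passes (qmask + 1) / 64 = 128 entries to sun4v_cpu_qconf() (the division by 64 reflecting the 64-byte queue entry size), which matches the 128 entries the old hard-coded version registered.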
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index ad46e2024f4b..5a8377b54955 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -11,12 +11,12 @@
 /* ITLB ** ICACHE line 2: TSB compare and TLB load	*/
 	bne,pn	%xcc, tsb_miss_itlb		! Miss
 	 mov	FAULT_CODE_ITLB, %g3
-	andcc	%g5, _PAGE_EXEC_4U, %g0		! Executable?
+	sethi	%hi(_PAGE_EXEC_4U), %g4
+	andcc	%g5, %g4, %g0			! Executable?
 	be,pn	%xcc, tsb_do_fault
 	 nop					! Delay slot, fill me
 	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
 	retry					! Trap done
-	nop
 
 /* ITLB ** ICACHE line 3:				*/
 	nop
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
new file mode 100644
index 000000000000..9246c2cf9574
--- /dev/null
+++ b/arch/sparc64/kernel/mdesc.c
@@ -0,0 +1,619 @@
1/* mdesc.c: Sun4V machine description handling.
2 *
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
4 */
5#include <linux/kernel.h>
6#include <linux/types.h>
7#include <linux/bootmem.h>
8#include <linux/log2.h>
9
10#include <asm/hypervisor.h>
11#include <asm/mdesc.h>
12#include <asm/prom.h>
13#include <asm/oplib.h>
14#include <asm/smp.h>
15
16/* Unlike the OBP device tree, the machine description is a full-on
17 * DAG. An arbitrary number of ARCs are possible from one
18 * node to other nodes and thus we can't use the OBP device_node
19 * data structure to represent these nodes inside of the kernel.
20 *
21 * Actually, it isn't even a DAG, because there are back pointers
22 * which create cycles in the graph.
23 *
24 * mdesc_hdr and mdesc_elem describe the layout of the data structure
25 * we get from the Hypervisor.
26 */
27struct mdesc_hdr {
28 u32 version; /* Transport version */
29 u32 node_sz; /* node block size */
30 u32 name_sz; /* name block size */
31 u32 data_sz; /* data block size */
32};
33
34struct mdesc_elem {
35 u8 tag;
36#define MD_LIST_END 0x00
37#define MD_NODE 0x4e
38#define MD_NODE_END 0x45
39#define MD_NOOP 0x20
40#define MD_PROP_ARC 0x61
41#define MD_PROP_VAL 0x76
42#define MD_PROP_STR 0x73
43#define MD_PROP_DATA 0x64
44 u8 name_len;
45 u16 resv;
46 u32 name_offset;
47 union {
48 struct {
49 u32 data_len;
50 u32 data_offset;
51 } data;
52 u64 val;
53 } d;
54};
55
56static struct mdesc_hdr *main_mdesc;
57static struct mdesc_node *allnodes;
58
59static struct mdesc_node *allnodes_tail;
60static unsigned int unique_id;
61
62static struct mdesc_node **mdesc_hash;
63static unsigned int mdesc_hash_size;
64
65static inline unsigned int node_hashfn(u64 node)
66{
67 return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16)))
68 & (mdesc_hash_size - 1);
69}
70
71static inline void hash_node(struct mdesc_node *mp)
72{
73 struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)];
74
75 mp->hash_next = *head;
76 *head = mp;
77
78 if (allnodes_tail) {
79 allnodes_tail->allnodes_next = mp;
80 allnodes_tail = mp;
81 } else {
82 allnodes = allnodes_tail = mp;
83 }
84}
85
86static struct mdesc_node *find_node(u64 node)
87{
88 struct mdesc_node *mp = mdesc_hash[node_hashfn(node)];
89
90 while (mp) {
91 if (mp->node == node)
92 return mp;
93
94 mp = mp->hash_next;
95 }
96 return NULL;
97}
98
99struct property *md_find_property(const struct mdesc_node *mp,
100 const char *name,
101 int *lenp)
102{
103 struct property *pp;
104
105 for (pp = mp->properties; pp != 0; pp = pp->next) {
106 if (strcasecmp(pp->name, name) == 0) {
107 if (lenp)
108 *lenp = pp->length;
109 break;
110 }
111 }
112 return pp;
113}
114EXPORT_SYMBOL(md_find_property);
115
116/*
117 * Find a property with a given name for a given node
118 * and return the value.
119 */
120const void *md_get_property(const struct mdesc_node *mp, const char *name,
121 int *lenp)
122{
123 struct property *pp = md_find_property(mp, name, lenp);
124 return pp ? pp->value : NULL;
125}
126EXPORT_SYMBOL(md_get_property);
127
128struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
129 const char *name)
130{
131 struct mdesc_node *mp;
132
133 mp = from ? from->allnodes_next : allnodes;
134 for (; mp != NULL; mp = mp->allnodes_next) {
135 if (strcmp(mp->name, name) == 0)
136 break;
137 }
138 return mp;
139}
140EXPORT_SYMBOL(md_find_node_by_name);
141
142static unsigned int mdesc_early_allocated;
143
144static void * __init mdesc_early_alloc(unsigned long size)
145{
146 void *ret;
147
148 ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
149 if (ret == NULL) {
150 prom_printf("MDESC: alloc of %lu bytes failed.\n", size);
151 prom_halt();
152 }
153
154 memset(ret, 0, size);
155
156 mdesc_early_allocated += size;
157
158 return ret;
159}
160
161static unsigned int __init count_arcs(struct mdesc_elem *ep)
162{
163 unsigned int ret = 0;
164
165 ep++;
166 while (ep->tag != MD_NODE_END) {
167 if (ep->tag == MD_PROP_ARC)
168 ret++;
169 ep++;
170 }
171 return ret;
172}
173
174static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names)
175{
176 unsigned int num_arcs = count_arcs(ep);
177 struct mdesc_node *mp;
178
179 mp = mdesc_early_alloc(sizeof(*mp) +
180 (num_arcs * sizeof(struct mdesc_arc)));
181 mp->name = names + ep->name_offset;
182 mp->node = node;
183 mp->unique_id = unique_id++;
184 mp->num_arcs = num_arcs;
185
186 hash_node(mp);
187}
188
189static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
190{
191 return (struct mdesc_elem *) (mdesc + 1);
192}
193
194static inline void *name_block(struct mdesc_hdr *mdesc)
195{
196 return ((void *) node_block(mdesc)) + mdesc->node_sz;
197}
198
199static inline void *data_block(struct mdesc_hdr *mdesc)
200{
201 return ((void *) name_block(mdesc)) + mdesc->name_sz;
202}
203
204/* In order to avoid recursion (the graph can be very deep) we use a
205 * two pass algorithm. First we allocate all the nodes and hash them.
206 * Then we iterate over each node, filling in the arcs and properties.
207 */
208static void __init build_all_nodes(struct mdesc_hdr *mdesc)
209{
210 struct mdesc_elem *start, *ep;
211 struct mdesc_node *mp;
212 const char *names;
213 void *data;
214 u64 last_node;
215
216 start = ep = node_block(mdesc);
217 last_node = mdesc->node_sz / 16;
218
219 names = name_block(mdesc);
220
221 while (1) {
222 u64 node = ep - start;
223
224 if (ep->tag == MD_LIST_END)
225 break;
226
227 if (ep->tag != MD_NODE) {
228 prom_printf("MDESC: Inconsistent element list.\n");
229 prom_halt();
230 }
231
232 mdesc_node_alloc(node, ep, names);
233
234 if (ep->d.val >= last_node) {
235 printk("MDESC: Warning, early break out of node scan.\n");
236 printk("MDESC: Next node [%lu] last_node [%lu].\n",
237 node, last_node);
238 break;
239 }
240
241 ep = start + ep->d.val;
242 }
243
244 data = data_block(mdesc);
245 for (mp = allnodes; mp; mp = mp->allnodes_next) {
246 struct mdesc_elem *ep = start + mp->node;
247 struct property **link = &mp->properties;
248 unsigned int this_arc = 0;
249
250 ep++;
251 while (ep->tag != MD_NODE_END) {
252 switch (ep->tag) {
253 case MD_PROP_ARC: {
254 struct mdesc_node *target;
255
256 if (this_arc >= mp->num_arcs) {
257 prom_printf("MDESC: ARC overrun [%u:%u]\n",
258 this_arc, mp->num_arcs);
259 prom_halt();
260 }
261 target = find_node(ep->d.val);
262 if (!target) {
263 printk("MDESC: Warning, arc points to "
264 "missing node, ignoring.\n");
265 break;
266 }
267 mp->arcs[this_arc].name =
268 (names + ep->name_offset);
269 mp->arcs[this_arc].arc = target;
270 this_arc++;
271 break;
272 }
273
274 case MD_PROP_VAL:
275 case MD_PROP_STR:
276 case MD_PROP_DATA: {
277 struct property *p = mdesc_early_alloc(sizeof(*p));
278
279 p->unique_id = unique_id++;
280 p->name = (char *) names + ep->name_offset;
281 if (ep->tag == MD_PROP_VAL) {
282 p->value = &ep->d.val;
283 p->length = 8;
284 } else {
285 p->value = data + ep->d.data.data_offset;
286 p->length = ep->d.data.data_len;
287 }
288 *link = p;
289 link = &p->next;
290 break;
291 }
292
293 case MD_NOOP:
294 break;
295
296 default:
297 printk("MDESC: Warning, ignoring unknown tag type %02x\n",
298 ep->tag);
299 }
300 ep++;
301 }
302 }
303}
304
305static unsigned int __init count_nodes(struct mdesc_hdr *mdesc)
306{
307 struct mdesc_elem *ep = node_block(mdesc);
308 struct mdesc_elem *end;
309 unsigned int cnt = 0;
310
311 end = ((void *)ep) + mdesc->node_sz;
312 while (ep < end) {
313 if (ep->tag == MD_NODE)
314 cnt++;
315 ep++;
316 }
317 return cnt;
318}
319
320static void __init report_platform_properties(void)
321{
322 struct mdesc_node *pn = md_find_node_by_name(NULL, "platform");
323 const char *s;
324 const u64 *v;
325
326 if (!pn) {
327 prom_printf("No platform node in machine-description.\n");
328 prom_halt();
329 }
330
331 s = md_get_property(pn, "banner-name", NULL);
332 printk("PLATFORM: banner-name [%s]\n", s);
333 s = md_get_property(pn, "name", NULL);
334 printk("PLATFORM: name [%s]\n", s);
335
336 v = md_get_property(pn, "hostid", NULL);
337 if (v)
338 printk("PLATFORM: hostid [%08lx]\n", *v);
339 v = md_get_property(pn, "serial#", NULL);
340 if (v)
341 printk("PLATFORM: serial# [%08lx]\n", *v);
342 v = md_get_property(pn, "stick-frequency", NULL);
343 printk("PLATFORM: stick-frequency [%08lx]\n", *v);
344 v = md_get_property(pn, "mac-address", NULL);
345 if (v)
346 printk("PLATFORM: mac-address [%lx]\n", *v);
347 v = md_get_property(pn, "watchdog-resolution", NULL);
348 if (v)
349 printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v);
350 v = md_get_property(pn, "watchdog-max-timeout", NULL);
351 if (v)
352 printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v);
353 v = md_get_property(pn, "max-cpus", NULL);
354 if (v)
355 printk("PLATFORM: max-cpus [%lu]\n", *v);
356}
357
358static int inline find_in_proplist(const char *list, const char *match, int len)
359{
360 while (len > 0) {
361 int l;
362
363 if (!strcmp(list, match))
364 return 1;
365 l = strlen(list) + 1;
366 list += l;
367 len -= l;
368 }
369 return 0;
370}
371
372static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp)
373{
374 const u64 *level = md_get_property(mp, "level", NULL);
375 const u64 *size = md_get_property(mp, "size", NULL);
376 const u64 *line_size = md_get_property(mp, "line-size", NULL);
377 const char *type;
378 int type_len;
379
380 type = md_get_property(mp, "type", &type_len);
381
382 switch (*level) {
383 case 1:
384 if (find_in_proplist(type, "instn", type_len)) {
385 c->icache_size = *size;
386 c->icache_line_size = *line_size;
387 } else if (find_in_proplist(type, "data", type_len)) {
388 c->dcache_size = *size;
389 c->dcache_line_size = *line_size;
390 }
391 break;
392
393 case 2:
394 c->ecache_size = *size;
395 c->ecache_line_size = *line_size;
396 break;
397
398 default:
399 break;
400 }
401
402 if (*level == 1) {
403 unsigned int i;
404
405 for (i = 0; i < mp->num_arcs; i++) {
406 struct mdesc_node *t = mp->arcs[i].arc;
407
408 if (strcmp(mp->arcs[i].name, "fwd"))
409 continue;
410
411 if (!strcmp(t->name, "cache"))
412 fill_in_one_cache(c, t);
413 }
414 }
415}
416
417static void __init mark_core_ids(struct mdesc_node *mp, int core_id)
418{
419 unsigned int i;
420
421 for (i = 0; i < mp->num_arcs; i++) {
422 struct mdesc_node *t = mp->arcs[i].arc;
423 const u64 *id;
424
425 if (strcmp(mp->arcs[i].name, "back"))
426 continue;
427
428 if (!strcmp(t->name, "cpu")) {
429 id = md_get_property(t, "id", NULL);
430 if (*id < NR_CPUS)
431 cpu_data(*id).core_id = core_id;
432 } else {
433 unsigned int j;
434
435 for (j = 0; j < t->num_arcs; j++) {
436 struct mdesc_node *n = t->arcs[j].arc;
437
438 if (strcmp(t->arcs[j].name, "back"))
439 continue;
440
441 if (strcmp(n->name, "cpu"))
442 continue;
443
444 id = md_get_property(n, "id", NULL);
445 if (*id < NR_CPUS)
446 cpu_data(*id).core_id = core_id;
447 }
448 }
449 }
450}
451
452static void __init set_core_ids(void)
453{
454 struct mdesc_node *mp;
455 int idx;
456
457 idx = 1;
458 md_for_each_node_by_name(mp, "cache") {
459 const u64 *level = md_get_property(mp, "level", NULL);
460 const char *type;
461 int len;
462
463 if (*level != 1)
464 continue;
465
466 type = md_get_property(mp, "type", &len);
467 if (!find_in_proplist(type, "instn", len))
468 continue;
469
470 mark_core_ids(mp, idx);
471
472 idx++;
473 }
474}
475
476static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
477{
478 u64 val;
479
480 if (!p)
481 goto use_default;
482 val = *p;
483
484 if (!val || val >= 64)
485 goto use_default;
486
487 *mask = ((1U << val) * 64U) - 1U;
488 return;
489
490use_default:
491 *mask = ((1U << def) * 64U) - 1U;
492}
493
494static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb)
495{
496 const u64 *val;
497
498 val = md_get_property(mp, "q-cpu-mondo-#bits", NULL);
499 get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);
500
501 val = md_get_property(mp, "q-dev-mondo-#bits", NULL);
502 get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);
503
504 val = md_get_property(mp, "q-resumable-#bits", NULL);
505 get_one_mondo_bits(val, &tb->resum_qmask, 6);
506
507 val = md_get_property(mp, "q-nonresumable-#bits", NULL);
508 get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
509}
510
511static void __init mdesc_fill_in_cpu_data(void)
512{
513 struct mdesc_node *mp;
514
515 ncpus_probed = 0;
516 md_for_each_node_by_name(mp, "cpu") {
517 const u64 *id = md_get_property(mp, "id", NULL);
518 const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL);
519 struct trap_per_cpu *tb;
520 cpuinfo_sparc *c;
521 unsigned int i;
522 int cpuid;
523
524 ncpus_probed++;
525
526 cpuid = *id;
527
528#ifdef CONFIG_SMP
529 if (cpuid >= NR_CPUS)
530 continue;
531#else
532 /* On uniprocessor we only want the values for the
533 * real physical cpu the kernel booted onto, however
534 * cpu_data() only has one entry at index 0.
535 */
536 if (cpuid != real_hard_smp_processor_id())
537 continue;
538 cpuid = 0;
539#endif
540
541 c = &cpu_data(cpuid);
542 c->clock_tick = *cfreq;
543
544 tb = &trap_block[cpuid];
545 get_mondo_data(mp, tb);
546
547 for (i = 0; i < mp->num_arcs; i++) {
548 struct mdesc_node *t = mp->arcs[i].arc;
549 unsigned int j;
550
551 if (strcmp(mp->arcs[i].name, "fwd"))
552 continue;
553
554 if (!strcmp(t->name, "cache")) {
555 fill_in_one_cache(c, t);
556 continue;
557 }
558
559 for (j = 0; j < t->num_arcs; j++) {
560 struct mdesc_node *n;
561
562 n = t->arcs[j].arc;
563 if (strcmp(t->arcs[j].name, "fwd"))
564 continue;
565
566 if (!strcmp(n->name, "cache"))
567 fill_in_one_cache(c, n);
568 }
569 }
570
571#ifdef CONFIG_SMP
572 cpu_set(cpuid, cpu_present_map);
573 cpu_set(cpuid, phys_cpu_present_map);
574#endif
575
576 c->core_id = 0;
577 }
578
579 set_core_ids();
580
581 smp_fill_in_sib_core_maps();
582}
583
584void __init sun4v_mdesc_init(void)
585{
586 unsigned long len, real_len, status;
587
588 (void) sun4v_mach_desc(0UL, 0UL, &len);
589
590 printk("MDESC: Size is %lu bytes.\n", len);
591
592 main_mdesc = mdesc_early_alloc(len);
593
594 status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len);
595 if (status != HV_EOK || real_len > len) {
596 prom_printf("sun4v_mach_desc fails, err(%lu), "
597 "len(%lu), real_len(%lu)\n",
598 status, len, real_len);
599 prom_halt();
600 }
601
602 len = count_nodes(main_mdesc);
603 printk("MDESC: %lu nodes.\n", len);
604
605 len = roundup_pow_of_two(len);
606
607 mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *));
608 mdesc_hash_size = len;
609
610 printk("MDESC: Hash size %lu entries.\n", len);
611
612 build_all_nodes(main_mdesc);
613
614 printk("MDESC: Built graph with %u bytes of memory.\n",
615 mdesc_early_allocated);
616
617 report_platform_properties();
618 mdesc_fill_in_cpu_data();
619}
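
A note on get_one_mondo_bits() above: the machine description's "q-*-#bits" properties give the log2 of the number of entries in each per-cpu interrupt queue, and each queue entry is 64 bytes, so the head/tail byte offsets wrap with the mask ((1 << val) * 64) - 1. A minimal standalone sketch of that arithmetic (plain userspace C, not kernel code; the sample values are only illustrative):

#include <stdio.h>

/* Mirror of get_one_mondo_bits(): p points at the "#bits" property
 * (log2 of the 64-byte entry count) or is NULL, def is the fallback
 * exponent.  Real machine descriptions use small values; anything
 * >= 26 would overflow the 32-bit arithmetic here, exactly as in the
 * kernel helper. */
static unsigned int mondo_qmask(const unsigned long *p, unsigned char def)
{
	unsigned long val;

	if (!p)
		goto use_default;
	val = *p;
	if (!val || val >= 64)
		goto use_default;
	return ((1U << val) * 64U) - 1U;

use_default:
	return ((1U << def) * 64U) - 1U;
}

int main(void)
{
	unsigned long bits = 7;		/* 128 entries * 64 bytes = 8K queue */

	printf("cpu mondo qmask = %u\n", mondo_qmask(&bits, 7));		/* 8191 */
	printf("nonresumable default qmask = %u\n", mondo_qmask(NULL, 2));	/* 255 */
	return 0;
}

The 8191 result is the 8192 - 1 mask that sun4v_ivec.S used to hard-wire with a sethi/or pair; keeping a per-queue mask in trap_per_cpu lets each queue be sized independently by the hypervisor.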
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index d4c077dc5e85..38a32bc95d22 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -306,6 +306,20 @@ static void __init pci_controller_probe(void)
306 pci_controller_scan(pci_controller_init); 306 pci_controller_scan(pci_controller_init);
307} 307}
308 308
309static int ofpci_verbose;
310
311static int __init ofpci_debug(char *str)
312{
313 int val = 0;
314
315 get_option(&str, &val);
316 if (val)
317 ofpci_verbose = 1;
318 return 1;
319}
320
321__setup("ofpci_debug=", ofpci_debug);
322
309static unsigned long pci_parse_of_flags(u32 addr0) 323static unsigned long pci_parse_of_flags(u32 addr0)
310{ 324{
311 unsigned long flags = 0; 325 unsigned long flags = 0;
@@ -337,7 +351,9 @@ static void pci_parse_of_addrs(struct of_device *op,
337 addrs = of_get_property(node, "assigned-addresses", &proplen); 351 addrs = of_get_property(node, "assigned-addresses", &proplen);
338 if (!addrs) 352 if (!addrs)
339 return; 353 return;
340 printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs); 354 if (ofpci_verbose)
355 printk(" parse addresses (%d bytes) @ %p\n",
356 proplen, addrs);
341 op_res = &op->resource[0]; 357 op_res = &op->resource[0];
342 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { 358 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
343 struct resource *res; 359 struct resource *res;
@@ -348,8 +364,9 @@ static void pci_parse_of_addrs(struct of_device *op,
348 if (!flags) 364 if (!flags)
349 continue; 365 continue;
350 i = addrs[0] & 0xff; 366 i = addrs[0] & 0xff;
351 printk(" start: %lx, end: %lx, i: %x\n", 367 if (ofpci_verbose)
352 op_res->start, op_res->end, i); 368 printk(" start: %lx, end: %lx, i: %x\n",
369 op_res->start, op_res->end, i);
353 370
354 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { 371 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
355 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; 372 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -393,8 +410,9 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
393 if (type == NULL) 410 if (type == NULL)
394 type = ""; 411 type = "";
395 412
396 printk(" create device, devfn: %x, type: %s hostcontroller(%d)\n", 413 if (ofpci_verbose)
397 devfn, type, host_controller); 414 printk(" create device, devfn: %x, type: %s\n",
415 devfn, type);
398 416
399 dev->bus = bus; 417 dev->bus = bus;
400 dev->sysdata = node; 418 dev->sysdata = node;
@@ -434,8 +452,9 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
434 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), 452 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
435 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 453 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
436 } 454 }
437 printk(" class: 0x%x device name: %s\n", 455 if (ofpci_verbose)
438 dev->class, pci_name(dev)); 456 printk(" class: 0x%x device name: %s\n",
457 dev->class, pci_name(dev));
439 458
440 /* I have seen IDE devices which will not respond to 459 /* I have seen IDE devices which will not respond to
441 * the bmdma simplex check reads if bus mastering is 460 * the bmdma simplex check reads if bus mastering is
@@ -469,7 +488,8 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
469 } 488 }
470 pci_parse_of_addrs(sd->op, node, dev); 489 pci_parse_of_addrs(sd->op, node, dev);
471 490
472 printk(" adding to system ...\n"); 491 if (ofpci_verbose)
492 printk(" adding to system ...\n");
473 493
474 pci_device_add(dev, bus); 494 pci_device_add(dev, bus);
475 495
@@ -547,7 +567,8 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
547 unsigned int flags; 567 unsigned int flags;
548 u64 size; 568 u64 size;
549 569
550 printk("of_scan_pci_bridge(%s)\n", node->full_name); 570 if (ofpci_verbose)
571 printk("of_scan_pci_bridge(%s)\n", node->full_name);
551 572
552 /* parse bus-range property */ 573 /* parse bus-range property */
553 busrange = of_get_property(node, "bus-range", &len); 574 busrange = of_get_property(node, "bus-range", &len);
@@ -632,7 +653,8 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
632simba_cont: 653simba_cont:
633 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 654 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
634 bus->number); 655 bus->number);
635 printk(" bus name: %s\n", bus->name); 656 if (ofpci_verbose)
657 printk(" bus name: %s\n", bus->name);
636 658
637 pci_of_scan_bus(pbm, node, bus); 659 pci_of_scan_bus(pbm, node, bus);
638} 660}
@@ -646,12 +668,14 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
646 int reglen, devfn; 668 int reglen, devfn;
647 struct pci_dev *dev; 669 struct pci_dev *dev;
648 670
649 printk("PCI: scan_bus[%s] bus no %d\n", 671 if (ofpci_verbose)
650 node->full_name, bus->number); 672 printk("PCI: scan_bus[%s] bus no %d\n",
673 node->full_name, bus->number);
651 674
652 child = NULL; 675 child = NULL;
653 while ((child = of_get_next_child(node, child)) != NULL) { 676 while ((child = of_get_next_child(node, child)) != NULL) {
654 printk(" * %s\n", child->full_name); 677 if (ofpci_verbose)
678 printk(" * %s\n", child->full_name);
655 reg = of_get_property(child, "reg", &reglen); 679 reg = of_get_property(child, "reg", &reglen);
656 if (reg == NULL || reglen < 20) 680 if (reg == NULL || reglen < 20)
657 continue; 681 continue;
@@ -661,7 +685,9 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
661 dev = of_create_pci_dev(pbm, child, bus, devfn, 0); 685 dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
662 if (!dev) 686 if (!dev)
663 continue; 687 continue;
664 printk("PCI: dev header type: %x\n", dev->hdr_type); 688 if (ofpci_verbose)
689 printk("PCI: dev header type: %x\n",
690 dev->hdr_type);
665 691
666 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 692 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
667 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 693 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
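
The new ofpci_verbose flag turns the formerly unconditional PCI probe printk()s into opt-in debug output. The mechanism is the standard __setup() early-parameter hook: get_option() parses the integer following "ofpci_debug=" on the kernel command line and any non-zero value enables the messages. The hook from the hunk above, restated with comments for clarity (the surrounding pci.c provides the needed headers):

static int ofpci_verbose;	/* 0 = quiet (default), non-zero = verbose probe logging */

static int __init ofpci_debug(char *str)
{
	int val = 0;

	get_option(&str, &val);	/* parse the integer after "ofpci_debug=" */
	if (val)
		ofpci_verbose = 1;
	return 1;		/* tell the option parser we consumed it */
}

__setup("ofpci_debug=", ofpci_debug);

Booting with ofpci_debug=1 restores the old verbose output; leaving it off keeps the PCI scan quiet.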
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index e2377796de89..323d6c278518 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -762,9 +762,10 @@ void sabre_init(struct device_node *dp, char *model_name)
762 /* Of course, Sun has to encode things a thousand 762 /* Of course, Sun has to encode things a thousand
763 * different ways, inconsistently. 763 * different ways, inconsistently.
764 */ 764 */
765 cpu_find_by_instance(0, &dp, NULL); 765 for_each_node_by_type(dp, "cpu") {
766 if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) 766 if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe"))
767 hummingbird_p = 1; 767 hummingbird_p = 1;
768 }
768 } 769 }
769 } 770 }
770 771
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 044e8ec4c0f5..6b3fe2c1d65e 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -12,6 +12,7 @@
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/msi.h> 14#include <linux/msi.h>
15#include <linux/log2.h>
15 16
16#include <asm/iommu.h> 17#include <asm/iommu.h>
17#include <asm/irq.h> 18#include <asm/irq.h>
@@ -26,6 +27,9 @@
26 27
27#include "pci_sun4v.h" 28#include "pci_sun4v.h"
28 29
30static unsigned long vpci_major = 1;
31static unsigned long vpci_minor = 1;
32
29#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) 33#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
30 34
31struct iommu_batch { 35struct iommu_batch {
@@ -638,9 +642,8 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
638{ 642{
639 struct iommu *iommu = pbm->iommu; 643 struct iommu *iommu = pbm->iommu;
640 struct property *prop; 644 struct property *prop;
641 unsigned long num_tsb_entries, sz; 645 unsigned long num_tsb_entries, sz, tsbsize;
642 u32 vdma[2], dma_mask, dma_offset; 646 u32 vdma[2], dma_mask, dma_offset;
643 int tsbsize;
644 647
645 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL); 648 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
646 if (prop) { 649 if (prop) {
@@ -654,31 +657,15 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
654 vdma[1] = 0x80000000; 657 vdma[1] = 0x80000000;
655 } 658 }
656 659
657 dma_mask = vdma[0]; 660 if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
658 switch (vdma[1]) { 661 prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
659 case 0x20000000: 662 vdma[0], vdma[1]);
660 dma_mask |= 0x1fffffff; 663 prom_halt();
661 tsbsize = 64;
662 break;
663
664 case 0x40000000:
665 dma_mask |= 0x3fffffff;
666 tsbsize = 128;
667 break;
668
669 case 0x80000000:
670 dma_mask |= 0x7fffffff;
671 tsbsize = 256;
672 break;
673
674 default:
675 prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
676 prom_halt();
677 }; 664 };
678 665
679 tsbsize *= (8 * 1024); 666 dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
680 667 num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
681 num_tsb_entries = tsbsize / sizeof(iopte_t); 668 tsbsize = num_tsb_entries * sizeof(iopte_t);
682 669
683 dma_offset = vdma[0]; 670 dma_offset = vdma[0];
684 671
@@ -689,7 +676,7 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
689 iommu->dma_addr_mask = dma_mask; 676 iommu->dma_addr_mask = dma_mask;
690 677
691 /* Allocate and initialize the free area map. */ 678 /* Allocate and initialize the free area map. */
692 sz = num_tsb_entries / 8; 679 sz = (num_tsb_entries + 7) / 8;
693 sz = (sz + 7UL) & ~7UL; 680 sz = (sz + 7UL) & ~7UL;
694 iommu->arena.map = kzalloc(sz, GFP_KERNEL); 681 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
695 if (!iommu->arena.map) { 682 if (!iommu->arena.map) {
@@ -1178,6 +1165,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
1178 1165
1179void sun4v_pci_init(struct device_node *dp, char *model_name) 1166void sun4v_pci_init(struct device_node *dp, char *model_name)
1180{ 1167{
1168 static int hvapi_negotiated = 0;
1181 struct pci_controller_info *p; 1169 struct pci_controller_info *p;
1182 struct pci_pbm_info *pbm; 1170 struct pci_pbm_info *pbm;
1183 struct iommu *iommu; 1171 struct iommu *iommu;
@@ -1186,6 +1174,20 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
1186 u32 devhandle; 1174 u32 devhandle;
1187 int i; 1175 int i;
1188 1176
1177 if (!hvapi_negotiated++) {
1178 int err = sun4v_hvapi_register(HV_GRP_PCI,
1179 vpci_major,
1180 &vpci_minor);
1181
1182 if (err) {
1183 prom_printf("SUN4V_PCI: Could not register hvapi, "
1184 "err=%d\n", err);
1185 prom_halt();
1186 }
1187 printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
1188 vpci_major, vpci_minor);
1189 }
1190
1189 prop = of_find_property(dp, "reg", NULL); 1191 prop = of_find_property(dp, "reg", NULL);
1190 regs = prop->value; 1192 regs = prop->value;
1191 1193
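
Two things change in pci_sun4v.c: sun4v_pci_init() now negotiates the HV_GRP_PCI hypervisor API group (major 1, minor 1) once before any PBM is set up, and pci_sun4v_iommu_init() sizes the IOMMU directly from the "virtual-dma" property instead of a switch over three known window sizes: the DMA address mask is the window size rounded up to a power of two minus one, and the TSB needs one 8-byte IOPTE per IO page. A standalone sketch of that sizing (userspace C; the 8K IO page size and 8-byte IOPTE are the sun4v values assumed here):

#include <stdio.h>

#define IO_PAGE_SIZE	8192UL			/* sun4v IOMMU page size */

static unsigned long roundup_pow_of_two_ul(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	/* virtual-dma = <base size>; example values for a 2 GB window */
	unsigned long vdma[2] = { 0x80000000UL, 0x80000000UL };

	unsigned long dma_mask = roundup_pow_of_two_ul(vdma[1]) - 1UL;
	unsigned long num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	unsigned long tsbsize = num_tsb_entries * 8UL;	/* sizeof(iopte_t) */

	printf("dma_mask=%#lx entries=%lu tsb=%lu bytes\n",
	       dma_mask, num_tsb_entries, tsbsize);
	/* dma_mask=0x7fffffff, 262144 entries, 2 MB of TSB -- the same
	 * TSB the old 0x80000000 switch case allocated. */
	return 0;
}

The arena bitmap allocation also rounds up, sz = (num_tsb_entries + 7) / 8, so windows whose entry count is not a multiple of eight no longer get a map one byte short.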
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 699b24b890df..5d6adea3967f 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -19,6 +19,7 @@
19#include <asm/prom.h> 19#include <asm/prom.h>
20#include <asm/of_device.h> 20#include <asm/of_device.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sstate.h>
22 23
23#include <linux/unistd.h> 24#include <linux/unistd.h>
24 25
@@ -53,6 +54,7 @@ static void (*poweroff_method)(void) = machine_alt_power_off;
53 54
54void machine_power_off(void) 55void machine_power_off(void)
55{ 56{
57 sstate_poweroff();
56 if (!serial_console || scons_pwroff) { 58 if (!serial_console || scons_pwroff) {
57#ifdef CONFIG_PCI 59#ifdef CONFIG_PCI
58 if (power_reg) { 60 if (power_reg) {
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 952762bfb4c0..f5f97e2c669c 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -45,6 +45,7 @@
45#include <asm/mmu_context.h> 45#include <asm/mmu_context.h>
46#include <asm/unistd.h> 46#include <asm/unistd.h>
47#include <asm/hypervisor.h> 47#include <asm/hypervisor.h>
48#include <asm/sstate.h>
48 49
49/* #define VERBOSE_SHOWREGS */ 50/* #define VERBOSE_SHOWREGS */
50 51
@@ -106,6 +107,7 @@ extern void (*prom_keyboard)(void);
106 107
107void machine_halt(void) 108void machine_halt(void)
108{ 109{
110 sstate_halt();
109 if (!serial_console && prom_palette) 111 if (!serial_console && prom_palette)
110 prom_palette (1); 112 prom_palette (1);
111 if (prom_keyboard) 113 if (prom_keyboard)
@@ -116,6 +118,7 @@ void machine_halt(void)
116 118
117void machine_alt_power_off(void) 119void machine_alt_power_off(void)
118{ 120{
121 sstate_poweroff();
119 if (!serial_console && prom_palette) 122 if (!serial_console && prom_palette)
120 prom_palette(1); 123 prom_palette(1);
121 if (prom_keyboard) 124 if (prom_keyboard)
@@ -128,6 +131,7 @@ void machine_restart(char * cmd)
128{ 131{
129 char *p; 132 char *p;
130 133
134 sstate_reboot();
131 p = strchr (reboot_command, '\n'); 135 p = strchr (reboot_command, '\n');
132 if (p) *p = 0; 136 if (p) *p = 0;
133 if (!serial_console && prom_palette) 137 if (!serial_console && prom_palette)
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 02830e4671f5..dad4b3ba705f 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -28,6 +28,7 @@
28#include <asm/irq.h> 28#include <asm/irq.h>
29#include <asm/asi.h> 29#include <asm/asi.h>
30#include <asm/upa.h> 30#include <asm/upa.h>
31#include <asm/smp.h>
31 32
32static struct device_node *allnodes; 33static struct device_node *allnodes;
33 34
@@ -1665,6 +1666,150 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl
1665 return ret; 1666 return ret;
1666} 1667}
1667 1668
1669static const char *get_mid_prop(void)
1670{
1671 return (tlb_type == spitfire ? "upa-portid" : "portid");
1672}
1673
1674struct device_node *of_find_node_by_cpuid(int cpuid)
1675{
1676 struct device_node *dp;
1677 const char *mid_prop = get_mid_prop();
1678
1679 for_each_node_by_type(dp, "cpu") {
1680 int id = of_getintprop_default(dp, mid_prop, -1);
1681 const char *this_mid_prop = mid_prop;
1682
1683 if (id < 0) {
1684 this_mid_prop = "cpuid";
1685 id = of_getintprop_default(dp, this_mid_prop, -1);
1686 }
1687
1688 if (id < 0) {
1689 prom_printf("OF: Serious problem, cpu lacks "
1690 "%s property", this_mid_prop);
1691 prom_halt();
1692 }
1693 if (cpuid == id)
1694 return dp;
1695 }
1696 return NULL;
1697}
1698
1699static void __init of_fill_in_cpu_data(void)
1700{
1701 struct device_node *dp;
1702 const char *mid_prop = get_mid_prop();
1703
1704 ncpus_probed = 0;
1705 for_each_node_by_type(dp, "cpu") {
1706 int cpuid = of_getintprop_default(dp, mid_prop, -1);
1707 const char *this_mid_prop = mid_prop;
1708 struct device_node *portid_parent;
1709 int portid = -1;
1710
1711 portid_parent = NULL;
1712 if (cpuid < 0) {
1713 this_mid_prop = "cpuid";
1714 cpuid = of_getintprop_default(dp, this_mid_prop, -1);
1715 if (cpuid >= 0) {
1716 int limit = 2;
1717
1718 portid_parent = dp;
1719 while (limit--) {
1720 portid_parent = portid_parent->parent;
1721 if (!portid_parent)
1722 break;
1723 portid = of_getintprop_default(portid_parent,
1724 "portid", -1);
1725 if (portid >= 0)
1726 break;
1727 }
1728 }
1729 }
1730
1731 if (cpuid < 0) {
1732 prom_printf("OF: Serious problem, cpu lacks "
1733 "%s property", this_mid_prop);
1734 prom_halt();
1735 }
1736
1737 ncpus_probed++;
1738
1739#ifdef CONFIG_SMP
1740 if (cpuid >= NR_CPUS)
1741 continue;
1742#else
1743 /* On uniprocessor we only want the values for the
1744 * real physical cpu the kernel booted onto, however
1745 * cpu_data() only has one entry at index 0.
1746 */
1747 if (cpuid != real_hard_smp_processor_id())
1748 continue;
1749 cpuid = 0;
1750#endif
1751
1752 cpu_data(cpuid).clock_tick =
1753 of_getintprop_default(dp, "clock-frequency", 0);
1754
1755 if (portid_parent) {
1756 cpu_data(cpuid).dcache_size =
1757 of_getintprop_default(dp, "l1-dcache-size",
1758 16 * 1024);
1759 cpu_data(cpuid).dcache_line_size =
1760 of_getintprop_default(dp, "l1-dcache-line-size",
1761 32);
1762 cpu_data(cpuid).icache_size =
1763 of_getintprop_default(dp, "l1-icache-size",
1764 8 * 1024);
1765 cpu_data(cpuid).icache_line_size =
1766 of_getintprop_default(dp, "l1-icache-line-size",
1767 32);
1768 cpu_data(cpuid).ecache_size =
1769 of_getintprop_default(dp, "l2-cache-size", 0);
1770 cpu_data(cpuid).ecache_line_size =
1771 of_getintprop_default(dp, "l2-cache-line-size", 0);
1772 if (!cpu_data(cpuid).ecache_size ||
1773 !cpu_data(cpuid).ecache_line_size) {
1774 cpu_data(cpuid).ecache_size =
1775 of_getintprop_default(portid_parent,
1776 "l2-cache-size",
1777 (4 * 1024 * 1024));
1778 cpu_data(cpuid).ecache_line_size =
1779 of_getintprop_default(portid_parent,
1780 "l2-cache-line-size", 64);
1781 }
1782
1783 cpu_data(cpuid).core_id = portid + 1;
1784 } else {
1785 cpu_data(cpuid).dcache_size =
1786 of_getintprop_default(dp, "dcache-size", 16 * 1024);
1787 cpu_data(cpuid).dcache_line_size =
1788 of_getintprop_default(dp, "dcache-line-size", 32);
1789
1790 cpu_data(cpuid).icache_size =
1791 of_getintprop_default(dp, "icache-size", 16 * 1024);
1792 cpu_data(cpuid).icache_line_size =
1793 of_getintprop_default(dp, "icache-line-size", 32);
1794
1795 cpu_data(cpuid).ecache_size =
1796 of_getintprop_default(dp, "ecache-size",
1797 (4 * 1024 * 1024));
1798 cpu_data(cpuid).ecache_line_size =
1799 of_getintprop_default(dp, "ecache-line-size", 64);
1800
1801 cpu_data(cpuid).core_id = 0;
1802 }
1803
1804#ifdef CONFIG_SMP
1805 cpu_set(cpuid, cpu_present_map);
1806 cpu_set(cpuid, phys_cpu_present_map);
1807#endif
1808 }
1809
1810 smp_fill_in_sib_core_maps();
1811}
1812
1668void __init prom_build_devicetree(void) 1813void __init prom_build_devicetree(void)
1669{ 1814{
1670 struct device_node **nextp; 1815 struct device_node **nextp;
@@ -1679,4 +1824,7 @@ void __init prom_build_devicetree(void)
1679 &nextp); 1824 &nextp);
1680 printk("PROM: Built device tree with %u bytes of memory.\n", 1825 printk("PROM: Built device tree with %u bytes of memory.\n",
1681 prom_early_allocated); 1826 prom_early_allocated);
1827
1828 if (tlb_type != hypervisor)
1829 of_fill_in_cpu_data();
1682} 1830}
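
of_fill_in_cpu_data() and of_find_node_by_cpuid() identify a cpu node by whichever numbering property the firmware provides: "upa-portid" on spitfire, "portid" otherwise, falling back to "cpuid"; when only "cpuid" exists the code walks up to two parent nodes looking for a "portid" and records portid + 1 as the shared-core id (0 meaning "no core information"). A toy mock of the property-fallback part (userspace C; the struct and lookup table are stand-ins, of_getintprop_default is the real helper being imitated):

#include <stdio.h>
#include <string.h>

/* Toy stand-in for a device node's integer properties; -1 == absent. */
struct toy_node {
	const char *name;
	int upa_portid, portid, cpuid;
};

static int getintprop_default(const struct toy_node *n, const char *prop, int def)
{
	if (!strcmp(prop, "upa-portid")) return n->upa_portid < 0 ? def : n->upa_portid;
	if (!strcmp(prop, "portid"))     return n->portid     < 0 ? def : n->portid;
	if (!strcmp(prop, "cpuid"))      return n->cpuid      < 0 ? def : n->cpuid;
	return def;
}

/* Same fallback order as of_find_node_by_cpuid(): preferred mid
 * property first, then "cpuid". */
static int node_cpuid(const struct toy_node *n, const char *mid_prop)
{
	int id = getintprop_default(n, mid_prop, -1);

	if (id < 0)
		id = getintprop_default(n, "cpuid", -1);
	return id;
}

int main(void)
{
	struct toy_node sun4u   = { "cpu", -1,  5, -1 };	/* has "portid" */
	struct toy_node niagara = { "cpu", -1, -1,  3 };	/* only "cpuid" */

	printf("sun4u cpu id   = %d\n", node_cpuid(&sun4u, "portid"));
	printf("niagara cpu id = %d\n", node_cpuid(&niagara, "portid"));
	return 0;
}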
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 451028341c75..de9b4c13f1c7 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -46,11 +46,17 @@
46#include <asm/sections.h> 46#include <asm/sections.h>
47#include <asm/setup.h> 47#include <asm/setup.h>
48#include <asm/mmu.h> 48#include <asm/mmu.h>
49#include <asm/ns87303.h>
49 50
50#ifdef CONFIG_IP_PNP 51#ifdef CONFIG_IP_PNP
51#include <net/ipconfig.h> 52#include <net/ipconfig.h>
52#endif 53#endif
53 54
55/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
56 * operations in asm/ns87303.h
57 */
58DEFINE_SPINLOCK(ns87303_lock);
59
54struct screen_info screen_info = { 60struct screen_info screen_info = {
55 0, 0, /* orig-x, orig-y */ 61 0, 0, /* orig-x, orig-y */
56 0, /* unused */ 62 0, /* unused */
@@ -269,6 +275,7 @@ void __init per_cpu_patch(void)
269 275
270void __init sun4v_patch(void) 276void __init sun4v_patch(void)
271{ 277{
278 extern void sun4v_hvapi_init(void);
272 struct sun4v_1insn_patch_entry *p1; 279 struct sun4v_1insn_patch_entry *p1;
273 struct sun4v_2insn_patch_entry *p2; 280 struct sun4v_2insn_patch_entry *p2;
274 281
@@ -300,6 +307,8 @@ void __init sun4v_patch(void)
300 307
301 p2++; 308 p2++;
302 } 309 }
310
311 sun4v_hvapi_init();
303} 312}
304 313
305#ifdef CONFIG_SMP 314#ifdef CONFIG_SMP
@@ -367,8 +376,6 @@ void __init setup_arch(char **cmdline_p)
367 init_cur_cpu_trap(current_thread_info()); 376 init_cur_cpu_trap(current_thread_info());
368 377
369 paging_init(); 378 paging_init();
370
371 smp_setup_cpu_possible_map();
372} 379}
373 380
374static int __init set_preferred_console(void) 381static int __init set_preferred_console(void)
@@ -421,7 +428,7 @@ extern void mmu_info(struct seq_file *);
421unsigned int dcache_parity_tl1_occurred; 428unsigned int dcache_parity_tl1_occurred;
422unsigned int icache_parity_tl1_occurred; 429unsigned int icache_parity_tl1_occurred;
423 430
424static int ncpus_probed; 431int ncpus_probed;
425 432
426static int show_cpuinfo(struct seq_file *m, void *__unused) 433static int show_cpuinfo(struct seq_file *m, void *__unused)
427{ 434{
@@ -513,14 +520,6 @@ static int __init topology_init(void)
513 520
514 err = -ENOMEM; 521 err = -ENOMEM;
515 522
516 /* Count the number of physically present processors in
517 * the machine, even on uniprocessor, so that /proc/cpuinfo
518 * output is consistent with 2.4.x
519 */
520 ncpus_probed = 0;
521 while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
522 ncpus_probed++;
523
524 for_each_possible_cpu(i) { 523 for_each_possible_cpu(i) {
525 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); 524 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
526 if (p) { 525 if (p) {
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 24fdf1d0adc5..c550bba3490a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -40,6 +40,7 @@
40#include <asm/tlb.h> 40#include <asm/tlb.h>
41#include <asm/sections.h> 41#include <asm/sections.h>
42#include <asm/prom.h> 42#include <asm/prom.h>
43#include <asm/mdesc.h>
43 44
44extern void calibrate_delay(void); 45extern void calibrate_delay(void);
45 46
@@ -75,53 +76,6 @@ void smp_bogo(struct seq_file *m)
75 i, cpu_data(i).clock_tick); 76 i, cpu_data(i).clock_tick);
76} 77}
77 78
78void __init smp_store_cpu_info(int id)
79{
80 struct device_node *dp;
81 int def;
82
83 cpu_data(id).udelay_val = loops_per_jiffy;
84
85 cpu_find_by_mid(id, &dp);
86 cpu_data(id).clock_tick =
87 of_getintprop_default(dp, "clock-frequency", 0);
88
89 def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
90 cpu_data(id).dcache_size =
91 of_getintprop_default(dp, "dcache-size", def);
92
93 def = 32;
94 cpu_data(id).dcache_line_size =
95 of_getintprop_default(dp, "dcache-line-size", def);
96
97 def = 16 * 1024;
98 cpu_data(id).icache_size =
99 of_getintprop_default(dp, "icache-size", def);
100
101 def = 32;
102 cpu_data(id).icache_line_size =
103 of_getintprop_default(dp, "icache-line-size", def);
104
105 def = ((tlb_type == hypervisor) ?
106 (3 * 1024 * 1024) :
107 (4 * 1024 * 1024));
108 cpu_data(id).ecache_size =
109 of_getintprop_default(dp, "ecache-size", def);
110
111 def = 64;
112 cpu_data(id).ecache_line_size =
113 of_getintprop_default(dp, "ecache-line-size", def);
114
115 printk("CPU[%d]: Caches "
116 "D[sz(%d):line_sz(%d)] "
117 "I[sz(%d):line_sz(%d)] "
118 "E[sz(%d):line_sz(%d)]\n",
119 id,
120 cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
121 cpu_data(id).icache_size, cpu_data(id).icache_line_size,
122 cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
123}
124
125extern void setup_sparc64_timer(void); 79extern void setup_sparc64_timer(void);
126 80
127static volatile unsigned long callin_flag = 0; 81static volatile unsigned long callin_flag = 0;
@@ -145,7 +99,7 @@ void __init smp_callin(void)
145 local_irq_enable(); 99 local_irq_enable();
146 100
147 calibrate_delay(); 101 calibrate_delay();
148 smp_store_cpu_info(cpuid); 102 cpu_data(cpuid).udelay_val = loops_per_jiffy;
149 callin_flag = 1; 103 callin_flag = 1;
150 __asm__ __volatile__("membar #Sync\n\t" 104 __asm__ __volatile__("membar #Sync\n\t"
151 "flush %%g6" : : : "memory"); 105 "flush %%g6" : : : "memory");
@@ -340,9 +294,8 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
340 294
341 prom_startcpu_cpuid(cpu, entry, cookie); 295 prom_startcpu_cpuid(cpu, entry, cookie);
342 } else { 296 } else {
343 struct device_node *dp; 297 struct device_node *dp = of_find_node_by_cpuid(cpu);
344 298
345 cpu_find_by_mid(cpu, &dp);
346 prom_startcpu(dp->node, entry, cookie); 299 prom_startcpu(dp->node, entry, cookie);
347 } 300 }
348 301
@@ -447,7 +400,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
447static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) 400static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
448{ 401{
449 u64 pstate, ver; 402 u64 pstate, ver;
450 int nack_busy_id, is_jbus; 403 int nack_busy_id, is_jbus, need_more;
451 404
452 if (cpus_empty(mask)) 405 if (cpus_empty(mask))
453 return; 406 return;
@@ -463,6 +416,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
463 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); 416 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
464 417
465retry: 418retry:
419 need_more = 0;
466 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" 420 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
467 : : "r" (pstate), "i" (PSTATE_IE)); 421 : : "r" (pstate), "i" (PSTATE_IE));
468 422
@@ -491,6 +445,10 @@ retry:
491 : /* no outputs */ 445 : /* no outputs */
492 : "r" (target), "i" (ASI_INTR_W)); 446 : "r" (target), "i" (ASI_INTR_W));
493 nack_busy_id++; 447 nack_busy_id++;
448 if (nack_busy_id == 32) {
449 need_more = 1;
450 break;
451 }
494 } 452 }
495 } 453 }
496 454
@@ -507,6 +465,16 @@ retry:
507 if (dispatch_stat == 0UL) { 465 if (dispatch_stat == 0UL) {
508 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 466 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
509 : : "r" (pstate)); 467 : : "r" (pstate));
468 if (unlikely(need_more)) {
469 int i, cnt = 0;
470 for_each_cpu_mask(i, mask) {
471 cpu_clear(i, mask);
472 cnt++;
473 if (cnt == 32)
474 break;
475 }
476 goto retry;
477 }
510 return; 478 return;
511 } 479 }
512 if (!--stuck) 480 if (!--stuck)
@@ -544,6 +512,8 @@ retry:
544 if ((dispatch_stat & check_mask) == 0) 512 if ((dispatch_stat & check_mask) == 0)
545 cpu_clear(i, mask); 513 cpu_clear(i, mask);
546 this_busy_nack += 2; 514 this_busy_nack += 2;
515 if (this_busy_nack == 64)
516 break;
547 } 517 }
548 518
549 goto retry; 519 goto retry;
@@ -1191,23 +1161,14 @@ int setup_profiling_timer(unsigned int multiplier)
1191 1161
1192static void __init smp_tune_scheduling(void) 1162static void __init smp_tune_scheduling(void)
1193{ 1163{
1194 struct device_node *dp; 1164 unsigned int smallest = ~0U;
1195 int instance; 1165 int i;
1196 unsigned int def, smallest = ~0U;
1197
1198 def = ((tlb_type == hypervisor) ?
1199 (3 * 1024 * 1024) :
1200 (4 * 1024 * 1024));
1201 1166
1202 instance = 0; 1167 for (i = 0; i < NR_CPUS; i++) {
1203 while (!cpu_find_by_instance(instance, &dp, NULL)) { 1168 unsigned int val = cpu_data(i).ecache_size;
1204 unsigned int val;
1205 1169
1206 val = of_getintprop_default(dp, "ecache-size", def); 1170 if (val && val < smallest)
1207 if (val < smallest)
1208 smallest = val; 1171 smallest = val;
1209
1210 instance++;
1211 } 1172 }
1212 1173
1213 /* Any value less than 256K is nonsense. */ 1174 /* Any value less than 256K is nonsense. */
@@ -1230,58 +1191,42 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1230 int i; 1191 int i;
1231 1192
1232 if (num_possible_cpus() > max_cpus) { 1193 if (num_possible_cpus() > max_cpus) {
1233 int instance, mid; 1194 for_each_possible_cpu(i) {
1234 1195 if (i != boot_cpu_id) {
1235 instance = 0; 1196 cpu_clear(i, phys_cpu_present_map);
1236 while (!cpu_find_by_instance(instance, NULL, &mid)) { 1197 cpu_clear(i, cpu_present_map);
1237 if (mid != boot_cpu_id) {
1238 cpu_clear(mid, phys_cpu_present_map);
1239 cpu_clear(mid, cpu_present_map);
1240 if (num_possible_cpus() <= max_cpus) 1198 if (num_possible_cpus() <= max_cpus)
1241 break; 1199 break;
1242 } 1200 }
1243 instance++;
1244 } 1201 }
1245 } 1202 }
1246 1203
1247 for_each_possible_cpu(i) { 1204 cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
1248 if (tlb_type == hypervisor) {
1249 int j;
1250
1251 /* XXX get this mapping from machine description */
1252 for_each_possible_cpu(j) {
1253 if ((j >> 2) == (i >> 2))
1254 cpu_set(j, cpu_sibling_map[i]);
1255 }
1256 } else {
1257 cpu_set(i, cpu_sibling_map[i]);
1258 }
1259 }
1260
1261 smp_store_cpu_info(boot_cpu_id);
1262 smp_tune_scheduling(); 1205 smp_tune_scheduling();
1263} 1206}
1264 1207
1265/* Set this up early so that things like the scheduler can init 1208void __devinit smp_prepare_boot_cpu(void)
1266 * properly. We use the same cpu mask for both the present and
1267 * possible cpu map.
1268 */
1269void __init smp_setup_cpu_possible_map(void)
1270{ 1209{
1271 int instance, mid;
1272
1273 instance = 0;
1274 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1275 if (mid < NR_CPUS) {
1276 cpu_set(mid, phys_cpu_present_map);
1277 cpu_set(mid, cpu_present_map);
1278 }
1279 instance++;
1280 }
1281} 1210}
1282 1211
1283void __devinit smp_prepare_boot_cpu(void) 1212void __devinit smp_fill_in_sib_core_maps(void)
1284{ 1213{
1214 unsigned int i;
1215
1216 for_each_possible_cpu(i) {
1217 unsigned int j;
1218
1219 if (cpu_data(i).core_id == 0) {
1220 cpu_set(i, cpu_sibling_map[i]);
1221 continue;
1222 }
1223
1224 for_each_possible_cpu(j) {
1225 if (cpu_data(i).core_id ==
1226 cpu_data(j).core_id)
1227 cpu_set(j, cpu_sibling_map[i]);
1228 }
1229 }
1285} 1230}
1286 1231
1287int __cpuinit __cpu_up(unsigned int cpu) 1232int __cpuinit __cpu_up(unsigned int cpu)
@@ -1337,7 +1282,7 @@ unsigned long __per_cpu_shift __read_mostly;
1337EXPORT_SYMBOL(__per_cpu_base); 1282EXPORT_SYMBOL(__per_cpu_base);
1338EXPORT_SYMBOL(__per_cpu_shift); 1283EXPORT_SYMBOL(__per_cpu_shift);
1339 1284
1340void __init setup_per_cpu_areas(void) 1285void __init real_setup_per_cpu_areas(void)
1341{ 1286{
1342 unsigned long goal, size, i; 1287 unsigned long goal, size, i;
1343 char *ptr; 1288 char *ptr;
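
The smp.c changes drop the per-cpu device-tree scraping (smp_store_cpu_info, smp_setup_cpu_possible_map) now that prom.c and mdesc.c fill cpu_data directly, and smp_fill_in_sib_core_maps() replaces the old "XXX get this mapping from machine description" guess (siblings if two cpu numbers share the same i >> 2) with real data: two cpus share a core exactly when they carry the same non-zero core_id. A standalone sketch of that grouping (userspace C; fixed-size arrays stand in for cpu_data() and cpumask_t):

#include <stdio.h>

#define TOY_NR_CPUS 8

int main(void)
{
	/* core_id per cpu; 0 means "unknown, treat the cpu as alone". */
	int core_id[TOY_NR_CPUS] = { 1, 1, 1, 1, 2, 2, 0, 0 };
	unsigned int sibling_map[TOY_NR_CPUS] = { 0 };

	for (int i = 0; i < TOY_NR_CPUS; i++) {
		if (core_id[i] == 0) {
			sibling_map[i] |= 1U << i;	/* only itself */
			continue;
		}
		for (int j = 0; j < TOY_NR_CPUS; j++)
			if (core_id[i] == core_id[j])
				sibling_map[i] |= 1U << j;
	}

	for (int i = 0; i < TOY_NR_CPUS; i++)
		printf("cpu%d siblings: %#x\n", i, sibling_map[i]);
	return 0;
}

The cheetah cross-call path in the same file also learns to dispatch at most 32 cpus per hardware shot and retry with the remainder, which is what the new need_more bookkeeping implements.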
diff --git a/arch/sparc64/kernel/sstate.c b/arch/sparc64/kernel/sstate.c
new file mode 100644
index 000000000000..5b6e75b7f052
--- /dev/null
+++ b/arch/sparc64/kernel/sstate.c
@@ -0,0 +1,104 @@
1/* sstate.c: System soft state support.
2 *
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h>
7#include <linux/notifier.h>
8#include <linux/init.h>
9
10#include <asm/hypervisor.h>
11#include <asm/sstate.h>
12#include <asm/oplib.h>
13#include <asm/head.h>
14#include <asm/io.h>
15
16static int hv_supports_soft_state;
17
18static unsigned long kimage_addr_to_ra(const char *p)
19{
20 unsigned long val = (unsigned long) p;
21
22 return kern_base + (val - KERNBASE);
23}
24
25static void do_set_sstate(unsigned long state, const char *msg)
26{
27 unsigned long err;
28
29 if (!hv_supports_soft_state)
30 return;
31
32 err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg));
33 if (err) {
34 printk(KERN_WARNING "SSTATE: Failed to set soft-state to "
35 "state[%lx] msg[%s], err=%lu\n",
36 state, msg, err);
37 }
38}
39
40static const char booting_msg[32] __attribute__((aligned(32))) =
41 "Linux booting";
42static const char running_msg[32] __attribute__((aligned(32))) =
43 "Linux running";
44static const char halting_msg[32] __attribute__((aligned(32))) =
45 "Linux halting";
46static const char poweroff_msg[32] __attribute__((aligned(32))) =
47 "Linux powering off";
48static const char rebooting_msg[32] __attribute__((aligned(32))) =
49 "Linux rebooting";
50static const char panicing_msg[32] __attribute__((aligned(32))) =
51 "Linux panicing";
52
53void sstate_booting(void)
54{
55 do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg);
56}
57
58void sstate_running(void)
59{
60 do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg);
61}
62
63void sstate_halt(void)
64{
65 do_set_sstate(HV_SOFT_STATE_TRANSITION, halting_msg);
66}
67
68void sstate_poweroff(void)
69{
70 do_set_sstate(HV_SOFT_STATE_TRANSITION, poweroff_msg);
71}
72
73void sstate_reboot(void)
74{
75 do_set_sstate(HV_SOFT_STATE_TRANSITION, rebooting_msg);
76}
77
78static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
79{
80 do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
81
82 return NOTIFY_DONE;
83}
84
85static struct notifier_block sstate_panic_block = {
86 .notifier_call = sstate_panic_event,
87 .priority = INT_MAX,
88};
89
90void __init sun4v_sstate_init(void)
91{
92 unsigned long major, minor;
93
94 major = 1;
95 minor = 0;
96 if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor))
97 return;
98
99 hv_supports_soft_state = 1;
100
101 prom_sun4v_guest_soft_state();
102 atomic_notifier_chain_register(&panic_notifier_list,
103 &sstate_panic_block);
104}
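
sstate.c reports the guest's soft state ("booting", "running", "halting", ...) to the service processor through the sun4v soft-state hypervisor API. The mach-set-soft-state call takes the real address of a NUL-terminated, 32-byte-aligned message, so each message lives in a static buffer inside the kernel image and kimage_addr_to_ra() translates its virtual address with kern_base + (vaddr - KERNBASE). A tiny illustration of that translation (the numeric values below are made up for the example, not real sparc64 constants):

#include <stdio.h>

int main(void)
{
	/* Illustrative values only: KERNBASE is the fixed virtual base the
	 * kernel image is linked at, kern_base the physical (real) address
	 * the firmware actually loaded it to. */
	unsigned long KERNBASE  = 0x0000000000400000UL;
	unsigned long kern_base = 0x0000000008000000UL;

	unsigned long msg_vaddr = KERNBASE + 0x123456UL;	/* &running_msg, say */
	unsigned long msg_ra    = kern_base + (msg_vaddr - KERNBASE);

	printf("vaddr %#lx -> real address %#lx\n", msg_vaddr, msg_ra);
	return 0;
}

sun4v_sstate_init() only enables any of this after registering the HV_GRP_SOFT_STATE API group and asking the PROM whether the "SUNW,soft-state-supported" service exists; the panic notifier then makes sure a crash is reported as a state transition as well.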
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index 405855dd886b..574bc248bca6 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -22,12 +22,12 @@ sun4v_cpu_mondo:
22 be,pn %xcc, sun4v_cpu_mondo_queue_empty 22 be,pn %xcc, sun4v_cpu_mondo_queue_empty
23 nop 23 nop
24 24
25 /* Get &trap_block[smp_processor_id()] into %g3. */ 25 /* Get &trap_block[smp_processor_id()] into %g4. */
26 ldxa [%g0] ASI_SCRATCHPAD, %g3 26 ldxa [%g0] ASI_SCRATCHPAD, %g4
27 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 27 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
28 28
29 /* Get CPU mondo queue base phys address into %g7. */ 29 /* Get CPU mondo queue base phys address into %g7. */
30 ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 30 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
31 31
32 /* Now get the cross-call arguments and handler PC, same 32 /* Now get the cross-call arguments and handler PC, same
33 * layout as sun4u: 33 * layout as sun4u:
@@ -47,8 +47,7 @@ sun4v_cpu_mondo:
47 add %g2, 0x40 - 0x8 - 0x8, %g2 47 add %g2, 0x40 - 0x8 - 0x8, %g2
48 48
49 /* Update queue head pointer. */ 49 /* Update queue head pointer. */
50 sethi %hi(8192 - 1), %g4 50 lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
51 or %g4, %lo(8192 - 1), %g4
52 and %g2, %g4, %g2 51 and %g2, %g4, %g2
53 52
54 mov INTRQ_CPU_MONDO_HEAD, %g4 53 mov INTRQ_CPU_MONDO_HEAD, %g4
@@ -71,12 +70,12 @@ sun4v_dev_mondo:
71 be,pn %xcc, sun4v_dev_mondo_queue_empty 70 be,pn %xcc, sun4v_dev_mondo_queue_empty
72 nop 71 nop
73 72
74 /* Get &trap_block[smp_processor_id()] into %g3. */ 73 /* Get &trap_block[smp_processor_id()] into %g4. */
75 ldxa [%g0] ASI_SCRATCHPAD, %g3 74 ldxa [%g0] ASI_SCRATCHPAD, %g4
76 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 75 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
77 76
78 /* Get DEV mondo queue base phys address into %g5. */ 77 /* Get DEV mondo queue base phys address into %g5. */
79 ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 78 ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
80 79
81 /* Load IVEC into %g3. */ 80 /* Load IVEC into %g3. */
82 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 81 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
@@ -90,8 +89,7 @@ sun4v_dev_mondo:
90 */ 89 */
91 90
92 /* Update queue head pointer, this frees up some registers. */ 91 /* Update queue head pointer, this frees up some registers. */
93 sethi %hi(8192 - 1), %g4 92 lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
94 or %g4, %lo(8192 - 1), %g4
95 and %g2, %g4, %g2 93 and %g2, %g4, %g2
96 94
97 mov INTRQ_DEVICE_MONDO_HEAD, %g4 95 mov INTRQ_DEVICE_MONDO_HEAD, %g4
@@ -143,6 +141,8 @@ sun4v_res_mondo:
143 brnz,pn %g1, sun4v_res_mondo_queue_full 141 brnz,pn %g1, sun4v_res_mondo_queue_full
144 nop 142 nop
145 143
144 lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
145
146 /* Remember this entry's offset in %g1. */ 146 /* Remember this entry's offset in %g1. */
147 mov %g2, %g1 147 mov %g2, %g1
148 148
@@ -173,8 +173,6 @@ sun4v_res_mondo:
173 add %g2, 0x08, %g2 173 add %g2, 0x08, %g2
174 174
175 /* Update queue head pointer. */ 175 /* Update queue head pointer. */
176 sethi %hi(8192 - 1), %g4
177 or %g4, %lo(8192 - 1), %g4
178 and %g2, %g4, %g2 176 and %g2, %g4, %g2
179 177
180 mov INTRQ_RESUM_MONDO_HEAD, %g4 178 mov INTRQ_RESUM_MONDO_HEAD, %g4
@@ -254,6 +252,8 @@ sun4v_nonres_mondo:
254 brnz,pn %g1, sun4v_nonres_mondo_queue_full 252 brnz,pn %g1, sun4v_nonres_mondo_queue_full
255 nop 253 nop
256 254
255 lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
256
257 /* Remember this entry's offset in %g1. */ 257 /* Remember this entry's offset in %g1. */
258 mov %g2, %g1 258 mov %g2, %g1
259 259
@@ -284,8 +284,6 @@ sun4v_nonres_mondo:
284 add %g2, 0x08, %g2 284 add %g2, 0x08, %g2
285 285
286 /* Update queue head pointer. */ 286 /* Update queue head pointer. */
287 sethi %hi(8192 - 1), %g4
288 or %g4, %lo(8192 - 1), %g4
289 and %g2, %g4, %g2 287 and %g2, %g4, %g2
290 288
291 mov INTRQ_NONRESUM_MONDO_HEAD, %g4 289 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 6b9a06e42542..a31a0439244f 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -680,22 +680,14 @@ static int starfire_set_time(u32 val)
680 680
681static u32 hypervisor_get_time(void) 681static u32 hypervisor_get_time(void)
682{ 682{
683 register unsigned long func asm("%o5"); 683 unsigned long ret, time;
684 register unsigned long arg0 asm("%o0");
685 register unsigned long arg1 asm("%o1");
686 int retries = 10000; 684 int retries = 10000;
687 685
688retry: 686retry:
689 func = HV_FAST_TOD_GET; 687 ret = sun4v_tod_get(&time);
690 arg0 = 0; 688 if (ret == HV_EOK)
691 arg1 = 0; 689 return time;
692 __asm__ __volatile__("ta %6" 690 if (ret == HV_EWOULDBLOCK) {
693 : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
694 : "0" (func), "1" (arg0), "2" (arg1),
695 "i" (HV_FAST_TRAP));
696 if (arg0 == HV_EOK)
697 return arg1;
698 if (arg0 == HV_EWOULDBLOCK) {
699 if (--retries > 0) { 691 if (--retries > 0) {
700 udelay(100); 692 udelay(100);
701 goto retry; 693 goto retry;
@@ -709,20 +701,14 @@ retry:
709 701
710static int hypervisor_set_time(u32 secs) 702static int hypervisor_set_time(u32 secs)
711{ 703{
712 register unsigned long func asm("%o5"); 704 unsigned long ret;
713 register unsigned long arg0 asm("%o0");
714 int retries = 10000; 705 int retries = 10000;
715 706
716retry: 707retry:
717 func = HV_FAST_TOD_SET; 708 ret = sun4v_tod_set(secs);
718 arg0 = secs; 709 if (ret == HV_EOK)
719 __asm__ __volatile__("ta %4"
720 : "=&r" (func), "=&r" (arg0)
721 : "0" (func), "1" (arg0),
722 "i" (HV_FAST_TRAP));
723 if (arg0 == HV_EOK)
724 return 0; 710 return 0;
725 if (arg0 == HV_EWOULDBLOCK) { 711 if (ret == HV_EWOULDBLOCK) {
726 if (--retries > 0) { 712 if (--retries > 0) {
727 udelay(100); 713 udelay(100);
728 goto retry; 714 goto retry;
@@ -862,7 +848,6 @@ fs_initcall(clock_init);
862static unsigned long sparc64_init_timers(void) 848static unsigned long sparc64_init_timers(void)
863{ 849{
864 struct device_node *dp; 850 struct device_node *dp;
865 struct property *prop;
866 unsigned long clock; 851 unsigned long clock;
867#ifdef CONFIG_SMP 852#ifdef CONFIG_SMP
868 extern void smp_tick_init(void); 853 extern void smp_tick_init(void);
@@ -879,17 +864,15 @@ static unsigned long sparc64_init_timers(void)
879 if (manuf == 0x17 && impl == 0x13) { 864 if (manuf == 0x17 && impl == 0x13) {
880 /* Hummingbird, aka Ultra-IIe */ 865 /* Hummingbird, aka Ultra-IIe */
881 tick_ops = &hbtick_operations; 866 tick_ops = &hbtick_operations;
882 prop = of_find_property(dp, "stick-frequency", NULL); 867 clock = of_getintprop_default(dp, "stick-frequency", 0);
883 } else { 868 } else {
884 tick_ops = &tick_operations; 869 tick_ops = &tick_operations;
885 cpu_find_by_instance(0, &dp, NULL); 870 clock = local_cpu_data().clock_tick;
886 prop = of_find_property(dp, "clock-frequency", NULL);
887 } 871 }
888 } else { 872 } else {
889 tick_ops = &stick_operations; 873 tick_ops = &stick_operations;
890 prop = of_find_property(dp, "stick-frequency", NULL); 874 clock = of_getintprop_default(dp, "stick-frequency", 0);
891 } 875 }
892 clock = *(unsigned int *) prop->value;
893 876
894#ifdef CONFIG_SMP 877#ifdef CONFIG_SMP
895 smp_tick_init(); 878 smp_tick_init();
@@ -1030,7 +1013,7 @@ void __devinit setup_sparc64_timer(void)
1030 clockevents_register_device(sevt); 1013 clockevents_register_device(sevt);
1031} 1014}
1032 1015
1033#define SPARC64_NSEC_PER_CYC_SHIFT 32UL 1016#define SPARC64_NSEC_PER_CYC_SHIFT 10UL
1034 1017
1035static struct clocksource clocksource_tick = { 1018static struct clocksource clocksource_tick = {
1036 .rating = 100, 1019 .rating = 100,
@@ -1365,6 +1348,7 @@ static int hypervisor_set_rtc_time(struct rtc_time *time)
1365 return hypervisor_set_time(seconds); 1348 return hypervisor_set_time(seconds);
1366} 1349}
1367 1350
1351#ifdef CONFIG_PCI
1368static void bq4802_get_rtc_time(struct rtc_time *time) 1352static void bq4802_get_rtc_time(struct rtc_time *time)
1369{ 1353{
1370 unsigned char val = readb(bq4802_regs + 0x0e); 1354 unsigned char val = readb(bq4802_regs + 0x0e);
@@ -1436,6 +1420,7 @@ static int bq4802_set_rtc_time(struct rtc_time *time)
1436 1420
1437 return 0; 1421 return 0;
1438} 1422}
1423#endif /* CONFIG_PCI */
1439 1424
1440struct mini_rtc_ops { 1425struct mini_rtc_ops {
1441 void (*get_rtc_time)(struct rtc_time *); 1426 void (*get_rtc_time)(struct rtc_time *);
@@ -1452,10 +1437,12 @@ static struct mini_rtc_ops hypervisor_rtc_ops = {
1452 .set_rtc_time = hypervisor_set_rtc_time, 1437 .set_rtc_time = hypervisor_set_rtc_time,
1453}; 1438};
1454 1439
1440#ifdef CONFIG_PCI
1455static struct mini_rtc_ops bq4802_rtc_ops = { 1441static struct mini_rtc_ops bq4802_rtc_ops = {
1456 .get_rtc_time = bq4802_get_rtc_time, 1442 .get_rtc_time = bq4802_get_rtc_time,
1457 .set_rtc_time = bq4802_set_rtc_time, 1443 .set_rtc_time = bq4802_set_rtc_time,
1458}; 1444};
1445#endif /* CONFIG_PCI */
1459 1446
1460static struct mini_rtc_ops *mini_rtc_ops; 1447static struct mini_rtc_ops *mini_rtc_ops;
1461 1448
@@ -1579,8 +1566,10 @@ static int __init rtc_mini_init(void)
1579 mini_rtc_ops = &hypervisor_rtc_ops; 1566 mini_rtc_ops = &hypervisor_rtc_ops;
1580 else if (this_is_starfire) 1567 else if (this_is_starfire)
1581 mini_rtc_ops = &starfire_rtc_ops; 1568 mini_rtc_ops = &starfire_rtc_ops;
1569#ifdef CONFIG_PCI
1582 else if (bq4802_regs) 1570 else if (bq4802_regs)
1583 mini_rtc_ops = &bq4802_rtc_ops; 1571 mini_rtc_ops = &bq4802_rtc_ops;
1572#endif /* CONFIG_PCI */
1584 else 1573 else
1585 return -ENODEV; 1574 return -ENODEV;
1586 1575
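
Besides switching the TOD reads and writes to the sun4v_tod_get()/sun4v_tod_set() wrappers and making the bq4802 RTC code depend on CONFIG_PCI, the time.c hunk lowers SPARC64_NSEC_PER_CYC_SHIFT from 32 to 10. The diff itself does not state the motivation, but the usual reason for a smaller clocksource shift is multiplication headroom: nanoseconds are computed as (cycles * mult) >> shift with mult = (NSEC_PER_SEC << shift) / freq, so a large shift means a large mult and an earlier 64-bit overflow on long cycle deltas. A sketch of the trade-off with an assumed 1.2 GHz tick (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long long NSEC_PER_SEC = 1000000000ULL;
	unsigned long long freq = 1200000000ULL;	/* assumed 1.2 GHz tick */
	unsigned int shifts[2] = { 10, 32 };

	for (int i = 0; i < 2; i++) {
		unsigned int shift = shifts[i];
		unsigned long long mult = (NSEC_PER_SEC << shift) / freq;
		/* largest cycle delta before cycles * mult overflows 64 bits */
		unsigned long long max_cycles = ~0ULL / mult;

		printf("shift %2u: mult=%llu, overflow after ~%llu seconds\n",
		       shift, mult, max_cycles / freq);
	}
	return 0;
}

At this rate a shift of 10 leaves roughly 200 days of headroom, while a shift of 32 overflows after only a few seconds, at the cost of a somewhat coarser mult.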
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index d0fde36395b4..00a9e3286c83 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -795,8 +795,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector
795void __init cheetah_ecache_flush_init(void) 795void __init cheetah_ecache_flush_init(void)
796{ 796{
797 unsigned long largest_size, smallest_linesize, order, ver; 797 unsigned long largest_size, smallest_linesize, order, ver;
798 struct device_node *dp; 798 int i, sz;
799 int i, instance, sz;
800 799
801 /* Scan all cpu device tree nodes, note two values: 800 /* Scan all cpu device tree nodes, note two values:
802 * 1) largest E-cache size 801 * 1) largest E-cache size
@@ -805,18 +804,20 @@ void __init cheetah_ecache_flush_init(void)
805 largest_size = 0UL; 804 largest_size = 0UL;
806 smallest_linesize = ~0UL; 805 smallest_linesize = ~0UL;
807 806
808 instance = 0; 807 for (i = 0; i < NR_CPUS; i++) {
809 while (!cpu_find_by_instance(instance, &dp, NULL)) {
810 unsigned long val; 808 unsigned long val;
811 809
812 val = of_getintprop_default(dp, "ecache-size", 810 val = cpu_data(i).ecache_size;
813 (2 * 1024 * 1024)); 811 if (!val)
812 continue;
813
814 if (val > largest_size) 814 if (val > largest_size)
815 largest_size = val; 815 largest_size = val;
816 val = of_getintprop_default(dp, "ecache-line-size", 64); 816
817 val = cpu_data(i).ecache_line_size;
817 if (val < smallest_linesize) 818 if (val < smallest_linesize)
818 smallest_linesize = val; 819 smallest_linesize = val;
819 instance++; 820
820 } 821 }
821 822
822 if (largest_size == 0UL || smallest_linesize == ~0UL) { 823 if (largest_size == 0UL || smallest_linesize == ~0UL) {
@@ -2564,7 +2565,15 @@ void __init trap_init(void)
2564 (TRAP_PER_CPU_TSB_HUGE_TEMP != 2565 (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2565 offsetof(struct trap_per_cpu, tsb_huge_temp)) || 2566 offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2566 (TRAP_PER_CPU_IRQ_WORKLIST != 2567 (TRAP_PER_CPU_IRQ_WORKLIST !=
2567 offsetof(struct trap_per_cpu, irq_worklist))) 2568 offsetof(struct trap_per_cpu, irq_worklist)) ||
2569 (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2570 offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2571 (TRAP_PER_CPU_DEV_MONDO_QMASK !=
2572 offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
2573 (TRAP_PER_CPU_RESUM_QMASK !=
2574 offsetof(struct trap_per_cpu, resum_qmask)) ||
2575 (TRAP_PER_CPU_NONRESUM_QMASK !=
2576 offsetof(struct trap_per_cpu, nonresum_qmask)))
2568 trap_per_cpu_offsets_are_bolixed_dave(); 2577 trap_per_cpu_offsets_are_bolixed_dave();
2569 2578
2570 if ((TSB_CONFIG_TSB != 2579 if ((TSB_CONFIG_TSB !=
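
The trap_init() hunk extends the existing layout checks to the four new *_qmask fields: the assembly in sun4v_ivec.S addresses trap_per_cpu members through hand-maintained TRAP_PER_CPU_* byte offsets, and the trap_per_cpu_offsets_are_bolixed_dave() call is, as elsewhere in this file, effectively a build-time tripwire that only survives compilation when one of the offset comparisons is not provably false. The same idea expressed with modern C11 static assertions, on a toy structure whose offsets are invented for the example:

#include <stddef.h>
#include <stdint.h>

/* Toy layout mirroring the idea: asm code addresses fields of a per-cpu
 * structure by hand-maintained byte offsets, and C code asserts at build
 * time that the two stay in sync.  Offsets below belong to this toy
 * struct only, not the real trap_per_cpu layout. */
struct toy_trap_per_cpu {
	uint64_t	fault_info[8];		/* 0x00 */
	uint64_t	cpu_mondo_pa;		/* 0x40 */
	uint64_t	dev_mondo_pa;		/* 0x48 */
	uint32_t	cpu_mondo_qmask;	/* 0x50 */
	uint32_t	dev_mondo_qmask;	/* 0x54 */
};

#define TOY_CPU_MONDO_QMASK	0x50
#define TOY_DEV_MONDO_QMASK	0x54

_Static_assert(offsetof(struct toy_trap_per_cpu, cpu_mondo_qmask) == TOY_CPU_MONDO_QMASK,
	       "asm offset out of sync");
_Static_assert(offsetof(struct toy_trap_per_cpu, dev_mondo_qmask) == TOY_DEV_MONDO_QMASK,
	       "asm offset out of sync");

int main(void) { return 0; }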
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 13fa2a2e4513..fb648de18a8d 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -14,7 +14,7 @@ SECTIONS
14 .text 0x0000000000404000 : 14 .text 0x0000000000404000 :
15 { 15 {
16 _text = .; 16 _text = .;
17 *(.text) 17 TEXT_TEXT
18 SCHED_TEXT 18 SCHED_TEXT
19 LOCK_TEXT 19 LOCK_TEXT
20 KPROBES_TEXT 20 KPROBES_TEXT
@@ -27,7 +27,7 @@ SECTIONS
27 27
28 .data : 28 .data :
29 { 29 {
30 *(.data) 30 DATA_DATA
31 CONSTRUCTORS 31 CONSTRUCTORS
32 } 32 }
33 .data1 : { *(.data1) } 33 .data1 : { *(.data1) }
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 6e5b01d779d2..3010227fe243 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -23,6 +23,7 @@
23#include <linux/kprobes.h> 23#include <linux/kprobes.h>
24#include <linux/cache.h> 24#include <linux/cache.h>
25#include <linux/sort.h> 25#include <linux/sort.h>
26#include <linux/percpu.h>
26 27
27#include <asm/head.h> 28#include <asm/head.h>
28#include <asm/system.h> 29#include <asm/system.h>
@@ -43,8 +44,8 @@
43#include <asm/tsb.h> 44#include <asm/tsb.h>
44#include <asm/hypervisor.h> 45#include <asm/hypervisor.h>
45#include <asm/prom.h> 46#include <asm/prom.h>
46 47#include <asm/sstate.h>
47extern void device_scan(void); 48#include <asm/mdesc.h>
48 49
49#define MAX_PHYS_ADDRESS (1UL << 42UL) 50#define MAX_PHYS_ADDRESS (1UL << 42UL)
50#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) 51#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -60,8 +61,11 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
60unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; 61unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
61 62
62#ifndef CONFIG_DEBUG_PAGEALLOC 63#ifndef CONFIG_DEBUG_PAGEALLOC
63/* A special kernel TSB for 4MB and 256MB linear mappings. */ 64/* A special kernel TSB for 4MB and 256MB linear mappings.
64struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; 65 * Space is allocated for this right after the trap table
66 * in arch/sparc64/kernel/head.S
67 */
68extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
65#endif 69#endif
66 70
67#define MAX_BANKS 32 71#define MAX_BANKS 32
@@ -190,12 +194,9 @@ inline void flush_dcache_page_impl(struct page *page)
190} 194}
191 195
192#define PG_dcache_dirty PG_arch_1 196#define PG_dcache_dirty PG_arch_1
193#define PG_dcache_cpu_shift 24UL 197#define PG_dcache_cpu_shift 32UL
194#define PG_dcache_cpu_mask (256UL - 1UL) 198#define PG_dcache_cpu_mask \
195 199 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
196#if NR_CPUS > 256
197#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
198#endif
199 200
200#define dcache_dirty_cpu(page) \ 201#define dcache_dirty_cpu(page) \
201 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) 202 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
@@ -557,26 +558,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
557 unsigned long pte, 558 unsigned long pte,
558 unsigned long mmu) 559 unsigned long mmu)
559{ 560{
560 register unsigned long func asm("%o5"); 561 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
561 register unsigned long arg0 asm("%o0"); 562
562 register unsigned long arg1 asm("%o1"); 563 if (ret != 0) {
563 register unsigned long arg2 asm("%o2");
564 register unsigned long arg3 asm("%o3");
565
566 func = HV_FAST_MMU_MAP_PERM_ADDR;
567 arg0 = vaddr;
568 arg1 = 0;
569 arg2 = pte;
570 arg3 = mmu;
571 __asm__ __volatile__("ta 0x80"
572 : "=&r" (func), "=&r" (arg0),
573 "=&r" (arg1), "=&r" (arg2),
574 "=&r" (arg3)
575 : "0" (func), "1" (arg0), "2" (arg1),
576 "3" (arg2), "4" (arg3));
577 if (arg0 != 0) {
578 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " 564 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
579 "errors with %lx\n", vaddr, 0, pte, mmu, arg0); 565 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
580 prom_halt(); 566 prom_halt();
581 } 567 }
582} 568}
@@ -1313,20 +1299,16 @@ static void __init sun4v_ktsb_init(void)
1313 1299
1314void __cpuinit sun4v_ktsb_register(void) 1300void __cpuinit sun4v_ktsb_register(void)
1315{ 1301{
1316 register unsigned long func asm("%o5"); 1302 unsigned long pa, ret;
1317 register unsigned long arg0 asm("%o0");
1318 register unsigned long arg1 asm("%o1");
1319 unsigned long pa;
1320 1303
1321 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); 1304 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1322 1305
1323 func = HV_FAST_MMU_TSB_CTX0; 1306 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1324 arg0 = NUM_KTSB_DESCR; 1307 if (ret != 0) {
1325 arg1 = pa; 1308 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1326 __asm__ __volatile__("ta %6" 1309 "errors with %lx\n", pa, ret);
1327 : "=&r" (func), "=&r" (arg0), "=&r" (arg1) 1310 prom_halt();
1328 : "0" (func), "1" (arg0), "2" (arg1), 1311 }
1329 "i" (HV_FAST_TRAP));
1330} 1312}
1331 1313
1332/* paging_init() sets up the page tables */ 1314/* paging_init() sets up the page tables */
@@ -1334,6 +1316,9 @@ void __cpuinit sun4v_ktsb_register(void)
1334extern void cheetah_ecache_flush_init(void); 1316extern void cheetah_ecache_flush_init(void);
1335extern void sun4v_patch_tlb_handlers(void); 1317extern void sun4v_patch_tlb_handlers(void);
1336 1318
1319extern void cpu_probe(void);
1320extern void central_probe(void);
1321
1337static unsigned long last_valid_pfn; 1322static unsigned long last_valid_pfn;
1338pgd_t swapper_pg_dir[2048]; 1323pgd_t swapper_pg_dir[2048];
1339 1324
@@ -1345,9 +1330,24 @@ void __init paging_init(void)
1345 unsigned long end_pfn, pages_avail, shift, phys_base; 1330 unsigned long end_pfn, pages_avail, shift, phys_base;
1346 unsigned long real_end, i; 1331 unsigned long real_end, i;
1347 1332
1333 /* These build time checks make sure that the dcache_dirty_cpu()
1334 * page->flags usage will work.
1335 *
1336 * When a page gets marked as dcache-dirty, we store the
1337 * cpu number starting at bit 32 in the page->flags. Also,
1338 * functions like clear_dcache_dirty_cpu use the cpu mask
1339 * in 13-bit signed-immediate instruction fields.
1340 */
1341 BUILD_BUG_ON(FLAGS_RESERVED != 32);
1342 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
1343 ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
1344 BUILD_BUG_ON(NR_CPUS > 4096);
1345
1348 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 1346 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1349 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; 1347 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1350 1348
1349 sstate_booting();
1350
1351 /* Invalidate both kernel TSBs. */ 1351 /* Invalidate both kernel TSBs. */
1352 memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); 1352 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
1353#ifndef CONFIG_DEBUG_PAGEALLOC 1353#ifndef CONFIG_DEBUG_PAGEALLOC
@@ -1416,8 +1416,13 @@ void __init paging_init(void)
1416 1416
1417 kernel_physical_mapping_init(); 1417 kernel_physical_mapping_init();
1418 1418
1419 real_setup_per_cpu_areas();
1420
1419 prom_build_devicetree(); 1421 prom_build_devicetree();
1420 1422
1423 if (tlb_type == hypervisor)
1424 sun4v_mdesc_init();
1425
1421 { 1426 {
1422 unsigned long zones_size[MAX_NR_ZONES]; 1427 unsigned long zones_size[MAX_NR_ZONES];
1423 unsigned long zholes_size[MAX_NR_ZONES]; 1428 unsigned long zholes_size[MAX_NR_ZONES];
@@ -1434,7 +1439,10 @@ void __init paging_init(void)
1434 zholes_size); 1439 zholes_size);
1435 } 1440 }
1436 1441
1437 device_scan(); 1442 prom_printf("Booting Linux...\n");
1443
1444 central_probe();
1445 cpu_probe();
1438} 1446}
1439 1447
1440static void __init taint_real_pages(void) 1448static void __init taint_real_pages(void)
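
The mm/init.c changes follow from raising NR_CPUS: the cpu that owns a dcache-dirty page is now stored starting at bit 32 of page->flags, with a mask sized from NR_CPUS instead of the old fixed 8 bits (and the old hard #error at 256 cpus), and the new BUILD_BUG_ON()s verify at compile time that the field still fits alongside the section/node/zone fields and that NR_CPUS stays at or below 4096. A small sketch of packing and unpacking the owner cpu with that scheme (userspace C; NR_CPUS = 1024 assumed, which is already a power of two so the mask is simply NR_CPUS - 1):

#include <stdio.h>

#define TOY_NR_CPUS		1024
#define PG_dcache_cpu_shift	32UL
/* Mask wide enough for NR_CPUS rounded up to a power of two:
 * 1024 cpus -> 10 bits -> mask 0x3ff. */
#define PG_dcache_cpu_mask	((unsigned long)TOY_NR_CPUS - 1UL)

int main(void)
{
	unsigned long flags = 0;
	unsigned long cpu = 37;

	/* Mark the page dirty in cpu 37's D-cache. */
	flags |= (cpu & PG_dcache_cpu_mask) << PG_dcache_cpu_shift;

	/* dcache_dirty_cpu(page) equivalent. */
	unsigned long owner = (flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask;

	printf("owner cpu = %lu\n", owner);
	return 0;
}

The dcache-flush assembly keeps using 13-bit signed immediates for this mask, which is why the comment added to paging_init() caps NR_CPUS at 4096.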
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 0b4213720d43..f3e0c14e9eef 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -15,6 +15,25 @@
15#include <asm/oplib.h> 15#include <asm/oplib.h>
16#include <asm/system.h> 16#include <asm/system.h>
17 17
18int prom_service_exists(const char *service_name)
19{
20 int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
21 P1275_INOUT(1, 1), service_name);
22
23 if (err)
24 return 0;
25 return 1;
26}
27
28void prom_sun4v_guest_soft_state(void)
29{
30 const char *svc = "SUNW,soft-state-supported";
31
32 if (!prom_service_exists(svc))
33 return;
34 p1275_cmd(svc, P1275_INOUT(0, 0));
35}
36
18/* Reset and reboot the machine with the command 'bcommand'. */ 37/* Reset and reboot the machine with the command 'bcommand'. */
19void prom_reboot(const char *bcommand) 38void prom_reboot(const char *bcommand)
20{ 39{